MLIR  22.0.0git
BufferizableOpInterfaceImpl.cpp
Go to the documentation of this file.
//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These BufferizableOpInterface implementations provide analysis-related
// interface methods only. They are getting bufferized by the
// SparseTensorConversion pass.
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
19 
20 using namespace mlir::bufferization;
21 using namespace mlir::sparse_tensor;
22 
23 namespace mlir {
24 namespace sparse_tensor {
25 namespace {
26 
27 template <typename ConcreteModel, typename ConcreteOp>
28 struct SparseBufferizableOpInterfaceExternalModel
29  : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
30  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
32  BufferizationState &state) const {
33  return op->emitError(
34  "sparse_tensor ops must be bufferized with the sparsifier");
35  }
36 };
37 
38 struct ConcatenateOpInterface
39  : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
40  sparse_tensor::ConcatenateOp> {
41  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
42 
43  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
44  const AnalysisState &state) const {
45  return true;
46  }
47 
48  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
49  const AnalysisState &state) const {
50  return false;
51  }
52 
53  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
54  const AnalysisState &state) const {
55  return {};
56  }
57 
58  bool isWritable(Operation *op, Value value,
59  const AnalysisState &state) const {
60  return true;
61  }
62 };
63 
64 struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
65  ConvertOpInterface, sparse_tensor::ConvertOp> {
66  bool bufferizesToAllocation(Operation *op, Value value) const {
67  // ConvertOps may allocate. (Unless they convert between two identical
68  // types, then they fold away.)
69  return true;
70  }
71 
72  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
73  const AnalysisState &state) const {
74  return true;
75  }
76 
77  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
78  const AnalysisState &state) const {
79  return false;
80  }
81 
82  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
83  const AnalysisState &state) const {
84  return {};
85  }
86 
87  bool isWritable(Operation *op, Value value,
88  const AnalysisState &state) const {
89  return true;
90  }
91 };
92 
93 struct LoadOpInterface
94  : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
95  sparse_tensor::LoadOp> {
96  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
97  const AnalysisState &state) const {
98  return false;
99  }
100 
101  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
102  const AnalysisState &state) const {
103  return false;
104  }
105 
106  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
107  const AnalysisState &state) const {
108  return {{op->getOpResult(0), BufferRelation::Equivalent}};
109  }
110 };
111 
112 struct NewOpInterface
113  : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
114  sparse_tensor::NewOp> {
115  bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
116  const AnalysisState &state) const {
117  // NewOps allocate but do not write.
118  return false;
119  }
120 
121  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
122 };
123 
124 struct AssembleOpInterface
125  : public SparseBufferizableOpInterfaceExternalModel<
126  AssembleOpInterface, sparse_tensor::AssembleOp> {
127  bool bufferizesToAllocation(Operation *op, Value value) const {
128  // AssembleOp reuses all the buffers instead of allocating new ones
129  return false;
130  }
131 
132  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
133  const AnalysisState &state) const {
134  return true;
135  }
136 
137  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
138  const AnalysisState &state) const {
139  return false;
140  }
141 
142  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
143  const AnalysisState &state) const {
144  assert(op->getNumResults() == 1);
145  // AssembleOp reuses the input tensors as values/coordinates instead of
146  // creating new ones when packing into a COO format.
147  return {{op->getOpResult(0), BufferRelation::Equivalent}};
148  }
149 
150  BufferRelation bufferRelation(Operation *oo, OpResult opResult,
151  const AnalysisState &state) const {
153  }
154 };
155 
156 struct DisassembleOpInterface
157  : public SparseBufferizableOpInterfaceExternalModel<
158  DisassembleOpInterface, sparse_tensor::DisassembleOp> {
159  bool bufferizesToAllocation(Operation *op, Value value) const {
160  // The output buffer is pre-allocated by the user.
161  return false;
162  }
163 
164  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
165  const AnalysisState &state) const {
166  // The first operand is the sparse tensor that we are unpacking.
167  return opOperand.getOperandNumber() == 0;
168  }
169 
170  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
171  const AnalysisState &state) const {
172  // We write into the output operand.
173  assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
174  return opOperand.getOperandNumber() > 0;
175  }
176 
177  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
178  const AnalysisState &state) const {
179  assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
180 
181  if (opOperand.getOperandNumber() == 0)
182  return {};
183  // We write directly into the output tensors and returns them.
184  return {{op->getResult(opOperand.getOperandNumber() - 1),
186  }
187 };
188 
189 struct ForeachOpInterface : public SparseBufferizableOpInterfaceExternalModel<
190  ForeachOpInterface, sparse_tensor::ForeachOp> {
191  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
192  const AnalysisState &state) const {
193  return true;
194  }
195 
196  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
197  const AnalysisState &state) const {
198  return false;
199  }
200 
201  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
202  const AnalysisState &state) const {
203  return {};
204  }
205 
206  LogicalResult verifyAnalysis(Operation *op,
207  const AnalysisState &state) const {
208  // A more complex analysis (similar to scf.for) is needed if the op returns
209  // a tensor. That tensor would have to be bufferized (not implemented yet).
210  for (OpResult result : op->getResults()) {
211  if (isa<TensorType>(result.getType()))
212  return op->emitOpError("tensor results are not supported yet");
213  }
214  return success();
215  }
216 };
217 
218 struct NumberOfEntriesOpInterface
219  : public SparseBufferizableOpInterfaceExternalModel<
220  NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
221  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
222  const AnalysisState &state) const {
223  return true;
224  }
225 
226  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
227  const AnalysisState &state) const {
228  return false;
229  }
230 
231  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
232  const AnalysisState &state) const {
233  return {};
234  }
235 };
236 
237 struct ToCoordinatesBufferOpInterface
238  : public SparseBufferizableOpInterfaceExternalModel<
239  ToCoordinatesBufferOpInterface,
240  sparse_tensor::ToCoordinatesBufferOp> {
241  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
242  const AnalysisState &state) const {
243  return true;
244  }
245 
246  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
247  const AnalysisState &state) const {
248  // Potential writes into memory through the result of
249  // `sparse_tensor.coordinates` are not considered.
250  return false;
251  }
252 
253  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
254  const AnalysisState &state) const {
255  return {};
256  }
257 };
258 
259 struct ToCoordinatesOpInterface
260  : public SparseBufferizableOpInterfaceExternalModel<
261  ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
262  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
263  const AnalysisState &state) const {
264  return true;
265  }
266 
267  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
268  const AnalysisState &state) const {
269  // Potential writes into memory through the result of
270  // `sparse_tensor.coordinates` are not considered.
271  return false;
272  }
273 
274  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
275  const AnalysisState &state) const {
276  return {};
277  }
278 };
279 
280 struct ToPositionsOpInterface
281  : public SparseBufferizableOpInterfaceExternalModel<
282  ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
283  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
284  const AnalysisState &state) const {
285  return true;
286  }
287 
288  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
289  const AnalysisState &state) const {
290  // Potential writes into memory through the result of
291  // `sparse_tensor.positions` are not considered.
292  return false;
293  }
294 
295  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
296  const AnalysisState &state) const {
297  return {};
298  }
299 };
300 
301 struct ToValuesOpInterface
302  : public SparseBufferizableOpInterfaceExternalModel<
303  ToValuesOpInterface, sparse_tensor::ToValuesOp> {
304  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
305  const AnalysisState &state) const {
306  return true;
307  }
308 
309  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
310  const AnalysisState &state) const {
311  // Potential writes into memory through the result of sparse_tensor.values
312  // are not considered.
313  return false;
314  }
315 
316  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
317  const AnalysisState &state) const {
318  return {};
319  }
320 };
321 
} // namespace
} // namespace sparse_tensor
} // namespace mlir
325 
327  DialectRegistry &registry) {
328  registry.addExtension(+[](MLIRContext *ctx,
329  sparse_tensor::SparseTensorDialect *dialect) {
330  sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
331  sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
332  sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
333  sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
334  sparse_tensor::NumberOfEntriesOp::attachInterface<
335  NumberOfEntriesOpInterface>(*ctx);
336  sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
337  sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
338  sparse_tensor::ForeachOp::attachInterface<ForeachOpInterface>(*ctx);
339  sparse_tensor::ToCoordinatesBufferOp::attachInterface<
340  ToCoordinatesBufferOpInterface>(*ctx);
341  sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
342  *ctx);
343  sparse_tensor::ToPositionsOp::attachInterface<ToPositionsOpInterface>(*ctx);
344  sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
345  });
346 }
static llvm::ManagedStatic< PassManagerOptions > options
The DialectRegistry maps a dialect namespace to a constructor for the matching dialect.
bool addExtension(TypeID extensionID, std::unique_ptr< DialectExtensionBase > extension)
Add the given extension to the registry.
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:60
AnalysisState provides a variety of helper functions for dealing with tensor values.
BufferizationState provides information about the state of the IR during the bufferization process.
BufferRelation
Specifies a fine-grain relationship between buffers to enable more analysis.
void registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry)
Include the generated interface declarations.
Options for BufferizableOpInterface-based bufferization.