//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These BufferizableOpInterface implementations provide analysis-related
// interface methods only. The actual bufferization is performed by the
// SparseTensorConversion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir::bufferization;
using namespace mlir::sparse_tensor;

namespace mlir {
namespace sparse_tensor {
namespace {

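// Shared base class for the external models below. These models supply only
// the analysis-related interface methods; the `bufferize` hook acts as a
// guard that reports an error if ordinary bufferization ever tries to rewrite
// a sparse_tensor op, since the sparsifier performs the actual lowering.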
template <typename ConcreteModel, typename ConcreteOp>
struct SparseBufferizableOpInterfaceExternalModel
    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    return op->emitError(
        "sparse_tensor ops must be bufferized with the sparsifier");
  }
};

struct ConcatenateOpInterface
    : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
                                                 sparse_tensor::ConcatenateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                                ConvertOpInterface, sparse_tensor::ConvertOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // ConvertOps may allocate. (Unless they convert between two identical
    // types, in which case they fold away.)
    return true;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }

  bool isWritable(Operation *op, Value value,
                  const AnalysisState &state) const {
    return true;
  }
};

struct LoadOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
                                                        sparse_tensor::LoadOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return false;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }
};

struct NewOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
                                                        sparse_tensor::NewOp> {
  bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
                                     const AnalysisState &state) const {
    // NewOps allocate but do not write.
    return false;
  }

  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
};

struct AssembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          AssembleOpInterface, sparse_tensor::AssembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // AssembleOp reuses all the buffers instead of allocating new ones.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(op->getNumResults() == 1);
    // AssembleOp reuses the input tensors as values/coordinates instead of
    // creating new ones when packing into a COO format.
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  BufferRelation bufferRelation(Operation *op, OpResult opResult,
                                const AnalysisState &state) const {
    return BufferRelation::Unknown;
  }
};

struct DisassembleOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          DisassembleOpInterface, sparse_tensor::DisassembleOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    // The output buffer is pre-allocated by the user.
    return false;
  }

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // The first operand is the sparse tensor that we are unpacking.
    return opOperand.getOperandNumber() == 0;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // We write into the output operands.
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
    return opOperand.getOperandNumber() > 0;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    assert(2 * (op->getNumOperands() - 1) == op->getNumResults());

    if (opOperand.getOperandNumber() == 0)
      return {};
    // We write directly into the output tensors and return them.
    return {{op->getResult(opOperand.getOperandNumber() - 1),
             BufferRelation::Equivalent}};
  }
};

struct InsertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
                               InsertOpInterface, sparse_tensor::InsertOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // InsertOp writes to memory.
    return true;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    // InsertOp returns an alias of its operand.
    assert(op->getNumResults() == 1);
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }
};

struct NumberOfEntriesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToCoordinatesBufferOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesBufferOpInterface,
          sparse_tensor::ToCoordinatesBufferOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates_buffer` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToCoordinatesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.coordinates` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToPositionsOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of
    // `sparse_tensor.positions` are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

struct ToValuesOpInterface
    : public SparseBufferizableOpInterfaceExternalModel<
          ToValuesOpInterface, sparse_tensor::ToValuesOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return true;
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    // Potential writes into memory through the result of sparse_tensor.values
    // are not considered.
    return false;
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    return {};
  }
};

} // namespace
} // namespace sparse_tensor
} // namespace mlir

void mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx,
                            sparse_tensor::SparseTensorDialect *dialect) {
    sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
    sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
    sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
    sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
    sparse_tensor::InsertOp::attachInterface<InsertOpInterface>(*ctx);
    sparse_tensor::NumberOfEntriesOp::attachInterface<
        NumberOfEntriesOpInterface>(*ctx);
    sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
    sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesBufferOp::attachInterface<
        ToCoordinatesBufferOpInterface>(*ctx);
    sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
        *ctx);
    sparse_tensor::ToPositionsOp::attachInterface<ToPositionsOpInterface>(*ctx);
    sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
  });
}
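
// Example usage (a minimal sketch; the surrounding pass-pipeline setup is
// omitted and the local names `registry`/`context` are chosen purely for
// illustration): clients attach these external models to the registry before
// constructing an MLIRContext, so the analysis methods above are visible to
// bufferization.
//
//   DialectRegistry registry;
//   registry.insert<sparse_tensor::SparseTensorDialect>();
//   mlir::sparse_tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   MLIRContext context(registry);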