MLIR 23.0.0git
BufferizableOpInterfaceImpl.cpp
Go to the documentation of this file.
1//===- BufferizableOpInterfaceImpl.cpp - Impl. of BufferizableOpInterface -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// These BufferizableOpInterface implementations provide analysis-related
10// interface methods only. They are getting bufferized by the
11// SparseTensorConversion pass.
12
14
17#include "mlir/IR/Operation.h"
19
20using namespace mlir::bufferization;
21using namespace mlir::sparse_tensor;
22
23namespace mlir {
24namespace sparse_tensor {
25namespace {
26
template <typename ConcreteModel, typename ConcreteOp>
struct SparseBufferizableOpInterfaceExternalModel
    : public BufferizableOpInterface::ExternalModel<ConcreteModel, ConcreteOp> {
  /// Common base for all sparse_tensor external models below. Per the file
  /// header, these models provide analysis-related interface methods only;
  /// actual bufferization is performed by the SparseTensorConversion pass,
  /// so reaching this `bufferize` hook is always an error.
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    return op->emitError(
        "sparse_tensor ops must be bufferized with the sparsifier");
  }
};
37
38struct ConcatenateOpInterface
39 : SparseBufferizableOpInterfaceExternalModel<ConcatenateOpInterface,
40 sparse_tensor::ConcatenateOp> {
41 bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
42
43 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
44 const AnalysisState &state) const {
45 return true;
46 }
47
48 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
49 const AnalysisState &state) const {
50 return false;
51 }
52
53 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
54 const AnalysisState &state) const {
55 return {};
56 }
57
58 bool isWritable(Operation *op, Value value,
59 const AnalysisState &state) const {
60 return true;
61 }
62};
63
64struct ConvertOpInterface : public SparseBufferizableOpInterfaceExternalModel<
65 ConvertOpInterface, sparse_tensor::ConvertOp> {
66 bool bufferizesToAllocation(Operation *op, Value value) const {
67 // ConvertOps may allocate. (Unless they convert between two identical
68 // types, then they fold away.)
69 return true;
70 }
71
72 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
73 const AnalysisState &state) const {
74 return true;
75 }
76
77 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
78 const AnalysisState &state) const {
79 return false;
80 }
81
82 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
83 const AnalysisState &state) const {
84 return {};
85 }
86
87 bool isWritable(Operation *op, Value value,
88 const AnalysisState &state) const {
89 return true;
90 }
91};
92
93struct LoadOpInterface
94 : public SparseBufferizableOpInterfaceExternalModel<LoadOpInterface,
95 sparse_tensor::LoadOp> {
96 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
97 const AnalysisState &state) const {
98 return false;
99 }
100
101 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
102 const AnalysisState &state) const {
103 return false;
104 }
105
106 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
107 const AnalysisState &state) const {
108 return {{op->getOpResult(0), BufferRelation::Equivalent}};
109 }
110};
111
112struct NewOpInterface
113 : public SparseBufferizableOpInterfaceExternalModel<NewOpInterface,
114 sparse_tensor::NewOp> {
115 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
116 const AnalysisState &state) const {
117 // The source tensor is read to create the sparse tensor.
118 return true;
119 }
120
121 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
122 const AnalysisState &state) const {
123 // NewOp does not write to the source.
124 return false;
125 }
126
127 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
128 const AnalysisState &state) const {
129 // The result is a newly allocated sparse tensor, not an alias of the input.
130 return {};
131 }
132
133 bool resultBufferizesToMemoryWrite(Operation *op, OpResult opResult,
134 const AnalysisState &state) const {
135 // NewOps allocate but do not write.
136 return false;
137 }
138
139 bool bufferizesToAllocation(Operation *op, Value value) const { return true; }
140};
141
142struct AssembleOpInterface
143 : public SparseBufferizableOpInterfaceExternalModel<
144 AssembleOpInterface, sparse_tensor::AssembleOp> {
145 bool bufferizesToAllocation(Operation *op, Value value) const {
146 // AssembleOp reuses all the buffers instead of allocating new ones
147 return false;
148 }
149
150 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
151 const AnalysisState &state) const {
152 return true;
153 }
154
155 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
156 const AnalysisState &state) const {
157 return false;
158 }
159
160 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
161 const AnalysisState &state) const {
162 assert(op->getNumResults() == 1);
163 // AssembleOp reuses the input tensors as values/coordinates instead of
164 // creating new ones when packing into a COO format.
165 return {{op->getOpResult(0), BufferRelation::Equivalent}};
166 }
167
168 BufferRelation bufferRelation(Operation *oo, OpResult opResult,
169 const AnalysisState &state) const {
170 return BufferRelation::Unknown;
171 }
172};
173
174struct DisassembleOpInterface
175 : public SparseBufferizableOpInterfaceExternalModel<
176 DisassembleOpInterface, sparse_tensor::DisassembleOp> {
177 bool bufferizesToAllocation(Operation *op, Value value) const {
178 // The output buffer is pre-allocated by the user.
179 return false;
180 }
181
182 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
183 const AnalysisState &state) const {
184 // The first operand is the sparse tensor that we are unpacking.
185 return opOperand.getOperandNumber() == 0;
186 }
187
188 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
189 const AnalysisState &state) const {
190 // We write into the output operand.
191 assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
192 return opOperand.getOperandNumber() > 0;
193 }
194
195 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
196 const AnalysisState &state) const {
197 assert(2 * (op->getNumOperands() - 1) == op->getNumResults());
198
199 if (opOperand.getOperandNumber() == 0)
200 return {};
201 // We write directly into the output tensors and returns them.
202 return {{op->getResult(opOperand.getOperandNumber() - 1),
203 BufferRelation::Equivalent}};
204 }
205};
206
207struct ForeachOpInterface : public SparseBufferizableOpInterfaceExternalModel<
208 ForeachOpInterface, sparse_tensor::ForeachOp> {
209 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
210 const AnalysisState &state) const {
211 return true;
212 }
213
214 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
215 const AnalysisState &state) const {
216 return false;
217 }
218
219 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
220 const AnalysisState &state) const {
221 return {};
222 }
223
224 LogicalResult verifyAnalysis(Operation *op,
225 const AnalysisState &state) const {
226 // A more complex analysis (similar to scf.for) is needed if the op returns
227 // a tensor. That tensor would have to be bufferized (not implemented yet).
228 for (OpResult result : op->getResults()) {
229 if (isa<TensorType>(result.getType()))
230 return op->emitOpError("tensor results are not supported yet");
231 }
232 return success();
233 }
234};
235
236struct NumberOfEntriesOpInterface
237 : public SparseBufferizableOpInterfaceExternalModel<
238 NumberOfEntriesOpInterface, sparse_tensor::NumberOfEntriesOp> {
239 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
240 const AnalysisState &state) const {
241 return true;
242 }
243
244 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
245 const AnalysisState &state) const {
246 return false;
247 }
248
249 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
250 const AnalysisState &state) const {
251 return {};
252 }
253};
254
255struct ToCoordinatesBufferOpInterface
256 : public SparseBufferizableOpInterfaceExternalModel<
257 ToCoordinatesBufferOpInterface,
258 sparse_tensor::ToCoordinatesBufferOp> {
259 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
260 const AnalysisState &state) const {
261 return true;
262 }
263
264 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
265 const AnalysisState &state) const {
266 // Potential writes into memory through the result of
267 // `sparse_tensor.coordinates` are not considered.
268 return false;
269 }
270
271 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
272 const AnalysisState &state) const {
273 return {};
274 }
275};
276
277struct ToCoordinatesOpInterface
278 : public SparseBufferizableOpInterfaceExternalModel<
279 ToCoordinatesOpInterface, sparse_tensor::ToCoordinatesOp> {
280 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
281 const AnalysisState &state) const {
282 return true;
283 }
284
285 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
286 const AnalysisState &state) const {
287 // Potential writes into memory through the result of
288 // `sparse_tensor.coordinates` are not considered.
289 return false;
290 }
291
292 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
293 const AnalysisState &state) const {
294 return {};
295 }
296};
297
298struct ToPositionsOpInterface
299 : public SparseBufferizableOpInterfaceExternalModel<
300 ToPositionsOpInterface, sparse_tensor::ToPositionsOp> {
301 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
302 const AnalysisState &state) const {
303 return true;
304 }
305
306 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
307 const AnalysisState &state) const {
308 // Potential writes into memory through the result of
309 // `sparse_tensor.positions` are not considered.
310 return false;
311 }
312
313 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
314 const AnalysisState &state) const {
315 return {};
316 }
317};
318
319struct ToValuesOpInterface
320 : public SparseBufferizableOpInterfaceExternalModel<
321 ToValuesOpInterface, sparse_tensor::ToValuesOp> {
322 bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
323 const AnalysisState &state) const {
324 return true;
325 }
326
327 bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
328 const AnalysisState &state) const {
329 // Potential writes into memory through the result of sparse_tensor.values
330 // are not considered.
331 return false;
332 }
333
334 AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
335 const AnalysisState &state) const {
336 return {};
337 }
338};
339
340} // namespace
341} // namespace sparse_tensor
342} // namespace mlir
343
345 DialectRegistry &registry) {
346 registry.addExtension(+[](MLIRContext *ctx,
347 sparse_tensor::SparseTensorDialect *dialect) {
348 sparse_tensor::ConcatenateOp::attachInterface<ConcatenateOpInterface>(*ctx);
349 sparse_tensor::ConvertOp::attachInterface<ConvertOpInterface>(*ctx);
350 sparse_tensor::LoadOp::attachInterface<LoadOpInterface>(*ctx);
351 sparse_tensor::NewOp::attachInterface<NewOpInterface>(*ctx);
352 sparse_tensor::NumberOfEntriesOp::attachInterface<
353 NumberOfEntriesOpInterface>(*ctx);
354 sparse_tensor::AssembleOp::attachInterface<AssembleOpInterface>(*ctx);
355 sparse_tensor::DisassembleOp::attachInterface<DisassembleOpInterface>(*ctx);
356 sparse_tensor::ForeachOp::attachInterface<ForeachOpInterface>(*ctx);
357 sparse_tensor::ToCoordinatesBufferOp::attachInterface<
358 ToCoordinatesBufferOpInterface>(*ctx);
359 sparse_tensor::ToCoordinatesOp::attachInterface<ToCoordinatesOpInterface>(
360 *ctx);
361 sparse_tensor::ToPositionsOp::attachInterface<ToPositionsOpInterface>(*ctx);
362 sparse_tensor::ToValuesOp::attachInterface<ToValuesOpInterface>(*ctx);
363 });
364}
return success()
static llvm::ManagedStatic< PassManagerOptions > options
The DialectRegistry maps a dialect namespace to a constructor for the matching dialect.
bool addExtension(TypeID extensionID, std::unique_ptr< DialectExtensionBase > extension)
Add the given extension to the registry.
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
unsigned getOperandNumber()
Return which operand this is in the OpOperand list of the Operation.
Definition Value.cpp:226
OpResult getOpResult(unsigned idx)
Definition Operation.h:421
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition Operation.h:407
unsigned getNumOperands()
Definition Operation.h:346
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers that may be listening.
result_range getResults()
Definition Operation.h:415
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
unsigned getNumResults()
Return the number of results held by this operation.
Definition Operation.h:404
void registerBufferizableOpInterfaceExternalModels(DialectRegistry &registry)
Include the generated interface declarations.