MLIR  20.0.0git
SparsificationAndBufferizationPass.cpp
Go to the documentation of this file.
1 //===- SparsificationAndBufferizationPass.cpp - Tensor to Memref Lowering -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
10 
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"

#include "mlir/Dialect/Bufferization/IR/BufferDeallocationOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
30 
31 using namespace mlir;
32 
33 namespace mlir {
34 
35 #define GEN_PASS_DEF_SPARSIFICATIONANDBUFFERIZATION
36 #include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"
37 
38 namespace sparse_tensor {
39 
40 /// Return `true` if one of the given types is a sparse tensor type.
41 static bool containsSparseTensor(TypeRange types) {
42  for (Type t : types)
43  if (isa<TensorType>(t) && getSparseTensorEncoding(t))
44  return true;
45  return false;
46 }
47 
48 /// A pass that lowers tensor ops to memref ops, regardless of whether they are
49 /// dense or sparse.
50 ///
51 /// One-Shot Analysis is used to detect RaW conflicts and to insert buffer
52 /// copies of the tensor level (`insertTensorCopies`). Afterwards, the lowering
53 /// of tensor ops to memref ops follows a different code path depending on
54 /// whether the op is sparse or dense:
55 ///
56 /// * Sparse tensor ops are lowered through Sparsification and follow-up pass
57 /// that lowers sparse_tensor dialect ops.
58 /// * Dense tensor ops are lowered through BufferizableOpInterface
59 /// implementations.
61  : public impl::SparsificationAndBufferizationBase<
62  SparsificationAndBufferizationPass> {
63 public:
64  // Private pass options only.
66  const bufferization::OneShotBufferizationOptions &bufferizationOptions,
67  const SparsificationOptions &sparsificationOptions,
68  bool createSparseDeallocs, bool enableRuntimeLibrary,
69  bool enableBufferInitialization)
70  : bufferizationOptions(bufferizationOptions),
71  sparsificationOptions(sparsificationOptions),
72  createSparseDeallocs(createSparseDeallocs),
73  enableRuntimeLibrary(enableRuntimeLibrary),
74  enableBufferInitialization(enableBufferInitialization) {}
75  // Private pass options and visible pass options.
77  const bufferization::OneShotBufferizationOptions &bufferizationOptions,
78  const SparsificationOptions &sparsificationOptions,
79  bool createSparseDeallocs, bool enableRuntimeLibrary,
80  bool enableBufferInitialization, unsigned vl, bool vla, bool index32,
81  bool gpu, SparseEmitStrategy emitStrategy,
82  SparseParallelizationStrategy parallelizationStrategy)
83  : bufferizationOptions(bufferizationOptions),
84  sparsificationOptions(sparsificationOptions),
85  createSparseDeallocs(createSparseDeallocs),
86  enableRuntimeLibrary(enableRuntimeLibrary),
87  enableBufferInitialization(enableBufferInitialization) {
88  // Set the visible pass options explicitly.
89  vectorLength = vl;
90  enableVLAVectorization = vla;
91  enableSIMDIndex32 = index32;
92  enableGPULibgen = gpu;
93  sparseEmitStrategy = emitStrategy;
94  parallelization = parallelizationStrategy;
95  }
96 
97  /// Bufferize all dense ops. This assumes that no further analysis is needed
98  /// and that all required buffer copies were already inserted by
99  /// `insertTensorCopies` in the form of `bufferization.alloc_tensor` ops.
100  LogicalResult runDenseBufferization() {
102  bufferizationOptions;
103  // Skip all sparse ops.
104  updatedOptions.opFilter.denyOperation([&](Operation *op) {
107  return true;
108  if (auto funcOp = dyn_cast<func::FuncOp>(op)) {
109  FunctionType funcType = funcOp.getFunctionType();
110  if (containsSparseTensor(funcType.getInputs()) ||
111  containsSparseTensor(funcType.getResults()))
112  return true;
113  }
114  return false;
115  });
116 
117  if (failed(bufferization::bufferizeModuleOp(cast<ModuleOp>(getOperation()),
118  updatedOptions)))
119  return failure();
120 
122  return success();
123  }
124 
125  void runOnOperation() override {
126  // Overrides the default emit strategy using user-provided value.
127  this->sparsificationOptions.sparseEmitStrategy = sparseEmitStrategy;
128 
129  // Overrides the default parallelization strategy using user-provided value.
130  this->sparsificationOptions.parallelizationStrategy = parallelization;
131 
132  // Run enabling transformations.
133  {
134  OpPassManager pm("builtin.module");
136  pm.addNestedPass<func::FuncOp>(
138  if (failed(runPipeline(pm, getOperation())))
139  return signalPassFailure();
140  }
141 
142  // Insert tensor copies. This step runs One-Shot Analysis (which analyzes
143  // SSA use-def chains of tensor IR) and decides where buffer copies are
144  // needed and where buffers can be written to in-place. These decisions are
145  // materialized in the IR in the form of `bufferization.alloc_tensor` ops.
146  //
147  // Note: All following steps in this pass must be careful not to modify the
148  // structure of the IR (i.e., tensor use-def chains), as that could
149  // invalidate the results of the analysis. From now on, only small and
150  // localized rewrites are allowed, such as replacing a tensor op with its
151  // memref equivalent.
152  if (failed(bufferization::insertTensorCopies(getOperation(),
153  bufferizationOptions)))
154  return signalPassFailure();
155 
156  // Option `testAnalysisOnly` is a debug/testing flag. If set, the results of
157  // OneShotAnalysis are added to the IR via attributes. In that case, do not
158  // continue with the remaining pipeline.
159  if (bufferizationOptions.testAnalysisOnly)
160  return;
161 
162  // Bufferize all sparse ops. No further analysis is needed. All required
163  // buffer copies were already inserted by `insertTensorCopies` in the form
164  // of `bufferization.alloc_tensor` ops.
165  {
166  OpPassManager pm("builtin.module");
167  if (enableGPULibgen)
168  pm.addPass(createSparseGPUCodegenPass(0, enableRuntimeLibrary));
170  pm.addPass(createSparsificationPass(sparsificationOptions));
171  if (sparsificationOptions.sparseEmitStrategy ==
173  pm.addNestedPass<func::FuncOp>(createSparseSpaceCollapsePass());
175  }
176 
178  pm.addPass(createLowerSparseOpsToForeachPass(enableRuntimeLibrary,
179  /*enableConvert=*/true));
180  pm.addPass(
182  pm.addNestedPass<func::FuncOp>(createLowerForeachToSCFPass());
184  if (vectorLength > 0) {
186  vectorLength, enableVLAVectorization, enableSIMDIndex32));
187  }
188  if (enableRuntimeLibrary) {
190  } else {
191  pm.addPass(createSparseTensorCodegenPass(createSparseDeallocs,
192  enableBufferInitialization));
193  pm.addPass(createSparseBufferRewritePass(enableBufferInitialization));
194  }
195  if (failed(runPipeline(pm, getOperation())))
196  return signalPassFailure();
197  }
198 
199  // Bufferize all dense ops.
200  if (failed(runDenseBufferization()))
201  signalPassFailure();
202  }
203 
204 private:
205  bufferization::OneShotBufferizationOptions bufferizationOptions;
206  SparsificationOptions sparsificationOptions;
207  bool createSparseDeallocs;
208  bool enableRuntimeLibrary;
209  bool enableBufferInitialization;
210 };
211 
212 } // namespace sparse_tensor
213 } // namespace mlir
214 
217  using namespace mlir::bufferization;
219  options.bufferizeFunctionBoundaries = true;
220  options.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
221  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
222  const BufferizationOptions &options) {
224  cast<TensorType>(value.getType()), memorySpace);
225  };
226  if (analysisOnly) {
227  options.testAnalysisOnly = true;
228  options.printConflicts = true;
229  }
230  // Since this mini-pipeline may be used in alternative pipelines (viz.
231  // different from the default "sparsifier" pipeline) where unknown ops
232  // are handled by alternative bufferization methods that are downstream
233  // of this mini-pipeline, we allow unknown ops by default (failure to
234  // bufferize is eventually apparent by failing to convert to LLVM IR).
235  options.allowUnknownOps = true;
236  return options;
237 }
238 
239 std::unique_ptr<mlir::Pass> mlir::createSparsificationAndBufferizationPass() {
240  SparsificationOptions sparseOptions;
241  return std::make_unique<
243  getBufferizationOptionsForSparsification(/*analysisOnly=*/false),
244  sparseOptions,
245  /*createSparseDeallocs=*/false,
246  /*enableRuntimeLibrary=*/false,
247  /*enableBufferInitialization=*/false);
248 }
249 
251  const bufferization::OneShotBufferizationOptions &bufferizationOptions,
252  const SparsificationOptions &sparsificationOptions,
253  bool createSparseDeallocs, bool enableRuntimeLibrary,
254  bool enableBufferInitialization, unsigned vectorLength,
255  bool enableVLAVectorization, bool enableSIMDIndex32, bool enableGPULibgen,
256  SparseEmitStrategy emitStrategy,
257  SparseParallelizationStrategy parallelizationStrategy) {
258  return std::make_unique<
260  bufferizationOptions, sparsificationOptions, createSparseDeallocs,
261  enableRuntimeLibrary, enableBufferInitialization, vectorLength,
262  enableVLAVectorization, enableSIMDIndex32, enableGPULibgen, emitStrategy,
263  parallelizationStrategy);
264 }
static llvm::ManagedStatic< PassManagerOptions > options
Attributes are known-constant values of operations.
Definition: Attributes.h:25
This class represents a pass manager that runs passes on either a specific operation type,...
Definition: PassManager.h:47
void addPass(std::unique_ptr< Pass > pass)
Add the given pass to this pass manager.
Definition: Pass.cpp:363
void addNestedPass(std::unique_ptr< Pass > pass)
Add the given pass to a nested pass manager for the given operation kind OpT.
Definition: PassManager.h:116
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
operand_range getOperands()
Returns an iterator on the underlying Value's.
Definition: Operation.h:378
result_range getResults()
Definition: Operation.h:415
This class provides an abstraction over the various different ranges of value types.
Definition: TypeRange.h:36
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
Definition: Types.h:74
This class represents an instance of an SSA value in the MLIR system, representing a computable value that has a type and a set of users.
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:129
void denyOperation()
Deny the given ops.
A pass that lowers tensor ops to memref ops, regardless of whether they are dense or sparse.
SparsificationAndBufferizationPass(const bufferization::OneShotBufferizationOptions &bufferizationOptions, const SparsificationOptions &sparsificationOptions, bool createSparseDeallocs, bool enableRuntimeLibrary, bool enableBufferInitialization, unsigned vl, bool vla, bool index32, bool gpu, SparseEmitStrategy emitStrategy, SparseParallelizationStrategy parallelizationStrategy)
SparsificationAndBufferizationPass(const bufferization::OneShotBufferizationOptions &bufferizationOptions, const SparsificationOptions &sparsificationOptions, bool createSparseDeallocs, bool enableRuntimeLibrary, bool enableBufferInitialization)
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
LogicalResult insertTensorCopies(Operation *op, const OneShotBufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Resolve RaW and other conflicts by inserting bufferization.alloc_tensor ops.
void removeBufferizationAttributesInModule(ModuleOp moduleOp)
Remove bufferization attributes on every FuncOp arguments in the ModuleOp.
std::unique_ptr< Pass > createEmptyTensorToAllocTensorPass()
Create a pass that rewrites tensor.empty to bufferization.alloc_tensor.
llvm::LogicalResult bufferizeModuleOp(ModuleOp moduleOp, const OneShotBufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Bufferize op and its nested ops that implement BufferizableOpInterface.
static bool containsSparseTensor(TypeRange types)
Return true if one of the given types is a sparse tensor type.
SparseTensorEncodingAttr getSparseTensorEncoding(Type type)
Convenience method to get a sparse encoding attribute from a type.
Include the generated interface declarations.
std::unique_ptr< Pass > createSparseVectorizationPass()
std::unique_ptr< Pass > createLowerSparseOpsToForeachPass()
std::unique_ptr< Pass > createSparseTensorCodegenPass()
std::unique_ptr< Pass > createSparseGPUCodegenPass()
std::unique_ptr< Pass > createSparseSpaceCollapsePass()
std::unique_ptr< Pass > createLoopInvariantCodeMotionPass()
Creates a loop invariant code motion pass that hoists loop invariant instructions out of the loop.
bufferization::OneShotBufferizationOptions getBufferizationOptionsForSparsification(bool analysisOnly)
std::unique_ptr< Pass > createSparseReinterpretMapPass()
std::unique_ptr< Pass > createSparseTensorConversionPass()
std::unique_ptr< Pass > createSparseBufferRewritePass()
std::unique_ptr< Pass > createSparsificationAndBufferizationPass()
SparseParallelizationStrategy
Defines a parallelization strategy.
Definition: Passes.h:36
SparseEmitStrategy
Defines a scope for reinterpret map pass.
Definition: Passes.h:52
std::unique_ptr< Pass > createPreSparsificationRewritePass()
std::unique_ptr< Pass > createLowerForeachToSCFPass()
std::unique_ptr< Pass > createLowerSparseIterationToSCFPass()
std::unique_ptr< Pass > createStageSparseOperationsPass()
std::unique_ptr< Pass > createSparsificationPass()
Options for the Sparsification pass.
Definition: Passes.h:93
SparseEmitStrategy sparseEmitStrategy
Definition: Passes.h:107
SparseParallelizationStrategy parallelizationStrategy
Definition: Passes.h:106
Options for BufferizableOpInterface-based bufferization.
bool testAnalysisOnly
If set to true, does not modify the IR apart from adding attributes (for checking the results of the analysis).
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
Options for analysis-enabled bufferization.