//===- SparseTensorPasses.cpp - Pass for autogen sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Func/Transforms/FuncConversions.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/Transforms/Patterns.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

namespace mlir {
#define GEN_PASS_DEF_SPARSEASSEMBLER
#define GEN_PASS_DEF_SPARSEREINTERPRETMAP
#define GEN_PASS_DEF_PRESPARSIFICATIONREWRITE
#define GEN_PASS_DEF_SPARSIFICATIONPASS
#define GEN_PASS_DEF_LOWERSPARSEOPSTOFOREACH
#define GEN_PASS_DEF_LOWERFOREACHTOSCF
#define GEN_PASS_DEF_SPARSETENSORCONVERSIONPASS
#define GEN_PASS_DEF_SPARSETENSORCODEGEN
#define GEN_PASS_DEF_SPARSEBUFFERREWRITE
#define GEN_PASS_DEF_SPARSEVECTORIZATION
#define GEN_PASS_DEF_SPARSEGPUCODEGEN
#define GEN_PASS_DEF_STAGESPARSEOPERATIONS
#define GEN_PASS_DEF_STORAGESPECIFIERTOLLVM
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h.inc"
} // namespace mlir

using namespace mlir;
using namespace mlir::sparse_tensor;

namespace {

//===----------------------------------------------------------------------===//
// Passes implementation.
//===----------------------------------------------------------------------===//

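// Rewrites public entry functions that use sparse tensor types so that
// external callers can exchange the underlying data through conventional
// buffers (see populateSparseAssembler).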
struct SparseAssembler : public impl::SparseAssemblerBase<SparseAssembler> {
  SparseAssembler() = default;
  SparseAssembler(const SparseAssembler &pass) = default;

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateSparseAssembler(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

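// Reinterprets the dimension-to-level mapping of sparse tensor types within
// the given scope, so that subsequent rewriting can operate on simplified
// (demapped) types.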
struct SparseReinterpretMap
    : public impl::SparseReinterpretMapBase<SparseReinterpretMap> {
  SparseReinterpretMap() = default;
  SparseReinterpretMap(const SparseReinterpretMap &pass) = default;
  SparseReinterpretMap(const SparseReinterpretMapOptions &options) {
    scope = options.scope;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateSparseReinterpretMap(patterns, scope);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

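// Applies rewriting rules that prepare and simplify the IR before the
// actual sparsification pass runs.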
struct PreSparsificationRewritePass
    : public impl::PreSparsificationRewriteBase<PreSparsificationRewritePass> {
  PreSparsificationRewritePass() = default;
  PreSparsificationRewritePass(const PreSparsificationRewritePass &pass) =
      default;

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populatePreSparsificationRewriting(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

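// The core sparsification pass: implements linalg ops on sparse tensors as
// explicit loop nests, steered by the parallelization and emit strategies
// in SparsificationOptions.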
struct SparsificationPass
    : public impl::SparsificationPassBase<SparsificationPass> {
  SparsificationPass() = default;
  SparsificationPass(const SparsificationPass &pass) = default;
  SparsificationPass(const SparsificationOptions &options) {
    parallelization = options.parallelizationStrategy;
    sparseEmitStrategy = options.sparseEmitStrategy;
    enableRuntimeLibrary = options.enableRuntimeLibrary;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    // Translate strategy flags to strategy options.
    SparsificationOptions options(parallelization, sparseEmitStrategy,
                                  enableRuntimeLibrary);
    // Apply sparsification and cleanup rewriting.
    RewritePatternSet patterns(ctx);
    populateSparsificationPatterns(patterns, options);
    scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

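// Breaks down complex sparse tensor operations into simpler staged
// operations (see populateStageSparseOperationsPatterns).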
struct StageSparseOperationsPass
    : public impl::StageSparseOperationsBase<StageSparseOperationsPass> {
  StageSparseOperationsPass() = default;
  StageSparseOperationsPass(const StageSparseOperationsPass &pass) = default;
  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateStageSparseOperationsPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

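// Lowers high-level sparse tensor operations into sparse_tensor.foreach
// loops; enableConvert controls whether convert ops are rewritten as well.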
struct LowerSparseOpsToForeachPass
    : public impl::LowerSparseOpsToForeachBase<LowerSparseOpsToForeachPass> {
  LowerSparseOpsToForeachPass() = default;
  LowerSparseOpsToForeachPass(const LowerSparseOpsToForeachPass &pass) =
      default;
  LowerSparseOpsToForeachPass(bool enableRT, bool convert) {
    enableRuntimeLibrary = enableRT;
    enableConvert = convert;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateLowerSparseOpsToForeachPatterns(patterns, enableRuntimeLibrary,
                                            enableConvert);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

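// Lowers sparse_tensor.foreach operations into scf control flow.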
struct LowerForeachToSCFPass
    : public impl::LowerForeachToSCFBase<LowerForeachToSCFPass> {
  LowerForeachToSCFPass() = default;
  LowerForeachToSCFPass(const LowerForeachToSCFPass &pass) = default;

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateLowerForeachToSCFPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

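// The "runtime library" lowering path: converts sparse tensor types into
// opaque pointers and sparse tensor operations into calls to the sparse
// runtime support library.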
struct SparseTensorConversionPass
    : public impl::SparseTensorConversionPassBase<SparseTensorConversionPass> {
  SparseTensorConversionPass() = default;
  SparseTensorConversionPass(const SparseTensorConversionPass &pass) = default;

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    SparseTensorTypeToPtrConverter converter;
    ConversionTarget target(*ctx);
    // Everything in the sparse dialect must go!
    target.addIllegalDialect<SparseTensorDialect>();
    // All dynamic rules below accept new function, call, return, and various
    // tensor and bufferization operations as legal output of the rewriting,
    // provided that all sparse tensor types have been fully rewritten.
    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
      return converter.isSignatureLegal(op.getFunctionType());
    });
    target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
      return converter.isSignatureLegal(op.getCalleeType());
    });
    target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::DimOp>([&](tensor::DimOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<tensor::CastOp>([&](tensor::CastOp op) {
      return converter.isLegal(op.getSource().getType()) &&
             converter.isLegal(op.getDest().getType());
    });
    target.addDynamicallyLegalOp<tensor::ExpandShapeOp>(
        [&](tensor::ExpandShapeOp op) {
          return converter.isLegal(op.getSrc().getType()) &&
                 converter.isLegal(op.getResult().getType());
        });
    target.addDynamicallyLegalOp<tensor::CollapseShapeOp>(
        [&](tensor::CollapseShapeOp op) {
          return converter.isLegal(op.getSrc().getType()) &&
                 converter.isLegal(op.getResult().getType());
        });
    target.addDynamicallyLegalOp<bufferization::AllocTensorOp>(
        [&](bufferization::AllocTensorOp op) {
          return converter.isLegal(op.getType());
        });
    target.addDynamicallyLegalOp<bufferization::DeallocTensorOp>(
        [&](bufferization::DeallocTensorOp op) {
          return converter.isLegal(op.getTensor().getType());
        });
    // The following operations and dialects may be introduced by the
    // rewriting rules, and are therefore marked as legal.
    target.addLegalOp<complex::ConstantOp, complex::NotEqualOp, linalg::FillOp,
                      linalg::YieldOp, tensor::ExtractOp,
                      tensor::FromElementsOp>();
    target.addLegalDialect<
        arith::ArithDialect, bufferization::BufferizationDialect,
        LLVM::LLVMDialect, memref::MemRefDialect, scf::SCFDialect>();

    // Populate with rules and apply rewriting rules.
    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
                                                                   converter);
    populateCallOpTypeConversionPattern(patterns, converter);
    scf::populateSCFStructuralTypeConversionsAndLegality(converter, patterns,
                                                         target);
    populateSparseTensorConversionPatterns(converter, patterns);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};

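// The "codegen" lowering path: converts sparse tensor types into actual
// buffers and implements sparse tensor operations directly on those buffers,
// avoiding the runtime support library.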
struct SparseTensorCodegenPass
    : public impl::SparseTensorCodegenBase<SparseTensorCodegenPass> {
  SparseTensorCodegenPass() = default;
  SparseTensorCodegenPass(const SparseTensorCodegenPass &pass) = default;
  SparseTensorCodegenPass(bool createDeallocs, bool enableInit) {
    createSparseDeallocs = createDeallocs;
    enableBufferInitialization = enableInit;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    SparseTensorTypeToBufferConverter converter;
    ConversionTarget target(*ctx);
    // Most ops in the sparse dialect must go!
    target.addIllegalDialect<SparseTensorDialect>();
    target.addLegalOp<SortOp>();
    target.addLegalOp<PushBackOp>();
    // Storage specifier outlives the sparse tensor pipeline.
    target.addLegalOp<GetStorageSpecifierOp>();
    target.addLegalOp<SetStorageSpecifierOp>();
    target.addLegalOp<StorageSpecifierInitOp>();
    // Note that tensor::FromElementsOp might be yielded after lowering unpack.
    target.addLegalOp<tensor::FromElementsOp>();
    // All dynamic rules below accept new function, call, return, and
    // various tensor and bufferization operations as legal output of the
    // rewriting, provided that all sparse tensor types have been fully
    // rewritten.
    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
      return converter.isSignatureLegal(op.getFunctionType());
    });
    target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
      return converter.isSignatureLegal(op.getCalleeType());
    });
    target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addDynamicallyLegalOp<bufferization::AllocTensorOp>(
        [&](bufferization::AllocTensorOp op) {
          return converter.isLegal(op.getType());
        });
    target.addDynamicallyLegalOp<bufferization::DeallocTensorOp>(
        [&](bufferization::DeallocTensorOp op) {
          return converter.isLegal(op.getTensor().getType());
        });
    // The following operations and dialects may be introduced by the
    // codegen rules, and are therefore marked as legal.
    target.addLegalOp<linalg::FillOp>();
    target.addLegalDialect<
        arith::ArithDialect, bufferization::BufferizationDialect,
        complex::ComplexDialect, memref::MemRefDialect, scf::SCFDialect>();
    target.addLegalOp<UnrealizedConversionCastOp>();
    // Populate with rules and apply rewriting rules.
    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
                                                                   converter);
    scf::populateSCFStructuralTypeConversionsAndLegality(converter, patterns,
                                                         target);
    populateSparseTensorCodegenPatterns(
        converter, patterns, createSparseDeallocs, enableBufferInitialization);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};

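// Rewrites sparse buffer operations that remain after codegen (such as sort
// and push_back) into actual buffer manipulation code.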
struct SparseBufferRewritePass
    : public impl::SparseBufferRewriteBase<SparseBufferRewritePass> {
  SparseBufferRewritePass() = default;
  SparseBufferRewritePass(const SparseBufferRewritePass &pass) = default;
  SparseBufferRewritePass(bool enableInit) {
    enableBufferInitialization = enableInit;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateSparseBufferRewriting(patterns, enableBufferInitialization);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

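// Vectorizes loops produced by sparsification with the given vector length
// (zero is rejected), optionally using scalable (VLA) vectors and 32-bit
// indices within SIMD code.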
struct SparseVectorizationPass
    : public impl::SparseVectorizationBase<SparseVectorizationPass> {
  SparseVectorizationPass() = default;
  SparseVectorizationPass(const SparseVectorizationPass &pass) = default;
  SparseVectorizationPass(unsigned vl, bool vla, bool sidx32) {
    vectorLength = vl;
    enableVLAVectorization = vla;
    enableSIMDIndex32 = sidx32;
  }

  void runOnOperation() override {
    if (vectorLength == 0)
      return signalPassFailure();
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    populateSparseVectorizationPatterns(
        patterns, vectorLength, enableVLAVectorization, enableSIMDIndex32);
    vector::populateVectorToVectorCanonicalizationPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

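// Offloads sparse kernels to the GPU: a zero thread count selects the
// library-based path (populateSparseGPULibgenPatterns), otherwise custom
// GPU code is generated with the given number of threads.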
struct SparseGPUCodegenPass
    : public impl::SparseGPUCodegenBase<SparseGPUCodegenPass> {
  SparseGPUCodegenPass() = default;
  SparseGPUCodegenPass(const SparseGPUCodegenPass &pass) = default;
  SparseGPUCodegenPass(unsigned nT, bool enableRT) {
    numThreads = nT;
    enableRuntimeLibrary = enableRT;
  }

  void runOnOperation() override {
    auto *ctx = &getContext();
    RewritePatternSet patterns(ctx);
    if (numThreads == 0)
      populateSparseGPULibgenPatterns(patterns, enableRuntimeLibrary);
    else
      populateSparseGPUCodegenPatterns(patterns, numThreads);
    (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
  }
};

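// Lowers the remaining sparse_tensor.storage_specifier operations and types
// into the LLVM dialect.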
struct StorageSpecifierToLLVMPass
    : public impl::StorageSpecifierToLLVMBase<StorageSpecifierToLLVMPass> {
  StorageSpecifierToLLVMPass() = default;

  void runOnOperation() override {
    auto *ctx = &getContext();
    ConversionTarget target(*ctx);
    RewritePatternSet patterns(ctx);
    StorageSpecifierToLLVMTypeConverter converter;

    // All ops in the sparse dialect must go!
    target.addIllegalDialect<SparseTensorDialect>();
    target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
      return converter.isSignatureLegal(op.getFunctionType());
    });
    target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
      return converter.isSignatureLegal(op.getCalleeType());
    });
    target.addDynamicallyLegalOp<func::ReturnOp>([&](func::ReturnOp op) {
      return converter.isLegal(op.getOperandTypes());
    });
    target.addLegalDialect<arith::ArithDialect, LLVM::LLVMDialect>();

    populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
                                                                   converter);
    populateCallOpTypeConversionPattern(patterns, converter);
    populateBranchOpInterfaceTypeConversionPattern(patterns, converter);
    populateReturnOpTypeConversionPattern(patterns, converter);
    scf::populateSCFStructuralTypeConversionsAndLegality(converter, patterns,
                                                         target);
    populateStorageSpecifierToLLVMPatterns(converter, patterns);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};

} // namespace

//===----------------------------------------------------------------------===//
// Pass creation methods.
//===----------------------------------------------------------------------===//

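// A minimal sketch of composing these factory methods into a pipeline
// (hypothetical client code; the in-tree sparsifier pipeline assembles a
// longer pass sequence):
//
//   PassManager pm(module->getContext());
//   pm.addPass(createPreSparsificationRewritePass());
//   pm.addPass(createSparsificationPass());
//   pm.addPass(createSparseTensorConversionPass());
//   if (failed(pm.run(module)))
//     // ... handle the failure ...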
std::unique_ptr<Pass> mlir::createSparseAssembler() {
  return std::make_unique<SparseAssembler>();
}

std::unique_ptr<Pass> mlir::createSparseReinterpretMapPass() {
  return std::make_unique<SparseReinterpretMap>();
}

std::unique_ptr<Pass>
mlir::createSparseReinterpretMapPass(ReinterpretMapScope scope) {
  SparseReinterpretMapOptions options;
  options.scope = scope;
  return std::make_unique<SparseReinterpretMap>(options);
}

std::unique_ptr<Pass> mlir::createPreSparsificationRewritePass() {
  return std::make_unique<PreSparsificationRewritePass>();
}

std::unique_ptr<Pass> mlir::createSparsificationPass() {
  return std::make_unique<SparsificationPass>();
}

std::unique_ptr<Pass>
mlir::createSparsificationPass(const SparsificationOptions &options) {
  return std::make_unique<SparsificationPass>(options);
}

std::unique_ptr<Pass> mlir::createStageSparseOperationsPass() {
  return std::make_unique<StageSparseOperationsPass>();
}

std::unique_ptr<Pass> mlir::createLowerSparseOpsToForeachPass() {
  return std::make_unique<LowerSparseOpsToForeachPass>();
}

std::unique_ptr<Pass>
mlir::createLowerSparseOpsToForeachPass(bool enableRT, bool enableConvert) {
  return std::make_unique<LowerSparseOpsToForeachPass>(enableRT, enableConvert);
}
436 
437 std::unique_ptr<Pass> mlir::createLowerForeachToSCFPass() {
438  return std::make_unique<LowerForeachToSCFPass>();
439 }
440 
441 std::unique_ptr<Pass> mlir::createSparseTensorConversionPass() {
442  return std::make_unique<SparseTensorConversionPass>();
443 }
444 
445 std::unique_ptr<Pass> mlir::createSparseTensorCodegenPass() {
446  return std::make_unique<SparseTensorCodegenPass>();
447 }
448 
449 std::unique_ptr<Pass>
450 mlir::createSparseTensorCodegenPass(bool createSparseDeallocs,
451  bool enableBufferInitialization) {
452  return std::make_unique<SparseTensorCodegenPass>(createSparseDeallocs,
453  enableBufferInitialization);
454 }
455 
456 std::unique_ptr<Pass> mlir::createSparseBufferRewritePass() {
457  return std::make_unique<SparseBufferRewritePass>();
458 }
459 
460 std::unique_ptr<Pass>
461 mlir::createSparseBufferRewritePass(bool enableBufferInitialization) {
462  return std::make_unique<SparseBufferRewritePass>(enableBufferInitialization);
463 }
464 
465 std::unique_ptr<Pass> mlir::createSparseVectorizationPass() {
466  return std::make_unique<SparseVectorizationPass>();
467 }
468 
469 std::unique_ptr<Pass>
471  bool enableVLAVectorization,
472  bool enableSIMDIndex32) {
473  return std::make_unique<SparseVectorizationPass>(
474  vectorLength, enableVLAVectorization, enableSIMDIndex32);
475 }
476 
477 std::unique_ptr<Pass> mlir::createSparseGPUCodegenPass() {
478  return std::make_unique<SparseGPUCodegenPass>();
479 }
480 
481 std::unique_ptr<Pass> mlir::createSparseGPUCodegenPass(unsigned numThreads,
482  bool enableRT) {
483  return std::make_unique<SparseGPUCodegenPass>(numThreads, enableRT);
484 }
485 
486 std::unique_ptr<Pass> mlir::createStorageSpecifierToLLVMPass() {
487  return std::make_unique<StorageSpecifierToLLVMPass>();
488 }