Bufferize.cpp
//===- Bufferize.cpp - Bufferization utilities ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include <optional>

namespace mlir {
namespace bufferization {
#define GEN_PASS_DEF_BUFFERIZATIONBUFFERIZE
#define GEN_PASS_DEF_ONESHOTBUFFERIZE
#include "mlir/Dialect/Bufferization/Transforms/Passes.h.inc"
} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferize"

using namespace mlir;
using namespace mlir::bufferization;

namespace {

static LayoutMapOption parseLayoutMapOption(const std::string &s) {
  if (s == "fully-dynamic-layout-map")
    return LayoutMapOption::FullyDynamicLayoutMap;
  if (s == "identity-layout-map")
    return LayoutMapOption::IdentityLayoutMap;
  if (s == "infer-layout-map")
    return LayoutMapOption::InferLayoutMap;
  llvm_unreachable("invalid layout map option");
}

static OneShotBufferizationOptions::AnalysisHeuristic
parseHeuristicOption(const std::string &s) {
  if (s == "bottom-up")
    return OneShotBufferizationOptions::AnalysisHeuristic::BottomUp;
  if (s == "top-down")
    return OneShotBufferizationOptions::AnalysisHeuristic::TopDown;
  if (s == "bottom-up-from-terminators")
    return OneShotBufferizationOptions::AnalysisHeuristic::
        BottomUpFromTerminators;
  if (s == "fuzzer")
    return OneShotBufferizationOptions::AnalysisHeuristic::Fuzzer;
  llvm_unreachable("invalid analysisheuristic option");
}

struct OneShotBufferizePass
    : public bufferization::impl::OneShotBufferizeBase<OneShotBufferizePass> {
  OneShotBufferizePass() = default;

  explicit OneShotBufferizePass(const OneShotBufferizationOptions &options)
      : options(options) {}

  void getDependentDialects(DialectRegistry &registry) const override {
    registry
        .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
  }

  void runOnOperation() override {
    OneShotBufferizationOptions opt;
    if (!options) {
      // Make new bufferization options if none were provided when creating the
      // pass.
      opt.allowReturnAllocsFromLoops = allowReturnAllocsFromLoops;
      opt.allowUnknownOps = allowUnknownOps;
      opt.analysisFuzzerSeed = analysisFuzzerSeed;
      opt.analysisHeuristic = parseHeuristicOption(analysisHeuristic);
      opt.copyBeforeWrite = copyBeforeWrite;
      opt.dumpAliasSets = dumpAliasSets;
      opt.setFunctionBoundaryTypeConversion(
          parseLayoutMapOption(functionBoundaryTypeConversion));

      if (mustInferMemorySpace && useEncodingForMemorySpace) {
        emitError(getOperation()->getLoc())
            << "only one of 'must-infer-memory-space' and "
               "'use-encoding-for-memory-space' are allowed in "
            << getArgument();
        return signalPassFailure();
      }

      if (mustInferMemorySpace) {
        opt.defaultMemorySpaceFn =
            [](TensorType t) -> std::optional<Attribute> {
          return std::nullopt;
        };
      }

      if (useEncodingForMemorySpace) {
        opt.defaultMemorySpaceFn =
            [](TensorType t) -> std::optional<Attribute> {
          if (auto rtt = dyn_cast<RankedTensorType>(t))
            return rtt.getEncoding();
          return std::nullopt;
        };
      }

      opt.printConflicts = printConflicts;
      opt.bufferAlignment = bufferAlignment;
      opt.testAnalysisOnly = testAnalysisOnly;
      opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;
      opt.checkParallelRegions = checkParallelRegions;
      opt.noAnalysisFuncFilter = noAnalysisFuncFilter;

      // Configure type converter.
      LayoutMapOption unknownTypeConversionOption =
          parseLayoutMapOption(unknownTypeConversion);
      if (unknownTypeConversionOption == LayoutMapOption::InferLayoutMap) {
        emitError(UnknownLoc::get(&getContext()),
                  "Invalid option: 'infer-layout-map' is not a valid value for "
                  "'unknown-type-conversion'");
        return signalPassFailure();
      }
      opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
                                       const BufferizationOptions &options) {
        auto tensorType = cast<TensorType>(value.getType());
        if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
          return bufferization::getMemRefTypeWithStaticIdentityLayout(
              tensorType, memorySpace);
        assert(unknownTypeConversionOption ==
                   LayoutMapOption::FullyDynamicLayoutMap &&
               "invalid layout map option");
        return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                                  memorySpace);
      };

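      // For illustration (not part of the original source): with the
      // identity-layout-map choice an unknown tensor such as tensor<?x?xf32>
      // is expected to bufferize to a plain memref<?x?xf32>, while the
      // fully-dynamic-layout-map choice produces a memref with a fully dynamic
      // strided layout, e.g. memref<?x?xf32, strided<[?, ?], offset: ?>>, so
      // that buffers with arbitrary layouts can be passed in without a cast.
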
      // Configure op filter.
      OpFilter::Entry::FilterFn filterFn = [&](Operation *op) {
        // Filter may be specified via options.
        if (this->dialectFilter.hasValue())
          return llvm::is_contained(this->dialectFilter,
                                    op->getDialect()->getNamespace());
        // No filter specified: All other ops are allowed.
        return true;
      };
      opt.opFilter.allowOperation(filterFn);
    } else {
      opt = *options;
    }

    if (opt.copyBeforeWrite && opt.testAnalysisOnly) {
      // These two flags do not make sense together: "copy-before-write"
      // indicates that copies should be inserted before every memory write,
      // but "test-analysis-only" indicates that only the analysis should be
      // tested. (I.e., no IR is bufferized.)
      emitError(UnknownLoc::get(&getContext()),
                "Invalid option: 'copy-before-write' cannot be used with "
                "'test-analysis-only'");
      return signalPassFailure();
    }

    if (opt.printConflicts && !opt.testAnalysisOnly) {
      emitError(
          UnknownLoc::get(&getContext()),
          "Invalid option: 'print-conflicts' requires 'test-analysis-only'");
      return signalPassFailure();
    }

    if (opt.dumpAliasSets && !opt.testAnalysisOnly) {
      emitError(
          UnknownLoc::get(&getContext()),
          "Invalid option: 'dump-alias-sets' requires 'test-analysis-only'");
      return signalPassFailure();
    }

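    // For illustration (not part of the original source): these pass options
    // correspond to the textual pass pipeline, so a debugging invocation along
    // the lines of
    //   mlir-opt input.mlir -one-shot-bufferize="test-analysis-only print-conflicts"
    // runs only the analysis and annotates RaW conflicts instead of rewriting
    // the IR.
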
    BufferizationStatistics statistics;
    ModuleOp moduleOp = getOperation();
    if (opt.bufferizeFunctionBoundaries) {
      if (failed(runOneShotModuleBufferize(moduleOp, opt, &statistics))) {
        signalPassFailure();
        return;
      }
    } else {
      if (!opt.noAnalysisFuncFilter.empty()) {
        emitError(UnknownLoc::get(&getContext()),
                  "Invalid option: 'no-analysis-func-filter' requires "
                  "'bufferize-function-boundaries'");
        return signalPassFailure();
      }
      if (failed(runOneShotBufferize(moduleOp, opt, &statistics))) {
        signalPassFailure();
        return;
      }
    }

    // Set pass statistics.
    this->numBufferAlloc = statistics.numBufferAlloc;
    this->numTensorInPlace = statistics.numTensorInPlace;
    this->numTensorOutOfPlace = statistics.numTensorOutOfPlace;
  }

private:
  std::optional<OneShotBufferizationOptions> options;
};
} // namespace

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass() {
  return std::make_unique<OneShotBufferizePass>();
}

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
    const OneShotBufferizationOptions &options) {
  return std::make_unique<OneShotBufferizePass>(options);
}
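
// For illustration (not part of the original source): a minimal sketch of how
// this pass is typically scheduled on a module from C++, assuming an existing
// MLIRContext *ctx and ModuleOp module:
//
//   PassManager pm(ctx);
//   pm.addPass(bufferization::createOneShotBufferizePass());
//   if (failed(pm.run(module)))
//     return failure();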

//===----------------------------------------------------------------------===//
// BufferizableOpInterface-based Bufferization
//===----------------------------------------------------------------------===//

namespace {
/// A rewriter that keeps track of extra information during bufferization.
class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener {
public:
  BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps,
                        DenseSet<Operation *> &toMemrefOps,
                        SmallVector<Operation *> &worklist,
                        const BufferizationOptions &options,
                        BufferizationStatistics *statistics)
      : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps),
        worklist(worklist), analysisState(options), statistics(statistics) {
    setListener(this);
  }

protected:
  void notifyOperationErased(Operation *op) override {
    erasedOps.insert(op);
    // Erase if present.
    toMemrefOps.erase(op);
  }

  void notifyOperationInserted(Operation *op, InsertPoint previous) override {
    // We only care about newly created ops.
    if (previous.isSet())
      return;

    erasedOps.erase(op);

    // Gather statistics about allocs.
    if (statistics) {
      if (auto sideEffectingOp = dyn_cast<MemoryEffectOpInterface>(op))
        statistics->numBufferAlloc += static_cast<int64_t>(
            sideEffectingOp.hasEffect<MemoryEffects::Allocate>());
    }

    // Keep track of to_memref ops.
    if (isa<ToMemrefOp>(op)) {
      toMemrefOps.insert(op);
      return;
    }

    // Skip to_tensor ops.
    if (isa<ToTensorOp>(op))
      return;

    // Skip non-tensor ops.
    if (!hasTensorSemantics(op))
      return;

    // Skip ops that are not allowed to be bufferized.
    auto const &options = analysisState.getOptions();
    if (!options.isOpAllowed(op))
      return;

    // Add op to worklist.
    worklist.push_back(op);
  }

private:
  /// A set of all erased ops.
  DenseSet<Operation *> &erasedOps;

  /// A set of all to_memref ops.
  DenseSet<Operation *> &toMemrefOps;

  /// The worklist of ops to be bufferized.
  SmallVector<Operation *> &worklist;

  /// The analysis state. Used for debug assertions and access to the
  /// bufferization options.
  const AnalysisState analysisState;

  /// Bufferization statistics for debugging.
  BufferizationStatistics *statistics;
};
} // namespace

LogicalResult bufferization::bufferizeOp(Operation *op,
                                         const BufferizationOptions &options,
                                         BufferizationStatistics *statistics) {
  if (options.copyBeforeWrite) {
    AnalysisState state(options);
    if (failed(insertTensorCopies(op, state)))
      return failure();
  }

  // Keep track of to_memref ops.
  DenseSet<Operation *> toMemrefOps;
  op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); });

  // Gather all bufferizable ops in top-to-bottom order.
  //
  // We should ideally know the exact memref type of all operands when
  // bufferizing an op. (This is the case when bufferizing top-to-bottom.)
  // Otherwise, we have to use a memref type with a fully dynamic layout map to
  // avoid copies. We are currently missing patterns for layout maps to
  // canonicalize away (or canonicalize to more precise layouts).
  SmallVector<Operation *> worklist;
  op->walk<WalkOrder::PostOrder>([&](Operation *op) {
    if (options.isOpAllowed(op) && hasTensorSemantics(op))
      worklist.push_back(op);
  });

  // Keep track of all erased ops.
  DenseSet<Operation *> erasedOps;

  // Bufferize all ops.
  BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps,
                                 worklist, options, statistics);
  for (unsigned i = 0; i < worklist.size(); ++i) {
    Operation *nextOp = worklist[i];
    // Skip ops that were erased.
    if (erasedOps.contains(nextOp))
      continue;
    // Skip ops that are not bufferizable or not allowed.
    auto bufferizableOp = options.dynCastBufferizableOp(nextOp);
    if (!bufferizableOp)
      continue;
    // Skip ops that no longer have tensor semantics.
    if (!hasTensorSemantics(nextOp))
      continue;
    // Check for unsupported unstructured control flow.
    if (!bufferizableOp.supportsUnstructuredControlFlow())
      for (Region &r : nextOp->getRegions())
        if (r.getBlocks().size() > 1)
          return nextOp->emitOpError(
              "op or BufferizableOpInterface implementation does not support "
              "unstructured control flow, but at least one region has multiple "
              "blocks");

    // Bufferize the op.
    LLVM_DEBUG(llvm::dbgs()
               << "//===-------------------------------------------===//\n"
               << "IR after bufferizing: " << nextOp->getName() << "\n");
    rewriter.setInsertionPoint(nextOp);
    if (failed(bufferizableOp.bufferize(rewriter, options))) {
      LLVM_DEBUG(llvm::dbgs()
                 << "failed to bufferize\n"
                 << "//===-------------------------------------------===//\n");
      return nextOp->emitError("failed to bufferize op");
    }
    LLVM_DEBUG(llvm::dbgs()
               << *op
               << "\n//===-------------------------------------------===//\n");
  }

  // Return early if the top-level op is entirely gone.
  if (erasedOps.contains(op))
    return success();

  // Fold all to_memref(to_tensor(x)) pairs.
  for (Operation *op : toMemrefOps) {
    rewriter.setInsertionPoint(op);
    (void)bufferization::foldToMemrefToTensorPair(
        rewriter, cast<ToMemrefOp>(op), options);
  }

  // Remove all dead to_tensor ops.
  op->walk<WalkOrder::PostOrder>([&](ToTensorOp toTensorOp) {
    if (toTensorOp->getUses().empty()) {
      rewriter.eraseOp(toTensorOp);
      return WalkResult::skip();
    }
    return WalkResult::advance();
  });

  /// Check the result of bufferization. Return an error if an op was not
  /// bufferized, unless partial bufferization is allowed.
  if (options.allowUnknownOps)
    return success();

  for (Operation *op : worklist) {
    // Skip ops that are entirely gone.
    if (erasedOps.contains(op))
      continue;
    // Ops that no longer have tensor semantics (because they were updated
    // in-place) are allowed.
    if (!hasTensorSemantics(op))
      continue;
    // Skip ops that are not allowed.
    if (!options.isOpAllowed(op))
      continue;
    // Ops without any uses and no side effects will fold away.
    if (op->getUses().empty() && isMemoryEffectFree(op))
      continue;
    // ToTensorOps/ToMemrefOps are allowed in the output.
    if (isa<ToTensorOp, ToMemrefOp>(op))
      continue;
    return op->emitError("op was not bufferized");
  }

  return success();
}
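
// For illustration (not part of the original source): bufferizeOp can also be
// driven directly, outside of the One-Shot Bufferize pass, e.g. on a single
// function-like op. `funcOp` and the surrounding LogicalResult-returning code
// are assumed here:
//
//   BufferizationOptions options;
//   options.allowUnknownOps = true;
//   if (failed(bufferization::bufferizeOp(funcOp, options)))
//     return failure();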

LogicalResult
bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
                                       const BufferizationOptions &options) {
  OpBuilder::InsertionGuard g(rewriter);
  auto bufferizableOp = options.dynCastBufferizableOp(block->getParentOp());
  if (!bufferizableOp)
    return failure();

  // Compute the new signature.
  SmallVector<Type> newTypes;
  for (BlockArgument &bbArg : block->getArguments()) {
    auto tensorType = dyn_cast<TensorType>(bbArg.getType());
    if (!tensorType) {
      newTypes.push_back(bbArg.getType());
      continue;
    }

    FailureOr<BaseMemRefType> memrefType =
        bufferization::getBufferType(bbArg, options);
    if (failed(memrefType))
      return failure();
    newTypes.push_back(*memrefType);
  }

  // Change the type of all block arguments.
  for (auto [bbArg, type] : llvm::zip(block->getArguments(), newTypes)) {
    if (bbArg.getType() == type)
      continue;

    // Collect all uses of the bbArg.
    SmallVector<OpOperand *> bbArgUses;
    for (OpOperand &use : bbArg.getUses())
      bbArgUses.push_back(&use);

    // Change the bbArg type to memref.
    bbArg.setType(type);

    // Replace all uses of the original tensor bbArg.
    rewriter.setInsertionPointToStart(block);
    if (!bbArgUses.empty()) {
      Value toTensorOp =
          rewriter.create<bufferization::ToTensorOp>(bbArg.getLoc(), bbArg);
      for (OpOperand *use : bbArgUses)
        use->set(toTensorOp);
    }
  }

  // Bufferize callers of the block.
  for (Operation *op : block->getUsers()) {
    auto branchOp = dyn_cast<BranchOpInterface>(op);
    if (!branchOp)
      return op->emitOpError("cannot bufferize ops with block references that "
                             "do not implement BranchOpInterface");

    auto it = llvm::find(op->getSuccessors(), block);
    assert(it != op->getSuccessors().end() && "could not find successor");
    int64_t successorIdx = std::distance(op->getSuccessors().begin(), it);

    SuccessorOperands operands = branchOp.getSuccessorOperands(successorIdx);
    SmallVector<Value> newOperands;
    for (auto [operand, type] :
         llvm::zip(operands.getForwardedOperands(), newTypes)) {
      if (operand.getType() == type) {
        // Not a tensor type. Nothing to do for this operand.
        newOperands.push_back(operand);
        continue;
      }
      FailureOr<BaseMemRefType> operandBufferType =
          bufferization::getBufferType(operand, options);
      if (failed(operandBufferType))
        return failure();
      rewriter.setInsertionPointAfterValue(operand);
      Value bufferizedOperand = rewriter.create<bufferization::ToMemrefOp>(
          operand.getLoc(), *operandBufferType, operand);
      // A cast is needed if the operand and the block argument have different
      // bufferized types.
      if (type != *operandBufferType)
        bufferizedOperand = rewriter.create<memref::CastOp>(
            operand.getLoc(), type, bufferizedOperand);
      newOperands.push_back(bufferizedOperand);
    }
    operands.getMutableForwardedOperands().assign(newOperands);
  }

  return success();
}
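
// For illustration (not part of the original source): a sketch of how a
// BufferizableOpInterface implementation for an op with multi-block regions
// might use bufferizeBlockSignature while bufferizing the parent op.
// `parentOp`, `rewriter`, and `options` are assumed to be in scope:
//
//   for (Region &region : parentOp->getRegions())
//     for (Block &block : region)
//       if (failed(bufferizeBlockSignature(&block, rewriter, options)))
//         return failure();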