//===- Bufferize.cpp - Bufferization utilities ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotModuleBufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include <optional>

namespace mlir {
namespace bufferization {
#define GEN_PASS_DEF_FINALIZINGBUFFERIZE
#define GEN_PASS_DEF_BUFFERIZATIONBUFFERIZE
#define GEN_PASS_DEF_ONESHOTBUFFERIZE
#include "mlir/Dialect/Bufferization/Transforms/Passes.h.inc"
} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferize"

using namespace mlir;
using namespace mlir::bufferization;

//===----------------------------------------------------------------------===//
// BufferizeTypeConverter
//===----------------------------------------------------------------------===//

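/// Materialize a conversion from a memref to a tensor by creating a
/// bufferization.to_tensor op on the single memref input.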
static Value materializeToTensor(OpBuilder &builder, TensorType type,
                                 ValueRange inputs, Location loc) {
  assert(inputs.size() == 1);
  assert(isa<BaseMemRefType>(inputs[0].getType()));
  return builder.create<bufferization::ToTensorOp>(loc, type, inputs[0]);
}

/// Registers conversions into BufferizeTypeConverter.
BufferizeTypeConverter::BufferizeTypeConverter() {
  // Keep all types unchanged.
  addConversion([](Type type) { return type; });
  // Convert RankedTensorType to MemRefType.
  addConversion([](RankedTensorType type) -> Type {
    return MemRefType::get(type.getShape(), type.getElementType());
  });
  // Convert UnrankedTensorType to UnrankedMemRefType.
  addConversion([](UnrankedTensorType type) -> Type {
    return UnrankedMemRefType::get(type.getElementType(), 0);
  });
  addArgumentMaterialization(materializeToTensor);
  addSourceMaterialization(materializeToTensor);
  addTargetMaterialization([](OpBuilder &builder, BaseMemRefType type,
                              ValueRange inputs, Location loc) -> Value {
    assert(inputs.size() == 1 && "expected exactly one input");

    if (auto inputType = dyn_cast<MemRefType>(inputs[0].getType())) {
      // MemRef to MemRef cast.
      assert(inputType != type && "expected different types");
      // Unranked to ranked and ranked to unranked casts must be explicit.
      auto rankedDestType = dyn_cast<MemRefType>(type);
      if (!rankedDestType)
        return nullptr;
      BufferizationOptions options;
      options.bufferAlignment = 0;
      FailureOr<Value> replacement =
          castOrReallocMemRefValue(builder, inputs[0], rankedDestType, options);
      if (failed(replacement))
        return nullptr;
      return *replacement;
    }

    if (isa<TensorType>(inputs[0].getType())) {
      // Tensor to MemRef cast.
      return builder.create<bufferization::ToMemrefOp>(loc, type, inputs[0]);
    }

    llvm_unreachable("only tensor/memref input types supported");
  });
}

void mlir::bufferization::populateBufferizeMaterializationLegality(
    ConversionTarget &target) {
  target.addLegalOp<bufferization::ToTensorOp, bufferization::ToMemrefOp>();
}

namespace {
// In a finalizing bufferize conversion, we know that all tensors have been
// converted to memrefs, thus, this op becomes an identity.
class BufferizeToTensorOp
    : public OpConversionPattern<bufferization::ToTensorOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(op, adaptor.getMemref());
    return success();
  }
};
} // namespace

namespace {
// In a finalizing bufferize conversion, we know that all tensors have been
// converted to memrefs, thus, this op becomes an identity.
class BufferizeToMemrefOp
    : public OpConversionPattern<bufferization::ToMemrefOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    rewriter.replaceOp(op, adaptor.getTensor());
    return success();
  }
};
} // namespace

void mlir::bufferization::populateEliminateBufferizeMaterializationsPatterns(
    BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<BufferizeToTensorOp, BufferizeToMemrefOp>(typeConverter,
                                                         patterns.getContext());
}

namespace {
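/// Finalize a partial bufferization by folding away the remaining
/// bufferization.to_tensor and bufferization.to_memref materializations.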
struct FinalizingBufferizePass
    : public bufferization::impl::FinalizingBufferizeBase<
          FinalizingBufferizePass> {
  using FinalizingBufferizeBase<
      FinalizingBufferizePass>::FinalizingBufferizeBase;

  void runOnOperation() override {
    auto func = getOperation();
    auto *context = &getContext();

    BufferizeTypeConverter typeConverter;
    RewritePatternSet patterns(context);
    ConversionTarget target(*context);

    populateEliminateBufferizeMaterializationsPatterns(typeConverter, patterns);

    // If all result types are legal, and all block arguments are legal (ensured
    // by func conversion above), then all types in the program are legal.
    //
    // We also check that the operand types are legal to avoid creating invalid
    // IR. For example, this prevents
    // populateEliminateBufferizeMaterializationsPatterns from updating the
    // types of the operands to a return op without updating the enclosing
    // function.
    target.markUnknownOpDynamicallyLegal(
        [&](Operation *op) { return typeConverter.isLegal(op); });

    if (failed(applyFullConversion(func, target, std::move(patterns))))
      signalPassFailure();
  }
};

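/// Translate a layout-map pass option string into the corresponding
/// LayoutMapOption enum value.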
static LayoutMapOption parseLayoutMapOption(const std::string &s) {
  if (s == "fully-dynamic-layout-map")
    return LayoutMapOption::FullyDynamicLayoutMap;
  if (s == "identity-layout-map")
    return LayoutMapOption::IdentityLayoutMap;
  if (s == "infer-layout-map")
    return LayoutMapOption::InferLayoutMap;
  llvm_unreachable("invalid layout map option");
}

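/// Translate the analysis heuristic pass option string into the corresponding
/// OneShotBufferizationOptions::AnalysisHeuristic value.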
static OneShotBufferizationOptions::AnalysisHeuristic
parseHeuristicOption(const std::string &s) {
  if (s == "bottom-up")
    return OneShotBufferizationOptions::AnalysisHeuristic::BottomUp;
  if (s == "top-down")
    return OneShotBufferizationOptions::AnalysisHeuristic::TopDown;
  if (s == "bottom-up-from-terminators")
    return OneShotBufferizationOptions::AnalysisHeuristic::
        BottomUpFromTerminators;
  if (s == "fuzzer")
    return OneShotBufferizationOptions::AnalysisHeuristic::Fuzzer;
  llvm_unreachable("invalid analysis-heuristic option");
}

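/// Pass that runs One-Shot Bufferize: all ops that implement
/// BufferizableOpInterface are bufferized, optionally across function
/// boundaries.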
struct OneShotBufferizePass
    : public bufferization::impl::OneShotBufferizeBase<OneShotBufferizePass> {
  OneShotBufferizePass() = default;

  explicit OneShotBufferizePass(const OneShotBufferizationOptions &options)
      : options(options) {}

  void getDependentDialects(DialectRegistry &registry) const override {
    registry
        .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
  }

  void runOnOperation() override {
    OneShotBufferizationOptions opt;
    if (!options) {
      // Make new bufferization options if none were provided when creating the
      // pass.
      opt.allowReturnAllocsFromLoops = allowReturnAllocsFromLoops;
      opt.allowUnknownOps = allowUnknownOps;
      opt.analysisFuzzerSeed = analysisFuzzerSeed;
      opt.analysisHeuristic = parseHeuristicOption(analysisHeuristic);
      opt.copyBeforeWrite = copyBeforeWrite;
      opt.dumpAliasSets = dumpAliasSets;
      opt.setFunctionBoundaryTypeConversion(
          parseLayoutMapOption(functionBoundaryTypeConversion));
      if (mustInferMemorySpace) {
        opt.defaultMemorySpaceFn =
            [](TensorType t) -> std::optional<Attribute> {
          return std::nullopt;
        };
      }
      opt.printConflicts = printConflicts;
      opt.testAnalysisOnly = testAnalysisOnly;
      opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;
      opt.checkParallelRegions = checkParallelRegions;
      opt.noAnalysisFuncFilter = noAnalysisFuncFilter;

      // Configure type converter.
      LayoutMapOption unknownTypeConversionOption =
          parseLayoutMapOption(unknownTypeConversion);
      if (unknownTypeConversionOption == LayoutMapOption::InferLayoutMap) {
        emitError(UnknownLoc::get(&getContext()),
                  "Invalid option: 'infer-layout-map' is not a valid value for "
                  "'unknown-type-conversion'");
        return signalPassFailure();
      }
      opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
                                       const BufferizationOptions &options) {
        auto tensorType = cast<TensorType>(value.getType());
        if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
          return bufferization::getMemRefTypeWithStaticIdentityLayout(
              tensorType, memorySpace);
        assert(unknownTypeConversionOption ==
                   LayoutMapOption::FullyDynamicLayoutMap &&
               "invalid layout map option");
        return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                                  memorySpace);
      };

      // Configure op filter.
      OpFilter::Entry::FilterFn filterFn = [&](Operation *op) {
        // Filter may be specified via options.
        if (this->dialectFilter.hasValue())
          return llvm::is_contained(this->dialectFilter,
                                    op->getDialect()->getNamespace());
        // No filter specified: All other ops are allowed.
        return true;
      };
      opt.opFilter.allowOperation(filterFn);
    } else {
      opt = *options;
    }

    if (opt.copyBeforeWrite && opt.testAnalysisOnly) {
      // These two flags do not make sense together: "copy-before-write"
      // indicates that copies should be inserted before every memory write,
      // but "test-analysis-only" indicates that only the analysis should be
      // tested. (I.e., no IR is bufferized.)
      emitError(UnknownLoc::get(&getContext()),
                "Invalid option: 'copy-before-write' cannot be used with "
                "'test-analysis-only'");
      return signalPassFailure();
    }

    if (opt.printConflicts && !opt.testAnalysisOnly) {
      emitError(
          UnknownLoc::get(&getContext()),
          "Invalid option: 'print-conflicts' requires 'test-analysis-only'");
      return signalPassFailure();
    }

    if (opt.dumpAliasSets && !opt.testAnalysisOnly) {
      emitError(
          UnknownLoc::get(&getContext()),
          "Invalid option: 'dump-alias-sets' requires 'test-analysis-only'");
      return signalPassFailure();
    }

    BufferizationStatistics statistics;
    ModuleOp moduleOp = getOperation();
    if (opt.bufferizeFunctionBoundaries) {
      if (failed(runOneShotModuleBufferize(moduleOp, opt, &statistics))) {
        signalPassFailure();
        return;
      }
    } else {
      if (!opt.noAnalysisFuncFilter.empty()) {
        emitError(UnknownLoc::get(&getContext()),
                  "Invalid option: 'no-analysis-func-filter' requires "
                  "'bufferize-function-boundaries'");
        return signalPassFailure();
      }
      if (failed(runOneShotBufferize(moduleOp, opt, &statistics))) {
        signalPassFailure();
        return;
      }
    }

    // Set pass statistics.
    this->numBufferAlloc = statistics.numBufferAlloc;
    this->numTensorInPlace = statistics.numTensorInPlace;
    this->numTensorOutOfPlace = statistics.numTensorOutOfPlace;
  }

private:
  std::optional<OneShotBufferizationOptions> options;
};
} // namespace

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass() {
  return std::make_unique<OneShotBufferizePass>();
}

std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
    const OneShotBufferizationOptions &options) {
  return std::make_unique<OneShotBufferizePass>(options);
}
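// A minimal usage sketch (not from the upstream sources; `pm` is an assumed
// ModuleOp-level PassManager):
//
//   OneShotBufferizationOptions opts;
//   opts.bufferizeFunctionBoundaries = true;
//   pm.addPass(bufferization::createOneShotBufferizePass(opts));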

std::unique_ptr<OperationPass<func::FuncOp>>
mlir::bufferization::createFinalizingBufferizePass() {
  return std::make_unique<FinalizingBufferizePass>();
}

//===----------------------------------------------------------------------===//
// BufferizableOpInterface-based Bufferization
//===----------------------------------------------------------------------===//

namespace {
/// A rewriter that keeps track of extra information during bufferization.
class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener {
public:
  BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps,
                        DenseSet<Operation *> &toMemrefOps,
                        SmallVector<Operation *> &worklist,
                        const BufferizationOptions &options,
                        BufferizationStatistics *statistics)
      : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps),
        worklist(worklist), analysisState(options), statistics(statistics) {
    setListener(this);
  }

protected:
  void notifyOperationErased(Operation *op) override {
    erasedOps.insert(op);
    // Erase if present.
    toMemrefOps.erase(op);
  }

  void notifyOperationInserted(Operation *op, InsertPoint previous) override {
    // We only care about newly created ops.
    if (previous.isSet())
      return;

    erasedOps.erase(op);

    // Gather statistics about allocs.
    if (statistics) {
      if (auto sideEffectingOp = dyn_cast<MemoryEffectOpInterface>(op))
        statistics->numBufferAlloc += static_cast<int64_t>(
            sideEffectingOp.hasEffect<MemoryEffects::Allocate>());
    }

    // Keep track of to_memref ops.
    if (isa<ToMemrefOp>(op)) {
      toMemrefOps.insert(op);
      return;
    }

    // Skip to_tensor ops.
    if (isa<ToTensorOp>(op))
      return;

    // Skip non-tensor ops.
    if (!hasTensorSemantics(op))
      return;

    // Skip ops that are not allowed to be bufferized.
    auto const &options = analysisState.getOptions();
    if (!options.isOpAllowed(op))
      return;

    // Add op to worklist.
    worklist.push_back(op);
  }

private:
  /// A set of all erased ops.
  DenseSet<Operation *> &erasedOps;

  /// A set of all to_memref ops.
  DenseSet<Operation *> &toMemrefOps;

  /// The worklist of ops to be bufferized.
  SmallVector<Operation *> &worklist;

  /// The analysis state. Used for debug assertions and access to the
  /// bufferization options.
  const AnalysisState analysisState;

  /// Bufferization statistics for debugging.
  BufferizationStatistics *statistics;
};
} // namespace

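/// Bufferize `op` and its nested ops that implement BufferizableOpInterface,
/// processing the gathered worklist in top-to-bottom order.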
LogicalResult bufferization::bufferizeOp(Operation *op,
                                         const BufferizationOptions &options,
                                         BufferizationStatistics *statistics) {
  if (options.copyBeforeWrite) {
    AnalysisState state(options);
    if (failed(insertTensorCopies(op, state)))
      return failure();
  }

  // Keep track of to_memref ops.
  DenseSet<Operation *> toMemrefOps;
  op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); });

  // Gather all bufferizable ops in top-to-bottom order.
  //
  // We should ideally know the exact memref type of all operands when
  // bufferizing an op. (This is the case when bufferizing top-to-bottom.)
  // Otherwise, we have to use a memref type with a fully dynamic layout map to
  // avoid copies. We are currently missing patterns for layout maps to
  // canonicalize away (or canonicalize to more precise layouts).
  SmallVector<Operation *> worklist;
  op->walk<WalkOrder::PostOrder>([&](Operation *op) {
    if (options.isOpAllowed(op) && hasTensorSemantics(op))
      worklist.push_back(op);
  });

  // Keep track of all erased ops.
  DenseSet<Operation *> erasedOps;

  // Bufferize all ops.
  BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps,
                                 worklist, options, statistics);
  for (unsigned i = 0; i < worklist.size(); ++i) {
    Operation *nextOp = worklist[i];
    // Skip ops that were erased.
    if (erasedOps.contains(nextOp))
      continue;
    // Skip ops that are not bufferizable or not allowed.
    auto bufferizableOp = options.dynCastBufferizableOp(nextOp);
    if (!bufferizableOp)
      continue;
    // Skip ops that no longer have tensor semantics.
    if (!hasTensorSemantics(nextOp))
      continue;
    // Check for unsupported unstructured control flow.
    if (!bufferizableOp.supportsUnstructuredControlFlow())
      for (Region &r : nextOp->getRegions())
        if (r.getBlocks().size() > 1)
          return nextOp->emitOpError(
              "op or BufferizableOpInterface implementation does not support "
              "unstructured control flow, but at least one region has multiple "
              "blocks");

    // Bufferize the op.
    LLVM_DEBUG(llvm::dbgs()
               << "//===-------------------------------------------===//\n"
               << "IR after bufferizing: " << nextOp->getName() << "\n");
    rewriter.setInsertionPoint(nextOp);
    if (failed(bufferizableOp.bufferize(rewriter, options))) {
      LLVM_DEBUG(llvm::dbgs()
                 << "failed to bufferize\n"
                 << "//===-------------------------------------------===//\n");
      return nextOp->emitError("failed to bufferize op");
    }
    LLVM_DEBUG(llvm::dbgs()
               << *op
               << "\n//===-------------------------------------------===//\n");
  }

  // Return early if the top-level op is entirely gone.
  if (erasedOps.contains(op))
    return success();

  // Fold all to_memref(to_tensor(x)) pairs.
  for (Operation *op : toMemrefOps) {
    rewriter.setInsertionPoint(op);
    (void)bufferization::foldToMemrefToTensorPair(
        rewriter, cast<ToMemrefOp>(op), options);
  }

  // Remove all dead to_tensor ops.
  op->walk<WalkOrder::PostOrder>([&](ToTensorOp toTensorOp) {
    if (toTensorOp->getUses().empty()) {
      rewriter.eraseOp(toTensorOp);
      return WalkResult::skip();
    }
    return WalkResult::advance();
  });

  /// Check the result of bufferization. Return an error if an op was not
  /// bufferized, unless partial bufferization is allowed.
  if (options.allowUnknownOps)
    return success();

  for (Operation *op : worklist) {
    // Skip ops that are entirely gone.
    if (erasedOps.contains(op))
      continue;
    // Ops that no longer have tensor semantics (because they were updated
    // in-place) are allowed.
    if (!hasTensorSemantics(op))
      continue;
    // Continue ops that are not allowed.
    if (!options.isOpAllowed(op))
      continue;
    // Ops without any uses and no side effects will fold away.
    if (op->getUses().empty() && isMemoryEffectFree(op))
      continue;
    // ToTensorOps/ToMemrefOps are allowed in the output.
    if (isa<ToTensorOp, ToMemrefOp>(op))
      continue;
    return op->emitError("op was not bufferized");
  }

  return success();
}

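/// Bufferize the signature of `block` and its callers (i.e., ops that have
/// the given block as a successor).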
LogicalResult
bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
                                       const BufferizationOptions &options) {
  OpBuilder::InsertionGuard g(rewriter);
  auto bufferizableOp = options.dynCastBufferizableOp(block->getParentOp());
  if (!bufferizableOp)
    return failure();

  // Compute the new signature.
  SmallVector<Type> newTypes;
  for (BlockArgument &bbArg : block->getArguments()) {
    auto tensorType = dyn_cast<TensorType>(bbArg.getType());
    if (!tensorType) {
      newTypes.push_back(bbArg.getType());
      continue;
    }

    FailureOr<BaseMemRefType> memrefType =
        bufferization::getBufferType(bbArg, options);
    if (failed(memrefType))
      return failure();
    newTypes.push_back(*memrefType);
  }

  // Change the type of all block arguments.
  for (auto [bbArg, type] : llvm::zip(block->getArguments(), newTypes)) {
    if (bbArg.getType() == type)
      continue;

    // Collect all uses of the bbArg.
    SmallVector<OpOperand *> bbArgUses;
    for (OpOperand &use : bbArg.getUses())
      bbArgUses.push_back(&use);

    // Change the bbArg type to memref.
    bbArg.setType(type);

    // Replace all uses of the original tensor bbArg.
    rewriter.setInsertionPointToStart(block);
    if (!bbArgUses.empty()) {
      Value toTensorOp =
          rewriter.create<bufferization::ToTensorOp>(bbArg.getLoc(), bbArg);
      for (OpOperand *use : bbArgUses)
        use->set(toTensorOp);
    }
  }

  // Bufferize callers of the block.
  for (Operation *op : block->getUsers()) {
    auto branchOp = dyn_cast<BranchOpInterface>(op);
    if (!branchOp)
      return op->emitOpError("cannot bufferize ops with block references that "
                             "do not implement BranchOpInterface");

    auto it = llvm::find(op->getSuccessors(), block);
    assert(it != op->getSuccessors().end() && "could not find successor");
    int64_t successorIdx = std::distance(op->getSuccessors().begin(), it);

    SuccessorOperands operands = branchOp.getSuccessorOperands(successorIdx);
    SmallVector<Value> newOperands;
    for (auto [operand, type] :
         llvm::zip(operands.getForwardedOperands(), newTypes)) {
      if (operand.getType() == type) {
        // Not a tensor type. Nothing to do for this operand.
        newOperands.push_back(operand);
        continue;
      }
      FailureOr<BaseMemRefType> operandBufferType =
          bufferization::getBufferType(operand, options);
      if (failed(operandBufferType))
        return failure();
      rewriter.setInsertionPointAfterValue(operand);
      Value bufferizedOperand = rewriter.create<bufferization::ToMemrefOp>(
          operand.getLoc(), *operandBufferType, operand);
      // A cast is needed if the operand and the block argument have different
      // bufferized types.
      if (type != *operandBufferType)
        bufferizedOperand = rewriter.create<memref::CastOp>(
            operand.getLoc(), type, bufferizedOperand);
      newOperands.push_back(bufferizedOperand);
    }
    operands.getMutableForwardedOperands().assign(newOperands);
  }

  return success();
}

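/// Return BufferizationOptions such that `bufferizeOp` behaves like the old
/// (deprecated) partial, dialect conversion-based bufferization passes: a copy
/// is inserted before every write, and unknown ops are allowed.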
BufferizationOptions bufferization::getPartialBufferizationOptions() {
  BufferizationOptions options;
  options.allowUnknownOps = true;
  options.copyBeforeWrite = true;
  options.enforceAliasingInvariants = false;
  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                      const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(
        cast<TensorType>(value.getType()), memorySpace);
  };
  options.opFilter.allowDialect<BufferizationDialect>();
  return options;
}
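
// A minimal usage sketch of the partial-bufferization entry point (not from
// the upstream sources; `op` is an assumed operation to bufferize):
//
//   BufferizationOptions options = getPartialBufferizationOptions();
//   if (failed(bufferizeOp(op, options)))
//     return failure();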