MLIR  20.0.0git
Bufferize.cpp
Go to the documentation of this file.
1 //===- Bufferize.cpp - Bufferization utilities ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
10 
19 #include "mlir/IR/Diagnostics.h"
20 #include "mlir/IR/Operation.h"
23 #include "mlir/Pass/PassManager.h"
24 #include "mlir/Transforms/Passes.h"
25 #include <optional>
26 
27 namespace mlir {
28 namespace bufferization {
29 #define GEN_PASS_DEF_FINALIZINGBUFFERIZE
30 #define GEN_PASS_DEF_BUFFERIZATIONBUFFERIZE
31 #define GEN_PASS_DEF_ONESHOTBUFFERIZE
32 #include "mlir/Dialect/Bufferization/Transforms/Passes.h.inc"
33 } // namespace bufferization
34 } // namespace mlir
35 
36 #define DEBUG_TYPE "bufferize"
37 
38 using namespace mlir;
39 using namespace mlir::bufferization;
40 
41 //===----------------------------------------------------------------------===//
42 // BufferizeTypeConverter
43 //===----------------------------------------------------------------------===//
44 
// Source materialization for the bufferize type converter: wraps a single
// memref value in a bufferization.to_tensor op so it can be used where a
// tensor of `type` is expected.
// NOTE(extract): the opening signature line (orig. line 45) is missing here;
// per the index it is
//   static Value materializeToTensor(OpBuilder &builder, TensorType type,
46  ValueRange inputs, Location loc) {
// Materializations are 1:1: exactly one input, and it must be a memref.
47  assert(inputs.size() == 1);
48  assert(isa<BaseMemRefType>(inputs[0].getType()));
// Emit the to_tensor op that yields the requested tensor type.
49  return builder.create<bufferization::ToTensorOp>(loc, type, inputs[0]);
50 }
51 
52 /// Registers conversions into BufferizeTypeConverter
// NOTE(extract): the constructor signature line (orig. line 53,
// BufferizeTypeConverter::BufferizeTypeConverter()) is missing from this
// extract.
54  // Keep all types unchanged.
55  addConversion([](Type type) { return type; });
56  // Convert RankedTensorType to MemRefType.
57  addConversion([](RankedTensorType type) -> Type {
58  return MemRefType::get(type.getShape(), type.getElementType());
59  });
60  // Convert UnrankedTensorType to UnrankedMemRefType.
61  addConversion([](UnrankedTensorType type) -> Type {
// Memory space 0 is used for the unranked memref result.
62  return UnrankedMemRefType::get(type.getElementType(), 0);
63  });
// NOTE(extract): orig. lines 64-66 are missing here; presumably they register
// materializeToTensor as argument/source materialization and open the target
// materialization lambda whose parameter list continues below — TODO confirm
// against upstream Bufferize.cpp.
67  ValueRange inputs, Location loc) -> Value {
68  assert(inputs.size() == 1 && "expected exactly one input");
69 
70  if (auto inputType = dyn_cast<MemRefType>(inputs[0].getType())) {
71  // MemRef to MemRef cast.
72  assert(inputType != type && "expected different types");
73  // Unranked to ranked and ranked to unranked casts must be explicit.
74  auto rankedDestType = dyn_cast<MemRefType>(type);
// Returning nullptr tells the conversion framework that no implicit
// materialization is available for this type pair.
75  if (!rankedDestType)
76  return nullptr;
// NOTE(extract): orig. line 77 is missing; presumably it declares the local
// `BufferizationOptions options;` mutated on the next line — TODO confirm.
78  options.bufferAlignment = 0;
// Cast the memref (or realloc + copy if the layouts are incompatible).
79  FailureOr<Value> replacement =
80  castOrReallocMemRefValue(builder, inputs[0], rankedDestType, options);
81  if (failed(replacement))
82  return nullptr;
83  return *replacement;
84  }
85 
86  if (isa<TensorType>(inputs[0].getType())) {
87  // Tensor to MemRef cast.
88  return builder.create<bufferization::ToMemrefOp>(loc, type, inputs[0]);
89  }
90 
91  llvm_unreachable("only tensor/memref input types supported");
92  });
93 }
94 
// Marks the two bufferization materialization ops as legal so that a partial
// conversion does not fail on them.
// NOTE(extract): the signature line (orig. line 95,
// mlir::bufferization::populateBufferizeMaterializationLegality) is missing.
96  ConversionTarget &target) {
97  target.addLegalOp<bufferization::ToTensorOp, bufferization::ToMemrefOp>();
98 }
99 
100 namespace {
101 // In a finalizing bufferize conversion, we know that all tensors have been
102 // converted to memrefs, thus, this op becomes an identity.
// The pattern replaces the to_tensor op with its (already converted) memref
// operand, erasing the op.
103 class BufferizeToTensorOp
104  : public OpConversionPattern<bufferization::ToTensorOp> {
105 public:
// NOTE(extract): orig. line 106 is missing; presumably the inherited
// constructor using-declaration — TODO confirm.
107  LogicalResult
108  matchAndRewrite(bufferization::ToTensorOp op, OpAdaptor adaptor,
109  ConversionPatternRewriter &rewriter) const override {
// Forward the memref operand to all users of the result.
110  rewriter.replaceOp(op, adaptor.getMemref());
111  return success();
112  }
113 };
114 } // namespace
115 
116 namespace {
117 // In a finalizing bufferize conversion, we know that all tensors have been
118 // converted to memrefs, thus, this op becomes an identity.
// Mirror image of BufferizeToTensorOp: replace the to_memref op with its
// (already converted) tensor operand.
119 class BufferizeToMemrefOp
120  : public OpConversionPattern<bufferization::ToMemrefOp> {
121 public:
// NOTE(extract): orig. line 122 is missing; presumably the inherited
// constructor using-declaration — TODO confirm.
123  LogicalResult
124  matchAndRewrite(bufferization::ToMemrefOp op, OpAdaptor adaptor,
125  ConversionPatternRewriter &rewriter) const override {
126  rewriter.replaceOp(op, adaptor.getTensor());
127  return success();
128  }
129 };
130 } // namespace
131 
// Registers the two identity patterns above so a finalizing conversion can
// erase leftover to_tensor/to_memref materializations.
// NOTE(extract): the signature line (orig. line 132,
// mlir::bufferization::populateEliminateBufferizeMaterializationsPatterns)
// is missing.
133  const BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
134  patterns.add<BufferizeToTensorOp, BufferizeToMemrefOp>(typeConverter,
135  patterns.getContext());
136 }
137 
138 namespace {
// Pass that finalizes a partial bufferization: it erases all remaining
// to_tensor/to_memref materializations via a full dialect conversion over the
// function body.
139 struct FinalizingBufferizePass
140  : public bufferization::impl::FinalizingBufferizeBase<
141  FinalizingBufferizePass> {
142  using FinalizingBufferizeBase<
143  FinalizingBufferizePass>::FinalizingBufferizeBase;
144 
145  void runOnOperation() override {
146  auto func = getOperation();
147  auto *context = &getContext();
148 
149  BufferizeTypeConverter typeConverter;
150  RewritePatternSet patterns(context);
151  ConversionTarget target(*context);
152 
// NOTE(extract): orig. line 153 is missing; presumably it calls
// populateEliminateBufferizeMaterializationsPatterns(typeConverter, patterns)
// — TODO confirm against upstream.
154 
155  // If all result types are legal, and all block arguments are legal (ensured
156  // by func conversion above), then all types in the program are legal.
157  //
158  // We also check that the operand types are legal to avoid creating invalid
159  // IR. For example, this prevents
160  // populateEliminateBufferizeMaterializationsPatterns from updating the
161  // types of the operands to a return op without updating the enclosing
162  // function.
163  target.markUnknownOpDynamicallyLegal(
164  [&](Operation *op) { return typeConverter.isLegal(op); });
165 
// Full conversion: any op left with illegal (tensor) types is an error.
166  if (failed(applyFullConversion(func, target, std::move(patterns))))
167  signalPassFailure();
168  }
169 };
170 
171 static LayoutMapOption parseLayoutMapOption(const std::string &s) {
172  if (s == "fully-dynamic-layout-map")
173  return LayoutMapOption::FullyDynamicLayoutMap;
174  if (s == "identity-layout-map")
175  return LayoutMapOption::IdentityLayoutMap;
176  if (s == "infer-layout-map")
177  return LayoutMapOption::InferLayoutMap;
178  llvm_unreachable("invalid layout map option");
179 }
180 
// Maps the textual analysis-heuristic pass option onto the corresponding
// heuristic enumerator.
// NOTE(extract): the return-type/signature line (orig. line 181) and every
// `return` line (orig. lines 184, 186, 188-189, 191) are missing from this
// extract; presumably each branch returns the matching
// OneShotBufferizationOptions::AnalysisHeuristic value — TODO confirm.
182 parseHeuristicOption(const std::string &s) {
183  if (s == "bottom-up")
185  if (s == "top-down")
187  if (s == "bottom-up-from-terminators")
190  if (s == "fuzzer")
192  llvm_unreachable("invalid analysisheuristic option");
193 }
194 
// One-Shot Bufferize pass: builds a OneShotBufferizationOptions struct from
// the pass options (unless options were supplied at construction time),
// validates mutually exclusive flags, and runs either module-level or
// op-level One-Shot Bufferization, recording statistics.
195 struct OneShotBufferizePass
196  : public bufferization::impl::OneShotBufferizeBase<OneShotBufferizePass> {
197  OneShotBufferizePass() = default;
198 
// Constructor used when the caller provides pre-built options; in that case
// the pass options are ignored (see the `else` branch in runOnOperation).
199  explicit OneShotBufferizePass(const OneShotBufferizationOptions &options)
200  : options(options) {}
201 
202  void getDependentDialects(DialectRegistry &registry) const override {
203  registry
204  .insert<bufferization::BufferizationDialect, memref::MemRefDialect>();
205  }
206 
207  void runOnOperation() override {
// NOTE(extract): orig. line 208 is missing; presumably it declares the local
// `OneShotBufferizationOptions opt;` populated below — TODO confirm.
209  if (!options) {
210  // Make new bufferization options if none were provided when creating the
211  // pass.
212  opt.allowReturnAllocsFromLoops = allowReturnAllocsFromLoops;
213  opt.allowUnknownOps = allowUnknownOps;
214  opt.analysisFuzzerSeed = analysisFuzzerSeed;
215  opt.analysisHeuristic = parseHeuristicOption(analysisHeuristic);
216  opt.copyBeforeWrite = copyBeforeWrite;
217  opt.dumpAliasSets = dumpAliasSets;
// NOTE(extract): orig. line 218 is missing; presumably
// `opt.setFunctionBoundaryTypeConversion(` — TODO confirm.
219  parseLayoutMapOption(functionBoundaryTypeConversion));
220  if (mustInferMemorySpace) {
// NOTE(extract): orig. line 221 is missing; presumably it assigns a
// memory-space callback that the lambda below implements — TODO confirm.
222  [](TensorType t) -> std::optional<Attribute> {
223  return std::nullopt;
224  };
225  }
226  opt.printConflicts = printConflicts;
227  opt.bufferAlignment = bufferAlignment;
228  opt.testAnalysisOnly = testAnalysisOnly;
229  opt.bufferizeFunctionBoundaries = bufferizeFunctionBoundaries;
230  opt.checkParallelRegions = checkParallelRegions;
231  opt.noAnalysisFuncFilter = noAnalysisFuncFilter;
232 
233  // Configure type converter.
234  LayoutMapOption unknownTypeConversionOption =
235  parseLayoutMapOption(unknownTypeConversion);
// 'infer-layout-map' cannot apply to unknown ops: there is nothing to infer
// from, so reject the combination up front.
236  if (unknownTypeConversionOption == LayoutMapOption::InferLayoutMap) {
// NOTE(extract): orig. line 237 is missing; presumably the opening of an
// emitError(...) call whose message continues below — TODO confirm.
238  "Invalid option: 'infer-layout-map' is not a valid value for "
239  "'unknown-type-conversion'");
240  return signalPassFailure();
241  }
// Capture by value: the lambda may outlive this stack frame.
242  opt.unknownTypeConverterFn = [=](Value value, Attribute memorySpace,
243  const BufferizationOptions &options) {
244  auto tensorType = cast<TensorType>(value.getType());
245  if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
// NOTE(extract): orig. line 246 is missing; presumably
// `return bufferization::getMemRefTypeWithStaticIdentityLayout(` — TODO
// confirm.
247  tensorType, memorySpace);
248  assert(unknownTypeConversionOption ==
249  LayoutMapOption::FullyDynamicLayoutMap &&
250  "invalid layout map option");
// NOTE(extract): orig. line 251 is missing; presumably
// `return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,`
// — TODO confirm.
252  memorySpace);
253  };
254 
255  // Configure op filter.
256  OpFilter::Entry::FilterFn filterFn = [&](Operation *op) {
257  // Filter may be specified via options.
258  if (this->dialectFilter.hasValue())
259  return llvm::is_contained(this->dialectFilter,
260  op->getDialect()->getNamespace());
261  // No filter specified: All other ops are allowed.
262  return true;
263  };
264  opt.opFilter.allowOperation(filterFn);
265  } else {
// Options were supplied at construction time; pass options are ignored.
266  opt = *options;
267  }
268 
269  if (opt.copyBeforeWrite && opt.testAnalysisOnly) {
270  // These two flags do not make sense together: "copy-before-write"
271  // indicates that copies should be inserted before every memory write,
272  // but "test-analysis-only" indicates that only the analysis should be
273  // tested. (I.e., no IR is bufferized.)
// NOTE(extract): orig. line 274 is missing; presumably the opening of an
// emitError(...) call — TODO confirm.
275  "Invalid option: 'copy-before-write' cannot be used with "
276  "'test-analysis-only'");
277  return signalPassFailure();
278  }
279 
// 'print-conflicts' annotates analysis results, which only exist in
// test-analysis-only mode.
280  if (opt.printConflicts && !opt.testAnalysisOnly) {
281  emitError(
// NOTE(extract): orig. line 282 is missing; presumably the location argument
// of emitError — TODO confirm.
283  "Invalid option: 'print-conflicts' requires 'test-analysis-only'");
284  return signalPassFailure();
285  }
286 
287  if (opt.dumpAliasSets && !opt.testAnalysisOnly) {
288  emitError(
// NOTE(extract): orig. line 289 is missing; presumably the location argument
// of emitError — TODO confirm.
290  "Invalid option: 'dump-alias-sets' requires 'test-analysis-only'");
291  return signalPassFailure();
292  }
293 
294  BufferizationStatistics statistics;
295  ModuleOp moduleOp = getOperation();
296  if (opt.bufferizeFunctionBoundaries) {
297  if (failed(runOneShotModuleBufferize(moduleOp, opt, &statistics))) {
298  signalPassFailure();
299  return;
300  }
301  } else {
// The func-filter only makes sense when function boundaries are bufferized.
302  if (!opt.noAnalysisFuncFilter.empty()) {
// NOTE(extract): orig. line 303 is missing; presumably the opening of an
// emitError(...) call — TODO confirm.
304  "Invalid option: 'no-analysis-func-filter' requires "
305  "'bufferize-function-boundaries'");
306  return signalPassFailure();
307  }
308  if (failed(runOneShotBufferize(moduleOp, opt, &statistics))) {
309  signalPassFailure();
310  return;
311  }
312  }
313 
314  // Set pass statistics.
315  this->numBufferAlloc = statistics.numBufferAlloc;
316  this->numTensorInPlace = statistics.numTensorInPlace;
317  this->numTensorOutOfPlace = statistics.numTensorOutOfPlace;
318  }
319 
320 private:
// Options provided at construction time; empty when the pass should build
// them from its pass options instead.
321  std::optional<OneShotBufferizationOptions> options;
322 };
323 } // namespace
324 
// Factory functions for the One-Shot Bufferize pass: one default-constructed,
// one taking pre-built bufferization options.
// NOTE(extract): the signature lines (orig. lines 324-325 and 329-330,
// mlir::bufferization::createOneShotBufferizePass per the index) are missing.
326  return std::make_unique<OneShotBufferizePass>();
327 }
328 
331  return std::make_unique<OneShotBufferizePass>(options);
332 }
333 
// Factory for the finalizing bufferize pass (removes leftover
// bufferization materializations from a function).
// NOTE(extract): orig. line 335 is missing; presumably
// `mlir::bufferization::createFinalizingBufferizePass() {` — TODO confirm.
334 std::unique_ptr<OperationPass<func::FuncOp>>
336  return std::make_unique<FinalizingBufferizePass>();
337 }
338 
339 //===----------------------------------------------------------------------===//
340 // BufferizableOpInterface-based Bufferization
341 //===----------------------------------------------------------------------===//
342 
343 namespace {
344 /// A rewriter that keeps track of extra information during bufferization.
// It listens to its own IR notifications to maintain the erased-op set, the
// to_memref-op set, and the worklist that bufferizeOp() iterates over, and to
// collect allocation statistics.
345 class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener {
346 public:
347  BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps,
348  DenseSet<Operation *> &toMemrefOps,
349  SmallVector<Operation *> &worklist,
// NOTE(extract): orig. line 350 is missing; presumably
// `const BufferizationOptions &options,` — TODO confirm.
351  BufferizationStatistics *statistics)
352  : IRRewriter(ctx), erasedOps(erasedOps), toMemrefOps(toMemrefOps),
353  worklist(worklist), analysisState(options), statistics(statistics) {
// Register this object as the listener for its own rewrites.
354  setListener(this);
355  }
356 
357 protected:
358  void notifyOperationErased(Operation *op) override {
359  erasedOps.insert(op);
360  // Erase if present.
361  toMemrefOps.erase(op);
362  }
363 
364  void notifyOperationInserted(Operation *op, InsertPoint previous) override {
365  // We only care about newly created ops.
366  if (previous.isSet())
367  return;
368 
// A re-inserted op is no longer erased.
369  erasedOps.erase(op);
370 
371  // Gather statistics about allocs.
372  if (statistics) {
373  if (auto sideEffectingOp = dyn_cast<MemoryEffectOpInterface>(op))
374  statistics->numBufferAlloc += static_cast<int64_t>(
375  sideEffectingOp.hasEffect<MemoryEffects::Allocate>());
376  }
377 
378  // Keep track of to_memref ops.
379  if (isa<ToMemrefOp>(op)) {
380  toMemrefOps.insert(op);
381  return;
382  }
383 
384  // Skip to_tensor ops.
385  if (isa<ToTensorOp>(op))
386  return;
387 
388  // Skip non-tensor ops.
389  if (!hasTensorSemantics(op))
390  return;
391 
392  // Skip ops that are not allowed to be bufferized.
393  auto const &options = analysisState.getOptions();
394  if (!options.isOpAllowed(op))
395  return;
396 
397  // Add op to worklist.
398  worklist.push_back(op);
399  }
400 
401 private:
402  /// A set of all erased ops.
403  DenseSet<Operation *> &erasedOps;
404 
405  /// A set of all to_memref ops.
406  DenseSet<Operation *> &toMemrefOps;
407 
408  /// The worklist of ops to be bufferized.
409  SmallVector<Operation *> &worklist;
410 
411  /// The analysis state. Used for debug assertions and access to the
412  /// bufferization options.
413  const AnalysisState analysisState;
414 
415  /// Bufferization statistics for debugging.
416  BufferizationStatistics *statistics;
417 };
418 } // namespace
419 
// Bufferizes `op` and all nested ops implementing BufferizableOpInterface:
// (optionally) insert tensor copies, walk all bufferizable ops top-to-bottom,
// bufferize each, then clean up leftover to_memref/to_tensor pairs and verify
// the result unless partial bufferization is allowed.
// NOTE(extract): the signature lines (orig. lines 420-421,
// LogicalResult bufferization::bufferizeOp(Operation *op,
// const BufferizationOptions &options, ...) per the index) are missing.
422  BufferizationStatistics *statistics) {
// In copy-before-write mode the analysis is skipped; copies are inserted
// unconditionally before every write.
423  if (options.copyBeforeWrite) {
424  AnalysisState state(options);
425  if (failed(insertTensorCopies(op, state)))
426  return failure();
427  }
428 
429  // Keep track of to_memref ops.
430  DenseSet<Operation *> toMemrefOps;
431  op->walk([&](ToMemrefOp toMemrefOp) { toMemrefOps.insert(toMemrefOp); });
432 
433  // Gather all bufferizable ops in top-to-bottom order.
434  //
435  // We should ideally know the exact memref type of all operands when
436  // bufferizing an op. (This is the case when bufferizing top-to-bottom.)
437  // Otherwise, we have to use a memref type with a fully dynamic layout map to
438  // avoid copies. We are currently missing patterns for layout maps to
439  // canonicalize away (or canonicalize to more precise layouts).
440  SmallVector<Operation *> worklist;
441  op->walk<WalkOrder::PostOrder>([&](Operation *op) {
442  if (options.isOpAllowed(op) && hasTensorSemantics(op))
443  worklist.push_back(op);
444  });
445 
446  // Keep track of all erased ops.
447  DenseSet<Operation *> erasedOps;
448 
449  // Bufferize all ops.
450  BufferizationRewriter rewriter(op->getContext(), erasedOps, toMemrefOps,
451  worklist, options, statistics);
// Index-based loop: the rewriter's listener may append new ops to the
// worklist while we iterate, so iterators would be invalidated.
452  for (unsigned i = 0; i < worklist.size(); ++i) {
453  Operation *nextOp = worklist[i];
454  // Skip ops that were erased.
455  if (erasedOps.contains(nextOp))
456  continue;
457  // Skip ops that are not bufferizable or not allowed.
458  auto bufferizableOp = options.dynCastBufferizableOp(nextOp);
459  if (!bufferizableOp)
460  continue;
461  // Skip ops that no longer have tensor semantics.
462  if (!hasTensorSemantics(nextOp))
463  continue;
464  // Check for unsupported unstructured control flow.
465  if (!bufferizableOp.supportsUnstructuredControlFlow())
466  for (Region &r : nextOp->getRegions())
467  if (r.getBlocks().size() > 1)
468  return nextOp->emitOpError(
469  "op or BufferizableOpInterface implementation does not support "
470  "unstructured control flow, but at least one region has multiple "
471  "blocks");
472 
473  // Bufferize the op.
474  LLVM_DEBUG(llvm::dbgs()
475  << "//===-------------------------------------------===//\n"
476  << "IR after bufferizing: " << nextOp->getName() << "\n");
477  rewriter.setInsertionPoint(nextOp);
478  if (failed(bufferizableOp.bufferize(rewriter, options))) {
479  LLVM_DEBUG(llvm::dbgs()
480  << "failed to bufferize\n"
481  << "//===-------------------------------------------===//\n");
482  return nextOp->emitError("failed to bufferize op");
483  }
484  LLVM_DEBUG(llvm::dbgs()
485  << *op
486  << "\n//===-------------------------------------------===//\n");
487  }
488 
489  // Return early if the top-level op is entirely gone.
490  if (erasedOps.contains(op))
491  return success();
492 
493  // Fold all to_memref(to_tensor(x)) pairs.
494  for (Operation *op : toMemrefOps) {
495  rewriter.setInsertionPoint(op);
// NOTE(extract): orig. line 496 is missing; presumably the opening of the
// foldToMemrefToTensorPair(...) call whose arguments continue below — TODO
// confirm.
497  rewriter, cast<ToMemrefOp>(op), options);
498  }
499 
500  // Remove all dead to_tensor ops.
501  op->walk<WalkOrder::PostOrder>([&](ToTensorOp toTensorOp) {
502  if (toTensorOp->getUses().empty()) {
503  rewriter.eraseOp(toTensorOp);
// Skip walking into the just-erased op.
504  return WalkResult::skip();
505  }
506  return WalkResult::advance();
507  });
508 
509  /// Check the result of bufferization. Return an error if an op was not
510  /// bufferized, unless partial bufferization is allowed.
511  if (options.allowUnknownOps)
512  return success();
513 
514  for (Operation *op : worklist) {
515  // Skip ops that are entirely gone.
516  if (erasedOps.contains(op))
517  continue;
518  // Ops that no longer have tensor semantics (because they were updated
519  // in-place) are allowed.
520  if (!hasTensorSemantics(op))
521  continue;
522  // Continue ops that are not allowed.
523  if (!options.isOpAllowed(op))
524  continue;
525  // Ops without any uses and no side effects will fold away.
526  if (op->getUses().empty() && isMemoryEffectFree(op))
527  continue;
528  // ToTensorOps/ToMemrefOps are allowed in the output.
529  if (isa<ToTensorOp, ToMemrefOp>(op))
530  continue;
531  return op->emitError("op was not bufferized");
532  }
533 
534  return success();
535 }
536 
// Bufferizes the signature of `block` (tensor block arguments become memrefs)
// and rewrites all branch ops that forward operands to it, inserting
// to_tensor / to_memref / memref.cast materializations as needed.
537 LogicalResult
// NOTE(extract): orig. line 538 is missing; per the index the signature is
// bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
538 (missing)
539  const BufferizationOptions &options) {
540  OpBuilder::InsertionGuard g(rewriter);
// The parent op must itself be bufferizable; otherwise give up.
541  auto bufferizableOp = options.dynCastBufferizableOp(block->getParentOp());
542  if (!bufferizableOp)
543  return failure();
544 
545  // Compute the new signature.
546  SmallVector<Type> newTypes;
547  for (BlockArgument &bbArg : block->getArguments()) {
548  auto tensorType = dyn_cast<TensorType>(bbArg.getType());
// Non-tensor arguments keep their type.
549  if (!tensorType) {
550  newTypes.push_back(bbArg.getType());
551  continue;
552  }
553 
554  FailureOr<BaseMemRefType> memrefType =
// NOTE(extract): orig. line 555 is missing; presumably
// `bufferization::getBufferType(bbArg, options);` — TODO confirm.
556  if (failed(memrefType))
557  return failure();
558  newTypes.push_back(*memrefType);
559  }
560 
561  // Change the type of all block arguments.
562  for (auto [bbArg, type] : llvm::zip(block->getArguments(), newTypes)) {
563  if (bbArg.getType() == type)
564  continue;
565 
566  // Collect all uses of the bbArg.
// Snapshot the uses first: they are modified below while being replaced.
567  SmallVector<OpOperand *> bbArgUses;
568  for (OpOperand &use : bbArg.getUses())
569  bbArgUses.push_back(&use);
570 
571  // Change the bbArg type to memref.
572  bbArg.setType(type);
573 
574  // Replace all uses of the original tensor bbArg.
575  rewriter.setInsertionPointToStart(block);
576  if (!bbArgUses.empty()) {
577  Value toTensorOp =
578  rewriter.create<bufferization::ToTensorOp>(bbArg.getLoc(), bbArg);
579  for (OpOperand *use : bbArgUses)
580  use->set(toTensorOp);
581  }
582  }
583 
584  // Bufferize callers of the block.
585  for (Operation *op : block->getUsers()) {
586  auto branchOp = dyn_cast<BranchOpInterface>(op);
587  if (!branchOp)
588  return op->emitOpError("cannot bufferize ops with block references that "
589  "do not implement BranchOpInterface");
590 
// Locate which successor of the branch op is this block.
591  auto it = llvm::find(op->getSuccessors(), block);
592  assert(it != op->getSuccessors().end() && "could find successor");
593  int64_t successorIdx = std::distance(op->getSuccessors().begin(), it);
594 
595  SuccessorOperands operands = branchOp.getSuccessorOperands(successorIdx);
596  SmallVector<Value> newOperands;
597  for (auto [operand, type] :
598  llvm::zip(operands.getForwardedOperands(), newTypes)) {
599  if (operand.getType() == type) {
600  // Not a tensor type. Nothing to do for this operand.
601  newOperands.push_back(operand);
602  continue;
603  }
604  FailureOr<BaseMemRefType> operandBufferType =
// NOTE(extract): orig. line 605 is missing; presumably
// `bufferization::getBufferType(operand, options);` — TODO confirm.
606  if (failed(operandBufferType))
607  return failure();
608  rewriter.setInsertionPointAfterValue(operand);
609  Value bufferizedOperand = rewriter.create<bufferization::ToMemrefOp>(
610  operand.getLoc(), *operandBufferType, operand);
611  // A cast is needed if the operand and the block argument have different
612  // bufferized types.
613  if (type != *operandBufferType)
614  bufferizedOperand = rewriter.create<memref::CastOp>(
615  operand.getLoc(), type, bufferizedOperand);
616  newOperands.push_back(bufferizedOperand);
617  }
// Rewrite the branch's forwarded operands in place.
618  operands.getMutableForwardedOperands().assign(newOperands);
619  }
620 
621  return success();
622 }
623 
626  options.allowUnknownOps = true;
627  options.copyBeforeWrite = true;
628  options.enforceAliasingInvariants = false;
629  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
630  const BufferizationOptions &options) {
632  cast<TensorType>(value.getType()), memorySpace);
633  };
634  options.opFilter.allowDialect<BufferizationDialect>();
635  return options;
636 }
static Value materializeToTensor(OpBuilder &builder, TensorType type, ValueRange inputs, Location loc)
Definition: Bufferize.cpp:45
static MLIRContext * getContext(OpFoldResult val)
static llvm::ManagedStatic< PassManagerOptions > options
Base class for generic analysis states.
Attributes are known-constant values of operations.
Definition: Attributes.h:25
This class provides a shared interface for ranked and unranked memref types.
Definition: BuiltinTypes.h:149
This class represents an argument of a Block.
Definition: Value.h:319
Block represents an ordered list of Operations.
Definition: Block.h:33
BlockArgListType getArguments()
Definition: Block.h:87
Operation * getParentOp()
Returns the closest surrounding operation that contains this block.
Definition: Block.cpp:33
This class implements a pattern rewriter for use with ConversionPatterns.
void replaceOp(Operation *op, ValueRange newValues) override
Replace the given operation with the new values.
This class describes a specific conversion target.
void addLegalOp(OperationName op)
Register the given operations as legal.
The DialectRegistry maps a dialect namespace to a constructor for the matching dialect.
StringRef getNamespace() const
Definition: Dialect.h:54
user_range getUsers() const
Returns a range of all users.
Definition: UseDefLists.h:274
This class coordinates rewriting a piece of IR outside of a pattern rewrite, providing a way to keep ...
Definition: PatternMatch.h:772
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:66
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:60
void assign(ValueRange values)
Assign this range to the given values.
RAII guard to reset the insertion point of the builder when destroyed.
Definition: Builders.h:356
This class helps build Operations.
Definition: Builders.h:215
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:439
void setInsertionPointAfterValue(Value val)
Sets the insertion point to the node after the specified value.
Definition: Builders.h:429
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:497
OpConversionPattern is a wrapper around ConversionPattern that allows for matching and rewriting agai...
OpConversionPattern(MLIRContext *context, PatternBenefit benefit=1)
This class represents an operand of an operation.
Definition: Value.h:267
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loa...
Definition: Operation.h:220
std::enable_if_t< llvm::function_traits< std::decay_t< FnT > >::num_args==1, RetT > walk(FnT &&callback)
Walk the operation by calling the callback for each nested operation (including this one),...
Definition: Operation.h:793
MLIRContext * getContext()
Return the context this operation is associated with.
Definition: Operation.h:216
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Definition: Operation.cpp:268
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
Definition: Operation.h:672
OperationName getName()
The name of an operation is the key identifier for it.
Definition: Operation.h:119
SuccessorRange getSuccessors()
Definition: Operation.h:699
use_range getUses()
Returns a range of all uses, which is useful for iterating over all uses.
Definition: Operation.h:842
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
Definition: Operation.cpp:671
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition: Region.h:26
MLIRContext * getContext() const
Definition: PatternMatch.h:829
RewritePatternSet & add(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
Definition: PatternMatch.h:853
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
Definition: PatternMatch.h:400
This class models how operands are forwarded to block arguments in control flow.
MutableOperandRange getMutableForwardedOperands() const
Get the range of operands that are simply forwarded to the successor.
OperandRange getForwardedOperands() const
Get the range of operands that are simply forwarded to the successor.
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
Definition: BuiltinTypes.h:102
void addConversion(FnT &&callback)
Register a conversion function.
bool isLegal(Type type) const
Return true if the given type is legal for this type converter, i.e.
void addArgumentMaterialization(FnT &&callback)
All of the following materializations require function objects that are convertible to the following ...
void addSourceMaterialization(FnT &&callback)
This method registers a materialization that will be called when converting a legal replacement value...
void addTargetMaterialization(FnT &&callback)
This method registers a materialization that will be called when converting an illegal (source) value...
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:381
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:129
static WalkResult skip()
Definition: Visitors.h:52
static WalkResult advance()
Definition: Visitors.h:51
AnalysisState provides a variety of helper functions for dealing with tensor values.
A helper type converter class that automatically populates the relevant materializations and type con...
Definition: Bufferize.h:43
BufferizeTypeConverter()
Registers conversions into BufferizeTypeConverter.
Definition: Bufferize.cpp:53
void allowOperation()
Allow the given ops.
LogicalResult runOneShotBufferize(Operation *op, const OneShotBufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Run One-Shot Bufferize on the given op: Analysis + Bufferization.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
FailureOr< Value > castOrReallocMemRefValue(OpBuilder &b, Value value, MemRefType type, const BufferizationOptions &options)
Try to cast the given ranked MemRef-typed value to the given ranked MemRef type.
llvm::LogicalResult runOneShotModuleBufferize(ModuleOp moduleOp, const bufferization::OneShotBufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Run One-Shot Module Bufferization on the given module.
void populateBufferizeMaterializationLegality(ConversionTarget &target)
Marks ops used by bufferization for type conversion materializations as "legal" in the given Conversi...
Definition: Bufferize.cpp:95
LogicalResult bufferizeOp(Operation *op, const BufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Bufferize op and its nested ops that implement BufferizableOpInterface.
Definition: Bufferize.cpp:420
LogicalResult insertTensorCopies(Operation *op, const OneShotBufferizationOptions &options, BufferizationStatistics *statistics=nullptr)
Resolve RaW and other conflicts by inserting bufferization.alloc_tensor ops.
std::unique_ptr< OperationPass< func::FuncOp > > createFinalizingBufferizePass()
Creates a pass that finalizes a partial bufferization by removing remaining bufferization....
Definition: Bufferize.cpp:335
LogicalResult bufferizeBlockSignature(Block *block, RewriterBase &rewriter, const BufferizationOptions &options)
Bufferize the signature of block and its callers (i.e., ops that have the given block as a successor)...
Definition: Bufferize.cpp:538
std::unique_ptr< Pass > createOneShotBufferizePass()
Create a pass that bufferizes all ops that implement BufferizableOpInterface with One-Shot Bufferize.
Definition: Bufferize.cpp:325
LogicalResult foldToMemrefToTensorPair(RewriterBase &rewriter, ToMemrefOp toMemref, const BufferizationOptions &options)
Try to fold to_memref(to_tensor(x)).
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
BufferizationOptions getPartialBufferizationOptions()
Return BufferizationOptions such that the bufferizeOp behaves like the old (deprecated) partial,...
Definition: Bufferize.cpp:624
void populateEliminateBufferizeMaterializationsPatterns(const BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns)
Populate patterns to eliminate bufferize materializations.
Definition: Bufferize.cpp:132
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
Include the generated interface declarations.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
Definition: Utils.cpp:305
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
LogicalResult applyFullConversion(ArrayRef< Operation * > ops, const ConversionTarget &target, const FrozenRewritePatternSet &patterns, ConversionConfig config=ConversionConfig())
Apply a complete conversion on the given operations, and all nested operations.
bool isMemoryEffectFree(Operation *op)
Returns true if the given operation is free of memory effects.
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
The following effect indicates that the operation allocates from some resource.
Options for BufferizableOpInterface-based bufferization.
bool copyBeforeWrite
If set to true, the analysis is skipped.
void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption)
This function controls buffer types on function signatures.
bool allowUnknownOps
Specifies whether not bufferizable ops are allowed in the input.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
bool printConflicts
If set to true, the IR is annotated with details about RaW conflicts.
bool testAnalysisOnly
If set to true, does not modify the IR apart from adding attributes (for checking the results of the ...
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
UnknownTypeConverterFn unknownTypeConverterFn
Type converter from tensors to memrefs.
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
Bufferization statistics for debugging.
Definition: Bufferize.h:34
Options for analysis-enabled bufferization.
unsigned analysisFuzzerSeed
Seed for the analysis fuzzer.
bool dumpAliasSets
Specifies whether the tensor IR should be annotated with alias sets.
bool allowReturnAllocsFromLoops
Specifies whether returning newly allocated memrefs from loops should be allowed.
AnalysisHeuristic analysisHeuristic
The heuristic controls the order in which ops are traversed during the analysis.
llvm::ArrayRef< std::string > noAnalysisFuncFilter
Specify the functions that should not be analyzed.
std::function< bool(Operation *)> FilterFn
If the filter function evaluates to true, the filter matches.