#include "llvm/Support/DebugLog.h"

namespace mlir {
namespace bufferization {
#define GEN_PASS_DEF_ONESHOTBUFFERIZEPASS
#include "mlir/Dialect/Bufferization/Transforms/Passes.h.inc"
} // namespace bufferization
} // namespace mlir

#define DEBUG_TYPE "bufferize"

using namespace mlir;
using namespace mlir::bufferization;
/// Resolve the analysis heuristic from its textual pass-option value.
static OneShotBufferizationOptions::AnalysisHeuristic
parseHeuristicOption(const std::string &s) {
  // ...
  if (s == "bottom-up-from-terminators")
    return OneShotBufferizationOptions::AnalysisHeuristic::
        BottomUpFromTerminators;
  // ...
  llvm_unreachable("invalid analysis-heuristic option");
}
struct OneShotBufferizePass
    : public bufferization::impl::OneShotBufferizePassBase<
          OneShotBufferizePass> {
  void runOnOperation() override {
    // Assemble the bufferization options from the pass options (unless a set
    // of options was provided when the pass was constructed).
    OneShotBufferizationOptions opt;
    // ...
    if (mustInferMemorySpace && useEncodingForMemorySpace) {
      emitError(getOperation()->getLoc())
          << "only one of 'must-infer-memory-space' and "
             "'use-encoding-for-memory-space' is allowed in "
          << getArgument();
      return signalPassFailure();
    }

    if (mustInferMemorySpace) {
      // Leave the default memory space unknown so that it must be inferred.
      opt.defaultMemorySpaceFn = [](TensorType t) -> std::optional<Attribute> {
        return std::nullopt;
      };
    }

    if (useEncodingForMemorySpace) {
      // Use the tensor encoding attribute (if present) as the memory space.
      opt.defaultMemorySpaceFn = [](TensorType t) -> std::optional<Attribute> {
        if (auto rtt = dyn_cast<RankedTensorType>(t))
          return rtt.getEncoding();
        return std::nullopt;
      };
    }
    // Configure the type conversion for ops that are not bufferizable.
    LayoutMapOption unknownTypeConversionOption = unknownTypeConversion;
    if (unknownTypeConversionOption == LayoutMapOption::InferLayoutMap) {
      emitError(getOperation()->getLoc(),
                "Invalid option: 'infer-layout-map' is not a valid value for "
                "'unknown-type-conversion'");
      return signalPassFailure();
    }
    opt.unknownTypeConverterFn =
        [=](TensorType tensorType, Attribute memorySpace,
            const BufferizationOptions &options) -> BaseMemRefType {
      if (unknownTypeConversionOption == LayoutMapOption::IdentityLayoutMap)
        return bufferization::getMemRefTypeWithStaticIdentityLayout(
            tensorType, memorySpace);
      assert(unknownTypeConversionOption ==
                 LayoutMapOption::FullyDynamicLayoutMap &&
             "invalid layout map option");
      return bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                                memorySpace);
    };
    // Configure the op filter: if a 'dialect-filter' was provided, only ops
    // from the listed dialects are bufferized; all other ops are ignored.
    OpFilter::Entry::FilterFn filterFn = [&](Operation *op) {
      if (this->dialectFilter.hasValue() && !(*this->dialectFilter).empty())
        return llvm::is_contained(this->dialectFilter,
                                  op->getDialect()->getNamespace());
      // No filter specified: all ops are allowed.
      return true;
    };
    opt.opFilter.allowOperation(filterFn);
144 "Invalid option: 'copy-before-write' cannot be used with "
145 "'test-analysis-only'");
146 return signalPassFailure();
152 "Invalid option: 'print-conflicts' requires 'test-analysis-only'");
153 return signalPassFailure();
159 "Invalid option: 'dump-alias-sets' requires 'test-analysis-only'");
160 return signalPassFailure();
165 ModuleOp moduleOp = getOperation();
175 "Invalid option: 'no-analysis-func-filter' requires "
176 "'bufferize-function-boundaries'");
177 return signalPassFailure();
  /// Bufferization options to use, if provided when the pass was constructed.
  std::optional<OneShotBufferizationOptions> options;
};
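// Editor's note: the pass above is a thin wrapper around the
// runOneShotBufferize / runOneShotModuleBufferize entry points used in its
// runOnOperation. A minimal sketch of driving One-Shot Bufferize from a custom
// pass could look as follows; "MyBufferizePass" and the chosen option values
// are illustrative assumptions, not code from this file:
//
//   void MyBufferizePass::runOnOperation() {
//     OneShotBufferizationOptions opts;
//     opts.allowUnknownOps = true; // keep not-yet-bufferizable ops around
//     BufferizationState state;
//     if (failed(runOneShotBufferize(getOperation(), opts, state)))
//       return signalPassFailure();
//   }
//
// With bufferize-function-boundaries enabled, runOneShotModuleBufferize is the
// entry point to use instead, as done above.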
/// An IRRewriter that keeps track of extra bufferization-related information
/// (erased ops, to_buffer ops, the worklist) via a rewriter listener.
class BufferizationRewriter : public IRRewriter, public RewriterBase::Listener {
public:
  BufferizationRewriter(MLIRContext *ctx, DenseSet<Operation *> &erasedOps,
                        DenseSet<Operation *> &toBufferOps,
                        SmallVector<Operation *> &worklist,
                        const BufferizationOptions &options,
                        BufferizationStatistics *statistics)
      : IRRewriter(ctx), erasedOps(erasedOps), toBufferOps(toBufferOps),
        worklist(worklist), analysisState(options), statistics(statistics) {
    setListener(this);
  }
  void notifyOperationErased(Operation *op) override {
    erasedOps.insert(op);
    // Erased ops are no longer to_buffer ops of interest.
    toBufferOps.erase(op);
  }
  void notifyOperationInserted(Operation *op, InsertPoint previous) override {
    // We only care about newly created ops.
    if (previous.isSet())
      return;

    erasedOps.erase(op);

    // Gather statistics about newly created allocations.
    if (statistics) {
      if (auto sideEffectingOp = dyn_cast<MemoryEffectOpInterface>(op))
        statistics->numBufferAlloc += static_cast<int64_t>(
            sideEffectingOp.hasEffect<MemoryEffects::Allocate>());
    }

    // Keep track of newly created to_buffer ops.
    if (isa<ToBufferOp>(op)) {
      toBufferOps.insert(op);
      return;
    }

    // Skip to_tensor ops.
    if (isa<ToTensorOp>(op))
      return;

    // Skip ops without tensor semantics and ops that are filtered out.
    auto const &options = analysisState.getOptions();
    if (!hasTensorSemantics(op) || !options.isOpAllowed(op))
      return;

    // The newly created op still needs to be bufferized.
    worklist.push_back(op);
  }

private:
  /// A set of all erased ops.
  DenseSet<Operation *> &erasedOps;
  /// A set of all to_buffer ops.
  DenseSet<Operation *> &toBufferOps;
  /// The worklist of ops that still need to be bufferized.
  SmallVector<Operation *> &worklist;
  /// The analysis state; provides access to the bufferization options.
  const AnalysisState analysisState;
  /// Bufferization statistics (may be nullptr).
  BufferizationStatistics *const statistics;
};
LogicalResult
bufferization::bufferizeOp(Operation *op, const BufferizationOptions &options,
                           BufferizationState &bufferizationState,
                           BufferizationStatistics *statistics) {
  // Keep track of all to_buffer ops.
  DenseSet<Operation *> toBufferOps;
  op->walk([&](ToBufferOp toBufferOp) { toBufferOps.insert(toBufferOp); });

  // Gather all ops that should be bufferized.
  SmallVector<Operation *> worklist;
  op->walk([&](Operation *op) {
    if (options.isOpAllowed(op) && hasTensorSemantics(op))
      worklist.push_back(op);
  });

  // Keep track of all erased ops.
  DenseSet<Operation *> erasedOps;

  // Bufferize all ops in the worklist.
  BufferizationRewriter rewriter(op->getContext(), erasedOps, toBufferOps,
                                 worklist, options, statistics);
  for (unsigned i = 0; i < worklist.size(); ++i) {
    Operation *nextOp = worklist[i];
    // Skip ops that were erased in the meantime.
    if (erasedOps.contains(nextOp))
      continue;
    // Skip ops that are not bufferizable or not allowed.
    auto bufferizableOp = options.dynCastBufferizableOp(nextOp);
    if (!bufferizableOp)
      continue;
    // Skip ops that no longer have tensor semantics.
    if (!hasTensorSemantics(nextOp))
      continue;
    // Check for unsupported unstructured control flow.
    if (!bufferizableOp.supportsUnstructuredControlFlow())
      for (Region &r : nextOp->getRegions())
        if (r.getBlocks().size() > 1)
          return nextOp->emitOpError(
              "op or BufferizableOpInterface implementation does not support "
              "unstructured control flow, but at least one region has multiple "
              "blocks");
    // Bufferize the op.
    LDBG(3) << "//===-------------------------------------------===//\n"
            << "IR after bufferizing: " << nextOp->getName();
    rewriter.setInsertionPoint(nextOp);
    if (failed(
            bufferizableOp.bufferize(rewriter, options, bufferizationState))) {
      LDBG(2) << "failed to bufferize\n"
              << "//===-------------------------------------------===//";
      return nextOp->emitError("failed to bufferize op");
    }
    LDBG(3) << *op << "\n//===-------------------------------------------===//";
  }
  // Fold all to_buffer(to_tensor(x)) pairs.
  for (Operation *op : toBufferOps) {
    if (erasedOps.contains(op))
      continue;
    rewriter.setInsertionPoint(op);
    // Fold if possible; it is not an error if the pair cannot be folded.
    (void)bufferization::foldToBufferToTensorPair(
        rewriter, cast<ToBufferOp>(op), options);
  }

  // Remove all dead to_tensor ops.
  op->walk([&](ToTensorOp toTensorOp) {
    if (toTensorOp->getUses().empty()) {
      rewriter.eraseOp(toTensorOp);
      return WalkResult::skip();
    }
    return WalkResult::advance();
  });
  // Return an error for each op that was not bufferized, unless unknown ops
  // are allowed in the output.
  if (options.allowUnknownOps)
    return success();
  for (Operation *op : worklist) {
    // Skip ops that are entirely gone.
    if (erasedOps.contains(op))
      continue;
    // ...
    // to_tensor / to_buffer ops are allowed in the output.
    if (isa<ToTensorOp, ToBufferOp>(op))
      continue;
    return op->emitError("op was not bufferized");
  }

  return success();
}
LogicalResult
bufferization::bufferizeBlockSignature(Block *block, RewriterBase &rewriter,
                                       const BufferizationOptions &options,
                                       BufferizationState &state) {
  OpBuilder::InsertionGuard g(rewriter);

  // Compute the new block argument types.
  SmallVector<Type> newTypes;
  for (BlockArgument &bbArg : block->getArguments()) {
    auto tensorType = dyn_cast<TensorType>(bbArg.getType());
    if (!tensorType) {
      newTypes.push_back(bbArg.getType());
      continue;
    }
    FailureOr<BufferLikeType> bufferType =
        bufferization::getBufferType(bbArg, options, state);
    if (failed(bufferType))
      return failure();
    newTypes.push_back(*bufferType);
  }

  // Change the type of all block arguments.
  for (auto [bbArg, type] : llvm::zip(block->getArguments(), newTypes)) {
    if (bbArg.getType() == type)
      continue;

    // Collect all uses of the bbArg before changing its type.
    SmallVector<OpOperand *> bbArgUses;
    for (OpOperand &use : bbArg.getUses())
      bbArgUses.push_back(&use);

    // Change the bbArg type to a buffer type.
    Type tensorType = bbArg.getType();
    bbArg.setType(type);

    // Replace the remaining tensor uses with a to_tensor of the new bbArg, so
    // that not-yet-bufferized users keep operating on a tensor value.
    rewriter.setInsertionPointToStart(block);
    if (!bbArgUses.empty()) {
      Value toTensorOp = bufferization::ToTensorOp::create(
          rewriter, bbArg.getLoc(), tensorType, bbArg);
      for (OpOperand *use : bbArgUses)
        use->set(toTensorOp);
    }
  }
  // Bufferize the forwarded operands of all callers (i.e., ops that branch to
  // this block).
  for (Operation *op : block->getUsers()) {
    auto branchOp = dyn_cast<BranchOpInterface>(op);
    if (!branchOp)
      return op->emitOpError("cannot bufferize ops with block references that "
                             "do not implement BranchOpInterface");

    // Find the successor that leads to this block.
    auto it = llvm::find(op->getSuccessors(), block);
    assert(it != op->getSuccessors().end() && "could not find successor");
    int64_t successorIdx = std::distance(op->getSuccessors().begin(), it);

    SuccessorOperands operands = branchOp.getSuccessorOperands(successorIdx);
    SmallVector<Value> newOperands;
    for (auto [operand, type] :
         llvm::zip(operands.getForwardedOperands(), newTypes)) {
      if (operand.getType() == type) {
        // Not a tensor type. Nothing to do for this operand.
        newOperands.push_back(operand);
        continue;
      }
      FailureOr<BufferLikeType> operandBufferType =
          bufferization::getBufferType(operand, options, state);
      if (failed(operandBufferType))
        return failure();
      rewriter.setInsertionPointAfterValue(operand);
      Value bufferizedOperand = bufferization::ToBufferOp::create(
          rewriter, operand.getLoc(), *operandBufferType, operand);
      // A cast is needed if the operand and the new block argument have
      // different bufferized types.
      if (type != *operandBufferType)
        bufferizedOperand = memref::CastOp::create(rewriter, operand.getLoc(),
                                                   type, bufferizedOperand);
      newOperands.push_back(bufferizedOperand);
    }
    operands.getMutableForwardedOperands().assign(newOperands);
  }

  return success();
}
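// Editor's note: bufferizeBlockSignature is typically used when bufferizing
// ops with unstructured control flow, where each block of a region must have
// its signature converted. A minimal usage sketch; the surrounding loop and
// the region/rewriter/options/state variables are assumed context, not code
// from this file:
//
//   for (Block &block : region.getBlocks())
//     if (failed(bufferization::bufferizeBlockSignature(&block, rewriter,
//                                                       options, state)))
//       return failure();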