21 #include "llvm/ADT/ScopeExit.h"
22 #include "llvm/Support/Debug.h"
29 namespace bufferization {
31 #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
38 #define DEBUG_TYPE "bufferizable-op-interface"
39 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
40 #define LDBG(X) LLVM_DEBUG(DBGS() << (X))
43 using namespace bufferization;
48 if (
auto bufferizableOp =
options.dynCastBufferizableOp(op))
58 if (
auto iter = enclosingRepetitiveRegionCache.find_as(op);
59 iter != enclosingRepetitiveRegionCache.end())
61 return enclosingRepetitiveRegionCache[op] =
67 if (
auto iter = enclosingRepetitiveRegionCache.find_as(value);
68 iter != enclosingRepetitiveRegionCache.end())
76 visitedRegions.push_back(region);
81 enclosingRepetitiveRegionCache[value] = region;
82 for (
Region *r : visitedRegions)
83 enclosingRepetitiveRegionCache[r] = region;
89 if (
auto iter = enclosingRepetitiveRegionCache.find_as(block);
90 iter != enclosingRepetitiveRegionCache.end())
104 enclosingRepetitiveRegionCache[block] = region;
105 for (
Region *r : visitedRegions)
106 enclosingRepetitiveRegionCache[r] = region;
112 auto key = std::make_pair(op0, op1);
113 if (
auto iter = insideMutuallyExclusiveRegionsCache.find(key);
114 iter != insideMutuallyExclusiveRegionsCache.end())
118 insideMutuallyExclusiveRegionsCache[key] = result;
119 insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
124 enclosingRepetitiveRegionCache.clear();
125 insideMutuallyExclusiveRegionsCache.clear();
142 if (bufferizableOp &&
145 "expected that all parallel regions are also repetitive regions");
154 if (
auto opResult = llvm::dyn_cast<OpResult>(value))
155 return opResult.getDefiningOp();
156 return llvm::cast<BlockArgument>(value).getOwner()->
getParentOp();
166 if (llvm::isa<RankedTensorType>(shapedValue.
getType())) {
167 tensor = shapedValue;
168 }
else if (llvm::isa<MemRefType>(shapedValue.
getType())) {
169 tensor = b.
create<ToTensorOp>(loc, shapedValue);
170 }
else if (llvm::isa<UnrankedTensorType>(shapedValue.
getType()) ||
171 llvm::isa<UnrankedMemRefType>(shapedValue.
getType())) {
173 ->
emitError(
"copying of unranked tensors is not implemented");
175 llvm_unreachable(
"expected RankedTensorType or MemRefType");
177 RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.
getType());
182 bool reifiedShapes =
false;
183 if (llvm::isa<RankedTensorType>(shapedValue.
getType()) &&
184 llvm::isa<OpResult>(shapedValue)) {
188 reifiedShapes =
true;
190 resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
191 for (
const auto &dim :
enumerate(tensorType.getShape()))
192 if (ShapedType::isDynamic(dim.value()))
193 dynamicSizes.push_back(cast<Value>(shape[dim.index()]));
203 auto allocTensorOp = b.
create<AllocTensorOp>(loc, tensorType, dynamicSizes,
210 if (failed(copyBufferType))
212 std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
214 memorySpace =
options.defaultMemorySpaceFn(tensorType);
215 if (memorySpace.has_value())
216 allocTensorOp.setMemorySpaceAttr(memorySpace.value());
217 return allocTensorOp.getResult();
220 LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
231 Type operandType = opOperand.get().getType();
232 if (!llvm::isa<TensorType>(operandType))
234 if (state.isInPlace(opOperand))
236 if (llvm::isa<UnrankedTensorType>(operandType))
237 return op->
emitError(
"copying of unranked tensors is not implemented");
241 isa<OpResult>(aliasingValues.
getAliases()[0].value) &&
242 !state.bufferizesToMemoryWrite(opOperand) &&
243 state.getAliasingOpOperands(aliasingValues.
getAliases()[0].value)
244 .getNumAliases() == 1 &&
245 !isa<UnrankedTensorType>(
246 aliasingValues.
getAliases()[0].value.getType())) {
254 outOfPlaceValues.push_back(value);
255 if (!state.canOmitTensorCopy(opOperand))
256 copiedOpValues.insert(value);
259 outOfPlaceOpOperands.push_back(&opOperand);
260 if (!state.canOmitTensorCopy(opOperand))
261 copiedOpOperands.insert(&opOperand);
267 for (
OpOperand *opOperand : outOfPlaceOpOperands) {
269 rewriter, op->
getLoc(), opOperand->get(), state.getOptions(),
270 copiedOpOperands.contains(opOperand));
278 for (
Value value : outOfPlaceValues) {
280 rewriter, op->
getLoc(), value, state.getOptions(),
281 copiedOpValues.count(value));
288 if (use->getOwner() ==
copy->getDefiningOp())
292 if (isa<tensor::DimOp>(use->getOwner()))
307 bool isAllowed = !hasAllowRule();
308 for (
const Entry &entry : entries) {
309 bool filterResult = entry.fn(op);
310 switch (entry.type) {
312 isAllowed |= filterResult;
342 llvm::cast<TensorType>(value.
getType()), memorySpace);
349 : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
350 unknownTypeConverterFn(defaultUnknownTypeConverter) {}
355 bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->
getDialect());
362 BufferizableOpInterface
366 auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
369 return bufferizableOp;
372 BufferizableOpInterface
378 LayoutMapOption layoutMapOption) {
382 if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
389 layoutMapOption == LayoutMapOption::InferLayoutMap;
397 if (
auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
408 if (
auto bufferizableOp =
getOptions().dynCastBufferizableOp(op))
409 return bufferizableOp.getAliasingOpOperands(value, *
this);
418 if (
auto bufferizableOp =
420 return bufferizableOp.getAliasingValues(opOperand, *
this);
429 if (
auto bufferizableOp =
431 return bufferizableOp.bufferizesToMemoryRead(opOperand, *
this);
441 if (
auto bufferizableOp =
443 return bufferizableOp.bufferizesToMemoryWrite(opOperand, *
this);
453 if (
auto bufferizableOp =
455 return bufferizableOp.bufferizesToAliasOnly(opOperand, *
this);
463 auto opResult = llvm::dyn_cast<OpResult>(value);
469 return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *
this);
476 assert(llvm::isa<TensorType>(value.
getType()) &&
"expected TensorType");
480 workingSet.push_back(&use);
482 while (!workingSet.empty()) {
483 OpOperand *uMaybeReading = workingSet.pop_back_val();
484 if (!visited.insert(uMaybeReading).second)
490 for (
OpOperand &use : alias.value.getUses())
491 workingSet.push_back(&use);
510 workingSet.insert(opOperand->
get());
512 if (visitedOpOperands)
513 visitedOpOperands->insert(opOperand);
515 while (!workingSet.empty()) {
516 Value value = workingSet.pop_back_val();
518 if (!
config.revisitAlreadyVisitedValues && visited.contains(value)) {
520 if (
config.alwaysIncludeLeaves)
521 result.insert(value);
524 visited.insert(value);
526 if (condition(value)) {
527 result.insert(value);
534 if (
config.alwaysIncludeLeaves)
535 result.insert(value);
543 if (
config.alwaysIncludeLeaves)
544 result.insert(value);
549 if (
config.followEquivalentOnly &&
553 if (
config.alwaysIncludeLeaves)
554 result.insert(value);
561 if (
config.alwaysIncludeLeaves)
562 result.insert(value);
566 if (
config.followSameTypeOrCastsOnly &&
567 a.opOperand->get().getType() != value.
getType() &&
571 if (
config.alwaysIncludeLeaves)
572 result.insert(value);
576 workingSet.insert(a.opOperand->get());
577 if (visitedOpOperands)
578 visitedOpOperands->insert(a.opOperand);
589 config.alwaysIncludeLeaves =
false;
618 llvm::none_of(aliases,
628 if (isa<ToMemrefOp>(opOperand.
getOwner()))
656 auto rankedTensorType = llvm::dyn_cast<RankedTensorType>(tensor.
getType());
657 assert((!rankedTensorType || llvm::cast<MemRefType>(memrefType).getRank() ==
658 rankedTensorType.getRank()) &&
659 "to_memref would be invalid: mismatching ranks");
666 auto tensorType = llvm::dyn_cast<TensorType>(value.
getType());
667 assert(tensorType &&
"unexpected non-tensor type");
671 if (
auto toTensorOp = value.
getDefiningOp<bufferization::ToTensorOp>())
672 return toTensorOp.getMemref();
678 if (failed(memrefType))
682 .
create<bufferization::ToMemrefOp>(value.
getLoc(), *memrefType, value)
687 FailureOr<BaseMemRefType>
694 FailureOr<BaseMemRefType>
697 assert(llvm::isa<TensorType>(value.
getType()) &&
698 "unexpected non-tensor type");
699 invocationStack.push_back(value);
701 llvm::make_scope_exit([&]() { invocationStack.pop_back(); });
705 auto bufferizableOp =
options.dynCastBufferizableOp(op);
707 return bufferizableOp.getBufferType(value,
options, invocationStack);
712 if (!memSpace.has_value())
713 return op->emitError(
"could not infer memory space");
719 if (
auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
720 return bufferizableOp.hasTensorSemantics();
728 "expected one value per OpResult");
734 Value replacement = values[opResult.getResultNumber()];
735 if (llvm::isa<TensorType>(opResult.getType())) {
738 assert((llvm::isa<MemRefType>(replacement.
getType()) ||
739 llvm::isa<UnrankedMemRefType>(replacement.
getType())) &&
740 "tensor op result should be replaced with a memref value");
745 replacement = rewriter.
create<bufferization::ToTensorOp>(
746 replacement.
getLoc(), opResult.getType(), replacement);
748 replacements.push_back(replacement);
768 .
create<memref::AllocOp>(loc, type, dynShape,
771 return b.
create<memref::AllocOp>(loc, type, dynShape).getResult();
778 return (*
memCpyFn)(b, loc, from, to);
780 b.
create<memref::CopyOp>(loc, from, to);
790 MemRefLayoutAttrInterface layout,
792 auto tensorType = llvm::cast<TensorType>(value.
getType());
795 if (
auto unrankedTensorType =
796 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
797 assert(!layout &&
"UnrankedTensorType cannot have a layout map");
803 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
806 rankedTensorType.getElementType(), layout,
810 return options.unknownTypeConverterFn(value, memorySpace,
options);
817 if (
auto unrankedTensorType =
818 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
824 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
825 int64_t dynamicOffset = ShapedType::kDynamic;
827 ShapedType::kDynamic);
829 dynamicOffset, dynamicStrides);
831 rankedTensorType.getElementType(), stridedLayout,
841 if (
auto unrankedTensorType =
842 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
848 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
849 MemRefLayoutAttrInterface layout = {};
851 rankedTensorType.getElementType(), layout,
861 auto bufferizableOp = cast<BufferizableOpInterface>(opResult.
getDefiningOp());
863 bufferizableOp.getAliasingOpOperands(opResult, state);
873 return state.bufferizesToMemoryWrite(*alias.
opOperand);
906 auto isMemoryWriteInsideOp = [&](
Value v) {
910 return state.bufferizesToMemoryWrite(v);
913 config.alwaysIncludeLeaves =
false;
916 .findValueInReverseUseDefChain(alias.
opOperand,
917 isMemoryWriteInsideOp,
config)
931 if (!llvm::isa<TensorType>(opOperand.
get().
getType()))
934 for (
const auto &it : aliasingValues)
935 if (it.value == value)
936 result.emplace_back(&opOperand, it.relation, it.isDefinite);
944 assert(llvm::isa<TensorType>(value.
getType()) &&
"expected tensor type");
947 if (llvm::isa<BlockArgument>(value))
952 auto opResult = llvm::cast<OpResult>(value);
959 Value equivalentOperand = aliases.
getAliases().front().opOperand->get();
967 if (!memSpace.has_value())
968 return op->
emitError(
"could not infer memory space");
974 BufferizableOpInterface bufferizableOp,
unsigned index) {
975 assert(index < bufferizableOp->getNumRegions() &&
"invalid region index");
976 auto regionInterface =
977 dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
978 if (!regionInterface)
980 return regionInterface.isRepetitiveRegion(index);
987 if (
auto bbArg = dyn_cast<BlockArgument>(value))
988 if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
996 if (isa<TensorType>(operand.get().getType()))
1009 if (llvm::isa<TensorType>(result.getType()))
1012 if (!region.getBlocks().empty())
1013 for (
BlockArgument bbArg : region.getBlocks().front().getArguments())
1014 if (isa<TensorType>(bbArg.getType()))
1022 return any_of(r.getBlocks(), [&](Block &b) {
1023 return any_of(b.getArguments(), [&](BlockArgument bbArg) {
1024 return isaTensor(bbArg.getType());
1028 if (hasTensorBlockArgument)
static void ensureToMemrefOpIsValid(Value tensor, Type memrefType)
static void setInsertionPointAfter(OpBuilder &b, Value value)
static bool isRepetitiveRegion(Region *region, const BufferizationOptions &options)
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static bool isaTensor(Type t)
static llvm::ManagedStatic< PassManagerOptions > options
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
Base class for generic analysis states.
Attributes are known-constant values of operations.
This class provides a shared interface for ranked and unranked memref types.
This class represents an argument of a Block.
Block represents an ordered list of Operations.
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
IntegerAttr getI64IntegerAttr(int64_t value)
IRValueT get() const
Return the current value being used by this operand.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around an immutable Location.
RAII guard to reset the insertion point of the builder when destroyed.
This class helps build Operations.
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent insertions to go right after it.
This class represents an operand of an operation.
This is a value defined by a result of an operation.
Operation is the basic unit of execution within MLIR.
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loaded.
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Location getLoc()
The source location the operation was defined or derived from.
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation, or nullptr if this is a top-level operation.
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers that may be listening.
Block * getBlock()
Returns the operation block that contains this operation.
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
operand_type_range getOperandTypes()
MutableArrayRef< OpOperand > getOpOperands()
result_type_range getResultTypes()
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
result_range getOpResults()
Region * getParentRegion()
Returns the region to which the instruction belongs.
unsigned getNumResults()
Return the number of results held by this operation.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Region * getParentRegion()
Return the region containing this region, or nullptr if the region is attached to a top-level operation.
unsigned getRegionNumber()
Return the number of this region in the parent operation.
Operation * getParentOp()
Return the parent operation this region is attached to.
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to track mutations and create new operations.
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements).
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and UnrankedTensorType.
This class provides an efficient unique identifier for a specific C++ type.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value that has a type and a set of users.
Type getType() const
Return the type of this value.
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Location getLoc() const
Return the location of this value.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Region * getParentRegion()
Return the Region in which this Value is defined.
size_t getNumAliases() const
ArrayRef< T > getAliases() const
AnalysisState provides a variety of helper functions for dealing with tensor values.
bool isValueRead(Value value) const
Return true if the given value is read by an op that bufferizes to a memory read.
AliasingValueList getAliasingValues(OpOperand &opOperand) const
Determine which Value will alias with opOperand if the op is bufferized in place.
SetVector< Value > findValueInReverseUseDefChain(OpOperand *opOperand, llvm::function_ref< bool(Value)> condition, TraversalConfig config=TraversalConfig(), llvm::DenseSet< OpOperand * > *visitedOpOperands=nullptr) const
Starting from opOperand, follow the use-def chain in reverse, always selecting the aliasing OpOperands.
virtual bool areAliasingBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 may bufferize to aliasing buffers.
virtual bool hasUndefinedContents(OpOperand *opOperand) const
Return true if the given tensor has undefined contents.
bool insideMutuallyExclusiveRegions(Operation *op0, Operation *op1)
Checks whether op0 and op1 are inside mutually exclusive regions.
bool canOmitTensorCopy(OpOperand &opOperand) const
Return true if a copy can always be avoided when allocating a new tensor for the given OpOperand.
bool bufferizesToMemoryWrite(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory write.
virtual bool isInPlace(OpOperand &opOperand) const
Return true if the given OpResult has been decided to bufferize inplace.
SetVector< Value > findDefinitions(OpOperand *opOperand) const
Find the values that may define the contents of the given value at runtime.
bool bufferizesToAliasOnly(OpOperand &opOperand) const
Return true if opOperand does neither read nor write but bufferizes to an alias.
AliasingOpOperandList getAliasingOpOperands(Value value) const
Determine which OpOperand* will alias with value if the op is bufferized in place.
AnalysisState(const BufferizationOptions &options)
Region * getEnclosingRepetitiveRegion(Operation *op, const BufferizationOptions &options)
Return the closest enclosing repetitive region around the given op.
const BufferizationOptions & getOptions() const
Return a reference to the BufferizationOptions.
bool bufferizesToMemoryRead(OpOperand &opOperand) const
Return true if opOperand bufferizes to a memory read.
virtual bool areEquivalentBufferizedValues(Value v1, Value v2) const
Return true if v1 and v2 bufferize to equivalent buffers.
virtual void resetCache()
bool isOpAllowed(Operation *op) const
Return whether the op is allowed or not.
Operation * getOwner() const
Return the owner of this operand.
AliasingOpOperandList defaultGetAliasingOpOperands(Value value, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::getAliasingOpOperands.
bool defaultResultBufferizesToMemoryWrite(OpResult opResult, const AnalysisState &state)
This is the default implementation of BufferizableOpInterface::resultBufferizesToMemoryWrite.
AliasingValueList unknownGetAliasingValues(OpOperand &opOperand)
This is the default implementation of getAliasingValues in case the owner op does not implement the BufferizableOpInterface.
bool defaultIsRepetitiveRegion(BufferizableOpInterface bufferizableOp, unsigned index)
This is the default implementation of BufferizableOpInterface::isRepetitiveRegion.
AliasingOpOperandList unknownGetAliasingOpOperands(Value value)
This is the default implementation of getAliasingOpOperands in case the defining op does not implement the BufferizableOpInterface.
bool defaultHasTensorSemantics(Operation *op)
This is the default implementation of BufferizableOpInterface::hasTensorSemantics.
FailureOr< BaseMemRefType > defaultGetBufferType(Value value, const BufferizationOptions &options, SmallVector< Value > &invocationStack)
This is the default implementation of BufferizableOpInterface::getBufferType.
void replaceOpWithBufferizedValues(RewriterBase &rewriter, Operation *op, ValueRange values)
Replace an op with replacement values.
BaseMemRefType getMemRefTypeWithStaticIdentityLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with a static identity layout (i.e., no layout map).
Operation * getOwnerOfValue(Value value)
Return the owner of the given value.
BaseMemRefType getMemRefType(Value value, const BufferizationOptions &options, MemRefLayoutAttrInterface layout={}, Attribute memorySpace=nullptr)
Return a MemRefType to which the type of the given value can be bufferized.
Region * getParallelRegion(Region *region, const BufferizationOptions &options)
If region is a parallel region, return region.
Region * getNextEnclosingRepetitiveRegion(Region *region, const BufferizationOptions &options)
Assuming that the given region is repetitive, find the next enclosing repetitive region.
AliasList< AliasingOpOperand > AliasingOpOperandList
A list of possible aliasing OpOperands.
FailureOr< Value > allocateTensorForShapedValue(OpBuilder &b, Location loc, Value shapedValue, const BufferizationOptions &options, bool copy=true)
Create an AllocTensorOp for the given shaped value (memref or tensor).
FailureOr< BaseMemRefType > getBufferType(Value value, const BufferizationOptions &options)
Return the buffer type for a given Value (tensor) after bufferization without bufferizing any IR.
FailureOr< Value > getBuffer(RewriterBase &rewriter, Value value, const BufferizationOptions &options)
Lookup the buffer for the given value.
BaseMemRefType getMemRefTypeWithFullyDynamicLayout(TensorType tensorType, Attribute memorySpace=nullptr)
Return a MemRef type with fully dynamic layout.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the given shaped value.
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Include the generated interface declarations.
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
const FrozenRewritePatternSet GreedyRewriteConfig config
bool insideMutuallyExclusiveRegions(Operation *a, Operation *b)
Return true if a and b are in mutually exclusive regions as per RegionBranchOpInterface.
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
A maybe aliasing OpOperand.
Options for BufferizableOpInterface-based bufferization.
std::function< void(AnalysisState &)> AnalysisStateInitFn
Initializer function for analysis state.
void setFunctionBoundaryTypeConversion(LayoutMapOption layoutMapOption)
This function controls buffer types on function signatures.
BufferizableOpInterface dynCastBufferizableOp(Operation *op) const
Try to cast the given op to BufferizableOpInterface if the op is allow listed.
bool inferFunctionResultLayout
If true, function result types are inferred from the body of the function.
unsigned int bufferAlignment
Buffer alignment for new memory allocations.
FunctionArgTypeConverterFn functionArgTypeConverterFn
Type converter from tensors to memrefs.
std::optional< AllocationFn > allocationFn
Helper functions for allocation and memory copying.
OpFilter opFilter
A filter that specifies which ops should be bufferized and which ops should be ignored.
bool isOpAllowed(Operation *op) const
Return true if the given op should be bufferized.
std::optional< MemCpyFn > memCpyFn
bool bufferizeFunctionBoundaries
Specifies whether function boundaries (ops in the func dialect) should be bufferized or not.
FailureOr< Value > createAlloc(OpBuilder &b, Location loc, MemRefType type, ValueRange dynShape) const
Create a memref allocation with the given type and dynamic extents.
LogicalResult createMemCpy(OpBuilder &b, Location loc, Value from, Value to) const
Creates a memcpy between two given buffers.
SmallVector< AnalysisStateInitFn > stateInitializers
Initializer functions for analysis state.
Traversal parameters for findValueInReverseUseDefChain.