20#include "llvm/ADT/ScopeExit.h"
21#include "llvm/ADT/SmallVectorExtras.h"
30#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"
37#define DEBUG_TYPE "bufferizable-op-interface"
45 if (
auto bufferizableOp =
options.dynCastBufferizableOp(op))
51Region *AnalysisState::getEnclosingRepetitiveRegion(
55 if (
auto iter = enclosingRepetitiveRegionCache.find_as(op);
56 iter != enclosingRepetitiveRegionCache.end())
58 return enclosingRepetitiveRegionCache[op] =
62Region *AnalysisState::getEnclosingRepetitiveRegion(
64 if (
auto iter = enclosingRepetitiveRegionCache.find_as(value);
65 iter != enclosingRepetitiveRegionCache.end())
71 SmallVector<Region *> visitedRegions;
73 visitedRegions.push_back(region);
78 enclosingRepetitiveRegionCache[value] = region;
79 for (Region *r : visitedRegions)
80 enclosingRepetitiveRegionCache[r] = region;
84Region *AnalysisState::getEnclosingRepetitiveRegion(
86 if (
auto iter = enclosingRepetitiveRegionCache.find_as(block);
87 iter != enclosingRepetitiveRegionCache.end())
91 Operation *op =
nullptr;
94 SmallVector<Region *> visitedRegions;
101 enclosingRepetitiveRegionCache[block] = region;
102 for (Region *r : visitedRegions)
103 enclosingRepetitiveRegionCache[r] = region;
107bool AnalysisState::insideMutuallyExclusiveRegions(
Operation *op0,
109 auto key = std::make_pair(op0, op1);
110 if (
auto iter = insideMutuallyExclusiveRegionsCache.find(key);
111 iter != insideMutuallyExclusiveRegionsCache.end())
115 insideMutuallyExclusiveRegionsCache[key] =
result;
116 insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] =
result;
120void AnalysisState::resetCache() {
121 enclosingRepetitiveRegionCache.clear();
122 insideMutuallyExclusiveRegionsCache.clear();
133Region *bufferization::getNextEnclosingRepetitiveRegion(
144 const BufferizationOptions &
options) {
147 if (bufferizableOp &&
150 "expected that all parallel regions are also repetitive regions");
159 if (
auto opResult = llvm::dyn_cast<OpResult>(value))
160 return opResult.getDefiningOp();
161 return llvm::cast<BlockArgument>(value).getOwner()->
getParentOp();
169FailureOr<Value> bufferization::allocateTensorForShapedValue(
171 const BufferizationOptions &
options,
const BufferizationState &state,
174 if (llvm::isa<RankedTensorType>(shapedValue.
getType())) {
176 }
else if (llvm::isa<MemRefType>(shapedValue.
getType())) {
177 tensor = ToTensorOp::create(
180 }
else if (llvm::isa<UnrankedTensorType>(shapedValue.
getType()) ||
181 llvm::isa<UnrankedMemRefType>(shapedValue.
getType())) {
183 ->
emitError(
"copying of unranked tensors is not implemented");
185 llvm_unreachable(
"expected RankedTensorType or MemRefType");
187 RankedTensorType tensorType = llvm::cast<RankedTensorType>(
tensor.getType());
192 bool reifiedShapes =
false;
193 if (llvm::isa<RankedTensorType>(shapedValue.
getType()) &&
194 llvm::isa<OpResult>(shapedValue)) {
198 reifiedShapes =
true;
200 resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
201 for (
const auto &dim :
enumerate(tensorType.getShape())) {
202 if (ShapedType::isDynamic(dim.value())) {
203 dynamicSizes.push_back(
216 auto allocTensorOp = AllocTensorOp::create(
b, loc, tensorType, dynamicSizes,
221 return allocTensorOp.getResult();
222 auto copyBufferType =
224 if (
failed(copyBufferType))
226 std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
228 memorySpace =
options.defaultMemorySpaceFn(tensorType);
229 if (memorySpace.has_value())
230 allocTensorOp.setMemorySpaceAttr(memorySpace.value());
231 return allocTensorOp.getResult();
236LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
238 const BufferizationState &bufferizationState) {
248 Type operandType = opOperand.get().getType();
249 if (!llvm::isa<TensorType>(operandType))
251 if (analysisState.isInPlace(opOperand))
253 if (llvm::isa<UnrankedTensorType>(operandType))
254 return op->
emitError(
"copying of unranked tensors is not implemented");
256 AliasingValueList aliasingValues =
257 analysisState.getAliasingValues(opOperand);
258 if (aliasingValues.getNumAliases() == 1 &&
259 isa<OpResult>(aliasingValues.getAliases()[0].value) &&
260 !analysisState.bufferizesToMemoryWrite(opOperand) &&
262 .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
263 .getNumAliases() == 1 &&
264 !isa<UnrankedTensorType>(
265 aliasingValues.getAliases()[0].value.getType())) {
272 Value value = aliasingValues.getAliases()[0].value;
273 outOfPlaceValues.push_back(value);
274 if (!analysisState.canOmitTensorCopy(opOperand))
275 copiedOpValues.insert(value);
278 outOfPlaceOpOperands.push_back(&opOperand);
279 if (!analysisState.canOmitTensorCopy(opOperand))
280 copiedOpOperands.insert(&opOperand);
286 for (
OpOperand *opOperand : outOfPlaceOpOperands) {
287 FailureOr<Value>
copy = allocateTensorForShapedValue(
288 rewriter, op->
getLoc(), opOperand->get(), analysisState.getOptions(),
289 bufferizationState, copiedOpOperands.contains(opOperand));
297 for (
Value value : outOfPlaceValues) {
298 FailureOr<Value>
copy = allocateTensorForShapedValue(
299 rewriter, op->
getLoc(), value, analysisState.getOptions(),
300 bufferizationState, copiedOpValues.count(value));
307 if (use->getOwner() ==
copy->getDefiningOp())
311 if (isa<tensor::DimOp>(use->getOwner()))
324bool OpFilter::isOpAllowed(
Operation *op)
const {
326 bool isAllowed = !hasAllowRule();
327 for (
const Entry &entry : entries) {
328 bool filterResult = entry.fn(op);
329 switch (entry.type) {
331 isAllowed |= filterResult;
351defaultFunctionArgTypeConverter(TensorLikeType type,
Attribute memorySpace,
353 const BufferizationOptions &
options) {
354 if (
auto tensorType = mlir::dyn_cast<TensorType>(type)) {
355 return cast<BufferLikeType>(
356 getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
361 type.getBufferType(
options, [&]() {
return funcOp->emitError(); });
362 assert(succeeded(bufferType) &&
363 "a valid buffer is always expected at function boundary");
369 const BufferizationOptions &
options) {
370 return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
/// Default constructor: install the file-local default type-conversion
/// callbacks for function-boundary arguments and for unknown (non-tensor)
/// types. Callers may overwrite either callback after construction.
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}
380bool BufferizationOptions::isOpAllowed(
Operation *op)
const {
383 bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->
getDialect());
384 if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
387 return opFilter.isOpAllowed(op);
390BufferizableOpInterface
391BufferizationOptions::dynCastBufferizableOp(
Operation *op)
const {
392 if (!isOpAllowed(op))
394 auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
397 return bufferizableOp;
400BufferizableOpInterface
401BufferizationOptions::dynCastBufferizableOp(
Value value)
const {
405void BufferizationOptions::setFunctionBoundaryTypeConversion(
406 LayoutMapOption layoutMapOption) {
407 functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
409 const BufferizationOptions &
options) {
410 if (
auto tensorType = mlir::dyn_cast<TensorType>(type)) {
411 if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
412 return cast<BufferLikeType>(
413 bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
415 return cast<BufferLikeType>(
416 bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
422 type.getBufferType(
options, [&]() {
return funcOp->emitError(); });
423 assert(succeeded(bufferType) &&
424 "a valid buffer is always expected at function boundary");
427 inferFunctionResultLayout =
428 layoutMapOption == LayoutMapOption::InferLayoutMap;
436 if (
auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
437 b.setInsertionPointToStart(bbArg.getOwner());
445AliasingOpOperandList AnalysisState::getAliasingOpOperands(
Value value)
const {
447 if (
auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
448 return bufferizableOp.getAliasingOpOperands(value, *
this);
451 return detail::unknownGetAliasingOpOperands(value);
456AliasingValueList AnalysisState::getAliasingValues(
OpOperand &opOperand)
const {
457 if (
auto bufferizableOp =
458 getOptions().dynCastBufferizableOp(opOperand.
getOwner()))
459 return bufferizableOp.getAliasingValues(opOperand, *
this);
462 return detail::unknownGetAliasingValues(opOperand);
467bool AnalysisState::bufferizesToMemoryRead(
OpOperand &opOperand)
const {
468 if (
auto bufferizableOp =
469 getOptions().dynCastBufferizableOp(opOperand.
getOwner()))
470 return bufferizableOp.bufferizesToMemoryRead(opOperand, *
this);
479bool AnalysisState::bufferizesToMemoryWrite(
OpOperand &opOperand)
const {
480 if (
auto bufferizableOp =
481 getOptions().dynCastBufferizableOp(opOperand.
getOwner()))
482 return bufferizableOp.bufferizesToMemoryWrite(opOperand, *
this);
491bool AnalysisState::bufferizesToAliasOnly(
OpOperand &opOperand)
const {
492 if (
auto bufferizableOp =
493 getOptions().dynCastBufferizableOp(opOperand.
getOwner()))
494 return bufferizableOp.bufferizesToAliasOnly(opOperand, *
this);
501bool AnalysisState::bufferizesToMemoryWrite(
Value value)
const {
502 auto opResult = llvm::dyn_cast<OpResult>(value);
505 auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
508 return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *
this);
514bool AnalysisState::isValueRead(
Value value)
const {
515 assert(llvm::isa<TensorLikeType>(value.
getType()) &&
516 "expected TensorLikeType");
517 SmallVector<OpOperand *> workingSet;
519 for (OpOperand &use : value.
getUses())
520 workingSet.push_back(&use);
522 while (!workingSet.empty()) {
523 OpOperand *uMaybeReading = workingSet.pop_back_val();
524 if (!visited.insert(uMaybeReading).second)
528 if (bufferizesToAliasOnly(*uMaybeReading))
529 for (AliasingValue alias : getAliasingValues(*uMaybeReading))
530 for (OpOperand &use : alias.value.getUses())
531 workingSet.push_back(&use);
532 if (bufferizesToMemoryRead(*uMaybeReading))
544llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
546 TraversalConfig config,
547 llvm::DenseSet<OpOperand *> *visitedOpOperands)
const {
548 llvm::DenseSet<Value> visited;
549 llvm::SetVector<Value>
result, workingSet;
550 workingSet.insert(opOperand->
get());
552 if (visitedOpOperands)
553 visitedOpOperands->insert(opOperand);
555 while (!workingSet.empty()) {
556 Value value = workingSet.pop_back_val();
558 if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
560 if (config.alwaysIncludeLeaves)
564 visited.insert(value);
566 if (condition(value)) {
571 if (!config.followUnknownOps && !
options.dynCastBufferizableOp(value)) {
574 if (config.alwaysIncludeLeaves)
579 AliasingOpOperandList aliases = getAliasingOpOperands(value);
580 if (aliases.getNumAliases() == 0) {
583 if (config.alwaysIncludeLeaves)
588 for (AliasingOpOperand a : aliases) {
589 if (config.followEquivalentOnly &&
590 a.relation != BufferRelation::Equivalent) {
593 if (config.alwaysIncludeLeaves)
598 if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
601 if (config.alwaysIncludeLeaves)
606 if (config.followSameTypeOrCastsOnly &&
607 a.opOperand->get().getType() != value.
getType() &&
611 if (config.alwaysIncludeLeaves)
616 workingSet.insert(a.opOperand->get());
617 if (visitedOpOperands)
618 visitedOpOperands->insert(a.opOperand);
626llvm::SetVector<Value>
627AnalysisState::findDefinitions(
OpOperand *opOperand)
const {
628 TraversalConfig config;
629 config.alwaysIncludeLeaves =
false;
630 return findValueInReverseUseDefChain(
631 opOperand, [&](Value v) {
return this->bufferizesToMemoryWrite(v); },
640 for (
const BufferizationOptions::AnalysisStateInitFn &fn :
645bool AnalysisState::canOmitTensorCopy(
OpOperand &opOperand)
const {
647 if (hasUndefinedContents(&opOperand))
652 if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
656 AliasingValueList aliases = getAliasingValues(opOperand);
657 if (!bufferizesToMemoryRead(opOperand) &&
658 llvm::none_of(aliases,
659 [&](AliasingValue a) {
return isValueRead(a.value); }))
666bool AnalysisState::isInPlace(
OpOperand &opOperand)
const {
668 if (isa<ToBufferOp>(opOperand.
getOwner()))
673 return !bufferizesToMemoryWrite(opOperand);
676bool AnalysisState::areEquivalentBufferizedValues(
Value v1,
Value v2)
const {
682bool AnalysisState::areAliasingBufferizedValues(
Value v1,
Value v2)
const {
688bool AnalysisState::hasUndefinedContents(
OpOperand *opOperand)
const {
694 const BufferizationOptions &
options,
695 const BufferizationState &state) {
697 auto tensorType = llvm::dyn_cast<TensorLikeType>(value.
getType());
698 assert(tensorType &&
"unexpected non-tensor type");
702 if (
auto toTensorOp = value.
getDefiningOp<bufferization::ToTensorOp>())
703 return toTensorOp.getBuffer();
712 return bufferization::ToBufferOp::create(rewriter, value.
getLoc(),
718FailureOr<BufferLikeType>
719bufferization::getBufferType(
Value value,
const BufferizationOptions &
options,
720 const BufferizationState &state) {
726FailureOr<BufferLikeType>
727bufferization::getBufferType(
Value value,
const BufferizationOptions &
options,
728 const BufferizationState &state,
730 assert(llvm::isa<TensorLikeType>(value.
getType()) &&
731 "unexpected non-tensor type");
732 invocationStack.push_back(value);
733 llvm::scope_exit popFromStack([&]() { invocationStack.pop_back(); });
737 auto bufferizableOp =
options.dynCastBufferizableOp(op);
739 return bufferizableOp.getBufferType(value,
options, state, invocationStack);
742 return cast<TensorLikeType>(value.
getType()).getBufferType(
options, [&]() {
743 return op->emitError();
747bool bufferization::hasTensorSemantics(
Operation *op) {
748 if (
auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
749 return bufferizableOp.hasTensorSemantics();
750 return detail::defaultHasTensorSemantics(op);
753void bufferization::replaceOpWithBufferizedValues(
RewriterBase &rewriter,
757 "expected one value per OpResult");
764 if (llvm::isa<TensorLikeType>(opResult.getType())) {
767 assert(llvm::isa<BufferLikeType>(
replacement.getType()) &&
768 "tensor op result should be replaced with a buffer value");
791 return (*allocationFn)(
b, loc, type, dynShape, bufferAlignment);
794 if (bufferAlignment != 0)
795 return memref::AllocOp::create(
b, loc, type, dynShape,
796 b.getI64IntegerAttr(bufferAlignment))
798 return memref::AllocOp::create(
b, loc, type, dynShape).getResult();
805 return (*memCpyFn)(
b, loc, from, to);
807 memref::CopyOp::create(
b, loc, from, to);
816 const BufferizationOptions &
options,
817 MemRefLayoutAttrInterface layout,
820 if (
auto unrankedTensorType =
821 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
822 assert(!layout &&
"UnrankedTensorType cannot have a layout map");
823 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
828 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
830 return MemRefType::get(rankedTensorType.getShape(),
831 rankedTensorType.getElementType(), layout,
835 return options.unknownTypeConverterFn(tensorType, memorySpace,
options);
839bufferization::getMemRefTypeWithFullyDynamicLayout(
TensorType tensorType,
842 if (
auto unrankedTensorType =
843 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
844 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
849 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
850 int64_t dynamicOffset = ShapedType::kDynamic;
852 ShapedType::kDynamic);
853 auto stridedLayout = StridedLayoutAttr::get(tensorType.
getContext(),
854 dynamicOffset, dynamicStrides);
855 return MemRefType::get(rankedTensorType.getShape(),
856 rankedTensorType.getElementType(), stridedLayout,
863bufferization::getMemRefTypeWithStaticIdentityLayout(
TensorType tensorType,
866 if (
auto unrankedTensorType =
867 llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
868 return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
873 auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
874 MemRefLayoutAttrInterface layout = {};
875 return MemRefType::get(rankedTensorType.getShape(),
876 rankedTensorType.getElementType(), layout,
884bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
886 auto bufferizableOp = cast<BufferizableOpInterface>(opResult.
getDefiningOp());
887 AliasingOpOperandList opOperands =
888 bufferizableOp.getAliasingOpOperands(opResult, state);
892 if (opOperands.getAliases().empty())
897 if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
898 return state.bufferizesToMemoryWrite(*alias.opOperand);
931 auto isMemoryWriteInsideOp = [&](
Value v) {
935 return state.bufferizesToMemoryWrite(v);
937 TraversalConfig config;
938 config.alwaysIncludeLeaves =
false;
939 for (AliasingOpOperand alias : opOperands) {
941 .findValueInReverseUseDefChain(alias.opOperand,
942 isMemoryWriteInsideOp, config)
951AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
956 if (!llvm::isa<TensorLikeType>(opOperand.
get().
getType()))
958 AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
959 for (
const auto &it : aliasingValues)
960 if (it.value == value)
961 result.emplace_back(&opOperand, it.relation, it.isDefinite);
963 return AliasingOpOperandList(std::move(
result));
966FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
968 const BufferizationState &bufferizationState,
970 assert(llvm::isa<TensorType>(value.
getType()) &&
"expected tensor type");
971 auto tensorType = cast<TensorType>(value.
getType());
977 <<
"cannot bufferize value of type " << tensorType
978 <<
": element type " << elementType
979 <<
" is not a valid memref element type";
982 if (llvm::isa<BlockArgument>(value)) {
983 return cast<BufferLikeType>(
984 bufferization::getMemRefType(tensorType,
options));
989 auto opResult = llvm::cast<OpResult>(value);
991 AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
992 if (aliases.getNumAliases() > 0 &&
993 aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
996 Value equivalentOperand = aliases.getAliases().front().opOperand->get();
1005 if (!memSpace.has_value())
1006 return op->
emitError(
"could not infer memory space");
1008 return cast<BufferLikeType>(
1012bool bufferization::detail::defaultIsRepetitiveRegion(
1013 BufferizableOpInterface bufferizableOp,
unsigned index) {
1014 assert(index < bufferizableOp->getNumRegions() &&
"invalid region index");
1015 auto regionInterface =
1016 dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
1017 if (!regionInterface)
1019 return regionInterface.isRepetitiveRegion(
index);
1022AliasingOpOperandList
1023bufferization::detail::unknownGetAliasingOpOperands(
Value value) {
1026 if (
auto bbArg = dyn_cast<BlockArgument>(value))
1027 if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
1033 AliasingOpOperandList r;
1035 if (isa<TensorLikeType>(operand.get().getType()))
1036 r.addAlias({&operand, BufferRelation::Unknown,
false});
1041bufferization::detail::unknownGetAliasingValues(
OpOperand &opOperand) {
1046 AliasingValueList r;
1048 if (llvm::isa<TensorLikeType>(
result.getType()))
1049 r.addAlias({
result, BufferRelation::Unknown,
false});
1053 if (isa<TensorLikeType>(bbArg.getType()))
1054 r.addAlias({bbArg, BufferRelation::Unknown,
false});
1058bool bufferization::detail::defaultHasTensorSemantics(
Operation *op) {
1059 auto isaTensor = [](
Type t) {
return isa<TensorLikeType>(t); };
1061 return any_of(r.getBlocks(), [&](Block &b) {
1062 return any_of(b.getArguments(), [&](BlockArgument bbArg) {
1063 return isaTensor(bbArg.getType());
1067 if (hasTensorBlockArgument)
1075FailureOr<BaseMemRefType>
1076bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
1079 return cast<BaseMemRefType>(*bufferType);
1082bool bufferization::detail::typesMatchAfterBufferization(
Operation &op,
1085 return mlir::succeeded(
1086 cast<TensorLikeType>(
tensor.getType())
1087 .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.
getType()),
1088 [&]() { return op.emitError(); }));
static void setInsertionPointAfter(OpBuilder &b, Value value)
static bool isRepetitiveRegion(Region *region, const BufferizationOptions &options)
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
Returns failure if copies could not be generated due to yet-unimplemented cases. `copyInPlacementStart` and `copyOutPlacementStart` in `copyPlacementBlock` specify the insertion points where the incoming and the outgoing copies, respectively, should be inserted. The output argument `nBegin` is set to its replacement (set to `begin` if no invalidation happens). Since outgoing copies could have been inserted at `end`, the output argument `nEnd` is set to the new end.
static bool isaTensor(Type t)
static llvm::ManagedStatic< PassManagerOptions > options
static RankedTensorType getBufferType(const SparseTensorType &stt, bool needTmpCOO)
#define MLIR_DEFINE_EXPLICIT_TYPE_ID(CLASS_NAME)
static Operation * getOwnerOfValue(Value value)
Base class for generic analysis states.
AnalysisState(LatticeAnchor anchor)
Create the analysis state on the given lattice anchor.
Attributes are known-constant values of operations.
This class provides a shared interface for ranked and unranked memref types.
static bool isValidElementType(Type type)
Return true if the specified element type is ok in a memref.
This class represents an argument of a Block.
Block represents an ordered list of Operations.
Region * getParent() const
Provide a 'getParent' method for ilist_node_with_parent methods.
IRValueT get() const
Return the current value being used by this operand.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around an attribute.
RAII guard to reset the insertion point of the builder when destroyed.
This class helps build Operations.
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent inser...
This class represents an operand of an operation.
This is a value defined by a result of an operation.
Operation is the basic unit of execution within MLIR.
Dialect * getDialect()
Return the dialect this operation is associated with, or nullptr if the associated dialect is not loa...
Block * getBlock()
Returns the operation block that contains this operation.
Location getLoc()
The source location the operation was defined or derived from.
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
MutableArrayRef< OpOperand > getOpOperands()
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
operand_type_range getOperandTypes()
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
result_type_range getResultTypes()
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
result_range getOpResults()
Region * getParentRegion()
Returns the region to which the instruction belongs.
unsigned getNumResults()
Return the number of results held by this operation.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Region * getParentRegion()
Return the region containing this region or nullptr if the region is attached to a top-level operatio...
unsigned getRegionNumber()
Return the number of this region in the parent operation.
Operation * getParentOp()
Return the parent operation this region is attached to.
BlockListType & getBlocks()
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
This class represents a collection of SymbolTables.
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
Type getElementType() const
Returns the element type of this tensor type.
This class provides an efficient unique identifier for a specific C++ type.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
use_range getUses() const
Returns a range of all uses, which is useful for iterating over all uses.
Location getLoc() const
Return the location of this value.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Region * getParentRegion()
Return the Region in which this Value is defined.
Operation * getOwner() const
Return the owner of this operand.
void populateDynamicDimSizes(OpBuilder &b, Location loc, Value shapedValue, SmallVector< Value > &dynamicDims)
Populate dynamicDims with tensor::DimOp / memref::DimOp results for all dynamic dimensions of the giv...
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Type getTensorTypeFromMemRefType(Type type)
Return an unranked/ranked tensor type for the given unranked/ranked memref type.
MemRefType getMemRefType(T &&t)
Convenience method to abbreviate casting getType().
Include the generated interface declarations.
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
bool insideMutuallyExclusiveRegions(Operation *a, Operation *b)
Return true if a and b are in mutually exclusive regions as per RegionBranchOpInterface.
llvm::DenseSet< ValueT, ValueInfoT > DenseSet
Region * getEnclosingRepetitiveRegion(Operation *op)
Return the first enclosing region of the given op that may be executed repetitively as per RegionBran...
SmallVector< SmallVector< OpFoldResult > > ReifiedRankedShapedTypeDims
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...