#include "llvm/ADT/ScopeExit.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

#define DEBUG_TYPE "bufferizable-op-interface"
static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  if (auto bufferizableOp =
          options.dynCastBufferizableOp(region->getParentOp()))
    return bufferizableOp.isRepetitiveRegion(region->getRegionNumber());
  return false;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] = getEnclosingRepetitiveRegion(op);
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  // Walk up the region tree; cache all visited regions as well.
  SmallVector<Region *> visitedRegions;
  Region *region = value.getParentRegion();
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  Region *region = block->getParent();
  Operation *op = nullptr;
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));
  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate the result for both orderings of the two ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}
Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  while ((region = region->getParentRegion()))
    if (isRepetitiveRegion(region, options))
      break;
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}
/// Return the owner of the given value: the defining op for an OpResult, or
/// the parent op of the owning block for a BlockArgument.
static Operation *getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());

  // Create buffer allocation.
  Value copyBuffer;
  if (copy)
    copyBuffer = tensor;

  // Compute the dynamic part of the shape. First try to query the shape via
  // ReifyRankedShapedTypeOpInterface.
  SmallVector<Value> dynamicSizes;
  bool reifiedShapes = false;
  if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
      llvm::isa<OpResult>(shapedValue)) {
    ReifiedRankedShapedTypeDims resultDims;
    if (succeeded(
            reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
      reifiedShapes = true;
      auto &shape =
          resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
      for (const auto &dim : enumerate(tensorType.getShape()))
        if (ShapedType::isDynamic(dim.value()))
          dynamicSizes.push_back(
              getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
    }
  }

  // If the shape could not be reified, create DimOps.
  if (!reifiedShapes)
    populateDynamicDimSizes(b, loc, tensor, dynamicSizes);

  // Create AllocTensorOp.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copyBuffer);

  // Add 'memory_space' attribute. Not needed if the 'copy' operand is set.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}
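// Illustrative usage sketch (not part of this file): a caller that needs an
// explicit tensor copy would typically invoke the helper above as follows.
// `rewriter`, `op`, `options`, and `state` are assumed to be available from
// the surrounding bufferization pass.
//
//   FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
//       rewriter, op->getLoc(), op->getOperand(0), options, state,
//       /*copy=*/true);
//   if (failed(tensorAlloc))
//     return failure();
//   // *tensorAlloc is the result of a new bufferization.alloc_tensor op.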
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op does not write and creates exactly one alias: copy the
      // aliasing OpResult instead of the OpOperand.
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, copy the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of OpResults and redirect their uses.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
        value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that was just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // Skip tensor.dim ops that were created for dynamic extents.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }
  return success();
}
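// Illustrative IR sketch (assumed example, not from this file): if the
// analysis marks the destination operand of an op as out-of-place, the method
// above rewrites
//
//   %r = tensor.insert %f into %t[%idx] : tensor<10xf32>
//
// into
//
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<10xf32>
//   %r = tensor.insert %f into %copy[%idx] : tensor<10xf32>
//
// so that the subsequent per-op bufferization never has to insert copies
// itself.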
bool OpFilter::isOpAllowed(Operation *op) const {
  // If no ALLOW rule was registered, ops are allowed by default.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // A matching DENY rule disallows the op, even if ALLOW rules match.
        return false;
      break;
    }
  }
  return isAllowed;
}
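// Illustrative sketch (assumed example): rules are registered on the filter
// via the allow/deny helpers declared in BufferizationOptions.h; as
// implemented above, a matching deny rule always wins over allow rules.
//
//   OpFilter filter;
//   filter.allowDialect<tensor::TensorDialect>();   // ALLOW entry
//   filter.denyOperation<tensor::GenerateOp>();     // DENY entry
//   bool allowed = filter.isOpAllowed(someTensorOp);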
/// Default function arg type converter: use a fully dynamic layout map.
static BufferLikeType
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
  }
  auto bufferType =
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
  return *bufferType;
}
/// Default unknown type converter: use a fully dynamic layout map.
static BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}

bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: if function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;

  return opFilter.isOpAllowed(op);
}
BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                 memorySpace));
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                             memorySpace));
    }
    auto bufferType =
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
    return *bufferType;
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
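// Illustrative sketch (assumed example): a pass that bufferizes across
// function boundaries would typically configure the options as follows before
// running One-Shot Bufferize.
//
//   BufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);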
/// Set the insertion point of the builder right after the definition of the
/// given value.
static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  Operation *op = getOwnerOfValue(value);
  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
    return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}

bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor: conservatively assume a read.
  return true;
}

bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor: conservatively assume a write.
  return true;
}

bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op that returns a tensor: conservatively assume that the
  // OpOperand may not bufferize to an alias only.
  return false;
}
bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write, but create an alias.
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());
  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop the traversal if the value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop if `followUnknownOps` is unset and the op is not bufferizable.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no aliasing OpOperands.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop if only equivalent aliases should be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop if only in-place OpOperands should be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop if the types differ and the op is not a cast.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}

llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}
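// Illustrative sketch (assumed example): the same worklist traversal can be
// customized through TraversalConfig. E.g., to find the closest aliasing
// values that are written in place, following only equivalent aliases:
//
//   TraversalConfig config;
//   config.followEquivalentOnly = true;
//   config.followInPlaceOnly = true;
//   llvm::SetVector<Value> defs =
//       analysisState.findValueInReverseUseDefChain(
//           &opOperand,
//           [&](Value v) { return analysisState.bufferizesToMemoryWrite(v); },
//           config);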
AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}
bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: the copy cannot be omitted.
  return false;
}
bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}
bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, the conservative answer is
  // "not equivalent".
  return false;
}

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, the conservative answer is
  // "may alias".
  return true;
}

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is "no".
  return false;
}
FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();
  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}
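// Illustrative sketch (assumed example): inside a BufferizableOpInterface
// `bufferize` implementation, the helper above is what turns a tensor operand
// into its future buffer. `myOp` and `getSource()` are placeholders.
//
//   FailureOr<Value> srcBuffer =
//       getBuffer(rewriter, myOp.getSource(), options, state);
//   if (failed(srcBuffer))
//     return failure();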
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}

FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  auto popFromStackOnReturn =
      llvm::make_scope_exit([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state, invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}
bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  // Replace all tensor OpResults with the given buffer values, inserting
  // to_tensor ops so that the existing uses still see a tensor.
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      replacement = bufferization::ToTensorOp::create(
          rewriter, replacement.getLoc(), opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }
  rewriter.replaceOp(op, replacements);
}
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via memref.alloc.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}

LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}
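// Illustrative sketch (assumed example): both hooks are optional members of
// BufferizationOptions; when they are unset, the defaults above (memref.alloc
// and memref.copy) are used. A client could, e.g., route allocations to stack
// memory instead:
//
//   options.allocationFn = [](OpBuilder &b, Location loc, MemRefType type,
//                             ValueRange dynShape,
//                             unsigned alignment) -> FailureOr<Value> {
//     return memref::AllocaOp::create(b, loc, type, dynShape).getResult();
//   };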
BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with a specified layout.
  if (layout) {
    auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with fully dynamic strides and offset.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with identity (i.e., no) layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}
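// Illustrative examples (assumed, for a tensor<4x?xf32> input):
//  - getMemRefTypeWithFullyDynamicLayout yields
//      memref<4x?xf32, strided<[?, ?], offset: ?>>
//  - getMemRefTypeWithStaticIdentityLayout yields
//      memref<4x?xf32>
// The fully dynamic layout is the safe default because it is compatible with
// any strided view of the underlying buffer; the identity layout is more
// restrictive but avoids layout casts at function boundaries.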
bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // OpResults without aliasing OpOperands bufferize to memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // If any aliasing OpOperand bufferizes to a memory write, so does the
  // OpResult.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Otherwise, check for a memory write somewhere inside the defining op.
  Operation *op = opResult.getDefiningOp();
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *writingOp = getOwnerOfValue(v);
    if (!op->isAncestor(writingOp))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}
FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // The value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both bufferize to the same
    // buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If the memory space cannot be inferred, report a failure.
  std::optional<Attribute> memSpace = options.defaultMemorySpaceFn(tensorType);
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}
bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}
AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // No aliasing in case of non-entry block arguments.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: conservatively assume that each tensor OpOperand may alias
  // with the value.
  AliasingOpOperandList r;
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // Unknown op: conservatively assume that the OpOperand may alias with every
  // tensor OpResult and with every tensor block argument of nested regions.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;

  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}
FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}
bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}