#include "llvm/ADT/ScopeExit.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

#define DEBUG_TYPE "bufferizable-op-interface"
/// Return true if the given region may be executed multiple times, as
/// reported by the enclosing bufferizable op.
static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    return bufferizableOp.isRepetitiveRegion(region->getRegionNumber());
  return false;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] =
             getEnclosingRepetitiveRegion(op->getBlock(), options);
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  // Collect all visited regions: the result is cached for each of them.
  Region *region = value.getParentRegion();
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;

  Region *region = block->getParent();
  Operation *op = nullptr;
  // Collect all visited regions: the result is cached for each of them.
  SmallVector<Region *> visitedRegions;
  do {
    visitedRegions.push_back(region);
    op = region->getParentOp();
    if (isRepetitiveRegion(region, options))
      break;
  } while ((region = op->getParentRegion()));

  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Cache the result for both orderings of the two ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}

void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}
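// Usage sketch (hypothetical driver code, not part of the interface): the
// cached queries above are only valid while the IR is unchanged, so an
// analysis that interleaves rewrites would reset the cache in between.
//
//   if (state.insideMutuallyExclusiveRegions(op0, op1)) {
//     // op0 and op1 can never execute together; no conflict is possible.
//   }
//   // ... the IR is modified here ...
//   state.resetCache(); // cached region/exclusivity queries may now be stale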
Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  assert(isRepetitiveRegion(region, options) && "expected repetitive region");
  while ((region = region->getParentRegion()))
    if (isRepetitiveRegion(region, options))
      break;
  return region;
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  while (region) {
    auto bufferizableOp = options.dynCastBufferizableOp(region->getParentOp());
    if (bufferizableOp &&
        bufferizableOp.isParallelRegion(region->getRegionNumber())) {
      assert(isRepetitiveRegion(region, options) &&
             "expected that all parallel regions are also repetitive regions");
      return region;
    }
    region = region->getParentRegion();
  }
  return nullptr;
}
static Operation *getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  // Normalize the shaped value to a ranked tensor.
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape. Try to reify the result shape
    // via ReifyRankedShapedTypeOpInterface first.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape())) {
          if (ShapedType::isDynamic(dim.value())) {
            dynamicSizes.push_back(
                getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
          }
        }
      }
    }
    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create an AllocTensorOp, optionally with a copy of the original value.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copy ? tensor : Value());
  if (copy)
    return allocTensorOp.getResult();
  // Add a memory space attribute, inferred from the buffer type of the
  // source (falling back to the default memory space).
  auto copyBufferType = getBufferType(tensor, options, state);
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace.has_value())
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}
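// Usage sketch (assuming `rewriter`, `loc`, `value`, `options`, and `state`
// come from the surrounding rewrite): materialize an explicit copy of a
// tensor value as a bufferization.alloc_tensor op.
//
//   FailureOr<Value> alloc = allocateTensorForShapedValue(
//       rewriter, loc, value, options, state, /*copy=*/true);
//   if (failed(alloc))
//     return failure();
//   Value copyOfValue = *alloc;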
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias.
      // Instead of copying the OpOperand, copy the aliasing OpResult, which
      // can sometimes be used in a more efficient way.
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of the aliasing OpResults.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
        value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that was just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created as dynamic extents of the
      // alloc_tensor op. Do not update these either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
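// Usage sketch: a bufferization driver calls this helper right before
// bufferizing an op, so that every out-of-place tensor operand is rerouted
// through a fresh alloc_tensor copy. (Hypothetical call site; the rewriter
// and states are assumed to be provided by the driver.)
//
//   if (failed(bufferizableOp.resolveTensorOpOperandConflicts(
//           rewriter, analysisState, bufferizationState)))
//     return failure();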
bool OpFilter::isOpAllowed(Operation *op) const {
  // If no ALLOW rule is present, ops are allowed by default.
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      // A matching DENY rule disallows the op, even if ALLOW rules match.
      if (filterResult)
        return false;
      break;
    }
  }
  return isAllowed;
}
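// Usage sketch: DENY entries take precedence over ALLOW entries, so the
// following filter (a minimal example) admits the entire tensor dialect
// except tensor.generate.
//
//   BufferizationOptions options;
//   options.opFilter.allowDialect<tensor::TensorDialect>();
//   options.opFilter.denyOperation<tensor::GenerateOp>();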
/// Default function arg type converter: use a fully dynamic layout map.
static BufferLikeType
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
  }
  // Non-builtin tensor-like types: query the type's own buffer type.
  auto bufferType =
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
  return *bufferType;
}
/// Default unknown type converter: use a fully dynamic layout map.
static BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}
bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: if function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;
  return opFilter.isOpAllowed(op);
}
BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}
BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                 memorySpace));
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                             memorySpace));
    }
    // Non-builtin tensor-like types: query the type's own buffer type.
    auto bufferType =
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
    return *bufferType;
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
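// Usage sketch: request identity-layout memrefs at function boundaries
// instead of the default fully dynamic layout maps.
//
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);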
/// Set the insertion point of the builder right after the definition of the
/// given value (or to the start of the block for block arguments).
static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  Operation *op = getOwnerOfValue(value);
  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
    return bufferizableOp.getAliasingOpOperands(value, *this);
  // The op is not bufferizable: use the conservative fallback.
  return detail::unknownGetAliasingOpOperands(value);
}
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);
  // The op is not bufferizable: use the conservative fallback.
  return detail::unknownGetAliasingValues(opOperand);
}
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);
  // Unknown op: conservatively assume that the operand is read.
  return true;
}
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);
  // Unknown op: conservatively assume that the operand is written.
  return true;
}
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);
  // Unknown op: conservatively assume that the operand may be read/written,
  // i.e., it is not an alias-only use.
  return false;
}
bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write, but create an alias.
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
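// Worked example (IR sketch): %0 is considered "read" even though its only
// direct user is alias-only; the worklist follows the alias to the reading
// tensor.extract.
//
//   %1 = tensor.extract_slice %0[0] [2] [1] : tensor<4xf32> to tensor<2xf32>
//   %2 = tensor.extract %1[%c0] : tensor<2xf32>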
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());
  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop the traversal.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating if `followUnknownOps` is unset and the op is either
      // not bufferizable or excluded in the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more OpOperands that
      // could be followed.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop iterating if only equivalent aliases may be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop iterating if only in-place aliases may be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop iterating if only same-type aliases (or casts) may be
        // followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}
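// Usage sketch: find the last write(s) into the buffer read via `opOperand`,
// following only equivalent, in-place aliases. (A minimal example; `state`
// is an AnalysisState.)
//
//   TraversalConfig config;
//   config.followEquivalentOnly = true;
//   config.followInPlaceOnly = true;
//   llvm::SetVector<Value> defs = state.findValueInReverseUseDefChain(
//       &opOperand, [&](Value v) { return state.bufferizesToMemoryWrite(v); },
//       config);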
AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}
bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;

  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;

  // Do not copy if the tensor is never read.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;

  // Default: the copy cannot be omitted.
  return false;
}
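// Worked example (IR sketch): the destination operand of a linalg.fill is
// completely overwritten and never read, so canOmitTensorCopy returns true
// and no temporary copy is materialized even if the operand is out-of-place.
//
//   %r = linalg.fill ins(%cst : f32) outs(%t : tensor<8xf32>) -> tensor<8xf32>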
bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;

  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}
bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values are
  // equivalent. The conservative answer is "false".
  return false;
}
bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, we do not know if the values may
  // be aliasing. The conservative answer is "true".
  return true;
}
bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is
  // "false".
  return false;
}
FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
  (void)tensorType;

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert a to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();
  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  llvm::scope_exit popFromStack([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state,
                                        invocationStack);

  // The op is not bufferizable: query the type's own buffer type.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}
bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // Tensor results are replaced with buffer values during bufferization;
      // the remaining tensor uses are reconciled with a to_tensor op.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      replacement = ToTensorOp::create(rewriter, replacement.getLoc(),
                                       opResult.getType(), replacement);
    }
    replacements.push_back(replacement);
  }
  rewriter.replaceOp(op, replacements);
}
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via memref.alloc.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}
LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}
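// Usage sketch: plug in custom allocation and copy callbacks. The lambda
// signatures follow the optional function members used above; swapping in
// memref.alloca is just an illustration.
//
//   BufferizationOptions options;
//   options.allocationFn = [](OpBuilder &b, Location loc, MemRefType type,
//                             ValueRange dynShape,
//                             unsigned alignment) -> FailureOr<Value> {
//     return memref::AllocaOp::create(b, loc, type, dynShape).getResult();
//   };
//   options.memCpyFn = [](OpBuilder &b, Location loc, Value from,
//                         Value to) -> LogicalResult {
//     memref::CopyOp::create(b, loc, from, to);
//     return success();
//   };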
BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with a specified layout.
  if (layout) {
    auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with a fully dynamic strided layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with a static identity layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}
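// Example: the two layout choices for tensor<4x?xf32>.
//
//   getMemRefTypeWithFullyDynamicLayout:
//     memref<4x?xf32, strided<[?, ?], offset: ?>>
//   getMemRefTypeWithStaticIdentityLayout:
//     memref<4x?xf32>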
bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp =
      cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, the
  // OpResult may bufferize to a memory write.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing write inside the op is visible through
  // the OpResult by following the reverse use-def chains of all aliases.
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}
FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());
  Operation *op = getOwnerOfValue(value);

  // Bail out if the tensor's element type is not a valid memref element type.
  Type elementType = tensorType.getElementType();
  if (!BaseMemRefType::isValidElementType(elementType))
    return op->emitError()
           << "cannot bufferize value of type " << tensorType
           << ": element type " << elementType
           << " is not a valid memref element type";

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // The value is an OpResult.
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both bufferize to the
    // exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  std::optional<Attribute> memSpace = options.defaultMemorySpaceFn(tensorType);
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");

  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}
bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}
AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // TODO: Take into account successor blocks.
  // No aliasing in case of non-entry block arguments.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: conservatively assume that each tensor OpOperand may alias
  // with the value. Also set `isDefinite` to `false`.
  AliasingOpOperandList r;
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // Unknown op: conservatively assume that the OpOperand may alias with every
  // tensor result and every tensor entry block argument of nested regions.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;
  return any_of(op->getResultTypes(), isaTensor) ||
         any_of(op->getOperandTypes(), isaTensor);
}
FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}
bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  assert(isa<TensorLikeType>(tensor.getType()) && "expected tensor type");
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}
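// End-to-end usage sketch (hypothetical; entry points and signatures vary
// across MLIR versions): configure the options defined in this file and run
// One-Shot Bufferize over a module.
//
//   OneShotBufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);
//   BufferizationState state;
//   if (failed(runOneShotModuleBufferize(moduleOp, options, state)))
//     signalPassFailure();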