#include "llvm/ADT/ScopeExit.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

#define DEBUG_TYPE "bufferizable-op-interface"
static bool isRepetitiveRegion(Region *region,
                               const BufferizationOptions &options) {
  Operation *op = region->getParentOp();
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
    return bufferizableOp.isRepetitiveRegion(region->getRegionNumber());
  return false;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Operation *op, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  return enclosingRepetitiveRegionCache[op] = getEnclosingRepetitiveRegion(op);
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Value value, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  // Remember all visited regions so that they can be cached as well.
  Region *region = value.getParentRegion();
  SmallVector<Region *> visitedRegions;
  while (region) {
    visitedRegions.push_back(region);
    if (isRepetitiveRegion(region, options))
      break;
    region = region->getParentRegion();
  }
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
Region *AnalysisState::getEnclosingRepetitiveRegion(
    Block *block, const BufferizationOptions &options) {
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
    return iter->second;
  Region *region = block->getParent();
  Operation *op = nullptr;
  // Remember all visited regions so that they can be cached as well.
  SmallVector<Region *> visitedRegions;
  do {
    op = region->getParentOp();
    if (isRepetitiveRegion(region, options))
      break;
    visitedRegions.push_back(region);
  } while ((region = op->getParentRegion()));
  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
  return region;
}
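// Illustrative example (a sketch, not from the original file): for a value
// defined inside the body of an scf.for, the body region is the enclosing
// repetitive region, since the loop marks it as repetitive via
// RegionBranchOpInterface:
//   scf.for %iv = %lb to %ub step %s iter_args(%t = %init) -> (tensor<4xf32>) {
//     %w = "test.write"(%t) : (tensor<4xf32>) -> tensor<4xf32>
//     scf.yield %w : tensor<4xf32>
//   }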
bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
                                                   Operation *op1) {
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
    return iter->second;
  bool result = mlir::insideMutuallyExclusiveRegions(op0, op1);
  // Populate the cache for both orderings of the ops.
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
  return result;
}
void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
}
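// Illustrative usage (a sketch, not from the original file): the caches above
// are keyed on IR objects (Operation *, Value, Region *), so a client that
// mutates the IR during analysis would clear them before issuing further
// queries:
//   rewriter.eraseOp(someOp); // hypothetical IR mutation
//   state.resetCache();       // drop cache entries with stale keys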
Region *bufferization::getNextEnclosingRepetitiveRegion(
    Region *region, const BufferizationOptions &options) {
  // ... (walk up to the next repetitive parent region)
}

Region *bufferization::getParallelRegion(Region *region,
                                         const BufferizationOptions &options) {
  // ...
  if (bufferizableOp &&
      bufferizableOp.isParallelRegion(region->getRegionNumber())) {
    assert(isRepetitiveRegion(region, options) &&
           "expected that all parallel regions are also repetitive regions");
    return region;
  }
  // ...
}
static Operation *getOwnerOfValue(Value value) {
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
}
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    OpBuilder &b, Location loc, Value shapedValue,
    const BufferizationOptions &options, const BufferizationState &state,
    bool copy) {
  Value tensor;
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
    tensor = shapedValue;
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
        b, loc, memref::getTensorTypeFromMemRefType(shapedValue.getType()),
        shapedValue);
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
    return getOwnerOfValue(shapedValue)
        ->emitError("copying of unranked tensors is not implemented");
  } else {
    llvm_unreachable("expected RankedTensorType or MemRefType");
  }
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  SmallVector<Value> dynamicSizes;
  if (!copy) {
    // Compute the dynamic part of the shape: first try to reify the shape via
    // ReifyRankedShapedTypeOpInterface.
    bool reifiedShapes = false;
    if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
        llvm::isa<OpResult>(shapedValue)) {
      ReifiedRankedShapedTypeDims resultDims;
      if (succeeded(
              reifyResultShapes(b, shapedValue.getDefiningOp(), resultDims))) {
        reifiedShapes = true;
        auto &shape =
            resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
        for (const auto &dim : enumerate(tensorType.getShape()))
          if (ShapedType::isDynamic(dim.value()))
            dynamicSizes.push_back(
                getValueOrCreateConstantIndexOp(b, loc, shape[dim.index()]));
      }
    }
    // If the shape could not be reified, create DimOps.
    if (!reifiedShapes)
      populateDynamicDimSizes(b, loc, tensor, dynamicSizes);
  }

  // Create AllocTensorOp.
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
                                             copy ? tensor : Value());

  // Add 'memory_space' attribute. Not needed if the 'copy' operand is
  // specified: in that case, the memory space is inferred from the copy.
  if (copy)
    return allocTensorOp.getResult();
  auto copyBufferType =
      detail::asMemRefType(getBufferType(tensor, options, state));
  if (failed(copyBufferType))
    return failure();
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
  if (!memorySpace)
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
}
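// Illustrative usage (a sketch, not from the original file): callers such as
// resolveTensorOpOperandConflicts (below) allocate a staging tensor for an
// out-of-place OpOperand and redirect the operand to it:
//   FailureOr<Value> copy = allocateTensorForShapedValue(
//       rewriter, op->getLoc(), opOperand->get(), options, state,
//       /*copy=*/true);
//   if (failed(copy))
//     return failure();
//   opOperand->set(*copy);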
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    RewriterBase &rewriter, const AnalysisState &analysisState,
    const BufferizationState &bufferizationState) {
  OpBuilder::InsertionGuard g(rewriter);
  Operation *op = getOperation();
  SmallVector<OpOperand *> outOfPlaceOpOperands;
  DenseSet<OpOperand *> copiedOpOperands;
  SmallVector<Value> outOfPlaceValues;
  DenseSet<Value> copiedOpValues;

  // Find all out-of-place OpOperands.
  for (OpOperand &opOperand : op->getOpOperands()) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
      continue;
    if (analysisState.isInPlace(opOperand))
      continue;
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");

    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
        analysisState
                .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
                .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      // The op itself does not write but may create exactly one alias. Instead
      // of copying the OpOperand, copy the aliasing OpResult, which can be
      // smaller (e.g., tensor.extract_slice). Do not apply this optimization
      // to unranked tensors, which cannot be copied at the moment.
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
    } else {
      // In all other cases, make a copy of the OpOperand.
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
    }
  }

  // Insert copies of OpOperands.
  rewriter.setInsertionPoint(op);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
    if (failed(copy))
      return failure();
    rewriter.modifyOpInPlace(op, [&]() { opOperand->set(*copy); });
  }

  // Insert copies of OpResults and reroute their later uses.
  rewriter.setInsertionPointAfter(op);
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
    if (failed(copy))
      return failure();
    SmallVector<OpOperand *> uses = llvm::to_vector(llvm::map_range(
        value.getUses(), [](OpOperand &use) { return &use; }));
    for (OpOperand *use : uses) {
      // Do not update the alloc_tensor op that was just created.
      if (use->getOwner() == copy->getDefiningOp())
        continue;
      // tensor.dim ops may have been created as dynamic extents of the
      // alloc_tensor op. Do not update those either.
      if (isa<tensor::DimOp>(use->getOwner()))
        continue;
      rewriter.modifyOpInPlace(use->getOwner(), [&]() { use->set(*copy); });
    }
  }

  return success();
}
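// Illustrative effect (a sketch, not from the original file): an out-of-place
// operand is rerouted through a freshly allocated copy, e.g.
//   %0 = "test.write"(%t) : (tensor<4xf32>) -> tensor<4xf32>
// becomes
//   %copy = bufferization.alloc_tensor() copy(%t) : tensor<4xf32>
//   %0 = "test.write"(%copy) : (tensor<4xf32>) -> tensor<4xf32>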
bool OpFilter::isOpAllowed(Operation *op) const {
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
    case Entry::ALLOW:
      isAllowed |= filterResult;
      break;
    case Entry::DENY:
      if (filterResult)
        // A DENY filter matches: this op is not allowed, even if ALLOW
        // filters also match.
        return false;
    };
  }
  return isAllowed;
}
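// Illustrative configuration (a sketch, not from the original file): ALLOW
// and DENY entries compose, with DENY taking precedence for matching ops:
//   BufferizationOptions options;
//   options.opFilter.allowDialect<tensor::TensorDialect>();
//   options.opFilter.denyOperation<tensor::ExtractOp>();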
static BufferLikeType
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                func::FuncOp funcOp,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
  }
  auto bufferType =
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
  return *bufferType;
}
static BaseMemRefType
defaultUnknownTypeConverter(TensorType tensorType, Attribute memorySpace,
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);
}
BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}
bool BufferizationOptions::isOpAllowed(Operation *op) const {
  // Special case: If function boundary bufferization is deactivated, do not
  // allow ops that belong to the `func` dialect.
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
    return false;
  return opFilter.isOpAllowed(op);
}
BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
    return nullptr;
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  if (!bufferizableOp)
    return nullptr;
  return bufferizableOp;
}
BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
  return dynCastBufferizableOp(getOwnerOfValue(value));
}
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   func::FuncOp funcOp,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
                                                                 memorySpace));
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
                                                             memorySpace));
    }
    auto bufferType =
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
    return *bufferType;
  };
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
}
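// Illustrative usage (a sketch, not from the original file):
//   BufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);
// This makes function signatures bufferize to memrefs with identity layout
// instead of the default fully dynamic layout.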
static void setInsertionPointAfter(OpBuilder &b, Value value) {
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
  } else {
    b.setInsertionPointAfter(value.getDefiningOp());
  }
}
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  Operation *op = getOwnerOfValue(value);
  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
    return bufferizableOp.getAliasingOpOperands(value, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingOpOperands(value);
}
AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);

  // The op is not bufferizable.
  return detail::unknownGetAliasingValues(opOperand);
}
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

  // Unknown op that returns a tensor: conservatively assume a read.
  return true;
}
bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

  // Unknown op that returns a tensor: conservatively assume a write.
  return true;
}
bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

  // Unknown op: conservatively assume that the operand does not alias-only.
  return false;
}
bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  if (!opResult)
    return true;
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  if (!bufferizableOp)
    return true;
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
}
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  DenseSet<OpOperand *> visited;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);

  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
      continue;

    // Skip over all ops that neither read nor write, but create an alias.
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
      return true;
  }

  return false;
}
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    OpOperand *opOperand, llvm::function_ref<bool(Value)> condition,
    TraversalConfig config,
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());

  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);

  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();

    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      // Stop traversal if the value was already visited.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }
    visited.insert(value);

    if (condition(value)) {
      result.insert(value);
      continue;
    }

    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      // Stop iterating at ops that are not bufferizable or are excluded by
      // the OpFilter.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      // The traversal ends naturally if there are no more aliasing OpOperands.
      if (config.alwaysIncludeLeaves)
        result.insert(value);
      continue;
    }

    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        // Stop traversal if only equivalent aliases should be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        // Stop traversal if only in-place OpOperands should be followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
          !value.getDefiningOp<CastOpInterface>()) {
        // Stop traversal if only same-type aliases (or casts) should be
        // followed.
        if (config.alwaysIncludeLeaves)
          result.insert(value);
        continue;
      }

      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
    }
  }

  return result;
}
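// Illustrative usage (a sketch, not from the original file): walk the reverse
// use-def chain to the closest values that bufferize to a memory write,
// following only equivalent aliases:
//   TraversalConfig config;
//   config.followEquivalentOnly = true;
//   llvm::SetVector<Value> defs = state.findValueInReverseUseDefChain(
//       &opOperand, [&](Value v) { return state.bufferizesToMemoryWrite(v); },
//       config);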
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },
      config);
}
AnalysisState::AnalysisState(const BufferizationOptions &options, TypeID type)
    : options(options), type(type) {
  for (const BufferizationOptions::AnalysisStateInitFn &fn :
       options.stateInitializers)
    fn(*this);
}
bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  // Do not copy if the tensor has undefined contents.
  if (hasUndefinedContents(&opOperand))
    return true;
  // Do not copy if the buffer of the tensor is entirely overwritten (with
  // values that do not depend on the old tensor).
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
    return true;
  // Do not copy if the tensor is never read, neither here nor through any
  // aliasing value.
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
    return true;
  // Default: The copy cannot be omitted.
  return false;
}
bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  // ToBufferOps are always in-place.
  if (isa<ToBufferOp>(opOperand.getOwner()))
    return true;
  // In the absence of analysis information, OpOperands that bufferize to a
  // memory write are out-of-place, i.e., an alloc and copy is inserted.
  return !bufferizesToMemoryWrite(opOperand);
}
bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, conservatively assume that the
  // values are not equivalent.
  return false;
}
bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {
  // In the absence of analysis information, conservatively assume that the
  // values may alias.
  return true;
}
bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
  // In the absence of analysis information, the conservative answer is "no".
  return false;
}
FailureOr<Value> bufferization::getBuffer(RewriterBase &rewriter, Value value,
                                          const BufferizationOptions &options,
                                          const BufferizationState &state) {
#ifndef NDEBUG
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
#endif // NDEBUG

  // Replace "%t = to_tensor %m" with %m.
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();

  // Insert to_buffer op.
  OpBuilder::InsertionGuard g(rewriter);
  setInsertionPointAfter(rewriter, value);
  FailureOr<BufferLikeType> bufferType = getBufferType(value, options, state);
  if (failed(bufferType))
    return failure();
  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
                                           *bufferType, value)
      .getResult();
}
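// Illustrative usage (a sketch, not from the original file): inside a
// BufferizableOpInterface::bufferize implementation, the operand buffer is
// obtained before the replacement memref ops are created:
//   FailureOr<Value> srcBuffer =
//       getBuffer(rewriter, srcTensor, options, state); // srcTensor: a tensor
//   if (failed(srcBuffer))                              // operand of the op
//     return failure();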
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {
  SmallVector<Value> invocationStack;
  return getBufferType(value, options, state, invocationStack);
}
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  llvm::scope_exit popFromStack([&]() { invocationStack.pop_back(); });

  // Try querying BufferizableOpInterface.
  Operation *op = getOwnerOfValue(value);
  auto bufferizableOp = options.dynCastBufferizableOp(op);
  if (bufferizableOp)
    return bufferizableOp.getBufferType(value, options, state,
                                        invocationStack);

  // Op is not bufferizable.
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
  });
}
bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
}
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
                                                  Operation *op,
                                                  ValueRange values) {
  assert(values.size() == op->getNumResults() &&
         "expected one value per OpResult");
  SmallVector<Value> replacements;
  for (OpResult opResult : op->getOpResults()) {
    Value replacement = values[opResult.getResultNumber()];
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      // The OpResult is a tensor; it is replaced by a buffer value.
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
      // ... (the buffer is wrapped in a to_tensor op before replacement)
    }
    replacements.push_back(replacement);
  }
  rewriter.replaceOp(op, replacements);
}
FailureOr<Value> BufferizationOptions::createAlloc(OpBuilder &b, Location loc,
                                                   MemRefType type,
                                                   ValueRange dynShape) const {
  if (allocationFn)
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);

  // Default buffer allocation via AllocOp.
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
        .getResult();
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();
}

LogicalResult BufferizationOptions::createMemCpy(OpBuilder &b, Location loc,
                                                 Value from, Value to) const {
  if (memCpyFn)
    return (*memCpyFn)(b, loc, from, to);

  memref::CopyOp::create(b, loc, from, to);
  return success();
}
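// Illustrative customization (a sketch, not from the original file; the exact
// hook signature is assumed): allocation can be redirected, e.g. to stack
// allocations:
//   options.allocationFn = [](OpBuilder &b, Location loc, MemRefType type,
//                             ValueRange dynShape,
//                             unsigned alignment) -> FailureOr<Value> {
//     return memref::AllocaOp::create(b, loc, type, dynShape).getResult();
//   };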
BaseMemRefType bufferization::getMemRefType(TensorType tensorType,
                                            const BufferizationOptions &options,
                                            MemRefLayoutAttrInterface layout,
                                            Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with specified layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  if (layout) {
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
                           memorySpace);
  }

  // Case 3: No layout specified; let the unknown-type converter decide.
  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
}
BaseMemRefType
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
                                                   Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with fully dynamic offset and strides.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
  SmallVector<int64_t> dynamicStrides(rankedTensorType.getRank(),
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
                         memorySpace);
}
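// Worked example (illustrative, not from the original file):
//   tensor<4x?xf32>  ->  memref<4x?xf32, strided<[?, ?], offset: ?>>
// The shape is preserved while offset and strides become fully dynamic.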
BaseMemRefType
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
                                                     Attribute memorySpace) {
  // Case 1: Unranked memref type.
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
                                   memorySpace);
  }

  // Case 2: Ranked memref type with static identity layout.
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
                         memorySpace);
}
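// Worked example (illustrative, not from the original file):
//   tensor<4x?xf32>  ->  memref<4x?xf32>
// The default (identity) layout map is used, with no offset/stride
// indirection.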
bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
    OpResult opResult, const AnalysisState &state) {
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);

  // Case 1: OpResults that have no aliasing OpOperand usually bufferize to
  // memory writes.
  if (opOperands.getAliases().empty())
    return true;

  // Case 2: If an aliasing OpOperand bufferizes to a memory write, so does
  // the OpResult.
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
      }))
    return true;

  // Case 3: Check if a nested aliasing value inside the op bufferizes to a
  // memory write.
  auto isMemoryWriteInsideOp = [&](Value v) {
    Operation *op = getOwnerOfValue(v);
    if (!opResult.getDefiningOp()->isAncestor(op))
      return false;
    return state.bufferizesToMemoryWrite(v);
  };
  TraversalConfig config;
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
    if (!state
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
             .empty())
      return true;
  }
  return false;
}
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    Value value, const AnalysisState &state) {
  Operation *op = getOwnerOfValue(value);
  SmallVector<AliasingOpOperand> result;
  for (OpOperand &opOperand : op->getOpOperands()) {
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
      continue;
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  }
  return AliasingOpOperandList(std::move(result));
}
FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    Value value, const BufferizationOptions &options,
    const BufferizationState &bufferizationState,
    SmallVector<Value> &invocationStack) {
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());

  // Bufferization must produce a valid memref element type.
  Type elementType = tensorType.getElementType();
  if (!BaseMemRefType::isValidElementType(elementType))
    return getOwnerOfValue(value)->emitError()
           << "cannot bufferize value of type " << tensorType
           << ": element type " << elementType
           << " is not a valid memref element type";

  // No further analysis is possible for a block argument.
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  }

  // Value is an OpResult.
  Operation *op = getOwnerOfValue(value);
  auto opResult = llvm::cast<OpResult>(value);
  AnalysisState analysisState(options);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    // If the OpResult has an equivalent OpOperand, both bufferize to the
    // exact same buffer type.
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
    return getBufferType(equivalentOperand, options, bufferizationState,
                         invocationStack);
  }

  // If we do not know the memory space and there is no default memory space,
  // report a failure.
  std::optional<Attribute> memSpace = options.defaultMemorySpaceFn(tensorType);
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");
  return cast<BufferLikeType>(
      getMemRefType(tensorType, options, /*layout=*/{}, *memSpace));
}
bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
    return false;
  return regionInterface.isRepetitiveRegion(index);
}
AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  // No aliasing in case of non-entry blocks.
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
      return {};

  // Unknown op: Conservatively assume that the value may alias with every
  // tensor OpOperand.
  AliasingOpOperandList r;
  for (OpOperand &operand : getOwnerOfValue(value)->getOpOperands())
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
AliasingValueList
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  // Unknown op: Conservatively assume that the OpOperand may alias with every
  // tensor OpResult and with every tensor block argument of an entry block.
  AliasingValueList r;
  for (OpResult result : opOperand.getOwner()->getOpResults())
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, /*isDefinite=*/false});
  for (Region &region : opOperand.getOwner()->getRegions())
    if (!region.getBlocks().empty())
      for (BlockArgument bbArg : region.getBlocks().front().getArguments())
        if (isa<TensorType>(bbArg.getType()))
          r.addAlias({bbArg, BufferRelation::Unknown, /*isDefinite=*/false});
  return r;
}
bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
  bool hasTensorBlockArgument = any_of(op->getRegions(), [&](Region &r) {
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
      });
    });
  });
  if (hasTensorBlockArgument)
    return true;
  if (any_of(op->getResultTypes(), isaTensor))
    return true;
  return any_of(op->getOperandTypes(), isaTensor);
}
FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  if (failed(bufferType))
    return failure();
  return cast<BaseMemRefType>(*bufferType);
}
bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
                                                         Value tensor,
                                                         Value buffer) {
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));
}