#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVectorExtras.h"

#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.cpp.inc"

#define DEBUG_TYPE "bufferizable-op-interface"
  if (auto bufferizableOp = options.dynCastBufferizableOp(op))
Region *AnalysisState::getEnclosingRepetitiveRegion(
  if (auto iter = enclosingRepetitiveRegionCache.find_as(op);
      iter != enclosingRepetitiveRegionCache.end())
  return enclosingRepetitiveRegionCache[op] =
Region *AnalysisState::getEnclosingRepetitiveRegion(
  if (auto iter = enclosingRepetitiveRegionCache.find_as(value);
      iter != enclosingRepetitiveRegionCache.end())
  SmallVector<Region *> visitedRegions;
    visitedRegions.push_back(region);
  enclosingRepetitiveRegionCache[value] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
Region *AnalysisState::getEnclosingRepetitiveRegion(
  if (auto iter = enclosingRepetitiveRegionCache.find_as(block);
      iter != enclosingRepetitiveRegionCache.end())
  Operation *op = nullptr;
  SmallVector<Region *> visitedRegions;
  enclosingRepetitiveRegionCache[block] = region;
  for (Region *r : visitedRegions)
    enclosingRepetitiveRegionCache[r] = region;
bool AnalysisState::insideMutuallyExclusiveRegions(Operation *op0,
  auto key = std::make_pair(op0, op1);
  if (auto iter = insideMutuallyExclusiveRegionsCache.find(key);
      iter != insideMutuallyExclusiveRegionsCache.end())
  insideMutuallyExclusiveRegionsCache[key] = result;
  insideMutuallyExclusiveRegionsCache[std::make_pair(op1, op0)] = result;
void AnalysisState::resetCache() {
  enclosingRepetitiveRegionCache.clear();
  insideMutuallyExclusiveRegionsCache.clear();
Region *bufferization::getNextEnclosingRepetitiveRegion(

                                        const BufferizationOptions &options) {
    if (bufferizableOp &&
           "expected that all parallel regions are also repetitive regions");
  if (auto opResult = llvm::dyn_cast<OpResult>(value))
    return opResult.getDefiningOp();
  return llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
FailureOr<Value> bufferization::allocateTensorForShapedValue(
    const BufferizationOptions &options, const BufferizationState &state,
  if (llvm::isa<RankedTensorType>(shapedValue.getType())) {
  } else if (llvm::isa<MemRefType>(shapedValue.getType())) {
    tensor = ToTensorOp::create(
  } else if (llvm::isa<UnrankedTensorType>(shapedValue.getType()) ||
             llvm::isa<UnrankedMemRefType>(shapedValue.getType())) {
        ->emitError("copying of unranked tensors is not implemented");
    llvm_unreachable("expected RankedTensorType or MemRefType");
  RankedTensorType tensorType = llvm::cast<RankedTensorType>(tensor.getType());
  bool reifiedShapes = false;
  if (llvm::isa<RankedTensorType>(shapedValue.getType()) &&
      llvm::isa<OpResult>(shapedValue)) {
      reifiedShapes = true;
          resultDims[llvm::cast<OpResult>(shapedValue).getResultNumber()];
      for (const auto &dim : enumerate(tensorType.getShape())) {
        if (ShapedType::isDynamic(dim.value())) {
          dynamicSizes.push_back(
  auto allocTensorOp = AllocTensorOp::create(b, loc, tensorType, dynamicSizes,
    return allocTensorOp.getResult();
  auto copyBufferType =
  if (failed(copyBufferType))
  std::optional<Attribute> memorySpace = copyBufferType->getMemorySpace();
    memorySpace = options.defaultMemorySpaceFn(tensorType);
  if (memorySpace.has_value())
    allocTensorOp.setMemorySpaceAttr(memorySpace.value());
  return allocTensorOp.getResult();
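// [Illustrative note, not part of the upstream source] allocateTensorForShapedValue
// materializes a bufferization.alloc_tensor for the given shaped value, reifying
// dynamic extents from the source where possible; per the elided lines, the
// allocation can also be seeded with a copy of the original value. A minimal
// call sketch, mirroring the uses further below:
//
//   FailureOr<Value> replacement = allocateTensorForShapedValue(
//       rewriter, op->getLoc(), tensorValue, options, state, /*copy=*/true);
//   if (failed(replacement))
//     return failure();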
LogicalResult BufferizableOpInterface::resolveTensorOpOperandConflicts(
    const BufferizationState &bufferizationState) {
    Type operandType = opOperand.get().getType();
    if (!llvm::isa<TensorType>(operandType))
    if (analysisState.isInPlace(opOperand))
    if (llvm::isa<UnrankedTensorType>(operandType))
      return op->emitError("copying of unranked tensors is not implemented");
    AliasingValueList aliasingValues =
        analysisState.getAliasingValues(opOperand);
    if (aliasingValues.getNumAliases() == 1 &&
        isa<OpResult>(aliasingValues.getAliases()[0].value) &&
        !analysisState.bufferizesToMemoryWrite(opOperand) &&
            .getAliasingOpOperands(aliasingValues.getAliases()[0].value)
            .getNumAliases() == 1 &&
        !isa<UnrankedTensorType>(
            aliasingValues.getAliases()[0].value.getType())) {
      Value value = aliasingValues.getAliases()[0].value;
      outOfPlaceValues.push_back(value);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpValues.insert(value);
      outOfPlaceOpOperands.push_back(&opOperand);
      if (!analysisState.canOmitTensorCopy(opOperand))
        copiedOpOperands.insert(&opOperand);
  for (OpOperand *opOperand : outOfPlaceOpOperands) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), opOperand->get(), analysisState.getOptions(),
        bufferizationState, copiedOpOperands.contains(opOperand));
  for (Value value : outOfPlaceValues) {
    FailureOr<Value> copy = allocateTensorForShapedValue(
        rewriter, op->getLoc(), value, analysisState.getOptions(),
        bufferizationState, copiedOpValues.count(value));
          if (use->getOwner() == copy->getDefiningOp())
          if (isa<tensor::DimOp>(use->getOwner()))
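// [Illustrative note, not part of the upstream source] In the two loops above,
// every OpOperand and result value that the analysis marked as out-of-place is
// redirected to a freshly allocated tensor; the copiedOpOperands /
// copiedOpValues sets decide whether that allocation also copies the original
// contents or may start out with undefined contents.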
bool OpFilter::isOpAllowed(Operation *op) const {
  bool isAllowed = !hasAllowRule();
  for (const Entry &entry : entries) {
    bool filterResult = entry.fn(op);
    switch (entry.type) {
      isAllowed |= filterResult;
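// [Illustrative sketch, not part of the upstream source] OpFilter combines
// ALLOW and DENY entries: with at least one ALLOW rule present, ops start out
// disallowed and must match an ALLOW entry, while a matching DENY entry always
// wins. Assuming the public OpFilter API exposed via BufferizationOptions:
//
//   BufferizationOptions options;
//   options.opFilter.allowDialect<tensor::TensorDialect>();
//   options.opFilter.denyOperation("tensor.extract_slice");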
defaultFunctionArgTypeConverter(TensorLikeType type, Attribute memorySpace,
                                const BufferizationOptions &options) {
  if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
    return cast<BufferLikeType>(
        getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace));
      type.getBufferType(options, [&]() { return funcOp->emitError(); });
  assert(succeeded(bufferType) &&
         "a valid buffer is always expected at function boundary");
                            const BufferizationOptions &options) {
  return getMemRefTypeWithFullyDynamicLayout(tensorType, memorySpace);

BufferizationOptions::BufferizationOptions()
    : functionArgTypeConverterFn(defaultFunctionArgTypeConverter),
      unknownTypeConverterFn(defaultUnknownTypeConverter) {}
bool BufferizationOptions::isOpAllowed(Operation *op) const {
  bool isFuncBoundaryOp = isa_and_nonnull<func::FuncDialect>(op->getDialect());
  if (!bufferizeFunctionBoundaries && isFuncBoundaryOp)
  return opFilter.isOpAllowed(op);

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Operation *op) const {
  if (!isOpAllowed(op))
  auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op);
  return bufferizableOp;

BufferizableOpInterface
BufferizationOptions::dynCastBufferizableOp(Value value) const {
void BufferizationOptions::setFunctionBoundaryTypeConversion(
    LayoutMapOption layoutMapOption) {
  functionArgTypeConverterFn = [=](TensorLikeType type, Attribute memorySpace,
                                   const BufferizationOptions &options) {
    if (auto tensorType = mlir::dyn_cast<TensorType>(type)) {
      if (layoutMapOption == LayoutMapOption::IdentityLayoutMap)
        return cast<BufferLikeType>(
            bufferization::getMemRefTypeWithStaticIdentityLayout(tensorType,
      return cast<BufferLikeType>(
          bufferization::getMemRefTypeWithFullyDynamicLayout(tensorType,
        type.getBufferType(options, [&]() { return funcOp->emitError(); });
    assert(succeeded(bufferType) &&
           "a valid buffer is always expected at function boundary");
  inferFunctionResultLayout =
      layoutMapOption == LayoutMapOption::InferLayoutMap;
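// [Illustrative sketch, not part of the upstream source] This hook selects the
// memref layout used for function block arguments and results when bufferizing
// across function boundaries. A typical configuration, assuming the usual
// option names:
//
//   BufferizationOptions options;
//   options.bufferizeFunctionBoundaries = true;
//   options.setFunctionBoundaryTypeConversion(
//       LayoutMapOption::IdentityLayoutMap);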
  if (auto bbArg = llvm::dyn_cast<BlockArgument>(value)) {
    b.setInsertionPointToStart(bbArg.getOwner());
AliasingOpOperandList AnalysisState::getAliasingOpOperands(Value value) const {
  if (auto bufferizableOp = getOptions().dynCastBufferizableOp(op))
    return bufferizableOp.getAliasingOpOperands(value, *this);
  return detail::unknownGetAliasingOpOperands(value);

AliasingValueList AnalysisState::getAliasingValues(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.getAliasingValues(opOperand, *this);
  return detail::unknownGetAliasingValues(opOperand);
bool AnalysisState::bufferizesToMemoryRead(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryRead(opOperand, *this);

bool AnalysisState::bufferizesToMemoryWrite(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToMemoryWrite(opOperand, *this);

bool AnalysisState::bufferizesToAliasOnly(OpOperand &opOperand) const {
  if (auto bufferizableOp =
          getOptions().dynCastBufferizableOp(opOperand.getOwner()))
    return bufferizableOp.bufferizesToAliasOnly(opOperand, *this);

bool AnalysisState::bufferizesToMemoryWrite(Value value) const {
  auto opResult = llvm::dyn_cast<OpResult>(value);
  auto bufferizableOp = getOptions().dynCastBufferizableOp(value);
  return bufferizableOp.resultBufferizesToMemoryWrite(opResult, *this);
bool AnalysisState::isValueRead(Value value) const {
  assert(llvm::isa<TensorType>(value.getType()) && "expected TensorType");
  SmallVector<OpOperand *> workingSet;
  for (OpOperand &use : value.getUses())
    workingSet.push_back(&use);
  while (!workingSet.empty()) {
    OpOperand *uMaybeReading = workingSet.pop_back_val();
    if (!visited.insert(uMaybeReading).second)
    if (bufferizesToAliasOnly(*uMaybeReading))
      for (AliasingValue alias : getAliasingValues(*uMaybeReading))
        for (OpOperand &use : alias.value.getUses())
          workingSet.push_back(&use);
    if (bufferizesToMemoryRead(*uMaybeReading))
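// [Illustrative note, not part of the upstream source] The worklist above
// follows uses through operands that only alias (and neither read nor write)
// their value, so a value counts as "read" if any transitive user reads the
// buffer it aliases.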
llvm::SetVector<Value> AnalysisState::findValueInReverseUseDefChain(
    llvm::DenseSet<OpOperand *> *visitedOpOperands) const {
  llvm::DenseSet<Value> visited;
  llvm::SetVector<Value> result, workingSet;
  workingSet.insert(opOperand->get());
  if (visitedOpOperands)
    visitedOpOperands->insert(opOperand);
  while (!workingSet.empty()) {
    Value value = workingSet.pop_back_val();
    if (!config.revisitAlreadyVisitedValues && visited.contains(value)) {
      if (config.alwaysIncludeLeaves)
    visited.insert(value);
    if (condition(value)) {
    if (!config.followUnknownOps && !options.dynCastBufferizableOp(value)) {
      if (config.alwaysIncludeLeaves)
    AliasingOpOperandList aliases = getAliasingOpOperands(value);
    if (aliases.getNumAliases() == 0) {
      if (config.alwaysIncludeLeaves)
    for (AliasingOpOperand a : aliases) {
      if (config.followEquivalentOnly &&
          a.relation != BufferRelation::Equivalent) {
        if (config.alwaysIncludeLeaves)
      if (config.followInPlaceOnly && !isInPlace(*a.opOperand)) {
        if (config.alwaysIncludeLeaves)
      if (config.followSameTypeOrCastsOnly &&
          a.opOperand->get().getType() != value.getType() &&
        if (config.alwaysIncludeLeaves)
      workingSet.insert(a.opOperand->get());
      if (visitedOpOperands)
        visitedOpOperands->insert(a.opOperand);
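// [Illustrative note, not part of the upstream source] The TraversalConfig
// flags checked above prune the reverse use-def walk: followEquivalentOnly,
// followInPlaceOnly and followSameTypeOrCastsOnly stop at non-equivalent,
// out-of-place, or type-changing aliases; followUnknownOps controls whether
// non-bufferizable ops are looked through; and alwaysIncludeLeaves decides
// whether such stopping points are still reported in the result set.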
llvm::SetVector<Value>
AnalysisState::findDefinitions(OpOperand *opOperand) const {
  config.alwaysIncludeLeaves = false;
  return findValueInReverseUseDefChain(
      opOperand, [&](Value v) { return this->bufferizesToMemoryWrite(v); },

  for (const BufferizationOptions::AnalysisStateInitFn &fn :
bool AnalysisState::canOmitTensorCopy(OpOperand &opOperand) const {
  if (hasUndefinedContents(&opOperand))
  if (bufferizesToMemoryWrite(opOperand) && !bufferizesToMemoryRead(opOperand))
  AliasingValueList aliases = getAliasingValues(opOperand);
  if (!bufferizesToMemoryRead(opOperand) &&
      llvm::none_of(aliases,
                    [&](AliasingValue a) { return isValueRead(a.value); }))
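// [Illustrative note, not part of the upstream source] Per the checks above, a
// copy can be omitted when the operand has undefined contents, when this op
// writes the operand without reading it, or when neither the operand nor any
// of its aliasing values is read at all.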
bool AnalysisState::isInPlace(OpOperand &opOperand) const {
  if (isa<ToBufferOp>(opOperand.getOwner()))
  return !bufferizesToMemoryWrite(opOperand);

bool AnalysisState::areEquivalentBufferizedValues(Value v1, Value v2) const {

bool AnalysisState::areAliasingBufferizedValues(Value v1, Value v2) const {

bool AnalysisState::hasUndefinedContents(OpOperand *opOperand) const {
                             const BufferizationOptions &options,
                             const BufferizationState &state) {
  auto tensorType = llvm::dyn_cast<TensorLikeType>(value.getType());
  assert(tensorType && "unexpected non-tensor type");
  if (auto toTensorOp = value.getDefiningOp<bufferization::ToTensorOp>())
    return toTensorOp.getBuffer();
  return bufferization::ToBufferOp::create(rewriter, value.getLoc(),
FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state) {

FailureOr<BufferLikeType>
bufferization::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
  assert(llvm::isa<TensorLikeType>(value.getType()) &&
         "unexpected non-tensor type");
  invocationStack.push_back(value);
  llvm::scope_exit popFromStack([&]() { invocationStack.pop_back(); });
  auto bufferizableOp = options.dynCastBufferizableOp(op);
    return bufferizableOp.getBufferType(value, options, state, invocationStack);
  return cast<TensorLikeType>(value.getType()).getBufferType(options, [&]() {
    return op->emitError();
bool bufferization::hasTensorSemantics(Operation *op) {
  if (auto bufferizableOp = dyn_cast<BufferizableOpInterface>(op))
    return bufferizableOp.hasTensorSemantics();
  return detail::defaultHasTensorSemantics(op);
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
         "expected one value per OpResult");
    if (llvm::isa<TensorLikeType>(opResult.getType())) {
      assert(llvm::isa<BufferLikeType>(replacement.getType()) &&
             "tensor op result should be replaced with a buffer value");
    return (*allocationFn)(b, loc, type, dynShape, bufferAlignment);
  if (bufferAlignment != 0)
    return memref::AllocOp::create(b, loc, type, dynShape,
                                   b.getI64IntegerAttr(bufferAlignment))
  return memref::AllocOp::create(b, loc, type, dynShape).getResult();

    return (*memCpyFn)(b, loc, from, to);
  memref::CopyOp::create(b, loc, from, to);
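// [Illustrative sketch, not part of the upstream source] The two helpers above
// fall back to memref.alloc / memref.copy, but both can be overridden through
// BufferizationOptions. A hypothetical custom hook, assuming the documented
// callback signatures:
//
//   options.allocationFn = [](OpBuilder &b, Location loc, MemRefType type,
//                             ValueRange dynShape,
//                             unsigned alignment) -> FailureOr<Value> {
//     return memref::AllocaOp::create(b, loc, type, dynShape).getResult();
//   };
//   options.memCpyFn = [](OpBuilder &b, Location loc, Value from,
//                         Value to) -> LogicalResult {
//     memref::CopyOp::create(b, loc, from, to);
//     return success();
//   };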
    const BufferizationOptions &options, MemRefLayoutAttrInterface layout,
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    assert(!layout && "UnrankedTensorType cannot have a layout map");
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
    return MemRefType::get(rankedTensorType.getShape(),
                           rankedTensorType.getElementType(), layout,
  return options.unknownTypeConverterFn(tensorType, memorySpace, options);
bufferization::getMemRefTypeWithFullyDynamicLayout(TensorType tensorType,
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  int64_t dynamicOffset = ShapedType::kDynamic;
                                      ShapedType::kDynamic);
  auto stridedLayout = StridedLayoutAttr::get(tensorType.getContext(),
                                              dynamicOffset, dynamicStrides);
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), stridedLayout,
bufferization::getMemRefTypeWithStaticIdentityLayout(TensorType tensorType,
  if (auto unrankedTensorType =
          llvm::dyn_cast<UnrankedTensorType>(tensorType)) {
    return UnrankedMemRefType::get(unrankedTensorType.getElementType(),
  auto rankedTensorType = llvm::cast<RankedTensorType>(tensorType);
  MemRefLayoutAttrInterface layout = {};
  return MemRefType::get(rankedTensorType.getShape(),
                         rankedTensorType.getElementType(), layout,
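// [Illustrative note, not part of the upstream source] For a tensor<?x8xf32>,
// the two helpers above produce roughly:
//   getMemRefTypeWithFullyDynamicLayout   -> memref<?x8xf32, strided<[?, ?], offset: ?>>
//   getMemRefTypeWithStaticIdentityLayout -> memref<?x8xf32>
// (plus the requested memory space, if any).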
bool bufferization::detail::defaultResultBufferizesToMemoryWrite(
  auto bufferizableOp = cast<BufferizableOpInterface>(opResult.getDefiningOp());
  AliasingOpOperandList opOperands =
      bufferizableOp.getAliasingOpOperands(opResult, state);
  if (opOperands.getAliases().empty())
  if (llvm::any_of(opOperands, [&](AliasingOpOperand alias) {
        return state.bufferizesToMemoryWrite(*alias.opOperand);
  auto isMemoryWriteInsideOp = [&](Value v) {
    return state.bufferizesToMemoryWrite(v);
  config.alwaysIncludeLeaves = false;
  for (AliasingOpOperand alias : opOperands) {
             .findValueInReverseUseDefChain(alias.opOperand,
                                            isMemoryWriteInsideOp, config)
AliasingOpOperandList bufferization::detail::defaultGetAliasingOpOperands(
    if (!llvm::isa<TensorType>(opOperand.get().getType()))
    AliasingValueList aliasingValues = state.getAliasingValues(opOperand);
    for (const auto &it : aliasingValues)
      if (it.value == value)
        result.emplace_back(&opOperand, it.relation, it.isDefinite);
  return AliasingOpOperandList(std::move(result));
FailureOr<BufferLikeType> bufferization::detail::defaultGetBufferType(
    const BufferizationState &bufferizationState,
  assert(llvm::isa<TensorType>(value.getType()) && "expected tensor type");
  auto tensorType = cast<TensorType>(value.getType());
           << "cannot bufferize value of type " << tensorType
           << ": element type " << elementType
           << " is not a valid memref element type";
  if (llvm::isa<BlockArgument>(value)) {
    return cast<BufferLikeType>(
        bufferization::getMemRefType(tensorType, options));
  auto opResult = llvm::cast<OpResult>(value);
  AliasingOpOperandList aliases = analysisState.getAliasingOpOperands(opResult);
  if (aliases.getNumAliases() > 0 &&
      aliases.getAliases()[0].relation == BufferRelation::Equivalent) {
    Value equivalentOperand = aliases.getAliases().front().opOperand->get();
  if (!memSpace.has_value())
    return op->emitError("could not infer memory space");
  return cast<BufferLikeType>(
bool bufferization::detail::defaultIsRepetitiveRegion(
    BufferizableOpInterface bufferizableOp, unsigned index) {
  assert(index < bufferizableOp->getNumRegions() && "invalid region index");
  auto regionInterface =
      dyn_cast<RegionBranchOpInterface>(bufferizableOp.getOperation());
  if (!regionInterface)
  return regionInterface.isRepetitiveRegion(index);
AliasingOpOperandList
bufferization::detail::unknownGetAliasingOpOperands(Value value) {
  if (auto bbArg = dyn_cast<BlockArgument>(value))
    if (bbArg.getOwner() != &bbArg.getOwner()->getParent()->getBlocks().front())
  AliasingOpOperandList r;
    if (isa<TensorType>(operand.get().getType()))
      r.addAlias({&operand, BufferRelation::Unknown, false});
bufferization::detail::unknownGetAliasingValues(OpOperand &opOperand) {
  AliasingValueList r;
    if (llvm::isa<TensorType>(result.getType()))
      r.addAlias({result, BufferRelation::Unknown, false});
      if (isa<TensorType>(bbArg.getType()))
        r.addAlias({bbArg, BufferRelation::Unknown, false});
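// [Illustrative note, not part of the upstream source] These two "unknown"
// fallbacks are deliberately conservative: for an op that does not implement
// the interface, every tensor operand is assumed to alias every tensor result
// (and entry block argument) with BufferRelation::Unknown and a non-definite
// alias.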
bool bufferization::detail::defaultHasTensorSemantics(Operation *op) {
  auto isaTensor = [](Type t) { return isa<TensorLikeType>(t); };
    return any_of(r.getBlocks(), [&](Block &b) {
      return any_of(b.getArguments(), [&](BlockArgument bbArg) {
        return isaTensor(bbArg.getType());
  if (hasTensorBlockArgument)
FailureOr<BaseMemRefType>
bufferization::detail::asMemRefType(FailureOr<BufferLikeType> bufferType) {
  return cast<BaseMemRefType>(*bufferType);
bool bufferization::detail::typesMatchAfterBufferization(Operation &op,
  return mlir::succeeded(
      cast<TensorLikeType>(tensor.getType())
          .verifyCompatibleBufferType(cast<BufferLikeType>(buffer.getType()),
                                      [&]() { return op.emitError(); }));