FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue(
    OpBuilder &b, Value value, MemRefType destType,
    const BufferizationOptions &options) {
  auto srcType = llvm::cast<MemRefType>(value.getType());

  // Element type and rank must match.
  if (srcType.getElementType() != destType.getElementType())
    return failure();
  if (srcType.getRank() != destType.getRank())
    return failure();

  // A cast where a dynamic offset or stride becomes static cannot be
  // guaranteed to succeed at runtime, so such cases must be reallocated.
  auto isGuaranteedCastCompatible = [](MemRefType source, MemRefType target) {
    int64_t sourceOffset, targetOffset;
    SmallVector<int64_t, 4> sourceStrides, targetStrides;
    if (failed(source.getStridesAndOffset(sourceStrides, sourceOffset)) ||
        failed(target.getStridesAndOffset(targetStrides, targetOffset)))
      return false;
    auto dynamicToStatic = [](int64_t a, int64_t b) {
      return ShapedType::isDynamic(a) && ShapedType::isStatic(b);
    };
    if (dynamicToStatic(sourceOffset, targetOffset))
      return false;
    for (auto it : zip(sourceStrides, targetStrides))
      if (dynamicToStatic(std::get<0>(it), std::get<1>(it)))
        return false;
    return true;
  };

  // If the cast is guaranteed to succeed at runtime, emit a memref.cast.
  if (memref::CastOp::areCastCompatible(srcType, destType) &&
      isGuaranteedCastCompatible(srcType, destType)) {
    Value casted = memref::CastOp::create(b, value.getLoc(), destType, value);
    return casted;
  }

  // Otherwise, allocate a new buffer of the destination type and copy into it.
  Location loc = value.getLoc();
  SmallVector<Value, 4> dynamicOperands;
  for (int i = 0; i < destType.getRank(); ++i) {
    if (destType.getShape()[i] != ShapedType::kDynamic)
      continue;
    Value size = memref::DimOp::create(b, loc, value, i);
    dynamicOperands.push_back(size);
  }
  FailureOr<Value> copy =
      options.createAlloc(b, loc, destType, dynamicOperands);
  if (failed(copy))
    return failure();
  if (failed(options.createMemCpy(b, loc, value, *copy)))
    return failure();
  return copy;
}
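// Illustrative note (not from the source): casting, e.g.,
//   memref<?xf32, strided<[1], offset: ?>> to memref<?xf32, strided<[1]>>
// would turn a dynamic offset into a static one, which a memref.cast cannot
// guarantee at runtime; for such types the helper above allocates a new
// buffer of the destination type and copies instead of casting.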
/// Try to fold to_buffer(to_tensor(x)). If x's type and the result type of the
/// to_buffer op are different, a memref.cast is needed.
LogicalResult mlir::bufferization::foldToBufferToTensorPair(
    RewriterBase &rewriter, ToBufferOp toBuffer,
    const BufferizationOptions &options) {
  auto bufferToTensor = toBuffer.getTensor().getDefiningOp<ToTensorOp>();
  if (!bufferToTensor)
    return failure();

  Type srcType = bufferToTensor.getBuffer().getType();
  Type destType = toBuffer.getType();

  // Directly rewrite if the type did not change.
  if (srcType == destType) {
    rewriter.replaceOp(toBuffer, bufferToTensor.getBuffer());
    return success();
  }

  auto rankedSrcType = llvm::dyn_cast<MemRefType>(srcType);
  auto rankedDestType = llvm::dyn_cast<MemRefType>(destType);
  auto unrankedSrcType = llvm::dyn_cast<UnrankedMemRefType>(srcType);

  // Ranked memref -> ranked memref cast.
  if (rankedSrcType && rankedDestType) {
    FailureOr<Value> replacement = castOrReallocMemRefValue(
        rewriter, bufferToTensor.getBuffer(), rankedDestType, options);
    if (failed(replacement))
      return failure();
    rewriter.replaceOp(toBuffer, *replacement);
    return success();
  }

  // Unranked memref -> ranked memref cast: may require a copy.
  // TODO: Not implemented at the moment.
  if (unrankedSrcType && rankedDestType)
    return failure();

  // Unranked memref -> unranked memref cast, or
  // ranked memref -> unranked memref cast: no copy needed.
  assert(memref::CastOp::areCastCompatible(srcType, destType) &&
         "expected that types are cast compatible");
  rewriter.replaceOpWithNewOp<memref::CastOp>(toBuffer, destType,
                                              bufferToTensor.getBuffer());
  return success();
}
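// Schematic example (illustrative only): with matching buffer types,
//   %t = bufferization.to_tensor %m ...
//   %b = bufferization.to_buffer %t ...
// folds so that all uses of %b become uses of %m directly; with differing
// ranked memref types, a memref.cast (or, if the cast is not guaranteed to
// succeed, an alloc-and-copy via castOrReallocMemRefValue) bridges the gap.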
void mlir::bufferization::populateDynamicDimSizes(
    OpBuilder &b, Location loc, Value shapedValue,
    SmallVector<Value> &dynamicDims) {
  auto shapedType = llvm::cast<ShapedType>(shapedValue.getType());
  for (int64_t i = 0; i < shapedType.getRank(); ++i) {
    if (shapedType.isDynamicDim(i)) {
      if (llvm::isa<MemRefType>(shapedType)) {
        dynamicDims.push_back(memref::DimOp::create(b, loc, shapedValue, i));
      } else {
        assert(llvm::isa<RankedTensorType>(shapedType) && "expected tensor");
        dynamicDims.push_back(tensor::DimOp::create(b, loc, shapedValue, i));
      }
    }
  }
}
//===----------------------------------------------------------------------===//
// AllocTensorOp
//===----------------------------------------------------------------------===//

LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
                                       const BufferizationOptions &options,
                                       BufferizationState &state) {
  OpBuilder::InsertionGuard g(rewriter);
  Location loc = getLoc();

  // Nothing to do for dead AllocTensorOps.
  if (getOperation()->getUses().empty()) {
    rewriter.eraseOp(getOperation());
    return success();
  }

  // Get "copy" buffer.
  Value copyBuffer;
  if (getCopy()) {
    FailureOr<Value> maybeCopyBuffer =
        getBuffer(rewriter, getCopy(), options, state);
    if (failed(maybeCopyBuffer))
      return failure();
    copyBuffer = *maybeCopyBuffer;
  }

  // Create memory allocation.
  auto allocType = bufferization::getBufferType(getResult(), options, state);
  if (failed(allocType))
    return failure();
  SmallVector<Value> dynamicDims = getDynamicSizes();
  if (getCopy()) {
    assert(dynamicDims.empty() && "expected either `copy` or `dynamicDims`");
    populateDynamicDimSizes(rewriter, loc, copyBuffer, dynamicDims);
  }
  FailureOr<Value> alloc = options.createAlloc(
      rewriter, loc, llvm::cast<MemRefType>(*allocType), dynamicDims);
  if (failed(alloc))
    return failure();

  // Create memory copy (if any).
  if (getCopy()) {
    if (failed(options.createMemCpy(rewriter, loc, copyBuffer, *alloc)))
      return failure();
  }

  // Replace op.
  replaceOpWithBufferizedValues(rewriter, getOperation(), *alloc);
  return success();
}
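// Net effect, schematically (illustrative only): `%0 =
// bufferization.alloc_tensor(%d) : tensor<?xf32>` becomes an allocation of a
// memref<?xf32> via options.createAlloc; if a `copy` operand is present, the
// dynamic sizes are instead derived from the copied buffer and a memcpy from
// that buffer into the new allocation is emitted.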
bool AllocTensorOp::resultBufferizesToMemoryWrite(OpResult opResult,
                                                  const AnalysisState &state) {
  // AllocTensorOps do not write unless they have a `copy` value.
  return static_cast<bool>(getCopy());
}

bool AllocTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
                                           const AnalysisState &state) {
  assert(opOperand.getOperandNumber() == getNumOperands() - 1 &&
         "expected copy operand");
  return true;
}

bool AllocTensorOp::bufferizesToMemoryWrite(OpOperand &opOperand,
                                            const AnalysisState &state) {
  assert(opOperand.getOperandNumber() == getNumOperands() - 1 &&
         "expected copy operand");
  return false;
}
FailureOr<BufferLikeType>
AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
                             const BufferizationState &state,
                             SmallVector<Value> &invocationStack) {
  assert(value == getResult() && "invalid value");

  // Compute memory space of this allocation.
  Attribute memorySpace;
  if (getMemorySpace().has_value()) {
    memorySpace = *getMemorySpace();
  } else if (getCopy()) {
    auto copyBufferType = bufferization::detail::asMemRefType(
        bufferization::getBufferType(getCopy(), options, state,
                                     invocationStack));
    if (failed(copyBufferType))
      return failure();
    memorySpace = copyBufferType->getMemorySpace();
  } else if (auto ms = options.defaultMemorySpaceFn(getType())) {
    memorySpace = *ms;
  } else {
    return getOperation()->emitError("could not infer memory space");
  }

  return cast<BufferLikeType>(
      getMemRefTypeWithStaticIdentityLayout(getType(), memorySpace));
}
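// Memory-space precedence implemented above: an explicit `memory_space`
// attribute wins; otherwise the space is inherited from the `copy` operand's
// buffer type; otherwise the options' default memory space function decides;
// if none applies, bufferization fails with "could not infer memory space".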
LogicalResult AllocTensorOp::verify() {
  if (getCopy() && !getDynamicSizes().empty())
    return emitError("dynamic sizes not needed when copying a tensor");
  if (!getCopy() && getType().getNumDynamicDims() != getDynamicSizes().size())
    return emitError("expected ")
           << getType().getNumDynamicDims() << " dynamic sizes";
  if (getCopy() && getCopy().getType() != getType())
    return emitError("expected that `copy` and return type match");
  return success();
}
void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes) {
  build(builder, result, type, dynamicSizes, /*copy=*/Value(),
        /*size_hint=*/Value(), /*memory_space=*/IntegerAttr());
}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes,
                          Value copy) {
  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
        /*memory_space=*/IntegerAttr());
}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          TensorType type, ValueRange dynamicSizes, Value copy,
                          IntegerAttr memorySpace) {
  build(builder, result, type, dynamicSizes, copy, /*size_hint=*/Value(),
        memorySpace);
}
namespace {
/// Change the type of the result of a `bufferization.alloc_tensor` by making
/// the result type statically sized along dimensions that in the original
/// operation were defined as dynamic, but whose size was given by a
/// `constant` op.
struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {
  using OpRewritePattern<AllocTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(AllocTensorOp op,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> newDynamicSizes;
    SmallVector<int64_t> newShape = llvm::to_vector(op.getType().getShape());
    unsigned int dynValCounter = 0;
    for (int64_t i = 0; i < op.getType().getRank(); ++i) {
      if (!op.isDynamicDim(i))
        continue;
      Value value = op.getDynamicSizes()[dynValCounter++];
      APInt intVal;
      if (matchPattern(value, m_ConstantInt(&intVal))) {
        int64_t dim = intVal.getSExtValue();
        if (dim >= 0)
          newShape[i] = intVal.getSExtValue();
        else
          newDynamicSizes.push_back(value);
      } else {
        newDynamicSizes.push_back(value);
      }
    }
    RankedTensorType newType = RankedTensorType::get(
        newShape, op.getType().getElementType(), op.getType().getEncoding());
    if (newType == op.getType())
      return failure();
    auto newOp = AllocTensorOp::create(rewriter, op.getLoc(), newType,
                                       newDynamicSizes, /*copy=*/Value());
    rewriter.replaceOpWithNewOp<tensor::CastOp>(op, op.getType(), newOp);
    return success();
  }
};
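// Example of this pattern (adapted from the op documentation):
//   %c5 = arith.constant 5 : index
//   %0 = bufferization.alloc_tensor(%arg0, %c5) : tensor<?x?xf32>
// is rewritten to
//   %0 = bufferization.alloc_tensor(%arg0) : tensor<?x5xf32>
// followed by a tensor.cast back to the original tensor<?x?xf32> type.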
struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {
  using OpRewritePattern<tensor::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
    auto allocTensorOp = dimOp.getSource().getDefiningOp<AllocTensorOp>();
    if (!allocTensorOp || !maybeConstantIndex)
      return failure();
    if (*maybeConstantIndex < 0 ||
        *maybeConstantIndex >= allocTensorOp.getType().getRank())
      return failure();
    if (!allocTensorOp.getType().isDynamicDim(*maybeConstantIndex))
      return failure();
    rewriter.replaceOp(
        dimOp, allocTensorOp.getDynamicSize(rewriter, *maybeConstantIndex));
    return success();
  }
};
} // namespace

void AllocTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *ctx) {
  results.add<FoldDimOfAllocTensorOp, ReplaceStaticShapeDims>(ctx);
}
LogicalResult AllocTensorOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  auto shapes = llvm::to_vector<4>(
      llvm::map_range(llvm::seq<int64_t>(0, getType().getRank()),
                      [&](int64_t dim) -> OpFoldResult {
                        if (isDynamicDim(dim))
                          return getDynamicSize(builder, dim);
                        return builder.getIndexAttr(getStaticSize(dim));
                      }));
  reifiedReturnShapes.emplace_back(std::move(shapes));
  return success();
}
ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {
  SmallVector<OpAsmParser::UnresolvedOperand> dynamicSizesOperands;
  if (parser.parseLParen() || parser.parseOperandList(dynamicSizesOperands) ||
      parser.parseRParen())
    return failure();
  ParseResult copyKeyword = parser.parseOptionalKeyword("copy");
  OpAsmParser::UnresolvedOperand copyOperand;
  if (copyKeyword.succeeded())
    if (parser.parseLParen() || parser.parseOperand(copyOperand) ||
        parser.parseRParen())
      return failure();
  ParseResult sizeHintKeyword = parser.parseOptionalKeyword("size_hint");
  OpAsmParser::UnresolvedOperand sizeHintOperand;
  if (sizeHintKeyword.succeeded())
    if (parser.parseEqual() || parser.parseOperand(sizeHintOperand))
      return failure();
  if (parser.parseOptionalAttrDict(result.attributes) || parser.parseColon())
    return failure();

  TensorType type;
  if (parser.parseCustomTypeWithFallback(type))
    return failure();
  result.addTypes(type);

  Type indexType = parser.getBuilder().getIndexType();
  if (parser.resolveOperands(dynamicSizesOperands, indexType, result.operands))
    return failure();
  if (copyKeyword.succeeded())
    if (parser.resolveOperand(copyOperand, type, result.operands))
      return failure();
  if (sizeHintKeyword.succeeded())
    if (parser.resolveOperand(sizeHintOperand, indexType, result.operands))
      return failure();
  result.addAttribute(AllocTensorOp::getOperandSegmentSizeAttr(),
                      parser.getBuilder().getDenseI32ArrayAttr(
                          {static_cast<int32_t>(dynamicSizesOperands.size()),
                           static_cast<int32_t>(copyKeyword.succeeded()),
                           static_cast<int32_t>(sizeHintKeyword.succeeded())}));
  return success();
}

void AllocTensorOp::print(OpAsmPrinter &p) {
  p << "(" << getDynamicSizes() << ")";
  if (getCopy())
    p << " copy(" << getCopy() << ")";
  if (getSizeHint())
    p << " size_hint=" << getSizeHint();
  p.printOptionalAttrDict((*this)->getAttrs(), /*elidedAttrs=*/{
                              AllocTensorOp::getOperandSegmentSizeAttr()});
  p << " : ";
  auto type = getResult().getType();
  if (auto validType = llvm::dyn_cast<::mlir::TensorType>(type))
    p.printStrippedAttrOrType(validType);
  else
    p << type;
}

Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
  assert(isDynamicDim(idx) && "expected dynamic dim");
  if (getCopy())
    return tensor::DimOp::create(b, getLoc(), getCopy(), idx);
  return getOperand(getIndexOfDynamicSize(idx));
}
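// Round-trip format handled by parse()/print() above, schematically:
//   %0 = bufferization.alloc_tensor(%d0, %d1) copy(%t) size_hint=%h
//        : tensor<?x?xf32>
// The dense-i32-array segment-size attribute recorded by parse() tracks how
// many operands belong to each of the three optional/variadic groups.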
namespace {
/// Merge the clone and its source (by converting the clone to a cast) when
/// possible.
struct SimplifyClones : public OpRewritePattern<CloneOp> {
  using OpRewritePattern<CloneOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(CloneOp cloneOp,
                                PatternRewriter &rewriter) const override {
    if (cloneOp.use_empty()) {
      rewriter.eraseOp(cloneOp);
      return success();
    }

    Value source = cloneOp.getInput();
    if (source.getType() != cloneOp.getType() &&
        !memref::CastOp::areCastCompatible({source.getType()},
                                           {cloneOp.getType()}))
      return failure();

    // Look through view-like ops to find the canonical source.
    Value canonicalSource = source;
    while (auto iface = dyn_cast_or_null<ViewLikeOpInterface>(
               canonicalSource.getDefiningOp())) {
      if (canonicalSource != iface.getViewDest()) {
        break;
      }
      canonicalSource = iface.getViewSource();
    }

    std::optional<Operation *> maybeCloneDeallocOp =
        memref::findDealloc(cloneOp.getOutput());
    // Skip if either value has more than one dealloc operation.
    if (!maybeCloneDeallocOp.has_value())
      return failure();
    std::optional<Operation *> maybeSourceDeallocOp =
        memref::findDealloc(canonicalSource);
    if (!maybeSourceDeallocOp.has_value())
      return failure();
    Operation *cloneDeallocOp = *maybeCloneDeallocOp;
    Operation *sourceDeallocOp = *maybeSourceDeallocOp;

    // If both are deallocated in the same block, we cannot decide which one
    // to drop.
    if (cloneDeallocOp && sourceDeallocOp &&
        cloneDeallocOp->getBlock() == sourceDeallocOp->getBlock())
      return failure();

    Block *currentBlock = cloneOp->getBlock();
    Operation *redundantDealloc = nullptr;
    if (cloneDeallocOp && cloneDeallocOp->getBlock() == currentBlock) {
      redundantDealloc = cloneDeallocOp;
    } else if (sourceDeallocOp && sourceDeallocOp->getBlock() == currentBlock) {
      redundantDealloc = sourceDeallocOp;
    }

    if (!redundantDealloc)
      return failure();

    // Safety check: no other deallocation between cloneOp and
    // redundantDealloc, as we might otherwise deallocate an alias of source
    // before the uses of the clone.
    for (Operation *pos = cloneOp->getNextNode(); pos != redundantDealloc;
         pos = pos->getNextNode()) {
      // Bail if we run out of operations while looking for a deallocation op.
      if (!pos)
        return failure();
      auto effectInterface = dyn_cast<MemoryEffectOpInterface>(pos);
      if (!effectInterface)
        continue;
      if (effectInterface.hasEffect<MemoryEffects::Free>())
        return failure();
    }

    if (source.getType() != cloneOp.getType())
      source = memref::CastOp::create(rewriter, cloneOp.getLoc(),
                                      cloneOp.getType(), source);
    rewriter.replaceOp(cloneOp, source);
    rewriter.eraseOp(redundantDealloc);
    return success();
  }
};
} // namespace

void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyClones>(context);
}
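// Net effect (illustrative): a bufferization.clone whose buffer (or source)
// is deallocated redundantly in the clone's own block collapses to the source
// value, possibly through a memref.cast, and the redundant dealloc is erased,
// avoiding an alloc/copy/dealloc round trip.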
//===----------------------------------------------------------------------===//
// DeallocTensorOp
//===----------------------------------------------------------------------===//

LogicalResult DeallocTensorOp::bufferize(RewriterBase &rewriter,
                                         const BufferizationOptions &options,
                                         BufferizationState &state) {
  FailureOr<Value> buffer = getBuffer(rewriter, getTensor(), options, state);
  if (failed(buffer))
    return failure();
  memref::DeallocOp::create(rewriter, getLoc(), *buffer);
  rewriter.eraseOp(getOperation());
  return success();
}
//===----------------------------------------------------------------------===//
// MaterializeInDestinationOp
//===----------------------------------------------------------------------===//

bool MaterializeInDestinationOp::bufferizesToMemoryRead(
    OpOperand &opOperand, const AnalysisState &state) {
  return opOperand == getSourceMutable();
}

bool MaterializeInDestinationOp::bufferizesToMemoryWrite(
    OpOperand &opOperand, const AnalysisState &state) {
  if (opOperand == getDestMutable()) {
    assert(isa<TensorType>(getDest().getType()) && "expected tensor type");
    return true;
  }
  return false;
}

bool MaterializeInDestinationOp::mustBufferizeInPlace(
    OpOperand &opOperand, const AnalysisState &state) {
  // The destination operand always bufferizes in-place.
  return true;
}

AliasingValueList
MaterializeInDestinationOp::getAliasingValues(OpOperand &opOperand,
                                              const AnalysisState &state) {
  if (opOperand == getDestMutable()) {
    assert(isa<TensorType>(getDest().getType()) && "expected tensor type");
    return {{getOperation()->getResult(0), BufferRelation::Equivalent}};
  }
  return {};
}
LogicalResult
MaterializeInDestinationOp::bufferize(RewriterBase &rewriter,
                                      const BufferizationOptions &options,
                                      BufferizationState &state) {
  bool tensorDest = isa<TensorType>(getDest().getType());
  Value buffer;
  if (tensorDest) {
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, getDest(), options, state);
    if (failed(maybeBuffer))
      return failure();
    buffer = *maybeBuffer;
  } else {
    assert(isa<BaseMemRefType>(getDest().getType()) && "expected memref type");
    buffer = getDest();
  }
  auto srcBuffer = getBuffer(rewriter, getSource(), options, state);
  if (failed(srcBuffer))
    return failure();
  if (failed(options.createMemCpy(rewriter, getLoc(), *srcBuffer, buffer)))
    return failure();
  replaceOpWithBufferizedValues(rewriter, getOperation(),
                                tensorDest ? ValueRange(buffer) : ValueRange());
  return success();
}

bool MaterializeInDestinationOp::bufferizesToElementwiseAccess(
    const AnalysisState &state, ArrayRef<OpOperand *> opOperands) {
  // As elements are copied one-to-one, the op accesses both operands
  // elementwise.
  return true;
}
LogicalResult MaterializeInDestinationOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
  if (getOperation()->getNumResults() == 1) {
    assert(isa<TensorType>(getDest().getType()) && "expected tensor type");
    reifiedReturnShapes.resize(1,
                               SmallVector<OpFoldResult>(getType().getRank()));
    reifiedReturnShapes[0] =
        tensor::getMixedSizes(builder, getLoc(), getDest());
  }
  return success();
}
Value MaterializeInDestinationOp::buildSubsetExtraction(OpBuilder &builder,
                                                        Location loc) {
  if (isa<TensorType>(getDest().getType())) {
    // The subset is the entire destination tensor.
    return getDest();
  }

  // The "restrict" attribute is transferred from this op to the newly created
  // to_tensor op.
  assert(isa<BaseMemRefType>(getDest().getType()) && "expected memref type");
  assert(getRestrict() &&
         "expected that ops with memrefs dest have 'restrict'");
  setRestrict(false);
  return ToTensorOp::create(
      builder, loc, memref::getTensorTypeFromMemRefType(getDest().getType()),
      getDest(), /*restrict=*/true, getWritable());
}
bool MaterializeInDestinationOp::isEquivalentSubset(
    Value candidate, function_ref<bool(Value, Value)> equivalenceFn) {
  return equivalenceFn(getDest(), candidate);
}

SmallVector<Value>
MaterializeInDestinationOp::getValuesNeededToBuildSubsetExtraction() {
  return {getDest()};
}

OpOperand &MaterializeInDestinationOp::getSourceOperand() {
  return getOperation()->getOpOperand(0) /*source*/;
}

bool MaterializeInDestinationOp::operatesOnEquivalentSubset(
    SubsetOpInterface subsetOp,
    function_ref<bool(Value, Value)> equivalenceFn) {
  return false;
}

bool MaterializeInDestinationOp::operatesOnDisjointSubset(
    SubsetOpInterface subsetOp,
    function_ref<bool(Value, Value)> equivalenceFn) {
  return false;
}
LogicalResult MaterializeInDestinationOp::verify() {
  if (!isa<TensorType, BaseMemRefType>(getDest().getType()))
    return emitOpError("'dest' must be a tensor or a memref");
  if (auto destType = dyn_cast<TensorType>(getDest().getType())) {
    if (getOperation()->getNumResults() != 1)
      return emitOpError("tensor 'dest' implies exactly one tensor result");
    if (destType != getResult().getType())
      return emitOpError("result and 'dest' types must match");
  }
  if (isa<BaseMemRefType>(getDest().getType()) &&
      getOperation()->getNumResults() != 0)
    return emitOpError("memref 'dest' implies zero results");
  if (getRestrict() && !isa<BaseMemRefType>(getDest().getType()))
    return emitOpError("'restrict' is valid only for memref destinations");
  if (getWritable() != isa<BaseMemRefType>(getDest().getType()))
    return emitOpError("'writable' must be specified if and only if the "
                       "destination is of memref type");
  ShapedType srcType = cast<ShapedType>(getSource().getType());
  ShapedType destType = cast<ShapedType>(getDest().getType());
  if (srcType.hasRank() != destType.hasRank())
    return emitOpError("source/destination shapes are incompatible");
  if (srcType.hasRank()) {
    if (srcType.getRank() != destType.getRank())
      return emitOpError("rank mismatch between source and destination shape");
    for (auto [src, dest] :
         llvm::zip(srcType.getShape(), destType.getShape())) {
      if (src == ShapedType::kDynamic || dest == ShapedType::kDynamic) {
        // Cannot verify dynamic dimension sizes. Assume that they match at
        // runtime.
        continue;
      }
      if (src != dest)
        return emitOpError("source/destination shapes are incompatible");
    }
  }
  return success();
}
void MaterializeInDestinationOp::build(OpBuilder &builder,
                                       OperationState &state, Value source,
                                       Value dest) {
  auto destTensorType = dyn_cast<TensorType>(dest.getType());
  build(builder, state, /*result=*/destTensorType ? destTensorType : Type(),
        source, dest);
}

bool MaterializeInDestinationOp::isWritable(Value value,
                                            const AnalysisState &state) {
  return isa<TensorType>(getDest().getType()) ? true : getWritable();
}

MutableOperandRange MaterializeInDestinationOp::getDpsInitsMutable() {
  return getDestMutable();
}

void MaterializeInDestinationOp::getEffects(
    SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {
  if (isa<BaseMemRefType>(getDest().getType()))
    effects.emplace_back(MemoryEffects::Write::get(), &getDestMutable(),
                         SideEffects::DefaultResource::get());
}

//===----------------------------------------------------------------------===//
// ToTensorOp
//===----------------------------------------------------------------------===//

bool ToTensorOp::isWritable(Value value, const AnalysisState &state) {
  return getWritable();
}

OpFoldResult ToTensorOp::fold(FoldAdaptor) {
  if (auto toBuffer = getBuffer().getDefiningOp<ToBufferOp>())
    // Approximate alias analysis by conservatively folding only when there is
    // no interleaved operation between the two ops.
    if (toBuffer->getBlock() == this->getOperation()->getBlock() &&
        toBuffer->getNextNode() == this->getOperation())
      return toBuffer.getTensor();
  return {};
}
namespace {
/// Fold `tensor.dim` of a `to_tensor` result to `memref.dim` of the
/// underlying buffer.
struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> {
  using OpRewritePattern<tensor::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto memrefToTensorOp = dimOp.getSource().getDefiningOp<ToTensorOp>();
    if (!memrefToTensorOp)
      return failure();

    rewriter.replaceOpWithNewOp<memref::DimOp>(
        dimOp, memrefToTensorOp.getBuffer(), dimOp.getIndex());
    return success();
  }
};
} // namespace

void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  results.add<DimOfToTensorFolder>(context);
}
//===----------------------------------------------------------------------===//
// ToBufferOp
//===----------------------------------------------------------------------===//

OpFoldResult ToBufferOp::fold(FoldAdaptor) {
  if (auto memrefToTensor = getTensor().getDefiningOp<ToTensorOp>())
    if (memrefToTensor.getBuffer().getType() == getType())
      return memrefToTensor.getBuffer();
  return {};
}
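// Illustrative round trip folded away here: to_buffer(to_tensor(%m)) yields
// %m whenever the buffer types match exactly; mismatched types are handled by
// the ToBufferToTensorFolding pattern below via foldToBufferToTensorPair().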
namespace {
/// Replace tensor.cast + to_buffer by to_buffer + memref.cast.
struct ToBufferOfCast : public OpRewritePattern<ToBufferOp> {
  using OpRewritePattern<ToBufferOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ToBufferOp toBuffer,
                                PatternRewriter &rewriter) const override {
    auto tensorCastOperand =
        toBuffer.getOperand().getDefiningOp<tensor::CastOp>();
    if (!tensorCastOperand)
      return failure();
    auto srcTensorType = llvm::dyn_cast<RankedTensorType>(
        tensorCastOperand.getOperand().getType());
    if (!srcTensorType)
      return failure();
    auto currentOutputMemRefType =
        dyn_cast<BaseMemRefType>(toBuffer.getResult().getType());
    if (!currentOutputMemRefType)
      return failure();

    auto memrefType = currentOutputMemRefType.cloneWith(
        srcTensorType.getShape(), srcTensorType.getElementType());
    Value memref = ToBufferOp::create(rewriter, toBuffer.getLoc(), memrefType,
                                      tensorCastOperand.getOperand(),
                                      toBuffer.getReadOnly());
    rewriter.replaceOpWithNewOp<memref::CastOp>(toBuffer, toBuffer.getType(),
                                                memref);
    return success();
  }
};
/// Canonicalize bufferization.to_tensor + bufferization.to_buffer by folding
/// the pair, inserting a cast if the types differ.
struct ToBufferToTensorFolding : public OpRewritePattern<ToBufferOp> {
  using OpRewritePattern<ToBufferOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ToBufferOp toBuffer,
                                PatternRewriter &rewriter) const override {
    // Canonicalization patterns have no access to the pass's bufferization
    // options, so default-constructed options are used here.
    BufferizationOptions options;
    return foldToBufferToTensorPair(rewriter, toBuffer, options);
  }
};
/// Fold a load on a to_buffer operation into a tensor.extract on the
/// corresponding tensor.
struct LoadOfToBuffer : public OpRewritePattern<memref::LoadOp> {
  using OpRewritePattern<memref::LoadOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::LoadOp load,
                                PatternRewriter &rewriter) const override {
    auto toBuffer = load.getMemref().getDefiningOp<ToBufferOp>();
    if (!toBuffer)
      return failure();

    rewriter.replaceOpWithNewOp<tensor::ExtractOp>(load, toBuffer.getTensor(),
                                                   load.getIndices());
    return success();
  }
};
/// Fold memref.dim of a to_buffer result to tensor.dim of the source tensor.
struct DimOfCastOp : public OpRewritePattern<memref::DimOp> {
  using OpRewritePattern<memref::DimOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::DimOp dimOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = dimOp.getSource().getDefiningOp<ToBufferOp>();
    if (!castOp)
      return failure();
    Value newSource = castOp.getOperand();
    rewriter.replaceOpWithNewOp<tensor::DimOp>(dimOp, newSource,
                                               dimOp.getIndex());
    return success();
  }
};
} // namespace
void ToBufferOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {
  results.add<DimOfCastOp, LoadOfToBuffer, ToBufferOfCast,
              ToBufferToTensorFolding>(context);
}
LogicalResult ToBufferOp::bufferize(RewriterBase &rewriter,
                                    const BufferizationOptions &options,
                                    BufferizationState &state) {
  // Fold to_buffer(to_tensor(x)) to x, inserting a cast if necessary. The
  // helper's return value only signals whether the pattern matched;
  // bufferize() itself succeeds either way.
  (void)foldToBufferToTensorPair(rewriter, *this, options);
  return success();
}
std::optional<Operation *> CloneOp::buildDealloc(OpBuilder &builder,
                                                 Value alloc) {
  return memref::DeallocOp::create(builder, alloc.getLoc(), alloc)
      .getOperation();
}

std::optional<Value> CloneOp::buildClone(OpBuilder &builder, Value alloc) {
  return CloneOp::create(builder, alloc.getLoc(), alloc).getResult();
}
//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

LogicalResult DeallocOp::inferReturnTypes(
    MLIRContext *context, std::optional<::mlir::Location> location,
    ValueRange operands, DictionaryAttr attributes, OpaqueProperties properties,
    RegionRange regions, SmallVectorImpl<Type> &inferredReturnTypes) {
  DeallocOpAdaptor adaptor(operands, attributes, properties, regions);
  inferredReturnTypes = SmallVector<Type>(adaptor.getRetained().size(),
                                          IntegerType::get(context, 1));
  return success();
}
LogicalResult DeallocOp::verify() {
  if (getMemrefs().size() != getConditions().size())
    return emitOpError(
        "must have the same number of conditions as memrefs to deallocate");
  if (getRetained().size() != getUpdatedConditions().size())
    return emitOpError("must have the same number of updated conditions "
                       "(results) as retained operands");
  return success();
}
static LogicalResult updateDeallocIfChanged(DeallocOp deallocOp,
                                            ValueRange memrefs,
                                            ValueRange conditions,
                                            PatternRewriter &rewriter) {
  if (deallocOp.getMemrefs() == memrefs &&
      deallocOp.getConditions() == conditions)
    return failure();

  rewriter.modifyOpInPlace(deallocOp, [&]() {
    deallocOp.getMemrefsMutable().assign(memrefs);
    deallocOp.getConditionsMutable().assign(conditions);
  });
  return success();
}
namespace {
/// Remove duplicate values in the list of memrefs to be deallocated. The
/// corresponding conditions are combined with arith.ori.
struct DeallocRemoveDuplicateDeallocMemrefs
    : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    // Unique memrefs to be deallocated.
    DenseMap<Value, unsigned> memrefToCondition;
    SmallVector<Value> newMemrefs, newConditions;
    for (auto [i, memref, cond] :
         llvm::enumerate(deallocOp.getMemrefs(), deallocOp.getConditions())) {
      if (memrefToCondition.count(memref)) {
        // The memref already exists; OR the new condition into the old one.
        Value &newCond = newConditions[memrefToCondition[memref]];
        if (newCond != cond)
          newCond = arith::OrIOp::create(rewriter, deallocOp.getLoc(), newCond,
                                         cond);
      } else {
        memrefToCondition.insert({memref, newConditions.size()});
        newMemrefs.push_back(memref);
        newConditions.push_back(cond);
      }
    }

    // Return failure if nothing changed, to avoid an infinite loop of pattern
    // applications.
    return updateDeallocIfChanged(deallocOp, newMemrefs, newConditions,
                                  rewriter);
  }
};
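// Illustrative effect of this pattern:
//   bufferization.dealloc (%m, %m : ...) if (%c0, %c1)
// becomes
//   %or = arith.ori %c0, %c1 : i1
//   bufferization.dealloc (%m : ...) if (%or)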
/// Remove duplicate values in the list of retained memrefs. The corresponding
/// result values are replaced accordingly.
struct DeallocRemoveDuplicateRetainedMemrefs
    : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    // Unique retained values.
    DenseMap<Value, unsigned> seen;
    SmallVector<Value> newRetained;
    SmallVector<unsigned> resultReplacementIdx;
    unsigned i = 0;
    for (auto retained : deallocOp.getRetained()) {
      if (seen.count(retained)) {
        resultReplacementIdx.push_back(seen[retained]);
        continue;
      }

      seen[retained] = i;
      newRetained.push_back(retained);
      resultReplacementIdx.push_back(i++);
    }

    // Return failure if nothing changed, to avoid an infinite loop of pattern
    // applications.
    if (newRetained.size() == deallocOp.getRetained().size())
      return failure();

    // We need to create a new op because the number of results is always the
    // same as the number of retained operands.
    auto newDeallocOp =
        DeallocOp::create(rewriter, deallocOp.getLoc(), deallocOp.getMemrefs(),
                          deallocOp.getConditions(), newRetained);
    SmallVector<Value> replacements(
        llvm::map_range(resultReplacementIdx, [&](unsigned idx) {
          return newDeallocOp.getUpdatedConditions()[idx];
        }));
    rewriter.replaceOp(deallocOp, replacements);
    return success();
  }
};
/// Erase dealloc operations where the variadic list of memrefs to deallocate
/// is empty; all updated conditions are then trivially 'false'.
struct EraseEmptyDealloc : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    if (deallocOp.getMemrefs().empty()) {
      Value constFalse = arith::ConstantOp::create(rewriter, deallocOp.getLoc(),
                                                   rewriter.getBoolAttr(false));
      rewriter.replaceOp(
          deallocOp,
          SmallVector<Value>(deallocOp.getUpdatedConditions().size(),
                             constFalse));
      return success();
    }
    return failure();
  }
};
/// Remove memrefs from the deallocation list whose associated condition is
/// always 'false'.
struct EraseAlwaysFalseDealloc : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> newMemrefs, newConditions;
    for (auto [memref, cond] :
         llvm::zip(deallocOp.getMemrefs(), deallocOp.getConditions())) {
      if (!matchPattern(cond, m_Zero())) {
        newMemrefs.push_back(memref);
        newConditions.push_back(cond);
      }
    }

    return updateDeallocIfChanged(deallocOp, newMemrefs, newConditions,
                                  rewriter);
  }
};
/// A `memref.extract_strided_metadata` op is often inserted to get the base
/// memref when the operand is not already known to be the result of an
/// allocation. This pattern skips the extraction if the operand is produced
/// by an allocating operation.
struct SkipExtractMetadataOfAlloc : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> newMemrefs(
        llvm::map_range(deallocOp.getMemrefs(), [&](Value memref) {
          auto extractStridedOp =
              memref.getDefiningOp<memref::ExtractStridedMetadataOp>();
          if (!extractStridedOp)
            return memref;
          Value allocMemref = extractStridedOp.getOperand();
          auto allocOp = allocMemref.getDefiningOp<MemoryEffectOpInterface>();
          if (!allocOp)
            return memref;
          if (allocOp.getEffectOnValue<MemoryEffects::Allocate>(allocMemref))
            return allocMemref;
          return memref;
        }));

    return updateDeallocIfChanged(deallocOp, newMemrefs,
                                  deallocOp.getConditions(), rewriter);
  }
};
/// Remove pairs of `bufferization.dealloc` and alloc operations when there is
/// no other user of the allocated value and the allocating operation can be
/// safely removed.
struct RemoveAllocDeallocPairWhenNoOtherUsers
    : public OpRewritePattern<DeallocOp> {
  using OpRewritePattern<DeallocOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(DeallocOp deallocOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<Value> newMemrefs, newConditions;
    SmallVector<Operation *> toDelete;
    for (auto [memref, cond] :
         llvm::zip(deallocOp.getMemrefs(), deallocOp.getConditions())) {
      if (auto allocOp = memref.getDefiningOp<MemoryEffectOpInterface>()) {
        // Check that the operation truly allocates this value, has no other
        // side effects, and that there are no other users.
        if (allocOp.getEffectOnValue<MemoryEffects::Allocate>(memref) &&
            hasSingleEffect<MemoryEffects::Allocate>(allocOp, memref) &&
            memref.hasOneUse()) {
          toDelete.push_back(allocOp);
          continue;
        }
      }

      newMemrefs.push_back(memref);
      newConditions.push_back(cond);
    }

    if (failed(updateDeallocIfChanged(deallocOp, newMemrefs, newConditions,
                                      rewriter)))
      return failure();

    for (Operation *op : toDelete)
      rewriter.eraseOp(op);
    return success();
  }
};
} // namespace
void bufferization::populateDeallocOpCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<DeallocRemoveDuplicateDeallocMemrefs,
               DeallocRemoveDuplicateRetainedMemrefs, EraseEmptyDealloc,
               EraseAlwaysFalseDealloc, SkipExtractMetadataOfAlloc,
               RemoveAllocDeallocPairWhenNoOtherUsers>(context);
}
//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Bufferization/IR/BufferizationOps.cpp.inc"