/// Bufferization of tensor.cast. Replace with memref.cast.
struct CastOpInterface
    : public BufferizableOpInterface::ExternalModel<CastOpInterface,
                                                    tensor::CastOp> {
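  // Sketch of the rewrite this model performs (illustrative only; the exact
  // source/result layouts depend on the bufferization options in use):
  //
  //   %r = tensor.cast %t : tensor<?xf32> to tensor<4xf32>
  //
  // bufferizes to roughly:
  //
  //   %r = memref.cast %b : memref<?xf32, strided<[?], offset: ?>>
  //                         to memref<4xf32, strided<[?], offset: ?>>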
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto castOp = cast<tensor::CastOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        castOp.getSource(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    Attribute memorySpace = maybeSrcBufferType->getMemorySpace();

    // Case 1: Casting an unranked tensor. No static offset or strides can be
    // inferred from the source, so assume a fully dynamic layout.
    if (isa<UnrankedTensorType>(castOp.getSource().getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 2: Casting to an unranked tensor.
    if (isa<UnrankedTensorType>(castOp.getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 3: Ranked -> ranked cast. Offsets and strides do not change.
    auto rankedResultType = cast<RankedTensorType>(castOp.getType());
    return MemRefType::get(
        rankedResultType.getShape(), rankedResultType.getElementType(),
        llvm::cast<MemRefType>(*maybeSrcBufferType).getLayout(), memorySpace);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto castOp = cast<tensor::CastOp>(op);

    // The result buffer still has the old (pre-cast) type.
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, castOp.getSource(), options, state);
    if (failed(resultBuffer))
      return failure();

    // Compute the new memref type.
    auto resultMemRefType =
        bufferization::getBufferType(castOp.getResult(), options, state);
    if (failed(resultMemRefType))
      return failure();

    if (resultBuffer->getType() == *resultMemRefType) {
      // This cast is a no-op.
      replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
      return success();
    }

    // Replace the op with a memref.cast.
    assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
                                             *resultMemRefType) &&
           "CastOp::bufferize: cast incompatible");
    replaceOpWithNewBufferizedOp<memref::CastOp>(
        rewriter, op, *resultMemRefType, *resultBuffer);
    return success();
  }
};
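
// Sketch of the collapse_shape rewrite (illustrative only; it applies when
// the source buffer layout is guaranteed collapsible, otherwise a copy into
// a fresh identity-layout buffer is inserted first):
//
//   %1 = tensor.collapse_shape %0 [[0, 1], [2]]
//       : tensor<2x3x4xf32> into tensor<6x4xf32>
//
// becomes roughly:
//
//   %m = memref.collapse_shape %b [[0, 1], [2]]
//       : memref<2x3x4xf32> into memref<6x4xf32>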
/// Bufferization of tensor.collapse_shape. Replace with
/// memref.collapse_shape.
struct CollapseShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<CollapseShapeOpInterface,
                                                    tensor::CollapseShapeOp> {
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        collapseShapeOp.getSrc(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        srcBufferType, collapseShapeOp.getReassociationIndices());

    if (!canBeCollapsed) {
      // If the dims cannot be collapsed, this op bufferizes to a new
      // allocation with an identity layout.
      RankedTensorType tensorResultType = collapseShapeOp.getResultType();
      return getMemRefTypeWithStaticIdentityLayout(
          tensorResultType, srcBufferType.getMemorySpace());
    }

    return memref::CollapseShapeOp::computeCollapsedType(
        srcBufferType, collapseShapeOp.getReassociationIndices());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    RankedTensorType tensorResultType = collapseShapeOp.getResultType();
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, collapseShapeOp.getSrc(), options, state);
    if (failed(maybeBuffer))
      return failure();
    Value buffer = *maybeBuffer;
    auto bufferType = cast<MemRefType>(buffer.getType());

    if (tensorResultType.getRank() == 0) {
      // 0-d collapses must go through a different op builder.
      MemRefType resultType;
      if (bufferType.getLayout().isIdentity()) {
        // Standard layout: the result type has no offset.
        MemRefLayoutAttrInterface layout;
        resultType = MemRefType::get({}, tensorResultType.getElementType(),
                                     layout, bufferType.getMemorySpace());
      } else {
        // The source memref has a layout map: the result type keeps the
        // source offset.
        SmallVector<int64_t> strides;
        int64_t offset;
        if (failed(bufferType.getStridesAndOffset(strides, offset)))
          return failure();
        resultType = MemRefType::get(
            {}, tensorResultType.getElementType(),
            StridedLayoutAttr::get(op->getContext(), offset, {}),
            bufferType.getMemorySpace());
      }
      replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
          rewriter, op, resultType, buffer,
          collapseShapeOp.getReassociation());
      return success();
    }

    // If the dims are not collapsible (due to an incompatible source layout
    // map), force an out-of-place bufferization, i.e., a buffer copy. The
    // newly allocated buffer has no layout map and is thus collapsible.
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        bufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), collapseShapeOp.getSrc(), options, state);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType =
          MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                          collapseShapeOp.getSrcType().getElementType(),
                          AffineMap(), bufferType.getMemorySpace());
      buffer = rewriter.create<bufferization::ToBufferOp>(
          op->getLoc(), memrefType, *tensorAlloc);
    }

    // The result type is inferred by the builder.
    replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
        rewriter, op, buffer, collapseShapeOp.getReassociationIndices());
    return success();
  }
};
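
// Sketch of the dim rewrite (illustrative only):
//
//   %d = tensor.dim %t, %c0 : tensor<?xf32>
//
// becomes:
//
//   %d = memref.dim %b, %c0 : memref<?xf32, strided<[?], offset: ?>>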
/// Bufferization of tensor.dim. Replace with memref.dim.
struct DimOpInterface
    : public BufferizableOpInterface::ExternalModel<DimOpInterface,
                                                    tensor::DimOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto dimOp = cast<tensor::DimOp>(op);
    FailureOr<Value> v =
        getBuffer(rewriter, dimOp.getSource(), options, state);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, *v,
                                                dimOp.getIndex());
    return success();
  }
};
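
// Sketch of the empty rewrite (illustrative only): tensor.empty carries no
// defined contents, so
//
//   %0 = tensor.empty() : tensor<10xf32>
//
// simply becomes an allocation:
//
//   %0 = bufferization.alloc_tensor() : tensor<10xf32>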
/// Bufferization of tensor.empty. Replace with bufferization.alloc_tensor.
struct EmptyOpInterface
    : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
                                                    tensor::EmptyOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto emptyOp = cast<tensor::EmptyOp>(op);

    // Allocate a tensor. This emits a "bufferization.alloc_tensor" op.
    FailureOr<Value> allocTensor = allocateTensorForShapedValue(
        rewriter, op->getLoc(), emptyOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(allocTensor))
      return failure();
    rewriter.replaceOp(op, *allocTensor);
    return success();
  }
};
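
// Sketch of the expand_shape rewrite (illustrative only):
//
//   %1 = tensor.expand_shape %0 [[0, 1]] output_shape [2, 3]
//       : tensor<6xf32> into tensor<2x3xf32>
//
// becomes:
//
//   %m = memref.expand_shape %b [[0, 1]] output_shape [2, 3]
//       : memref<6xf32> into memref<2x3xf32>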
/// Bufferization of tensor.expand_shape. Replace with memref.expand_shape.
struct ExpandShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
                                                    tensor::ExpandShapeOp> {
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        expandShapeOp.getSrc(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    auto maybeResultType = memref::ExpandShapeOp::computeExpandedType(
        srcBufferType, expandShapeOp.getResultType().getShape(),
        expandShapeOp.getReassociationIndices());
    if (failed(maybeResultType))
      return failure();
    return *maybeResultType;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto tensorResultType = expandShapeOp.getResultType();
    FailureOr<Value> buffer =
        getBuffer(rewriter, expandShapeOp.getSrc(), options, state);
    if (failed(buffer))
      return failure();

    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
        op->getLoc(), tensorResultType.getShape(), *buffer,
        expandShapeOp.getReassociationIndices(),
        expandShapeOp.getMixedOutputShape());
    replaceOpWithBufferizedValues(rewriter, op,
                                  memrefExpandShape->getResults());
    return success();
  }
};
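
// Sketch of the extract_slice rewrite (illustrative only; the subview result
// carries a strided layout):
//
//   %1 = tensor.extract_slice %0[%o][%sz][1]
//       : tensor<?xf32> to tensor<?xf32>
//
// becomes:
//
//   %v = memref.subview %b[%o][%sz][1]
//       : memref<?xf32> to memref<?xf32, strided<[1], offset: ?>>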
/// Bufferization of tensor.extract_slice. Replace with memref.subview.
struct ExtractSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractSliceOpInterface,
                                                    tensor::ExtractSliceOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    Location loc = extractSliceOp.getLoc();

    // Get the source buffer.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractSliceOp.getSource(), options, state);
    if (failed(srcMemref))
      return failure();

    // Take a subview of the source buffer.
    auto resultMemrefType = bufferization::getBufferType(
        extractSliceOp.getResult(), options, state);
    if (failed(resultMemrefType))
      return failure();
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
        mixedOffsets, mixedSizes, mixedStrides);

    replaceOpWithBufferizedValues(rewriter, op, subView);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    assert(value == extractSliceOp.getResult() && "invalid value");
    auto srcMemrefType = bufferization::getBufferType(
        extractSliceOp.getSource(), options, state, invocationStack);
    if (failed(srcMemrefType))
      return failure();
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    return memref::SubViewOp::inferRankReducedResultType(
        extractSliceOp.getType().getShape(),
        llvm::cast<MemRefType>(*srcMemrefType), mixedOffsets, mixedSizes,
        mixedStrides);
  }
};
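
// Sketch of the extract rewrite (illustrative only):
//
//   %e = tensor.extract %t[%i] : tensor<?xf32>
//
// becomes:
//
//   %e = memref.load %b[%i] : memref<?xf32, strided<[?], offset: ?>>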
/// Bufferization of tensor.extract. Replace with memref.load.
struct ExtractOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractOpInterface,
                                                    tensor::ExtractOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto extractOp = cast<tensor::ExtractOp>(op);
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractOp.getTensor(), options, state);
    if (failed(srcMemref))
      return failure();
    replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, *srcMemref,
                                                 extractOp.getIndices());
    return success();
  }
};
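
// The helper below emits one memref.store per tensor element by recursing
// over the dimensions. Sketch of the IR generated for a 2x2 from_elements
// (illustrative only):
//
//   memref.store %e00, %buf[%c0, %c0] : memref<2x2xf32>
//   memref.store %e01, %buf[%c0, %c1] : memref<2x2xf32>
//   memref.store %e10, %buf[%c1, %c0] : memref<2x2xf32>
//   memref.store %e11, %buf[%c1, %c1] : memref<2x2xf32>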
/// Implements backtracking to traverse the indices of the output buffer while
/// iterating over the op's elements.
static void createStores(RewriterBase &rewriter, Location loc, int dim,
                         Value buffer, ArrayRef<int64_t> shape,
                         ArrayRef<Value> constants,
                         OperandRange::iterator &elementIt,
                         SmallVectorImpl<Value> &indices) {
  if (dim == static_cast<int>(shape.size()) - 1) {
    // Innermost dimension: emit one store per element.
    for (int i = 0; i < shape.back(); ++i) {
      indices.back() = constants[i];
      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
      ++elementIt;
    }
    return;
  }
  for (int i = 0; i < shape[dim]; ++i) {
    indices[dim] = constants[i];
    createStores(rewriter, loc, dim + 1, buffer, shape, constants, elementIt,
                 indices);
  }
}
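
// Sketch of the from_elements rewrite (illustrative only):
//
//   %t = tensor.from_elements %a, %b : tensor<2xf32>
//
// becomes an allocation plus element-wise stores:
//
//   %m = ... buffer of the allocated tensor ...
//   memref.store %a, %m[%c0] : memref<2xf32>
//   memref.store %b, %m[%c1] : memref<2xf32>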
/// Bufferization of tensor.from_elements.
struct FromElementsOpInterface
    : public BufferizableOpInterface::ExternalModel<FromElementsOpInterface,
                                                    tensor::FromElementsOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto fromElementsOp = cast<tensor::FromElementsOp>(op);
    auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());

    // Allocate a buffer for the result.
    Location loc = op->getLoc();
    auto shape = tensorType.getShape();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, fromElementsOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    FailureOr<BaseMemRefType> memrefType =
        bufferization::getBufferType(*tensorAlloc, options, state);
    if (failed(memrefType))
      return failure();
    Value buffer = rewriter.create<bufferization::ToBufferOp>(
        op->getLoc(), *memrefType, *tensorAlloc);

    // Case: tensor<0xelem_type>.
    if (fromElementsOp.getElements().empty()) {
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Case: 0-d tensor<elem_type>.
    if (shape.empty()) {
      rewriter.create<memref::StoreOp>(
          loc, fromElementsOp.getElements().front(), buffer);
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Create constants for the range of possible indices [0, max{shape_i}).
    auto maxDim = *llvm::max_element(shape);
    SmallVector<Value, 2> constants;
    constants.reserve(maxDim);
    for (int i = 0; i < maxDim; ++i)
      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));

    // Traverse all elements and create memref.store ops.
    auto elementIt = fromElementsOp.getElements().begin();
    SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
    createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                 indices);
    replaceOpWithBufferizedValues(rewriter, op, buffer);
    return success();
  }
};
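
// The helper below lowers the body of a generate-like op into a linalg.map
// that writes into the destination tensor. Rough sketch of the result
// (illustrative only; syntax abbreviated):
//
//   %r = linalg.map outs(%dest : tensor<...>)
//          () {
//            %i = linalg.index 0 : index
//            ... cloned body, linalg.yield-ing the element value ...
//          }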
/// Lower the body of a tensor.generate-like op (one index-typed block
/// argument per dimension) to a linalg.map body suitable for bufferization.
static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
                                     Value tensorDestination,
                                     ValueRange dynamicSizes,
                                     Region &generateBody) {
  assert(generateBody.hasOneBlock() && "expected body with single block");
  auto tensorType = cast<RankedTensorType>(tensorDestination.getType());
  assert(generateBody.getNumArguments() == tensorType.getRank() &&
         "rank mismatch");

  // Create the linalg::MapOp.
  OpBuilder::InsertionGuard g(rewriter);
  auto linalgOp = rewriter.create<linalg::MapOp>(
      loc, tensorType, /*inputs=*/ValueRange(),
      /*init=*/tensorDestination);
  Block &linalgBody = linalgOp.getMapper().emplaceBlock();

  // Create one linalg::IndexOp per dimension.
  rewriter.setInsertionPointToStart(&linalgBody);
  SmallVector<Value> indices;
  for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));

  // Move over the body, replacing the block arguments with the index values.
  rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
  auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
  rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOp.getValue());

  return linalgOp.getResult()[0];
}
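
// Sketch of the generate rewrite (illustrative only):
//
//   %t = tensor.generate %sz {
//   ^bb0(%i : index):
//     ...
//     tensor.yield %v : f32
//   } : tensor<?xf32>
//
// becomes an allocation whose contents are produced by a linalg.map built by
// lowerGenerateLikeOpBody above.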
/// Bufferization of tensor.generate.
struct GenerateOpInterface
    : public BufferizableOpInterface::ExternalModel<GenerateOpInterface,
                                                    tensor::GenerateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto generateOp = cast<tensor::GenerateOp>(op);

    auto type = generateOp.getResult().getType();

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(type) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, generateOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    Value result = lowerGenerateLikeOpBody(rewriter, loc, *tensorAlloc,
                                           generateOp.getDynamicExtents(),
                                           generateOp.getBody());
    rewriter.replaceOp(generateOp, result);

    return success();
  }
};
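
// Sketch of the insert rewrite (illustrative only):
//
//   %1 = tensor.insert %f into %t[%i] : tensor<?xf32>
//
// becomes a store into the destination buffer:
//
//   memref.store %f, %b[%i] : memref<?xf32, strided<[?], offset: ?>>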
/// Bufferization of tensor.insert. Replace with memref.store.
struct InsertOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertOpInterface,
                                                     tensor::InsertOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto insertOp = cast<tensor::InsertOp>(op);
    FailureOr<Value> destMemref =
        getBuffer(rewriter, insertOp.getDest(), options, state);
    if (failed(destMemref))
      return failure();
    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
                                     *destMemref, insertOp.getIndices());
    replaceOpWithBufferizedValues(rewriter, op, *destMemref);
    return success();
  }
};
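
// Example for the predicate below (illustrative): inserting a full-size
// slice at offset 0 with unit strides overwrites the destination entirely,
// so the destination is not read:
//
//   %r = tensor.insert_slice %src into %dest[0][4][1]
//       : tensor<4xf32> into tensor<4xf32>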
/// Return true if the given insert_slice-like op reads `opOperand`.
template <typename InsertOpTy>
static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
                                      OpOperand &opOperand) {
  // The source is always read.
  if (opOperand == insertSliceOp.getSourceMutable())
    return true;

  // For the destination, it depends on whether it is entirely overwritten.
  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");

  // The destination is not read if all offsets are 0, all strides are 1, and
  // the slice sizes match the destination sizes.
  bool allOffsetsZero =
      llvm::all_of(insertSliceOp.getMixedOffsets(), isZeroInteger);
  RankedTensorType destType = insertSliceOp.getDestType();
  bool sizesMatchDestSizes =
      areConstantIntValues(insertSliceOp.getMixedSizes(), destType.getShape());
  bool allStridesOne =
      areAllConstantIntValue(insertSliceOp.getMixedStrides(), 1);
  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
}
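
// Sketch of the insert_slice rewrite (illustrative only; the copy op is
// whatever options.createMemCpy emits, memref.copy by default):
//
//   %r = tensor.insert_slice %src into %dest[%o][%sz][1]
//       : tensor<?xf32> into tensor<?xf32>
//
// becomes a subview of the destination buffer plus a copy:
//
//   %v = memref.subview %destBuf[%o][%sz][1] ...
//   memref.copy %srcBuf, %v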
/// Bufferization of tensor.insert_slice. Replace with a memory copy. Under
/// certain circumstances, this op can also be a no-op.
struct InsertSliceOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertSliceOpInterface,
                                                     tensor::InsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
                                     opOperand);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
    Location loc = insertSliceOp.getLoc();

    // Get the destination buffer.
    FailureOr<Value> dstMemref =
        getBuffer(rewriter, insertSliceOp.getDest(), options, state);
    if (failed(dstMemref))
      return failure();

    // Take a subview of the destination buffer.
    auto dstMemrefType = cast<MemRefType>(dstMemref->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            insertSliceOp.getSourceType().getShape(), dstMemrefType,
            mixedOffsets, mixedSizes, mixedStrides);
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
        mixedStrides);

    // Copy the source into the subview. If this insert_slice has a matching
    // extract_slice, the copy eventually folds away.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, insertSliceOp.getSource(), options, state);
    if (failed(srcMemref))
      return failure();
    if (failed(options.createMemCpy(rewriter, loc, *srcMemref, subView)))
      return failure();

    replaceOpWithBufferizedValues(rewriter, op, *dstMemref);
    return success();
  }
};
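
// Sketch of the pad rewrite (illustrative only): tensor.pad becomes (1) an
// allocation of the padded shape, (2) a linalg.map filling it via the pad
// body, and (3) an insert_slice of the source into the filled buffer:
//
//   %0 = tensor.pad %t low[1] high[1] { ... tensor.yield %cst ... }
//       : tensor<?xf32> to tensor<?xf32>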
/// Bufferization of tensor.pad. Replace with an allocation that is filled by
/// the pad body, followed by an insert_slice of the source. For best
/// performance, vectorize before bufferization.
struct PadOpInterface
    : public BufferizableOpInterface::ExternalModel<PadOpInterface,
                                                    tensor::PadOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    // Infer the memory space from the source tensor.
    auto padOp = cast<tensor::PadOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        padOp.getSource(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    MemRefLayoutAttrInterface layout;
    return MemRefType::get(padOp.getResultType().getShape(),
                           padOp.getResultType().getElementType(), layout,
                           maybeSrcBufferType->getMemorySpace());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto padOp = cast<tensor::PadOp>(op);
    Location loc = padOp.getLoc();
    RankedTensorType resultType = padOp.getResultType();
    RankedTensorType srcType = padOp.getSourceType();

    auto toValue = [&](OpFoldResult ofr) {
      if (auto value = dyn_cast<Value>(ofr))
        return value;
      return rewriter
          .create<arith::ConstantIndexOp>(loc, *getConstantIntValue(ofr))
          .getResult();
    };

    // Compute the dynamic result dimensions: srcDim + lowPad + highPad.
    SmallVector<OpFoldResult> mixedLowPad = padOp.getMixedLowPad();
    SmallVector<OpFoldResult> mixedHighPad = padOp.getMixedHighPad();
    SmallVector<Value> dynamicSizes;
    for (int64_t i = 0; i < resultType.getRank(); ++i) {
      if (!resultType.isDynamicDim(i))
        continue;
      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
      Value lowPad = toValue(mixedLowPad[i]);
      Value highPad = toValue(mixedHighPad[i]);
      AffineExpr s0, s1, s2;
      bindSymbols(op->getContext(), s0, s1, s2);
      AffineExpr sumExpr = s0 + s1 + s2;
      Value sum = rewriter.create<affine::AffineApplyOp>(
          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
      dynamicSizes.push_back(sum);
    }

    // Allocate a buffer for the padded result.
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, padOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // Fill the allocation with the pad body, then insert the source.
    Value filledBuffer = lowerGenerateLikeOpBody(
        rewriter, loc, *tensorAlloc, dynamicSizes, padOp.getBodyRegion());

    SmallVector<OpFoldResult> sliceSizes =
        getMixedSizes(rewriter, loc, padOp.getSource());
    SmallVector<OpFoldResult> sliceStrides(srcType.getRank(),
                                           rewriter.getIndexAttr(1));
    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
        padOp, padOp.getSource(), filledBuffer,
        /*offsets=*/padOp.getMixedLowPad(), sliceSizes, sliceStrides);

    return success();
  }
};
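
// Sketch of the rank rewrite (illustrative only):
//
//   %r = tensor.rank %t : tensor<*xf32>
//
// becomes:
//
//   %r = memref.rank %b : memref<*xf32>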
/// Bufferization of tensor.rank. Replace with memref.rank.
struct RankOpInterface
    : public BufferizableOpInterface::ExternalModel<RankOpInterface,
                                                    tensor::RankOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto rankOp = cast<tensor::RankOp>(op);
    FailureOr<Value> v =
        getBuffer(rewriter, rankOp.getTensor(), options, state);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op,
                                                 rankOp.getType(), *v);
    return success();
  }
};
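
// Sketch of the reshape rewrite (illustrative only; a copy into an
// identity-layout buffer is inserted first if the source layout is
// incompatible with memref.reshape):
//
//   %r = tensor.reshape %t(%shape)
//       : (tensor<?xf32>, tensor<1xindex>) -> tensor<?xf32>
//
// becomes roughly:
//
//   %m = memref.reshape %b(%shapeBuf) ...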
/// Bufferization of tensor.reshape. Replace with memref.reshape.
struct ReshapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ReshapeOpInterface,
                                                    tensor::ReshapeOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Depending on the layout map, the source buffer may have to be copied.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    return opOperand == reshapeOp.getShapeMutable();
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    // Only the source operand aliases the result.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    if (reshapeOp.getSourceMutable() != opOperand)
      return {};
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, reshapeOp.getSource(), options, state);
    FailureOr<Value> shapeBuffer =
        getBuffer(rewriter, reshapeOp.getShape(), options, state);
    if (failed(srcBuffer) || failed(shapeBuffer))
      return failure();
    auto maybeResultMemRefType =
        bufferization::getBufferType(reshapeOp.getResult(), options, state);
    if (failed(maybeResultMemRefType))
      return failure();

    // memref.reshape requires the source buffer to have an identity layout.
    // If it does not, copy the source into a new buffer with an identity
    // layout.
    auto srcType = llvm::dyn_cast<MemRefType>(srcBuffer->getType());
    if (srcType && !srcType.getLayout().isIdentity()) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), reshapeOp.getSource(), options, state);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType = MemRefType::get(
          srcType.getShape(), srcType.getElementType(), AffineMap(),
          cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
      srcBuffer = rewriter
                      .create<bufferization::ToBufferOp>(
                          op->getLoc(), memrefType, *tensorAlloc)
                      .getResult();
    }

    replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
        rewriter, op, maybeResultMemRefType.value(), *srcBuffer, *shapeBuffer);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    assert(value == reshapeOp.getResult() && "unexpected value provided");
    auto maybeSourceBufferType = bufferization::getBufferType(
        reshapeOp.getSource(), options, state, invocationStack);
    if (failed(maybeSourceBufferType))
      return failure();
    return getMemRefTypeWithStaticIdentityLayout(
        reshapeOp.getResult().getType(),
        cast<BaseMemRefType>(maybeSourceBufferType.value()).getMemorySpace());
  }
};
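
// tensor.parallel_insert_slice has no result; after the copy below, the op
// is simply erased. Rough sketch (illustrative only):
//
//   scf.forall.in_parallel {
//     tensor.parallel_insert_slice %src into %dest[%o][%sz][1] ...
//   }
//
// becomes a memref.subview of the destination plus a copy, emitted just
// before the enclosing parallel combining terminator.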
/// Bufferization of tensor.parallel_insert_slice. Replace with a subview of
/// the destination buffer and a memory copy; the op itself is erased.
struct ParallelInsertSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<
          ParallelInsertSliceOpInterface, ParallelInsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return opOperand == cast<ParallelInsertSliceOp>(op).getSourceMutable();
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    return opOperand == parallelInsertSliceOp.getDestMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    ParallelCombiningOpInterface parallelCombiningParent =
        parallelInsertSliceOp.getParallelCombiningParent();

    // Bufferize the op outside of the parallel combining terminator.
    rewriter.setInsertionPoint(parallelCombiningParent);

    // Get the source and destination buffers.
    FailureOr<Value> destBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getDest(), options, state);
    if (failed(destBuffer))
      return failure();
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getSource(), options, state);
    if (failed(srcBuffer))
      return failure();

    // Take a subview of the destination buffer.
    auto destBufferType = cast<MemRefType>(destBuffer->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            parallelInsertSliceOp.getSourceType().getShape(), destBufferType,
            parallelInsertSliceOp.getMixedOffsets(),
            parallelInsertSliceOp.getMixedSizes(),
            parallelInsertSliceOp.getMixedStrides());
    Value subview = rewriter.create<memref::SubViewOp>(
        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
        parallelInsertSliceOp.getMixedOffsets(),
        parallelInsertSliceOp.getMixedSizes(),
        parallelInsertSliceOp.getMixedStrides());

    // This memcpy will fold away if everything bufferizes in-place.
    if (failed(options.createMemCpy(rewriter, parallelInsertSliceOp.getLoc(),
                                    *srcBuffer, subview)))
      return failure();

    // If the source buffer is deallocated in the same block, make sure that
    // the deallocation (if any) comes after the memcpy.
    for (Operation *user : srcBuffer->getUsers()) {
      if (hasEffect<MemoryEffects::Free>(user)) {
        if (user->getBlock() == parallelCombiningParent->getBlock())
          rewriter.moveOpBefore(user, user->getBlock()->getTerminator());
        break;
      }
    }

    rewriter.eraseOp(op);
    return success();
  }
};
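
// Sketch of the splat rewrite (illustrative only; syntax abbreviated):
//
//   %t = tensor.splat %f : tensor<4xf32>
//
// becomes an allocation filled via linalg.map:
//
//   %a = bufferization.alloc_tensor() : tensor<4xf32>
//   %t = linalg.map outs(%a : tensor<4xf32>) () { linalg.yield %f : f32 }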
/// Bufferization of tensor.splat. Bufferizes to a new allocation that is
/// filled with a linalg.map. Similar to tensor.generate.
struct SplatOpInterface
    : public BufferizableOpInterface::ExternalModel<SplatOpInterface,
                                                    tensor::SplatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto splatOp = cast<tensor::SplatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, splatOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // Create the linalg::MapOp.
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    auto linalgOp = rewriter.create<linalg::MapOp>(
        loc, tensorType, /*inputs=*/ValueRange(),
        /*init=*/*tensorAlloc);
    Block &linalgBody = linalgOp.getMapper().emplaceBlock();

    // The body only yields the splat value.
    rewriter.setInsertionPointToStart(&linalgBody);
    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
    rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);

    return success();
  }
};
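
// Sketch of the concat rewrite (illustrative only):
//
//   %r = tensor.concat dim(0) %a, %b
//       : (tensor<2xf32>, tensor<3xf32>) -> tensor<5xf32>
//
// becomes an allocation plus one subview + copy per operand, each placed at
// its accumulated offset along the concatenation dimension.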
/// Bufferization of tensor.concat. Bufferizes to a new allocation that is
/// filled with copy ops. Similar to tensor.from_elements, but copies subviews
/// instead of storing individual elements.
struct ConcatOpInterface
    : public BufferizableOpInterface::ExternalModel<ConcatOpInterface,
                                                    tensor::ConcatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto concatOp = cast<tensor::ConcatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, concatOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    MemRefLayoutAttrInterface layout;
    MemRefType memrefType =
        MemRefType::get(concatOp.getResultType().getShape(),
                        concatOp.getResultType().getElementType(), layout);
    Value dstBuffer = rewriter.create<bufferization::ToBufferOp>(
        op->getLoc(), memrefType, *tensorAlloc);

    // Extract the concatenation dimension.
    uint64_t concatDim = concatOp.getDim();
    bool dynamicConcatDim = false;

    SmallVector<OpFoldResult> offsets(tensorType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(tensorType.getRank(),
                                      rewriter.getIndexAttr(1));
    SmallVector<OpFoldResult> sizes;

    for (const auto &[dimIdx, dimSize] :
         llvm::enumerate(tensorType.getShape())) {
      if (dimSize == ShapedType::kDynamic) {
        auto dimOp = rewriter.create<memref::DimOp>(loc, dstBuffer, dimIdx);
        sizes.push_back(dimOp.getResult());
        if (dimIdx == concatDim)
          dynamicConcatDim = true;
      } else {
        sizes.push_back(rewriter.getIndexAttr(dimSize));
      }
    }

    int64_t concatDimOffset = 0;
    std::optional<Value> dynamicOffset;
    std::optional<Value> dynamicSize;
    if (dynamicConcatDim) {
      // One or more operands have a dynamic size along the concat dimension,
      // so the offset must be accumulated with arith ops.
      dynamicOffset = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    }

    for (auto operand : concatOp.getInputs()) {
      // Get the buffer for the operand.
      FailureOr<Value> srcBuffer =
          getBuffer(rewriter, operand, options, state);
      if (failed(srcBuffer))
        return failure();

      // Each operand may have a different size along the concat dimension,
      // so the offset on that axis accumulates through the loop, and the
      // size changes to the size of the current operand.
      auto operandTensorType = cast<RankedTensorType>(operand.getType());
      int64_t operandConcatDimSize = operandTensorType.getDimSize(concatDim);

      if (dynamicConcatDim) {
        offsets[concatDim] = dynamicOffset.value();
        dynamicSize =
            rewriter.create<memref::DimOp>(loc, *srcBuffer, concatDim)
                .getResult();
        sizes[concatDim] = dynamicSize.value();
      } else {
        sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
        offsets[concatDim] = rewriter.getIndexAttr(concatDimOffset);
      }

      // Create a subview of the destination buffer and copy into it.
      auto dstMemrefType = cast<MemRefType>(memrefType);
      MemRefType subviewMemRefType =
          memref::SubViewOp::inferRankReducedResultType(
              operandTensorType.getShape(), dstMemrefType, offsets, sizes,
              strides);
      Value subview = rewriter.create<memref::SubViewOp>(
          loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);

      if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
        return failure();

      // Advance the offset along the concat dimension.
      if (dynamicConcatDim) {
        dynamicOffset = rewriter.create<arith::AddIOp>(
            loc, dynamicOffset.value(), dynamicSize.value());
      } else {
        concatDimOffset += operandConcatDimSize;
      }
    }

    replaceOpWithBufferizedValues(rewriter, op, dstBuffer);
    return success();
  }
};
void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
    CastOp::attachInterface<CastOpInterface>(*ctx);
    CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
    ConcatOp::attachInterface<ConcatOpInterface>(*ctx);
    DimOp::attachInterface<DimOpInterface>(*ctx);
    EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
    ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
    ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
    ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
    FromElementsOp::attachInterface<FromElementsOpInterface>(*ctx);
    GenerateOp::attachInterface<GenerateOpInterface>(*ctx);
    InsertOp::attachInterface<InsertOpInterface>(*ctx);
    InsertSliceOp::attachInterface<InsertSliceOpInterface>(*ctx);
    PadOp::attachInterface<PadOpInterface>(*ctx);
    ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
        *ctx);
    RankOp::attachInterface<RankOpInterface>(*ctx);
    ReshapeOp::attachInterface<ReshapeOpInterface>(*ctx);
    SplatOp::attachInterface<SplatOpInterface>(*ctx);

    // Load additional dialects of which ops may get created.
    ctx->loadDialect<arith::ArithDialect, linalg::LinalgDialect>();
  });
}