/// Bufferization of tensor.cast. Replace with memref.cast.
struct CastOpInterface
    : public BufferizableOpInterface::ExternalModel<CastOpInterface,
                                                    tensor::CastOp> {
  // ... (analysis methods elided)

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto castOp = cast<tensor::CastOp>(op);
    auto maybeSrcBufferType = asMemRefType(bufferization::getBufferType(
        castOp.getSource(), options, state, invocationStack));
    if (failed(maybeSrcBufferType))
      return failure();
    Attribute memorySpace = maybeSrcBufferType->getMemorySpace();

    // Case 1: Casting an unranked tensor. No static offset/strides can be
    // inferred from the source, so assume a fully dynamic layout.
    if (isa<UnrankedTensorType>(castOp.getSource().getType())) {
      return cast<BufferLikeType>(
          getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace));
    }

    // Case 2: Casting to an unranked tensor type.
    if (isa<UnrankedTensorType>(castOp.getType())) {
      return cast<BufferLikeType>(getMemRefTypeWithStaticIdentityLayout(
          castOp.getType(), memorySpace));
    }

    // Case 3: Ranked tensor -> ranked tensor. The source layout carries over.
    auto rankedResultType = cast<RankedTensorType>(castOp.getType());
    return cast<BufferLikeType>(MemRefType::get(
        rankedResultType.getShape(), rankedResultType.getElementType(),
        llvm::cast<MemRefType>(*maybeSrcBufferType).getLayout(), memorySpace));
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto castOp = cast<tensor::CastOp>(op);
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, castOp.getSource(), options, state);
    if (failed(resultBuffer))
      return failure();

    // Compute the new buffer type.
    auto resultMemRefType =
        bufferization::getBufferType(castOp.getResult(), options, state);
    if (failed(resultMemRefType))
      return failure();
    if (resultBuffer->getType() == *resultMemRefType) {
      // This cast is a no-op.
      replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
      return success();
    }

    // Replace the op with a memref.cast.
    assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
                                             *resultMemRefType) &&
           "CastOp::bufferize: cast incompatible");
    replaceOpWithNewBufferizedOp<memref::CastOp>(
        rewriter, op, *resultMemRefType, *resultBuffer);
    return success();
  }
};
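
// Illustrative sketch (hypothetical IR, not from this file; %m names the
// source buffer): a ranked-to-ranked cast such as
//
//   %1 = tensor.cast %0 : tensor<?xf32> to tensor<4xf32>
//
// bufferizes to a memref.cast on the source buffer, here assuming the source
// bufferized to an identity-layout memref:
//
//   %1 = memref.cast %m : memref<?xf32> to memref<4xf32>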

/// Bufferization of tensor.collapse_shape. Replace with memref.collapse_shape.
struct CollapseShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<CollapseShapeOpInterface,
                                                    tensor::CollapseShapeOp> {
  // ... (analysis methods elided)

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        collapseShapeOp.getSrc(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        srcBufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      // If the source cannot be collapsed, a new identity-layout buffer is
      // allocated instead.
      RankedTensorType tensorResultType = collapseShapeOp.getResultType();
      return cast<BufferLikeType>(getMemRefTypeWithStaticIdentityLayout(
          tensorResultType, srcBufferType.getMemorySpace()));
    }
    return cast<BufferLikeType>(memref::CollapseShapeOp::computeCollapsedType(
        srcBufferType, collapseShapeOp.getReassociationIndices()));
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    RankedTensorType tensorResultType = collapseShapeOp.getResultType();
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, collapseShapeOp.getSrc(), options, state);
    if (failed(maybeBuffer))
      return failure();
    Value buffer = *maybeBuffer;
    auto bufferType = cast<MemRefType>(buffer.getType());

    if (tensorResultType.getRank() == 0) {
      // 0-d collapses: compute the result type directly.
      MemRefType resultType;
      if (bufferType.getLayout().isIdentity()) {
        // Standard layout: the result type has no offset.
        MemRefLayoutAttrInterface layout;
        resultType = MemRefType::get({}, tensorResultType.getElementType(),
                                     layout, bufferType.getMemorySpace());
      } else {
        // Source memref has a non-identity layout: keep the offset.
        SmallVector<int64_t> strides;
        int64_t offset;
        if (failed(bufferType.getStridesAndOffset(strides, offset)))
          return failure();
        resultType = MemRefType::get(
            {}, tensorResultType.getElementType(),
            StridedLayoutAttr::get(op->getContext(), offset, {}),
            bufferType.getMemorySpace());
      }
      replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
          rewriter, op, resultType, buffer, collapseShapeOp.getReassociation());
      return success();
    }

    // If the source buffer is not guaranteed collapsible, bufferize
    // out-of-place: copy into a fresh identity-layout buffer first.
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        bufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), collapseShapeOp.getSrc(), options, state);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType =
          MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                          collapseShapeOp.getSrcType().getElementType(),
                          AffineMap(), bufferType.getMemorySpace());
      buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
                                                 memrefType, *tensorAlloc);
    }

    replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
        rewriter, op, buffer, collapseShapeOp.getReassociationIndices());
    return success();
  }
};
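
// Illustrative sketch (hypothetical IR): when the source buffer is guaranteed
// collapsible, e.g. an identity-layout memref,
//
//   %1 = tensor.collapse_shape %0 [[0, 1]] : tensor<2x3xf32> into tensor<6xf32>
//
// becomes
//
//   %1 = memref.collapse_shape %m [[0, 1]] : memref<2x3xf32> into memref<6xf32>
//
// Otherwise the source is first copied into an identity-layout allocation.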

/// Bufferization of tensor.dim. Replace with memref.dim.
struct DimOpInterface
    : public BufferizableOpInterface::ExternalModel<DimOpInterface,
                                                    tensor::DimOp> {
  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto dimOp = cast<tensor::DimOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, dimOp.getSource(), options, state);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, *v,
                                                dimOp.getIndex());
    return success();
  }
};
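
// Illustrative sketch (hypothetical IR):
//
//   %d = tensor.dim %t, %c0 : tensor<?x8xf32>
//
// becomes
//
//   %d = memref.dim %m, %c0 : memref<?x8xf32>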

/// Bufferization of tensor.empty. Replaced with a bufferization.alloc_tensor.
struct EmptyOpInterface
    : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
                                                    tensor::EmptyOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto emptyOp = cast<tensor::EmptyOp>(op);
    // Optimization: Fold away the op if it has no uses.
    if (op->getUses().empty()) {
      rewriter.eraseOp(op);
      return success();
    }
    // Allocate a tensor (emits a bufferization.alloc_tensor op).
    FailureOr<Value> allocTensor = allocateTensorForShapedValue(
        rewriter, op->getLoc(), emptyOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(allocTensor))
      return failure();
    rewriter.replaceOp(op, *allocTensor);
    return success();
  }
};
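
// Illustrative sketch (hypothetical IR): a used tensor.empty such as
//
//   %0 = tensor.empty() : tensor<8xf32>
//
// is rewritten to
//
//   %0 = bufferization.alloc_tensor() : tensor<8xf32>
//
// while an unused tensor.empty is simply erased.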

/// Bufferization of tensor.expand_shape. Replace with memref.expand_shape.
struct ExpandShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
                                                    tensor::ExpandShapeOp> {
  // ... (analysis methods elided)

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        expandShapeOp.getSrc(), options, state, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    auto maybeResultType = memref::ExpandShapeOp::computeExpandedType(
        srcBufferType, expandShapeOp.getResultType().getShape(),
        expandShapeOp.getReassociationIndices());
    if (failed(maybeResultType))
      return failure();
    return cast<BufferLikeType>(*maybeResultType);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto tensorResultType = expandShapeOp.getResultType();
    FailureOr<Value> buffer =
        getBuffer(rewriter, expandShapeOp.getSrc(), options, state);
    if (failed(buffer))
      return failure();
    auto memrefExpandShape = memref::ExpandShapeOp::create(
        rewriter, op->getLoc(), tensorResultType.getShape(), *buffer,
        expandShapeOp.getReassociationIndices(),
        expandShapeOp.getMixedOutputShape());
    replaceOpWithBufferizedValues(rewriter, op,
                                  memrefExpandShape->getResults());
    return success();
  }
};
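
// Illustrative sketch (hypothetical IR):
//
//   %1 = tensor.expand_shape %0 [[0, 1]] output_shape [2, 3]
//       : tensor<6xf32> into tensor<2x3xf32>
//
// becomes
//
//   %1 = memref.expand_shape %m [[0, 1]] output_shape [2, 3]
//       : memref<6xf32> into memref<2x3xf32>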

/// Bufferization of tensor.extract_slice. Replace with memref.subview.
struct ExtractSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractSliceOpInterface,
                                                    tensor::ExtractSliceOp> {
  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    Location loc = extractSliceOp.getLoc();

    // Get the source buffer and take a subview of it.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractSliceOp.getSource(), options, state);
    if (failed(srcMemref))
      return failure();
    auto resultMemrefType = bufferization::getBufferType(
        extractSliceOp.getResult(), options, state);
    if (failed(resultMemrefType))
      return failure();
    Value subView = memref::SubViewOp::create(
        rewriter, loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
        mixedOffsets, mixedSizes, mixedStrides);

    replaceOpWithBufferizedValues(rewriter, op, subView);
    return success();
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    assert(value == extractSliceOp.getResult() && "invalid value");
    auto srcMemrefType = bufferization::getBufferType(
        extractSliceOp.getSource(), options, state, invocationStack);
    if (failed(srcMemrefType))
      return failure();
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    return cast<BufferLikeType>(memref::SubViewOp::inferRankReducedResultType(
        extractSliceOp.getType().getShape(),
        llvm::cast<MemRefType>(*srcMemrefType), mixedOffsets, mixedSizes,
        mixedStrides));
  }
};
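
// Illustrative sketch (hypothetical IR): the slice
//
//   %1 = tensor.extract_slice %0[2] [4] [1] : tensor<8xf32> to tensor<4xf32>
//
// becomes a subview with the inferred rank-reduced result type:
//
//   %1 = memref.subview %m[2] [4] [1]
//       : memref<8xf32> to memref<4xf32, strided<[1], offset: 2>>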

/// Bufferization of tensor.extract. Replace with memref.load.
struct ExtractOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractOpInterface,
                                                    tensor::ExtractOp> {
  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto extractOp = cast<tensor::ExtractOp>(op);
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractOp.getTensor(), options, state);
    if (failed(srcMemref))
      return failure();
    replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, *srcMemref,
                                                 extractOp.getIndices());
    return success();
  }
};
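
// Illustrative sketch (hypothetical IR):
//
//   %v = tensor.extract %t[%i] : tensor<8xf32>
//
// becomes
//
//   %v = memref.load %m[%i] : memref<8xf32>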

// Implements backtracking to traverse indices of the output buffer while
// iterating over the op's elements.
static void createStores(RewriterBase &rewriter, Location loc, int dim,
                         Value buffer, ArrayRef<int64_t> shape,
                         ArrayRef<Value> constants,
                         OperandRange::iterator &elementIt,
                         SmallVectorImpl<Value> &indices) {
  if (dim == static_cast<int>(shape.size()) - 1) {
    for (int i = 0; i < shape.back(); ++i) {
      indices.back() = constants[i];
      memref::StoreOp::create(rewriter, loc, *elementIt, buffer, indices);
      ++elementIt;
    }
    return;
  }
  for (int i = 0; i < shape[dim]; ++i) {
    indices[dim] = constants[i];
    createStores(rewriter, loc, dim + 1, buffer, shape, constants, elementIt,
                 indices);
  }
}

/// Bufferization of tensor.from_elements.
struct FromElementsOpInterface
    : public BufferizableOpInterface::ExternalModel<FromElementsOpInterface,
                                                    tensor::FromElementsOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto fromElementsOp = cast<tensor::FromElementsOp>(op);
    auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());

    // Allocate a buffer for the result.
    Location loc = op->getLoc();
    auto shape = tensorType.getShape();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, fromElementsOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    FailureOr<BufferLikeType> memrefType =
        bufferization::getBufferType(*tensorAlloc, options, state);
    if (failed(memrefType))
      return failure();
    Value buffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
                                                     *memrefType, *tensorAlloc);

    // Case: empty tensor (no elements).
    if (fromElementsOp.getElements().empty()) {
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Case: 0-d tensor (a single element).
    if (shape.empty()) {
      memref::StoreOp::create(rewriter, loc,
                              fromElementsOp.getElements().front(), buffer);
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Create constants for the range of possible indices [0, max{shape_i}).
    auto maxDim = *llvm::max_element(shape);
    SmallVector<Value, 2> constants;
    constants.reserve(maxDim);
    for (int i = 0; i < maxDim; ++i)
      constants.push_back(arith::ConstantIndexOp::create(rewriter, loc, i));

    // Traverse all elements and create memref.store ops.
    auto elementIt = fromElementsOp.getElements().begin();
    SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
    createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                 indices);

    replaceOpWithBufferizedValues(rewriter, op, buffer);
    return success();
  }
};
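
// Illustrative sketch (hypothetical IR): after the intermediate alloc_tensor
// has also been bufferized,
//
//   %0 = tensor.from_elements %a, %b : tensor<2xf32>
//
// ends up roughly as
//
//   %buf = memref.alloc() : memref<2xf32>
//   memref.store %a, %buf[%c0] : memref<2xf32>
//   memref.store %b, %buf[%c1] : memref<2xf32>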

/// Lower the body of a tensor.generate-like op into a linalg.map that writes
/// into the given destination tensor.
static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
                                     Value tensorDestination,
                                     ValueRange dynamicSizes,
                                     Region &generateBody) {
  assert(generateBody.hasOneBlock() && "expected body with single block");
  auto tensorType = cast<RankedTensorType>(tensorDestination.getType());

  // Create a linalg.map with the destination tensor as its only init operand.
  OpBuilder::InsertionGuard g(rewriter);
  auto linalgOp =
      linalg::MapOp::create(rewriter, loc, tensorType, /*inputs=*/ValueRange(),
                            /*init=*/tensorDestination);
  Block &linalgBody = linalgOp.getMapper().emplaceBlock();

  // Create a linalg.index op for each tensor dimension.
  rewriter.setInsertionPointToStart(&linalgBody);
  SmallVector<Value> indices;
  for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
    indices.push_back(linalg::IndexOp::create(rewriter, loc, dim));

  // Move the body over, substituting the index values for the block args.
  rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
  auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
  rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOp.getValue());

  return linalgOp.getResult()[0];
}

/// Bufferization of tensor.generate.
struct GenerateOpInterface
    : public BufferizableOpInterface::ExternalModel<GenerateOpInterface,
                                                    tensor::GenerateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto generateOp = cast<tensor::GenerateOp>(op);

    auto type = generateOp.getResult().getType();
    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(type) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, generateOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    Value result = lowerGenerateLikeOpBody(rewriter, loc, *tensorAlloc,
                                           generateOp.getDynamicExtents(),
                                           generateOp.getBody());
    rewriter.replaceOp(generateOp, result);
    return success();
  }
};
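
// Illustrative sketch (hypothetical IR): the op
//
//   %0 = tensor.generate {
//   ^bb0(%i: index):
//     ...
//     tensor.yield %elem : f32
//   } : tensor<8xf32>
//
// becomes a bufferization.alloc_tensor initialized by a linalg.map whose body
// is the moved generate body, with linalg.index ops standing in for the block
// arguments.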

/// Bufferization of tensor.insert. Replace with memref.store.
struct InsertOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertOpInterface,
                                                     tensor::InsertOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto insertOp = cast<tensor::InsertOp>(op);
    FailureOr<Value> destMemref =
        getBuffer(rewriter, insertOp.getDest(), options, state);
    if (failed(destMemref))
      return failure();
    memref::StoreOp::create(rewriter, insertOp.getLoc(), insertOp.getScalar(),
                            *destMemref, insertOp.getIndices());
    replaceOpWithBufferizedValues(rewriter, op, *destMemref);
    return success();
  }
};
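
// Illustrative sketch (hypothetical IR):
//
//   %1 = tensor.insert %f into %t[%i] : tensor<8xf32>
//
// becomes a store into the (possibly in-place) destination buffer:
//
//   memref.store %f, %m[%i] : memref<8xf32>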

template <typename InsertOpTy>
static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
                                      OpOperand &opOperand) {
  // The source is always read.
  if (opOperand == insertSliceOp.getSourceMutable())
    return true;

  // For the destination, it depends...
  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");

  // The dest is not read if it is entirely overwritten: all offsets are zero,
  // the sizes match the dest shape, and all strides are one.
  bool allOffsetsZero =
      llvm::all_of(insertSliceOp.getMixedOffsets(), isZeroInteger);
  RankedTensorType destType = insertSliceOp.getDestType();
  bool sizesMatchDestSizes =
      areConstantIntValues(insertSliceOp.getMixedSizes(), destType.getShape());
  bool allStridesOne =
      areAllConstantIntValue(insertSliceOp.getMixedStrides(), 1);
  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
}
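
// Example (hypothetical IR): the destination of
//
//   %r = tensor.insert_slice %src into %dst[0] [10] [1]
//       : tensor<10xf32> into tensor<10xf32>
//
// is entirely overwritten (zero offsets, dest-shaped sizes, unit strides), so
// the dest operand does not count as read.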

/// Bufferization of tensor.insert_slice. Replaced with a memory copy. Under
/// certain circumstances, this op can also be a no-op.
struct InsertSliceOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertSliceOpInterface,
                                                     tensor::InsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
                                     opOperand);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
    Location loc = insertSliceOp.getLoc();

    // Get the destination buffer and take a subview of it.
    FailureOr<Value> dstMemref =
        getBuffer(rewriter, insertSliceOp.getDest(), options, state);
    if (failed(dstMemref))
      return failure();
    auto dstMemrefType = cast<MemRefType>(dstMemref->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            insertSliceOp.getSourceType().getShape(), dstMemrefType,
            mixedOffsets, mixedSizes, mixedStrides);
    Value subView =
        memref::SubViewOp::create(rewriter, loc, subviewMemRefType, *dstMemref,
                                  mixedOffsets, mixedSizes, mixedStrides);

    // Copy the source buffer into the subview. If this insert_slice has a
    // matching extract_slice, the copy can fold away later.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, insertSliceOp.getSource(), options, state);
    if (failed(srcMemref))
      return failure();
    if (failed(options.createMemCpy(rewriter, loc, *srcMemref, subView)))
      return failure();

    replaceOpWithBufferizedValues(rewriter, op, *dstMemref);
    return success();
  }
};
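
// Illustrative sketch (hypothetical IR; assumes the default createMemCpy,
// which emits memref.copy):
//
//   %r = tensor.insert_slice %src into %dst[%off] [4] [1]
//       : tensor<4xf32> into tensor<8xf32>
//
// becomes
//
//   %sv = memref.subview %dstm[%off] [4] [1]
//       : memref<8xf32> to memref<4xf32, strided<[1], offset: ?>>
//   memref.copy %srcm, %sv
//       : memref<4xf32> to memref<4xf32, strided<[1], offset: ?>>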

/// Bufferization of tensor.pad. Replaced with bufferization.alloc_tensor +
/// linalg.map + tensor.insert_slice.
struct PadOpInterface
    : public BufferizableOpInterface::ExternalModel<PadOpInterface,
                                                    tensor::PadOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  // ... (analysis methods elided)

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    // Infer the memory space from the source tensor.
    auto padOp = cast<tensor::PadOp>(op);
    auto maybeSrcBufferType = asMemRefType(bufferization::getBufferType(
        padOp.getSource(), options, state, invocationStack));
    if (failed(maybeSrcBufferType))
      return failure();
    MemRefLayoutAttrInterface layout;
    return cast<BufferLikeType>(
        MemRefType::get(padOp.getResultType().getShape(),
                        padOp.getResultType().getElementType(), layout,
                        maybeSrcBufferType->getMemorySpace()));
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto padOp = cast<tensor::PadOp>(op);
    Location loc = padOp.getLoc();
    RankedTensorType resultType = padOp.getResultType();
    RankedTensorType srcType = padOp.getSourceType();

    auto toValue = [&](OpFoldResult ofr) -> Value {
      if (auto value = dyn_cast<Value>(ofr))
        return value;
      return arith::ConstantIndexOp::create(rewriter, loc,
                                            *getConstantIntValue(ofr));
    };

    // Compute dynamic result dimensions: srcDim + lowPad + highPad.
    SmallVector<OpFoldResult> mixedLowPad = padOp.getMixedLowPad();
    SmallVector<OpFoldResult> mixedHighPad = padOp.getMixedHighPad();
    SmallVector<Value> dynamicSizes;
    for (int64_t i = 0; i < resultType.getRank(); ++i) {
      if (!resultType.isDynamicDim(i))
        continue;
      Value srcDim = tensor::DimOp::create(rewriter, loc, padOp.getSource(), i);
      Value lowPad = toValue(mixedLowPad[i]);
      Value highPad = toValue(mixedHighPad[i]);
      AffineExpr s0, s1, s2;
      bindSymbols(op->getContext(), s0, s1, s2);
      AffineExpr sumExpr = s0 + s1 + s2;
      Value sum = affine::AffineApplyOp::create(
          rewriter, loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
      dynamicSizes.push_back(sum);
    }

    // Allocate a buffer for the padded result.
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, padOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // tensor.pad is like tensor.generate, except that only part of the result
    // tensor is generated; reuse the same body-lowering helper.
    Value filledBuffer = lowerGenerateLikeOpBody(
        rewriter, loc, *tensorAlloc, dynamicSizes, padOp.getBodyRegion());

    // Insert the source tensor into the padded (filled) tensor.
    SmallVector<OpFoldResult> sliceSizes =
        getMixedSizes(rewriter, loc, padOp.getSource());
    SmallVector<OpFoldResult> sliceStrides(srcType.getRank(),
                                           rewriter.getIndexAttr(1));
    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
        padOp, padOp.getSource(), filledBuffer,
        /*offsets=*/padOp.getMixedLowPad(), sliceSizes, sliceStrides);
    return success();
  }
};
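
// Illustrative sketch (hypothetical IR): conceptually,
//
//   %r = tensor.pad %t low[1] high[1] { ... } : tensor<6xf32> to tensor<8xf32>
//
// is lowered as: allocate a tensor<8xf32>, fill it with the pad body via
// linalg.map, then tensor.insert_slice %t into the filled tensor at offset 1.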

/// Bufferization of tensor.rank. Replace with memref.rank.
struct RankOpInterface
    : public BufferizableOpInterface::ExternalModel<RankOpInterface,
                                                    tensor::RankOp> {
  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto rankOp = cast<tensor::RankOp>(op);
    FailureOr<Value> v =
        getBuffer(rewriter, rankOp.getTensor(), options, state);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op, rankOp.getType(),
                                                 *v);
    return success();
  }
};

/// Bufferization of tensor.reshape. Replace with memref.reshape.
struct ReshapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ReshapeOpInterface,
                                                    tensor::ReshapeOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Only the shape operand is read.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    return opOperand == reshapeOp.getShapeMutable();
  }

  // ... (other analysis methods elided)

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    // Only the source operand aliases the result.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    if (reshapeOp.getSourceMutable() != opOperand)
      return {};
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, reshapeOp.getSource(), options, state);
    FailureOr<Value> shapeBuffer =
        getBuffer(rewriter, reshapeOp.getShape(), options, state);
    if (failed(srcBuffer) || failed(shapeBuffer))
      return failure();
    auto maybeResultMemRefType =
        bufferization::getBufferType(reshapeOp.getResult(), options, state);
    if (failed(maybeResultMemRefType))
      return failure();

    // memref.reshape requires the source buffer to have an identity layout.
    // If the source does not, copy it into a new identity-layout buffer.
    auto srcType = llvm::dyn_cast<MemRefType>(srcBuffer->getType());
    if (srcType && !srcType.getLayout().isIdentity()) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), reshapeOp.getSource(), options, state);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType = MemRefType::get(
          srcType.getShape(), srcType.getElementType(), AffineMap(),
          cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
      srcBuffer = bufferization::ToBufferOp::create(rewriter, op->getLoc(),
                                                    memrefType, *tensorAlloc)
                      .getResult();
    }

    replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
        rewriter, op, maybeResultMemRefType.value(), *srcBuffer, *shapeBuffer);
    return success();
  }

  FailureOr<BufferLikeType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                const BufferizationState &state,
                SmallVector<Value> &invocationStack) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    assert(value == reshapeOp.getResult() && "unexpected value provided");
    auto maybeSourceBufferType = bufferization::getBufferType(
        reshapeOp.getSource(), options, state, invocationStack);
    if (failed(maybeSourceBufferType))
      return failure();
    return cast<BufferLikeType>(getMemRefTypeWithStaticIdentityLayout(
        reshapeOp.getResult().getType(),
        cast<BaseMemRefType>(maybeSourceBufferType.value()).getMemorySpace()));
  }
};
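
// Illustrative sketch (hypothetical IR):
//
//   %r = tensor.reshape %t(%shape)
//       : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>
//
// becomes
//
//   %r = memref.reshape %m(%shapeBuf)
//       : (memref<?xf32>, memref<2xindex>) -> memref<?x?xf32>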

/// Bufferization of tensor.parallel_insert_slice.
struct ParallelInsertSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<
          ParallelInsertSliceOpInterface, ParallelInsertSliceOp> {
  // ... (other analysis methods elided)

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return opOperand == cast<ParallelInsertSliceOp>(op).getSourceMutable();
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    return opOperand == parallelInsertSliceOp.getDestMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    ParallelCombiningOpInterface parallelCombiningParent =
        parallelInsertSliceOp.getParallelCombiningParent();

    // Bufferize the op outside of the parallel combining terminator.
    rewriter.setInsertionPoint(parallelCombiningParent);

    // Get the source and destination buffers.
    FailureOr<Value> destBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getDest(), options, state);
    if (failed(destBuffer))
      return failure();
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getSource(), options, state);
    if (failed(srcBuffer))
      return failure();

    // Take a subview of the destination buffer.
    auto destBufferType = cast<MemRefType>(destBuffer->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            parallelInsertSliceOp.getSourceType().getShape(), destBufferType,
            parallelInsertSliceOp.getMixedOffsets(),
            parallelInsertSliceOp.getMixedSizes(),
            parallelInsertSliceOp.getMixedStrides());
    Value subview = memref::SubViewOp::create(
        rewriter, parallelInsertSliceOp.getLoc(), subviewMemRefType,
        *destBuffer, parallelInsertSliceOp.getMixedOffsets(),
        parallelInsertSliceOp.getMixedSizes(),
        parallelInsertSliceOp.getMixedStrides());

    // This memcpy will fold away if everything bufferizes in-place.
    if (failed(options.createMemCpy(rewriter, parallelInsertSliceOp.getLoc(),
                                    *srcBuffer, subview)))
      return failure();

    // Make sure that a dealloc of the source buffer (if any) is emitted after
    // the memcpy, i.e., before the terminator of the parallel combining
    // parent's block.
    for (Operation *user : srcBuffer->getUsers()) {
      if (hasEffect<MemoryEffects::Free>(user)) {
        if (user->getBlock() == parallelCombiningParent->getBlock())
          rewriter.moveOpBefore(user, user->getBlock()->getTerminator());
      }
    }

    rewriter.eraseOp(op);
    return success();
  }
};

/// Bufferization of tensor.splat. Bufferizes to a new allocation that is
/// filled with a linalg.map. Similar to tensor.generate.
struct SplatOpInterface
    : public BufferizableOpInterface::ExternalModel<SplatOpInterface,
                                                    tensor::SplatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto splatOp = cast<tensor::SplatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, splatOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // Create a linalg.map that fills the buffer with the splat value.
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    auto linalgOp = linalg::MapOp::create(rewriter, loc, tensorType,
                                          /*inputs=*/ValueRange(),
                                          /*init=*/*tensorAlloc);
    Block &linalgBody = linalgOp.getMapper().emplaceBlock();

    rewriter.setInsertionPointToStart(&linalgBody);
    linalg::YieldOp::create(rewriter, loc, splatOp.getInput());
    rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
    return success();
  }
};
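
// Illustrative sketch (hypothetical IR):
//
//   %0 = tensor.splat %f : tensor<8xf32>
//
// becomes a bufferization.alloc_tensor initialized by a linalg.map with no
// inputs whose body simply yields %f for every element.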

/// Bufferization of tensor.concat. Bufferizes to a new allocation that is
/// filled with copy ops. Similar to tensor.from_elements, but with subview
/// copies instead of element-wise stores.
struct ConcatOpInterface
    : public BufferizableOpInterface::ExternalModel<ConcatOpInterface,
                                                    tensor::ConcatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  // ... (analysis methods elided)

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options,
                          BufferizationState &state) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto concatOp = cast<tensor::ConcatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, concatOp.getResult(), options, state,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    MemRefLayoutAttrInterface layout;
    MemRefType memrefType =
        MemRefType::get(concatOp.getResultType().getShape(),
                        concatOp.getResultType().getElementType(), layout);
    Value dstBuffer = bufferization::ToBufferOp::create(
        rewriter, op->getLoc(), memrefType, *tensorAlloc);

    // Extract the dimension for the concat op.
    uint64_t concatDim = concatOp.getDim();
    bool dynamicConcatDim = false;

    SmallVector<OpFoldResult> offsets(tensorType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(tensorType.getRank(),
                                      rewriter.getIndexAttr(1));
    SmallVector<OpFoldResult> sizes;

    for (const auto &[dimIdx, dimSize] :
         llvm::enumerate(tensorType.getShape())) {
      if (dimSize == ShapedType::kDynamic) {
        auto dimOp = memref::DimOp::create(rewriter, loc, dstBuffer, dimIdx);
        sizes.push_back(dimOp.getResult());
        if (dimIdx == concatDim)
          dynamicConcatDim = true;
      } else {
        sizes.push_back(rewriter.getIndexAttr(dimSize));
      }
    }

    int64_t concatDimOffset = 0;
    std::optional<Value> dynamicOffset;
    std::optional<Value> dynamicSize;
    if (dynamicConcatDim) {
      // The offset in the concat dimension is computed dynamically.
      dynamicOffset = arith::ConstantIndexOp::create(rewriter, loc, 0);
    }

    for (auto operand : concatOp.getInputs()) {
      // Get the buffer for the operand.
      FailureOr<Value> srcBuffer = getBuffer(rewriter, operand, options, state);
      if (failed(srcBuffer))
        return failure();

      // Each operand occupies a contiguous slice along the concat dimension.
      auto operandTensorType = cast<RankedTensorType>(operand.getType());
      int64_t operandConcatDimSize = operandTensorType.getDimSize(concatDim);

      if (dynamicConcatDim) {
        offsets[concatDim] = dynamicOffset.value();
        dynamicSize =
            memref::DimOp::create(rewriter, loc, *srcBuffer, concatDim)
                .getResult();
        sizes[concatDim] = dynamicSize.value();
      } else {
        sizes[concatDim] = rewriter.getIndexAttr(operandConcatDimSize);
        offsets[concatDim] = rewriter.getIndexAttr(concatDimOffset);
      }

      // Create a subview of the destination buffer and copy into it.
      auto dstMemrefType = cast<MemRefType>(memrefType);
      MemRefType subviewMemRefType =
          memref::SubViewOp::inferRankReducedResultType(
              operandTensorType.getShape(), dstMemrefType, offsets, sizes,
              strides);
      Value subview = memref::SubViewOp::create(
          rewriter, loc, subviewMemRefType, dstBuffer, offsets, sizes, strides);

      if (failed(options.createMemCpy(rewriter, loc, *srcBuffer, subview)))
        return failure();

      // Advance the offset along the concat dimension.
      if (dynamicConcatDim) {
        dynamicOffset = arith::AddIOp::create(
            rewriter, loc, dynamicOffset.value(), dynamicSize.value());
      } else {
        concatDimOffset += operandConcatDimSize;
      }
    }

    replaceOpWithBufferizedValues(rewriter, op, dstBuffer);
    return success();
  }
};
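
// Illustrative sketch (hypothetical IR; assumes the default memref.copy
// memcpy): for
//
//   %r = tensor.concat dim(0) %a, %b
//       : (tensor<2xf32>, tensor<3xf32>) -> tensor<5xf32>
//
// a 5-element buffer is allocated and each input is copied into its slice:
//
//   memref.copy %am, %sv0   // %sv0 = subview of %buf at [0] [2] [1]
//   memref.copy %bm, %sv1   // %sv1 = subview of %buf at [2] [3] [1]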

void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
    CastOp::attachInterface<CastOpInterface>(*ctx);
    CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
    ConcatOp::attachInterface<ConcatOpInterface>(*ctx);
    DimOp::attachInterface<DimOpInterface>(*ctx);
    EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
    ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
    ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
    ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
    FromElementsOp::attachInterface<FromElementsOpInterface>(*ctx);
    GenerateOp::attachInterface<GenerateOpInterface>(*ctx);
    InsertOp::attachInterface<InsertOpInterface>(*ctx);
    InsertSliceOp::attachInterface<InsertSliceOpInterface>(*ctx);
    PadOp::attachInterface<PadOpInterface>(*ctx);
    ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
        *ctx);
    RankOp::attachInterface<RankOpInterface>(*ctx);
    ReshapeOp::attachInterface<ReshapeOpInterface>(*ctx);
    SplatOp::attachInterface<SplatOpInterface>(*ctx);

    // Load additional dialects of which ops may get created.
    ctx->loadDialect<arith::ArithDialect, linalg::LinalgDialect>();
  });
}
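
// Typical client wiring (sketch; `registry` and `context` are hypothetical
// caller-side names):
//
//   DialectRegistry registry;
//   tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   context.appendDialectRegistry(registry);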