/// Bufferization of tensor.cast. Replace with memref.cast.
struct CastOpInterface
    : public BufferizableOpInterface::ExternalModel<CastOpInterface,
                                                    tensor::CastOp> {
  // ... (read/write/aliasing queries elided)

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto castOp = cast<tensor::CastOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        castOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    Attribute memorySpace = maybeSrcBufferType->getMemorySpace();

    // Case 1: Casting an unranked tensor. No static layout can be inferred
    // from the source, so assume a fully dynamic layout.
    if (isa<UnrankedTensorType>(castOp.getSource().getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 2: Casting to an unranked tensor.
    if (isa<UnrankedTensorType>(castOp.getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 3: Ranked tensor -> ranked tensor. The layout does not change.
    auto rankedResultType = cast<RankedTensorType>(castOp.getType());
    return MemRefType::get(
        rankedResultType.getShape(), rankedResultType.getElementType(),
        llvm::cast<MemRefType>(*maybeSrcBufferType).getLayout(), memorySpace);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto castOp = cast<tensor::CastOp>(op);
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, castOp.getSource(), options);
    if (failed(resultBuffer))
      return failure();
    auto resultMemRefType =
        bufferization::getBufferType(castOp.getResult(), options);
    if (failed(resultMemRefType))
      return failure();
    if (resultBuffer->getType() == *resultMemRefType) {
      // The cast is a no-op after bufferization.
      replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
      return success();
    }

    // Replace the op with a memref.cast.
    assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
                                             *resultMemRefType) &&
           "CastOp::bufferize: cast incompatible");
    replaceOpWithNewBufferizedOp<memref::CastOp>(
        rewriter, op, *resultMemRefType, *resultBuffer);
    return success();
  }
};
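
// Illustrative sketch (not from the original source): assuming the source
// bufferizes to a memref with a fully dynamic strided layout, a ranked cast
//   %0 = tensor.cast %t : tensor<?x16xf32> to tensor<4x16xf32>
// would lower to
//   %0 = memref.cast %m : memref<?x16xf32, strided<[?, ?], offset: ?>>
//                      to memref<4x16xf32, strided<[?, ?], offset: ?>>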
/// Bufferization of tensor.collapse_shape. Replace with memref.collapse_shape.
struct CollapseShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<CollapseShapeOpInterface,
                                                    tensor::CollapseShapeOp> {
  // ... (read/write/aliasing queries elided)

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        collapseShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        srcBufferType, collapseShapeOp.getReassociationIndices());

    if (!canBeCollapsed) {
      // If the source cannot be collapsed in place, `bufferize` copies it
      // into a new buffer with identity layout first.
      RankedTensorType tensorResultType = collapseShapeOp.getResultType();
      return getMemRefTypeWithStaticIdentityLayout(
          tensorResultType, srcBufferType.getMemorySpace());
    }

    return memref::CollapseShapeOp::computeCollapsedType(
        srcBufferType, collapseShapeOp.getReassociationIndices());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    RankedTensorType tensorResultType = collapseShapeOp.getResultType();
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, collapseShapeOp.getSrc(), options);
    if (failed(maybeBuffer))
      return failure();
    Value buffer = *maybeBuffer;
    auto bufferType = cast<MemRefType>(buffer.getType());

    if (tensorResultType.getRank() == 0) {
      // 0-d collapses: the result layout depends on the source layout.
      MemRefType resultType;
      if (bufferType.getLayout().isIdentity()) {
        // Standard layout: the result has no offset.
        MemRefLayoutAttrInterface layout;
        resultType = MemRefType::get({}, tensorResultType.getElementType(),
                                     layout, bufferType.getMemorySpace());
      } else {
        // Strided source layout: the result keeps the source offset.
        SmallVector<int64_t> strides;
        int64_t offset;
        if (failed(getStridesAndOffset(bufferType, strides, offset)))
          return failure();
        resultType = MemRefType::get(
            {}, tensorResultType.getElementType(),
            StridedLayoutAttr::get(op->getContext(), offset, {}),
            bufferType.getMemorySpace());
      }
      replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
          rewriter, op, resultType, buffer,
          collapseShapeOp.getReassociation());
      return success();
    }

    // If the collapse cannot be expressed as a view of the source buffer,
    // copy the source into a fresh buffer with identity layout.
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        bufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), collapseShapeOp.getSrc(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType =
          MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                          collapseShapeOp.getSrcType().getElementType(),
                          AffineMap(), bufferType.getMemorySpace());
      buffer = rewriter.create<bufferization::ToMemrefOp>(
          op->getLoc(), memrefType, *tensorAlloc);
    }

    // The result type is inferred by the builder.
    replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
        rewriter, op, buffer, collapseShapeOp.getReassociationIndices());
    return success();
  }
};
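
// Illustrative sketch (not from the original source): a collapse of a
// contiguous buffer folds into a view,
//   %0 = tensor.collapse_shape %t [[0, 1]] : tensor<2x4xf32> into tensor<8xf32>
//   =>
//   %0 = memref.collapse_shape %m [[0, 1]] : memref<2x4xf32> into memref<8xf32>
// whereas a source layout that is not guaranteed collapsible (e.g. a
// non-contiguous subview) first goes through the identity-layout copy above.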
/// Bufferization of tensor.dim. Replace with memref.dim.
struct DimOpInterface
    : public BufferizableOpInterface::ExternalModel<DimOpInterface,
                                                    tensor::DimOp> {
  // ...
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto dimOp = cast<tensor::DimOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, dimOp.getSource(), options);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, *v,
                                                dimOp.getIndex());
    return success();
  }
};
/// Bufferization of tensor.empty. Replace with bufferization.alloc_tensor.
struct EmptyOpInterface
    : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
                                                    tensor::EmptyOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto emptyOp = cast<tensor::EmptyOp>(op);
    // Optimization: fold away the op if it has no uses.
    if (op->getUses().empty()) {
      rewriter.eraseOp(op);
      return success();
    }
    // Allocate a tensor (emits a bufferization.alloc_tensor op).
    FailureOr<Value> allocTensor = allocateTensorForShapedValue(
        rewriter, op->getLoc(), emptyOp.getResult(), options, /*copy=*/false);
    if (failed(allocTensor))
      return failure();
    rewriter.replaceOp(op, *allocTensor);
    return success();
  }
};
/// Bufferization of tensor.expand_shape. Replace with memref.expand_shape.
struct ExpandShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
                                                    tensor::ExpandShapeOp> {
  // ... (read/write/aliasing queries elided)

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        expandShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    auto maybeResultType = memref::ExpandShapeOp::computeExpandedType(
        srcBufferType, expandShapeOp.getResultType().getShape(),
        expandShapeOp.getReassociationIndices());
    if (failed(maybeResultType))
      return failure();
    return *maybeResultType;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto tensorResultType = expandShapeOp.getResultType();
    FailureOr<Value> buffer =
        getBuffer(rewriter, expandShapeOp.getSrc(), options);
    if (failed(buffer))
      return failure();
    // The memref result type is inferred by the builder from the
    // reassociation indices and the result shape.
    replaceOpWithNewBufferizedOp<memref::ExpandShapeOp>(
        rewriter, op, tensorResultType.getShape(), *buffer,
        expandShapeOp.getReassociationIndices());
    return success();
  }
};
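
// Illustrative sketch (not from the original source; output_shape operands
// omitted for brevity):
//   %0 = tensor.expand_shape %t [[0, 1]] : tensor<8xf32> into tensor<2x4xf32>
//   =>
//   %0 = memref.expand_shape %m [[0, 1]] : memref<8xf32> into memref<2x4xf32>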
/// Bufferization of tensor.extract_slice. Replace with memref.subview.
struct ExtractSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractSliceOpInterface,
                                                    tensor::ExtractSliceOp> {
  // ...
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    Location loc = extractSliceOp.getLoc();

    // Get the source buffer.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();

    // Take a subview of the source buffer.
    auto resultMemrefType =
        bufferization::getBufferType(extractSliceOp.getResult(), options);
    if (failed(resultMemrefType))
      return failure();
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
        mixedOffsets, mixedSizes, mixedStrides);

    replaceOpWithBufferizedValues(rewriter, op, subView);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    assert(value == extractSliceOp.getResult() && "invalid value");
    auto srcMemrefType = bufferization::getBufferType(
        extractSliceOp.getSource(), options, invocationStack);
    if (failed(srcMemrefType))
      return failure();
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    return cast<BaseMemRefType>(memref::SubViewOp::inferRankReducedResultType(
        extractSliceOp.getType().getShape(),
        llvm::cast<MemRefType>(*srcMemrefType), mixedOffsets, mixedSizes,
        mixedStrides));
  }
};
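
// Illustrative sketch (not from the original source): the slice becomes a
// zero-copy view whose strided layout encodes the slice offset,
//   %0 = tensor.extract_slice %t[0, 1] [2, 3] [1, 1]
//          : tensor<4x8xf32> to tensor<2x3xf32>
//   =>
//   %0 = memref.subview %m[0, 1] [2, 3] [1, 1]
//          : memref<4x8xf32> to memref<2x3xf32, strided<[8, 1], offset: 1>>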
/// Bufferization of tensor.extract. Replace with memref.load.
struct ExtractOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractOpInterface,
                                                    tensor::ExtractOp> {
  // ...
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractOp = cast<tensor::ExtractOp>(op);
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractOp.getTensor(), options);
    if (failed(srcMemref))
      return failure();
    replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, *srcMemref,
                                                 extractOp.getIndices());
    return success();
  }
};
/// Implements backtracking to traverse indices of the output buffer while
/// iterating over the elements of a tensor.from_elements op.
static void createStores(RewriterBase &rewriter, Location loc, int dim,
                         Value buffer, ArrayRef<int64_t> shape,
                         ArrayRef<Value> constants,
                         OperandRange::iterator &elementIt,
                         SmallVectorImpl<Value> &indices) {
  if (dim == static_cast<int>(shape.size()) - 1) {
    // Innermost dimension: emit one store per element.
    for (int i = 0; i < shape.back(); ++i) {
      indices.back() = constants[i];
      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
      ++elementIt;
    }
    return;
  }
  for (int i = 0; i < shape[dim]; ++i) {
    indices[dim] = constants[i];
    createStores(rewriter, loc, dim + 1, buffer, shape, constants, elementIt,
                 indices);
  }
}
/// Bufferization of tensor.from_elements.
struct FromElementsOpInterface
    : public BufferizableOpInterface::ExternalModel<FromElementsOpInterface,
                                                    tensor::FromElementsOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto fromElementsOp = cast<tensor::FromElementsOp>(op);
    auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate a buffer for the result.
    Location loc = op->getLoc();
    auto shape = tensorType.getShape();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, fromElementsOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    auto memrefType =
        MemRefType::get(tensorType.getShape(), tensorType.getElementType());
    Value buffer = rewriter.create<bufferization::ToMemrefOp>(
        op->getLoc(), memrefType, *tensorAlloc);

    // Case: tensor<0xelem_type>.
    if (fromElementsOp.getElements().empty()) {
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Case: 0-d tensor<elem_type>.
    if (shape.empty()) {
      rewriter.create<memref::StoreOp>(
          loc, fromElementsOp.getElements().front(), buffer);
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Create constants for the range of possible indices [0, max{shape_i}).
    auto maxDim = *llvm::max_element(shape);
    SmallVector<Value, 2> constants;
    constants.reserve(maxDim);
    for (int i = 0; i < maxDim; ++i)
      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));

    // Traverse all elements and create one memref.store per element.
    auto elementIt = fromElementsOp.getElements().begin();
    SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
    createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                 indices);

    replaceOpWithBufferizedValues(rewriter, op, buffer);
    return success();
  }
};
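
// Illustrative sketch (not from the original source): for a 1-d tensor,
//   %0 = tensor.from_elements %a, %b : tensor<2xf32>
//   =>
//   %alloc = bufferization.alloc_tensor() : tensor<2xf32>   (then to_memref)
//   memref.store %a, %buf[%c0] : memref<2xf32>
//   memref.store %b, %buf[%c1] : memref<2xf32>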
/// Lower the body of a tensor.generate-like op (one index-typed block
/// argument per dimension) to a linalg.map with the given destination.
static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
                                     Value tensorDestination,
                                     ValueRange dynamicSizes,
                                     Region &generateBody) {
  assert(generateBody.hasOneBlock() && "expected body with single block");
  auto tensorType = cast<RankedTensorType>(tensorDestination.getType());
  assert(generateBody.getNumArguments() == tensorType.getRank() &&
         "rank mismatch");

  // Create linalg::MapOp.
  OpBuilder::InsertionGuard g(rewriter);
  auto linalgOp = rewriter.create<linalg::MapOp>(
      loc, tensorType, /*inputs=*/ValueRange(), /*init=*/tensorDestination);
  Block &linalgBody = linalgOp.getMapper().emplaceBlock();

  // Create one linalg.index per dimension to replace the block arguments.
  rewriter.setInsertionPointToStart(&linalgBody);
  SmallVector<Value> indices;
  for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));

  // Move the body over and rewrite the terminator.
  rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
  auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
  rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOp.getValue());

  return linalgOp.getResult()[0];
}
/// Bufferization of tensor.generate.
struct GenerateOpInterface
    : public BufferizableOpInterface::ExternalModel<GenerateOpInterface,
                                                    tensor::GenerateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto generateOp = cast<tensor::GenerateOp>(op);
    auto type = generateOp.getResult().getType();

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(type) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, generateOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    Value result = lowerGenerateLikeOpBody(rewriter, loc, *tensorAlloc,
                                           generateOp.getDynamicExtents(),
                                           generateOp.getBody());
    rewriter.replaceOp(generateOp, result);
    return success();
  }
};
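
// Illustrative sketch (not from the original source; linalg.map syntax
// abbreviated): the generate body becomes the mapper of a linalg.map whose
// init operand is the new allocation,
//   %0 = tensor.generate { ^bb0(%i: index): ... tensor.yield %v } : tensor<16xf32>
//   =>
//   %alloc = bufferization.alloc_tensor() : tensor<16xf32>
//   %0 = linalg.map outs(%alloc : tensor<16xf32>)
//          { %i = linalg.index 0 ... linalg.yield %v }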
/// Bufferization of tensor.insert. Replace with memref.store.
struct InsertOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertOpInterface,
                                                     tensor::InsertOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto insertOp = cast<tensor::InsertOp>(op);
    FailureOr<Value> destMemref =
        getBuffer(rewriter, insertOp.getDest(), options);
    if (failed(destMemref))
      return failure();
    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
                                     *destMemref, insertOp.getIndices());
    replaceOpWithBufferizedValues(rewriter, op, *destMemref);
    return success();
  }
};
template <typename InsertOpTy>
static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
                                      OpOperand &opOperand) {
  // The source is always read.
  if (opOperand == insertSliceOp.getSourceMutable())
    return true;

  // For the destination, it depends on the slice.
  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");

  // The dest is not read if it is entirely overwritten, e.g.:
  //   tensor.insert_slice %a into %t[0][10][1] : ... into tensor<10xf32>
  bool allOffsetsZero =
      llvm::all_of(insertSliceOp.getMixedOffsets(), isZeroIndex);
  RankedTensorType destType = insertSliceOp.getDestType();
  bool sizesMatchDestSizes =
      areConstantIntValues(insertSliceOp.getMixedSizes(), destType.getShape());
  bool allStridesOne =
      areAllConstantIntValue(insertSliceOp.getMixedStrides(), 1);
  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
}
/// Bufferization of tensor.insert_slice. Replace with a memory copy. Under
/// certain circumstances, this op can also be a no-op.
struct InsertSliceOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertSliceOpInterface,
                                                     tensor::InsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
                                     opOperand);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
    Location loc = insertSliceOp.getLoc();

    // Get the destination buffer and take a subview of it.
    FailureOr<Value> dstMemref =
        getBuffer(rewriter, insertSliceOp.getDest(), options);
    if (failed(dstMemref))
      return failure();
    auto dstMemrefType = cast<MemRefType>(dstMemref->getType());
    auto subviewMemRefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            insertSliceOp.getSourceType().getShape(), dstMemrefType,
            mixedOffsets, mixedSizes, mixedStrides));
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
        mixedStrides);

    // Copy the source into the subview. If this insert_slice has a matching
    // extract_slice, the copy eventually folds away.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, insertSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();
    if (failed(options.createMemCpy(rewriter, loc, *srcMemref, subView)))
      return failure();

    replaceOpWithBufferizedValues(rewriter, op, *dstMemref);
    return success();
  }
};
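
// Illustrative sketch (not from the original source):
//   %0 = tensor.insert_slice %src into %dst[0, 1] [2, 3] [1, 1]
//          : tensor<2x3xf32> into tensor<4x8xf32>
//   =>
//   %sv = memref.subview %dst_buf[0, 1] [2, 3] [1, 1]
//          : memref<4x8xf32> to memref<2x3xf32, strided<[8, 1], offset: 1>>
//   memref.copy %src_buf, %sv   // the default createMemCpy lowering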
/// Bufferization of tensor.pad. Replace with bufferization.alloc_tensor +
/// linalg.map + tensor.insert_slice.
struct PadOpInterface
    : public BufferizableOpInterface::ExternalModel<PadOpInterface,
                                                    tensor::PadOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  // ... (read/write/aliasing queries elided)

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    // Infer the memory space from the source tensor.
    auto padOp = cast<tensor::PadOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        padOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    MemRefLayoutAttrInterface layout;
    return MemRefType::get(padOp.getResultType().getShape(),
                           padOp.getResultType().getElementType(), layout,
                           maybeSrcBufferType->getMemorySpace());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto padOp = cast<tensor::PadOp>(op);
    Location loc = padOp.getLoc();
    RankedTensorType resultType = padOp.getResultType();
    RankedTensorType srcType = padOp.getSourceType();

    auto toValue = [&](OpFoldResult ofr) {
      if (ofr.is<Value>())
        return ofr.get<Value>();
      return rewriter
          .create<arith::ConstantIndexOp>(loc, *getConstantIntValue(ofr))
          .getResult();
    };

    // Compute dynamic result dimensions: srcDim + lowPad + highPad.
    SmallVector<OpFoldResult> mixedLowPad = padOp.getMixedLowPad();
    SmallVector<OpFoldResult> mixedHighPad = padOp.getMixedHighPad();
    SmallVector<Value> dynamicSizes;
    for (int64_t i = 0; i < resultType.getRank(); ++i) {
      if (!resultType.isDynamicDim(i))
        continue;
      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
      Value lowPad = toValue(mixedLowPad[i]);
      Value highPad = toValue(mixedHighPad[i]);
      AffineExpr s0, s1, s2;
      bindSymbols(op->getContext(), s0, s1, s2);
      AffineExpr sumExpr = s0 + s1 + s2;
      Value sum = rewriter.create<affine::AffineApplyOp>(
          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
      dynamicSizes.push_back(sum);
    }

    // Allocate a buffer for the padded result.
    FailureOr<Value> tensorAlloc =
        allocateTensorForShapedValue(rewriter, loc, padOp.getResult(), options,
                                     /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // tensor.pad is like tensor.generate, except that only part of the
    // generated tensor is needed; reuse the same lowering for the body.
    Value filledBuffer = lowerGenerateLikeOpBody(
        rewriter, loc, *tensorAlloc, dynamicSizes, padOp.getBodyRegion());

    // Insert the source into the filled buffer at the low-pad offsets.
    SmallVector<OpFoldResult> sliceSizes =
        getMixedSizes(rewriter, loc, padOp.getSource());
    SmallVector<OpFoldResult> sliceStrides(srcType.getRank(),
                                           rewriter.getIndexAttr(1));
    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
        padOp, padOp.getSource(), filledBuffer,
        /*offsets=*/padOp.getMixedLowPad(), sliceSizes, sliceStrides);
    return success();
  }
};
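
// Illustrative sketch (not from the original source; linalg.map syntax
// abbreviated): the pad decomposes into an allocation of the padded size, a
// linalg.map evaluating the pad body everywhere, and an insert_slice of the
// source at the low-pad offsets,
//   %0 = tensor.pad %t low[1] high[2] { ... tensor.yield %cst }
//          : tensor<4xf32> to tensor<7xf32>
//   =>
//   %alloc  = bufferization.alloc_tensor() : tensor<7xf32>
//   %filled = linalg.map outs(%alloc : tensor<7xf32>) { linalg.yield %cst }
//   %0      = tensor.insert_slice %t into %filled[1] [4] [1]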
/// Bufferization of tensor.rank. Replace with memref.rank.
struct RankOpInterface
    : public BufferizableOpInterface::ExternalModel<RankOpInterface,
                                                    tensor::RankOp> {
  // ...
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto rankOp = cast<tensor::RankOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, rankOp.getTensor(), options);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op, rankOp.getType(),
                                                 *v);
    return success();
  }
};
/// Bufferization of tensor.reshape. Replace with memref.reshape.
struct ReshapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ReshapeOpInterface,
                                                    tensor::ReshapeOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // Depending on the layout map, the source buffer may be read.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    return opOperand == reshapeOp.getShapeMutable();
  }

  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, reshapeOp.getSource(), options);
    FailureOr<Value> shapeBuffer =
        getBuffer(rewriter, reshapeOp.getShape(), options);
    if (failed(srcBuffer) || failed(shapeBuffer))
      return failure();
    auto maybeResultMemRefType =
        bufferization::getBufferType(reshapeOp.getResult(), options);
    if (failed(maybeResultMemRefType))
      return failure();

    // memref.reshape requires the source buffer to have an identity layout.
    // If it does not, copy the source into a new identity-layout buffer.
    auto srcType = llvm::dyn_cast<MemRefType>(srcBuffer->getType());
    if (srcType && !srcType.getLayout().isIdentity()) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), reshapeOp.getSource(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType = MemRefType::get(
          srcType.getShape(), srcType.getElementType(), AffineMap(),
          cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
      srcBuffer = rewriter
                      .create<bufferization::ToMemrefOp>(
                          op->getLoc(), memrefType, *tensorAlloc)
                      .getResult();
    }

    replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
        rewriter, op, maybeResultMemRefType.value(), *srcBuffer, *shapeBuffer);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    assert(value == reshapeOp.getResult() && "unexpected value provided");
    auto maybeSourceBufferType = bufferization::getBufferType(
        reshapeOp.getSource(), options, invocationStack);
    if (failed(maybeSourceBufferType))
      return failure();
    return getMemRefTypeWithStaticIdentityLayout(
        reshapeOp.getResult().getType(),
        cast<BaseMemRefType>(maybeSourceBufferType.value()).getMemorySpace());
  }
};
/// Analysis and bufferization of tensor.parallel_insert_slice.
struct ParallelInsertSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<
          ParallelInsertSliceOpInterface, ParallelInsertSliceOp> {
  // ...

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::ParallelInsertSliceOp>(op),
                                     opOperand);
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    return opOperand == parallelInsertSliceOp.getDestMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    ParallelCombiningOpInterface parallelCombiningParent =
        parallelInsertSliceOp.getParallelCombiningParent();

    // Bufferize the op outside of the parallel combining terminator.
    rewriter.setInsertionPoint(parallelCombiningParent);

    // Get the source and destination buffers.
    FailureOr<Value> destBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getDest(), options);
    if (failed(destBuffer))
      return failure();
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getSource(), options);
    if (failed(srcBuffer))
      return failure();

    // Take a subview of the destination buffer.
    auto destBufferType = cast<MemRefType>(destBuffer->getType());
    auto subviewMemRefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            parallelInsertSliceOp.getSourceType().getShape(), destBufferType,
            parallelInsertSliceOp.getMixedOffsets(),
            parallelInsertSliceOp.getMixedSizes(),
            parallelInsertSliceOp.getMixedStrides()));
    Value subview = rewriter.create<memref::SubViewOp>(
        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
        parallelInsertSliceOp.getMixedOffsets(),
        parallelInsertSliceOp.getMixedSizes(),
        parallelInsertSliceOp.getMixedStrides());

    // This memcpy will fold away if everything bufferizes in-place.
    if (failed(options.createMemCpy(rewriter, parallelInsertSliceOp.getLoc(),
                                    *srcBuffer, subview)))
      return failure();

    // If the source buffer is deallocated in the same block, make sure the
    // dealloc appears after the memcpy rather than before the terminator.
    for (Operation *user : srcBuffer->getUsers()) {
      if (hasEffect<MemoryEffects::Free>(user)) {
        if (user->getBlock() == parallelCombiningParent->getBlock())
          rewriter.moveOpBefore(user, user->getBlock()->getTerminator());
        break;
      }
    }

    // Delete the op.
    rewriter.eraseOp(op);
    return success();
  }
};
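
// Illustrative sketch (not from the original source): inside an scf.forall,
// the terminator write becomes a subview + copy on the shared destination
// buffer (which folds away when bufferization is in-place),
//   scf.forall.in_parallel {
//     tensor.parallel_insert_slice %src into %dest[%i] [2] [1] ...
//   }
//   =>
//   %sv = memref.subview %dest_buf[%i] [2] [1] ...
//   memref.copy %src_buf, %sv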
/// Bufferization of tensor.splat. Bufferizes to a new allocation that is
/// filled with a linalg.map, similar to tensor.generate.
struct SplatOpInterface
    : public BufferizableOpInterface::ExternalModel<SplatOpInterface,
                                                    tensor::SplatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const { return true; }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto splatOp = cast<tensor::SplatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, splatOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // Create a linalg.map that yields the splat value everywhere.
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    auto linalgOp = rewriter.create<linalg::MapOp>(
        loc, tensorType, /*inputs=*/ValueRange(), /*init=*/*tensorAlloc);
    Block &linalgBody = linalgOp.getMapper().emplaceBlock();

    rewriter.setInsertionPointToStart(&linalgBody);
    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
    rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
    return success();
  }
};
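
// Illustrative sketch (not from the original source; linalg.map syntax
// abbreviated):
//   %0 = tensor.splat %v : tensor<8xf32>
//   =>
//   %alloc = bufferization.alloc_tensor() : tensor<8xf32>
//   %0 = linalg.map outs(%alloc : tensor<8xf32>) { linalg.yield %v }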
void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
    CastOp::attachInterface<CastOpInterface>(*ctx);
    CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
    DimOp::attachInterface<DimOpInterface>(*ctx);
    EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
    ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
    ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
    ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
    FromElementsOp::attachInterface<FromElementsOpInterface>(*ctx);
    GenerateOp::attachInterface<GenerateOpInterface>(*ctx);
    InsertOp::attachInterface<InsertOpInterface>(*ctx);
    InsertSliceOp::attachInterface<InsertSliceOpInterface>(*ctx);
    PadOp::attachInterface<PadOpInterface>(*ctx);
    ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
        *ctx);
    RankOp::attachInterface<RankOpInterface>(*ctx);
    ReshapeOp::attachInterface<ReshapeOpInterface>(*ctx);
    SplatOp::attachInterface<SplatOpInterface>(*ctx);

    // Load additional dialects of which ops may get created.
    ctx->loadDialect<arith::ArithDialect, linalg::LinalgDialect>();
  });
}
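
// Minimal usage sketch (not part of the original file): a client attaches the
// external models to a DialectRegistry before creating the MLIRContext, e.g.:
//
//   DialectRegistry registry;
//   registry.insert<tensor::TensorDialect>();
//   tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   MLIRContext context(registry);
//
// After that, One-Shot Bufferize can rewrite the tensor-dialect ops above
// into their memref counterparts.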