/// Bufferization of tensor.cast. Replaced with memref.cast.
struct CastOpInterface
    : public BufferizableOpInterface::ExternalModel<CastOpInterface,
                                                    tensor::CastOp> {
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto castOp = cast<tensor::CastOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        castOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    Attribute memorySpace = maybeSrcBufferType->getMemorySpace();

    // Case 1: Casting an unranked tensor. No static offset/strides can be
    // inferred from the source; assume a fully dynamic layout.
    if (isa<UnrankedTensorType>(castOp.getSource().getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 2: Casting to an unranked tensor type.
    if (isa<UnrankedTensorType>(castOp.getType()))
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(),
                                                 memorySpace);

    // Case 3: Ranked tensor -> ranked tensor. The source layout carries over.
    auto rankedResultType = cast<RankedTensorType>(castOp.getType());
    return MemRefType::get(
        rankedResultType.getShape(), rankedResultType.getElementType(),
        llvm::cast<MemRefType>(*maybeSrcBufferType).getLayout(), memorySpace);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto castOp = cast<tensor::CastOp>(op);

    // The result buffer still has the old (pre-cast) type.
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, castOp.getSource(), options);
    if (failed(resultBuffer))
      return failure();

    // Compute the new buffer type.
    auto resultMemRefType =
        bufferization::getBufferType(castOp.getResult(), options);
    if (failed(resultMemRefType))
      return failure();
    if (resultBuffer->getType() == *resultMemRefType) {
      // This cast is a no-op.
      replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
      return success();
    }

    // Replace the op with a memref.cast.
    assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
                                             *resultMemRefType) &&
           "CastOp::bufferize: cast incompatible");
    replaceOpWithNewBufferizedOp<memref::CastOp>(
        rewriter, op, *resultMemRefType, *resultBuffer);
    return success();
  }
};
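// Illustrative rewrite (hypothetical IR, not part of the upstream file):
// assuming the source bufferizes to memref<10xf32>, the code above turns
//
//   %1 = tensor.cast %0 : tensor<10xf32> to tensor<?xf32>
//
// into
//
//   %1 = memref.cast %0 : memref<10xf32> to memref<?xf32>
//
// because for ranked-to-ranked casts the source layout carries over (case 3
// in getBufferType above).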
/// Bufferization of tensor.collapse_shape. Replaced with
/// memref.collapse_shape.
struct CollapseShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<CollapseShapeOpInterface,
                                                    tensor::CollapseShapeOp> {
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        collapseShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        srcBufferType, collapseShapeOp.getReassociationIndices());

    if (!canBeCollapsed) {
      // If the dims cannot be collapsed, this op bufferizes to a new
      // allocation with an identity layout.
      RankedTensorType tensorResultType = collapseShapeOp.getResultType();
      return getMemRefTypeWithStaticIdentityLayout(
          tensorResultType, srcBufferType.getMemorySpace());
    }

    return memref::CollapseShapeOp::computeCollapsedType(
        srcBufferType, collapseShapeOp.getReassociationIndices());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    RankedTensorType tensorResultType = collapseShapeOp.getResultType();
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, collapseShapeOp.getSrc(), options);
    if (failed(maybeBuffer))
      return failure();
    Value buffer = *maybeBuffer;
    auto bufferType = cast<MemRefType>(buffer.getType());

    if (tensorResultType.getRank() == 0) {
      // 0-d collapses must go through a different op builder.
      MemRefType resultType;

      if (bufferType.getLayout().isIdentity()) {
        // Standard layout: result type has no offset.
        MemRefLayoutAttrInterface layout;
        resultType = MemRefType::get({}, tensorResultType.getElementType(),
                                     layout, bufferType.getMemorySpace());
      } else {
        // Source memref has a layout map: the result keeps the same offset.
        SmallVector<int64_t> strides;
        int64_t offset;
        if (failed(bufferType.getStridesAndOffset(strides, offset)))
          return failure();
        resultType = MemRefType::get(
            {}, tensorResultType.getElementType(),
            StridedLayoutAttr::get(op->getContext(), offset, {}),
            bufferType.getMemorySpace());
      }

      replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
          rewriter, op, resultType, buffer, collapseShapeOp.getReassociation());
      return success();
    }

    // If the dims are not collapsible (due to an incompatible source layout
    // map), force an out-of-place bufferization, i.e., a buffer copy. The
    // newly allocated buffer has no layout map and is thus collapsible.
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        bufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), collapseShapeOp.getSrc(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType =
          MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                          collapseShapeOp.getSrcType().getElementType(),
                          AffineMap(), bufferType.getMemorySpace());
      buffer = rewriter.create<bufferization::ToMemrefOp>(
          op->getLoc(), memrefType, *tensorAlloc);
    }

    // The result type is inferred by the builder.
    replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
        rewriter, op, buffer, collapseShapeOp.getReassociationIndices());
    return success();
  }
};
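// A sketch of the fallback path above (hypothetical IR, for illustration):
// a source buffer such as memref<2x4xf32, strided<[8, 1]>> is not
// collapsible along [[0, 1]] because the rows are not contiguous, so the op
// bufferizes to a copy followed by the collapse:
//
//   %alloc = bufferization.alloc_tensor ...       // copy of the source
//   %m = bufferization.to_memref %alloc : memref<2x4xf32>
//   %r = memref.collapse_shape %m [[0, 1]] : memref<2x4xf32> into memref<8xf32>
//
// i.e., the copy buys an identity layout, which is always collapsible.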
/// Bufferization of tensor.dim. Replaced with memref.dim.
struct DimOpInterface
    : public BufferizableOpInterface::ExternalModel<DimOpInterface,
                                                    tensor::DimOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto dimOp = cast<tensor::DimOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, dimOp.getSource(), options);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, *v,
                                                dimOp.getIndex());
    return success();
  }
};
/// Bufferization of tensor.empty. Replaced with an allocation.
struct EmptyOpInterface
    : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
                                                    tensor::EmptyOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto emptyOp = cast<tensor::EmptyOp>(op);

    // Allocate a tensor. This emits a "bufferization.alloc_tensor" op.
    FailureOr<Value> allocTensor = allocateTensorForShapedValue(
        rewriter, op->getLoc(), emptyOp.getResult(), options,
        /*copy=*/false);
    if (failed(allocTensor))
      return failure();
    rewriter.replaceOp(op, *allocTensor);
    return success();
  }
};
/// Bufferization of tensor.expand_shape. Replaced with memref.expand_shape.
struct ExpandShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
                                                    tensor::ExpandShapeOp> {
  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        expandShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    auto maybeResultType = memref::ExpandShapeOp::computeExpandedType(
        srcBufferType, expandShapeOp.getResultType().getShape(),
        expandShapeOp.getReassociationIndices());
    if (failed(maybeResultType))
      return failure();
    return *maybeResultType;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto tensorResultType = expandShapeOp.getResultType();
    FailureOr<Value> buffer =
        getBuffer(rewriter, expandShapeOp.getSrc(), options);
    if (failed(buffer))
      return failure();

    auto memrefExpandShape = rewriter.create<memref::ExpandShapeOp>(
        op->getLoc(), tensorResultType.getShape(), *buffer,
        expandShapeOp.getReassociationIndices(),
        expandShapeOp.getMixedOutputShape());
    replaceOpWithBufferizedValues(rewriter, op,
                                  memrefExpandShape->getResults());
    return success();
  }
};
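// Illustrative rewrite (hypothetical IR): with a source bufferizing to
// memref<8xf32>, the code above turns
//
//   %e = tensor.expand_shape %t [[0, 1]] output_shape [2, 4]
//       : tensor<8xf32> into tensor<2x4xf32>
//
// into the equivalent memref op on the source buffer:
//
//   %e = memref.expand_shape %m [[0, 1]] output_shape [2, 4]
//       : memref<8xf32> into memref<2x4xf32>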
/// Bufferization of tensor.extract_slice. Replaced with memref.subview.
struct ExtractSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractSliceOpInterface,
                                                    tensor::ExtractSliceOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    Location loc = extractSliceOp.getLoc();

    // Get source buffer.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();

    // Take a subview of the source buffer.
    auto resultMemrefType =
        bufferization::getBufferType(extractSliceOp.getResult(), options);
    if (failed(resultMemrefType))
      return failure();
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
        mixedOffsets, mixedSizes, mixedStrides);

    replaceOpWithBufferizedValues(rewriter, op, subView);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    assert(value == extractSliceOp.getResult() && "invalid value");
    auto srcMemrefType = bufferization::getBufferType(
        extractSliceOp.getSource(), options, invocationStack);
    if (failed(srcMemrefType))
      return failure();
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    return memref::SubViewOp::inferRankReducedResultType(
        extractSliceOp.getType().getShape(),
        llvm::cast<MemRefType>(*srcMemrefType), mixedOffsets, mixedSizes,
        mixedStrides);
  }
};
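// Illustrative rewrite (hypothetical IR): an extract_slice such as
//
//   %s = tensor.extract_slice %t[4] [8] [1] : tensor<16xf32> to tensor<8xf32>
//
// becomes a subview of the source buffer, with the offset folded into the
// result layout computed by getBufferType above:
//
//   %s = memref.subview %m[4] [8] [1]
//       : memref<16xf32> to memref<8xf32, strided<[1], offset: 4>>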
/// Bufferization of tensor.extract. Replaced with memref.load.
struct ExtractOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractOpInterface,
                                                    tensor::ExtractOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractOp = cast<tensor::ExtractOp>(op);
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractOp.getTensor(), options);
    if (failed(srcMemref))
      return failure();
    replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, *srcMemref,
                                                 extractOp.getIndices());
    return success();
  }
};
/// Implements backtracking to traverse indices of the output buffer while
/// iterating over the elements of a tensor.from_elements op.
static void createStores(RewriterBase &rewriter, Location loc, int dim,
                         Value buffer, ArrayRef<int64_t> shape,
                         ArrayRef<Value> constants,
                         OperandRange::iterator &elementIt,
                         SmallVectorImpl<Value> &indices) {
  if (dim == static_cast<int>(shape.size()) - 1) {
    // Innermost dimension: store one element per index.
    for (int i = 0; i < shape.back(); ++i) {
      indices.back() = constants[i];
      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
      ++elementIt;
    }
    return;
  }
  for (int i = 0; i < shape[dim]; ++i) {
    indices[dim] = constants[i];
    createStores(rewriter, loc, dim + 1, buffer, shape, constants, elementIt,
                 indices);
  }
}
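// For illustration (hypothetical shapes): for a tensor<2x3xf32>, the
// recursion above emits one store per element in row-major order:
//
//   memref.store %e0, %buf[%c0, %c0] ... memref.store %e2, %buf[%c0, %c2]
//   memref.store %e3, %buf[%c1, %c0] ... memref.store %e5, %buf[%c1, %c2]
//
// `indices` is reused across iterations; only the entry for the current
// dimension is overwritten before recursing.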
/// Bufferization of tensor.from_elements.
struct FromElementsOpInterface
    : public BufferizableOpInterface::ExternalModel<FromElementsOpInterface,
                                                    tensor::FromElementsOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto fromElementsOp = cast<tensor::FromElementsOp>(op);
    auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());

    // Allocate a buffer for the result.
    Location loc = op->getLoc();
    auto shape = tensorType.getShape();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, fromElementsOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    FailureOr<BaseMemRefType> memrefType =
        bufferization::getBufferType(*tensorAlloc, options);
    if (failed(memrefType))
      return failure();
    Value buffer = rewriter.create<bufferization::ToMemrefOp>(
        op->getLoc(), *memrefType, *tensorAlloc);

    // Case: tensor<0xelem_type>.
    if (fromElementsOp.getElements().empty()) {
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Case: 0-d tensor<elem_type>.
    if (shape.empty()) {
      rewriter.create<memref::StoreOp>(
          loc, fromElementsOp.getElements().front(), buffer);
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Create constants for the range of possible indices [0, max{shape_i}).
    auto maxDim = *llvm::max_element(shape);
    SmallVector<Value, 2> constants;
    constants.reserve(maxDim);
    for (int i = 0; i < maxDim; ++i)
      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));

    // Traverse all elements and create memref.store ops.
    auto elementIt = fromElementsOp.getElements().begin();
    SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
    createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                 indices);

    replaceOpWithBufferizedValues(rewriter, op, buffer);
    return success();
  }
};
/// Lower the body of a tensor.generate-like op (one index-typed block
/// argument per dimension, terminated by tensor.yield) to a linalg.map that
/// writes into `tensorDestination`.
static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
                                     Value tensorDestination,
                                     ValueRange dynamicSizes,
                                     Region &generateBody) {
  assert(generateBody.hasOneBlock() && "expected body with single block");
  auto tensorType = cast<RankedTensorType>(tensorDestination.getType());
  assert(generateBody.getNumArguments() == tensorType.getRank() &&
         "rank mismatch");

  // Create linalg::MapOp.
  OpBuilder::InsertionGuard g(rewriter);
  auto linalgOp =
      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
                                     /*init=*/tensorDestination);
  Block &linalgBody = linalgOp.getMapper().emplaceBlock();

  // Create linalg::IndexOps that replace the body's index block arguments.
  rewriter.setInsertionPointToStart(&linalgBody);
  SmallVector<Value> indices;
  for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));

  // Move over the body and rewrite the terminator.
  rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
  auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
  rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOp.getValue());

  return linalgOp.getResult()[0];
}
/// Bufferization of tensor.generate.
struct GenerateOpInterface
    : public BufferizableOpInterface::ExternalModel<GenerateOpInterface,
                                                    tensor::GenerateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto generateOp = cast<tensor::GenerateOp>(op);

    auto type = generateOp.getResult().getType();

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(type) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, generateOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    Value result = lowerGenerateLikeOpBody(rewriter, loc, *tensorAlloc,
                                           generateOp.getDynamicExtents(),
                                           generateOp.getBody());
    rewriter.replaceOp(generateOp, result);

    return success();
  }
};
/// Bufferization of tensor.insert. Replaced with memref.store.
struct InsertOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertOpInterface,
                                                     tensor::InsertOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto insertOp = cast<tensor::InsertOp>(op);
    FailureOr<Value> destMemref =
        getBuffer(rewriter, insertOp.getDest(), options);
    if (failed(destMemref))
      return failure();
    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
                                     *destMemref, insertOp.getIndices());
    replaceOpWithBufferizedValues(rewriter, op, *destMemref);
    return success();
  }
};
/// Return true if the given insert_slice-like op must read from its operand
/// during bufferization.
template <typename InsertOpTy>
static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
                                      OpOperand &opOperand) {
  // The source is always read.
  if (opOperand == insertSliceOp.getSourceMutable())
    return true;

  // For the destination, it depends...
  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");

  // The dest is not read if it is entirely overwritten: all offsets are 0,
  // all sizes match the dest shape, and all strides are 1.
  bool allOffsetsZero =
      llvm::all_of(insertSliceOp.getMixedOffsets(), isZeroIndex);
  RankedTensorType destType = insertSliceOp.getDestType();
  bool sizesMatchDestSizes =
      areConstantIntValues(insertSliceOp.getMixedSizes(), destType.getShape());
  bool allStridesOne =
      areAllConstantIntValue(insertSliceOp.getMixedStrides(), 1);
  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
}
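// Example of the "no read" case above (hypothetical IR): in
//
//   %r = tensor.insert_slice %src into %dest[0, 0] [10, 20] [1, 1]
//       : tensor<10x20xf32> into tensor<10x20xf32>
//
// the destination is entirely overwritten (zero offsets, sizes equal to the
// dest shape, unit strides), so the dest operand is not read.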
/// Bufferization of tensor.insert_slice. Replaced with a memory copy. Under
/// certain circumstances, this op can also be a no-op.
struct InsertSliceOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertSliceOpInterface,
                                                     tensor::InsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
                                     opOperand);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
    Location loc = insertSliceOp.getLoc();

    // Get the destination buffer and take a subview of it.
    FailureOr<Value> dstMemref =
        getBuffer(rewriter, insertSliceOp.getDest(), options);
    if (failed(dstMemref))
      return failure();
    auto dstMemrefType = cast<MemRefType>(dstMemref->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            insertSliceOp.getSourceType().getShape(), dstMemrefType,
            mixedOffsets, mixedSizes, mixedStrides);
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
        mixedStrides);

    // Copy the source buffer into the subview. If this insert_slice has a
    // matching extract_slice, the copy eventually folds away.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, insertSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();
    if (failed(options.createMemCpy(rewriter, loc, *srcMemref, subView)))
      return failure();

    replaceOpWithBufferizedValues(rewriter, op, *dstMemref);
    return success();
  }
};
/// Bufferization of tensor.pad. Replaced with an allocation that is filled
/// via lowerGenerateLikeOpBody, followed by a tensor.insert_slice of the
/// source.
struct PadOpInterface
    : public BufferizableOpInterface::ExternalModel<PadOpInterface,
                                                    tensor::PadOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    // Infer the memory space from the source tensor.
    auto padOp = cast<tensor::PadOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        padOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    MemRefLayoutAttrInterface layout;
    return MemRefType::get(padOp.getResultType().getShape(),
                           padOp.getResultType().getElementType(), layout,
                           maybeSrcBufferType->getMemorySpace());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto padOp = cast<tensor::PadOp>(op);
    Location loc = padOp.getLoc();
    RankedTensorType resultType = padOp.getResultType();
    RankedTensorType srcType = padOp.getSourceType();

    auto toValue = [&](OpFoldResult ofr) {
      if (auto value = dyn_cast<Value>(ofr))
        return value;
      return rewriter
          .create<arith::ConstantIndexOp>(loc, *getConstantIntValue(ofr))
          .getResult();
    };

    // Compute dynamic result dimensions.
    SmallVector<OpFoldResult> mixedLowPad = padOp.getMixedLowPad();
    SmallVector<OpFoldResult> mixedHighPad = padOp.getMixedHighPad();
    SmallVector<Value> dynamicSizes;
    for (int64_t i = 0; i < resultType.getRank(); ++i) {
      if (!resultType.isDynamicDim(i))
        continue;
      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
      Value lowPad = toValue(mixedLowPad[i]);
      Value highPad = toValue(mixedHighPad[i]);
      AffineExpr s0, s1, s2;
      bindSymbols(op->getContext(), s0, s1, s2);
      AffineExpr sumExpr = s0 + s1 + s2;
      Value sum = rewriter.create<affine::AffineApplyOp>(
          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
      dynamicSizes.push_back(sum);
    }

    // Allocate a buffer for the padded result.
    FailureOr<Value> tensorAlloc =
        allocateTensorForShapedValue(rewriter, loc, padOp.getResult(), options,
                                     /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // tensor.pad is like tensor.generate, except that only part of the
    // generated tensor is needed, so the same lowering is reused here.
    Value filledBuffer = lowerGenerateLikeOpBody(
        rewriter, loc, *tensorAlloc, dynamicSizes, padOp.getBodyRegion());

    // Insert the source into the filled buffer.
    SmallVector<OpFoldResult> sliceSizes =
        getMixedSizes(rewriter, loc, padOp.getSource());
    SmallVector<OpFoldResult> sliceStrides(srcType.getRank(),
                                           rewriter.getIndexAttr(1));
    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
        padOp, padOp.getSource(), filledBuffer,
        /*offsets=*/padOp.getMixedLowPad(), sliceSizes, sliceStrides);

    return success();
  }
};
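// Each dynamic result size above is computed as
//   size(i) = srcDim(i) + lowPad(i) + highPad(i),
// materialized with the affine map (s0, s1, s2) -> (s0 + s1 + s2). For
// example (hypothetical values), a source dim %d with low padding %l and
// high padding %h yields:
//
//   %sum = affine.apply affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>()[%d, %l, %h]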
/// Bufferization of tensor.rank. Replaced with memref.rank.
struct RankOpInterface
    : public BufferizableOpInterface::ExternalModel<RankOpInterface,
                                                    tensor::RankOp> {
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto rankOp = cast<tensor::RankOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, rankOp.getTensor(), options);
    if (failed(v))
      return failure();
    replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op,
                                                 rankOp.getType(), *v);
    return success();
  }
};
/// Bufferization of tensor.reshape. Replaced with memref.reshape.
struct ReshapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ReshapeOpInterface,
                                                    tensor::ReshapeOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    // The shape operand is always read; the source is only aliased.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    return opOperand == reshapeOp.getShapeMutable();
  }

  AliasingValueList getAliasingValues(Operation *op, OpOperand &opOperand,
                                      const AnalysisState &state) const {
    // Only the source operand aliases the result.
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    if (reshapeOp.getSourceMutable() != opOperand)
      return {};
    return {{op->getOpResult(0), BufferRelation::Equivalent}};
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, reshapeOp.getSource(), options);
    FailureOr<Value> shapeBuffer =
        getBuffer(rewriter, reshapeOp.getShape(), options);
    if (failed(srcBuffer) || failed(shapeBuffer))
      return failure();
    auto maybeResultMemRefType =
        bufferization::getBufferType(reshapeOp.getResult(), options);
    if (failed(maybeResultMemRefType))
      return failure();

    // memref.reshape requires the source buffer to have an identity layout.
    // If it does not, copy the source into a new buffer with an identity
    // layout.
    auto srcType = llvm::dyn_cast<MemRefType>(srcBuffer->getType());
    if (srcType && !srcType.getLayout().isIdentity()) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), reshapeOp.getSource(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType = MemRefType::get(
          srcType.getShape(), srcType.getElementType(), AffineMap(),
          cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
      srcBuffer = rewriter
                      .create<bufferization::ToMemrefOp>(
                          op->getLoc(), memrefType, *tensorAlloc)
                      .getResult();
    }

    replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
        rewriter, op, maybeResultMemRefType.value(), *srcBuffer, *shapeBuffer);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    assert(value == reshapeOp.getResult() && "unexpected value provided");
    auto maybeSourceBufferType = bufferization::getBufferType(
        reshapeOp.getSource(), options, invocationStack);
    if (failed(maybeSourceBufferType))
      return failure();
    return getMemRefTypeWithStaticIdentityLayout(
        reshapeOp.getResult().getType(),
        cast<BaseMemRefType>(maybeSourceBufferType.value()).getMemorySpace());
  }
};
/// Bufferization of tensor.parallel_insert_slice. Replaced with a memory
/// copy into a subview of the destination buffer.
struct ParallelInsertSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<
          ParallelInsertSliceOpInterface, ParallelInsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return opOperand == cast<ParallelInsertSliceOp>(op).getSourceMutable();
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    return opOperand == parallelInsertSliceOp.getDestMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    ParallelCombiningOpInterface parallelCombiningParent =
        parallelInsertSliceOp.getParallelCombiningParent();

    // Bufferize the op outside of the parallel combining terminator.
    rewriter.setInsertionPoint(parallelCombiningParent);

    // Get the source and destination buffers.
    FailureOr<Value> destBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getDest(), options);
    if (failed(destBuffer))
      return failure();
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getSource(), options);
    if (failed(srcBuffer))
      return failure();

    // Take a subview of the destination buffer.
    auto destBufferType = cast<MemRefType>(destBuffer->getType());
    MemRefType subviewMemRefType =
        memref::SubViewOp::inferRankReducedResultType(
            parallelInsertSliceOp.getSourceType().getShape(), destBufferType,
            parallelInsertSliceOp.getMixedOffsets(),
            parallelInsertSliceOp.getMixedSizes(),
            parallelInsertSliceOp.getMixedStrides());
    Value subview = rewriter.create<memref::SubViewOp>(
        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
        parallelInsertSliceOp.getMixedOffsets(),
        parallelInsertSliceOp.getMixedSizes(),
        parallelInsertSliceOp.getMixedStrides());

    // This memcpy folds away if everything bufferizes in-place.
    if (failed(options.createMemCpy(rewriter, parallelInsertSliceOp.getLoc(),
                                    *srcBuffer, subview)))
      return failure();

    // If the source buffer is deallocated in the same block, make sure that
    // the deallocation is moved after the memcpy.
    for (Operation *user : srcBuffer->getUsers()) {
      if (hasEffect<MemoryEffects::Free>(user)) {
        if (user->getBlock() == parallelCombiningParent->getBlock())
          rewriter.moveOpBefore(user, user->getBlock()->getTerminator());
      }
    }

    rewriter.eraseOp(op);
    return success();
  }
};
/// Bufferization of tensor.splat. Bufferizes to a new allocation that is
/// filled with a linalg.map, similar to tensor.generate.
struct SplatOpInterface
    : public BufferizableOpInterface::ExternalModel<SplatOpInterface,
                                                    tensor::SplatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto splatOp = cast<tensor::SplatOp>(op);

    // Allocate memory.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, splatOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // Create linalg::MapOp.
    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    auto linalgOp = rewriter.create<linalg::MapOp>(loc, tensorType,
                                                   /*inputs=*/ValueRange(),
                                                   /*init=*/*tensorAlloc);
    Block &linalgBody = linalgOp.getMapper().emplaceBlock();

    // The body yields the splatted value for every index.
    rewriter.setInsertionPointToStart(&linalgBody);
    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
    rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);

    return success();
  }
};
void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
    CastOp::attachInterface<CastOpInterface>(*ctx);
    CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
    DimOp::attachInterface<DimOpInterface>(*ctx);
    EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
    ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
    ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
    ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
    FromElementsOp::attachInterface<FromElementsOpInterface>(*ctx);
    GenerateOp::attachInterface<GenerateOpInterface>(*ctx);
    InsertOp::attachInterface<InsertOpInterface>(*ctx);
    InsertSliceOp::attachInterface<InsertSliceOpInterface>(*ctx);
    PadOp::attachInterface<PadOpInterface>(*ctx);
    ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
        *ctx);
    RankOp::attachInterface<RankOpInterface>(*ctx);
    ReshapeOp::attachInterface<ReshapeOpInterface>(*ctx);
    SplatOp::attachInterface<SplatOpInterface>(*ctx);

    // Load additional dialects of which ops may get created.
    ctx->loadDialect<arith::ArithDialect, linalg::LinalgDialect>();
  });
}
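// Minimal usage sketch (hypothetical client code, not part of this file):
// the external models must be registered on the DialectRegistry before the
// MLIRContext is created so that one-shot bufferization can find them.
//
//   DialectRegistry registry;
//   tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   MLIRContext context(registry);
//   // ... load the module and run One-Shot Bufferize ...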