/// Bufferization of tensor.cast. Replaced with memref.cast.
struct CastOpInterface
    : public BufferizableOpInterface::ExternalModel<CastOpInterface,
                                                    tensor::CastOp> {
  // ...

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto castOp = cast<tensor::CastOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        castOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    Attribute memorySpace = maybeSrcBufferType->getMemorySpace();

    // Casting from or to an unranked tensor: no static offset or strides can
    // be inferred, so assume a fully dynamic layout.
    if (isa<UnrankedTensorType>(castOp.getSource().getType())) {
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace);
    }
    if (isa<UnrankedTensorType>(castOp.getType())) {
      return getMemRefTypeWithFullyDynamicLayout(castOp.getType(), memorySpace);
    }

    // Ranked tensor -> ranked tensor: the layout does not change.
    auto rankedResultType = cast<RankedTensorType>(castOp.getType());
    return MemRefType::get(
        rankedResultType.getShape(), rankedResultType.getElementType(),
        llvm::cast<MemRefType>(*maybeSrcBufferType).getLayout(), memorySpace);
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto castOp = cast<tensor::CastOp>(op);
    FailureOr<Value> resultBuffer =
        getBuffer(rewriter, castOp.getSource(), options);
    if (failed(resultBuffer))
      return failure();
    auto resultMemRefType =
        bufferization::getBufferType(castOp.getResult(), options);
    if (failed(resultMemRefType))
      return failure();
    if (resultBuffer->getType() == *resultMemRefType) {
      // The cast is a no-op: reuse the source buffer directly.
      replaceOpWithBufferizedValues(rewriter, op, *resultBuffer);
      return success();
    }

    // Replace the op with a memref.cast.
    assert(memref::CastOp::areCastCompatible(resultBuffer->getType(),
                                             *resultMemRefType) &&
           "CastOp::bufferize: cast incompatible");
    replaceOpWithNewBufferizedOp<memref::CastOp>(
        rewriter, op, *resultMemRefType, *resultBuffer);
    return success();
  }
};
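
// Illustrative example (hypothetical IR, not from the original source): with
// this model, a ranked-to-ranked cast such as
//   %0 = tensor.cast %t : tensor<?xf32> to tensor<4xf32>
// bufferizes to a cast of the underlying buffer, roughly
//   %0 = memref.cast %b : memref<?xf32> to memref<4xf32>
// (strided layouts omitted for brevity; %t and %b are placeholder names).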
/// Bufferization of tensor.collapse_shape. Replaced with
/// memref.collapse_shape.
struct CollapseShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<CollapseShapeOpInterface,
                                                    tensor::CollapseShapeOp> {
  // ...

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        collapseShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        srcBufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      // The source buffer cannot be collapsed in place; the op bufferizes to
      // a new allocation, so the result has an identity layout.
      RankedTensorType tensorResultType = collapseShapeOp.getResultType();
      return getMemRefTypeWithStaticIdentityLayout(
          tensorResultType, srcBufferType.getMemorySpace());
    }
    return memref::CollapseShapeOp::computeCollapsedType(
        srcBufferType, collapseShapeOp.getReassociationIndices());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
    RankedTensorType tensorResultType = collapseShapeOp.getResultType();
    FailureOr<Value> maybeBuffer =
        getBuffer(rewriter, collapseShapeOp.getSrc(), options);
    if (failed(maybeBuffer))
      return failure();
    Value buffer = *maybeBuffer;
    auto bufferType = cast<MemRefType>(buffer.getType());

    if (tensorResultType.getRank() == 0) {
      // 0-d collapses must go through a different op builder.
      MemRefType resultType;
      if (bufferType.getLayout().isIdentity()) {
        // Standard layout: the result type has no offset.
        MemRefLayoutAttrInterface layout;
        resultType = MemRefType::get({}, tensorResultType.getElementType(),
                                     layout, bufferType.getMemorySpace());
      } else {
        // The source has a layout map: keep its offset in the result type.
        SmallVector<int64_t> strides;
        int64_t offset;
        if (failed(getStridesAndOffset(bufferType, strides, offset)))
          return failure();
        resultType = MemRefType::get(
            {}, tensorResultType.getElementType(),
            StridedLayoutAttr::get(op->getContext(), offset, {}),
            bufferType.getMemorySpace());
      }
      replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
          rewriter, op, resultType, buffer, collapseShapeOp.getReassociation());
      return success();
    }

    // If the source buffer cannot be collapsed in place, copy it into a new
    // buffer with an identity layout first.
    bool canBeCollapsed = memref::CollapseShapeOp::isGuaranteedCollapsible(
        bufferType, collapseShapeOp.getReassociationIndices());
    if (!canBeCollapsed) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), collapseShapeOp.getSrc(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType =
          MemRefType::get(collapseShapeOp.getSrcType().getShape(),
                          collapseShapeOp.getSrcType().getElementType(),
                          AffineMap(), bufferType.getMemorySpace());
      buffer = rewriter.create<bufferization::ToMemrefOp>(
          op->getLoc(), memrefType, *tensorAlloc);
    }

    // The result type is inferred by the builder.
    replaceOpWithNewBufferizedOp<memref::CollapseShapeOp>(
        rewriter, op, buffer, collapseShapeOp.getReassociationIndices());
    return success();
  }
};
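
// Illustrative example (hypothetical IR): when the source buffer is known to
// be contiguous,
//   %0 = tensor.collapse_shape %t [[0, 1]] : tensor<2x3xf32> into tensor<6xf32>
// becomes a metadata-only memref.collapse_shape of the source buffer. When
// isGuaranteedCollapsible fails (e.g. for certain strided layouts), the
// source is first copied into an identity-layout allocation and then
// collapsed.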
/// Bufferization of tensor.dim. Replaced with memref.dim.
struct DimOpInterface
    : public BufferizableOpInterface::ExternalModel<DimOpInterface,
                                                    tensor::DimOp> {
  // ...
    auto dimOp = cast<tensor::DimOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, dimOp.getSource(), options);
    // ...
    replaceOpWithNewBufferizedOp<memref::DimOp>(rewriter, op, *v,
                                                dimOp.getIndex());
  // ...
};
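
// Illustrative example (hypothetical IR):
//   %d = tensor.dim %t, %c0 : tensor<?xf32>
// becomes
//   %d = memref.dim %b, %c0 : memref<?xf32>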
/// Bufferization of tensor.empty. Replaced with bufferization.alloc_tensor.
struct EmptyOpInterface
    : public BufferizableOpInterface::ExternalModel<EmptyOpInterface,
                                                    tensor::EmptyOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto emptyOp = cast<tensor::EmptyOp>(op);
    // ...
    // Allocate a tensor without copying anything: tensor.empty carries no
    // contents.
    FailureOr<Value> allocTensor = allocateTensorForShapedValue(
        rewriter, op->getLoc(), emptyOp.getResult(), options,
        /*copy=*/false);
    if (failed(allocTensor))
      return failure();
    rewriter.replaceOp(op, *allocTensor);
    return success();
  }
};
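
// Illustrative example (hypothetical IR): since tensor.empty has no contents,
//   %0 = tensor.empty() : tensor<4xf32>
// simply becomes an allocation,
//   %0 = bufferization.alloc_tensor() : tensor<4xf32>
// which is why the call above passes /*copy=*/false.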
/// Bufferization of tensor.expand_shape. Replaced with memref.expand_shape.
struct ExpandShapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ExpandShapeOpInterface,
                                                    tensor::ExpandShapeOp> {
  // ...

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        expandShapeOp.getSrc(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    auto srcBufferType = llvm::cast<MemRefType>(*maybeSrcBufferType);
    auto maybeResultType = memref::ExpandShapeOp::computeExpandedType(
        srcBufferType, expandShapeOp.getResultType().getShape(),
        expandShapeOp.getReassociationIndices());
    if (failed(maybeResultType))
      return failure();
    return *maybeResultType;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
    auto tensorResultType = expandShapeOp.getResultType();
    FailureOr<Value> buffer =
        getBuffer(rewriter, expandShapeOp.getSrc(), options);
    if (failed(buffer))
      return failure();
    // The memref result type is inferred by the builder from the
    // reassociation indices and the result shape.
    replaceOpWithNewBufferizedOp<memref::ExpandShapeOp>(
        rewriter, op, tensorResultType.getShape(), *buffer,
        expandShapeOp.getReassociationIndices());
    return success();
  }
};
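
// Illustrative example (hypothetical IR; syntax varies across MLIR versions):
//   %0 = tensor.expand_shape %t [[0, 1]] : tensor<6xf32> into tensor<2x3xf32>
// becomes, roughly, a metadata-only reshape of the source buffer:
//   %0 = memref.expand_shape %b [[0, 1]] : memref<6xf32> into memref<2x3xf32>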
/// Bufferization of tensor.extract_slice. Replaced with memref.subview.
struct ExtractSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractSliceOpInterface,
                                                    tensor::ExtractSliceOp> {
  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    Location loc = extractSliceOp.getLoc();

    // Get the source buffer.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();

    // Take a subview of the source buffer.
    auto resultMemrefType =
        bufferization::getBufferType(extractSliceOp.getResult(), options);
    if (failed(resultMemrefType))
      return failure();
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, llvm::cast<MemRefType>(*resultMemrefType), *srcMemref,
        mixedOffsets, mixedSizes, mixedStrides);

    replaceOpWithBufferizedValues(rewriter, op, subView);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
    assert(value == extractSliceOp.getResult() && "invalid value");
    auto srcMemrefType = bufferization::getBufferType(
        extractSliceOp.getSource(), options, invocationStack);
    if (failed(srcMemrefType))
      return failure();
    SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
    return cast<BaseMemRefType>(memref::SubViewOp::inferRankReducedResultType(
        extractSliceOp.getType().getShape(),
        llvm::cast<MemRefType>(*srcMemrefType), mixedOffsets, mixedSizes,
        mixedStrides));
  }
};
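
// Illustrative example (hypothetical IR): a slice is a view, not a copy:
//   %0 = tensor.extract_slice %t[2] [4] [1] : tensor<8xf32> to tensor<4xf32>
// becomes roughly
//   %0 = memref.subview %b[2] [4] [1]
//       : memref<8xf32> to memref<4xf32, strided<[1], offset: 2>>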
/// Bufferization of tensor.extract. Replaced with memref.load.
struct ExtractOpInterface
    : public BufferizableOpInterface::ExternalModel<ExtractOpInterface,
                                                    tensor::ExtractOp> {
  // ...
  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto extractOp = cast<tensor::ExtractOp>(op);
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, extractOp.getTensor(), options);
    if (failed(srcMemref))
      return failure();
    replaceOpWithNewBufferizedOp<memref::LoadOp>(rewriter, op, *srcMemref,
                                                 extractOp.getIndices());
    return success();
  }
};
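
// Illustrative example (hypothetical IR):
//   %e = tensor.extract %t[%i] : tensor<8xf32>
// becomes
//   %e = memref.load %b[%i] : memref<8xf32>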
/// Recursively store the individual elements: emit one memref.store per
/// element, advancing `elementIt` in row-major order.
static void createStores(RewriterBase &rewriter, Location loc, int dim,
                         Value buffer, ArrayRef<int64_t> shape,
                         ArrayRef<Value> constants,
                         OperandRange::iterator &elementIt,
                         SmallVectorImpl<Value> &indices) {
  if (dim == static_cast<int>(shape.size()) - 1) {
    // Innermost dimension: store one element per index.
    for (int i = 0; i < shape.back(); ++i) {
      indices.back() = constants[i];
      rewriter.create<memref::StoreOp>(loc, *elementIt, buffer, indices);
      ++elementIt;
    }
    return;
  }
  // Outer dimension: fix this index and recurse into the next dimension.
  for (int i = 0; i < shape[dim]; ++i) {
    indices[dim] = constants[i];
    createStores(rewriter, loc, dim + 1, buffer, shape, constants, elementIt,
                 indices);
  }
}
/// Bufferization of tensor.from_elements.
struct FromElementsOpInterface
    : public BufferizableOpInterface::ExternalModel<FromElementsOpInterface,
                                                    tensor::FromElementsOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto fromElementsOp = cast<tensor::FromElementsOp>(op);
    auto tensorType = cast<RankedTensorType>(fromElementsOp.getType());

    // Allocate a buffer for the result.
    Location loc = op->getLoc();
    auto shape = tensorType.getShape();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, fromElementsOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();
    FailureOr<BaseMemRefType> memrefType =
        bufferization::getBufferType(*tensorAlloc, options);
    if (failed(memrefType))
      return failure();
    Value buffer = rewriter.create<bufferization::ToMemrefOp>(
        op->getLoc(), *memrefType, *tensorAlloc);

    // Case: tensor<0xelem_type>.
    if (fromElementsOp.getElements().empty()) {
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Case: rank-0 tensor<elem_type>.
    if (shape.empty()) {
      rewriter.create<memref::StoreOp>(
          loc, fromElementsOp.getElements().front(), buffer);
      replaceOpWithBufferizedValues(rewriter, op, buffer);
      return success();
    }

    // Create index constants for the range of possible indices.
    auto maxDim = *llvm::max_element(shape);
    SmallVector<Value, 2> constants;
    constants.reserve(maxDim);
    for (int i = 0; i < maxDim; ++i)
      constants.push_back(rewriter.create<arith::ConstantIndexOp>(loc, i));

    // Traverse all elements and create one memref.store per element.
    auto elementIt = fromElementsOp.getElements().begin();
    SmallVector<Value, 2> indices(tensorType.getRank(), constants[0]);
    createStores(rewriter, loc, /*dim=*/0, buffer, shape, constants, elementIt,
                 indices);
    replaceOpWithBufferizedValues(rewriter, op, buffer);
    return success();
  }
};
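
// Illustrative example (hypothetical IR):
//   %0 = tensor.from_elements %a, %b : tensor<2xf32>
// becomes, roughly, an allocation plus one store per element:
//   %m = ... (buffer backing the allocated tensor)
//   memref.store %a, %m[%c0] : memref<2xf32>
//   memref.store %b, %m[%c1] : memref<2xf32>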
/// Lower the body of a tensor.generate-like op (one index-typed block
/// argument per dimension) into a linalg.map that writes into
/// `tensorDestination`.
static Value lowerGenerateLikeOpBody(RewriterBase &rewriter, Location loc,
                                     Value tensorDestination,
                                     ValueRange dynamicSizes,
                                     Region &generateBody) {
  assert(generateBody.hasOneBlock() && "expected body with single block");
  auto tensorType = cast<RankedTensorType>(tensorDestination.getType());

  // Create a linalg.map without inputs; the destination is the init operand.
  OpBuilder::InsertionGuard g(rewriter);
  auto linalgOp =
      rewriter.create<linalg::MapOp>(loc, tensorType, /*inputs=*/ValueRange(),
                                     /*init=*/tensorDestination);
  Block &linalgBody = linalgOp.getMapper().emplaceBlock();

  // Create one linalg.index per dimension...
  rewriter.setInsertionPointToStart(&linalgBody);
  SmallVector<Value> indices;
  for (int64_t dim = 0; dim < tensorType.getRank(); ++dim)
    indices.push_back(rewriter.create<linalg::IndexOp>(loc, dim));

  // ...and inline the generate body, substituting the indices for its block
  // arguments and replacing its tensor.yield with a linalg.yield.
  rewriter.mergeBlocks(&generateBody.front(), &linalgBody, indices);
  auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
  rewriter.replaceOpWithNewOp<linalg::YieldOp>(yieldOp, yieldOp.getValue());

  return linalgOp.getResult()[0];
}
/// Bufferization of tensor.generate. Replaced with an allocation that is
/// filled by a linalg.map built from the generate body.
struct GenerateOpInterface
    : public BufferizableOpInterface::ExternalModel<GenerateOpInterface,
                                                    tensor::GenerateOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto generateOp = cast<tensor::GenerateOp>(op);
    auto type = generateOp.getResult().getType();

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(type) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Allocate memory for the result.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, generateOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    Value result = lowerGenerateLikeOpBody(rewriter, loc, *tensorAlloc,
                                           generateOp.getDynamicExtents(),
                                           generateOp.getBody());
    rewriter.replaceOp(generateOp, result);
    return success();
  }
};
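
// Illustrative example (hypothetical IR):
//   %0 = tensor.generate {
//   ^bb0(%i: index):
//     ...
//     tensor.yield %v : f32
//   } : tensor<4xf32>
// becomes an allocation whose elements are computed by a linalg.map that
// reuses the generate body, with linalg.index ops supplying %i.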
/// Bufferization of tensor.insert. Replaced with memref.store.
struct InsertOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertOpInterface,
                                                     tensor::InsertOp> {
  // ...
    auto insertOp = cast<tensor::InsertOp>(op);
    FailureOr<Value> destMemref =
        getBuffer(rewriter, insertOp.getDest(), options);
    if (failed(destMemref))
      return failure();
    rewriter.create<memref::StoreOp>(insertOp.getLoc(), insertOp.getScalar(),
                                     *destMemref, insertOp.getIndices());
    replaceOpWithBufferizedValues(rewriter, op, *destMemref);
  // ...
};
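
// Illustrative example (hypothetical IR):
//   %0 = tensor.insert %s into %t[%i] : tensor<8xf32>
// becomes a store into the (possibly copied) destination buffer:
//   memref.store %s, %b[%i] : memref<8xf32>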
/// Return true if the given insert_slice-like op reads `opOperand`.
template <typename InsertOpTy>
static bool insertSliceOpRequiresRead(InsertOpTy insertSliceOp,
                                      OpOperand &opOperand) {
  // The source is always read.
  if (opOperand == insertSliceOp.getSourceMutable())
    return true;

  // For the destination, it depends on whether it is entirely overwritten.
  assert(opOperand == insertSliceOp.getDestMutable() && "expected dest");

  // The destination is not read if the slice is a full overwrite: all
  // offsets are 0, all strides are 1, and the sizes match the dest shape.
  bool allOffsetsZero =
      llvm::all_of(insertSliceOp.getMixedOffsets(), isZeroIndex);
  RankedTensorType destType = insertSliceOp.getDestType();
  bool sizesMatchDestSizes =
      areConstantIntValues(insertSliceOp.getMixedSizes(), destType.getShape());
  bool allStridesOne =
      areAllConstantIntValue(insertSliceOp.getMixedStrides(), 1);
  return !(allOffsetsZero && sizesMatchDestSizes && allStridesOne);
}
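
// Illustrative example (hypothetical IR): the destination of
//   %0 = tensor.insert_slice %src into %dst[0] [10] [1]
//       : tensor<10xf32> into tensor<10xf32>
// is completely overwritten, so %dst need not be read. With a nonzero offset
// or a partial size, the untouched parts of %dst survive into the result and
// the destination counts as read.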
/// Bufferization of tensor.insert_slice. Replaced with a memory copy; under
/// certain circumstances, the op can also be a no-op.
struct InsertSliceOpInterface
    : public DstBufferizableOpInterfaceExternalModel<InsertSliceOpInterface,
                                                     tensor::InsertSliceOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
                                     opOperand);
  }

  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
    SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
    SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
    Location loc = insertSliceOp.getLoc();

    // Get the destination buffer.
    FailureOr<Value> dstMemref =
        getBuffer(rewriter, insertSliceOp.getDest(), options);
    if (failed(dstMemref))
      return failure();

    // Take a subview of the destination buffer.
    auto dstMemrefType = cast<MemRefType>(dstMemref->getType());
    auto subviewMemRefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            insertSliceOp.getSourceType().getShape(), dstMemrefType,
            mixedOffsets, mixedSizes, mixedStrides));
    Value subView = rewriter.create<memref::SubViewOp>(
        loc, subviewMemRefType, *dstMemref, mixedOffsets, mixedSizes,
        mixedStrides);

    // Copy the source buffer into the destination subview.
    FailureOr<Value> srcMemref =
        getBuffer(rewriter, insertSliceOp.getSource(), options);
    if (failed(srcMemref))
      return failure();
    if (failed(options.createMemCpy(rewriter, loc, *srcMemref, subView)))
      return failure();

    replaceOpWithBufferizedValues(rewriter, op, *dstMemref);
    return success();
  }
};
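
// Illustrative example (hypothetical IR):
//   %0 = tensor.insert_slice %src into %dst[2] [4] [1]
//       : tensor<4xf32> into tensor<8xf32>
// becomes a subview of the destination buffer plus a copy, roughly:
//   %sv = memref.subview %d[2] [4] [1]
//       : memref<8xf32> to memref<4xf32, strided<[1], offset: 2>>
//   memref.copy %s, %sv
// (the exact copy op depends on options.createMemCpy).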
/// Bufferization of tensor.pad. Replaced by an allocation that is filled
/// with the pad body (via linalg.map) plus a tensor.insert_slice of the
/// source.
struct PadOpInterface
    : public BufferizableOpInterface::ExternalModel<PadOpInterface,
                                                    tensor::PadOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  // ...

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    // Infer the memory space from the source tensor.
    auto padOp = cast<tensor::PadOp>(op);
    auto maybeSrcBufferType = bufferization::getBufferType(
        padOp.getSource(), options, invocationStack);
    if (failed(maybeSrcBufferType))
      return failure();
    MemRefLayoutAttrInterface layout;
    return MemRefType::get(padOp.getResultType().getShape(),
                           padOp.getResultType().getElementType(), layout,
                           maybeSrcBufferType->getMemorySpace());
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto padOp = cast<tensor::PadOp>(op);
    Location loc = padOp.getLoc();
    RankedTensorType resultType = padOp.getResultType();
    RankedTensorType srcType = padOp.getSourceType();

    auto toValue = [&](OpFoldResult ofr) {
      if (auto value = dyn_cast<Value>(ofr))
        return value;
      return rewriter
          .create<arith::ConstantIndexOp>(loc, *getConstantIntValue(ofr))
          .getResult();
    };

    // Compute dynamic result dimensions: size = src_size + low_pad + high_pad.
    SmallVector<OpFoldResult> mixedLowPad = padOp.getMixedLowPad();
    SmallVector<OpFoldResult> mixedHighPad = padOp.getMixedHighPad();
    SmallVector<Value> dynamicSizes;
    for (int64_t i = 0; i < resultType.getRank(); ++i) {
      if (!resultType.isDynamicDim(i))
        continue;
      Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
      Value lowPad = toValue(mixedLowPad[i]);
      Value highPad = toValue(mixedHighPad[i]);
      AffineExpr s0, s1, s2;
      bindSymbols(op->getContext(), s0, s1, s2);
      AffineExpr sumExpr = s0 + s1 + s2;
      Value sum = rewriter.create<affine::AffineApplyOp>(
          loc, sumExpr, ValueRange{srcDim, lowPad, highPad});
      dynamicSizes.push_back(sum);
    }

    // Allocate a buffer for the padded result.
    FailureOr<Value> tensorAlloc =
        allocateTensorForShapedValue(rewriter, loc, padOp.getResult(), options,
                                     /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    // tensor.pad is like tensor.generate: lower its body to a linalg.map
    // that fills the new allocation.
    Value filledBuffer = lowerGenerateLikeOpBody(
        rewriter, loc, *tensorAlloc, dynamicSizes, padOp.getBodyRegion());

    // Insert the source tensor into the filled buffer at the low-pad offsets.
    SmallVector<OpFoldResult> sliceSizes =
        getMixedSizes(rewriter, loc, padOp.getSource());
    SmallVector<OpFoldResult> sliceStrides(srcType.getRank(),
                                           rewriter.getIndexAttr(1));
    rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
        padOp, padOp.getSource(), filledBuffer,
        /*offsets=*/padOp.getMixedLowPad(), sliceSizes, sliceStrides);
    return success();
  }
};
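
// Illustrative example (hypothetical IR):
//   %0 = tensor.pad %t low[1] high[2] {
//   ^bb0(%i: index):
//     tensor.yield %cst : f32
//   } : tensor<4xf32> to tensor<7xf32>
// becomes: allocate a 7-element tensor, fill it via a linalg.map built from
// the pad body, then tensor.insert_slice %t at offset 1 (the low pad) with
// stride 1.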
/// Bufferization of tensor.rank. Replaced with memref.rank.
struct RankOpInterface
    : public BufferizableOpInterface::ExternalModel<RankOpInterface,
                                                    tensor::RankOp> {
  // ...
    auto rankOp = cast<tensor::RankOp>(op);
    FailureOr<Value> v = getBuffer(rewriter, rankOp.getTensor(), options);
    // ...
    replaceOpWithNewBufferizedOp<memref::RankOp>(rewriter, op, rankOp.getType(),
                                                 *v);
  // ...
};
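
// Illustrative example (hypothetical IR):
//   %r = tensor.rank %t : tensor<*xf32>
// becomes
//   %r = memref.rank %b : memref<*xf32>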
/// Bufferization of tensor.reshape. Replaced with memref.reshape.
struct ReshapeOpInterface
    : public BufferizableOpInterface::ExternalModel<ReshapeOpInterface,
                                                    tensor::ReshapeOp> {
  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    return opOperand == reshapeOp.getShapeMutable();
  }

  // ...

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, reshapeOp.getSource(), options);
    FailureOr<Value> shapeBuffer =
        getBuffer(rewriter, reshapeOp.getShape(), options);
    if (failed(srcBuffer) || failed(shapeBuffer))
      return failure();
    auto maybeResultMemRefType =
        bufferization::getBufferType(reshapeOp.getResult(), options);
    if (failed(maybeResultMemRefType))
      return failure();

    // memref.reshape requires the source buffer to have an identity layout.
    // If the source memref does not have one, copy the source into a new
    // buffer with an identity layout first.
    auto srcType = llvm::dyn_cast<MemRefType>(srcBuffer->getType());
    if (srcType && !srcType.getLayout().isIdentity()) {
      FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
          rewriter, op->getLoc(), reshapeOp.getSource(), options);
      if (failed(tensorAlloc))
        return failure();
      auto memrefType = MemRefType::get(
          srcType.getShape(), srcType.getElementType(), AffineMap(),
          cast<BaseMemRefType>(srcBuffer->getType()).getMemorySpace());
      srcBuffer = rewriter
                      .create<bufferization::ToMemrefOp>(
                          op->getLoc(), memrefType, *tensorAlloc)
                      .getResult();
    }

    replaceOpWithNewBufferizedOp<memref::ReshapeOp>(
        rewriter, op, maybeResultMemRefType.value(), *srcBuffer, *shapeBuffer);
    return success();
  }

  FailureOr<BaseMemRefType>
  getBufferType(Operation *op, Value value,
                const BufferizationOptions &options,
                SmallVector<Value> &invocationStack) const {
    auto reshapeOp = cast<tensor::ReshapeOp>(op);
    assert(value == reshapeOp.getResult() && "unexpected value provided");
    auto maybeSourceBufferType = bufferization::getBufferType(
        reshapeOp.getSource(), options, invocationStack);
    if (failed(maybeSourceBufferType))
      return failure();
    return getMemRefTypeWithStaticIdentityLayout(
        reshapeOp.getResult().getType(),
        cast<BaseMemRefType>(maybeSourceBufferType.value()).getMemorySpace());
  }
};
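
// Illustrative example (hypothetical IR):
//   %0 = tensor.reshape %t(%shape)
//       : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>
// becomes a memref.reshape of the source buffer; if that buffer has a
// non-identity layout, it is first copied into a contiguous allocation,
// since memref.reshape requires an identity layout.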
/// Bufferization of tensor.parallel_insert_slice.
struct ParallelInsertSliceOpInterface
    : public BufferizableOpInterface::ExternalModel<
          ParallelInsertSliceOpInterface, ParallelInsertSliceOp> {
  // ...

  bool bufferizesToMemoryRead(Operation *op, OpOperand &opOperand,
                              const AnalysisState &state) const {
    return insertSliceOpRequiresRead(cast<tensor::ParallelInsertSliceOp>(op),
                                     opOperand);
  }

  bool bufferizesToMemoryWrite(Operation *op, OpOperand &opOperand,
                               const AnalysisState &state) const {
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    return opOperand == parallelInsertSliceOp.getDestMutable();
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto parallelInsertSliceOp = cast<ParallelInsertSliceOp>(op);
    ParallelCombiningOpInterface parallelCombiningParent =
        parallelInsertSliceOp.getParallelCombiningParent();

    // Bufferize the op outside of the parallel combining terminator.
    rewriter.setInsertionPoint(parallelCombiningParent);

    // Get the source and destination buffers.
    FailureOr<Value> destBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getDest(), options);
    if (failed(destBuffer))
      return failure();
    FailureOr<Value> srcBuffer =
        getBuffer(rewriter, parallelInsertSliceOp.getSource(), options);
    if (failed(srcBuffer))
      return failure();

    // Take a subview of the destination buffer.
    auto destBufferType = cast<MemRefType>(destBuffer->getType());
    auto subviewMemRefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            parallelInsertSliceOp.getSourceType().getShape(), destBufferType,
            parallelInsertSliceOp.getMixedOffsets(),
            parallelInsertSliceOp.getMixedSizes(),
            parallelInsertSliceOp.getMixedStrides()));
    Value subview = rewriter.create<memref::SubViewOp>(
        parallelInsertSliceOp.getLoc(), subviewMemRefType, *destBuffer,
        parallelInsertSliceOp.getMixedOffsets(),
        parallelInsertSliceOp.getMixedSizes(),
        parallelInsertSliceOp.getMixedStrides());

    // This memcpy will fold away if everything bufferizes in-place.
    if (failed(options.createMemCpy(rewriter, parallelInsertSliceOp.getLoc(),
                                    *srcBuffer, subview)))
      return failure();

    // If a deallocation of the source buffer ended up in the same block as
    // the parallel combining op, move it down past the copy, right before
    // the block's terminator.
    for (Operation *user : srcBuffer->getUsers()) {
      if (hasEffect<MemoryEffects::Free>(user)) {
        if (user->getBlock() == parallelCombiningParent->getBlock())
          rewriter.moveOpBefore(user, user->getBlock()->getTerminator());
      }
    }

    rewriter.eraseOp(op);
    return success();
  }
  // ...
};
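
// Illustrative example (hypothetical IR): inside an scf.forall,
//   scf.forall.in_parallel {
//     tensor.parallel_insert_slice %src into %dst[%off] [4] [1]
//         : tensor<4xf32> into tensor<?xf32>
//   }
// bufferizes to a memref.subview of the shared destination buffer plus a
// per-thread copy, emitted just before the in_parallel terminator.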
/// Bufferization of tensor.splat. Bufferizes to a new allocation that is
/// filled by a linalg.map yielding the splat value for every element.
struct SplatOpInterface
    : public BufferizableOpInterface::ExternalModel<SplatOpInterface,
                                                    tensor::SplatOp> {
  bool bufferizesToAllocation(Operation *op, Value value) const {
    return true;
  }

  LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                          const BufferizationOptions &options) const {
    OpBuilder::InsertionGuard g(rewriter);
    auto splatOp = cast<tensor::SplatOp>(op);

    // Allocate memory for the result.
    Location loc = op->getLoc();
    FailureOr<Value> tensorAlloc = allocateTensorForShapedValue(
        rewriter, loc, splatOp.getResult(), options,
        /*copy=*/false);
    if (failed(tensorAlloc))
      return failure();

    auto tensorType = cast<RankedTensorType>(tensorAlloc->getType());

    // TODO: Implement memory space for this op.
    if (options.defaultMemorySpaceFn(tensorType) != Attribute())
      return op->emitError("memory space not implemented yet");

    // Create a linalg.map whose body simply yields the splat value.
    auto linalgOp = rewriter.create<linalg::MapOp>(loc, tensorType,
                                                   /*inputs=*/ValueRange(),
                                                   /*init=*/*tensorAlloc);
    Block &linalgBody = linalgOp.getMapper().emplaceBlock();
    rewriter.setInsertionPointToStart(&linalgBody);
    rewriter.create<linalg::YieldOp>(loc, splatOp.getInput());
    rewriter.replaceOp(splatOp, linalgOp.getResult()[0]);
    return success();
  }
};
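
// Illustrative example (hypothetical IR):
//   %0 = tensor.splat %v : tensor<4xf32>
// becomes an allocation filled by a linalg.map whose body just yields %v.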
void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
    CastOp::attachInterface<CastOpInterface>(*ctx);
    CollapseShapeOp::attachInterface<CollapseShapeOpInterface>(*ctx);
    DimOp::attachInterface<DimOpInterface>(*ctx);
    EmptyOp::attachInterface<EmptyOpInterface>(*ctx);
    ExpandShapeOp::attachInterface<ExpandShapeOpInterface>(*ctx);
    ExtractSliceOp::attachInterface<ExtractSliceOpInterface>(*ctx);
    ExtractOp::attachInterface<ExtractOpInterface>(*ctx);
    FromElementsOp::attachInterface<FromElementsOpInterface>(*ctx);
    GenerateOp::attachInterface<GenerateOpInterface>(*ctx);
    InsertOp::attachInterface<InsertOpInterface>(*ctx);
    InsertSliceOp::attachInterface<InsertSliceOpInterface>(*ctx);
    PadOp::attachInterface<PadOpInterface>(*ctx);
    ParallelInsertSliceOp::attachInterface<ParallelInsertSliceOpInterface>(
        *ctx);
    RankOp::attachInterface<RankOpInterface>(*ctx);
    ReshapeOp::attachInterface<ReshapeOpInterface>(*ctx);
    SplatOp::attachInterface<SplatOpInterface>(*ctx);

    // Load additional dialects of which ops may get created.
    ctx->loadDialect<arith::ArithDialect, linalg::LinalgDialect>();
  });

  // Bufferization requires SubsetInsertionOpInterface models. Make sure that
  // they are registered.
  tensor::registerSubsetOpInterfaceExternalModels(registry);
}
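
// A minimal usage sketch (not part of the original source; assumes a
// standalone tool that sets up its own MLIRContext): the external models
// above must be registered before running one-shot bufferization, e.g.
//
//   mlir::DialectRegistry registry;
//   registry.insert<mlir::tensor::TensorDialect,
//                   mlir::memref::MemRefDialect>();
//   mlir::tensor::registerBufferizableOpInterfaceExternalModels(registry);
//   mlir::MLIRContext context(registry);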