23 if (!type.hasStaticShape())
33 int64_t runningStride = 1;
34 int64_t curDim = strides.size() - 1;
36 while (curDim >= 0 && strides[curDim] == runningStride) {
37 runningStride *= type.getDimSize(curDim);
42 while (curDim >= 0 && type.getDimSize(curDim) == 1) {
54 unsigned sourceRank = sizes.size();
55 assert(sizes.size() == strides.size() &&
56 "expected as many sizes as strides for a memref");
60 assert(indicesVec.size() == strides.size() &&
61 "expected as many indices as rank of memref");
72 for (
unsigned i = 0; i < sourceRank; ++i) {
73 unsigned offsetIdx = 2 * i;
74 addMulMap = addMulMap + symbols[offsetIdx] * symbols[offsetIdx + 1];
75 offsetValues[offsetIdx] = indicesVec[i];
76 offsetValues[offsetIdx + 1] = strides[i];
78 mulMap = mulMap * symbols[i];
83 int64_t scaler = dstBits / srcBits;
84 addMulMap = addMulMap.
floorDiv(scaler);
88 builder, loc, addMulMap, offsetValues);
96 builder, loc, s0.
floorDiv(scaler), {offset});
98 return {{adjustBaseOffset, linearizedSize}, linearizedIndices};
106 if (!sizes.empty()) {
110 for (
int index = sizes.size() - 1; index > 0; --index) {
112 builder, loc, s0 * s1,
118 std::tie(linearizedMemRefInfo, std::ignore) =
121 return linearizedMemRefInfo;
129 std::vector<Operation *> opUses;
132 if (isa<memref::DeallocOp>(useOp) ||
134 !mlir::hasEffect<MemoryEffects::Read>(useOp)) ||
136 opUses.push_back(useOp);
141 uses.insert(uses.end(), opUses.begin(), opUses.end());
146 std::vector<Operation *> opToErase;
147 parentOp->
walk([&](memref::AllocOp op) {
148 std::vector<Operation *> candidates;
150 opToErase.insert(opToErase.end(), candidates.begin(), candidates.end());
151 opToErase.push_back(op.getOperation());
Base type for affine expression.
AffineExpr floorDiv(uint64_t v) const
IntegerAttr getIndexAttr(int64_t value)
AffineExpr getAffineConstantExpr(int64_t constant)
MLIRContext * getContext() const
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around a LocationAttr.
This class helps build Operations.
This class represents a single result from folding an operation.
This class represents an operand of an operation.
Operation is the basic unit of execution within MLIR.
std::enable_if_t< llvm::function_traits< std::decay_t< FnT > >::num_args==1, RetT > walk(FnT &&callback)
Walk the operation by calling the callback for each nested operation (including this one).
unsigned getNumRegions()
Returns the number of regions held by this operation.
unsigned getNumResults()
Return the number of results held by this operation.
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to track mutations and create new operations.
virtual void eraseOp(Operation *op)
This method erases an operation that is known to have no uses.
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
Constructs an AffineApplyOp that applies map to operands after composing the map with the maps of any other AffineApplyOp supplying those operands.
static bool resultIsNotRead(Operation *op, std::vector< Operation * > &uses)
Returns true if none of the uses of op are reads/loads.
void eraseDeadAllocAndStores(RewriterBase &rewriter, Operation *parentOp)
std::pair< LinearizedMemRefInfo, OpFoldResult > getLinearizedMemRefOffsetAndSize(OpBuilder &builder, Location loc, int srcBits, int dstBits, OpFoldResult offset, ArrayRef< OpFoldResult > sizes, ArrayRef< OpFoldResult > strides, ArrayRef< OpFoldResult > indices={})
bool isStaticShapeAndContiguousRowMajor(MemRefType type)
Returns true if the memref type has static shapes and represents a contiguous chunk of memory.
Include the generated interface declarations.
LogicalResult getStridesAndOffset(MemRefType t, SmallVectorImpl< int64_t > &strides, int64_t &offset)
Returns the strides of the MemRef if the layout map is in strided form.
void bindSymbols(MLIRContext *ctx, AffineExprTy &...exprs)
Bind a list of AffineExpr references to SymbolExpr at positions: [0 .. sizeof...(exprs)).
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value.
void bindSymbolsList(MLIRContext *ctx, MutableArrayRef< AffineExprTy > exprs)
For a memref with offset, sizes and strides, returns the offset and size to use for the linearized memref.