23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/Support/MathExtras.h"
namespace mlir {
namespace nvgpu {
#define GEN_PASS_DEF_OPTIMIZESHAREDMEMORY
#include "mlir/Dialect/NVGPU/Transforms/Passes.h.inc"
} // namespace nvgpu
} // namespace mlir

using namespace mlir;
using namespace mlir::nvgpu;

/// The size of a shared memory line according to NV documentation.
constexpr int64_t kSharedMemoryLineSizeBytes = 128;
/// We optimize for 128bit accesses, but this can be made an argument in the
/// future.
constexpr int64_t kDefaultVectorSizeBits = 128;
/// Uses `srcIndexValue` to permute `tgtIndexValue` via
///   `result = xor(floordiv(srcIdxVal, permuteEveryN),
///                 floordiv(tgtIdxVal, vectorSize)) + tgtIdxVal % vectorSize`
/// This is done using an optimized sequence of `arith` operations.
static Value permuteVectorOffset(OpBuilder &b, Location loc,
                                 ArrayRef<Value> indices, MemRefType memrefTy,
                                 int64_t srcDim, int64_t tgtDim) {
  // Adjust the src index to change how often the permutation changes
  // if necessary.
  Value src = indices[srcDim];

  // We only want to permute every N iterations of the target dim where N is
  // ceil(sharedMemoryLineSizeBytes / dimSizeBytes(tgtDim)).
  const int64_t permuteEveryN = std::max<int64_t>(
      1, kSharedMemoryLineSizeBytes / ((memrefTy.getDimSize(tgtDim) *
                                        memrefTy.getElementTypeBitWidth()) /
                                       8));

  // Index bit representation (b0 = least significant bit) for dim(1)
  // of a `memref<?x?xDT>` is as follows:
  //   N := log2(128 / elementSizeBits)
  //   M := log2(dimSize(1))
  //   bits[0:N] = sub-vector element offset
  //   bits[N:M] = vector index
  int64_t n =
      llvm::Log2_64(kDefaultVectorSizeBits / memrefTy.getElementTypeBitWidth());
  int64_t m = llvm::Log2_64(memrefTy.getDimSize(tgtDim));

  // Capture bits[0:(M-N)] of src by first creating a (M-N) mask.
  int64_t mask = (1LL << (m - n)) - 1;
  if (permuteEveryN > 1)
    mask = mask << llvm::Log2_64(permuteEveryN);
  Value srcBits = b.create<arith::ConstantIndexOp>(loc, mask);
  srcBits = b.create<arith::AndIOp>(loc, src, srcBits);

  // Use the src bits to permute the target bits b[N:M] containing the
  // vector offset.
  if (permuteEveryN > 1) {
    int64_t shlBits = n - llvm::Log2_64(permuteEveryN);
    if (shlBits > 0) {
      Value finalShiftVal = b.create<arith::ConstantIndexOp>(loc, shlBits);
      srcBits = b.createOrFold<arith::ShLIOp>(loc, srcBits, finalShiftVal);
    } else if (shlBits < 0) {
      Value finalShiftVal = b.create<arith::ConstantIndexOp>(loc, -1 * shlBits);
      srcBits = b.createOrFold<arith::ShRUIOp>(loc, srcBits, finalShiftVal);
    }
  } else {
    Value finalShiftVal = b.create<arith::ConstantIndexOp>(loc, n);
    srcBits = b.createOrFold<arith::ShLIOp>(loc, srcBits, finalShiftVal);
  }

  Value permutedVectorIdx =
      b.create<arith::XOrIOp>(loc, indices[tgtDim], srcBits);
  return permutedVectorIdx;
}
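// A hypothetical worked example of the permutation above, assuming a
// `memref<32x32xf16>` in shared memory (the numbers below are derived from
// the constants in this file, not taken from the original source):
//   dimSizeBytes(tgtDim) = 32 * 16 / 8 = 64B, so permuteEveryN = 128/64 = 2
//   n = log2(128/16) = 3, m = log2(32) = 5
//   mask = ((1 << (5 - 3)) - 1) << log2(2) = 0b110
//   shlBits = 3 - log2(2) = 2, so srcBits = (row & 0b110) << 2
//   col' = col ^ srcBits
// That is, bits 1-2 of the row index are XOR'd into bits 3-4 of the column
// index, which are exactly its 128-bit vector-index bits, so the swizzle
// pattern changes every two rows (one 128B shared memory line).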
static void transformIndices(OpBuilder &builder, Location loc,
                             SmallVector<Value, 4> &indices,
                             MemRefType memrefTy, int64_t srcDim,
                             int64_t tgtDim) {
  indices[tgtDim] =
      permuteVectorOffset(builder, loc, indices, memrefTy, srcDim, tgtDim);
}
/// Return all operations within `parentOp` that read from or write to
/// `shmMemRef`.
static LogicalResult
getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
                      SmallVector<Operation *, 16> &readOps,
                      SmallVector<Operation *, 16> &writeOps) {
  parentOp->walk([&](Operation *op) {
    MemoryEffectOpInterface iface = dyn_cast<MemoryEffectOpInterface>(op);
    if (!iface)
      return;
    std::optional<MemoryEffects::EffectInstance> effect =
        iface.getEffectOnValue<MemoryEffects::Read>(shmMemRef);
    if (effect) {
      readOps.push_back(op);
      return;
    }
    effect = iface.getEffectOnValue<MemoryEffects::Write>(shmMemRef);
    if (effect)
      writeOps.push_back(op);
  });

  // Restrict to a supported set of ops. We also require at least 2D access,
  // although this could be relaxed.
  if (llvm::any_of(readOps, [](Operation *op) {
        return !isa<memref::LoadOp, vector::LoadOp, nvgpu::LdMatrixOp>(op) ||
               getIndices(op).size() < 2;
      }))
    return failure();
  if (llvm::any_of(writeOps, [](Operation *op) {
        return !isa<memref::StoreOp, vector::StoreOp, nvgpu::DeviceAsyncCopyOp>(
                   op) ||
               getIndices(op).size() < 2;
      }))
    return failure();

  return success();
}
LogicalResult
mlir::nvgpu::optimizeSharedMemoryReadsAndWrites(Operation *parentOp,
                                                Value memrefValue) {
  auto memRefType = dyn_cast<MemRefType>(memrefValue.getType());
  if (!memRefType || !NVGPUDialect::hasSharedMemoryAddressSpace(memRefType))
    return failure();

  // Abort if the given value has any sub-views; we do not do any alias
  // analysis.
  bool hasSubView = false;
  parentOp->walk([&](memref::SubViewOp subView) { hasSubView = true; });
  if (hasSubView)
    return failure();
  // Check if this is necessary given the assumption of 128b accesses:
  // if dim[rank-1] is small enough to fit 8 rows in a 128B line.
  const int64_t rowSize = memRefType.getDimSize(memRefType.getRank() - 1);
  const int64_t rowsPerLine =
      (8 * kSharedMemoryLineSizeBytes / memRefType.getElementTypeBitWidth()) /
      rowSize;
  const int64_t threadGroupSize =
      1LL << (7 - llvm::Log2_64(kDefaultVectorSizeBits / 8));
  if (rowsPerLine >= threadGroupSize)
    return failure();

  // Get sets of operations within the function that read/write to shared
  // memory.
  SmallVector<Operation *, 16> shmReadOps;
  SmallVector<Operation *, 16> shmWriteOps;
  if (failed(getShmReadAndWriteOps(parentOp, memrefValue, shmReadOps,
                                   shmWriteOps)))
    return failure();
  if (shmReadOps.empty() || shmWriteOps.empty())
    return failure();
  OpBuilder builder(parentOp->getContext());

  int64_t tgtDim = memRefType.getRank() - 1;
  int64_t srcDim = memRefType.getRank() - 2;
  // Transform indices for the ops writing to shared memory.
  while (!shmWriteOps.empty()) {
    Operation *shmWriteOp = shmWriteOps.back();
    shmWriteOps.pop_back();
    builder.setInsertionPoint(shmWriteOp);

    auto indices = getIndices(shmWriteOp);
    SmallVector<Value, 4> transformedIndices(indices.begin(), indices.end());
    transformIndices(builder, shmWriteOp->getLoc(), transformedIndices,
                     memRefType, srcDim, tgtDim);
    setIndices(shmWriteOp, transformedIndices);
  }
  // Transform indices for the ops reading from shared memory.
  while (!shmReadOps.empty()) {
    Operation *shmReadOp = shmReadOps.back();
    shmReadOps.pop_back();
    builder.setInsertionPoint(shmReadOp);

    auto indices = getIndices(shmReadOp);
    SmallVector<Value, 4> transformedIndices(indices.begin(), indices.end());
    transformIndices(builder, shmReadOp->getLoc(), transformedIndices,
                     memRefType, srcDim, tgtDim);
    setIndices(shmReadOp, transformedIndices);
  }

  return success();
}
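// As an illustration (a sketch of the rewrite, not IR reproduced from the
// pass, and with the shared-memory space attribute elided), a 2-D access
// such as
//   %v = memref.load %shm[%row, %col] : memref<32x32xf16, ...>
// has its trailing index XOR-permuted in place:
//   %mask  = arith.constant <mask> : index
//   %bits  = arith.andi %row, %mask : index
//   ...shift %bits as computed in permuteVectorOffset...
//   %swz   = arith.xori %col, %bits : index
//   %v = memref.load %shm[%row, %swz] : memref<32x32xf16, ...>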
namespace {
class OptimizeSharedMemoryPass
    : public nvgpu::impl::OptimizeSharedMemoryBase<OptimizeSharedMemoryPass> {
public:
  OptimizeSharedMemoryPass() = default;

  void runOnOperation() override {
    Operation *op = getOperation();
    SmallVector<memref::AllocOp> shmAllocOps;
    op->walk([&](memref::AllocOp allocOp) {
      if (!NVGPUDialect::hasSharedMemoryAddressSpace(allocOp.getType()))
        return;
      shmAllocOps.push_back(allocOp);
    });
    for (auto allocOp : shmAllocOps) {
      if (failed(optimizeSharedMemoryReadsAndWrites(getOperation(),
                                                    allocOp.getMemref())))
        return;
    }
  }
};
} // namespace
std::unique_ptr<Pass> mlir::nvgpu::createOptimizeSharedMemoryPass() {
  return std::make_unique<OptimizeSharedMemoryPass>();
}
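// Usage sketch, assuming the standard registration of this pass under the
// `nvgpu-optimize-shared-memory` argument in Passes.td:
//
//   mlir-opt --nvgpu-optimize-shared-memory input.mlir
//
// or, when building a pipeline programmatically:
//
//   pm.addPass(mlir::nvgpu::createOptimizeSharedMemoryPass());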