#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/DebugLog.h"

#define DEBUG_TYPE "loops-to-gpu"
llvm_unreachable("dim3 position out of bounds");
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {
  return forOp.getLowerBoundOperands();
}

static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {
  return forOp.getUpperBoundOperands();
}

// Get a Value that corresponds to the loop step, materialized as a constant.
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {
  return arith::ConstantIndexOp::create(builder, forOp.getLoc(),
                                        forOp.getStepAsInt());
}
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp,
                                                     unsigned numDims) {
  Region &limit = forOp.getRegion();
  for (unsigned i = 0, e = numDims; i < e; ++i) {
    Operation *nested = &forOp.getBody()->front();
    if (!areValuesDefinedAbove(getLowerBoundOperands(forOp), limit) ||
        !areValuesDefinedAbove(getUpperBoundOperands(forOp), limit))
      return forOp.emitError(
          "loops with bounds depending on other mapped loops "
          "are not supported");

    // The innermost loop does not need to contain a further nested loop.
    if (i == e - 1)
      break;

    // Otherwise, the body must contain exactly the nested loop and the
    // terminator.
    auto begin = forOp.getBody()->begin(), end = forOp.getBody()->end();
    if (forOp.getBody()->empty() || std::next(begin, 2) != end)
      return forOp.emitError("expected perfectly nested loops in the body");

    if (!(forOp = dyn_cast<AffineForOp>(nested)))
      return nested->emitError("expected a nested loop");
  }
  return success();
}
static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp,
                                                 unsigned numBlockDims,
                                                 unsigned numThreadDims) {
  if (numBlockDims < 1 || numThreadDims < 1) {
    LDBG() << "nothing to map";
    return success();
  }

  if (numBlockDims > 3) {
    return forOp.emitError("cannot map to more than 3 block dimensions");
  }
  if (numThreadDims > 3) {
    return forOp.emitError("cannot map to more than 3 thread dimensions");
  }
  return checkAffineLoopNestMappableImpl(forOp, numBlockDims + numThreadDims);
}
// Helper structure that holds common state of the loop-to-GPU-kernel
// conversion.
struct AffineLoopToGpuConverter {
  std::optional<AffineForOp> collectBounds(AffineForOp forOp,
                                           unsigned numLoops);

  void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
                    unsigned numBlockDims, unsigned numThreadDims);

  // Ranges of the loops in the given nest.
  SmallVector<Value, 6> dims;
  // Lower bounds of the loops in the given nest.
  SmallVector<Value, 6> lbs;
  // Induction variables of the loops in the given nest.
  SmallVector<Value, 6> ivs;
  // Steps of the loops in the given nest.
  SmallVector<Value, 6> steps;
};
std::optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
  OpBuilder builder(forOp.getOperation());
  dims.reserve(numLoops);
  lbs.reserve(numLoops);
  ivs.reserve(numLoops);
  steps.reserve(numLoops);
  AffineForOp currentLoop = forOp;
  for (unsigned i = 0; i < numLoops; ++i) {
    Value lowerBound = getOrEmitLowerBound(currentLoop, builder);
    Value upperBound = getOrEmitUpperBound(currentLoop, builder);
    if (!lowerBound || !upperBound) {
      return std::nullopt;
    }

    Value range = arith::SubIOp::create(builder, currentLoop.getLoc(),
                                        upperBound, lowerBound);
    Value step = getOrCreateStep(currentLoop, builder);
    // Divide by the step unless it is statically known to be one.
    if (getConstantIntValue(step) != static_cast<int64_t>(1))
      range = arith::CeilDivSIOp::create(builder, currentLoop.getLoc(), range,
                                         step);
    dims.push_back(range);

    lbs.push_back(lowerBound);
    ivs.push_back(currentLoop.getInductionVar());
    steps.push_back(step);

    if (i != numLoops - 1)
      currentLoop = cast<AffineForOp>(&currentLoop.getBody()->front());
  }
  return currentLoop;
}
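// Worked example (illustrative): for `affine.for %i = 2 to %N step 3`, the
// code above emits roughly
//   %diff  = arith.subi %N, %c2 : index
//   %range = arith.ceildivsi %diff, %c3 : index
// so the corresponding launch dimension becomes ceil((N - 2) / 3) iterations.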
void AffineLoopToGpuConverter::createLaunch(AffineForOp rootForOp,
                                            AffineForOp innermostForOp,
                                            unsigned numBlockDims,
                                            unsigned numThreadDims) {
  OpBuilder builder(rootForOp.getOperation());
  // Prepare the grid and block sizes for the launch operation. If there is
  // no loop mapped to a specific dimension, use constant "1" as its size.
  Value constOne =
      (numBlockDims < 3 || numThreadDims < 3)
          ? arith::ConstantIndexOp::create(builder, rootForOp.getLoc(), 1)
          : nullptr;
  Value gridSizeX = numBlockDims > 0 ? dims[0] : constOne;
  Value gridSizeY = numBlockDims > 1 ? dims[1] : constOne;
  Value gridSizeZ = numBlockDims > 2 ? dims[2] : constOne;
  Value blockSizeX = numThreadDims > 0 ? dims[numBlockDims] : constOne;
  Value blockSizeY = numThreadDims > 1 ? dims[numBlockDims + 1] : constOne;
  Value blockSizeZ = numThreadDims > 2 ? dims[numBlockDims + 2] : constOne;

  // Create a launch op and move the body region of the innermost loop into
  // the launch op.
  auto launchOp =
      gpu::LaunchOp::create(builder, rootForOp.getLoc(), gridSizeX, gridSizeY,
                            gridSizeZ, blockSizeX, blockSizeY, blockSizeZ);
  // Replace the loop terminator (loops contain only a single block) with the
  // gpu terminator and move the operations from the loop body block to the
  // gpu launch body block. Do not move the entire block because of the
  // difference in block arguments.
  Operation &terminator = innermostForOp.getBody()->back();
  Location terminatorLoc = terminator.getLoc();
  terminator.erase();
  builder.setInsertionPointToEnd(innermostForOp.getBody());
  gpu::TerminatorOp::create(builder, terminatorLoc, TypeRange());
  launchOp.getBody().front().getOperations().splice(
      launchOp.getBody().front().begin(),
      innermostForOp.getBody()->getOperations());
  // Remap the loop induction variables to use block/thread identifiers
  // instead. Loops may iterate from LB with step S whereas GPU thread/block
  // ids always iterate from 0 to N with step 1. Therefore, each induction
  // variable is replaced with (gpu-thread/block-id * S) + LB.
  builder.setInsertionPointToStart(&launchOp.getBody().front());
  auto *lbArgumentIt = lbs.begin();
  auto *stepArgumentIt = steps.begin();
  for (const auto &en : llvm::enumerate(ivs)) {
    Value id =
        en.index() < numBlockDims
            ? getDim3Value(launchOp.getBlockIds(), en.index())
            : getDim3Value(launchOp.getThreadIds(), en.index() - numBlockDims);
    Value step = steps[en.index()];
    // Skip the multiplication for unit steps.
    if (getConstantIntValue(step) != static_cast<int64_t>(1))
      id = arith::MulIOp::create(builder, rootForOp.getLoc(), step, id);

    Value ivReplacement =
        arith::AddIOp::create(builder, rootForOp.getLoc(), *lbArgumentIt, id);
    en.value().replaceAllUsesWith(ivReplacement);
    std::advance(lbArgumentIt, 1);
    std::advance(stepArgumentIt, 1);
  }

  // We are done and can erase the root loop.
  rootForOp.erase();
}
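// End-to-end shape of the rewrite (illustrative; names are hypothetical):
//
//   affine.for %i = %lb0 to %ub0 step 2 {   // one block dimension
//     affine.for %j = %lb1 to %ub1 {        // one thread dimension
//       "test.use"(%i, %j) : (index, index) -> ()
//     }
//   }
//
// becomes, in pseudo-IR,
//
//   gpu.launch blocks(%bx, ...) in (%g0 = %range0, ...)
//              threads(%tx, ...) in (%b0 = %range1, ...) {
//     %i = %lb0 + %bx * 2   // arith.muli + arith.addi as created above
//     %j = %lb1 + %tx       // unit-step loops skip the multiplication
//     "test.use"(%i, %j) : (index, index) -> ()
//     gpu.terminator
//   }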
static LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                      unsigned numBlockDims,
                                                      unsigned numThreadDims) {
  AffineLoopToGpuConverter converter;
  auto maybeInnerLoop =
      converter.collectBounds(forOp, numBlockDims + numThreadDims);
  if (!maybeInnerLoop)
    return failure();
  converter.createLaunch(forOp, *maybeInnerLoop, numBlockDims, numThreadDims);
  return success();
}
LogicalResult mlir::convertAffineLoopNestToGPULaunch(AffineForOp forOp,
                                                     unsigned numBlockDims,
                                                     unsigned numThreadDims) {
  return ::convertAffineLoopNestToGPULaunch(forOp, numBlockDims,
                                            numThreadDims);
}
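As a usage sketch (hypothetical pass body, not part of this file): convert every top-level loop nest of a function to a 1x1 launch, mirroring how an in-tree pass might drive this entry point.

  // Hypothetical driver; assumes getOperation() yields the func::FuncOp being
  // processed, and both dimension counts are illustrative.
  for (Operation &op :
       llvm::make_early_inc_range(getOperation().getBody().getOps())) {
    if (auto forOp = dyn_cast<affine::AffineForOp>(&op))
      if (failed(convertAffineLoopNestToGPULaunch(forOp, /*numBlockDims=*/1,
                                                  /*numThreadDims=*/1)))
        signalPassFailure();
  }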
struct ParallelToGpuLaunchLowering : public OpRewritePattern<ParallelOp> {
  using OpRewritePattern<ParallelOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(ParallelOp parallelOp,
                                PatternRewriter &rewriter) const override;
};
static Value deriveStaticUpperBound(Value upperBound,
                                    PatternRewriter &rewriter) {
  if (auto op = upperBound.getDefiningOp<arith::ConstantIndexOp>())
    return op;

  if (auto minOp = upperBound.getDefiningOp<AffineMinOp>()) {
    for (const AffineExpr &result : minOp.getMap().getResults()) {
      if (auto constExpr = dyn_cast<AffineConstantExpr>(result)) {
        return arith::ConstantIndexOp::create(rewriter, minOp.getLoc(),
                                              constExpr.getValue());
      }
    }
  }

  if (auto minOp = upperBound.getDefiningOp<arith::MinSIOp>()) {
    for (Value operand : {minOp.getLhs(), minOp.getRhs()}) {
      if (auto staticBound = deriveStaticUpperBound(operand, rewriter))
        return staticBound;
    }
  }

  if (auto multiplyOp = upperBound.getDefiningOp<arith::MulIOp>()) {
    if (auto lhs = dyn_cast_or_null<arith::ConstantIndexOp>(
            deriveStaticUpperBound(multiplyOp.getOperand(0), rewriter)
                .getDefiningOp()))
      if (auto rhs = dyn_cast_or_null<arith::ConstantIndexOp>(
              deriveStaticUpperBound(multiplyOp.getOperand(1), rewriter)
                  .getDefiningOp())) {
        // The assumptions about the upper bound no longer hold if the
        // constants have mixed signs, so bail out in that case.
        if ((lhs.value() < 0) != (rhs.value() < 0))
          return {};

        return arith::ConstantIndexOp::create(rewriter, multiplyOp.getLoc(),
                                              lhs.value() * rhs.value());
      }
  }

  return {};
}
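// Illustrative behavior (hypothetical IR): given
//   %c64 = arith.constant 64 : index
//   %ub  = arith.minsi %c64, %n : index
// deriveStaticUpperBound(%ub, rewriter) returns the constant 64, because the
// result of arith.minsi can never exceed either operand, so any constant
// operand is a valid (if imprecise) static upper bound.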
static bool isMappedToProcessor(gpu::Processor processor) {
  return processor != gpu::Processor::Sequential;
}
static unsigned getLaunchOpArgumentNum(gpu::Processor processor) {
  switch (processor) {
  case gpu::Processor::BlockX: return 0;
  case gpu::Processor::BlockY: return 1;
  case gpu::Processor::BlockZ: return 2;
  case gpu::Processor::ThreadX: return 3;
  case gpu::Processor::ThreadY: return 4;
  case gpu::Processor::ThreadZ: return 5;
  default: break;
  }
  llvm_unreachable(
      "invalid processor type while retrieving launch op argument number");
}
static LogicalResult processParallelLoop(
    ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap,
    SmallVectorImpl<Operation *> &worklist,
    DenseMap<gpu::Processor, Value> &bounds, PatternRewriter &rewriter) {
  ArrayAttr mapping =
      parallelOp->getAttrOfType<ArrayAttr>(gpu::getMappingAttrName());
  if (!mapping || parallelOp.getNumResults() > 1)
    return failure();

  Location loc = parallelOp.getLoc();
  auto launchIndependent = [&launchOp](Value val) {
    return val.getParentRegion()->isAncestor(launchOp->getParentRegion());
  };

  auto ensureLaunchIndependent = [&rewriter,
                                  launchIndependent](Value val) -> Value {
    if (launchIndependent(val))
      return val;
    if (auto constOp = val.getDefiningOp<arith::ConstantOp>())
      return arith::ConstantOp::create(rewriter, constOp.getLoc(),
                                       constOp.getValue());
    return {};
  };
  for (auto config : llvm::zip(
           mapping, parallelOp.getInductionVars(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep())) {
    Attribute mappingAttribute;
    Value iv, lowerBound, upperBound, step;
    std::tie(mappingAttribute, iv, lowerBound, upperBound, step) = config;
    auto annotation =
        dyn_cast<gpu::ParallelLoopDimMappingAttr>(mappingAttribute);
    if (!annotation)
      return parallelOp.emitOpError()
             << "expected mapping attribute for lowering to GPU";
    Value newIndex;
    gpu::Processor processor = annotation.getProcessor();
    if (isMappedToProcessor(processor)) {
      // Use the corresponding hardware id as replacement for the loop iv,
      // remapped as (id * step + lowerBound) via the affine map
      // (d0)[s0, s1] -> (d0 * s0 + s1).
      Value operand =
          launchOp.getBody().getArgument(getLaunchOpArgumentNum(processor));
      AffineMap lowerAndStep = AffineMap::get(
          1, 2,
          rewriter.getAffineDimExpr(0) * rewriter.getAffineSymbolExpr(0) +
              rewriter.getAffineSymbolExpr(1));
      Value mappedStep = cloningMap.lookupOrDefault(step);
      Value mappedLowerBound = cloningMap.lookupOrDefault(lowerBound);
      mappedStep = ensureLaunchIndependent(mappedStep);
      mappedLowerBound = ensureLaunchIndependent(mappedLowerBound);
      if (!mappedStep || !mappedLowerBound) {
        return rewriter.notifyMatchFailure(
            parallelOp, "lower bound / step must be constant or defined above "
                        "the launch operation");
      }
      newIndex = AffineApplyOp::create(
          rewriter, loc, annotation.getMap().compose(lowerAndStep),
          ValueRange{operand, mappedStep, mappedLowerBound});
      // If there was also a bound, insert that, too.
      if (annotation.getBound()) {
        // The lower bound must be constant or defined before the launch op.
        if (!launchIndependent(lowerBound) &&
            !isa_and_nonnull<arith::ConstantOp>(lowerBound.getDefiningOp()))
          return failure();
        // The step must also be constant or defined outside of the loop nest.
        if (!launchIndependent(step) &&
            !isa_and_nonnull<arith::ConstantOp>(step.getDefiningOp()))
          return failure();
        // If the upper bound is constant or defined before the launch, it can
        // be used directly; otherwise try to derive an approximation.
        bool boundIsPrecise =
            launchIndependent(upperBound) ||
            isa_and_nonnull<arith::ConstantOp>(upperBound.getDefiningOp());
        if (!boundIsPrecise) {
          upperBound = deriveStaticUpperBound(upperBound, rewriter);
          if (!upperBound) {
            return rewriter.notifyMatchFailure(
                parallelOp,
                "cannot derive loop-invariant upper bound for number of "
                "iterations");
          }
        }
        // Number of iterations: (upperBound - lowerBound) ceilDiv step, as the
        // map (d0)[s0, s1] -> ((d0 - s0) ceildiv s1) composed with the bound.
        AffineMap stepMap = AffineMap::get(
            1, 2,
            (rewriter.getAffineDimExpr(0) - rewriter.getAffineSymbolExpr(0))
                .ceilDiv(rewriter.getAffineSymbolExpr(1)));
        Value launchBound = AffineApplyOp::create(
            rewriter, loc, annotation.getBound().compose(stepMap),
            ValueRange{
                ensureLaunchIndependent(
                    cloningMap.lookupOrDefault(upperBound)),
                ensureLaunchIndependent(
                    cloningMap.lookupOrDefault(lowerBound)),
                ensureLaunchIndependent(cloningMap.lookupOrDefault(step))});
        if (!bounds.try_emplace(processor, launchBound).second) {
          return rewriter.notifyMatchFailure(
              parallelOp, "cannot redefine the bound for processor " +
                              Twine(static_cast<int64_t>(processor)));
        }
        if (!boundIsPrecise) {
          // We are using an approximation, create a surrounding conditional.
          arith::CmpIOp pred = arith::CmpIOp::create(
              rewriter, loc, arith::CmpIPredicate::slt, newIndex,
              cloningMap.lookupOrDefault(upperBound));
          scf::IfOp ifOp = scf::IfOp::create(rewriter, loc, pred, false);
          rewriter.setInsertionPointToStart(&ifOp.getThenRegion().front());
          // Put a sentinel into the worklist so we know when to pop out of
          // the conditional again.
          worklist.push_back(launchOp.getOperation());
        }
      }
    } else {
      // Create a sequential for loop instead.
      auto loopOp = scf::ForOp::create(rewriter, loc,
                                       cloningMap.lookupOrDefault(lowerBound),
                                       cloningMap.lookupOrDefault(upperBound),
                                       cloningMap.lookupOrDefault(step));
      newIndex = loopOp.getInductionVar();
      rewriter.setInsertionPointToStart(loopOp.getBody());
      // Sentinel so we know when to pop out of the loop body again.
      worklist.push_back(launchOp.getOperation());
    }
    cloningMap.map(iv, newIndex);
  }
  // Propagate custom user-defined optional attributes that may be used at a
  // later stage, such as extension data for GPU kernel dispatch.
  for (const auto &namedAttr : parallelOp->getAttrs()) {
    if (namedAttr.getName() == gpu::getMappingAttrName() ||
        namedAttr.getName() == ParallelOp::getOperandSegmentSizeAttr())
      continue;
    launchOp->setAttr(namedAttr.getName(), namedAttr.getValue());
  }
  Block *body = parallelOp.getBody();
  worklist.reserve(worklist.size() + body->getOperations().size());
  // Include the scf.reduce terminator if it has an operand to reduce.
  if (auto terminator = body->getTerminator();
      isa<scf::ReduceOp>(terminator) && terminator->getOperands().size() == 1) {
    worklist.push_back(terminator);
  }
  for (Operation &op : llvm::reverse(body->without_terminator()))
    worklist.push_back(&op);
  return success();
}
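// For reference, an input loop carrying the mapping attribute consumed above
// might look as follows (illustrative IR; such attributes are typically
// attached by the GPU mapping utilities):
//
//   scf.parallel (%i) = (%c0) to (%n) step (%c1) {
//     ...
//   } {mapping = [#gpu.loop_dim_map<processor = block_x,
//                                   map = (d0) -> (d0),
//                                   bound = (d0) -> (d0)>]}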
LogicalResult
ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
                                             PatternRewriter &rewriter) const {
  // We can only transform starting at the outermost loop. Launches inside of
  // parallel loops are not supported.
  if (auto parentLoop = parallelOp->getParentOfType<ParallelOp>())
    return failure();
  // Create a launch operation. We start with bound one for all grid/block
  // sizes; they are refined later as they are discovered from the mappings.
  Location loc = parallelOp.getLoc();
  Value constantOne =
      arith::ConstantIndexOp::create(rewriter, parallelOp.getLoc(), 1);
  gpu::LaunchOp launchOp = gpu::LaunchOp::create(
      rewriter, parallelOp.getLoc(), constantOne, constantOne, constantOne,
      constantOne, constantOne, constantOne);
  rewriter.setInsertionPointToEnd(&launchOp.getBody().front());
  gpu::TerminatorOp::create(rewriter, loc);
  rewriter.setInsertionPointToStart(&launchOp.getBody().front());
  IRMapping cloningMap;
  llvm::DenseMap<gpu::Processor, Value> launchBounds;
  SmallVector<Operation *, 16> worklist;
  if (failed(processParallelLoop(parallelOp, launchOp, cloningMap, worklist,
                                 launchBounds, rewriter)))
    return failure();
  // Whether we have seen any side effects. Reset when leaving an inner scope.
  bool seenSideeffects = false;
  // Whether we have left a nesting scope (and hence are no longer innermost).
  bool leftNestingScope = false;
  LocalAliasAnalysis aliasAnalysis;
  // Buffers written to in the current parallel scope.
  llvm::DenseSet<Value> writtenBuffer;
  while (!worklist.empty()) {
    Operation *op = worklist.pop_back_val();
    if (auto nestedParallel = dyn_cast<ParallelOp>(op)) {
      // If side effects were already seen, the nested operations must not
      // touch any of the buffers written in the current scope.
      if (seenSideeffects) {
        WalkResult walkRes = nestedParallel.walk([&](Operation *nestedOp) {
          if (isMemoryEffectFree(nestedOp))
            return WalkResult::advance();

          auto memEffectInterface = dyn_cast<MemoryEffectOpInterface>(nestedOp);
          if (!memEffectInterface)
            return WalkResult::advance();

          SmallVector<MemoryEffects::EffectInstance> effects;
          memEffectInterface.getEffects(effects);
          for (const MemoryEffects::EffectInstance &effect : effects) {
            if (isa<MemoryEffects::Read>(effect.getEffect()) ||
                isa<MemoryEffects::Write>(effect.getEffect())) {
              Value baseBuffer = effect.getValue();
              if (!baseBuffer)
                return WalkResult::interrupt();
              for (Value val : writtenBuffer) {
                if (aliasAnalysis.alias(baseBuffer, val) !=
                    AliasResult::NoAlias)
                  return WalkResult::interrupt();
              }
            }
          }
          return WalkResult::advance();
        });
        if (walkRes.wasInterrupted())
          return failure();
      }
      // A nested scf.parallel needs insertion of code to compute indices;
      // this also updates the worklist with the loop body.
      if (failed(processParallelLoop(nestedParallel, launchOp, cloningMap,
                                     worklist, launchBounds, rewriter)))
        return failure();
    } else if (op == launchOp.getOperation()) {
      // Found our sentinel value: we have finished the operations from one
      // nesting level, pop one level back up.
      auto *parent = rewriter.getInsertionPoint()->getParentOp();
      rewriter.setInsertionPointAfter(parent);
      leftNestingScope = true;
      seenSideeffects = false;
      writtenBuffer.clear();
    } else if (auto reduceOp = dyn_cast<scf::ReduceOp>(op)) {
      // Convert scf.reduce to gpu.all_reduce.
      auto parentLoop = op->getParentOfType<ParallelOp>();
      if (!parentLoop || op->getOperands().size() != 1)
        return failure();
      auto operand = op->getOperands().front();
      auto newValue = cloningMap.lookupOrNull(operand);
      if (!newValue || !operand.getType().isSignlessIntOrFloat())
        return failure();
      // Ensure the reduction region is isolated from above.
      llvm::SetVector<Value> externalValues;
      getUsedValuesDefinedAbove(reduceOp.getRegion(0), externalValues);
      if (externalValues.size())
        return failure();
      auto gpuRedOp = gpu::AllReduceOp::create(rewriter, loc, newValue);
      cloningMap.map(parentLoop->getResult(0), gpuRedOp.getResult());
      rewriter.inlineRegionBefore(reduceOp.getRegion(0), gpuRedOp.getRegion(),
                                  gpuRedOp.getRegion().begin());
      // Replace the scf.reduce.return terminator with gpu.yield.
      auto scfReturn = gpuRedOp.getRegion().front().getTerminator();
      auto ip = rewriter.saveInsertionPoint();
      rewriter.setInsertionPointToEnd(&gpuRedOp.getRegion().front());
      rewriter.replaceOpWithNewOp<gpu::YieldOp>(
          scfReturn, scfReturn->getOperands().front());
      rewriter.restoreInsertionPoint(ip);
    } else {
      // Otherwise we copy the operation over.
      Operation *clone = rewriter.clone(*op, cloningMap);
      cloningMap.map(op->getResults(), clone->getResults());
      // Record the buffers written by the cloned operation.
      if (auto memEffectInterface =
              dyn_cast<MemoryEffectOpInterface>(clone)) {
        SmallVector<MemoryEffects::EffectInstance> effects;
        memEffectInterface.getEffects(effects);
        for (const MemoryEffects::EffectInstance &effect : effects) {
          if (isa<MemoryEffects::Write>(effect.getEffect())) {
            Value writtenBase = effect.getValue();
            // Conservatively fail if the written buffer cannot be identified.
            if (!writtenBase)
              return failure();
            writtenBuffer.insert(writtenBase);
          }
        }
      }
      seenSideeffects |= !isMemoryEffectFree(clone);
    }
    // Side effects are only allowed in the innermost scope.
    if (seenSideeffects && leftNestingScope)
      return failure();
  }
  // Now that the launch operation was created successfully, update its bounds.
  for (auto bound : launchBounds)
    launchOp.setOperand(getLaunchOpArgumentNum(std::get<0>(bound)),
                        std::get<1>(bound));

  rewriter.eraseOp(parallelOp);
  return success();
}
void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {
  target.addLegalDialect<memref::MemRefDialect>();
  target.addDynamicallyLegalOp<scf::ParallelOp>([](scf::ParallelOp parallelOp) {
    return !parallelOp->hasAttr(gpu::getMappingAttrName()) ||
           parallelOp->hasAttr(kVisitedAttrName);
  });
}

void mlir::finalizeParallelLoopToGPUConversion(Operation *op) {
  op->walk([](scf::ParallelOp parallelOp) {
    parallelOp->removeAttr(kVisitedAttrName);
  });
}
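A sketch of how these public entry points typically combine in a conversion pass (assumptions: a partial dialect conversion over the whole operation; the pass boilerplate itself is hypothetical):

  void runOnOperation() {  // hypothetical pass method
    RewritePatternSet patterns(&getContext());
    populateParallelLoopToGPUPatterns(patterns);
    ConversionTarget target(getContext());
    target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });
    configureParallelLoopToGPULegality(target);
    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      return signalPassFailure();
    finalizeParallelLoopToGPUConversion(getOperation());
  }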
static LogicalResult checkAffineLoopNestMappableImpl(AffineForOp forOp, unsigned numDims)
static Value getOrEmitUpperBound(AffineForOp forOp, OpBuilder &builder)
static Value getDim3Value(const gpu::KernelDim3 &dim3, unsigned pos)
static LogicalResult processParallelLoop(ParallelOp parallelOp, gpu::LaunchOp launchOp, IRMapping &cloningMap, SmallVectorImpl< Operation * > &worklist, DenseMap< gpu::Processor, Value > &bounds, PatternRewriter &rewriter)
Modifies the current transformation state to capture the effect of the given scf.parallel operation on index substitutions and the operations to be processed.
static bool isMappedToProcessor(gpu::Processor processor)
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp)
static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder)
static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder)
static Value deriveStaticUpperBound(Value upperBound, PatternRewriter &rewriter)
Tries to derive a static upper bound from the defining operation of upperBound.
static unsigned getLaunchOpArgumentNum(gpu::Processor processor)
static constexpr StringLiteral kVisitedAttrName
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp)
static LogicalResult checkAffineLoopNestMappable(AffineForOp forOp, unsigned numBlockDims, unsigned numThreadDims)
Base type for affine expression.
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
AliasResult::NoAlias
The two locations do not alias at all.
Attributes are known-constant values of operations.
Block represents an ordered list of Operations.
OpListType & getOperations()
Operation * getTerminator()
Get the terminator operation of this block.
iterator_range< iterator > without_terminator()
Return an iterator range over the operations within this block, excluding the terminator operation at the end.
AffineExpr getAffineSymbolExpr(unsigned position)
AffineExpr getAffineDimExpr(unsigned position)
This is a utility class for mapping one set of IR entities to another.
auto lookupOrDefault(T from) const
Lookup a mapped value within the map.
void map(Value from, Value to)
Inserts a new mapping for 'from' to 'to'.
auto lookupOrNull(T from) const
Lookup a mapped value within the map.
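A minimal sketch of the IRMapping contract relied on by the cloning loop above (all values hypothetical):

  IRMapping map;
  map.map(oldValue, newValue);              // record e.g. iv -> hardware index
  Value a = map.lookupOrDefault(oldValue);  // returns newValue
  Value b = map.lookupOrDefault(other);     // unmapped: returns `other` itself
  Value c = map.lookupOrNull(other);        // unmapped: returns a null Value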
AliasResult alias(Value lhs, Value rhs)
Given two values, return their aliasing behavior.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around a LocationAttr.
RAII guard to reset the insertion point of the builder when destroyed.
This class helps build Operations.
InsertPoint saveInsertionPoint() const
Return a saved insertion point.
Block::iterator getInsertionPoint() const
Returns the current insertion point of the builder.
Operation * clone(Operation &op, IRMapping &mapper)
Creates a deep copy of the specified operation, remapping any operands that use values outside of the operation using the map that is provided (leaving them alone if no entry is present).
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
void setInsertionPointToEnd(Block *block)
Sets the insertion point to the end of the specified block.
void restoreInsertionPoint(InsertPoint ip)
Restore the insert point to a previously saved point.
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent insertions to go right after it.
Operation is the basic unit of execution within MLIR.
unsigned getNumRegions()
Returns the number of regions held by this operation.
Location getLoc()
The source location the operation was defined or derived from.
OperandRange operand_range
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers that may be listening.
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
operand_range getOperands()
Returns an iterator on the underlying Value's.
std::enable_if_t< llvm::function_traits< std::decay_t< FnT > >::num_args==1, RetT > walk(FnT &&callback)
Walk the operation by calling the callback for each nested operation (including this one), in post-order.
result_range getResults()
void erase()
Remove this operation from its parent block and delete it.
A special type of RewriterBase that coordinates the application of a rewrite pattern on the current IR being matched and rewritten.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
virtual void eraseOp(Operation *op)
This method erases an operation that is known to have no uses.
std::enable_if_t<!std::is_convertible< CallbackT, Twine >::value, LogicalResult > notifyMatchFailure(Location loc, CallbackT &&reasonCallback)
Used to notify the listener that the IR failed to be rewritten because of a match failure, and to provide a callback to populate a diagnostic with the reason why the failure occurred.
void inlineRegionBefore(Region &region, Region &parent, Region::iterator before)
Move the blocks that belong to "region" before the given position in another region "parent".
OpTy replaceOpWithNewOp(Operation *op, Args &&...args)
Replace the results of the given (original) op with a new op that is created without verification.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value that has a type and a set of users.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
static WalkResult advance()
bool wasInterrupted() const
Returns true if the walk was interrupted.
static WalkResult interrupt()
Specialization of arith.constant op that returns an integer of index type.
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
SideEffects::EffectInstance< Effect > EffectInstance
StringRef getMappingAttrName()
Name of the mapping attribute produced by loop mappers.
Value constantOne(OpBuilder &builder, Location loc, Type tp)
Generates a 1-valued constant of the given type.
void finalizeParallelLoopToGPUConversion(Operation *op)
Clean up after applyPartialConversion/applyFullConversion call.
void populateParallelLoopToGPUPatterns(RewritePatternSet &patterns)
Adds the conversion pattern from scf.parallel to gpu.launch to the provided pattern list.
std::optional< int64_t > getConstantIntValue(OpFoldResult ofr)
If ofr is a constant integer or an IntegerAttr, return the integer.
LogicalResult convertAffineLoopNestToGPULaunch(affine::AffineForOp forOp, unsigned numBlockDims, unsigned numThreadDims)
Convert a perfect affine loop nest with the outermost loop identified by forOp into a gpu::LaunchOp.
bool isMemoryEffectFree(Operation *op)
Returns true if the given operation is free of memory effects.
Value lowerAffineUpperBound(affine::AffineForOp op, OpBuilder &builder)
Emit code that computes the upper bound of the given affine loop using standard arithmetic operations...
const FrozenRewritePatternSet & patterns
void getUsedValuesDefinedAbove(Region &region, Region &limit, SetVector< Value > &values)
Fill values with a list of values defined at the ancestors of the limit region and used within region or its descendants.
Operation * clone(OpBuilder &b, Operation *op, TypeRange newResultTypes, ValueRange newOperands)
llvm::DenseMap< KeyT, ValueT, KeyInfoT, BucketT > DenseMap
bool areValuesDefinedAbove(Range values, Region &limit)
Check if all values in the provided range are defined above the limit region.
void configureParallelLoopToGPULegality(ConversionTarget &target)
Configures the rewrite target such that only scf.parallel operations that are not rewritten by the provided patterns are legal.
Value lowerAffineLowerBound(affine::AffineForOp op, OpBuilder &builder)
Emit code that computes the lower bound of the given affine loop using standard arithmetic operations...
Utility class for the GPU dialect to represent triples of Values accessible through .x, .y, and .z, similarly to CUDA notation.