25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/Support/DebugLog.h"
32 #define DEBUG_TYPE "scf-utils"
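// replaceLoopNestWithNewYields: update a perfectly nested loop nest so that the
// innermost loop yields the additional values and propagate them out through
// every enclosing loop.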
37 bool replaceIterOperandsUsesInLoop) {
43 assert(loopNest.size() <= 10 &&
44 "exceeded recursion limit when yielding value from loop nest");
76 if (loopNest.size() == 1) {
78 cast<scf::ForOp>(*loopNest.back().replaceWithAdditionalYields(
79 rewriter, newIterOperands, replaceIterOperandsUsesInLoop,
81 return {innerMostLoop};
91 innerNewBBArgs, newYieldValuesFn,
92 replaceIterOperandsUsesInLoop);
93 return llvm::to_vector(llvm::map_range(
94 newLoopNest.front().getResults().take_back(innerNewBBArgs.size()),
97 scf::ForOp outerMostLoop =
98 cast<scf::ForOp>(*loopNest.front().replaceWithAdditionalYields(
99 rewriter, newIterOperands, replaceIterOperandsUsesInLoop, fn));
100 newLoopNest.insert(newLoopNest.begin(), outerMostLoop);
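// outlineSingleBlockRegion: outline a single-block region into a new
// func::FuncOp named funcName; values defined above the region become extra
// function arguments, and the call to the outlined function is returned via callOp.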
117 func::CallOp *callOp) {
118 assert(!funcName.empty() && "funcName cannot be empty");
132 ValueRange outlinedValues(captures.getArrayRef());
139 outlinedFuncArgTypes.push_back(arg.getType());
140 outlinedFuncArgLocs.push_back(arg.getLoc());
142 for (Value value : outlinedValues) {
143 outlinedFuncArgTypes.push_back(value.getType());
144 outlinedFuncArgLocs.push_back(value.getLoc());
146 FunctionType outlinedFuncType =
150 func::FuncOp::create(rewriter, loc, funcName, outlinedFuncType);
151 Block *outlinedFuncBody = outlinedFunc.addEntryBlock();
156 auto outlinedFuncBlockArgs = outlinedFuncBody->getArguments();
161 originalBlock, outlinedFuncBody,
162 outlinedFuncBlockArgs.take_front(numOriginalBlockArguments));
165 func::ReturnOp::create(rewriter, loc, originalTerminator->getResultTypes(),
172 &region, region.begin(),
173 TypeRange{outlinedFuncArgTypes}.take_front(numOriginalBlockArguments),
175 .take_front(numOriginalBlockArguments));
180 llvm::append_range(callValues, newBlock->getArguments());
181 llvm::append_range(callValues, outlinedValues);
182 auto call = func::CallOp::create(rewriter, loc, outlinedFunc, callValues);
191 rewriter.clone(*originalTerminator, bvm);
192 rewriter.eraseOp(originalTerminator);
197 for (auto it : llvm::zip(outlinedValues, outlinedFuncBlockArgs.take_back(
198 outlinedValues.size()))) {
199 Value orig = std::get<0>(it);
200 Value repl = std::get<1>(it);
210 return outlinedFunc->isProperAncestor(opOperand.getOwner());
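// outlineIfOp: outline the "then" and/or "else" regions of an scf.if into
// separate functions using outlineSingleBlockRegion.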
218 func::FuncOp *thenFn, StringRef thenFnName,
219 func::FuncOp *elseFn, StringRef elseFnName) {
222 FailureOr<func::FuncOp> outlinedFuncOpOrFailure;
223 if (thenFn && !ifOp.getThenRegion().empty()) {
225 rewriter, loc, ifOp.getThenRegion(), thenFnName);
226 if (failed(outlinedFuncOpOrFailure))
228 *thenFn = *outlinedFuncOpOrFailure;
230 if (elseFn && !ifOp.getElseRegion().empty()) {
232 rewriter, loc, ifOp.getElseRegion(), elseFnName);
233 if (failed(outlinedFuncOpOrFailure))
235 *elseFn = *outlinedFuncOpOrFailure;
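// getInnermostParallelLoops: recursively walk rootOp and collect the innermost
// scf.parallel loops, returning true if any parallel loop is enclosed.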
242 assert(rootOp != nullptr && "Root operation must not be a nullptr.");
243 bool rootEnclosesPloops = false;
245 for (Block &block : region.getBlocks()) {
248 rootEnclosesPloops |= enclosesPloops;
249 if (auto ploop = dyn_cast<scf::ParallelOp>(op)) {
250 rootEnclosesPloops = true;
254 result.push_back(ploop);
259 return rootEnclosesPloops;
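// ceilDivPositive: emit IR computing ceil(dividend / divisor) for a positive
// divisor as (dividend + divisor - 1) / divisor; one overload takes a constant
// divisor, the other an SSA value.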
267 assert(divisor > 0 && "expected positive divisor");
269 "expected integer or index-typed value");
271 Value divisorMinusOneCst = arith::ConstantOp::create(
273 Value divisorCst = arith::ConstantOp::create(
275 Value sum = arith::AddIOp::create(builder, loc, dividend, divisorMinusOneCst);
276 return arith::DivUIOp::create(builder, loc, sum, divisorCst);
286 "expected integer or index-typed value");
287 Value cstOne = arith::ConstantOp::create(
289 Value divisorMinusOne = arith::SubIOp::create(builder, loc, divisor, cstOne);
290 Value sum = arith::AddIOp::create(builder, loc, dividend, divisorMinusOne);
291 return arith::DivUIOp::create(builder, loc, sum, divisor);
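// generateUnrolledLoop: create unrollFactor copies of the body in
// loopBodyBlock, remapping the induction variable with ivRemapFn, annotating
// cloned ops with annotateFn, and threading iter_args/yielded values between copies.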
307 Block *loopBodyBlock, Value forOpIV, uint64_t unrollFactor,
317 annotateFn = defaultAnnotateFn;
326 for (unsigned i = 1; i < unrollFactor; i++) {
330 operandMap.map(iterArgs, lastYielded);
335 Value ivUnroll = ivRemapFn(i, forOpIV, builder);
336 operandMap.map(forOpIV, ivUnroll);
340 for (auto it = loopBodyBlock->begin(); it != std::next(srcBlockEnd); it++) {
342 annotateFn(i, clonedOp, builder);
346 for (unsigned i = 0, e = lastYielded.size(); i < e; i++)
352 for (auto it = loopBodyBlock->begin(); it != std::next(srcBlockEnd); it++)
353 annotateFn(0, &*it, builder);
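// loopUnrollByFactor: unroll an scf.for by unrollFactor, shrinking the upper
// bound and scaling the step, and emitting an epilogue loop for the remaining
// iterations when the trip count is not a multiple of the factor.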
362 scf::ForOp forOp, uint64_t unrollFactor,
364 assert(unrollFactor > 0 && "expected positive unroll factor");
367 if (llvm::hasSingleElement(forOp.getBody()->getOperations()))
374 auto loc = forOp.getLoc();
375 Value step = forOp.getStep();
376 Value upperBoundUnrolled;
378 bool generateEpilogueLoop = true;
381 if (constTripCount) {
386 if (unrollFactor == 1) {
387 if (*constTripCount == 1 &&
388 failed(forOp.promoteIfSingleIteration(rewriter)))
393 int64_t tripCountEvenMultiple =
394 *constTripCount - (*constTripCount % unrollFactor);
395 int64_t upperBoundUnrolledCst = lbCst + tripCountEvenMultiple * stepCst;
396 int64_t stepUnrolledCst = stepCst * unrollFactor;
399 generateEpilogueLoop = upperBoundUnrolledCst < ubCst;
400 if (generateEpilogueLoop)
401 upperBoundUnrolled = arith::ConstantOp::create(
404 upperBoundUnrolledCst));
406 upperBoundUnrolled = forOp.getUpperBound();
410 stepCst == stepUnrolledCst
412 : arith::ConstantOp::create(boundsBuilder, loc,
414 step.getType(), stepUnrolledCst));
419 auto lowerBound = forOp.getLowerBound();
420 auto upperBound = forOp.getUpperBound();
422 arith::SubIOp::create(boundsBuilder, loc, upperBound, lowerBound);
424 Value unrollFactorCst = arith::ConstantOp::create(
428 arith::RemSIOp::create(boundsBuilder, loc, tripCount, unrollFactorCst);
430 Value tripCountEvenMultiple =
431 arith::SubIOp::create(boundsBuilder, loc, tripCount, tripCountRem);
433 upperBoundUnrolled = arith::AddIOp::create(
434 boundsBuilder, loc, lowerBound,
435 arith::MulIOp::create(boundsBuilder, loc, tripCountEvenMultiple, step));
438 arith::MulIOp::create(boundsBuilder, loc, step, unrollFactorCst);
444 if (generateEpilogueLoop) {
445 OpBuilder epilogueBuilder(forOp->getContext());
447 auto epilogueForOp = cast<scf::ForOp>(epilogueBuilder.clone(*forOp));
448 epilogueForOp.setLowerBound(upperBoundUnrolled);
451 auto results = forOp.getResults();
452 auto epilogueResults = epilogueForOp.getResults();
454 for (auto e : llvm::zip(results, epilogueResults)) {
455 std::get<0>(e).replaceAllUsesWith(std::get<1>(e));
457 epilogueForOp->setOperands(epilogueForOp.getNumControlOperands(),
458 epilogueForOp.getInitArgs().size(), results);
459 if (epilogueForOp.promoteIfSingleIteration(rewriter).failed())
464 forOp.setUpperBound(upperBoundUnrolled);
465 forOp.setStep(stepUnrolled);
467 auto iterArgs = ValueRange(forOp.getRegionIterArgs());
468 auto yieldedValues = forOp.getBody()->getTerminator()->getOperands();
471 forOp.getBody(), forOp.getInductionVar(), unrollFactor,
474 auto stride = arith::MulIOp::create(
476 arith::ConstantOp::create(b, loc,
477 b.getIntegerAttr(iv.getType(), i)));
478 return arith::AddIOp::create(b, loc, iv, stride);
480 annotateFn, iterArgs, yieldedValues);
482 if (forOp.promoteIfSingleIteration(rewriter).failed())
491 if (!mayBeConstantTripCount.has_value())
493 uint64_t tripCount = *mayBeConstantTripCount;
497 return forOp.promoteIfSingleIteration(rewriter);
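// areInnerBoundsInvariant: return true if the bounds and steps of all loops
// nested inside forOp are defined outside of forOp.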
504 auto walkResult = forOp.walk([&](scf::ForOp innerForOp) {
505 if (!forOp.isDefinedOutsideOfLoop(innerForOp.getLowerBound()) ||
506 !forOp.isDefinedOutsideOfLoop(innerForOp.getUpperBound()) ||
507 !forOp.isDefinedOutsideOfLoop(innerForOp.getStep()))
512 return !walkResult.wasInterrupted();
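// loopUnrollJamByFactor: unroll and jam an scf.for by unrollJamFactor,
// duplicating the sub-blocks of the body and the iter_args of the inner loops
// rather than the inner loops themselves.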
517 uint64_t unrollJamFactor) {
518 assert(unrollJamFactor > 0 && "unroll jam factor should be positive");
520 if (unrollJamFactor == 1)
526 LDBG() << "failed to unroll and jam: inner bounds are not invariant";
531 if (forOp->getNumResults() > 0) {
532 LDBG() << "failed to unroll and jam: unsupported loop with results";
539 if (!tripCount.has_value()) {
541 LDBG() << "failed to unroll and jam: trip count could not be determined";
544 if (unrollJamFactor > *tripCount) {
545 LDBG() << "unroll and jam factor is greater than trip count, set factor to "
548 unrollJamFactor = *tripCount;
549 } else if (*tripCount % unrollJamFactor != 0) {
550 LDBG() << "failed to unroll and jam: unsupported trip count that is not a "
551 "multiple of unroll jam factor";
556 if (llvm::hasSingleElement(forOp.getBody()->getOperations()))
566 forOp.walk([&](scf::ForOp innerForOp) { innerLoops.push_back(innerForOp); });
577 for (scf::ForOp oldForOp : innerLoops) {
579 ValueRange oldIterOperands = oldForOp.getInits();
580 ValueRange oldIterArgs = oldForOp.getRegionIterArgs();
582 cast<scf::YieldOp>(oldForOp.getBody()->getTerminator()).getOperands();
585 for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
586 dupIterOperands.append(oldIterOperands.begin(), oldIterOperands.end());
587 dupYieldOperands.append(oldYieldOperands.begin(), oldYieldOperands.end());
591 bool forOpReplaced = oldForOp == forOp;
592 scf::ForOp newForOp =
593 cast<scf::ForOp>(*oldForOp.replaceWithAdditionalYields(
594 rewriter, dupIterOperands, false,
596 return dupYieldOperands;
598 newInnerLoops.push_back(newForOp);
603 ValueRange newIterArgs = newForOp.getRegionIterArgs();
604 unsigned oldNumIterArgs = oldIterArgs.size();
605 ValueRange newResults = newForOp.getResults();
606 unsigned oldNumResults = newResults.size() / unrollJamFactor;
607 assert(oldNumIterArgs == oldNumResults &&
608 "oldNumIterArgs must be the same as oldNumResults");
609 for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
610 for (unsigned j = 0; j < oldNumIterArgs; ++j) {
614 operandMaps[i - 1].map(newIterArgs[j],
615 newIterArgs[i * oldNumIterArgs + j]);
616 operandMaps[i - 1].map(newResults[j],
617 newResults[i * oldNumResults + j]);
624 int64_t step = forOp.getConstantStep()->getSExtValue();
626 forOp.getLoc(), forOp.getStep(),
628 forOp.getLoc(), rewriter.getIndexAttr(unrollJamFactor)));
629 forOp.setStep(newStep);
630 auto forOpIV = forOp.getInductionVar();
633 for (unsigned i = unrollJamFactor - 1; i >= 1; --i) {
634 for (auto &subBlock : subBlocks) {
637 OpBuilder builder(subBlock.first->getBlock(), std::next(subBlock.second));
646 builder.createOrFold<arith::AddIOp>(forOp.getLoc(), forOpIV, ivTag);
647 operandMaps[i - 1].map(forOpIV, ivUnroll);
650 for (auto it = subBlock.first; it != std::next(subBlock.second); ++it)
651 builder.clone(*it, operandMaps[i - 1]);
654 for (auto newForOp : newInnerLoops) {
655 unsigned oldNumIterOperands =
656 newForOp.getNumRegionIterArgs() / unrollJamFactor;
657 unsigned numControlOperands = newForOp.getNumControlOperands();
658 auto yieldOp = cast<scf::YieldOp>(newForOp.getBody()->getTerminator());
659 unsigned oldNumYieldOperands = yieldOp.getNumOperands() / unrollJamFactor;
660 assert(oldNumIterOperands == oldNumYieldOperands &&
661 "oldNumIterOperands must be the same as oldNumYieldOperands");
662 for (unsigned j = 0; j < oldNumIterOperands; ++j) {
666 newForOp.setOperand(numControlOperands + i * oldNumIterOperands + j,
667 operandMaps[i - 1].lookupOrDefault(
668 newForOp.getOperand(numControlOperands + j)));
670 i * oldNumYieldOperands + j,
671 operandMaps[i - 1].lookupOrDefault(yieldOp.getOperand(j)));
677 (void)forOp.promoteIfSingleIteration(rewriter);
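// emitNormalizedLoopBounds(ForIndexType): materialize the bounds and step of
// the equivalent zero-based, unit-step loop; bounds that are already
// normalized are returned unchanged.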
684 Range normalizedLoopBounds;
690 normalizedLoopBounds.size =
692 return normalizedLoopBounds;
704 bool isZeroBased = false;
706 isZeroBased = lbCst.value() == 0;
708 bool isStepOne = false;
710 isStepOne = stepCst.value() == 1;
714 "expected matching types");
719 if (isZeroBased && isStepOne)
720 return {lb, ub, step};
730 newUpperBound = rewriter.createOrFold<arith::CeilDivSIOp>(
738 return {newLowerBound, newUpperBound, newStep};
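// denormalizeInductionVariable: rewrite uses of the normalized induction
// variable in terms of the original lower bound and step (iv * step + lb),
// keeping the ops created for the denormalized value out of the replacement set.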
752 Value denormalizedIvVal =
759 if (Operation *preservedUse = denormalizedIvVal.getDefiningOp()) {
760 preservedUses.insert(preservedUse);
769 if (getType(origLb).isIndex()) {
773 Value denormalizedIv;
778 Value scaled = normalizedIv;
780 Value origStepValue =
782 scaled = arith::MulIOp::create(rewriter, loc, normalizedIv, origStepValue);
785 denormalizedIv = scaled;
788 denormalizedIv = arith::AddIOp::create(rewriter, loc, scaled, origLbValue);
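// getProductOfIndexes / getProductOfIntsOrIndexes: multiply a sequence of
// index or integer values, skipping factors known to be one and materializing
// a constant 1 when every factor is one.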
797 assert(!values.empty() && "unexpected empty array");
802 for (auto v : values) {
812 assert(!values.empty() && "unexpected empty list");
818 std::optional<Value> productOf;
819 for (auto v : values) {
821 if (vOne && vOne.value() == 1)
824 productOf = arith::MulIOp::create(rewriter, loc, productOf.value(), v)
830 productOf = arith::ConstantOp::create(
834 return productOf.value();
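// delinearizeInductionVariable: recover the per-loop induction variables from a
// linearized one by repeated division and remainder against the loop upper
// bounds, special-casing unit bounds; also returns the ops whose uses must be preserved.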
850 Operation *delinearizedOp = affine::AffineDelinearizeIndexOp::create(
851 rewriter, loc, linearizedIv, ubs);
852 auto resultVals = llvm::map_to_vector(
860 llvm::BitVector isUbOne(ubs.size());
863 if (ubCst && ubCst.value() == 1)
868 unsigned numLeadingOneUbs = 0;
870 if (!isUbOne.test(index)) {
873 delinearizedIvs[index] = arith::ConstantOp::create(
874 rewriter, loc, rewriter.getZeroAttr(ub.getType()));
878 Value previous = linearizedIv;
879 for (unsigned i = numLeadingOneUbs, e = ubs.size(); i < e; ++i) {
880 unsigned idx = ubs.size() - (i - numLeadingOneUbs) - 1;
881 if (i != numLeadingOneUbs && !isUbOne.test(idx + 1)) {
882 previous = arith::DivSIOp::create(rewriter, loc, previous, ubs[idx + 1]);
887 if (!isUbOne.test(idx)) {
888 iv = arith::RemSIOp::create(rewriter, loc, previous, ubs[idx]);
891 iv = arith::ConstantOp::create(
892 rewriter, loc, rewriter.getZeroAttr(ubs[idx].getType()));
895 delinearizedIvs[idx] = iv;
897 return {delinearizedIvs, preservedUsers};
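// coalesceLoops: replace a perfect nest of scf.for loops with a single loop
// whose range is the product of the normalized trip counts, delinearizing the
// new induction variable inside the body.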
902 if (loops.size() < 2)
905 scf::ForOp innermost = loops.back();
906 scf::ForOp outermost = loops.front();
910 for (auto loop : loops) {
913 Value lb = loop.getLowerBound();
914 Value ub = loop.getUpperBound();
915 Value step = loop.getStep();
921 newLoopRange.offset));
925 newLoopRange.stride));
929 loop.getInductionVar(), lb, step);
938 loops, [](auto loop) { return loop.getUpperBound(); });
940 outermost.setUpperBound(upperBound);
944 rewriter, loc, outermost.getInductionVar(), upperBounds);
948 for (int i = loops.size() - 1; i > 0; --i) {
949 auto outerLoop = loops[i - 1];
950 auto innerLoop = loops[i];
952 Operation *innerTerminator = innerLoop.getBody()->getTerminator();
953 auto yieldedVals = llvm::to_vector(innerTerminator->getOperands());
954 assert(llvm::equal(outerLoop.getRegionIterArgs(), innerLoop.getInitArgs()));
955 for (Value &yieldedVal : yieldedVals) {
958 auto iter = llvm::find(innerLoop.getRegionIterArgs(), yieldedVal);
959 if (iter != innerLoop.getRegionIterArgs().end()) {
960 unsigned iterArgIndex = iter - innerLoop.getRegionIterArgs().begin();
962 assert(iterArgIndex < innerLoop.getInitArgs().size());
963 yieldedVal = innerLoop.getInitArgs()[iterArgIndex];
966 rewriter.eraseOp(innerTerminator);
969 innerBlockArgs.push_back(delinearizeIvs[i]);
970 llvm::append_range(innerBlockArgs, outerLoop.getRegionIterArgs());
973 rewriter.replaceOp(innerLoop, yieldedVals);
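// coalescePerfectlyNestedSCFForLoops: walk the nest rooted at op, find the
// longest band of perfectly nested loops whose bounds are defined above the
// band and whose iter_args form a chain, and coalesce that band.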
982 IRRewriter rewriter(loops.front().getContext());
987 LogicalResult result(failure());
997 for (unsigned i = 0, e = loops.size(); i < e; ++i) {
998 operandsDefinedAbove[i] = i;
999 for (unsigned j = 0; j < i; ++j) {
1001 loops[i].getUpperBound(),
1002 loops[i].getStep()};
1004 operandsDefinedAbove[i] = j;
1015 iterArgChainStart[0] = 0;
1016 for (unsigned i = 1, e = loops.size(); i < e; ++i) {
1018 iterArgChainStart[i] = i;
1019 auto outerloop = loops[i - 1];
1020 auto innerLoop = loops[i];
1021 if (outerloop.getNumRegionIterArgs() != innerLoop.getNumRegionIterArgs()) {
1024 if (!llvm::equal(outerloop.getRegionIterArgs(), innerLoop.getInitArgs())) {
1027 auto outerloopTerminator = outerloop.getBody()->getTerminator();
1028 if (!llvm::equal(outerloopTerminator->getOperands(),
1029 innerLoop.getResults())) {
1032 iterArgChainStart[i] = iterArgChainStart[i - 1];
1038 for (unsigned end = loops.size(); end > 0; --end) {
1040 for (; start < end - 1; ++start) {
1042 *std::max_element(std::next(operandsDefinedAbove.begin(), start),
1043 std::next(operandsDefinedAbove.begin(), end));
1046 if (iterArgChainStart[end - 1] > start)
1055 if (start != end - 1)
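// collapseParallelLoops: normalize the scf.parallel dimensions and combine
// each requested set of dimensions into one whose bound is the product of its
// members, reconstructing the original IVs with div/rem inside the new body.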
1063 ArrayRef<std::vector<unsigned>> combinedDimensions) {
1069 auto sortedDimensions = llvm::to_vector<3>(combinedDimensions);
1070 for (auto &dims : sortedDimensions)
1075 for (unsigned i = 0, e = loops.getNumLoops(); i < e; ++i) {
1078 Value lb = loops.getLowerBound()[i];
1079 Value ub = loops.getUpperBound()[i];
1080 Value step = loops.getStep()[i];
1083 rewriter, loops.getLoc(), newLoopRange.size));
1094 for (auto &sortedDimension : sortedDimensions) {
1096 for (auto idx : sortedDimension) {
1097 newUpperBound = arith::MulIOp::create(rewriter, loc, newUpperBound,
1098 normalizedUpperBounds[idx]);
1100 lowerBounds.push_back(cst0);
1101 steps.push_back(cst1);
1102 upperBounds.push_back(newUpperBound);
1111 auto newPloop = scf::ParallelOp::create(
1112 rewriter, loc, lowerBounds, upperBounds, steps,
1114 for (unsigned i = 0, e = combinedDimensions.size(); i < e; ++i) {
1115 Value previous = ploopIVs[i];
1116 unsigned numberCombinedDimensions = combinedDimensions[i].size();
1118 for (unsigned j = numberCombinedDimensions - 1; j > 0; --j) {
1119 unsigned idx = combinedDimensions[i][j];
1122 Value iv = arith::RemSIOp::create(insideBuilder, loc, previous,
1123 normalizedUpperBounds[idx]);
1124 replaceAllUsesInRegionWith(loops.getBody()->getArgument(idx), iv,
1129 previous = arith::DivSIOp::create(insideBuilder, loc, previous,
1130 normalizedUpperBounds[idx]);
1134 unsigned idx = combinedDimensions[i][0];
1136 previous, loops.getRegion());
1141 loops.getBody()->back().erase();
1142 newPloop.getBody()->getOperations().splice(
1144 loops.getBody()->getOperations());
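// hoistOpsBetween: hoist memory-effect-free ops that appear before inner in
// outer's body to just before outer, skipping loops, ops with regions, and ops
// in the forward slice of outer's induction variable.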
1157 return op != inner.getOperation();
1160 LogicalResult status = success();
1162 for (auto &op : outer.getBody()->without_terminator()) {
1164 if (&op == inner.getOperation())
1167 if (forwardSlice.count(&op) > 0) {
1172 if (isa<scf::ForOp>(op))
1175 if (op.getNumRegions() > 0) {
1185 toHoist.push_back(&op);
1187 auto *outerForOp = outer.getOperation();
1188 for (auto *op : toHoist)
1189 op->moveBefore(outerForOp);
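// tryIsolateBands: attempt to isolate the intra-tile and inter-tile loop bands
// produced by tiling by hoisting ops between the first and each subsequent
// loop of a band.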
1198 LogicalResult status = success();
1199 const Loops &interTile = tileLoops.first;
1200 const Loops &intraTile = tileLoops.second;
1201 auto size = interTile.size();
1202 assert(size == intraTile.size());
1205 for (unsigned s = 1; s < size; ++s)
1206 status = succeeded(status) ? hoistOpsBetween(intraTile[0], intraTile[s])
1208 for (unsigned s = 1; s < size; ++s)
1209 status = succeeded(status) ? hoistOpsBetween(interTile[0], interTile[s])
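// getPerfectlyNestedLoopsImpl: starting from rootForOp, collect up to maxLoops
// loops that are perfectly nested, i.e. each body holds only the next loop and
// a terminator.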
1218 template <typename T>
1222 for (unsigned i = 0; i < maxLoops; ++i) {
1223 forOps.push_back(rootForOp);
1225 if (body.begin() != std::prev(body.end(), 2))
1228 rootForOp = dyn_cast<T>(&body.front());
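// stripmineSink: multiply forOp's step by factor and, inside each target loop,
// create an inner loop that covers one original step, moving the original body
// into it; returns the newly created inner loops.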
1236 auto originalStep = forOp.getStep();
1237 auto iv = forOp.getInductionVar();
1240 forOp.setStep(arith::MulIOp::create(b, forOp.getLoc(), originalStep, factor));
1243 for (auto t : targets) {
1245 auto begin = t.getBody()->begin();
1246 auto nOps = t.getBody()->getOperations().size();
1249 auto b = OpBuilder::atBlockTerminator((t.getBody()));
1250 Value stepped = arith::AddIOp::create(b, t.getLoc(), iv, forOp.getStep());
1252 arith::MinSIOp::create(b, t.getLoc(), forOp.getUpperBound(), stepped);
1255 auto newForOp = scf::ForOp::create(b, t.getLoc(), iv, ub, originalStep);
1256 newForOp.getBody()->getOperations().splice(
1257 newForOp.getBody()->getOperations().begin(),
1258 t.getBody()->getOperations(), begin, std::next(begin, nOps - 1));
1260 newForOp.getRegion());
1262 innerLoops.push_back(newForOp);
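// tile (and its SizeType-templated helper): strip-mine each loop in forOps by
// the corresponding size and sink the resulting inner loops under the given
// targets, performing tiling of imperfectly nested loops with interchange.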
1270 template <typename SizeType>
1272 scf::ForOp target) {
1278 assert(res.size() == 1 && "Expected 1 inner forOp");
1287 for (auto it : llvm::zip(forOps, sizes)) {
1288 auto step = stripmineSink(std::get<0>(it), std::get<1>(it), currentTargets);
1289 res.push_back(step);
1290 currentTargets = step;
1296 scf::ForOp target) {
1299 res.push_back(llvm::getSingleElement(loops));
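// tilePerfectlyNested: collect the perfect loop nest rooted at rootForOp
// (truncating sizes if the nest is shorter) and tile it with the given
// parametric sizes.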
1307 forOps.reserve(sizes.size());
1309 if (forOps.size() < sizes.size())
1310 sizes = sizes.take_front(forOps.size());
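// extractFixedOuterLoops: tile the perfect nest so that the i-th outer loop
// runs roughly sizes[i] iterations, computing each tile size by ceil-dividing
// the iteration range by sizes[i].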
1325 forOps.reserve(sizes.size());
1327 if (forOps.size() < sizes.size())
1328 sizes = sizes.take_front(forOps.size());
1335 tileSizes.reserve(sizes.size());
1336 for (unsigned i = 0, e = sizes.size(); i < e; ++i) {
1337 assert(sizes[i] > 0 && "expected strictly positive size for strip-mining");
1339 auto forOp = forOps[i];
1341 auto loc = forOp.getLoc();
1342 Value diff = arith::SubIOp::create(builder, loc, forOp.getUpperBound(),
1343 forOp.getLowerBound());
1345 Value iterationsPerBlock =
1347 tileSizes.push_back(iterationsPerBlock);
1351 auto intraTile = tile(forOps, tileSizes, forOps.back());
1352 TileLoops tileLoops = std::make_pair(forOps, intraTile);
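// fuseIndependentSiblingForallLoops: fuse target into a new scf.forall built
// from source's bounds and mapping, cloning both bodies and in_parallel
// terminators, then replace both loops with slices of the fused results.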
1363 scf::ForallOp source,
1365 unsigned numTargetOuts = target.getNumResults();
1366 unsigned numSourceOuts = source.getNumResults();
1370 llvm::append_range(fusedOuts, target.getOutputs());
1371 llvm::append_range(fusedOuts, source.getOutputs());
1375 scf::ForallOp fusedLoop = scf::ForallOp::create(
1376 rewriter, source.getLoc(), source.getMixedLowerBound(),
1377 source.getMixedUpperBound(), source.getMixedStep(), fusedOuts,
1378 source.getMapping());
1382 mapping.map(target.getInductionVars(), fusedLoop.getInductionVars());
1383 mapping.map(source.getInductionVars(), fusedLoop.getInductionVars());
1386 mapping.map(target.getRegionIterArgs(),
1387 fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
1388 mapping.map(source.getRegionIterArgs(),
1389 fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
1393 for (Operation &op : target.getBody()->without_terminator())
1394 rewriter.clone(op, mapping);
1395 for (Operation &op : source.getBody()->without_terminator())
1396 rewriter.clone(op, mapping);
1399 scf::InParallelOp targetTerm = target.getTerminator();
1400 scf::InParallelOp sourceTerm = source.getTerminator();
1401 scf::InParallelOp fusedTerm = fusedLoop.getTerminator();
1403 for (Operation &op : targetTerm.getYieldingOps())
1404 rewriter.clone(op, mapping);
1405 for (Operation &op : sourceTerm.getYieldingOps())
1406 rewriter.clone(op, mapping);
1409 rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
1410 rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
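// fuseIndependentSiblingForLoops: the scf.for analogue of the forall fusion
// above; concatenate the init args, clone both bodies into one loop, and yield
// the combined results.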
1418 unsigned numTargetOuts = target.getNumResults();
1419 unsigned numSourceOuts = source.getNumResults();
1423 llvm::append_range(fusedInitArgs, target.getInitArgs());
1424 llvm::append_range(fusedInitArgs, source.getInitArgs());
1429 scf::ForOp fusedLoop = scf::ForOp::create(
1430 rewriter, source.getLoc(), source.getLowerBound(), source.getUpperBound(),
1431 source.getStep(), fusedInitArgs);
1435 mapping.map(target.getInductionVar(), fusedLoop.getInductionVar());
1436 mapping.map(target.getRegionIterArgs(),
1437 fusedLoop.getRegionIterArgs().take_front(numTargetOuts));
1438 mapping.map(source.getInductionVar(), fusedLoop.getInductionVar());
1439 mapping.map(source.getRegionIterArgs(),
1440 fusedLoop.getRegionIterArgs().take_back(numSourceOuts));
1444 for (Operation &op : target.getBody()->without_terminator())
1445 rewriter.clone(op, mapping);
1446 for (Operation &op : source.getBody()->without_terminator())
1447 rewriter.clone(op, mapping);
1451 for (Value operand : target.getBody()->getTerminator()->getOperands())
1453 for (Value operand : source.getBody()->getTerminator()->getOperands())
1455 if (!yieldResults.empty())
1456 scf::YieldOp::create(rewriter, source.getLoc(), yieldResults);
1459 rewriter.replaceOp(target, fusedLoop.getResults().take_front(numTargetOuts));
1460 rewriter.replaceOp(source, fusedLoop.getResults().take_back(numSourceOuts));
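// normalizeForallOp: rewrite an scf.forall so that every dimension runs from 0
// to its normalized trip count with unit step, denormalizing the induction
// variables inside the body.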
1466 scf::ForallOp forallOp) {
1471 if (forallOp.isNormalized())
1475 auto loc = forallOp.getLoc();
1478 for (auto [lb, ub, step] : llvm::zip_equal(lbs, ubs, steps)) {
1479 Range normalizedLoopParams =
1481 newUbs.push_back(normalizedLoopParams.size);
1487 auto normalizedForallOp = scf::ForallOp::create(
1488 rewriter, loc, newUbs, forallOp.getOutputs(), forallOp.getMapping(),
1492 normalizedForallOp.getBodyRegion(),
1493 normalizedForallOp.getBodyRegion().begin());
1495 rewriter.eraseBlock(&normalizedForallOp.getBodyRegion().back());
1499 for (auto [idx, iv] :
1506 rewriter.replaceOp(forallOp, normalizedForallOp);
1507 return normalizedForallOp;