25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallBitVector.h"
27#include "llvm/ADT/SmallVectorExtras.h"
28#include "llvm/ADT/TypeSwitch.h"
29#include "llvm/Support/DebugLog.h"
30#include "llvm/Support/LogicalResult.h"
31#include "llvm/Support/MathExtras.h"
38using llvm::divideCeilSigned;
39using llvm::divideFloorSigned;
42#define DEBUG_TYPE "affine-ops"
44#include "mlir/Dialect/Affine/IR/AffineOpsDialect.cpp.inc"
51 if (
auto arg = dyn_cast<BlockArgument>(value))
52 return arg.getParentRegion() == region;
75 if (llvm::isa<BlockArgument>(value))
76 return legalityCheck(mapping.
lookup(value), dest);
83 bool isDimLikeOp = isa<ShapedDimOpInterface>(value.
getDefiningOp());
94 return llvm::all_of(values, [&](
Value v) {
101template <
typename OpTy>
104 static_assert(llvm::is_one_of<OpTy, AffineReadOpInterface,
105 AffineWriteOpInterface>::value,
106 "only ops with affine read/write interface are supported");
113 dimOperands, src, dest, mapping,
117 symbolOperands, src, dest, mapping,
134 op.getMapOperands(), src, dest, mapping,
139 op.getMapOperands(), src, dest, mapping,
150struct AffineInlinerInterface :
public DialectInlinerInterface {
151 using DialectInlinerInterface::DialectInlinerInterface;
162 IRMapping &valueMapping)
const final {
166 if (!isa<AffineParallelOp, AffineForOp, AffineIfOp>(destOp))
177 for (Operation &op : srcBlock) {
179 if (
auto iface = dyn_cast<MemoryEffectOpInterface>(op)) {
180 if (iface.hasNoEffect())
187 llvm::TypeSwitch<Operation *, bool>(&op)
188 .Case<AffineApplyOp, AffineReadOpInterface,
189 AffineWriteOpInterface>([&](
auto op) {
192 .Default([](Operation *) {
206 bool isLegalToInline(Operation *op, Region *region,
bool wouldBeCloned,
207 IRMapping &valueMapping)
const final {
212 Operation *parentOp = region->getParentOp();
213 return parentOp->
hasTrait<OpTrait::AffineScope>() ||
214 isa<AffineForOp, AffineParallelOp, AffineIfOp>(parentOp);
218 bool shouldAnalyzeRecursively(Operation *op)
const final {
return true; }
226void AffineDialect::initialize() {
229#include "mlir/Dialect/Affine/IR/AffineOps.cpp.inc"
231 addInterfaces<AffineInlinerInterface>();
232 declarePromisedInterfaces<ValueBoundsOpInterface, AffineApplyOp, AffineMaxOp,
241 if (
auto poison = dyn_cast<ub::PoisonAttr>(value))
242 return ub::PoisonOp::create(builder, loc, type, poison);
243 return arith::ConstantOp::materialize(builder, value, type, loc);
251 if (
auto arg = dyn_cast<BlockArgument>(value)) {
267 while (
auto *parentOp = curOp->getParentOp()) {
269 return curOp->getParentRegion();
278 if (!isa<AffineForOp, AffineIfOp, AffineParallelOp>(parentOp))
303 auto *parentOp = llvm::cast<BlockArgument>(value).getOwner()->getParentOp();
331 if (
auto applyOp = dyn_cast<AffineApplyOp>(op))
332 return applyOp.isValidDim(region);
335 if (isa<AffineDelinearizeIndexOp, AffineLinearizeIndexOp>(op))
336 return llvm::all_of(op->getOperands(),
337 [&](
Value arg) { return ::isValidDim(arg, region); });
340 if (
auto dimOp = dyn_cast<ShapedDimOpInterface>(op))
348template <
typename AnyMemRefDefOp>
351 MemRefType memRefType = memrefDefOp.getType();
354 if (
index >= memRefType.getRank()) {
359 if (!memRefType.isDynamicDim(
index))
362 unsigned dynamicDimPos = memRefType.getDynamicDimIndex(
index);
363 return isValidSymbol(*(memrefDefOp.getDynamicSizes().begin() + dynamicDimPos),
375 if (llvm::isa<BlockArgument>(dimOp.getShapedValue()))
383 if (!
index.has_value())
387 Operation *op = dimOp.getShapedValue().getDefiningOp();
388 while (
auto castOp = dyn_cast<memref::CastOp>(op)) {
390 if (isa<UnrankedMemRefType>(castOp.getSource().getType()))
392 op = castOp.getSource().getDefiningOp();
399 .Case<memref::ViewOp, memref::SubViewOp, memref::AllocOp>(
401 .Default([](
Operation *) {
return false; });
435 if (parentRegion == region)
476 if (
isPure(defOp) && llvm::all_of(defOp->getOperands(), [&](
Value operand) {
477 return affine::isValidSymbol(operand, region);
483 if (
auto dimOp = dyn_cast<ShapedDimOpInterface>(defOp))
501 printer <<
'(' << operands.take_front(numDims) <<
')';
502 if (operands.size() > numDims)
503 printer <<
'[' << operands.drop_front(numDims) <<
']';
513 numDims = opInfos.size();
527template <
typename OpTy>
532 for (
auto operand : operands) {
533 if (opIt++ < numDims) {
535 return op.emitOpError(
"operand cannot be used as a dimension id");
537 return op.emitOpError(
"operand cannot be used as a symbol");
548 return AffineValueMap(getAffineMap(), getOperands(), getResult());
555 AffineMapAttr mapAttr;
561 auto map = mapAttr.getValue();
563 if (map.getNumDims() != numDims ||
564 numDims + map.getNumSymbols() !=
result.operands.size()) {
566 "dimension or symbol index mismatch");
569 result.types.append(map.getNumResults(), indexTy);
574 p <<
" " << getMapAttr();
576 getAffineMap().getNumDims(), p);
580LogicalResult AffineApplyOp::verify() {
587 "operand count and affine map dimension and symbol count must match");
591 return emitOpError(
"mapping must produce one value");
597 for (
Value operand : getMapOperands().drop_front(affineMap.
getNumDims())) {
599 return emitError(
"dimensional operand cannot be used as a symbol");
607bool AffineApplyOp::isValidDim() {
608 return llvm::all_of(getOperands(),
615bool AffineApplyOp::isValidDim(
Region *region) {
616 return llvm::all_of(getOperands(),
617 [&](
Value op) { return ::isValidDim(op, region); });
622bool AffineApplyOp::isValidSymbol() {
623 return llvm::all_of(getOperands(),
629bool AffineApplyOp::isValidSymbol(
Region *region) {
630 return llvm::all_of(getOperands(), [&](
Value operand) {
636 auto map = getAffineMap();
639 auto expr = map.getResult(0);
640 if (
auto dim = dyn_cast<AffineDimExpr>(expr))
641 return getOperand(dim.getPosition());
642 if (
auto sym = dyn_cast<AffineSymbolExpr>(expr))
643 return getOperand(map.getNumDims() + sym.getPosition());
647 bool hasPoison =
false;
649 map.constantFold(adaptor.getMapOperands(),
result, &hasPoison);
669 auto dimExpr = dyn_cast<AffineDimExpr>(e);
679 Value operand = operands[dimExpr.getPosition()];
684 if (forOp.hasConstantLowerBound() && forOp.getConstantLowerBound() == 0) {
685 operandDivisor = forOp.getStepAsInt();
687 uint64_t lbLargestKnownDivisor =
688 forOp.getLowerBoundMap().getLargestKnownDivisorOfMapExprs();
689 operandDivisor = std::gcd(lbLargestKnownDivisor, forOp.getStepAsInt());
692 return operandDivisor;
699 if (
auto constExpr = dyn_cast<AffineConstantExpr>(e)) {
700 int64_t constVal = constExpr.getValue();
701 return constVal >= 0 && constVal < k;
703 auto dimExpr = dyn_cast<AffineDimExpr>(e);
706 Value operand = operands[dimExpr.getPosition()];
710 if (forOp.hasConstantLowerBound() && forOp.getConstantLowerBound() >= 0 &&
711 forOp.hasConstantUpperBound() && forOp.getConstantUpperBound() <= k) {
727 auto bin = dyn_cast<AffineBinaryOpExpr>(e);
735 quotientTimesDiv = llhs;
741 quotientTimesDiv = rlhs;
751 if (forOp && forOp.hasConstantLowerBound())
752 return forOp.getConstantLowerBound();
759 if (!forOp || !forOp.hasConstantUpperBound())
764 if (forOp.hasConstantLowerBound()) {
765 return forOp.getConstantUpperBound() - 1 -
766 (forOp.getConstantUpperBound() - forOp.getConstantLowerBound() - 1) %
767 forOp.getStepAsInt();
769 return forOp.getConstantUpperBound() - 1;
780 constLowerBounds.reserve(operands.size());
781 constUpperBounds.reserve(operands.size());
782 for (
Value operand : operands) {
787 if (
auto constExpr = dyn_cast<AffineConstantExpr>(expr))
788 return constExpr.getValue();
803 constLowerBounds.reserve(operands.size());
804 constUpperBounds.reserve(operands.size());
805 for (
Value operand : operands) {
810 std::optional<int64_t> lowerBound;
811 if (
auto constExpr = dyn_cast<AffineConstantExpr>(expr)) {
812 lowerBound = constExpr.getValue();
815 constLowerBounds, constUpperBounds,
826 auto binExpr = dyn_cast<AffineBinaryOpExpr>(expr);
837 binExpr = dyn_cast<AffineBinaryOpExpr>(expr);
845 lhs = binExpr.getLHS();
846 rhs = binExpr.getRHS();
847 auto rhsConst = dyn_cast<AffineConstantExpr>(
rhs);
851 int64_t rhsConstVal = rhsConst.getValue();
853 if (rhsConstVal <= 0)
858 std::optional<int64_t> lhsLbConst =
860 std::optional<int64_t> lhsUbConst =
862 if (lhsLbConst && lhsUbConst) {
863 int64_t lhsLbConstVal = *lhsLbConst;
864 int64_t lhsUbConstVal = *lhsUbConst;
868 divideFloorSigned(lhsLbConstVal, rhsConstVal) ==
869 divideFloorSigned(lhsUbConstVal, rhsConstVal)) {
871 divideFloorSigned(lhsLbConstVal, rhsConstVal), context);
877 divideCeilSigned(lhsLbConstVal, rhsConstVal) ==
878 divideCeilSigned(lhsUbConstVal, rhsConstVal)) {
885 lhsLbConstVal < rhsConstVal && lhsUbConstVal < rhsConstVal) {
898 if (rhsConstVal % divisor == 0 &&
900 expr = quotientTimesDiv.
floorDiv(rhsConst);
901 }
else if (divisor % rhsConstVal == 0 &&
903 expr =
rem % rhsConst;
929 if (operands.empty())
935 constLowerBounds.reserve(operands.size());
936 constUpperBounds.reserve(operands.size());
937 for (
Value operand : operands) {
951 if (
auto constExpr = dyn_cast<AffineConstantExpr>(e)) {
952 lowerBounds.push_back(constExpr.getValue());
953 upperBounds.push_back(constExpr.getValue());
955 lowerBounds.push_back(
957 constLowerBounds, constUpperBounds,
959 upperBounds.push_back(
961 constLowerBounds, constUpperBounds,
968 for (
auto exprEn : llvm::enumerate(map.
getResults())) {
970 unsigned i = exprEn.index();
972 if (lowerBounds[i] && upperBounds[i] && *lowerBounds[i] == *upperBounds[i])
977 if (!upperBounds[i]) {
978 irredundantExprs.push_back(e);
983 if (!llvm::any_of(llvm::enumerate(lowerBounds), [&](
const auto &en) {
984 auto otherLowerBound = en.value();
985 unsigned pos = en.index();
986 if (pos == i || !otherLowerBound)
988 if (*otherLowerBound > *upperBounds[i])
990 if (*otherLowerBound < *upperBounds[i])
995 if (upperBounds[pos] && lowerBounds[i] &&
996 lowerBounds[i] == upperBounds[i] &&
997 otherLowerBound == *upperBounds[pos] && i < pos)
1001 irredundantExprs.push_back(e);
1003 if (!lowerBounds[i]) {
1004 irredundantExprs.push_back(e);
1008 if (!llvm::any_of(llvm::enumerate(upperBounds), [&](
const auto &en) {
1009 auto otherUpperBound = en.value();
1010 unsigned pos = en.index();
1011 if (pos == i || !otherUpperBound)
1013 if (*otherUpperBound < *lowerBounds[i])
1015 if (*otherUpperBound > *lowerBounds[i])
1017 if (lowerBounds[pos] && upperBounds[i] &&
1018 lowerBounds[i] == upperBounds[i] &&
1019 otherUpperBound == lowerBounds[pos] && i < pos)
1023 irredundantExprs.push_back(e);
1037 assert(map.
getNumInputs() == operands.size() &&
"invalid operands for map");
1043 newResults.push_back(expr);
1066 LDBG() <<
"replaceAffineMinBoundingBoxExpression: `" << minOp <<
"`";
1067 AffineMap affineMinMap = minOp.getAffineMap();
1070 for (
unsigned i = 0, e = affineMinMap.
getNumResults(); i < e; ++i) {
1076 minOp.getOperands())))
1084 for (
auto [i, dim] : llvm::enumerate(minOp.getDimOperands())) {
1085 auto it = llvm::find(dims, dim);
1086 if (it == dims.end()) {
1087 unmappedDims.push_back(i);
1093 for (
auto [i, sym] : llvm::enumerate(minOp.getSymbolOperands())) {
1094 auto it = llvm::find(syms, sym);
1095 if (it == syms.end()) {
1096 unmappedSyms.push_back(i);
1109 if (llvm::any_of(unmappedDims,
1110 [&](
unsigned i) {
return expr.isFunctionOfDim(i); }) ||
1111 llvm::any_of(unmappedSyms,
1112 [&](
unsigned i) {
return expr.isFunctionOfSymbol(i); }))
1118 repl[dimOrSym.
ceilDiv(convertedExpr)] = c1;
1120 repl[(dimOrSym + convertedExpr - 1).floorDiv(convertedExpr)] = c1;
1125 return success(*map != initialMap);
1134 AffineExpr e,
const llvm::SmallDenseSet<AffineExpr, 4> &exprsToRemove,
1136 auto binOp = dyn_cast<AffineBinaryOpExpr>(e);
1147 llvm::SmallDenseSet<AffineExpr, 4> ourTracker(exprsToRemove);
1152 if (!ourTracker.erase(thisTerm)) {
1153 toPreserve.push_back(thisTerm);
1157 auto nextBinOp = dyn_cast_if_present<AffineBinaryOpExpr>(nextTerm);
1159 thisTerm = nextTerm;
1162 thisTerm = nextBinOp.getRHS();
1163 nextTerm = nextBinOp.getLHS();
1166 if (!ourTracker.empty())
1171 for (
AffineExpr preserved : llvm::reverse(toPreserve))
1172 newExpr = newExpr + preserved;
1173 replacementsMap.insert({e, newExpr});
1191 AffineDelinearizeIndexOp delinOp,
Value resultToReplace,
AffineMap *map,
1193 if (!delinOp.getDynamicBasis().empty())
1195 if (resultToReplace != delinOp.getMultiIndex().back())
1200 for (
auto [pos, dim] : llvm::enumerate(dims)) {
1201 auto asResult = dyn_cast_if_present<OpResult>(dim);
1204 if (asResult.getOwner() == delinOp.getOperation())
1207 for (
auto [pos, sym] : llvm::enumerate(syms)) {
1208 auto asResult = dyn_cast_if_present<OpResult>(sym);
1211 if (asResult.getOwner() == delinOp.getOperation())
1214 if (llvm::is_contained(resToExpr,
AffineExpr()))
1217 bool isDimReplacement = llvm::all_of(resToExpr, llvm::IsaPred<AffineDimExpr>);
1219 llvm::SmallDenseSet<AffineExpr, 4> expectedExprs;
1222 for (
auto [binding, size] : llvm::zip(
1223 llvm::reverse(resToExpr), llvm::reverse(delinOp.getStaticBasis()))) {
1227 if (resToExpr.size() != delinOp.getStaticBasis().size())
1228 expectedExprs.insert(resToExpr[0] * stride);
1237 if (replacements.empty())
1241 if (isDimReplacement)
1242 dims.push_back(delinOp.getLinearIndex());
1244 syms.push_back(delinOp.getLinearIndex());
1245 *map = origMap.
replace(replacements, dims.size(), syms.size());
1249 if (
auto d = dyn_cast<AffineDimExpr>(e)) {
1250 unsigned pos = d.getPosition();
1252 dims[pos] =
nullptr;
1254 if (
auto s = dyn_cast<AffineSymbolExpr>(e)) {
1255 unsigned pos = s.getPosition();
1257 syms[pos] =
nullptr;
1276 unsigned dimOrSymbolPosition,
1279 bool replaceAffineMin) {
1281 bool isDimReplacement = (dimOrSymbolPosition < dims.size());
1282 unsigned pos = isDimReplacement ? dimOrSymbolPosition
1283 : dimOrSymbolPosition - dims.size();
1284 Value &v = isDimReplacement ? dims[pos] : syms[pos];
1288 if (
auto minOp = v.
getDefiningOp<AffineMinOp>(); minOp && replaceAffineMin) {
1295 if (
auto delinOp = v.
getDefiningOp<affine::AffineDelinearizeIndexOp>()) {
1309 AffineMap composeMap = affineApply.getAffineMap();
1310 assert(composeMap.
getNumResults() == 1 &&
"affine.apply with >1 results");
1312 affineApply.getMapOperands().end());
1326 dims.append(composeDims.begin(), composeDims.end());
1327 syms.append(composeSyms.begin(), composeSyms.end());
1328 *map = map->
replace(toReplace, replacementExpr, dims.size(), syms.size());
1338 bool composeAffineMin =
false) {
1357 bool changed =
false;
1358 for (
unsigned pos = 0; pos != dims.size() + syms.size(); ++pos)
1371 unsigned nDims = 0, nSyms = 0;
1373 dimReplacements.reserve(dims.size());
1374 symReplacements.reserve(syms.size());
1375 for (
auto *container : {&dims, &syms}) {
1376 bool isDim = (container == &dims);
1377 auto &repls = isDim ? dimReplacements : symReplacements;
1378 for (
const auto &en : llvm::enumerate(*container)) {
1379 Value v = en.value();
1383 "map is function of unexpected expr@pos");
1389 operands->push_back(v);
1402 while (llvm::any_of(*operands, [](
Value v) {
1408 if (composeAffineMin && llvm::any_of(*operands, [](
Value v) {
1418 bool composeAffineMin) {
1423 return AffineApplyOp::create(
b, loc, map, valueOperands);
1429 bool composeAffineMin) {
1434 operands, composeAffineMin);
1441 bool composeAffineMin =
false) {
1447 for (
unsigned i : llvm::seq<unsigned>(0, map.
getNumResults())) {
1455 llvm::append_range(dims,
1457 llvm::append_range(symbols,
1464 operands = llvm::to_vector(llvm::concat<Value>(dims, symbols));
1471 bool composeAffineMin) {
1472 assert(map.
getNumResults() == 1 &&
"building affine.apply with !=1 result");
1482 AffineApplyOp applyOp =
1487 for (
unsigned i = 0, e = constOperands.size(); i != e; ++i)
1492 if (failed(applyOp->fold(constOperands, foldResults)) ||
1493 foldResults.empty()) {
1495 listener->notifyOperationInserted(applyOp, {});
1496 return applyOp.getResult();
1500 return llvm::getSingleElement(foldResults);
1510 operands, composeAffineMin);
1516 bool composeAffineMin) {
1517 return llvm::map_to_vector(
1518 llvm::seq<unsigned>(0, map.
getNumResults()), [&](
unsigned i) {
1519 return makeComposedFoldedAffineApply(b, loc, map.getSubMap({i}),
1520 operands, composeAffineMin);
1524template <
typename OpTy>
1530 return OpTy::create(
b, loc,
b.getIndexType(), map, valueOperands);
1539template <
typename OpTy>
1555 for (
unsigned i = 0, e = constOperands.size(); i != e; ++i)
1560 if (failed(minMaxOp->fold(constOperands, foldResults)) ||
1561 foldResults.empty()) {
1563 listener->notifyOperationInserted(minMaxOp, {});
1564 return minMaxOp.getResult();
1568 return llvm::getSingleElement(foldResults);
1587template <
class MapOrSet>
1590 if (!mapOrSet || operands->empty())
1593 assert(mapOrSet->getNumInputs() == operands->size() &&
1594 "map/set inputs must match number of operands");
1596 auto *context = mapOrSet->getContext();
1598 resultOperands.reserve(operands->size());
1600 remappedSymbols.reserve(operands->size());
1601 unsigned nextDim = 0;
1602 unsigned nextSym = 0;
1603 unsigned oldNumSyms = mapOrSet->getNumSymbols();
1605 for (
unsigned i = 0, e = mapOrSet->getNumInputs(); i != e; ++i) {
1606 if (i < mapOrSet->getNumDims()) {
1610 remappedSymbols.push_back((*operands)[i]);
1613 resultOperands.push_back((*operands)[i]);
1616 resultOperands.push_back((*operands)[i]);
1620 resultOperands.append(remappedSymbols.begin(), remappedSymbols.end());
1621 *operands = resultOperands;
1622 *mapOrSet = mapOrSet->replaceDimsAndSymbols(
1623 dimRemapping, {}, nextDim, oldNumSyms + nextSym);
1625 assert(mapOrSet->getNumInputs() == operands->size() &&
1626 "map/set inputs must match number of operands");
1635template <
class MapOrSet>
1638 if (!mapOrSet || operands.empty())
1641 unsigned numOperands = operands.size();
1643 assert(mapOrSet.getNumInputs() == numOperands &&
1644 "map/set inputs must match number of operands");
1646 auto *context = mapOrSet.getContext();
1648 resultOperands.reserve(numOperands);
1650 remappedDims.reserve(numOperands);
1652 symOperands.reserve(mapOrSet.getNumSymbols());
1653 unsigned nextSym = 0;
1654 unsigned nextDim = 0;
1655 unsigned oldNumDims = mapOrSet.getNumDims();
1657 resultOperands.assign(operands.begin(), operands.begin() + oldNumDims);
1658 for (
unsigned i = oldNumDims, e = mapOrSet.getNumInputs(); i != e; ++i) {
1661 symRemapping[i - oldNumDims] =
1663 remappedDims.push_back(operands[i]);
1666 symOperands.push_back(operands[i]);
1670 append_range(resultOperands, remappedDims);
1671 append_range(resultOperands, symOperands);
1672 operands = resultOperands;
1673 mapOrSet = mapOrSet.replaceDimsAndSymbols(
1674 {}, symRemapping, oldNumDims + nextDim, nextSym);
1676 assert(mapOrSet.getNumInputs() == operands.size() &&
1677 "map/set inputs must match number of operands");
1681template <
class MapOrSet>
1684 static_assert(llvm::is_one_of<MapOrSet, AffineMap, IntegerSet>::value,
1685 "Argument must be either of AffineMap or IntegerSet type");
1687 if (!mapOrSet || operands->empty())
1690 assert(mapOrSet->getNumInputs() == operands->size() &&
1691 "map/set inputs must match number of operands");
1697 llvm::SmallBitVector usedDims(mapOrSet->getNumDims());
1698 llvm::SmallBitVector usedSyms(mapOrSet->getNumSymbols());
1700 if (
auto dimExpr = dyn_cast<AffineDimExpr>(expr))
1701 usedDims[dimExpr.getPosition()] =
true;
1702 else if (
auto symExpr = dyn_cast<AffineSymbolExpr>(expr))
1703 usedSyms[symExpr.getPosition()] =
true;
1706 auto *context = mapOrSet->getContext();
1709 resultOperands.reserve(operands->size());
1711 llvm::SmallDenseMap<Value, AffineExpr, 8> seenDims;
1713 unsigned nextDim = 0;
1714 for (
unsigned i = 0, e = mapOrSet->getNumDims(); i != e; ++i) {
1717 auto it = seenDims.find((*operands)[i]);
1718 if (it == seenDims.end()) {
1720 resultOperands.push_back((*operands)[i]);
1721 seenDims.insert(std::make_pair((*operands)[i], dimRemapping[i]));
1723 dimRemapping[i] = it->second;
1727 llvm::SmallDenseMap<Value, AffineExpr, 8> seenSymbols;
1729 unsigned nextSym = 0;
1730 for (
unsigned i = 0, e = mapOrSet->getNumSymbols(); i != e; ++i) {
1736 IntegerAttr operandCst;
1737 if (
matchPattern((*operands)[i + mapOrSet->getNumDims()],
1744 auto it = seenSymbols.find((*operands)[i + mapOrSet->getNumDims()]);
1745 if (it == seenSymbols.end()) {
1747 resultOperands.push_back((*operands)[i + mapOrSet->getNumDims()]);
1748 seenSymbols.insert(std::make_pair((*operands)[i + mapOrSet->getNumDims()],
1751 symRemapping[i] = it->second;
1754 *mapOrSet = mapOrSet->replaceDimsAndSymbols(dimRemapping, symRemapping,
1756 *operands = resultOperands;
1773template <
typename AffineOpTy>
1782 LogicalResult matchAndRewrite(AffineOpTy affineOp,
1785 llvm::is_one_of<AffineOpTy, AffineLoadOp, AffinePrefetchOp,
1786 AffineStoreOp, AffineApplyOp, AffineMinOp, AffineMaxOp,
1787 AffineVectorStoreOp, AffineVectorLoadOp>::value,
1788 "affine load/store/vectorstore/vectorload/apply/prefetch/min/max op "
1790 auto map = affineOp.getAffineMap();
1792 auto oldOperands = affineOp.getMapOperands();
1797 if (map == oldMap && std::equal(oldOperands.begin(), oldOperands.end(),
1798 resultOperands.begin()))
1801 replaceAffineOp(rewriter, affineOp, map, resultOperands);
1809void SimplifyAffineOp<AffineLoadOp>::replaceAffineOp(
1816void SimplifyAffineOp<AffinePrefetchOp>::replaceAffineOp(
1820 prefetch, prefetch.getMemref(), map, mapOperands, prefetch.getIsWrite(),
1821 prefetch.getLocalityHint(), prefetch.getIsDataCache());
1824void SimplifyAffineOp<AffineStoreOp>::replaceAffineOp(
1828 store, store.getValueToStore(), store.getMemRef(), map, mapOperands);
1831void SimplifyAffineOp<AffineVectorLoadOp>::replaceAffineOp(
1835 vectorload, vectorload.getVectorType(), vectorload.getMemRef(), map,
1839void SimplifyAffineOp<AffineVectorStoreOp>::replaceAffineOp(
1843 vectorstore, vectorstore.getValueToStore(), vectorstore.getMemRef(), map,
1848template <
typename AffineOpTy>
1849void SimplifyAffineOp<AffineOpTy>::replaceAffineOp(
1858 results.
add<SimplifyAffineOp<AffineApplyOp>>(context);
1873 result.addOperands(srcMemRef);
1874 result.addAttribute(getSrcMapAttrStrName(), AffineMapAttr::get(srcMap));
1875 result.addOperands(srcIndices);
1876 result.addOperands(destMemRef);
1877 result.addAttribute(getDstMapAttrStrName(), AffineMapAttr::get(dstMap));
1878 result.addOperands(destIndices);
1879 result.addOperands(tagMemRef);
1880 result.addAttribute(getTagMapAttrStrName(), AffineMapAttr::get(tagMap));
1881 result.addOperands(tagIndices);
1882 result.addOperands(numElements);
1884 result.addOperands({stride, elementsPerStride});
1889 p <<
" " << getSrcMemRef() <<
'[';
1891 p <<
"], " << getDstMemRef() <<
'[';
1893 p <<
"], " << getTagMemRef() <<
'[';
1897 p <<
", " << getStride();
1898 p <<
", " << getNumElementsPerStride();
1900 p <<
" : " << getSrcMemRefType() <<
", " << getDstMemRefType() <<
", "
1901 << getTagMemRefType();
1910ParseResult AffineDmaStartOp::parse(
OpAsmParser &parser,
1913 AffineMapAttr srcMapAttr;
1916 AffineMapAttr dstMapAttr;
1919 AffineMapAttr tagMapAttr;
1934 getSrcMapAttrStrName(),
1938 getDstMapAttrStrName(),
1942 getTagMapAttrStrName(),
1951 if (!strideInfo.empty() && strideInfo.size() != 2) {
1953 "expected two stride related operands");
1955 bool isStrided = strideInfo.size() == 2;
1960 if (types.size() != 3)
1978 if (srcMapOperands.size() != srcMapAttr.getValue().getNumInputs() ||
1979 dstMapOperands.size() != dstMapAttr.getValue().getNumInputs() ||
1980 tagMapOperands.size() != tagMapAttr.getValue().getNumInputs())
1982 "memref operand count not equal to map.numInputs");
1986LogicalResult AffineDmaStartOp::verify() {
1987 if (!llvm::isa<MemRefType>(getOperand(getSrcMemRefOperandIndex()).
getType()))
1988 return emitOpError(
"expected DMA source to be of memref type");
1989 if (!llvm::isa<MemRefType>(getOperand(getDstMemRefOperandIndex()).
getType()))
1990 return emitOpError(
"expected DMA destination to be of memref type");
1991 if (!llvm::isa<MemRefType>(getOperand(getTagMemRefOperandIndex()).
getType()))
1992 return emitOpError(
"expected DMA tag to be of memref type");
1994 unsigned numInputsAllMaps = getSrcMap().getNumInputs() +
1995 getDstMap().getNumInputs() +
1996 getTagMap().getNumInputs();
1997 if (getNumOperands() != numInputsAllMaps + 3 + 1 &&
1998 getNumOperands() != numInputsAllMaps + 3 + 1 + 2) {
1999 return emitOpError(
"incorrect number of operands");
2003 for (
auto idx : getSrcIndices()) {
2004 if (!idx.getType().isIndex())
2005 return emitOpError(
"src index to dma_start must have 'index' type");
2008 "src index must be a valid dimension or symbol identifier");
2010 for (
auto idx : getDstIndices()) {
2011 if (!idx.getType().isIndex())
2012 return emitOpError(
"dst index to dma_start must have 'index' type");
2015 "dst index must be a valid dimension or symbol identifier");
2017 for (
auto idx : getTagIndices()) {
2018 if (!idx.getType().isIndex())
2019 return emitOpError(
"tag index to dma_start must have 'index' type");
2022 "tag index must be a valid dimension or symbol identifier");
2027LogicalResult AffineDmaStartOp::fold(FoldAdaptor adaptor,
2033void AffineDmaStartOp::getEffects(
2052 result.addOperands(tagMemRef);
2053 result.addAttribute(getTagMapAttrStrName(), AffineMapAttr::get(tagMap));
2054 result.addOperands(tagIndices);
2055 result.addOperands(numElements);
2059 p <<
" " << getTagMemRef() <<
'[';
2064 p <<
" : " << getTagMemRef().getType();
2072ParseResult AffineDmaWaitOp::parse(
OpAsmParser &parser,
2075 AffineMapAttr tagMapAttr;
2084 getTagMapAttrStrName(),
2093 if (!llvm::isa<MemRefType>(type))
2095 "expected tag to be of memref type");
2097 if (tagMapOperands.size() != tagMapAttr.getValue().getNumInputs())
2099 "tag memref operand count != to map.numInputs");
2103LogicalResult AffineDmaWaitOp::verify() {
2104 if (!llvm::isa<MemRefType>(getOperand(0).
getType()))
2105 return emitOpError(
"expected DMA tag to be of memref type");
2107 for (
auto idx : getTagIndices()) {
2108 if (!idx.getType().isIndex())
2109 return emitOpError(
"index to dma_wait must have 'index' type");
2112 "index must be a valid dimension or symbol identifier");
2117LogicalResult AffineDmaWaitOp::fold(FoldAdaptor adaptor,
2123void AffineDmaWaitOp::getEffects(
2139 ValueRange iterArgs, BodyBuilderFn bodyBuilder) {
2140 assert(((!lbMap && lbOperands.empty()) ||
2142 "lower bound operand count does not match the affine map");
2143 assert(((!ubMap && ubOperands.empty()) ||
2145 "upper bound operand count does not match the affine map");
2146 assert(step > 0 &&
"step has to be a positive integer constant");
2148 OpBuilder::InsertionGuard guard(builder);
2152 getOperandSegmentSizeAttr(),
2154 static_cast<int32_t>(ubOperands.size()),
2155 static_cast<int32_t>(iterArgs.size())}));
2157 for (Value val : iterArgs)
2158 result.addTypes(val.getType());
2165 result.addAttribute(getLowerBoundMapAttrName(
result.name),
2166 AffineMapAttr::get(lbMap));
2167 result.addOperands(lbOperands);
2170 result.addAttribute(getUpperBoundMapAttrName(
result.name),
2171 AffineMapAttr::get(ubMap));
2172 result.addOperands(ubOperands);
2174 result.addOperands(iterArgs);
2177 Region *bodyRegion =
result.addRegion();
2179 Value inductionVar =
2181 for (Value val : iterArgs)
2182 bodyBlock->
addArgument(val.getType(), val.getLoc());
2187 if (iterArgs.empty() && !bodyBuilder) {
2188 ensureTerminator(*bodyRegion, builder,
result.location);
2189 }
else if (bodyBuilder) {
2190 OpBuilder::InsertionGuard guard(builder);
2192 bodyBuilder(builder,
result.location, inductionVar,
2199 BodyBuilderFn bodyBuilder) {
2202 return build(builder,
result, {}, lbMap, {}, ubMap, step, iterArgs,
2206LogicalResult AffineForOp::verifyRegions() {
2208 if (getStepAsInt() <= 0)
2209 return emitOpError(
"expected step to be a positive integer, got ")
2214 auto *body = getBody();
2215 if (body->getNumArguments() == 0 || !body->getArgument(0).getType().isIndex())
2216 return emitOpError(
"expected body to have a single index argument for the "
2217 "induction variable");
2221 if (getLowerBoundMap().getNumInputs() > 0)
2223 getLowerBoundMap().getNumDims())))
2226 if (getUpperBoundMap().getNumInputs() > 0)
2228 getUpperBoundMap().getNumDims())))
2230 if (getLowerBoundMap().getNumResults() < 1)
2231 return emitOpError(
"expected lower bound map to have at least one result");
2232 if (getUpperBoundMap().getNumResults() < 1)
2233 return emitOpError(
"expected upper bound map to have at least one result");
2235 unsigned opNumResults = getNumResults();
2236 if (opNumResults == 0)
2242 if (getNumIterOperands() != opNumResults)
2244 "mismatch between the number of loop-carried values and results");
2245 if (getNumRegionIterArgs() != opNumResults)
2247 "mismatch between the number of basic block args and results");
2257 bool failedToParsedMinMax =
2261 auto boundAttrStrName =
2262 isLower ? AffineForOp::getLowerBoundMapAttrName(
result.name)
2263 : AffineForOp::getUpperBoundMapAttrName(
result.name);
2270 if (!boundOpInfos.empty()) {
2272 if (boundOpInfos.size() > 1)
2274 "expected only one loop bound operand");
2286 result.addAttribute(boundAttrStrName, AffineMapAttr::get(map));
2299 if (
auto affineMapAttr = dyn_cast<AffineMapAttr>(boundAttr)) {
2300 unsigned currentNumOperands =
result.operands.size();
2305 auto map = affineMapAttr.getValue();
2306 if (map.getNumDims() != numDims)
2309 "dim operand count and affine map dim count must match");
2311 unsigned numDimAndSymbolOperands =
2312 result.operands.size() - currentNumOperands;
2313 if (numDims + map.getNumSymbols() != numDimAndSymbolOperands)
2316 "symbol operand count and affine map symbol count must match");
2320 if (map.getNumResults() > 1 && failedToParsedMinMax) {
2322 return p.
emitError(attrLoc,
"lower loop bound affine map with "
2323 "multiple results requires 'max' prefix");
2325 return p.
emitError(attrLoc,
"upper loop bound affine map with multiple "
2326 "results requires 'min' prefix");
2332 if (
auto integerAttr = dyn_cast<IntegerAttr>(boundAttr)) {
2333 result.attributes.pop_back();
2342 "expected valid affine map representation for loop bounds");
2347 OpAsmParser::Argument inductionVariable;
2354 int64_t numOperands =
result.operands.size();
2357 int64_t numLbOperands =
result.operands.size() - numOperands;
2360 numOperands =
result.operands.size();
2363 int64_t numUbOperands =
result.operands.size() - numOperands;
2368 getStepAttrName(
result.name),
2372 IntegerAttr stepAttr;
2374 getStepAttrName(
result.name).data(),
2378 if (!stepAttr.getValue().isStrictlyPositive())
2381 "expected step to be representable as a positive signed integer");
2385 SmallVector<OpAsmParser::Argument, 4> regionArgs;
2386 SmallVector<OpAsmParser::UnresolvedOperand, 4> operands;
2389 regionArgs.push_back(inductionVariable);
2397 for (
auto argOperandType :
2398 llvm::zip(llvm::drop_begin(regionArgs), operands,
result.types)) {
2399 Type type = std::get<2>(argOperandType);
2400 std::get<0>(argOperandType).type = type;
2408 getOperandSegmentSizeAttr(),
2410 static_cast<int32_t>(numUbOperands),
2411 static_cast<int32_t>(operands.size())}));
2414 Region *body =
result.addRegion();
2415 if (regionArgs.size() !=
result.types.size() + 1)
2418 "mismatch between the number of loop-carried values and results");
2422 AffineForOp::ensureTerminator(*body, builder,
result.location);
2444 if (
auto constExpr = dyn_cast<AffineConstantExpr>(expr)) {
2445 p << constExpr.getValue();
2453 if (isa<AffineSymbolExpr>(expr)) {
2469unsigned AffineForOp::getNumIterOperands() {
2470 AffineMap lbMap = getLowerBoundMapAttr().getValue();
2471 AffineMap ubMap = getUpperBoundMapAttr().getValue();
2476std::optional<MutableArrayRef<OpOperand>>
2477AffineForOp::getYieldedValuesMutable() {
2478 return cast<AffineYieldOp>(getBody()->getTerminator()).getOperandsMutable();
2490 if (getStepAsInt() != 1)
2491 p <<
" step " << getStepAsInt();
2493 bool printBlockTerminators =
false;
2494 if (getNumIterOperands() > 0) {
2496 auto regionArgs = getRegionIterArgs();
2497 auto operands = getInits();
2499 llvm::interleaveComma(llvm::zip(regionArgs, operands), p, [&](
auto it) {
2500 p << std::get<0>(it) <<
" = " << std::get<1>(it);
2502 p <<
") -> (" << getResultTypes() <<
")";
2503 printBlockTerminators =
true;
2508 printBlockTerminators);
2510 (*this)->getAttrs(),
2511 {getLowerBoundMapAttrName(getOperation()->getName()),
2512 getUpperBoundMapAttrName(getOperation()->getName()),
2513 getStepAttrName(getOperation()->getName()),
2514 getOperandSegmentSizeAttr()});
2519 auto foldLowerOrUpperBound = [&forOp](
bool lower) {
2523 auto boundOperands =
2524 lower ? forOp.getLowerBoundOperands() : forOp.getUpperBoundOperands();
2525 for (
auto operand : boundOperands) {
2528 operandConstants.push_back(operandCst);
2532 lower ? forOp.getLowerBoundMap() : forOp.getUpperBoundMap();
2534 "bound maps should have at least one result");
2536 if (failed(boundMap.
constantFold(operandConstants, foldedResults)))
2540 assert(!foldedResults.empty() &&
"bounds should have at least one result");
2541 auto maxOrMin = llvm::cast<IntegerAttr>(foldedResults[0]).getValue();
2542 for (
unsigned i = 1, e = foldedResults.size(); i < e; i++) {
2543 auto foldedResult = llvm::cast<IntegerAttr>(foldedResults[i]).getValue();
2544 maxOrMin = lower ? llvm::APIntOps::smax(maxOrMin, foldedResult)
2545 : llvm::APIntOps::smin(maxOrMin, foldedResult);
2547 lower ? forOp.setConstantLowerBound(maxOrMin.getSExtValue())
2548 : forOp.setConstantUpperBound(maxOrMin.getSExtValue());
2553 bool folded =
false;
2554 if (!forOp.hasConstantLowerBound())
2555 folded |= succeeded(foldLowerOrUpperBound(
true));
2558 if (!forOp.hasConstantUpperBound())
2559 folded |= succeeded(foldLowerOrUpperBound(
false));
2565 int64_t step = forOp.getStepAsInt();
2566 if (!forOp.hasConstantBounds() || step <= 0)
2567 return std::nullopt;
2568 int64_t lb = forOp.getConstantLowerBound();
2569 int64_t ub = forOp.getConstantUpperBound();
2570 return ub - lb <= 0 ? 0 : (
ub - lb + step - 1) / step;
2575 if (!llvm::hasSingleElement(*forOp.getBody()))
2577 if (forOp.getNumResults() == 0)
2580 if (tripCount == 0) {
2583 return forOp.getInits();
2586 auto yieldOp = cast<AffineYieldOp>(forOp.getBody()->getTerminator());
2587 auto iterArgs = forOp.getRegionIterArgs();
2588 bool hasValDefinedOutsideLoop =
false;
2589 bool iterArgsNotInOrder =
false;
2590 for (
unsigned i = 0, e = yieldOp->getNumOperands(); i < e; ++i) {
2591 Value val = yieldOp.getOperand(i);
2595 if (val == forOp.getInductionVar())
2597 if (iterArgIt == iterArgs.end()) {
2599 assert(forOp.isDefinedOutsideOfLoop(val) &&
2600 "must be defined outside of the loop");
2601 hasValDefinedOutsideLoop =
true;
2602 replacements.push_back(val);
2604 unsigned pos = std::distance(iterArgs.begin(), iterArgIt);
2606 iterArgsNotInOrder =
true;
2607 replacements.push_back(forOp.getInits()[pos]);
2612 if (!tripCount.has_value() &&
2613 (hasValDefinedOutsideLoop || iterArgsNotInOrder))
2617 if (tripCount.has_value() && tripCount.value() >= 2 && iterArgsNotInOrder)
2619 return llvm::to_vector_of<OpFoldResult>(replacements);
2627 auto lbMap = forOp.getLowerBoundMap();
2628 auto ubMap = forOp.getUpperBoundMap();
2629 auto prevLbMap = lbMap;
2630 auto prevUbMap = ubMap;
2643 if (lbMap == prevLbMap && ubMap == prevUbMap)
2646 if (lbMap != prevLbMap)
2647 forOp.setLowerBound(lbOperands, lbMap);
2648 if (ubMap != prevUbMap)
2649 forOp.setUpperBound(ubOperands, ubMap);
2658LogicalResult AffineForOp::fold(FoldAdaptor adaptor,
2668 results.assign(getInits().begin(), getInits().end());
2672 if (!foldResults.empty()) {
2673 results.assign(foldResults);
2681 "invalid region point");
2688void AffineForOp::getSuccessorRegions(
2693 "expected loop region");
2699 if (tripCount.has_value()) {
2703 if (tripCount == 1) {
2710 if (tripCount.value() > 0) {
2711 regions.push_back(RegionSuccessor(&getRegion()));
2714 if (tripCount.value() == 0) {
2723 regions.push_back(RegionSuccessor(&getRegion()));
2729 return getResults();
2730 return getRegionIterArgs();
2743 assert(map.
getNumResults() >= 1 &&
"bound map has at least one result");
2744 getLowerBoundOperandsMutable().assign(lbOperands);
2745 setLowerBoundMap(map);
2750 assert(map.
getNumResults() >= 1 &&
"bound map has at least one result");
2751 getUpperBoundOperandsMutable().assign(ubOperands);
2752 setUpperBoundMap(map);
2755bool AffineForOp::hasConstantLowerBound() {
2756 return getLowerBoundMap().isSingleConstant();
2759bool AffineForOp::hasConstantUpperBound() {
2760 return getUpperBoundMap().isSingleConstant();
2763int64_t AffineForOp::getConstantLowerBound() {
2764 return getLowerBoundMap().getSingleConstantResult();
2767int64_t AffineForOp::getConstantUpperBound() {
2768 return getUpperBoundMap().getSingleConstantResult();
2771void AffineForOp::setConstantLowerBound(
int64_t value) {
2775void AffineForOp::setConstantUpperBound(
int64_t value) {
2779AffineForOp::operand_range AffineForOp::getControlOperands() {
2784bool AffineForOp::matchingBoundOperandList() {
2785 auto lbMap = getLowerBoundMap();
2786 auto ubMap = getUpperBoundMap();
2792 for (
unsigned i = 0, e = lbMap.
getNumInputs(); i < e; i++) {
2794 if (getOperand(i) != getOperand(numOperands + i))
2802std::optional<SmallVector<Value>> AffineForOp::getLoopInductionVars() {
2803 return SmallVector<Value>{getInductionVar()};
2806std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopLowerBounds() {
2807 if (!hasConstantLowerBound())
2808 return std::nullopt;
2810 return SmallVector<OpFoldResult>{
2811 OpFoldResult(
b.getI64IntegerAttr(getConstantLowerBound()))};
2814std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopSteps() {
2816 return SmallVector<OpFoldResult>{
2817 OpFoldResult(
b.getI64IntegerAttr(getStepAsInt()))};
2820std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopUpperBounds() {
2821 if (!hasConstantUpperBound())
2824 return SmallVector<OpFoldResult>{
2825 OpFoldResult(
b.getI64IntegerAttr(getConstantUpperBound()))};
2828FailureOr<LoopLikeOpInterface> AffineForOp::replaceWithAdditionalYields(
2830 bool replaceInitOperandUsesInLoop,
2833 OpBuilder::InsertionGuard g(rewriter);
2835 auto inits = llvm::to_vector(getInits());
2836 inits.append(newInitOperands.begin(), newInitOperands.end());
2837 AffineForOp newLoop = AffineForOp::create(
2842 auto yieldOp = cast<AffineYieldOp>(getBody()->getTerminator());
2843 ArrayRef<BlockArgument> newIterArgs =
2844 newLoop.getBody()->getArguments().take_back(newInitOperands.size());
2846 OpBuilder::InsertionGuard g(rewriter);
2848 SmallVector<Value> newYieldedValues =
2849 newYieldValuesFn(rewriter, getLoc(), newIterArgs);
2850 assert(newInitOperands.size() == newYieldedValues.size() &&
2851 "expected as many new yield values as new iter operands");
2853 yieldOp.getOperandsMutable().append(newYieldedValues);
2858 rewriter.
mergeBlocks(getBody(), newLoop.getBody(),
2859 newLoop.getBody()->getArguments().take_front(
2860 getBody()->getNumArguments()));
2862 if (replaceInitOperandUsesInLoop) {
2865 for (
auto it : llvm::zip(newInitOperands, newIterArgs)) {
2867 [&](OpOperand &use) {
2869 return newLoop->isProperAncestor(user);
2876 newLoop->getResults().take_front(getNumResults()));
2877 return cast<LoopLikeOpInterface>(newLoop.getOperation());
2905 auto ivArg = dyn_cast<BlockArgument>(val);
2906 if (!ivArg || !ivArg.getOwner() || !ivArg.getOwner()->getParent())
2907 return AffineForOp();
2909 ivArg.getOwner()->getParent()->getParentOfType<AffineForOp>())
2911 return forOp.getInductionVar() == val ? forOp : AffineForOp();
2912 return AffineForOp();
2916 auto ivArg = dyn_cast<BlockArgument>(val);
2917 if (!ivArg || !ivArg.getOwner())
2920 auto parallelOp = dyn_cast_if_present<AffineParallelOp>(containingOp);
2921 if (parallelOp && llvm::is_contained(parallelOp.getIVs(), val))
2930 ivs->reserve(forInsts.size());
2931 for (
auto forInst : forInsts)
2932 ivs->push_back(forInst.getInductionVar());
2937 ivs.reserve(affineOps.size());
2940 if (
auto forOp = dyn_cast<AffineForOp>(op))
2941 ivs.push_back(forOp.getInductionVar());
2942 else if (
auto parallelOp = dyn_cast<AffineParallelOp>(op))
2943 for (
size_t i = 0; i < parallelOp.getBody()->getNumArguments(); i++)
2944 ivs.push_back(parallelOp.getBody()->getArgument(i));
2950template <
typename BoundListTy,
typename LoopCreatorTy>
2955 LoopCreatorTy &&loopCreatorFn) {
2956 assert(lbs.size() == ubs.size() &&
"Mismatch in number of arguments");
2957 assert(lbs.size() == steps.size() &&
"Mismatch in number of arguments");
2969 ivs.reserve(lbs.size());
2970 for (
unsigned i = 0, e = lbs.size(); i < e; ++i) {
2976 if (i == e - 1 && bodyBuilderFn) {
2978 bodyBuilderFn(nestedBuilder, nestedLoc, ivs);
2980 AffineYieldOp::create(nestedBuilder, nestedLoc);
2985 auto loop = loopCreatorFn(builder, loc, lbs[i], ubs[i], steps[i], loopBody);
2994 AffineForOp::BodyBuilderFn bodyBuilderFn) {
2995 return AffineForOp::create(builder, loc, lb,
ub, step,
3003 AffineForOp::BodyBuilderFn bodyBuilderFn) {
3006 if (lbConst && ubConst)
3008 ubConst.value(), step, bodyBuilderFn);
3039 LogicalResult matchAndRewrite(AffineIfOp ifOp,
3041 if (ifOp.getElseRegion().empty() ||
3042 !llvm::hasSingleElement(*ifOp.getElseBlock()) || ifOp.getNumResults())
3055 using OpRewritePattern<AffineIfOp>::OpRewritePattern;
3057 LogicalResult matchAndRewrite(AffineIfOp op,
3058 PatternRewriter &rewriter)
const override {
3060 auto isTriviallyFalse = [](IntegerSet iSet) {
3061 return iSet.isEmptyIntegerSet();
3064 auto isTriviallyTrue = [](IntegerSet iSet) {
3065 return (iSet.getNumEqualities() == 1 && iSet.getNumInequalities() == 0 &&
3066 iSet.getConstraint(0) == 0);
3069 IntegerSet affineIfConditions = op.getIntegerSet();
3071 if (isTriviallyFalse(affineIfConditions)) {
3075 if (op.getNumResults() == 0 && !op.hasElse()) {
3081 blockToMove = op.getElseBlock();
3082 }
else if (isTriviallyTrue(affineIfConditions)) {
3083 blockToMove = op.getThenBlock();
3087 Operation *blockToMoveTerminator = blockToMove->
getTerminator();
3101 rewriter.
eraseOp(blockToMoveTerminator);
3109void AffineIfOp::getSuccessorRegions(
3117 if (getElseRegion().empty()) {
3132 return getResults();
3133 if (successor == &getThenRegion())
3134 return getThenRegion().getArguments();
3135 if (successor == &getElseRegion())
3136 return getElseRegion().getArguments();
3137 llvm_unreachable(
"invalid region successor");
3140LogicalResult AffineIfOp::verify() {
3143 auto conditionAttr =
3144 (*this)->getAttrOfType<IntegerSetAttr>(getConditionAttrStrName());
3146 return emitOpError(
"requires an integer set attribute named 'condition'");
3149 IntegerSet condition = conditionAttr.getValue();
3151 return emitOpError(
"operand count and condition integer set dimension and "
3152 "symbol count must match");
3164 IntegerSetAttr conditionAttr;
3167 AffineIfOp::getConditionAttrStrName(),
3173 auto set = conditionAttr.getValue();
3174 if (set.getNumDims() != numDims)
3177 "dim operand count and integer set dim count must match");
3178 if (numDims + set.getNumSymbols() !=
result.operands.size())
3181 "symbol operand count and integer set symbol count must match");
3188 result.regions.reserve(2);
3195 AffineIfOp::ensureTerminator(*thenRegion, parser.
getBuilder(),
3202 AffineIfOp::ensureTerminator(*elseRegion, parser.
getBuilder(),
3214 auto conditionAttr =
3215 (*this)->getAttrOfType<IntegerSetAttr>(getConditionAttrStrName());
3216 p <<
" " << conditionAttr;
3218 conditionAttr.getValue().getNumDims(), p);
3225 auto &elseRegion = this->getElseRegion();
3226 if (!elseRegion.
empty()) {
3235 getConditionAttrStrName());
3240 ->getAttrOfType<IntegerSetAttr>(getConditionAttrStrName())
3244void AffineIfOp::setIntegerSet(
IntegerSet newSet) {
3245 (*this)->setAttr(getConditionAttrStrName(), IntegerSetAttr::get(newSet));
3250 (*this)->setOperands(operands);
3255 bool withElseRegion) {
3256 assert(resultTypes.empty() || withElseRegion);
3259 result.addTypes(resultTypes);
3260 result.addOperands(args);
3261 result.addAttribute(getConditionAttrStrName(), IntegerSetAttr::get(set));
3265 if (resultTypes.empty())
3266 AffineIfOp::ensureTerminator(*thenRegion, builder,
result.location);
3269 if (withElseRegion) {
3271 if (resultTypes.empty())
3272 AffineIfOp::ensureTerminator(*elseRegion, builder,
result.location);
3278 AffineIfOp::build(builder,
result, {}, set, args,
3287 bool composeAffineMin =
false) {
3294 if (llvm::none_of(operands,
3305 auto set = getIntegerSet();
3311 if (getIntegerSet() == set && llvm::equal(operands, getOperands()))
3314 setConditional(set, operands);
3320 results.
add<SimplifyDeadElse, AlwaysTrueOrFalseIf>(context);
3329 assert(operands.size() == 1 + map.
getNumInputs() &&
"inconsistent operands");
3330 result.addOperands(operands);
3332 result.addAttribute(getMapAttrStrName(), AffineMapAttr::get(map));
3333 auto memrefType = llvm::cast<MemRefType>(operands[0].
getType());
3334 result.types.push_back(memrefType.getElementType());
3339 assert(map.
getNumInputs() == mapOperands.size() &&
"inconsistent index info");
3341 result.addOperands(mapOperands);
3342 auto memrefType = llvm::cast<MemRefType>(
memref.getType());
3343 result.addAttribute(getMapAttrStrName(), AffineMapAttr::get(map));
3344 result.types.push_back(memrefType.getElementType());
3349 auto memrefType = llvm::cast<MemRefType>(
memref.getType());
3350 int64_t rank = memrefType.getRank();
3364 AffineMapAttr mapAttr;
3369 AffineLoadOp::getMapAttrStrName(),
3380 if (AffineMapAttr mapAttr =
3381 (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()))
3385 {getMapAttrStrName()});
3391template <
typename AffineMemOpTy>
3395 MemRefType memrefType,
unsigned numIndexOperands) {
3398 return op->emitOpError(
"affine map num results must equal memref rank");
3400 return op->emitOpError(
"expects as many subscripts as affine map inputs");
3402 for (
auto idx : mapOperands) {
3403 if (!idx.getType().isIndex())
3404 return op->emitOpError(
"index to load must have 'index' type");
3412LogicalResult AffineLoadOp::verify() {
3414 if (
getType() != memrefType.getElementType())
3415 return emitOpError(
"result type must match element type of memref");
3418 *
this, (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()),
3419 getMapOperands(), memrefType,
3420 getNumOperands() - 1)))
3428 results.
add<SimplifyAffineOp<AffineLoadOp>>(context);
3437 auto getGlobalOp = getMemref().getDefiningOp<memref::GetGlobalOp>();
3442 getGlobalOp, getGlobalOp.getNameAttr());
3448 dyn_cast_or_null<DenseElementsAttr>(global.getConstantInitValue());
3452 if (
auto splatAttr = dyn_cast<SplatElementsAttr>(cstAttr))
3453 return splatAttr.getSplatValue<
Attribute>();
3455 if (!getAffineMap().isConstant())
3458 llvm::map_to_vector<4>(getAffineMap().getConstantResults(),
3459 [](
int64_t v) -> uint64_t {
return v; });
3470 assert(map.
getNumInputs() == mapOperands.size() &&
"inconsistent index info");
3471 result.addOperands(valueToStore);
3473 result.addOperands(mapOperands);
3474 result.getOrAddProperties<Properties>().map = AffineMapAttr::get(map);
3481 auto memrefType = llvm::cast<MemRefType>(
memref.getType());
3482 int64_t rank = memrefType.getRank();
3496 AffineMapAttr mapAttr;
3501 mapOperands, mapAttr, AffineStoreOp::getMapAttrStrName(),
3512 p <<
" " << getValueToStore();
3514 if (AffineMapAttr mapAttr =
3515 (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()))
3519 {getMapAttrStrName()});
3523LogicalResult AffineStoreOp::verify() {
3526 if (getValueToStore().
getType() != memrefType.getElementType())
3528 "value to store must have the same type as memref element type");
3531 *
this, (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()),
3532 getMapOperands(), memrefType,
3533 getNumOperands() - 2)))
3541 results.
add<SimplifyAffineOp<AffineStoreOp>>(context);
3544LogicalResult AffineStoreOp::fold(FoldAdaptor adaptor,
3554template <
typename T>
3557 if (op.getNumOperands() !=
3558 op.getMap().getNumDims() + op.getMap().getNumSymbols())
3559 return op.emitOpError(
3560 "operand count and affine map dimension and symbol count must match");
3562 if (op.getMap().getNumResults() == 0)
3563 return op.emitOpError(
"affine map expect at least one result");
3567template <
typename T>
3569 p <<
' ' << op->getAttr(T::getMapAttrStrName());
3570 auto operands = op.getOperands();
3571 unsigned numDims = op.getMap().getNumDims();
3572 p <<
'(' << operands.take_front(numDims) <<
')';
3574 if (operands.size() != numDims)
3575 p <<
'[' << operands.drop_front(numDims) <<
']';
3577 {T::getMapAttrStrName()});
3580template <
typename T>
3587 AffineMapAttr mapAttr;
3603template <
typename T>
3605 static_assert(llvm::is_one_of<T, AffineMinOp, AffineMaxOp>::value,
3606 "expected affine min or max op");
3612 auto foldedMap = op.getMap().partialConstantFold(operands, &results);
3614 if (foldedMap.getNumSymbols() == 1 && foldedMap.isSymbolIdentity())
3615 return op.getOperand(0);
3618 if (results.empty()) {
3620 if (foldedMap == op.getMap())
3622 op->setAttr(
"map", AffineMapAttr::get(foldedMap));
3623 return op.getResult();
3627 auto resultIt = std::is_same<T, AffineMinOp>::value
3628 ? llvm::min_element(results)
3629 : llvm::max_element(results);
3630 if (resultIt == results.end())
3632 return IntegerAttr::get(IndexType::get(op.getContext()), *resultIt);
3636template <
typename T>
3642 AffineMap oldMap = affineOp.getAffineMap();
3648 if (!llvm::is_contained(newExprs, expr))
3649 newExprs.push_back(expr);
3679template <
typename T>
3685 AffineMap oldMap = affineOp.getAffineMap();
3687 affineOp.getMapOperands().take_front(oldMap.
getNumDims());
3689 affineOp.getMapOperands().take_back(oldMap.
getNumSymbols());
3691 auto newDimOperands = llvm::to_vector<8>(dimOperands);
3692 auto newSymOperands = llvm::to_vector<8>(symOperands);
3700 if (
auto symExpr = dyn_cast<AffineSymbolExpr>(expr)) {
3701 Value symValue = symOperands[symExpr.getPosition()];
3703 producerOps.push_back(producerOp);
3706 }
else if (
auto dimExpr = dyn_cast<AffineDimExpr>(expr)) {
3707 Value dimValue = dimOperands[dimExpr.getPosition()];
3709 producerOps.push_back(producerOp);
3716 newExprs.push_back(expr);
3719 if (producerOps.empty())
3726 for (T producerOp : producerOps) {
3727 AffineMap producerMap = producerOp.getAffineMap();
3728 unsigned numProducerDims = producerMap.
getNumDims();
3733 producerOp.getMapOperands().take_front(numProducerDims);
3735 producerOp.getMapOperands().take_back(numProducerSyms);
3736 newDimOperands.append(dimValues.begin(), dimValues.end());
3737 newSymOperands.append(symValues.begin(), symValues.end());
3741 newExprs.push_back(expr.shiftDims(numProducerDims, numUsedDims)
3742 .shiftSymbols(numProducerSyms, numUsedSyms));
3745 numUsedDims += numProducerDims;
3746 numUsedSyms += numProducerSyms;
3752 llvm::to_vector<8>(llvm::concat<Value>(newDimOperands, newSymOperands));
3771 if (!resultExpr.isPureAffine())
3776 if (failed(flattenResult))
3789 if (llvm::is_sorted(flattenedExprs))
3794 llvm::to_vector(llvm::seq<unsigned>(0, map.
getNumResults()));
3795 llvm::sort(resultPermutation, [&](
unsigned lhs,
unsigned rhs) {
3796 return flattenedExprs[
lhs] < flattenedExprs[
rhs];
3799 for (
unsigned idx : resultPermutation)
3820template <
typename T>
3826 AffineMap map = affineOp.getAffineMap();
3834template <
typename T>
3840 if (affineOp.getMap().getNumResults() != 1)
3843 affineOp.getOperands());
3911ParseResult AffinePrefetchOp::parse(
OpAsmParser &parser,
3918 IntegerAttr hintInfo;
3920 StringRef readOrWrite, cacheType;
3922 AffineMapAttr mapAttr;
3926 AffinePrefetchOp::getMapAttrStrName(),
3932 AffinePrefetchOp::getLocalityHintAttrStrName(),
3942 if (readOrWrite !=
"read" && readOrWrite !=
"write")
3944 "rw specifier has to be 'read' or 'write'");
3945 result.addAttribute(AffinePrefetchOp::getIsWriteAttrStrName(),
3948 if (cacheType !=
"data" && cacheType !=
"instr")
3950 "cache type has to be 'data' or 'instr'");
3952 result.addAttribute(AffinePrefetchOp::getIsDataCacheAttrStrName(),
3959 p <<
" " << getMemref() <<
'[';
3960 AffineMapAttr mapAttr =
3961 (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName());
3964 p <<
']' <<
", " << (getIsWrite() ?
"write" :
"read") <<
", "
3965 <<
"locality<" << getLocalityHint() <<
">, "
3966 << (getIsDataCache() ?
"data" :
"instr");
3968 (*this)->getAttrs(),
3969 {getMapAttrStrName(), getLocalityHintAttrStrName(),
3970 getIsDataCacheAttrStrName(), getIsWriteAttrStrName()});
3974LogicalResult AffinePrefetchOp::verify() {
3975 auto mapAttr = (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName());
3979 return emitOpError(
"affine.prefetch affine map num results must equal"
3984 if (getNumOperands() != 1)
3989 for (
auto idx : getMapOperands()) {
3992 "index must be a valid dimension or symbol identifier");
4000 results.
add<SimplifyAffineOp<AffinePrefetchOp>>(context);
4003LogicalResult AffinePrefetchOp::fold(FoldAdaptor adaptor,
4018 auto ubs = llvm::map_to_vector<4>(ranges, [&](
int64_t value) {
4022 build(builder,
result, resultTypes, reductions, lbs, {}, ubs,
4032 assert(llvm::all_of(lbMaps,
4034 return m.
getNumDims() == lbMaps[0].getNumDims() &&
4037 "expected all lower bounds maps to have the same number of dimensions "
4039 assert(llvm::all_of(ubMaps,
4041 return m.
getNumDims() == ubMaps[0].getNumDims() &&
4044 "expected all upper bounds maps to have the same number of dimensions "
4046 assert((lbMaps.empty() || lbMaps[0].getNumInputs() == lbArgs.size()) &&
4047 "expected lower bound maps to have as many inputs as lower bound "
4049 assert((ubMaps.empty() || ubMaps[0].getNumInputs() == ubArgs.size()) &&
4050 "expected upper bound maps to have as many inputs as upper bound "
4054 result.addTypes(resultTypes);
4058 for (arith::AtomicRMWKind reduction : reductions)
4059 reductionAttrs.push_back(
4061 result.addAttribute(getReductionsAttrStrName(),
4071 groups.reserve(groups.size() + maps.size());
4072 exprs.reserve(maps.size());
4077 return AffineMap::get(maps[0].getNumDims(), maps[0].getNumSymbols(), exprs,
4083 AffineMap lbMap = concatMapsSameInput(lbMaps, lbGroups);
4084 AffineMap ubMap = concatMapsSameInput(ubMaps, ubGroups);
4085 result.addAttribute(getLowerBoundsMapAttrStrName(),
4086 AffineMapAttr::get(lbMap));
4087 result.addAttribute(getLowerBoundsGroupsAttrStrName(),
4089 result.addAttribute(getUpperBoundsMapAttrStrName(),
4090 AffineMapAttr::get(ubMap));
4091 result.addAttribute(getUpperBoundsGroupsAttrStrName(),
4094 result.addOperands(lbArgs);
4095 result.addOperands(ubArgs);
4098 auto *bodyRegion =
result.addRegion();
4102 for (
unsigned i = 0, e = steps.size(); i < e; ++i)
4104 if (resultTypes.empty())
4105 ensureTerminator(*bodyRegion, builder,
result.location);
4109 return {&getRegion()};
4112unsigned AffineParallelOp::getNumDims() {
return getSteps().size(); }
4114AffineParallelOp::operand_range AffineParallelOp::getLowerBoundsOperands() {
4115 return getOperands().take_front(getLowerBoundsMap().getNumInputs());
4118AffineParallelOp::operand_range AffineParallelOp::getUpperBoundsOperands() {
4119 return getOperands().drop_front(getLowerBoundsMap().getNumInputs());
4122AffineMap AffineParallelOp::getLowerBoundMap(
unsigned pos) {
4123 auto values = getLowerBoundsGroups().getValues<int32_t>();
4125 for (
unsigned i = 0; i < pos; ++i)
4127 return getLowerBoundsMap().getSliceMap(start, values[pos]);
4130AffineMap AffineParallelOp::getUpperBoundMap(
unsigned pos) {
4131 auto values = getUpperBoundsGroups().getValues<int32_t>();
4133 for (
unsigned i = 0; i < pos; ++i)
4135 return getUpperBoundsMap().getSliceMap(start, values[pos]);
4139 return AffineValueMap(getLowerBoundsMap(), getLowerBoundsOperands());
4143 return AffineValueMap(getUpperBoundsMap(), getUpperBoundsOperands());
4146std::optional<SmallVector<int64_t, 8>> AffineParallelOp::getConstantRanges() {
4147 if (hasMinMaxBounds())
4148 return std::nullopt;
4156 for (
unsigned i = 0, e = rangesValueMap.
getNumResults(); i < e; ++i) {
4157 auto expr = rangesValueMap.
getResult(i);
4158 auto cst = dyn_cast<AffineConstantExpr>(expr);
4160 return std::nullopt;
4161 out.push_back(cst.getValue());
4166Block *AffineParallelOp::getBody() {
return &getRegion().
front(); }
4168OpBuilder AffineParallelOp::getBodyBuilder() {
4169 return OpBuilder(getBody(), std::prev(getBody()->end()));
4174 "operands to map must match number of inputs");
4176 auto ubOperands = getUpperBoundsOperands();
4179 newOperands.append(ubOperands.begin(), ubOperands.end());
4180 (*this)->setOperands(newOperands);
4182 setLowerBoundsMapAttr(AffineMapAttr::get(map));
4187 "operands to map must match number of inputs");
4190 newOperands.append(ubOperands.begin(), ubOperands.end());
4191 (*this)->setOperands(newOperands);
4193 setUpperBoundsMapAttr(AffineMapAttr::get(map));
4202 arith::AtomicRMWKind op) {
4204 case arith::AtomicRMWKind::addf:
4205 return isa<FloatType>(resultType);
4206 case arith::AtomicRMWKind::addi:
4207 return isa<IntegerType>(resultType);
4208 case arith::AtomicRMWKind::assign:
4210 case arith::AtomicRMWKind::mulf:
4211 return isa<FloatType>(resultType);
4212 case arith::AtomicRMWKind::muli:
4213 return isa<IntegerType>(resultType);
4214 case arith::AtomicRMWKind::maximumf:
4215 return isa<FloatType>(resultType);
4216 case arith::AtomicRMWKind::minimumf:
4217 return isa<FloatType>(resultType);
4218 case arith::AtomicRMWKind::maxs: {
4219 auto intType = dyn_cast<IntegerType>(resultType);
4220 return intType && intType.isSigned();
4222 case arith::AtomicRMWKind::mins: {
4223 auto intType = dyn_cast<IntegerType>(resultType);
4224 return intType && intType.isSigned();
4226 case arith::AtomicRMWKind::maxu: {
4227 auto intType = dyn_cast<IntegerType>(resultType);
4228 return intType && intType.isUnsigned();
4230 case arith::AtomicRMWKind::minu: {
4231 auto intType = dyn_cast<IntegerType>(resultType);
4232 return intType && intType.isUnsigned();
4234 case arith::AtomicRMWKind::ori:
4235 return isa<IntegerType>(resultType);
4236 case arith::AtomicRMWKind::andi:
4237 return isa<IntegerType>(resultType);
4243LogicalResult AffineParallelOp::verify() {
4244 auto numDims = getNumDims();
4247 getSteps().size() != numDims || getBody()->getNumArguments() != numDims) {
4248 return emitOpError() <<
"the number of region arguments ("
4249 << getBody()->getNumArguments()
4250 <<
") and the number of map groups for lower ("
4251 << getLowerBoundsGroups().getNumElements()
4252 <<
") and upper bound ("
4253 << getUpperBoundsGroups().getNumElements()
4254 <<
"), and the number of steps (" << getSteps().size()
4255 <<
") must all match";
4258 unsigned expectedNumLBResults = 0;
4259 for (APInt v : getLowerBoundsGroups()) {
4260 unsigned results = v.getZExtValue();
4263 <<
"expected lower bound map to have at least one result";
4264 expectedNumLBResults += results;
4266 if (expectedNumLBResults != getLowerBoundsMap().getNumResults())
4267 return emitOpError() <<
"expected lower bounds map to have "
4268 << expectedNumLBResults <<
" results";
4269 unsigned expectedNumUBResults = 0;
4270 for (APInt v : getUpperBoundsGroups()) {
4271 unsigned results = v.getZExtValue();
4274 <<
"expected upper bound map to have at least one result";
4275 expectedNumUBResults += results;
4277 if (expectedNumUBResults != getUpperBoundsMap().getNumResults())
4278 return emitOpError() <<
"expected upper bounds map to have "
4279 << expectedNumUBResults <<
" results";
4281 if (getReductions().size() != getNumResults())
4282 return emitOpError(
"a reduction must be specified for each output");
4286 for (
auto it : llvm::enumerate((getReductions()))) {
4288 auto intAttr = dyn_cast<IntegerAttr>(attr);
4289 if (!intAttr || !arith::symbolizeAtomicRMWKind(intAttr.getInt()))
4290 return emitOpError(
"invalid reduction attribute");
4291 auto kind = arith::symbolizeAtomicRMWKind(intAttr.getInt()).value();
4293 return emitOpError(
"result type cannot match reduction attribute");
4299 getLowerBoundsMap().getNumDims())))
4303 getUpperBoundsMap().getNumDims())))
4312 if (newMap ==
getAffineMap() && newOperands == operands)
4314 reset(newMap, newOperands);
4324 bool ubCanonicalized = succeeded(
ub.canonicalize());
4327 if (!lbCanonicalized && !ubCanonicalized)
4330 if (lbCanonicalized)
4332 if (ubCanonicalized)
4333 op.setUpperBounds(
ub.getOperands(),
ub.getAffineMap());
4338LogicalResult AffineParallelOp::fold(FoldAdaptor adaptor,
4339 SmallVectorImpl<OpFoldResult> &results) {
4350 StringRef keyword) {
4353 ValueRange dimOperands = operands.take_front(numDims);
4354 ValueRange symOperands = operands.drop_front(numDims);
4356 for (llvm::APInt groupSize : group) {
4360 unsigned size = groupSize.getZExtValue();
4365 p << keyword <<
'(';
4374void AffineParallelOp::print(OpAsmPrinter &p) {
4375 p <<
" (" << getBody()->getArguments() <<
") = (";
4377 getLowerBoundsOperands(),
"max");
4380 getUpperBoundsOperands(),
"min");
4382 SmallVector<int64_t, 8> steps = getSteps();
4383 bool elideSteps = llvm::all_of(steps, [](int64_t step) {
return step == 1; });
4386 llvm::interleaveComma(steps, p);
4389 if (getNumResults()) {
4391 llvm::interleaveComma(getReductions(), p, [&](
auto &attr) {
4392 arith::AtomicRMWKind sym = *arith::symbolizeAtomicRMWKind(
4393 llvm::cast<IntegerAttr>(attr).getInt());
4394 p <<
"\"" << arith::stringifyAtomicRMWKind(sym) <<
"\"";
4396 p <<
") -> (" << getResultTypes() <<
")";
4403 (*this)->getAttrs(),
4404 {AffineParallelOp::getReductionsAttrStrName(),
4405 AffineParallelOp::getLowerBoundsMapAttrStrName(),
4406 AffineParallelOp::getLowerBoundsGroupsAttrStrName(),
4407 AffineParallelOp::getUpperBoundsMapAttrStrName(),
4408 AffineParallelOp::getUpperBoundsGroupsAttrStrName(),
4409 AffineParallelOp::getStepsAttrStrName()});
4416static ParseResult deduplicateAndResolveOperands(
4417 OpAsmParser &parser,
4418 ArrayRef<SmallVector<OpAsmParser::UnresolvedOperand>> operands,
4419 SmallVectorImpl<Value> &uniqueOperands,
4420 SmallVectorImpl<AffineExpr> &replacements,
AffineExprKind kind) {
4422 "expected operands to be dim or symbol expression");
4425 for (
const auto &list : operands) {
4426 SmallVector<Value> valueOperands;
4429 for (Value operand : valueOperands) {
4430 unsigned pos = std::distance(uniqueOperands.begin(),
4431 llvm::find(uniqueOperands, operand));
4432 if (pos == uniqueOperands.size())
4433 uniqueOperands.push_back(operand);
4434 replacements.push_back(
4444enum class MinMaxKind { Min, Max };
4463static ParseResult parseAffineMapWithMinMax(OpAsmParser &parser,
4468 const llvm::StringLiteral tmpAttrStrName =
"__pseudo_bound_map";
4470 StringRef mapName = kind == MinMaxKind::Min
4471 ? AffineParallelOp::getUpperBoundsMapAttrStrName()
4472 : AffineParallelOp::getLowerBoundsMapAttrStrName();
4473 StringRef groupsName =
4474 kind == MinMaxKind::Min
4475 ? AffineParallelOp::getUpperBoundsGroupsAttrStrName()
4476 : AffineParallelOp::getLowerBoundsGroupsAttrStrName();
4482 result.addAttribute(
4483 mapName, AffineMapAttr::get(parser.getBuilder().getEmptyAffineMap()));
4484 result.addAttribute(groupsName, parser.getBuilder().getI32TensorAttr({}));
4488 SmallVector<AffineExpr> flatExprs;
4489 SmallVector<SmallVector<OpAsmParser::UnresolvedOperand>> flatDimOperands;
4490 SmallVector<SmallVector<OpAsmParser::UnresolvedOperand>> flatSymOperands;
4491 SmallVector<int32_t> numMapsPerGroup;
4492 SmallVector<OpAsmParser::UnresolvedOperand> mapOperands;
4493 auto parseOperands = [&]() {
4495 kind == MinMaxKind::Min ?
"min" :
"max"))) {
4496 mapOperands.clear();
4502 result.attributes.erase(tmpAttrStrName);
4503 llvm::append_range(flatExprs, map.getValue().getResults());
4504 auto operandsRef = llvm::ArrayRef(mapOperands);
4505 auto dimsRef = operandsRef.take_front(map.getValue().getNumDims());
4506 SmallVector<OpAsmParser::UnresolvedOperand> dims(dimsRef);
4507 auto symsRef = operandsRef.drop_front(map.getValue().getNumDims());
4508 SmallVector<OpAsmParser::UnresolvedOperand> syms(symsRef);
4509 flatDimOperands.append(map.getValue().getNumResults(), dims);
4510 flatSymOperands.append(map.getValue().getNumResults(), syms);
4511 numMapsPerGroup.push_back(map.getValue().getNumResults());
4514 flatSymOperands.emplace_back(),
4515 flatExprs.emplace_back())))
4517 numMapsPerGroup.push_back(1);
4524 unsigned totalNumDims = 0;
4525 unsigned totalNumSyms = 0;
4526 for (
unsigned i = 0, e = flatExprs.size(); i < e; ++i) {
4527 unsigned numDims = flatDimOperands[i].size();
4528 unsigned numSyms = flatSymOperands[i].size();
4529 flatExprs[i] = flatExprs[i]
4530 .shiftDims(numDims, totalNumDims)
4531 .shiftSymbols(numSyms, totalNumSyms);
4532 totalNumDims += numDims;
4533 totalNumSyms += numSyms;
4537 SmallVector<Value> dimOperands, symOperands;
4538 SmallVector<AffineExpr> dimRplacements, symRepacements;
4539 if (deduplicateAndResolveOperands(parser, flatDimOperands, dimOperands,
4541 deduplicateAndResolveOperands(parser, flatSymOperands, symOperands,
4545 result.operands.append(dimOperands.begin(), dimOperands.end());
4546 result.operands.append(symOperands.begin(), symOperands.end());
4549 auto flatMap =
AffineMap::get(totalNumDims, totalNumSyms, flatExprs,
4551 flatMap = flatMap.replaceDimsAndSymbols(
4552 dimRplacements, symRepacements, dimOperands.size(), symOperands.size());
4554 result.addAttribute(mapName, AffineMapAttr::get(flatMap));
4564ParseResult AffineParallelOp::parse(OpAsmParser &parser,
4565 OperationState &
result) {
4568 SmallVector<OpAsmParser::Argument, 4> ivs;
4571 parseAffineMapWithMinMax(parser,
result, MinMaxKind::Max) ||
4573 parseAffineMapWithMinMax(parser,
result, MinMaxKind::Min))
4576 AffineMapAttr stepsMapAttr;
4577 NamedAttrList stepsAttrs;
4578 SmallVector<OpAsmParser::UnresolvedOperand, 4> stepsMapOperands;
4580 SmallVector<int64_t, 4> steps(ivs.size(), 1);
4581 result.addAttribute(AffineParallelOp::getStepsAttrStrName(),
4585 AffineParallelOp::getStepsAttrStrName(),
4591 SmallVector<int64_t, 4> steps;
4592 auto stepsMap = stepsMapAttr.getValue();
4593 for (
const auto &
result : stepsMap.getResults()) {
4594 auto constExpr = dyn_cast<AffineConstantExpr>(
result);
4597 "steps must be constant integers");
4598 steps.push_back(constExpr.getValue());
4600 result.addAttribute(AffineParallelOp::getStepsAttrStrName(),
4606 SmallVector<Attribute, 4> reductions;
4610 auto parseAttributes = [&]() -> ParseResult {
4615 NamedAttrList attrStorage;
4620 std::optional<arith::AtomicRMWKind> reduction =
4621 arith::symbolizeAtomicRMWKind(attrVal.getValue());
4623 return parser.
emitError(loc,
"invalid reduction value: ") << attrVal;
4624 reductions.push_back(
4632 result.addAttribute(AffineParallelOp::getReductionsAttrStrName(),
4640 Region *body =
result.addRegion();
4641 for (
auto &iv : ivs)
4642 iv.type = indexType;
4648 AffineParallelOp::ensureTerminator(*body, builder,
result.location);
4656LogicalResult AffineYieldOp::verify() {
4657 auto *parentOp = (*this)->getParentOp();
4658 auto results = parentOp->getResults();
4659 auto operands = getOperands();
4661 if (!isa<AffineParallelOp, AffineIfOp, AffineForOp>(parentOp))
4662 return emitOpError() <<
"only terminates affine.if/for/parallel regions";
4663 if (parentOp->getNumResults() != getNumOperands())
4664 return emitOpError() <<
"parent of yield must have same number of "
4665 "results as the yield operands";
4666 for (
auto it : llvm::zip(results, operands)) {
4668 return emitOpError() <<
"types mismatch between yield op and its parent";
4678void AffineVectorLoadOp::build(OpBuilder &builder, OperationState &
result,
4679 VectorType resultType, AffineMap map,
4681 assert(operands.size() == 1 + map.
getNumInputs() &&
"inconsistent operands");
4682 result.addOperands(operands);
4684 result.addAttribute(getMapAttrStrName(), AffineMapAttr::get(map));
4685 result.types.push_back(resultType);
4688void AffineVectorLoadOp::build(OpBuilder &builder, OperationState &
result,
4689 VectorType resultType, Value memref,
4691 assert(map.
getNumInputs() == mapOperands.size() &&
"inconsistent index info");
4692 result.addOperands(memref);
4693 result.addOperands(mapOperands);
4694 result.addAttribute(getMapAttrStrName(), AffineMapAttr::get(map));
4695 result.types.push_back(resultType);
4698void AffineVectorLoadOp::build(OpBuilder &builder, OperationState &
result,
4699 VectorType resultType, Value memref,
4701 auto memrefType = llvm::cast<MemRefType>(memref.
getType());
4702 int64_t rank = memrefType.getRank();
4710void AffineVectorLoadOp::getCanonicalizationPatterns(RewritePatternSet &results,
4711 MLIRContext *context) {
4712 results.
add<SimplifyAffineOp<AffineVectorLoadOp>>(context);
4715ParseResult AffineVectorLoadOp::parse(OpAsmParser &parser,
4716 OperationState &
result) {
4720 MemRefType memrefType;
4721 VectorType resultType;
4722 OpAsmParser::UnresolvedOperand memrefInfo;
4723 AffineMapAttr mapAttr;
4724 SmallVector<OpAsmParser::UnresolvedOperand, 1> mapOperands;
4728 AffineVectorLoadOp::getMapAttrStrName(),
4738void AffineVectorLoadOp::print(OpAsmPrinter &p) {
4740 if (AffineMapAttr mapAttr =
4741 (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()))
4745 {getMapAttrStrName()});
4750static LogicalResult verifyVectorMemoryOp(Operation *op, MemRefType memrefType,
4751 VectorType vectorType) {
4753 if (memrefType.getElementType() != vectorType.getElementType())
4755 "requires memref and vector types of the same elemental type");
4759LogicalResult AffineVectorLoadOp::verify() {
4762 *
this, (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()),
4763 getMapOperands(), memrefType,
4764 getNumOperands() - 1)))
4777void AffineVectorStoreOp::build(OpBuilder &builder, OperationState &
result,
4778 Value valueToStore, Value memref, AffineMap map,
4780 assert(map.
getNumInputs() == mapOperands.size() &&
"inconsistent index info");
4781 result.addOperands(valueToStore);
4782 result.addOperands(memref);
4783 result.addOperands(mapOperands);
4784 result.addAttribute(getMapAttrStrName(), AffineMapAttr::get(map));
4788void AffineVectorStoreOp::build(OpBuilder &builder, OperationState &
result,
4789 Value valueToStore, Value memref,
4791 auto memrefType = llvm::cast<MemRefType>(memref.
getType());
4792 int64_t rank = memrefType.getRank();
4799void AffineVectorStoreOp::getCanonicalizationPatterns(
4800 RewritePatternSet &results, MLIRContext *context) {
4801 results.
add<SimplifyAffineOp<AffineVectorStoreOp>>(context);
4804ParseResult AffineVectorStoreOp::parse(OpAsmParser &parser,
4805 OperationState &
result) {
4808 MemRefType memrefType;
4809 VectorType resultType;
4810 OpAsmParser::UnresolvedOperand storeValueInfo;
4811 OpAsmParser::UnresolvedOperand memrefInfo;
4812 AffineMapAttr mapAttr;
4813 SmallVector<OpAsmParser::UnresolvedOperand, 1> mapOperands;
4818 AffineVectorStoreOp::getMapAttrStrName(),
4828void AffineVectorStoreOp::print(OpAsmPrinter &p) {
4829 p <<
" " << getValueToStore();
4831 if (AffineMapAttr mapAttr =
4832 (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()))
4836 {getMapAttrStrName()});
4837 p <<
" : " <<
getMemRefType() <<
", " << getValueToStore().getType();
4840LogicalResult AffineVectorStoreOp::verify() {
4843 *
this, (*this)->getAttrOfType<AffineMapAttr>(getMapAttrStrName()),
4844 getMapOperands(), memrefType,
4845 getNumOperands() - 2)))
4858void AffineDelinearizeIndexOp::build(OpBuilder &odsBuilder,
4859 OperationState &odsState,
4861 ArrayRef<int64_t> staticBasis,
4862 bool hasOuterBound) {
4863 SmallVector<Type> returnTypes(hasOuterBound ? staticBasis.size()
4864 : staticBasis.size() + 1,
4866 build(odsBuilder, odsState, returnTypes, linearIndex, dynamicBasis,
4870void AffineDelinearizeIndexOp::build(OpBuilder &odsBuilder,
4871 OperationState &odsState,
4873 bool hasOuterBound) {
4874 if (hasOuterBound && !basis.empty() && basis.front() ==
nullptr) {
4875 hasOuterBound =
false;
4876 basis = basis.drop_front();
4878 SmallVector<Value> dynamicBasis;
4879 SmallVector<int64_t> staticBasis;
4882 build(odsBuilder, odsState, linearIndex, dynamicBasis, staticBasis,
4886void AffineDelinearizeIndexOp::build(OpBuilder &odsBuilder,
4887 OperationState &odsState,
4889 ArrayRef<OpFoldResult> basis,
4890 bool hasOuterBound) {
4891 if (hasOuterBound && !basis.empty() && basis.front() == OpFoldResult()) {
4892 hasOuterBound =
false;
4893 basis = basis.drop_front();
4895 SmallVector<Value> dynamicBasis;
4896 SmallVector<int64_t> staticBasis;
4898 build(odsBuilder, odsState, linearIndex, dynamicBasis, staticBasis,
4902void AffineDelinearizeIndexOp::build(OpBuilder &odsBuilder,
4903 OperationState &odsState,
4904 Value linearIndex, ArrayRef<int64_t> basis,
4905 bool hasOuterBound) {
4906 build(odsBuilder, odsState, linearIndex,
ValueRange{}, basis, hasOuterBound);
4909LogicalResult AffineDelinearizeIndexOp::verify() {
4910 ArrayRef<int64_t> staticBasis = getStaticBasis();
4911 if (getNumResults() != staticBasis.size() &&
4912 getNumResults() != staticBasis.size() + 1)
4913 return emitOpError(
"should return an index for each basis element and up "
4914 "to one extra index");
4916 auto dynamicMarkersCount = llvm::count_if(staticBasis, ShapedType::isDynamic);
4917 if (
static_cast<size_t>(dynamicMarkersCount) != getDynamicBasis().size())
4919 "mismatch between dynamic and static basis (kDynamic marker but no "
4920 "corresponding dynamic basis entry) -- this can only happen due to an "
4921 "incorrect fold/rewrite");
4923 if (!llvm::all_of(staticBasis, [](int64_t v) {
4924 return v > 0 || ShapedType::isDynamic(v);
4926 return emitOpError(
"no basis element may be statically non-positive");
4935static std::optional<SmallVector<int64_t>>
4939 uint64_t dynamicBasisIndex = 0;
4945 if (basis && isa<IntegerAttr>(basis)) {
4946 mutableDynamicBasis.
erase(dynamicBasisIndex);
4948 ++dynamicBasisIndex;
4953 if (dynamicBasisIndex == dynamicBasis.size())
4954 return std::nullopt;
4960 staticBasis.push_back(ShapedType::kDynamic);
4962 staticBasis.push_back(*basisVal);
4969AffineDelinearizeIndexOp::fold(FoldAdaptor adaptor,
4970 SmallVectorImpl<OpFoldResult> &
result) {
4971 std::optional<SmallVector<int64_t>> maybeStaticBasis =
4973 adaptor.getDynamicBasis());
4974 if (maybeStaticBasis) {
4975 setStaticBasis(*maybeStaticBasis);
4980 if (getNumResults() == 1) {
4981 result.push_back(getLinearIndex());
4985 if (adaptor.getLinearIndex() ==
nullptr)
4988 if (!adaptor.getDynamicBasis().empty())
4991 int64_t highPart = cast<IntegerAttr>(adaptor.getLinearIndex()).getInt();
4992 Type attrType = getLinearIndex().getType();
4994 ArrayRef<int64_t> staticBasis = getStaticBasis();
4995 if (hasOuterBound())
4996 staticBasis = staticBasis.drop_front();
4997 for (int64_t modulus : llvm::reverse(staticBasis)) {
4998 result.push_back(IntegerAttr::get(attrType, llvm::mod(highPart, modulus)));
4999 highPart = llvm::divideFloorSigned(highPart, modulus);
5001 result.push_back(IntegerAttr::get(attrType, highPart));
5006SmallVector<OpFoldResult> AffineDelinearizeIndexOp::getEffectiveBasis() {
5008 if (hasOuterBound()) {
5009 if (getStaticBasis().front() == ::mlir::ShapedType::kDynamic)
5011 getDynamicBasis().drop_front(), builder);
5013 return getMixedValues(getStaticBasis().drop_front(), getDynamicBasis(),
5017 return getMixedValues(getStaticBasis(), getDynamicBasis(), builder);
5020SmallVector<OpFoldResult> AffineDelinearizeIndexOp::getPaddedBasis() {
5021 SmallVector<OpFoldResult> ret = getMixedBasis();
5022 if (!hasOuterBound())
5023 ret.insert(ret.begin(), OpFoldResult());
5030struct DropUnitExtentBasis
5031 :
public OpRewritePattern<affine::AffineDelinearizeIndexOp> {
5034 LogicalResult matchAndRewrite(affine::AffineDelinearizeIndexOp delinearizeOp,
5035 PatternRewriter &rewriter)
const override {
5036 SmallVector<Value> replacements(delinearizeOp->getNumResults(),
nullptr);
5037 std::optional<Value> zero = std::nullopt;
5038 Location loc = delinearizeOp->getLoc();
5039 auto getZero = [&]() -> Value {
5042 return zero.value();
5047 SmallVector<OpFoldResult> newBasis;
5048 for (
auto [index, basis] :
5049 llvm::enumerate(delinearizeOp.getPaddedBasis())) {
5050 std::optional<int64_t> basisVal =
5053 replacements[index] =
getZero();
5055 newBasis.push_back(basis);
5058 if (newBasis.size() == delinearizeOp.getNumResults())
5060 "no unit basis elements");
5062 if (!newBasis.empty()) {
5064 auto newDelinearizeOp = affine::AffineDelinearizeIndexOp::create(
5065 rewriter, loc, delinearizeOp.getLinearIndex(), newBasis);
5071 replacement = newDelinearizeOp->getResult(newIndex++);
5075 rewriter.
replaceOp(delinearizeOp, replacements);
5090struct CancelDelinearizeOfLinearizeDisjointExactTail
5091 :
public OpRewritePattern<affine::AffineDelinearizeIndexOp> {
5094 LogicalResult matchAndRewrite(affine::AffineDelinearizeIndexOp delinearizeOp,
5095 PatternRewriter &rewriter)
const override {
5096 auto linearizeOp = delinearizeOp.getLinearIndex()
5097 .getDefiningOp<affine::AffineLinearizeIndexOp>();
5100 "index doesn't come from linearize");
5102 if (!linearizeOp.getDisjoint())
5105 ValueRange linearizeIns = linearizeOp.getMultiIndex();
5107 SmallVector<OpFoldResult> linearizeBasis = linearizeOp.getMixedBasis();
5108 SmallVector<OpFoldResult> delinearizeBasis = delinearizeOp.getMixedBasis();
5109 size_t numMatches = 0;
5110 for (
auto [linSize, delinSize] : llvm::zip(
5111 llvm::reverse(linearizeBasis), llvm::reverse(delinearizeBasis))) {
5112 if (linSize != delinSize)
5117 if (numMatches == 0)
5119 delinearizeOp,
"final basis element doesn't match linearize");
5122 if (numMatches == linearizeBasis.size() &&
5123 numMatches == delinearizeBasis.size() &&
5124 linearizeIns.size() == delinearizeOp.getNumResults()) {
5125 rewriter.
replaceOp(delinearizeOp, linearizeOp.getMultiIndex());
5129 Value newLinearize = affine::AffineLinearizeIndexOp::create(
5130 rewriter, linearizeOp.getLoc(), linearizeIns.drop_back(numMatches),
5131 ArrayRef<OpFoldResult>{linearizeBasis}.drop_back(numMatches),
5132 linearizeOp.getDisjoint());
5133 auto newDelinearize = affine::AffineDelinearizeIndexOp::create(
5134 rewriter, delinearizeOp.getLoc(), newLinearize,
5135 ArrayRef<OpFoldResult>{delinearizeBasis}.drop_back(numMatches),
5136 delinearizeOp.hasOuterBound());
5137 SmallVector<Value> mergedResults(newDelinearize.getResults());
5138 mergedResults.append(linearizeIns.take_back(numMatches).begin(),
5139 linearizeIns.take_back(numMatches).end());
5140 rewriter.
replaceOp(delinearizeOp, mergedResults);
5158struct SplitDelinearizeSpanningLastLinearizeArg final
5159 : OpRewritePattern<affine::AffineDelinearizeIndexOp> {
5162 LogicalResult matchAndRewrite(affine::AffineDelinearizeIndexOp delinearizeOp,
5163 PatternRewriter &rewriter)
const override {
5164 auto linearizeOp = delinearizeOp.getLinearIndex()
5165 .getDefiningOp<affine::AffineLinearizeIndexOp>();
5168 "index doesn't come from linearize");
5170 if (!linearizeOp.getDisjoint())
5172 "linearize isn't disjoint");
5174 int64_t
target = linearizeOp.getStaticBasis().back();
5175 if (ShapedType::isDynamic(
target))
5177 linearizeOp,
"linearize ends with dynamic basis value");
5179 int64_t sizeToSplit = 1;
5180 size_t elemsToSplit = 0;
5181 ArrayRef<int64_t> basis = delinearizeOp.getStaticBasis();
5182 for (int64_t basisElem : llvm::reverse(basis)) {
5183 if (ShapedType::isDynamic(basisElem))
5185 delinearizeOp,
"dynamic basis element while scanning for split");
5186 sizeToSplit *= basisElem;
5189 if (sizeToSplit >
target)
5191 "overshot last argument size");
5192 if (sizeToSplit ==
target)
5196 if (sizeToSplit <
target)
5198 delinearizeOp,
"product of known basis elements doesn't exceed last "
5199 "linearize argument");
5201 if (elemsToSplit < 2)
5204 "need at least two elements to form the basis product");
5206 Value linearizeWithoutBack = affine::AffineLinearizeIndexOp::create(
5207 rewriter, linearizeOp.getLoc(), linearizeOp.getMultiIndex().drop_back(),
5208 linearizeOp.getDynamicBasis(), linearizeOp.getStaticBasis().drop_back(),
5209 linearizeOp.getDisjoint());
5210 auto delinearizeWithoutSplitPart = affine::AffineDelinearizeIndexOp::create(
5211 rewriter, delinearizeOp.getLoc(), linearizeWithoutBack,
5212 delinearizeOp.getDynamicBasis(), basis.drop_back(elemsToSplit),
5213 delinearizeOp.hasOuterBound());
5214 auto delinearizeBack = affine::AffineDelinearizeIndexOp::create(
5215 rewriter, delinearizeOp.getLoc(), linearizeOp.getMultiIndex().back(),
5216 basis.take_back(elemsToSplit),
true);
5217 SmallVector<Value> results = llvm::to_vector(
5218 llvm::concat<Value>(delinearizeWithoutSplitPart.getResults(),
5219 delinearizeBack.getResults()));
5220 rewriter.
replaceOp(delinearizeOp, results);
5227void affine::AffineDelinearizeIndexOp::getCanonicalizationPatterns(
5228 RewritePatternSet &patterns, MLIRContext *context) {
5230 .
insert<CancelDelinearizeOfLinearizeDisjointExactTail,
5231 DropUnitExtentBasis, SplitDelinearizeSpanningLastLinearizeArg>(
5239void AffineLinearizeIndexOp::build(OpBuilder &odsBuilder,
5240 OperationState &odsState,
5243 if (!basis.empty() && basis.front() == Value())
5244 basis = basis.drop_front();
5245 SmallVector<Value> dynamicBasis;
5246 SmallVector<int64_t> staticBasis;
5249 build(odsBuilder, odsState, multiIndex, dynamicBasis, staticBasis, disjoint);
5252void AffineLinearizeIndexOp::build(OpBuilder &odsBuilder,
5253 OperationState &odsState,
5255 ArrayRef<OpFoldResult> basis,
5257 if (!basis.empty() && basis.front() == OpFoldResult())
5258 basis = basis.drop_front();
5259 SmallVector<Value> dynamicBasis;
5260 SmallVector<int64_t> staticBasis;
5262 build(odsBuilder, odsState, multiIndex, dynamicBasis, staticBasis, disjoint);
5265void AffineLinearizeIndexOp::build(OpBuilder &odsBuilder,
5266 OperationState &odsState,
5268 ArrayRef<int64_t> basis,
bool disjoint) {
5269 build(odsBuilder, odsState, multiIndex,
ValueRange{}, basis, disjoint);
5272LogicalResult AffineLinearizeIndexOp::verify() {
5273 size_t numIndexes = getMultiIndex().size();
5274 size_t numBasisElems = getStaticBasis().size();
5275 if (numIndexes != numBasisElems && numIndexes != numBasisElems + 1)
5276 return emitOpError(
"should be passed a basis element for each index except "
5277 "possibly the first");
5279 auto dynamicMarkersCount =
5280 llvm::count_if(getStaticBasis(), ShapedType::isDynamic);
5281 if (
static_cast<size_t>(dynamicMarkersCount) != getDynamicBasis().size())
5283 "mismatch between dynamic and static basis (kDynamic marker but no "
5284 "corresponding dynamic basis entry) -- this can only happen due to an "
5285 "incorrect fold/rewrite");
5290OpFoldResult AffineLinearizeIndexOp::fold(FoldAdaptor adaptor) {
5291 std::optional<SmallVector<int64_t>> maybeStaticBasis =
5293 adaptor.getDynamicBasis());
5294 if (maybeStaticBasis) {
5295 setStaticBasis(*maybeStaticBasis);
5299 if (getMultiIndex().empty())
5300 return IntegerAttr::get(getResult().
getType(), 0);
5303 if (getMultiIndex().size() == 1)
5304 return getMultiIndex().front();
5309 if (llvm::any_of(adaptor.getMultiIndex(), [](Attribute a) {
5310 return !isa_and_nonnull<IntegerAttr>(a);
5314 if (!adaptor.getDynamicBasis().empty())
5319 for (
auto [length, indexAttr] :
5320 llvm::zip_first(llvm::reverse(getStaticBasis()),
5321 llvm::reverse(adaptor.getMultiIndex()))) {
5322 result =
result + cast<IntegerAttr>(indexAttr).getInt() * stride;
5323 stride = stride * length;
5326 if (!hasOuterBound())
5329 cast<IntegerAttr>(adaptor.getMultiIndex().front()).getInt() * stride;
5334SmallVector<OpFoldResult> AffineLinearizeIndexOp::getEffectiveBasis() {
5336 if (hasOuterBound()) {
5337 if (getStaticBasis().front() == ::mlir::ShapedType::kDynamic)
5339 getDynamicBasis().drop_front(), builder);
5341 return getMixedValues(getStaticBasis().drop_front(), getDynamicBasis(),
5345 return getMixedValues(getStaticBasis(), getDynamicBasis(), builder);
5348SmallVector<OpFoldResult> AffineLinearizeIndexOp::getPaddedBasis() {
5349 SmallVector<OpFoldResult> ret = getMixedBasis();
5350 if (!hasOuterBound())
5351 ret.insert(ret.begin(), OpFoldResult());
5366struct DropLinearizeUnitComponentsIfDisjointOrZero final
5367 : OpRewritePattern<affine::AffineLinearizeIndexOp> {
5370 LogicalResult matchAndRewrite(affine::AffineLinearizeIndexOp op,
5371 PatternRewriter &rewriter)
const override {
5373 size_t numIndices = multiIndex.size();
5374 SmallVector<Value> newIndices;
5375 newIndices.reserve(numIndices);
5376 SmallVector<OpFoldResult> newBasis;
5377 newBasis.reserve(numIndices);
5379 if (!op.hasOuterBound()) {
5380 newIndices.push_back(multiIndex.front());
5381 multiIndex = multiIndex.drop_front();
5384 SmallVector<OpFoldResult> basis = op.getMixedBasis();
5385 for (
auto [index, basisElem] : llvm::zip_equal(multiIndex, basis)) {
5387 if (!basisEntry || *basisEntry != 1) {
5388 newIndices.push_back(index);
5389 newBasis.push_back(basisElem);
5394 if (!op.getDisjoint() && (!indexValue || *indexValue != 0)) {
5395 newIndices.push_back(index);
5396 newBasis.push_back(basisElem);
5400 if (newIndices.size() == numIndices)
5402 "no unit basis entries to replace");
5404 if (newIndices.empty()) {
5409 op, newIndices, newBasis, op.getDisjoint());
5415 ArrayRef<OpFoldResult> terms) {
5416 int64_t nDynamic = 0;
5417 SmallVector<Value> dynamicPart;
5419 for (OpFoldResult term : terms) {
5426 dynamicPart.push_back(cast<Value>(term));
5430 if (
auto constant = dyn_cast<AffineConstantExpr>(
result))
5432 return AffineApplyOp::create(builder, loc,
result, dynamicPart).getResult();
5462struct CancelLinearizeOfDelinearizePortion final
5463 : OpRewritePattern<affine::AffineLinearizeIndexOp> {
5473 unsigned linStart = 0;
5474 unsigned delinStart = 0;
5475 unsigned length = 0;
5479 LogicalResult matchAndRewrite(affine::AffineLinearizeIndexOp linearizeOp,
5480 PatternRewriter &rewriter)
const override {
5481 SmallVector<Match> matches;
5483 const SmallVector<OpFoldResult> linBasis = linearizeOp.getPaddedBasis();
5484 ArrayRef<OpFoldResult> linBasisRef = linBasis;
5486 ValueRange multiIndex = linearizeOp.getMultiIndex();
5487 unsigned numLinArgs = multiIndex.size();
5488 unsigned linArgIdx = 0;
5491 llvm::SmallPtrSet<Operation *, 2> alreadyMatchedDelinearize;
5492 while (linArgIdx < numLinArgs) {
5493 auto asResult = dyn_cast<OpResult>(multiIndex[linArgIdx]);
5499 auto delinearizeOp =
5500 dyn_cast<AffineDelinearizeIndexOp>(asResult.getOwner());
5501 if (!delinearizeOp) {
5518 unsigned delinArgIdx = asResult.getResultNumber();
5519 SmallVector<OpFoldResult> delinBasis = delinearizeOp.getPaddedBasis();
5520 OpFoldResult firstDelinBound = delinBasis[delinArgIdx];
5521 OpFoldResult firstLinBound = linBasis[linArgIdx];
5522 bool boundsMatch = firstDelinBound == firstLinBound;
5523 bool bothAtFront = linArgIdx == 0 && delinArgIdx == 0;
5524 bool knownByDisjoint =
5525 linearizeOp.getDisjoint() && delinArgIdx == 0 && !firstDelinBound;
5526 if (!boundsMatch && !bothAtFront && !knownByDisjoint) {
5532 unsigned numDelinOuts = delinearizeOp.getNumResults();
5533 for (; j + linArgIdx < numLinArgs && j + delinArgIdx < numDelinOuts;
5535 if (multiIndex[linArgIdx + j] !=
5536 delinearizeOp.getResult(delinArgIdx + j))
5538 if (linBasis[linArgIdx + j] != delinBasis[delinArgIdx + j])
5544 if (j <= 1 || !alreadyMatchedDelinearize.insert(delinearizeOp).second) {
5548 matches.push_back(Match{delinearizeOp, linArgIdx, delinArgIdx, j});
5552 if (matches.empty())
5554 linearizeOp,
"no run of delinearize outputs to deal with");
5559 SmallVector<SmallVector<Value>> delinearizeReplacements;
5561 SmallVector<Value> newIndex;
5562 newIndex.reserve(numLinArgs);
5563 SmallVector<OpFoldResult> newBasis;
5564 newBasis.reserve(numLinArgs);
5565 unsigned prevMatchEnd = 0;
5566 for (Match m : matches) {
5567 unsigned gap = m.linStart - prevMatchEnd;
5568 llvm::append_range(newIndex, multiIndex.slice(prevMatchEnd, gap));
5569 llvm::append_range(newBasis, linBasisRef.slice(prevMatchEnd, gap));
5571 prevMatchEnd = m.linStart + m.length;
5573 PatternRewriter::InsertionGuard g(rewriter);
5576 ArrayRef<OpFoldResult> basisToMerge =
5577 linBasisRef.slice(m.linStart, m.length);
5580 OpFoldResult newSize =
5585 newIndex.push_back(m.delinearize.getLinearIndex());
5586 newBasis.push_back(newSize);
5588 delinearizeReplacements.push_back(SmallVector<Value>());
5592 SmallVector<Value> newDelinResults;
5593 SmallVector<OpFoldResult> newDelinBasis = m.delinearize.getPaddedBasis();
5594 newDelinBasis.erase(newDelinBasis.begin() + m.delinStart,
5595 newDelinBasis.begin() + m.delinStart + m.length);
5596 newDelinBasis.insert(newDelinBasis.begin() + m.delinStart, newSize);
5597 auto newDelinearize = AffineDelinearizeIndexOp::create(
5598 rewriter, m.delinearize.getLoc(), m.delinearize.getLinearIndex(),
5604 Value combinedElem = newDelinearize.getResult(m.delinStart);
5605 auto residualDelinearize = AffineDelinearizeIndexOp::create(
5606 rewriter, m.delinearize.getLoc(), combinedElem, basisToMerge);
5611 llvm::append_range(newDelinResults,
5612 newDelinearize.getResults().take_front(m.delinStart));
5613 llvm::append_range(newDelinResults, residualDelinearize.getResults());
5616 newDelinearize.getResults().drop_front(m.delinStart + 1));
5618 delinearizeReplacements.push_back(newDelinResults);
5619 newIndex.push_back(combinedElem);
5620 newBasis.push_back(newSize);
5622 llvm::append_range(newIndex, multiIndex.drop_front(prevMatchEnd));
5623 llvm::append_range(newBasis, linBasisRef.drop_front(prevMatchEnd));
5625 linearizeOp, newIndex, newBasis, linearizeOp.getDisjoint());
5627 for (
auto [m, newResults] :
5628 llvm::zip_equal(matches, delinearizeReplacements)) {
5629 if (newResults.empty())
5631 rewriter.
replaceOp(m.delinearize, newResults);
5642struct DropLinearizeLeadingZero final
5643 : OpRewritePattern<affine::AffineLinearizeIndexOp> {
5646 LogicalResult matchAndRewrite(affine::AffineLinearizeIndexOp op,
5647 PatternRewriter &rewriter)
const override {
5648 Value leadingIdx = op.getMultiIndex().front();
5652 if (op.getMultiIndex().size() == 1) {
5657 SmallVector<OpFoldResult> mixedBasis = op.getMixedBasis();
5658 ArrayRef<OpFoldResult> newMixedBasis = mixedBasis;
5659 if (op.hasOuterBound())
5660 newMixedBasis = newMixedBasis.drop_front();
5663 op, op.getMultiIndex().drop_front(), newMixedBasis, op.getDisjoint());
5669void affine::AffineLinearizeIndexOp::getCanonicalizationPatterns(
5670 RewritePatternSet &patterns, MLIRContext *context) {
5671 patterns.
add<CancelLinearizeOfDelinearizePortion, DropLinearizeLeadingZero,
5672 DropLinearizeUnitComponentsIfDisjointOrZero>(context);
5679#define GET_OP_CLASSES
5680#include "mlir/Dialect/Affine/IR/AffineOps.cpp.inc"
static AffineForOp buildAffineLoopFromConstants(OpBuilder &builder, Location loc, int64_t lb, int64_t ub, int64_t step, AffineForOp::BodyBuilderFn bodyBuilderFn)
Creates an affine loop from the bounds known to be constants.
static bool hasTrivialZeroTripCount(AffineForOp op)
Returns true if the affine.for has zero iterations in trivial cases.
static LogicalResult verifyMemoryOpIndexing(AffineMemOpTy op, AffineMapAttr mapAttr, Operation::operand_range mapOperands, MemRefType memrefType, unsigned numIndexOperands)
Verify common indexing invariants of affine.load, affine.store, affine.vector_load and affine....
static void printAffineMinMaxOp(OpAsmPrinter &p, T op)
static bool isResultTypeMatchAtomicRMWKind(Type resultType, arith::AtomicRMWKind op)
static bool remainsLegalAfterInline(Value value, Region *src, Region *dest, const IRMapping &mapping, function_ref< bool(Value, Region *)> legalityCheck)
Checks if value known to be a legal affine dimension or symbol in src region remains legal if the ope...
static void printMinMaxBound(OpAsmPrinter &p, AffineMapAttr mapAttr, DenseIntElementsAttr group, ValueRange operands, StringRef keyword)
Prints a lower(upper) bound of an affine parallel loop with max(min) conditions in it.
static OpFoldResult foldMinMaxOp(T op, ArrayRef< Attribute > operands)
Fold an affine min or max operation with the given operands.
static bool isTopLevelValueOrAbove(Value value, Region *region)
A utility function to check if a value is defined at the top level of region or is an argument of reg...
static LogicalResult canonicalizeLoopBounds(AffineForOp forOp)
Canonicalize the bounds of the given loop.
static void simplifyExprAndOperands(AffineExpr &expr, unsigned numDims, unsigned numSymbols, ArrayRef< Value > operands)
Simplify expr while exploiting information from the values in operands.
static bool isValidAffineIndexOperand(Value value, Region *region)
p<< " : "<< getMemRefType()<< ", "<< getType();}static LogicalResult verifyVectorMemoryOp(Operation *op, MemRefType memrefType, VectorType vectorType) { if(memrefType.getElementType() !=vectorType.getElementType()) return op-> emitOpError("requires memref and vector types of the same elemental type")
Given a list of lists of parsed operands, populates uniqueOperands with unique operands.
static void canonicalizeMapOrSetAndOperands(MapOrSet *mapOrSet, SmallVectorImpl< Value > *operands)
static ParseResult parseBound(bool isLower, OperationState &result, OpAsmParser &p)
Parse a for operation loop bounds.
static std::optional< SmallVector< int64_t > > foldCstValueToCstAttrBasis(ArrayRef< OpFoldResult > mixedBasis, MutableOperandRange mutableDynamicBasis, ArrayRef< Attribute > dynamicBasis)
Given mixed basis of affine.delinearize_index/linearize_index replace constant SSA values with the co...
static void canonicalizePromotedSymbols(MapOrSet *mapOrSet, SmallVectorImpl< Value > *operands)
static void simplifyMinOrMaxExprWithOperands(AffineMap &map, ArrayRef< Value > operands, bool isMax)
Simplify the expressions in map while making use of lower or upper bounds of its operands.
static ParseResult parseAffineMinMaxOp(OpAsmParser &parser, OperationState &result)
static LogicalResult replaceAffineDelinearizeIndexInverseExpression(AffineDelinearizeIndexOp delinOp, Value resultToReplace, AffineMap *map, SmallVectorImpl< Value > &dims, SmallVectorImpl< Value > &syms)
If this map contains of the expression x_1 + x_1 * C_1 + ... x_n * C_N + / ... (not necessarily in or...
static void composeSetAndOperands(IntegerSet &set, SmallVectorImpl< Value > &operands, bool composeAffineMin=false)
Compose any affine.apply ops feeding into operands of the integer set set by composing the maps of su...
static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index, Region *region)
Returns true if the 'index' dimension of the memref defined by memrefDefOp is a statically shaped one...
static bool isNonNegativeBoundedBy(AffineExpr e, ArrayRef< Value > operands, int64_t k)
Check if e is known to be: 0 <= e < k.
static AffineForOp buildAffineLoopFromValues(OpBuilder &builder, Location loc, Value lb, Value ub, int64_t step, AffineForOp::BodyBuilderFn bodyBuilderFn)
Creates an affine loop from the bounds that may or may not be constants.
static void simplifyMapWithOperands(AffineMap &map, ArrayRef< Value > operands)
Simplify the map while exploiting information on the values in operands.
static void printDimAndSymbolList(Operation::operand_iterator begin, Operation::operand_iterator end, unsigned numDims, OpAsmPrinter &printer)
Prints dimension and symbol list.
static int64_t getLargestKnownDivisor(AffineExpr e, ArrayRef< Value > operands)
Returns the largest known divisor of e.
static void composeAffineMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands, bool composeAffineMin=false)
Iterate over operands and fold away all those produced by an AffineApplyOp iteratively.
static void legalizeDemotedDims(MapOrSet &mapOrSet, SmallVectorImpl< Value > &operands)
A valid affine dimension may appear as a symbol in affine.apply operations.
static OpTy makeComposedMinMax(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
static std::optional< int64_t > getUpperBound(Value iv)
Gets the constant upper bound on an affine.for iv.
static void buildAffineLoopNestImpl(OpBuilder &builder, Location loc, BoundListTy lbs, BoundListTy ubs, ArrayRef< int64_t > steps, function_ref< void(OpBuilder &, Location, ValueRange)> bodyBuilderFn, LoopCreatorTy &&loopCreatorFn)
Builds an affine loop nest, using "loopCreatorFn" to create individual loop operations.
static LogicalResult foldLoopBounds(AffineForOp forOp)
Fold the constant bounds of a loop.
static LogicalResult replaceAffineMinBoundingBoxExpression(AffineMinOp minOp, AffineExpr dimOrSym, AffineMap *map, ValueRange dims, ValueRange syms)
Assuming dimOrSym is a quantity in the apply op map map and defined by minOp = affine_min(x_1,...
static SmallVector< OpFoldResult > AffineForEmptyLoopFolder(AffineForOp forOp)
Fold the empty loop.
static LogicalResult verifyDimAndSymbolIdentifiers(OpTy &op, Operation::operand_range operands, unsigned numDims)
Utility function to verify that a set of operands are valid dimension and symbol identifiers.
static OpFoldResult makeComposedFoldedMinMax(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
static bool isDimOpValidSymbol(ShapedDimOpInterface dimOp, Region *region)
Returns true if the result of the dim op is a valid symbol for region.
static bool isQTimesDPlusR(AffineExpr e, ArrayRef< Value > operands, int64_t &div, AffineExpr &quotientTimesDiv, AffineExpr &rem)
Check if expression e is of the form d*e_1 + e_2 where 0 <= e_2 < d.
static LogicalResult replaceDimOrSym(AffineMap *map, unsigned dimOrSymbolPosition, SmallVectorImpl< Value > &dims, SmallVectorImpl< Value > &syms, bool replaceAffineMin)
Replace all occurrences of AffineExpr at position pos in map by the defining AffineApplyOp expression...
static std::optional< int64_t > getLowerBound(Value iv)
Gets the constant lower bound on an iv.
static std::optional< uint64_t > getTrivialConstantTripCount(AffineForOp forOp)
Returns constant trip count in trivial cases.
static LogicalResult verifyAffineMinMaxOp(T op)
static void printBound(AffineMapAttr boundMap, Operation::operand_range boundOperands, const char *prefix, OpAsmPrinter &p)
static void shortenAddChainsContainingAll(AffineExpr e, const llvm::SmallDenseSet< AffineExpr, 4 > &exprsToRemove, AffineExpr newVal, DenseMap< AffineExpr, AffineExpr > &replacementsMap)
Recursively traverse e.
static void composeMultiResultAffineMap(AffineMap &map, SmallVectorImpl< Value > &operands, bool composeAffineMin=false)
Composes the given affine map with the given list of operands, pulling in the maps from any affine....
static LogicalResult canonicalizeMapExprAndTermOrder(AffineMap &map)
Canonicalize the result expression order of an affine map and return success if the order changed.
static Value getZero(OpBuilder &b, Location loc, Type elementType)
Get zero value for an element type.
static Value getMemRef(Operation *memOp)
Returns the memref being read/written by a memref/affine load/store op.
static bool isLegalToInline(InlinerInterface &interface, Region *src, Region *insertRegion, bool shouldCloneInlinedRegion, IRMapping &valueMapping)
Utility to check that all of the operations within 'src' can be inlined.
static int64_t getNumElements(Type t)
Compute the total number of elements in the given type, also taking into account nested types.
Returns failure if copies could not be generated due to yet unimplemented cases. copyInPlacementStart and copyOutPlacementStart in copyPlacementBlock specify the insertion points where the incoming and outgoing copies, respectively, should be inserted. The output argument nBegin is set to the replacement of `begin` (set to `begin` itself if no invalidation happens), since outgoing copies could have been inserted at `end`.
static Operation::operand_range getLowerBoundOperands(AffineForOp forOp)
static Operation::operand_range getUpperBoundOperands(AffineForOp forOp)
static VectorType getVectorType(Type scalarTy, const VectorizationStrategy *strategy)
Returns the vector type resulting from applying the provided vectorization strategy on the scalar typ...
RetTy walkPostOrder(AffineExpr expr)
Base type for affine expression.
AffineExpr floorDiv(uint64_t v) const
AffineExprKind getKind() const
Return the classification for this type.
int64_t getLargestKnownDivisor() const
Returns the greatest known integral divisor of this affine expression.
MLIRContext * getContext() const
AffineExpr replace(AffineExpr expr, AffineExpr replacement) const
Sparse replace method.
AffineExpr ceilDiv(uint64_t v) const
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued.
AffineMap getSliceMap(unsigned start, unsigned length) const
Returns the map consisting of length expressions starting from start.
MLIRContext * getContext() const
bool isFunctionOfDim(unsigned position) const
Return true if any affine expression involves AffineDimExpr position.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
AffineMap shiftDims(unsigned shift, unsigned offset=0) const
Replace dims[offset ... numDims) by dims[offset + shift ... shift + numDims).
unsigned getNumSymbols() const
unsigned getNumDims() const
ArrayRef< AffineExpr > getResults() const
bool isFunctionOfSymbol(unsigned position) const
Return true if any affine expression involves AffineSymbolExpr position.
unsigned getNumResults() const
static SmallVector< AffineMap, 4 > inferFromExprList(ArrayRef< ArrayRef< AffineExpr > > exprsList, MLIRContext *context)
Returns a vector of AffineMaps; each with as many results as exprs.size(), as many dims as the larges...
AffineMap replaceDimsAndSymbols(ArrayRef< AffineExpr > dimReplacements, ArrayRef< AffineExpr > symReplacements, unsigned numResultDims, unsigned numResultSyms) const
This method substitutes any uses of dimensions and symbols (e.g.
unsigned getNumInputs() const
AffineMap shiftSymbols(unsigned shift, unsigned offset=0) const
Replace symbols[offset ... numSymbols) by symbols[offset + shift ... shift + numSymbols).
AffineExpr getResult(unsigned idx) const
AffineMap replace(AffineExpr expr, AffineExpr replacement, unsigned numResultDims, unsigned numResultSyms) const
Sparse replace method.
static AffineMap getConstantMap(int64_t val, MLIRContext *context)
Returns a single constant result affine map.
AffineMap getSubMap(ArrayRef< unsigned > resultPos) const
Returns the map consisting of the resultPos subset.
LogicalResult constantFold(ArrayRef< Attribute > operandConstants, SmallVectorImpl< Attribute > &results, bool *hasPoison=nullptr) const
Folds the results of the application of an affine map on the provided operands to a constant if possi...
@ Paren
Parens surrounding zero or more operands.
@ OptionalSquare
Square brackets supporting zero or more ops, or nothing.
virtual ParseResult parseColonTypeList(SmallVectorImpl< Type > &result)=0
Parse a colon followed by a type list, which must have at least one type.
virtual Builder & getBuilder() const =0
Return a builder which provides useful access to MLIRContext, global objects like types and attribute...
virtual ParseResult parseCommaSeparatedList(Delimiter delimiter, function_ref< ParseResult()> parseElementFn, StringRef contextMessage=StringRef())=0
Parse a list of comma-separated items with an optional delimiter.
virtual ParseResult parseOptionalAttrDict(NamedAttrList &result)=0
Parse a named dictionary into 'result' if it is present.
virtual ParseResult parseOptionalKeyword(StringRef keyword)=0
Parse the given keyword if present.
MLIRContext * getContext() const
virtual ParseResult parseRParen()=0
Parse a ) token.
virtual InFlightDiagnostic emitError(SMLoc loc, const Twine &message={})=0
Emit a diagnostic at the specified location and return failure.
ParseResult addTypeToList(Type type, SmallVectorImpl< Type > &result)
Add the specified type to the end of the specified type list and return success.
virtual ParseResult parseOptionalRParen()=0
Parse a ) token if present.
virtual ParseResult parseLess()=0
Parse a '<' token.
virtual ParseResult parseEqual()=0
Parse a = token.
virtual ParseResult parseColonType(Type &result)=0
Parse a colon followed by a type.
virtual SMLoc getCurrentLocation()=0
Get the location of the next token and store it into the argument.
virtual SMLoc getNameLoc() const =0
Return the location of the original name token.
virtual ParseResult parseGreater()=0
Parse a '>' token.
virtual ParseResult parseLParen()=0
Parse a ( token.
virtual ParseResult parseType(Type &result)=0
Parse a type.
virtual ParseResult parseComma()=0
Parse a , token.
virtual ParseResult parseOptionalArrowTypeList(SmallVectorImpl< Type > &result)=0
Parse an optional arrow followed by a type list.
virtual ParseResult parseArrowTypeList(SmallVectorImpl< Type > &result)=0
Parse an arrow followed by a type list.
ParseResult parseKeyword(StringRef keyword)
Parse a given keyword.
virtual ParseResult parseAttribute(Attribute &result, Type type={})=0
Parse an arbitrary attribute of a given type and return it in result.
void printOptionalArrowTypeList(TypeRange &&types)
Print an optional arrow followed by a type list.
Attributes are known-constant values of operations.
This class represents an argument of a Block.
Block represents an ordered list of Operations.
Operation * getTerminator()
Get the terminator operation of this block.
BlockArgument addArgument(Type type, Location loc)
Add one value to the argument list.
BlockArgListType getArguments()
DenseI32ArrayAttr getDenseI32ArrayAttr(ArrayRef< int32_t > values)
IntegerAttr getIntegerAttr(Type type, int64_t value)
AffineMap getDimIdentityMap()
AffineMap getMultiDimIdentityMap(unsigned rank)
AffineExpr getAffineSymbolExpr(unsigned position)
AffineExpr getAffineConstantExpr(int64_t constant)
DenseIntElementsAttr getI32TensorAttr(ArrayRef< int32_t > values)
Tensor-typed DenseIntElementsAttr getters.
IntegerAttr getI64IntegerAttr(int64_t value)
IntegerType getIntegerType(unsigned width)
BoolAttr getBoolAttr(bool value)
AffineMap getEmptyAffineMap()
Returns a zero result affine map with no dimensions or symbols: () -> ().
AffineMap getConstantAffineMap(int64_t val)
Returns a single constant result affine map with 0 dimensions and 0 symbols.
AffineMap getSymbolIdentityMap()
ArrayAttr getArrayAttr(ArrayRef< Attribute > value)
MLIRContext * getContext() const
ArrayAttr getI64ArrayAttr(ArrayRef< int64_t > values)
An attribute that represents a reference to a dense integer vector or tensor object.
This is a utility class for mapping one set of IR entities to another.
auto lookup(T from) const
Lookup a mapped value within the map.
An integer set representing a conjunction of one or more affine equalities and inequalities.
unsigned getNumDims() const
static IntegerSet get(unsigned dimCount, unsigned symbolCount, ArrayRef< AffineExpr > constraints, ArrayRef< bool > eqFlags)
MLIRContext * getContext() const
unsigned getNumInputs() const
ArrayRef< AffineExpr > getConstraints() const
ArrayRef< bool > getEqFlags() const
Returns the equality bits, which specify whether each of the constraints is an equality or inequality...
unsigned getNumSymbols() const
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
MLIRContext is the top-level object for a collection of MLIR operations.
This class provides a mutable adaptor for a range of operands.
void erase(unsigned subStart, unsigned subLen=1)
Erase the operands within the given sub-range.
The OpAsmParser has methods for interacting with the asm parser: parsing things from it,...
virtual ParseResult parseRegion(Region ®ion, ArrayRef< Argument > arguments={}, bool enableNameShadowing=false)=0
Parses a region.
virtual ParseResult parseArgument(Argument &result, bool allowType=false, bool allowAttrs=false)=0
Parse a single argument with the following syntax:
ParseResult parseTrailingOperandList(SmallVectorImpl< UnresolvedOperand > &result, Delimiter delimiter=Delimiter::None)
Parse zero or more trailing SSA comma-separated trailing operand references with a specified surround...
virtual ParseResult parseArgumentList(SmallVectorImpl< Argument > &result, Delimiter delimiter=Delimiter::None, bool allowType=false, bool allowAttrs=false)=0
Parse zero or more arguments with a specified surrounding delimiter.
virtual ParseResult parseAffineMapOfSSAIds(SmallVectorImpl< UnresolvedOperand > &operands, Attribute &map, StringRef attrName, NamedAttrList &attrs, Delimiter delimiter=Delimiter::Square)=0
Parses an affine map attribute where dims and symbols are SSA operands.
ParseResult parseAssignmentList(SmallVectorImpl< Argument > &lhs, SmallVectorImpl< UnresolvedOperand > &rhs)
Parse a list of assignments of the form (x1 = y1, x2 = y2, ...)
virtual ParseResult resolveOperand(const UnresolvedOperand &operand, Type type, SmallVectorImpl< Value > &result)=0
Resolve an operand to an SSA value, emitting an error on failure.
ParseResult resolveOperands(Operands &&operands, Type type, SmallVectorImpl< Value > &result)
Resolve a list of operands to SSA values, emitting an error on failure, or appending the results to t...
virtual ParseResult parseOperand(UnresolvedOperand &result, bool allowResultNumber=true)=0
Parse a single SSA value operand name along with a result number if allowResultNumber is true.
virtual ParseResult parseAffineExprOfSSAIds(SmallVectorImpl< UnresolvedOperand > &dimOperands, SmallVectorImpl< UnresolvedOperand > &symbOperands, AffineExpr &expr)=0
Parses an affine expression where dims and symbols are SSA operands.
virtual ParseResult parseOperandList(SmallVectorImpl< UnresolvedOperand > &result, Delimiter delimiter=Delimiter::None, bool allowResultNumber=true, int requiredOperandCount=-1)=0
Parse zero or more SSA comma-separated operand references with a specified surrounding delimiter,...
This is a pure-virtual base class that exposes the asmprinter hooks necessary to implement a custom p...
virtual void printOptionalAttrDict(ArrayRef< NamedAttribute > attrs, ArrayRef< StringRef > elidedAttrs={})=0
If the specified operation has attributes, print out an attribute dictionary with their values.
virtual void printAffineExprOfSSAIds(AffineExpr expr, ValueRange dimOperands, ValueRange symOperands)=0
Prints an affine expression of SSA ids with SSA id names used instead of dims and symbols.
virtual void printAffineMapOfSSAIds(AffineMapAttr mapAttr, ValueRange operands)=0
Prints an affine map of SSA ids, where SSA id names are used in place of dims/symbols.
virtual void printRegion(Region &blocks, bool printEntryBlockArgs=true, bool printBlockTerminators=true, bool printEmptyBlock=false)=0
Prints a region.
virtual void printRegionArgument(BlockArgument arg, ArrayRef< NamedAttribute > argAttrs={}, bool omitType=false)=0
Print a block argument in the usual format of: ssaName : type {attr1=42} loc("here") where location p...
virtual void printOperand(Value value)=0
Print implementations for various things an operation contains.
RAII guard to reset the insertion point of the builder when destroyed.
This class helps build Operations.
Block * createBlock(Region *parent, Region::iterator insertPt={}, TypeRange argTypes={}, ArrayRef< Location > locs={})
Add new block with 'argTypes' arguments and set the insertion point to the end of it.
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
void setInsertionPoint(Block *block, Block::iterator insertPoint)
Set the insertion point to the specified location.
This class represents a single result from folding an operation.
A trait of region holding operations that defines a new scope for polyhedral optimization purposes.
This class provides the API for ops that are known to be isolated from above.
This class implements the operand iterators for the Operation class.
Operation is the basic unit of execution within MLIR.
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
OperandRange operand_range
operand_range getOperands()
Returns an iterator on the underlying Value's.
Region * getParentRegion()
Returns the region to which the instruction belongs.
operand_range::iterator operand_iterator
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
A special type of RewriterBase that coordinates the application of a rewrite pattern on the current I...
This class represents a point being branched from in the methods of the RegionBranchOpInterface.
bool isParent() const
Returns true if branching from the parent op.
RegionBranchTerminatorOpInterface getTerminatorPredecessorOrNull() const
Returns the terminator if branching from a region.
This class represents a successor of a region.
static RegionSuccessor parent()
Initialize a successor that branches after/out of the parent operation.
bool isParent() const
Return true if the successor is the parent operation.
Region * getSuccessor() const
Return the given region successor.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Operation * getParentOp()
Return the parent operation this region is attached to.
bool hasOneBlock()
Return true if this region has exactly one block.
RewritePatternSet & insert(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
RewritePatternSet & add(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
virtual void eraseBlock(Block *block)
This method erases all operations in a block.
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
virtual void finalizeOpModification(Operation *op)
This method is used to signal the end of an in-place modification of the given operation.
virtual void eraseOp(Operation *op)
This method erases an operation that is known to have no uses.
virtual void replaceUsesWithIf(Value from, Value to, function_ref< bool(OpOperand &)> functor, bool *allUsesReplaced=nullptr)
Find uses of from and replace them with to if the functor returns true.
virtual void inlineBlockBefore(Block *source, Block *dest, Block::iterator before, ValueRange argValues={})
Inline the operations of block 'source' into block 'dest' before the given position.
void mergeBlocks(Block *source, Block *dest, ValueRange argValues={})
Inline the operations of block 'source' into the end of block 'dest'.
std::enable_if_t<!std::is_convertible< CallbackT, Twine >::value, LogicalResult > notifyMatchFailure(Location loc, CallbackT &&reasonCallback)
Used to notify the listener that the IR failed to be rewritten because of a match failure,...
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
virtual void startOpModification(Operation *op)
This method is used to notify the rewriter that an in-place operation modification is about to happen...
OpTy replaceOpWithNewOp(Operation *op, Args &&...args)
Replace the results of the given (original) op with a new op that is created without verification (re...
This class represents a specific instance of an effect.
static DerivedEffect * get()
static DefaultResource * get()
std::vector< SmallVector< int64_t, 8 > > operandExprStack
static Operation * lookupNearestSymbolFrom(Operation *from, StringAttr symbol)
Returns the operation registered with the given symbol name within the closest parent operation of,...
This class provides an abstraction over the various different ranges of value types.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
A variable that can be added to the constraint set as a "column".
static bool compare(const Variable &lhs, ComparisonOperator cmp, const Variable &rhs)
Return "true" if "lhs cmp rhs" was proven to hold.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Region * getParentRegion()
Return the Region in which this Value is defined.
AffineBound represents a lower or upper bound in the for operation.
An AffineValueMap is an affine map plus its ML value operands and results for analysis purposes.
LogicalResult canonicalize()
Attempts to canonicalize the map and operands.
ArrayRef< Value > getOperands() const
AffineExpr getResult(unsigned i)
AffineMap getAffineMap() const
void reset(AffineMap map, ValueRange operands, ValueRange results={})
unsigned getNumResults() const
static void difference(const AffineValueMap &a, const AffineValueMap &b, AffineValueMap *res)
Return the value map that is the difference of value maps 'a' and 'b', represented as an affine map a...
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
Operation * getOwner() const
Return the owner of this operand.
constexpr auto RecursivelySpeculatable
Speculatability
This enum is returned from the getSpeculatability method in the ConditionallySpeculatable op interfac...
constexpr auto NotSpeculatable
void buildAffineLoopNest(OpBuilder &builder, Location loc, ArrayRef< int64_t > lbs, ArrayRef< int64_t > ubs, ArrayRef< int64_t > steps, function_ref< void(OpBuilder &, Location, ValueRange)> bodyBuilderFn=nullptr)
Builds a perfect nest of affine.for loops, i.e., each loop except the innermost one contains only ano...
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands, bool composeAffineMin=false)
Returns a composed AffineApplyOp by composing map and operands with other AffineApplyOps supplying th...
void extractForInductionVars(ArrayRef< AffineForOp > forInsts, SmallVectorImpl< Value > *ivs)
Extracts the induction variables from a list of AffineForOps and places them in the output argument i...
bool isValidDim(Value value)
Returns true if the given Value can be used as a dimension id in the region of the closest surroundin...
bool isAffineInductionVar(Value val)
Returns true if the provided value is the induction variable of an AffineForOp or AffineParallelOp.
SmallVector< OpFoldResult > makeComposedFoldedMultiResultAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands, bool composeAffineMin=false)
Variant of makeComposedFoldedAffineApply suitable for multi-result maps.
OpFoldResult computeProduct(Location loc, OpBuilder &builder, ArrayRef< OpFoldResult > terms)
Return the product of terms, creating an affine.apply if any of them are non-constant values.
AffineForOp getForInductionVarOwner(Value val)
Returns the loop parent of an induction variable.
void canonicalizeMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands)
Modifies both map and operands in-place so as to:
OpFoldResult makeComposedFoldedAffineMax(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
Constructs an AffineMaxOp that computes a maximum across the results of applying map to operands,...
bool isAffineForInductionVar(Value val)
Returns true if the provided value is the induction variable of an AffineForOp.
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands, bool composeAffineMin=false)
Constructs an AffineApplyOp that applies map to operands after composing the map with the maps of any...
OpFoldResult makeComposedFoldedAffineMin(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
Constructs an AffineMinOp that computes a minimum across the results of applying map to operands,...
bool isTopLevelValue(Value value)
A utility function to check if a value is defined at the top level of an op with trait AffineScope or...
Region * getAffineAnalysisScope(Operation *op)
Returns the closest region enclosing op that is held by a non-affine operation; nullptr if there is n...
void fullyComposeAffineMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands, bool composeAffineMin=false)
Given an affine map map and its input operands, this method composes into map, maps of AffineApplyOps...
void canonicalizeSetAndOperands(IntegerSet *set, SmallVectorImpl< Value > *operands)
Canonicalizes an integer set the same way canonicalizeMapAndOperands does for affine maps.
void extractInductionVars(ArrayRef< Operation * > affineOps, SmallVectorImpl< Value > &ivs)
Extracts the induction variables from a list of either AffineForOp or AffineParallelOp and places the...
bool isValidSymbol(Value value)
Returns true if the given value can be used as a symbol in the region of the closest surrounding op t...
AffineParallelOp getAffineParallelInductionVarOwner(Value val)
Returns the AffineParallelOp of which the provided value is an induction variable, if any; otherwise a null op.
Region * getAffineScope(Operation *op)
Returns the closest region enclosing op that is held by an operation with trait AffineScope; nullptr ...
ParseResult parseDimAndSymbolList(OpAsmParser &parser, SmallVectorImpl< Value > &operands, unsigned &numDims)
Parses dimension and symbol list.
bool isAffineParallelInductionVar(Value val)
Returns true if val is the induction variable of an AffineParallelOp.
AffineMinOp makeComposedAffineMin(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
Returns an AffineMinOp obtained by composing map and operands with AffineApplyOps supplying those ope...
LogicalResult foldMemRefCast(Operation *op, Value inner=nullptr)
This is a common utility used for patterns of the form "someop(memref.cast) -> someop".
MemRefType getMemRefType(T &&t)
Convenience method to abbreviate casting getType().
Include the generated interface declarations.
AffineMap simplifyAffineMap(AffineMap map)
Simplifies an affine map by simplifying its underlying AffineExpr results.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
SmallVector< OpFoldResult > getMixedValues(ArrayRef< int64_t > staticValues, ValueRange dynamicValues, MLIRContext *context)
Return a vector of OpFoldResults with the same size as staticValues, but all elements for which Shaped...
OpFoldResult getAsIndexOpFoldResult(MLIRContext *ctx, int64_t val)
Convert int64_t to integer attributes of index type and return them as OpFoldResult.
AffineMap removeDuplicateExprs(AffineMap map)
Returns a map with the same dimension and symbol count as map, but whose results are the unique affin...
std::optional< int64_t > getConstantIntValue(OpFoldResult ofr)
If ofr is a constant integer or an IntegerAttr, return the integer.
std::function< SmallVector< Value >( OpBuilder &b, Location loc, ArrayRef< BlockArgument > newBbArgs)> NewYieldValuesFn
A function that returns the additional yielded values during replaceWithAdditionalYields.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
std::optional< int64_t > getBoundForAffineExpr(AffineExpr expr, unsigned numDims, unsigned numSymbols, ArrayRef< std::optional< int64_t > > constLowerBounds, ArrayRef< std::optional< int64_t > > constUpperBounds, bool isUpper)
Get a lower or upper (depending on isUpper) bound for expr while using the constant lower and upper b...
SmallVector< int64_t > delinearize(int64_t linearIndex, ArrayRef< int64_t > strides)
Given the strides together with a linear index in the dimension space, return the vector-space offset...
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
bool isPure(Operation *op)
Returns true if the given operation is pure, i.e., is speculatable that does not touch memory.
@ CeilDiv
RHS of ceildiv is always a constant or a symbolic expression.
@ Mod
RHS of mod is always a constant or a symbolic expression with a positive value.
@ DimId
Dimensional identifier.
@ FloorDiv
RHS of floordiv is always a constant or a symbolic expression.
@ SymbolId
Symbolic identifier.
AffineExpr getAffineBinaryOpExpr(AffineExprKind kind, AffineExpr lhs, AffineExpr rhs)
detail::constant_int_predicate_matcher m_Zero()
Matches a constant scalar / vector splat / tensor splat integer zero.
void dispatchIndexOpFoldResults(ArrayRef< OpFoldResult > ofrs, SmallVectorImpl< Value > &dynamicVec, SmallVectorImpl< int64_t > &staticVec)
Helper function to dispatch multiple OpFoldResults according to the behavior of dispatchIndexOpFoldRe...
llvm::TypeSwitch< T, ResultT > TypeSwitch
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
llvm::DenseMap< KeyT, ValueT, KeyInfoT, BucketT > DenseMap
OpFoldResult getAsOpFoldResult(Value val)
Given a value, try to extract a constant Attribute.
detail::constant_op_matcher m_Constant()
Matches a constant foldable operation.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.
AffineMap foldAttributesIntoMap(Builder &b, AffineMap map, ArrayRef< OpFoldResult > operands, SmallVector< Value > &remainingValues)
Fold all attributes among the given operands into the affine map.
llvm::function_ref< Fn > function_ref
AffineExpr getAffineSymbolExpr(unsigned position, MLIRContext *context)
Canonicalize the affine map result expression order of an affine min/max operation.
LogicalResult matchAndRewrite(T affineOp, PatternRewriter &rewriter) const override
LogicalResult matchAndRewrite(T affineOp, PatternRewriter &rewriter) const override
Remove duplicated expressions in affine min/max ops.
LogicalResult matchAndRewrite(T affineOp, PatternRewriter &rewriter) const override
Merge an affine min/max op to its consumers if its consumer is also an affine min/max op.
LogicalResult matchAndRewrite(T affineOp, PatternRewriter &rewriter) const override
This is the representation of an operand reference.
This class represents a listener that may be used to hook into various actions within an OpBuilder.
OpRewritePattern is a wrapper around RewritePattern that allows for matching and rewriting against an...
OpRewritePattern(MLIRContext *context, PatternBenefit benefit=1, ArrayRef< StringRef > generatedNames={})
This represents an operation in an abstracted form, suitable for use with the builder APIs.