#include "llvm/Support/LogicalResult.h"

#define DEBUG_TYPE "affine-utils"

using namespace mlir;
using namespace affine;
using namespace presburger;
namespace {
/// Visits affine expressions recursively and builds the sequence of arith
/// operations that computes them.
class AffineApplyExpander
    : public AffineExprVisitor<AffineApplyExpander, Value> {
public:
  AffineApplyExpander(OpBuilder &builder, ValueRange dimValues,
                      ValueRange symbolValues, Location loc)
      : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
        loc(loc) {}

  template <typename OpTy>
  Value buildBinaryExpr(AffineBinaryOpExpr expr) {
    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    if (!lhs || !rhs)
      return nullptr;
    auto op = builder.create<OpTy>(loc, lhs, rhs);
    return op.getResult();
  }

  Value visitAddExpr(AffineBinaryOpExpr expr) {
    return buildBinaryExpr<arith::AddIOp>(expr);
  }

  Value visitMulExpr(AffineBinaryOpExpr expr) {
    return buildBinaryExpr<arith::MulIOp>(expr);
  }
  /// Euclidean modulo operation: negative RHS is not allowed.
  /// The remainder of the Euclidean integer division is always non-negative.
  ///
  /// Implemented as
  ///
  ///     a mod b =
  ///         let remainder = srem a, b;
  ///             negative = remainder < 0 in
  ///         select negative, remainder + b, remainder.
  Value visitModExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "modulo by non-positive value is not supported");
        return nullptr;
      }
    }

    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value remainder = builder.create<arith::RemSIOp>(loc, lhs, rhs);
    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value isRemainderNegative = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::slt, remainder, zeroCst);
    Value correctedRemainder =
        builder.create<arith::AddIOp>(loc, remainder, rhs);
    Value result = builder.create<arith::SelectOp>(
        loc, isRemainderNegative, correctedRemainder, remainder);
    return result;
  }
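  // For illustration (not part of the original file): expanding `d0 mod 4`
  // with this visitor produces IR equivalent to
  //
  //   %rem  = arith.remsi %d0, %c4 : index
  //   %neg  = arith.cmpi slt, %rem, %c0 : index
  //   %corr = arith.addi %rem, %c4 : index
  //   %res  = arith.select %neg, %corr, %rem : index
  //
  // whose result is non-negative for any signed %d0, i.e., Euclidean mod.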
  /// Floor division operation (rounds towards negative infinity).
  ///
  /// For positive divisors, it can be implemented without branching and with a
  /// single division operation as
  ///
  ///        a floordiv b =
  ///            let negative = a < 0 in
  ///            let absolute = negative ? -a - 1 : a in
  ///            let quotient = absolute / b in
  ///                negative ? -quotient - 1 : quotient
  Value visitFloorDivExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "division by non-positive value is not supported");
        return nullptr;
      }
    }

    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value noneCst = builder.create<arith::ConstantIndexOp>(loc, -1);
    Value negative = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::slt, lhs, zeroCst);
    Value negatedDecremented = builder.create<arith::SubIOp>(loc, noneCst, lhs);
    Value dividend =
        builder.create<arith::SelectOp>(loc, negative, negatedDecremented, lhs);
    Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
    Value correctedQuotient =
        builder.create<arith::SubIOp>(loc, noneCst, quotient);
    Value result = builder.create<arith::SelectOp>(loc, negative,
                                                   correctedQuotient, quotient);
    return result;
  }
  /// Ceiling division operation (rounds towards positive infinity).
  ///
  /// For positive divisors, it can be implemented without branching and with a
  /// single division operation as
  ///
  ///        a ceildiv b =
  ///            let negative = a <= 0 in
  ///            let absolute = negative ? -a : a - 1 in
  ///            let quotient = absolute / b in
  ///                negative ? -quotient : quotient + 1
  Value visitCeilDivExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "division by non-positive value is not supported");
        return nullptr;
      }
    }

    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value oneCst = builder.create<arith::ConstantIndexOp>(loc, 1);
    Value nonPositive = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::sle, lhs, zeroCst);
    Value negated = builder.create<arith::SubIOp>(loc, zeroCst, lhs);
    Value decremented = builder.create<arith::SubIOp>(loc, lhs, oneCst);
    Value dividend =
        builder.create<arith::SelectOp>(loc, nonPositive, negated, decremented);
    Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
    Value negatedQuotient =
        builder.create<arith::SubIOp>(loc, zeroCst, quotient);
    Value incrementedQuotient =
        builder.create<arith::AddIOp>(loc, quotient, oneCst);
    Value result = builder.create<arith::SelectOp>(
        loc, nonPositive, negatedQuotient, incrementedQuotient);
    return result;
  }
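  // For illustration (not part of the original file): both lowerings use a
  // single signed division,
  //
  //   a floordiv b = (a < 0)  ? -((-a - 1) / b) - 1 : a / b
  //   a ceildiv b  = (a <= 0) ? -((-a) / b)         : ((a - 1) / b) + 1
  //
  // so e.g. -1 floordiv 4 == -1 (C's truncating '/' would give 0), and
  // 5 ceildiv 4 == 2 (truncating '/' would give 1).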
  Value visitConstantExpr(AffineConstantExpr expr) {
    auto op = builder.create<arith::ConstantIndexOp>(loc, expr.getValue());
    return op.getResult();
  }

  Value visitDimExpr(AffineDimExpr expr) {
    assert(expr.getPosition() < dimValues.size() &&
           "affine dim position out of range");
    return dimValues[expr.getPosition()];
  }

  Value visitSymbolExpr(AffineSymbolExpr expr) {
    assert(expr.getPosition() < symbolValues.size() &&
           "symbol dim position out of range");
    return symbolValues[expr.getPosition()];
  }

private:
  OpBuilder &builder;
  ValueRange dimValues;
  ValueRange symbolValues;
  Location loc;
};
} // namespace

/// Create a sequence of operations that implement the `expr` applied to the
/// given dimension and symbol values.
Value mlir::affine::expandAffineExpr(OpBuilder &builder, Location loc,
                                     AffineExpr expr, ValueRange dimValues,
                                     ValueRange symbolValues) {
  return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
}
/// Create a sequence of operations that implement the `affineMap` applied to
/// the given `operands` (as if it were an AffineApplyOp).
std::optional<SmallVector<Value, 8>>
mlir::affine::expandAffineMap(OpBuilder &builder, Location loc,
                              AffineMap affineMap, ValueRange operands) {
  auto numDims = affineMap.getNumDims();
  auto expanded = llvm::to_vector<8>(
      llvm::map_range(affineMap.getResults(),
                      [numDims, &builder, loc, operands](AffineExpr expr) {
                        return expandAffineExpr(builder, loc, expr,
                                                operands.take_front(numDims),
                                                operands.drop_front(numDims));
                      }));
  if (llvm::all_of(expanded, [](Value v) { return v; }))
    return expanded;
  return std::nullopt;
}
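// For illustration (hypothetical use, not part of the original file):
// expanding the map (d0, d1) -> (d0 + d1, d0 floordiv 2) over two index
// values emits one arith.addi plus the floordiv sequence above and returns
// both result Values; std::nullopt is returned if any expression failed to
// lower.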
/// Promotes the `then` or the `else` block of `ifOp` (depending on whether
/// `elseBlock` is false or true) into `ifOp`'s containing block, and discards
/// the rest of the op.
static void promoteIfBlock(AffineIfOp ifOp, bool elseBlock) {
  if (elseBlock)
    assert(ifOp.hasElse() && "else block expected");

  Block *destBlock = ifOp->getBlock();
  Block *srcBlock = elseBlock ? ifOp.getElseBlock() : ifOp.getThenBlock();
  destBlock->getOperations().splice(
      Block::iterator(ifOp), srcBlock->getOperations(), srcBlock->begin(),
      std::prev(srcBlock->end()));
  ifOp.erase();
}
/// Returns the outermost affine.for/parallel op that the `ifOp` is invariant
/// on; the `ifOp` could be hoisted and placed right before such an operation.
/// This method assumes that the `ifOp` has been canonicalized.
static Operation *getOutermostInvariantForOp(AffineIfOp ifOp) {
  // Walk up the parents past all for ops that this conditional is invariant
  // on.
  auto ifOperands = ifOp.getOperands();
  auto *res = ifOp.getOperation();
  while (!isa<func::FuncOp>(res->getParentOp())) {
    auto *parentOp = res->getParentOp();
    if (auto forOp = dyn_cast<AffineForOp>(parentOp)) {
      if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
        break;
    } else if (auto parallelOp = dyn_cast<AffineParallelOp>(parentOp)) {
      for (auto iv : parallelOp.getIVs())
        if (llvm::is_contained(ifOperands, iv))
          break;
    } else if (!isa<AffineIfOp>(parentOp)) {
      // Won't walk up past anything other than affine.for/parallel/if ops.
      break;
    }
    res = parentOp;
  }
  return res;
}
/// A helper for hoisting: hoists `ifOp` just over `hoistOverOp`, cloning the
/// latter to build the else branch.
static AffineIfOp hoistAffineIfOp(AffineIfOp ifOp, Operation *hoistOverOp) {
  // No hoisting to do.
  if (hoistOverOp == ifOp)
    return ifOp;

  OpBuilder b(hoistOverOp);
  // ...
  auto hoistedIfOp = b.create<AffineIfOp>(ifOp.getLoc(), ifOp.getIntegerSet(),
                                          ifOp.getOperands(),
                                          /*withElseRegion=*/true);

  // Mark the original if op with an attribute so that its clone can be found
  // in the cloned region below.
  StringAttr idForIfOp = b.getStringAttr("__mlir_if_hoisting");
  // ...
  IRMapping operandMap;
  Operation *hoistOverOpClone = nullptr;
  hoistOverOpClone = b.clone(*hoistOverOp, operandMap);
  // ...
  // Move the hoisted-over op into the then block of the hoisted if op.
  auto *thenBlock = hoistedIfOp.getThenBlock();
  thenBlock->getOperations().splice(thenBlock->begin(),
                                    hoistOverOp->getBlock()->getOperations(),
                                    Block::iterator(hoistOverOp));

  // Find the clone of the original if op inside the cloned region.
  AffineIfOp ifCloneInElse;
  hoistOverOpClone->walk([&](AffineIfOp ifClone) {
    if (!ifClone->getAttr(idForIfOp))
      return WalkResult::advance();
    ifCloneInElse = ifClone;
    return WalkResult::interrupt();
  });
  assert(ifCloneInElse && "if op clone should exist");
  // If the original if op had no else block, erase the clone; otherwise,
  // promote its else block.
  if (!ifCloneInElse.hasElse())
    ifCloneInElse.erase();
  else
    promoteIfBlock(ifCloneInElse, /*elseBlock=*/true);

  // Move the cloned hoisted-over op into the else block.
  auto *elseBlock = hoistedIfOp.getElseBlock();
  elseBlock->getOperations().splice(
      elseBlock->begin(), hoistOverOpClone->getBlock()->getOperations(),
      Block::iterator(hoistOverOpClone));
  // ...
  return hoistedIfOp;
}
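// For illustration (not part of the original file): hoisting past one loop
// in effect rewrites
//
//   affine.for %i = 0 to %n {
//     affine.if #set(%m) { "work"() : () -> () }
//   }
//
// into
//
//   affine.if #set(%m) {
//     affine.for %i = 0 to %n { "work"() : () -> () }
//   } else {
//     affine.for %i = 0 to %n { }
//   }
//
// when the if condition does not depend on %i.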
/// Replaces a parallel affine.for op with a 1-d affine.parallel op.
LogicalResult mlir::affine::affineParallelize(
    AffineForOp forOp, ArrayRef<LoopReduction> parallelReductions,
    AffineParallelOp *resOp) {
  // Fail early if there are iter arguments that are not reductions.
  unsigned numReductions = parallelReductions.size();
  if (numReductions != forOp.getNumIterOperands())
    return failure();

  Location loc = forOp.getLoc();
  OpBuilder outsideBuilder(forOp);
  AffineMap lowerBoundMap = forOp.getLowerBoundMap();
  ValueRange lowerBoundOperands = forOp.getLowerBoundOperands();
  AffineMap upperBoundMap = forOp.getUpperBoundMap();
  ValueRange upperBoundOperands = forOp.getUpperBoundOperands();

  // Creating empty 1-D affine.parallel op.
  auto reducedValues = llvm::to_vector<4>(llvm::map_range(
      parallelReductions, [](const LoopReduction &red) { return red.value; }));
  auto reductionKinds = llvm::to_vector<4>(llvm::map_range(
      parallelReductions, [](const LoopReduction &red) { return red.kind; }));
  AffineParallelOp newPloop = outsideBuilder.create<AffineParallelOp>(
      loc, ValueRange(reducedValues).getTypes(), reductionKinds,
      llvm::ArrayRef(lowerBoundMap), lowerBoundOperands,
      llvm::ArrayRef(upperBoundMap), upperBoundOperands,
      llvm::ArrayRef(forOp.getStepAsInt()));
  // Steal the body of the old affine for op.
  newPloop.getRegion().takeBody(forOp.getRegion());
  Operation *yieldOp = &newPloop.getBody()->back();
  // Handle the initial values of reductions: the parallel op combines with
  // the neutral element, so the loop-carried init is folded in afterwards.
  SmallVector<Value> newResults;
  newResults.reserve(numReductions);
  for (unsigned i = 0; i < numReductions; ++i) {
    Value init = forOp.getInits()[i];
    // ...
    Operation *reductionOp = yieldOp->getOperand(i).getDefiningOp();
    assert(reductionOp && "yielded value is expected to be produced by an op");
    // ...
    // Produce the final reduction value outside the loop by combining the
    // init with the parallel op's result.
    reductionOp->setOperands({init, newPloop->getResult(i)});
    forOp->getResult(i).replaceAllUsesWith(reductionOp->getResult(0));
  }

  // ...
  // The parallel op's body no longer carries the reduction block arguments.
  unsigned numIVs = 1;
  newPloop.getBody()->eraseArguments(numIVs, numReductions);

  forOp.erase();
  if (resOp)
    *resOp = newPloop;
  return success();
}
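// For illustration (not part of the original file): with a single "addf"
// reduction,
//
//   %r = affine.for %i = 0 to 8 iter_args(%acc = %init) -> (f32) {
//     %v = ... : f32
//     %s = arith.addf %acc, %v : f32
//     affine.yield %s : f32
//   }
//
// becomes roughly
//
//   %p = affine.parallel (%i) = (0) to (8) reduce ("addf") -> (f32) { ... }
//   %r = arith.addf %init, %p : f32
//
// i.e., the combination with the initial value moves after the parallel op.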
/// Hoists out affine.if/else to as high as possible, i.e., past all invariant
/// affine.fors/parallels.
LogicalResult mlir::affine::hoistAffineIfOp(AffineIfOp ifOp, bool *folded) {
  // Bail out early if the ifOp returns a result.
  if (ifOp.getNumResults() != 0)
    return failure();

  // Apply canonicalization patterns and folding: this is necessary for the
  // hoisting check to be correct (operands should be composed), and to be
  // more effective (no unused operands).
  RewritePatternSet patterns(ifOp.getContext());
  AffineIfOp::getCanonicalizationPatterns(patterns, ifOp.getContext());
  // ...
  assert(llvm::all_of(ifOp.getOperands(),
                      [](Value v) {
                        return isTopLevelValue(v) || isAffineForInductionVar(v);
                      }) &&
         "operands not composed");

  // Hoist as high as possible.
  Operation *hoistOverOp = getOutermostInvariantForOp(ifOp);
  AffineIfOp hoistedIfOp = ::hoistAffineIfOp(ifOp, hoistOverOp);
  // If nothing could be hoisted, return failure.
  if (hoistedIfOp == ifOp)
    return failure();

  return success();
}
/// Traverse `e` and return an AffineExpr where all occurrences of `dim` have
/// been replaced by either `min` (on positive paths) or `max` (on negative
/// ones), flipping the path on multiplication by a negative constant.
AffineExpr mlir::affine::substWithMin(AffineExpr e, AffineExpr dim,
                                      AffineExpr min, AffineExpr max,
                                      bool positivePath) {
  if (e == dim)
    return positivePath ? min : max;
  if (auto bin = dyn_cast<AffineBinaryOpExpr>(e)) {
    AffineExpr lhs = bin.getLHS();
    AffineExpr rhs = bin.getRHS();
    // ...
    auto c1 = dyn_cast<AffineConstantExpr>(bin.getLHS());
    auto c2 = dyn_cast<AffineConstantExpr>(bin.getRHS());
    if (c1 && c1.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), c1, substWithMin(rhs, dim, min, max, !positivePath));
    if (c2 && c2.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), substWithMin(lhs, dim, min, max, !positivePath), c2);
    return getAffineBinaryOpExpr(
        bin.getKind(), substWithMin(lhs, dim, min, max, positivePath),
        substWithMin(rhs, dim, min, max, positivePath));
  }
  return e;
}
/// Normalize an affine.parallel op so that lower bounds are 0 and steps are 1.
void mlir::affine::normalizeAffineParallel(AffineParallelOp op) {
  // Loops with min/max in bounds are not normalized at the moment.
  if (op.hasMinMaxBounds())
    return;

  AffineMap lbMap = op.getLowerBoundsMap();
  SmallVector<int64_t, 8> steps = op.getSteps();
  // No need to do any work if the parallel op is already normalized.
  bool isAlreadyNormalized =
      llvm::all_of(llvm::zip(steps, lbMap.getResults()), [](auto tuple) {
        int64_t step = std::get<0>(tuple);
        auto lbExpr = dyn_cast<AffineConstantExpr>(std::get<1>(tuple));
        return lbExpr && lbExpr.getValue() == 0 && step == 1;
      });
  if (isAlreadyNormalized)
    return;

  AffineValueMap ranges;
  AffineValueMap::difference(op.getUpperBoundsValueMap(),
                             op.getLowerBoundsValueMap(), &ranges);
  auto builder = OpBuilder::atBlockBegin(op.getBody());
  auto zeroExpr = builder.getAffineConstantExpr(0);
  SmallVector<AffineExpr, 8> lbExprs;
  SmallVector<AffineExpr, 8> ubExprs;
  for (unsigned i = 0, e = steps.size(); i < e; ++i) {
    int64_t step = steps[i];

    // Adjust the lower bound to be 0.
    lbExprs.push_back(zeroExpr);

    // Adjust the upper bound expression: 'range / step'.
    AffineExpr ubExpr = ranges.getResult(i).ceilDiv(step);
    ubExprs.push_back(ubExpr);

    // Adjust the corresponding IV: 'lb + i * step'.
    BlockArgument iv = op.getBody()->getArgument(i);
    AffineExpr lbExpr = lbMap.getResult(i);
    unsigned nDims = lbMap.getNumDims();
    auto expr = lbExpr + builder.getAffineDimExpr(nDims) * step;
    auto map = AffineMap::get(/*dimCount=*/nDims + 1,
                              /*symbolCount=*/lbMap.getNumSymbols(), expr);

    // Use an 'affine.apply' op that will be simplified later in subsequent
    // canonicalizations.
    OperandRange lbOperands = op.getLowerBoundsOperands();
    OperandRange dimOperands = lbOperands.take_front(nDims);
    OperandRange symbolOperands = lbOperands.drop_front(nDims);
    SmallVector<Value, 8> applyOperands{dimOperands};
    applyOperands.push_back(iv);
    applyOperands.append(symbolOperands.begin(), symbolOperands.end());
    auto apply = builder.create<AffineApplyOp>(op.getLoc(), map, applyOperands);
    iv.replaceAllUsesExcept(apply, apply);
  }

  SmallVector<int64_t, 8> newSteps(op.getNumDims(), 1);
  op.setSteps(newSteps);
  auto newLowerMap = AffineMap::get(
      /*dimCount=*/0, /*symbolCount=*/0, lbExprs, op.getContext());
  op.setLowerBounds({}, newLowerMap);
  auto newUpperMap = AffineMap::get(ranges.getNumDims(), ranges.getNumSymbols(),
                                    ubExprs, op.getContext());
  op.setUpperBounds(ranges.getOperands(), newUpperMap);
}
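// For illustration (not part of the original file): normalization rewrites
//
//   affine.parallel (%i) = (%lb) to (%ub) step (4) { ... %i ... }
//
// into (sketch)
//
//   affine.parallel (%j) = (0) to ((%ub - %lb) ceildiv 4) {
//     %i = affine.apply affine_map<(d0, d1) -> (d0 + d1 * 4)>(%lb, %j)
//     ... %i ...
//   }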
/// Normalizes an affine.for op: lower bound becomes 0 and step becomes 1;
/// uses of the induction variable are rewritten through an affine.apply.
LogicalResult mlir::affine::normalizeAffineFor(AffineForOp op,
                                               bool promoteSingleIter) {
  if (promoteSingleIter && succeeded(promoteIfSingleIteration(op)))
    return success();

  // Check if the for op is already normalized.
  if (op.hasConstantLowerBound() && (op.getConstantLowerBound() == 0) &&
      (op.getStepAsInt() == 1))
    return success();

  // Only single-result lower bound maps are supported.
  if (op.getLowerBoundMap().getNumResults() != 1)
    return failure();

  Location loc = op.getLoc();
  OpBuilder opBuilder(op);
  int64_t origLoopStep = op.getStepAsInt();

  // Construct the new upper bound value map: (ub - lb) ceildiv step, for each
  // upper bound result, after padding the maps to a common dim/symbol space.
  AffineMap oldLbMap = op.getLowerBoundMap();
  // ... (builds `paddedLbMap` around op.getLowerBoundMap().getResult(0)) ...
  AffineValueMap paddedLbValueMap(paddedLbMap, op.getLowerBoundOperands());
  AffineValueMap ubValueMap(op.getUpperBoundMap(), op.getUpperBoundOperands());
  // ...
  for (unsigned i = 0; i < numResult; ++i) {
    // ... subtract the padded lower bound from each upper-bound result ...
  }
  // ...
  op.setUpperBound(newUbValueMap.getOperands(), newUbMap);
  op.setLowerBound({}, opBuilder.getConstantAffineMap(0));
  op.setStep(1);

  // Calculate the Value of the new loop IV: in the normalized loop,
  // old IV = new IV * step + old lb.
  // ...
  (void)newIvToOldIvMap.canonicalize();
  auto newIV = opBuilder.create<AffineApplyOp>(
      loc, newIvToOldIvMap.getAffineMap(), newIvToOldIvMap.getOperands());
  op.getInductionVar().replaceAllUsesExcept(newIV->getResult(0), newIV);
  return success();
}
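// For illustration (not part of the original file):
//
//   affine.for %i = 2 to 34 step 4 { ... %i ... }
//
// normalizes to
//
//   affine.for %j = 0 to 8 {
//     %i = affine.apply affine_map<(d0) -> (d0 * 4 + 2)>(%j)
//     ... %i ...
//   }
//
// since (34 - 2) ceildiv 4 = 8 iterations.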
/// Returns true if `srcMemOp` may have an effect on `destMemOp` within the
/// scope of the outermost `minSurroundingLoops` common surrounding loops.
static bool mayHaveEffect(Operation *srcMemOp, Operation *destMemOp,
                          unsigned minSurroundingLoops) {
  MemRefAccess srcAccess(srcMemOp);
  MemRefAccess destAccess(destMemOp);
  // ...
  unsigned nsLoops = getNumCommonSurroundingLoops(*srcMemOp, *destMemOp);
  for (unsigned d = nsLoops + 1; d > minSurroundingLoops; d--) {
    FlatAffineValueConstraints dependenceConstraints;
    DependenceResult result = checkMemrefAccessDependence(
        srcAccess, destAccess, d, &dependenceConstraints,
        /*dependenceComponents=*/nullptr);
    if (hasDependence(result))
      return true;
  }
  return false;
}
/// Ensure that all operations that could be executed after `start`
/// (non-inclusive) and prior to `memOp` (e.g. on a control-flow path between
/// the operations) do not have the effect `EffectType` on `memOp`.
template <typename EffectType, typename T>
bool mlir::affine::hasNoInterveningEffect(
    Operation *start, T memOp,
    llvm::function_ref<bool(Value, Value)> mayAlias) {
  // Whether an intervening operation could have impacted memOp.
  bool hasSideEffect = false;

  // Check whether the effect on memOp can be caused by a given operation op.
  Value memref = memOp.getMemRef();
  std::function<void(Operation *)> checkOperation = [&](Operation *op) {
    // If the effect has already been found, early exit.
    if (hasSideEffect)
      return;

    if (auto memEffect = dyn_cast<MemoryEffectOpInterface>(op)) {
      SmallVector<MemoryEffects::EffectInstance, 1> effects;
      memEffect.getEffects(effects);

      bool opMayHaveEffect = false;
      for (auto effect : effects) {
        // If op causes EffectType on a potentially aliasing location for
        // memOp, mark as having the effect.
        if (isa<EffectType>(effect.getEffect())) {
          if (effect.getValue() && effect.getValue() != memref &&
              !mayAlias(effect.getValue(), memref))
            continue;
          opMayHaveEffect = true;
          break;
        }
      }

      if (!opMayHaveEffect)
        return;

      // If the side effect comes from an affine read or write, try to prove
      // that the side-effecting op cannot reach memOp via affine dependence
      // analysis.
      if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
        // ...
        unsigned minSurroundingLoops =
            getNumCommonSurroundingLoops(*start, *memOp);
        if (mayHaveEffect(op, memOp, minSurroundingLoops))
          hasSideEffect = true;
        return;
      }

      // Conservative if the op is not an affine memory operation.
      hasSideEffect = true;
      return;
    }

    if (op->hasTrait<OpTrait::HasRecursiveMemoryEffects>()) {
      // Recurse into the regions for this op and check whether the nested
      // operations may have the side effect EffectType on memOp.
      for (Region &region : op->getRegions())
        for (Block &block : region)
          for (Operation &nested : block)
            checkOperation(&nested);
      return;
    }

    // Otherwise, conservatively assume the op has the effect.
    hasSideEffect = true;
  };

  // Check all paths from ancestor op `parent` to the operation `to` for the
  // effect. It is known that `to` must be contained within `parent`.
  auto until = [&](Operation *parent, Operation *to) {
    // Conservatively check the entire parent op rather than just the paths
    // from `parent` to `to`.
    assert(parent->isAncestor(to));
    checkOperation(parent);
  };

  // Check for all paths from operation `from` to operation `untilOp` for the
  // effect in the blocks in between.
  std::function<void(Operation *, Operation *)> recur =
      [&](Operation *from, Operation *untilOp) {
        assert(
            from->getParentRegion()->isAncestor(untilOp->getParentRegion()) &&
            "Checking for side effect between two operations without a common "
            "ancestor");

        // If the operations are in different regions, recursively consider
        // all paths from `from` to the parent of `untilOp`, and all paths
        // from that parent into `untilOp`.
        if (from->getParentRegion() != untilOp->getParentRegion()) {
          recur(from, untilOp->getParentOp());
          until(untilOp->getParentOp(), untilOp);
          return;
        }

        // Check all operations between `from` and `untilOp` in their block.
        for (auto iter = ++from->getIterator(), end = from->getBlock()->end();
             iter != end && &*iter != untilOp; ++iter) {
          checkOperation(&*iter);
        }

        // If `from`'s block doesn't contain `untilOp`, traverse the CFG
        // starting from its successors.
        SmallVector<Block *, 2> todoBlocks;
        if (untilOp->getBlock() != from->getBlock())
          for (Block *succ : from->getBlock()->getSuccessors())
            todoBlocks.push_back(succ);

        SmallPtrSet<Block *, 4> done;
        while (!todoBlocks.empty()) {
          Block *blk = todoBlocks.pop_back_val();
          if (!done.insert(blk).second)
            continue;
          bool foundUntilOp = false;
          for (auto &op : *blk) {
            if (&op == untilOp) {
              foundUntilOp = true;
              break;
            }
            checkOperation(&op);
          }
          if (!foundUntilOp)
            for (Block *succ : blk->getSuccessors())
              todoBlocks.push_back(succ);
        }
      };
  recur(start, memOp);
  return !hasSideEffect;
}
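// For illustration (not part of the original file): forwarding is blocked by
// any op that may write the same memref on a path in between, e.g.
//
//   affine.store %v, %m[%i] : memref<8xf32>
//   func.call @opaque() : () -> ()   // unknown effects on %m
//   %r = affine.load %m[%i] : memref<8xf32>
//
// hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp, mayAlias)
// returns false here, since the call is conservatively assumed to write %m.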
/// Attempt to eliminate `loadOp` by replacing it with a value stored into
/// memory which the load is guaranteed to retrieve.
static void forwardStoreToLoad(
    AffineReadOpInterface loadOp, SmallVectorImpl<Operation *> &loadOpsToErase,
    SmallPtrSetImpl<Value> &memrefsToErase, DominanceInfo &domInfo,
    llvm::function_ref<bool(Value, Value)> mayAlias) {
  // The store op candidate for forwarding that satisfies all conditions
  // to replace the load, if any.
  Operation *lastWriteStoreOp = nullptr;

  for (auto *user : loadOp.getMemRef().getUsers()) {
    auto storeOp = dyn_cast<AffineWriteOpInterface>(user);
    if (!storeOp)
      continue;
    MemRefAccess srcAccess(storeOp);
    MemRefAccess destAccess(loadOp);

    // 1. The store and the load must have mathematically equivalent affine
    // access functions (i.e., refer to the same memref element).
    if (srcAccess != destAccess)
      continue;

    // 2. The store has to dominate the load op to be a candidate.
    if (!domInfo.dominates(storeOp, loadOp))
      continue;

    // 3. There should be no intervening write to the memref on any path from
    // the store to the load.
    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp,
                                                              mayAlias))
      continue;

    // We now have a candidate for forwarding.
    assert(lastWriteStoreOp == nullptr &&
           "multiple simultaneous replacement stores");
    lastWriteStoreOp = storeOp;
  }

  if (!lastWriteStoreOp)
    return;

  // Perform the actual store to load forwarding.
  Value storeVal =
      cast<AffineWriteOpInterface>(lastWriteStoreOp).getValueToStore();
  // Check that the types match; this is needed for affine vector loads/stores.
  if (storeVal.getType() != loadOp.getValue().getType())
    return;

  loadOp.getValue().replaceAllUsesWith(storeVal);
  // Record the memref for a later sweep to optimize away.
  memrefsToErase.insert(loadOp.getMemRef());
  // Record this to erase later.
  loadOpsToErase.push_back(loadOp);
}

// Explicit template instantiation for external users of the common case.
template bool mlir::affine::hasNoInterveningEffect<
    mlir::MemoryEffects::Read, mlir::affine::AffineReadOpInterface>(
    mlir::Operation *, mlir::affine::AffineReadOpInterface,
    llvm::function_ref<bool(Value, Value)>);
/// Attempts to find stores that are overwritten before being read (dead
/// stores) and records them for erasure.
static void findUnusedStore(AffineWriteOpInterface writeA,
                            SmallVectorImpl<Operation *> &opsToErase,
                            PostDominanceInfo &postDominanceInfo,
                            llvm::function_ref<bool(Value, Value)> mayAlias) {
  for (Operation *user : writeA.getMemRef().getUsers()) {
    // Only stores.
    auto writeB = dyn_cast<AffineWriteOpInterface>(user);
    if (!writeB)
      continue;

    // The candidate has to be different from writeA.
    if (writeB == writeA)
      continue;

    // Both operations must lie in the same region.
    if (writeB->getParentRegion() != writeA->getParentRegion())
      continue;

    // Both operations must write to the same memory location.
    MemRefAccess srcAccess(writeB);
    MemRefAccess destAccess(writeA);
    if (srcAccess != destAccess)
      continue;
    // ...
    // There cannot be an operation that reads from the memref between writeA
    // and writeB.
    if (!affine::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB,
                                                             mayAlias))
      continue;

    opsToErase.push_back(writeA);
  }
}
/// Replaces redundant loads (`loadA`) with an earlier, dominating load of the
/// same location (load CSE).
static void loadCSE(AffineReadOpInterface loadA,
                    SmallVectorImpl<Operation *> &loadOpsToErase,
                    DominanceInfo &domInfo,
                    llvm::function_ref<bool(Value, Value)> mayAlias) {
  SmallVector<AffineReadOpInterface, 4> loadCandidates;
  for (auto *user : loadA.getMemRef().getUsers()) {
    auto loadB = dyn_cast<AffineReadOpInterface>(user);
    if (!loadB || loadB == loadA)
      continue;

    MemRefAccess srcAccess(loadB);
    MemRefAccess destAccess(loadA);

    // 1. The accesses have to be to the same location.
    if (srcAccess != destAccess) {
      continue;
    }
    // ...
    // 2. There should not be a write between loadA and loadB.
    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(
            loadB.getOperation(), loadA, mayAlias))
      continue;

    // 3. The load must also be of the same result type.
    if (loadB.getValue().getType() != loadA.getValue().getType())
      continue;

    loadCandidates.push_back(loadB);
  }

  // Of the legal load candidates, use the one that dominates all the others,
  // to minimize the subsequent need for loadCSE.
  Value loadB;
  for (AffineReadOpInterface option : loadCandidates) {
    if (llvm::all_of(loadCandidates, [&](AffineReadOpInterface depStore) {
          return depStore == option ||
                 domInfo.dominates(option.getOperation(),
                                   depStore.getOperation());
        })) {
      loadB = option.getValue();
      break;
    }
  }

  if (loadB) {
    loadA.getValue().replaceAllUsesWith(loadB);
    // Record this to erase later.
    loadOpsToErase.push_back(loadA);
  }
}
/// Replace affine store and load accesses by scalars: forward stores to loads
/// and eliminate invariant redundant loads and dead stores.
void mlir::affine::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
                                       PostDominanceInfo &postDomInfo,
                                       AliasAnalysis &aliasAnalysis) {
  // Load ops whose results were replaced by those forwarded from stores.
  SmallVector<Operation *, 8> opsToErase;

  // A list of memrefs that are potentially dead / could be eliminated.
  SmallPtrSet<Value, 4> memrefsToErase;

  auto mayAlias = [&](Value val1, Value val2) -> bool {
    return !aliasAnalysis.alias(val1, val2).isNo();
  };

  // Walk all loads and perform store to load forwarding.
  f.walk([&](AffineReadOpInterface loadOp) {
    forwardStoreToLoad(loadOp, opsToErase, memrefsToErase, domInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
  opsToErase.clear();

  // Walk all stores and perform unused store elimination.
  f.walk([&](AffineWriteOpInterface storeOp) {
    findUnusedStore(storeOp, opsToErase, postDomInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
  opsToErase.clear();

  // Check if the store-forwarded memrefs are now left with only stores and
  // deallocs and can thus be completely deleted.
  for (auto memref : memrefsToErase) {
    // If the memref hasn't been locally alloc'ed, skip.
    Operation *defOp = memref.getDefiningOp();
    if (!defOp || !hasSingleEffect<MemoryEffects::Allocate>(defOp, memref))
      continue;
    if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
          return !isa<AffineWriteOpInterface>(ownerOp) &&
                 !hasSingleEffect<MemoryEffects::Free>(ownerOp, memref);
        }))
      continue;

    // Erase all stores, the dealloc, and the alloc on the memref.
    for (auto *user : llvm::make_early_inc_range(memref.getUsers()))
      user->erase();
    defOp->erase();
  }

  // Run load CSE after store forwarding and dead-store elimination.
  f.walk([&](AffineReadOpInterface loadOp) {
    loadCSE(loadOp, opsToErase, domInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
}
/// Rewrites a memref load whose rank is being reduced by `indexRemap` into an
/// equivalent op on `newMemRef`.
static LogicalResult transformMemRefLoadWithReducedRank(
    Operation *op, Value oldMemRef, Value newMemRef, unsigned memRefOperandPos,
    ArrayRef<Value> extraIndices, ArrayRef<Value> extraOperands,
    ArrayRef<Value> symbolOperands, AffineMap indexRemap) {
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  unsigned oldMapNumInputs = oldMemRefRank;
  SmallVector<Value, 4> oldMapOperands(
      op->operand_begin() + memRefOperandPos + 1,
      op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
  SmallVector<Value, 4> oldMemRefOperands;
  oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());

  SmallVector<Value, 4> remapOperands;
  remapOperands.reserve(extraOperands.size() + oldMemRefRank +
                        symbolOperands.size());
  remapOperands.append(extraOperands.begin(), extraOperands.end());
  remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
  remapOperands.append(symbolOperands.begin(), symbolOperands.end());

  SmallVector<Value, 4> remapOutputs;
  remapOutputs.reserve(oldMemRefRank);
  SmallVector<Value, 4> affineApplyOps;

  OpBuilder builder(op);
  if (indexRemap &&
      indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
    // Remapped indices: one affine.apply per remap result.
    for (auto resultExpr : indexRemap.getResults()) {
      auto singleResMap = AffineMap::get(
          indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                remapOperands);
      remapOutputs.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    // No remapping specified.
    remapOutputs.assign(remapOperands.begin(), remapOperands.end());
  }

  SmallVector<Value, 4> newMapOperands;
  newMapOperands.reserve(newMemRefRank);

  // Prepend 'extraIndices' in 'newMapOperands'.
  for (Value extraIndex : extraIndices) {
    assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
           "invalid memory op index");
    newMapOperands.push_back(extraIndex);
  }

  // Append 'remapOutputs' to 'newMapOperands'.
  newMapOperands.append(remapOutputs.begin(), remapOutputs.end());
  assert(newMapOperands.size() == newMemRefRank);

  // Create the new operation: same name and result types, new memref plus
  // the rewritten indices, and the original attributes.
  OperationState state(op->getLoc(), op->getName());
  state.operands.reserve(newMapOperands.size() + extraIndices.size());
  state.operands.push_back(newMemRef);
  state.operands.append(newMapOperands.begin(), newMapOperands.end());

  state.types.reserve(op->getNumResults());
  for (auto result : op->getResults())
    state.types.push_back(result.getType());

  for (auto namedAttr : op->getAttrs())
    state.attributes.push_back(namedAttr);

  auto *repOp = builder.create(state);
  op->replaceAllUsesWith(repOp);
  op->erase();
  return success();
}
LogicalResult mlir::affine::replaceAllMemRefUsesWith(
    Value oldMemRef, Value newMemRef, Operation *op,
    ArrayRef<Value> extraIndices, AffineMap indexRemap,
    ArrayRef<Value> extraOperands, ArrayRef<Value> symbolOperands,
    bool allowNonDereferencingOps) {
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  (void)newMemRefRank; // unused in opt mode
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  (void)oldMemRefRank; // unused in opt mode
  if (indexRemap) {
    assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
           "symbolic operand count mismatch");
    assert(indexRemap.getNumInputs() ==
           extraOperands.size() + oldMemRefRank + symbolOperands.size());
    assert(indexRemap.getNumResults() + extraIndices.size() == newMemRefRank);
  } else {
    assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
  }

  // Assert same elemental type.
  assert(cast<MemRefType>(oldMemRef.getType()).getElementType() ==
         cast<MemRefType>(newMemRef.getType()).getElementType());

  SmallVector<unsigned, 2> usePositions;
  for (const auto &opEntry : llvm::enumerate(op->getOperands())) {
    if (opEntry.value() == oldMemRef)
      usePositions.push_back(opEntry.index());
  }

  // If the memref doesn't appear, nothing to do.
  if (usePositions.empty())
    return success();

  if (usePositions.size() > 1) {
    assert(false && "multiple dereferencing uses in a single op not supported");
    return failure();
  }

  unsigned memRefOperandPos = usePositions.front();

  OpBuilder builder(op);
  // The following checks if op is dereferencing the memref and performs the
  // access index rewrites.
  auto affMapAccInterface = dyn_cast<AffineMapAccessInterface>(op);
  if (!affMapAccInterface) {
    if (!allowNonDereferencingOps) {
      // Failure: the memref is used in a non-dereferencing context
      // (potentially escapes); no replacement in these cases.
      return failure();
    }

    // Check if it is a memref.load.
    auto memrefLoad = dyn_cast<memref::LoadOp>(op);
    bool isReductionLike =
        indexRemap.getNumResults() < indexRemap.getNumInputs();
    if (!memrefLoad || !isReductionLike) {
      op->setOperand(memRefOperandPos, newMemRef);
      return success();
    }

    return transformMemRefLoadWithReducedRank(
        op, oldMemRef, newMemRef, memRefOperandPos, extraIndices, extraOperands,
        symbolOperands, indexRemap);
  }
  // Perform index rewrites for the dereferencing op and then replace the op.
  NamedAttribute oldMapAttrPair =
      affMapAccInterface.getAffineMapAttrForMemRef(oldMemRef);
  AffineMap oldMap = cast<AffineMapAttr>(oldMapAttrPair.getValue()).getValue();
  unsigned oldMapNumInputs = oldMap.getNumInputs();
  SmallVector<Value, 4> oldMapOperands(
      op->operand_begin() + memRefOperandPos + 1,
      op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);

  // Apply 'oldMemRefOperands = oldMap(oldMapOperands)'.
  SmallVector<Value, 4> oldMemRefOperands;
  SmallVector<Value, 4> affineApplyOps;
  oldMemRefOperands.reserve(oldMemRefRank);
  if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
    for (auto resultExpr : oldMap.getResults()) {
      auto singleResMap = AffineMap::get(oldMap.getNumDims(),
                                         oldMap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                oldMapOperands);
      oldMemRefOperands.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());
  }

  // Construct new indices as a remap of the old ones if a remapping has been
  // provided.
  SmallVector<Value, 4> remapOperands;
  remapOperands.reserve(extraOperands.size() + oldMemRefRank +
                        symbolOperands.size());
  remapOperands.append(extraOperands.begin(), extraOperands.end());
  remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
  remapOperands.append(symbolOperands.begin(), symbolOperands.end());

  SmallVector<Value, 4> remapOutputs;
  remapOutputs.reserve(oldMemRefRank);
  if (indexRemap &&
      indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
    // Remapped indices.
    for (auto resultExpr : indexRemap.getResults()) {
      auto singleResMap = AffineMap::get(
          indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                remapOperands);
      remapOutputs.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    // No remapping specified.
    remapOutputs.assign(remapOperands.begin(), remapOperands.end());
  }

  SmallVector<Value, 4> newMapOperands;
  newMapOperands.reserve(newMemRefRank);

  // Prepend 'extraIndices' in 'newMapOperands'.
  for (Value extraIndex : extraIndices) {
    assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
           "invalid memory op index");
    newMapOperands.push_back(extraIndex);
  }

  // Append 'remapOutputs' to 'newMapOperands'.
  newMapOperands.append(remapOutputs.begin(), remapOutputs.end());

  // Create a new fully composed AffineMap for the new op to be created.
  assert(newMapOperands.size() == newMemRefRank);
  auto newMap = builder.getMultiDimIdentityMap(newMemRefRank);
  fullyComposeAffineMapAndOperands(&newMap, &newMapOperands);
  newMap = simplifyAffineMap(newMap);
  canonicalizeMapAndOperands(&newMap, &newMapOperands);
  // Remove any affine.apply's that became dead as a result of composition.
  for (Value value : affineApplyOps)
    if (value.use_empty())
      value.getDefiningOp()->erase();

  OperationState state(op->getLoc(), op->getName());
  state.operands.reserve(op->getNumOperands() + extraIndices.size());
  // Insert the non-memref operands.
  state.operands.append(op->operand_begin(),
                        op->operand_begin() + memRefOperandPos);
  // Insert the new memref value.
  state.operands.push_back(newMemRef);

  // Insert the new memref map operands.
  state.operands.append(newMapOperands.begin(), newMapOperands.end());

  // Insert the remaining operands.
  state.operands.append(op->operand_begin() + memRefOperandPos + 1 +
                            oldMapNumInputs,
                        op->operand_end());

  // Result types don't change. Both memrefs are of the same elemental type.
  state.types.reserve(op->getNumResults());
  for (auto result : op->getResults())
    state.types.push_back(result.getType());

  // Add the attribute for 'newMap'; the other attributes do not change.
  auto newMapAttr = AffineMapAttr::get(newMap);
  for (auto namedAttr : op->getAttrs()) {
    if (namedAttr.getName() == oldMapAttrPair.getName())
      state.attributes.push_back({namedAttr.getName(), newMapAttr});
    else
      state.attributes.push_back(namedAttr);
  }

  // Create the new operation.
  auto *repOp = builder.create(state);
  op->replaceAllUsesWith(repOp);
  op->erase();

  return success();
}
LogicalResult mlir::affine::replaceAllMemRefUsesWith(
    Value oldMemRef, Value newMemRef, ArrayRef<Value> extraIndices,
    AffineMap indexRemap, ArrayRef<Value> extraOperands,
    ArrayRef<Value> symbolOperands, Operation *domOpFilter,
    Operation *postDomOpFilter, bool allowNonDereferencingOps,
    bool replaceInDeallocOp) {
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  (void)newMemRefRank; // unused in opt mode
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  (void)oldMemRefRank;
  if (indexRemap) {
    assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
           "symbol operand count mismatch");
    assert(indexRemap.getNumInputs() ==
           extraOperands.size() + oldMemRefRank + symbolOperands.size());
    assert(indexRemap.getNumResults() + extraIndices.size() == newMemRefRank);
  } else {
    assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
  }

  // Assert same elemental type.
  assert(cast<MemRefType>(oldMemRef.getType()).getElementType() ==
         cast<MemRefType>(newMemRef.getType()).getElementType());

  std::unique_ptr<DominanceInfo> domInfo;
  std::unique_ptr<PostDominanceInfo> postDomInfo;
  if (domOpFilter)
    domInfo = std::make_unique<DominanceInfo>(
        domOpFilter->getParentOfType<func::FuncOp>());

  if (postDomOpFilter)
    postDomInfo = std::make_unique<PostDominanceInfo>(
        postDomOpFilter->getParentOfType<func::FuncOp>());

  // Walk all uses of the old memref and collect the ops to replace. Replace
  // only after collecting, since replacement erases the op holding the use.
  DenseSet<Operation *> opsToReplace;
  for (auto *op : oldMemRef.getUsers()) {
    // Skip this use if it's not dominated by domOpFilter.
    if (domOpFilter && !domInfo->dominates(domOpFilter, op))
      continue;

    // Skip this use if it's not post-dominated by postDomOpFilter.
    if (postDomOpFilter && !postDomInfo->postDominates(postDomOpFilter, op))
      continue;

    // Skip deallocs unless requested: no replacement is necessary, and a
    // memref replacement at other uses doesn't hurt them.
    if (hasSingleEffect<MemoryEffects::Free>(op, oldMemRef) &&
        !replaceInDeallocOp)
      continue;

    // Check if the memref is used in a non-dereferencing context.
    if (!isa<AffineMapAccessInterface>(*op)) {
      if (!allowNonDereferencingOps) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Memref replacement failed: non-deferencing memref op: \n"
                   << *op << '\n');
        return failure();
      }
      // Non-dereferencing ops are only supported if they are normalizable.
      if (!op->hasTrait<OpTrait::MemRefsNormalizable>()) {
        LLVM_DEBUG(llvm::dbgs() << "Memref replacement failed: use without a "
                                   "memrefs normalizable trait: \n"
                                << *op << '\n');
        return failure();
      }
    }

    opsToReplace.insert(op);
  }

  for (auto *op : opsToReplace) {
    if (failed(replaceAllMemRefUsesWith(
            oldMemRef, newMemRef, op, extraIndices, indexRemap, extraOperands,
            symbolOperands, allowNonDereferencingOps)))
      llvm_unreachable("memref replacement guaranteed to succeed here");
  }

  return success();
}
/// Given an operation, inserts one or more single-result affine apply
/// operations, results of which are exclusively used by this operation.
void mlir::affine::createAffineComputationSlice(
    Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
  // Collect all operands that are results of affine apply ops.
  SmallVector<Value, 4> subOperands;
  subOperands.reserve(opInst->getNumOperands());
  for (auto operand : opInst->getOperands())
    if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
      subOperands.push_back(operand);

  // Gather the sequence of AffineApplyOps reachable from 'subOperands'.
  SmallVector<Operation *, 4> affineApplyOps;
  getReachableAffineApplyOps(subOperands, affineApplyOps);
  // Skip transforming if there are no affine maps to compose.
  if (affineApplyOps.empty())
    return;

  // Check if all uses of the affine apply ops are already exclusively in this
  // op, in which case there is nothing to do.
  bool localized = true;
  for (auto *op : affineApplyOps) {
    for (auto result : op->getResults()) {
      for (auto *user : result.getUsers()) {
        if (user != opInst) {
          localized = false;
          break;
        }
      }
    }
  }
  if (localized)
    return;

  OpBuilder builder(opInst);
  SmallVector<Value, 4> composedOpOperands(subOperands);
  auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
  fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);

  // Create one affine.apply per composed map result.
  sliceOps->reserve(composedMap.getNumResults());
  for (auto resultExpr : composedMap.getResults()) {
    auto singleResMap = AffineMap::get(composedMap.getNumDims(),
                                       composedMap.getNumSymbols(), resultExpr);
    sliceOps->push_back(builder.create<AffineApplyOp>(
        opInst->getLoc(), singleResMap, composedOpOperands));
  }

  // Construct the new operands: they differ from opInst's operands only for
  // those operands in 'subOperands', which are replaced by the slice results.
  SmallVector<Value, 4> newOperands(opInst->getOperands());
  for (Value &operand : newOperands) {
    // Replace the subOperands from among the new operands.
    unsigned j, f;
    for (j = 0, f = subOperands.size(); j < f; j++) {
      if (operand == subOperands[j])
        break;
    }
    if (j < subOperands.size())
      operand = (*sliceOps)[j];
  }
  for (unsigned idx = 0, e = newOperands.size(); idx < e; idx++)
    opInst->setOperand(idx, newOperands[idx]);
}
/// Check if `map` is a tiled layout: specific dimensions being floordiv'ed by
/// respective tile sizes appear in a mod with the same tile sizes. Returns
/// the tile sizes and their positions in `tileSizePos`.
static LogicalResult getTileSizePos(
    AffineMap map,
    SmallVectorImpl<std::tuple<AffineExpr, unsigned, unsigned>> &tileSizePos) {
  // Collect the `floordiv` expressions dividing by a constant:
  // <LHS, RHS, result position>.
  SmallVector<std::tuple<AffineExpr, AffineExpr, unsigned>, 4> floordivExprs;
  unsigned pos = 0;
  for (AffineExpr expr : map.getResults()) {
    if (expr.getKind() == AffineExprKind::FloorDiv) {
      AffineBinaryOpExpr binaryExpr = cast<AffineBinaryOpExpr>(expr);
      if (isa<AffineConstantExpr>(binaryExpr.getRHS()))
        floordivExprs.emplace_back(
            std::make_tuple(binaryExpr.getLHS(), binaryExpr.getRHS(), pos));
    }
    pos++;
  }
  // Not a tiled layout if there is no floordiv.
  if (floordivExprs.empty()) {
    tileSizePos = {};
    return success();
  }

  // For each floordiv result, look for a mod result with the same LHS and
  // RHS; the dimensions involved must not appear in any other expression.
  for (std::tuple<AffineExpr, AffineExpr, unsigned> fexpr : floordivExprs) {
    AffineExpr floordivExprLHS = std::get<0>(fexpr);
    AffineExpr floordivExprRHS = std::get<1>(fexpr);
    unsigned floordivPos = std::get<2>(fexpr);
    // ...
    bool notTiled = false;
    if (pos != floordivPos) {
      // ...
      if (e == floordivExprLHS) {
        if (expr.getKind() == AffineExprKind::Mod) {
          AffineBinaryOpExpr binaryExpr = cast<AffineBinaryOpExpr>(expr);
          // If the LHS and RHS of the `mod` match those of the `floordiv`,
          // save the tile size (the RHS of the `mod`) together with the
          // positions of the `floordiv` and the `mod`.
          if (floordivExprLHS == binaryExpr.getLHS() &&
              floordivExprRHS == binaryExpr.getRHS()) {
            tileSizePos.emplace_back(
                std::make_tuple(binaryExpr.getRHS(), floordivPos, pos));
          }
          // ...
        }
        // ...
      }
      // ...
    }
    // ...
  }
  // ...
  return success();
}
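// For illustration (not part of the original file): the layout map
//
//   affine_map<(d0, d1) -> (d0 floordiv 32, d1 floordiv 32,
//                           d0 mod 32, d1 mod 32)>
//
// is a tiled layout with 32x32 tiles; getTileSizePos records (32, 0, 2) and
// (32, 1, 3), i.e. <tile size, floordiv position, mod position>.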
/// Check if the `dim` dimension of a memref with `layoutMap` becomes dynamic
/// after normalization: it does if its result expression involves any of the
/// dynamic input dimensions `inMemrefTypeDynDims`.
static bool
isNormalizedMemRefDynamicDim(unsigned dim, AffineMap layoutMap,
                             SmallVectorImpl<unsigned> &inMemrefTypeDynDims) {
  AffineExpr expr = layoutMap.getResults()[dim];
  MLIRContext *context = layoutMap.getContext();
  return expr
      .walk([&](AffineExpr e) {
        if (isa<AffineDimExpr>(e) &&
            llvm::any_of(inMemrefTypeDynDims, [&](unsigned dim) {
              return e == getAffineDimExpr(dim, context);
            }))
          return WalkResult::interrupt();
        return WalkResult::advance();
      })
      .wasInterrupted();
}

/// Create an affine expression computing a dimension size for a tiled-layout
/// map output:
///   "floordiv <tile size>" ==> "ceildiv <tile size>"
///   "mod <tile size>"      ==> "<tile size>"
static AffineExpr createDimSizeExprForTiledLayout(AffineExpr oldMapOutput,
                                                  TileExprPattern pat) {
  AffineExpr newMapOutput;
  switch (pat) {
  case TileExprPattern::TileMod: {
    auto binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
    newMapOutput = binaryExpr.getRHS();
    break;
  }
  case TileExprPattern::TileFloorDiv: {
    auto binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
    newMapOutput = getAffineBinaryOpExpr(
        AffineExprKind::CeilDiv, binaryExpr.getLHS(), binaryExpr.getRHS());
    break;
  }
  default:
    newMapOutput = oldMapOutput;
  }
  return newMapOutput;
}
/// Create new maps to calculate each dimension size of `newMemRefType`, and
/// create `newDynamicSizes` from them by using AffineApplyOp.
static void createNewDynamicSizes(MemRefType oldMemRefType,
                                  MemRefType newMemRefType, AffineMap map,
                                  memref::AllocOp *allocOp, OpBuilder b,
                                  SmallVectorImpl<Value> &newDynamicSizes) {
  // Create new inputs for the AffineApplyOps.
  SmallVector<Value, 4> inAffineApply;
  ArrayRef<int64_t> oldMemRefShape = oldMemRefType.getShape();
  unsigned dynIdx = 0;
  for (unsigned d = 0; d < oldMemRefType.getRank(); ++d) {
    if (oldMemRefShape[d] < 0) {
      // Use the dynamic sizes of allocOp for dynamic dimensions.
      inAffineApply.emplace_back(allocOp->getDynamicSizes()[dynIdx]);
      dynIdx++;
    } else {
      // Create a constant op for static dimensions.
      auto constantAttr = b.getIntegerAttr(b.getIndexType(), oldMemRefShape[d]);
      inAffineApply.emplace_back(
          b.create<arith::ConstantOp>(allocOp->getLoc(), constantAttr));
    }
  }

  // Create a new map per dynamic dimension of newMemRefType to compute its
  // size from the old sizes.
  unsigned newDimIdx = 0;
  ArrayRef<int64_t> newMemRefShape = newMemRefType.getShape();
  // ...
  for (AffineExpr expr : map.getResults()) {
    if (newMemRefShape[newDimIdx] < 0) {
      // Determine the tile expression pattern for this output position.
      TileExprPattern pat = TileExprPattern::TileNone;
      for (auto pos : tileSizePos) {
        if (newDimIdx == std::get<1>(pos))
          pat = TileExprPattern::TileFloorDiv;
        else if (newDimIdx == std::get<2>(pos))
          pat = TileExprPattern::TileMod;
      }
      AffineExpr newMapOutput = createDimSizeExprForTiledLayout(expr, pat);
      AffineMap newMap = AffineMap::get(map.getNumInputs(), map.getNumSymbols(),
                                        newMapOutput);
      Value affineApp =
          b.create<AffineApplyOp>(allocOp->getLoc(), newMap, inAffineApply);
      newDynamicSizes.emplace_back(affineApp);
    }
    newDimIdx++;
  }
}
/// Rewrites the memref defined by this alloc op to have an identity layout
/// map and updates all its indexing uses.
LogicalResult mlir::affine::normalizeMemRef(memref::AllocOp *allocOp) {
  MemRefType memrefType = allocOp->getType();
  OpBuilder b(*allocOp);

  // Fetch a new memref type after normalizing the old memref to have an
  // identity map layout.
  MemRefType newMemRefType = normalizeMemRefType(memrefType);
  if (newMemRefType == memrefType)
    // Either memrefType already had an identity map or the map couldn't be
    // transformed to an identity map.
    return failure();

  Value oldMemRef = allocOp->getResult();
  // ...
  AffineMap layoutMap = memrefType.getLayout().getAffineMap();
  memref::AllocOp newAlloc;
  // Check if `layoutMap` is a tiled layout; dynamic shapes need their sizes
  // recomputed through the map.
  SmallVector<std::tuple<AffineExpr, unsigned, unsigned>> tileSizePos;
  (void)getTileSizePos(layoutMap, tileSizePos);
  if (newMemRefType.getNumDynamicDims() > 0 && !tileSizePos.empty()) {
    MemRefType oldMemRefType = cast<MemRefType>(oldMemRef.getType());
    SmallVector<Value, 4> newDynamicSizes;
    createNewDynamicSizes(oldMemRefType, newMemRefType, layoutMap, allocOp, b,
                          newDynamicSizes);
    // Add the new dynamic sizes in the new AllocOp.
    newAlloc =
        b.create<memref::AllocOp>(allocOp->getLoc(), newMemRefType,
                                  newDynamicSizes, allocOp->getAlignmentAttr());
  } else {
    newAlloc = b.create<memref::AllocOp>(allocOp->getLoc(), newMemRefType,
                                         allocOp->getAlignmentAttr());
  }
  // Replace all uses of the old memref with the new one; bail out on failure
  // (e.g., non-dereferencing uses that can't be rewritten).
  // ...
  // All remaining uses must be deallocs, which don't need replacement.
  assert(llvm::all_of(oldMemRef.getUsers(), [&](Operation *op) {
    return hasSingleEffect<MemoryEffects::Free>(op, oldMemRef);
  }));
  oldMemRef.replaceAllUsesWith(newAlloc);
  allocOp->erase();
  return success();
}
/// Normalizes `memrefType` so that the affine layout map of the memref is
/// transformed to an identity map with a new shape being computed for the
/// normalized memref type. Returns the old memref type if normalization is
/// not possible.
MemRefType mlir::affine::normalizeMemRefType(MemRefType memrefType) {
  unsigned rank = memrefType.getRank();
  if (rank == 0)
    return memrefType;

  if (memrefType.getLayout().isIdentity()) {
    // Either no map is associated with this memref or the memref has a
    // trivial (identity) map: nothing to normalize.
    return memrefType;
  }
  AffineMap layoutMap = memrefType.getLayout().getAffineMap();
  unsigned numSymbolicOperands = layoutMap.getNumSymbols();

  // Dynamic shapes are supported only for tiled layout maps.
  SmallVector<std::tuple<AffineExpr, unsigned, unsigned>> tileSizePos;
  (void)getTileSizePos(layoutMap, tileSizePos);
  if (memrefType.getNumDynamicDims() > 0 && tileSizePos.empty())
    return memrefType;

  // Bound the static dimensions in a constraint system; record which input
  // dimensions are dynamic.
  FlatAffineValueConstraints fac(rank, numSymbolicOperands);
  SmallVector<unsigned, 4> memrefTypeDynDims;
  ArrayRef<int64_t> shape = memrefType.getShape();
  for (unsigned d = 0; d < rank; ++d) {
    if (shape[d] > 0) {
      fac.addBound(BoundType::LB, d, 0);
      fac.addBound(BoundType::UB, d, shape[d] - 1);
    } else {
      memrefTypeDynDims.emplace_back(d);
    }
  }
  // Compose the layout map with the index space to derive bounds for the new
  // (physical) index space.
  unsigned newRank = layoutMap.getNumResults();
  // ...
  SmallVector<int64_t, 4> newShape(newRank);
  for (unsigned d = 0; d < newRank; ++d) {
    // Dimensions involving a dynamic input dimension stay dynamic.
    if (isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims)) {
      newShape[d] = ShapedType::kDynamic;
      continue;
    }
    // The lower bound is always zero; the size is the constant upper bound
    // plus 1. With symbols, a constant upper bound may not exist.
    std::optional<int64_t> ubConst = fac.getConstantBound64(BoundType::UB, d);
    if (!ubConst.has_value() || *ubConst < 0) {
      LLVM_DEBUG(llvm::dbgs()
                 << "can't normalize map due to unknown/invalid upper bound");
      return memrefType;
    }
    newShape[d] = *ubConst + 1;
  }

  // Create the new memref type with an identity layout.
  auto newMemRefType =
      MemRefType::Builder(memrefType)
          .setShape(newShape)
          .setLayout(AffineMapAttr::get(AffineMap::getMultiDimIdentityMap(
              newRank, memrefType.getContext())));
  return newMemRefType;
}
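// For illustration (not part of the original file):
//
//   memref<16xf32, affine_map<(d0) -> (d0 floordiv 4, d0 mod 4)>>
//
// normalizes to memref<4x4xf32>: from 0 <= d0 <= 15, the constraint system
// gives 0 <= d0 floordiv 4 <= 3 and 0 <= d0 mod 4 <= 3.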
/// Generate the IR to delinearize `linearIndex` given the `basis` and return
/// the multi-index.
FailureOr<SmallVector<Value>>
mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
                               ArrayRef<Value> basis, bool hasOuterBound) {
  // The outermost basis element, if present, does not participate in the
  // divisors.
  if (hasOuterBound)
    basis = basis.drop_front();

  // Build the running products of the basis elements, innermost first.
  SmallVector<Value> divisors;
  OpFoldResult basisProd = b.getIndexAttr(1);
  for (Value basisElem : llvm::reverse(basis)) {
    FailureOr<OpFoldResult> nextProd =
        composedAffineMultiply(b, loc, basisElem, basisProd);
    if (failed(nextProd))
      return failure();
    basisProd = *nextProd;
    divisors.push_back(getValueOrCreateConstantIndexOp(b, loc, basisProd));
  }

  // Compute the quotient/remainder chain, largest divisor first.
  SmallVector<Value> results;
  results.reserve(divisors.size() + 1);
  Value residual = linearIndex;
  for (Value divisor : llvm::reverse(divisors)) {
    DivModValue divMod = getDivMod(b, loc, residual, divisor);
    results.push_back(divMod.quotient);
    residual = divMod.remainder;
  }
  results.push_back(residual);
  return results;
}

// The same logic, for a basis given as OpFoldResults.
FailureOr<SmallVector<Value>>
mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
                               ArrayRef<OpFoldResult> basis,
                               bool hasOuterBound) {
  if (hasOuterBound)
    basis = basis.drop_front();

  SmallVector<Value> divisors;
  OpFoldResult basisProd = b.getIndexAttr(1);
  for (OpFoldResult basisElem : llvm::reverse(basis)) {
    FailureOr<OpFoldResult> nextProd =
        composedAffineMultiply(b, loc, basisElem, basisProd);
    if (failed(nextProd))
      return failure();
    basisProd = *nextProd;
    divisors.push_back(getValueOrCreateConstantIndexOp(b, loc, basisProd));
  }

  SmallVector<Value> results;
  results.reserve(divisors.size() + 1);
  Value residual = linearIndex;
  for (Value divisor : llvm::reverse(divisors)) {
    DivModValue divMod = getDivMod(b, loc, residual, divisor);
    results.push_back(divMod.quotient);
    residual = divMod.remainder;
  }
  results.push_back(residual);
  return results;
}
/// Compute the linear index from the provided multi-index and basis, assuming
/// a row-major strided layout.
OpFoldResult mlir::affine::linearizeIndex(ArrayRef<OpFoldResult> multiIndex,
                                          ArrayRef<OpFoldResult> basis,
                                          ImplicitLocOpBuilder &builder) {
  assert(multiIndex.size() == basis.size() ||
         multiIndex.size() == basis.size() + 1);
  Location loc = builder.getLoc();
  SmallVector<AffineExpr> basisAffine;

  // Add a fake initial size in order to make the later index linearization
  // computations line up when no outer bound is given.
  if (multiIndex.size() == basis.size() + 1)
    basisAffine.push_back(getAffineConstantExpr(1, builder.getContext()));

  for (size_t i = 0; i < basis.size(); ++i) {
    basisAffine.push_back(getAffineSymbolExpr(i, builder.getContext()));
  }

  // Compute the symbolic strides and materialize them by composing the basis
  // values into single-result affine applies.
  SmallVector<AffineExpr> stridesAffine = computeStrides(basisAffine);
  SmallVector<OpFoldResult> strides;
  strides.reserve(stridesAffine.size());
  llvm::transform(stridesAffine, std::back_inserter(strides),
                  [&builder, &basis, loc](AffineExpr strideExpr) {
                    return makeComposedFoldedAffineApply(
                        builder, loc, strideExpr, basis);
                  });

  // Fold the linear index expression sum(multiIndex[i] * strides[i]).
  auto [linearIndexExpr, multiIndexAndStrides] = computeLinearIndex(
      OpFoldResult(builder.getIndexAttr(0)), strides, multiIndex);
  return makeComposedFoldedAffineApply(builder, loc, linearIndexExpr,
                                       multiIndexAndStrides);
}
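// For illustration (not part of the original file): delinearizing %lin with
// basis (2, 3, 5) emits floordiv/mod chains computing
//
//   i0 = %lin floordiv 15, i1 = (%lin mod 15) floordiv 5, i2 = %lin mod 5
//
// and linearizeIndex((i0, i1, i2), (2, 3, 5)) folds back to
// i0 * 15 + i1 * 5 + i2.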