#include "llvm/Support/LogicalResult.h"

#define DEBUG_TYPE "affine-utils"

using namespace affine;
using namespace presburger;
/// Visits affine expressions recursively and builds the sequence of standard
/// arithmetic operations that computes them.
class AffineApplyExpander
    : public AffineExprVisitor<AffineApplyExpander, Value> {
public:
  AffineApplyExpander(OpBuilder &builder, ValueRange dimValues,
                      ValueRange symbolValues, Location loc)
      : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
        loc(loc) {}
  template <typename OpTy>
  Value buildBinaryExpr(AffineBinaryOpExpr expr,
                        arith::IntegerOverflowFlags overflowFlags =
                            arith::IntegerOverflowFlags::none) {
    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    if (!lhs || !rhs)
      return nullptr;
    auto op = builder.create<OpTy>(loc, lhs, rhs, overflowFlags);
    return op.getResult();
  }

  Value visitAddExpr(AffineBinaryOpExpr expr) {
    return buildBinaryExpr<arith::AddIOp>(expr);
  }

  Value visitMulExpr(AffineBinaryOpExpr expr) {
    return buildBinaryExpr<arith::MulIOp>(expr,
                                          arith::IntegerOverflowFlags::nsw);
  }
  /// Lowers a mod b (with b > 0) as:
  ///   let remainder = srem a, b in
  ///     select (remainder < 0), remainder + b, remainder.
  Value visitModExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "modulo by non-positive value is not supported");
        return nullptr;
      }
    }

    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value remainder = builder.create<arith::RemSIOp>(loc, lhs, rhs);
    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value isRemainderNegative = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::slt, remainder, zeroCst);
    Value correctedRemainder =
        builder.create<arith::AddIOp>(loc, remainder, rhs);
    return builder.create<arith::SelectOp>(
        loc, isRemainderNegative, correctedRemainder, remainder);
  }
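  // [Example] The sequence above relies on the scalar identity
  //   mod(a, b) = let r = a srem b in (r < 0 ? r + b : r)   for b > 0.
  // A minimal sketch of the same computation (hypothetical helper, not part
  // of this file):
  //   long loweredMod(long lhs, long rhs) {
  //     long remainder = lhs % rhs;                         // arith.remsi
  //     return remainder < 0 ? remainder + rhs : remainder; // cmpi+addi+select
  //   }
  // loweredMod(-7, 3) == 2, whereas C's -7 % 3 == -1.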
  /// Floor division (rounds towards negative infinity). For positive
  /// divisors, implemented without branching as:
  ///   a floordiv b =
  ///     let negative = a < 0 in
  ///     let absolute = negative ? -a - 1 : a in
  ///     let quotient = absolute / b in
  ///       negative ? -quotient - 1 : quotient.
  Value visitFloorDivExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "division by non-positive value is not supported");
        return nullptr;
      }
    }
    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value noneCst = builder.create<arith::ConstantIndexOp>(loc, -1);
    Value negative = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::slt, lhs, zeroCst);
    Value negatedDecremented = builder.create<arith::SubIOp>(loc, noneCst, lhs);
    Value dividend =
        builder.create<arith::SelectOp>(loc, negative, negatedDecremented, lhs);
    Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
    Value correctedQuotient =
        builder.create<arith::SubIOp>(loc, noneCst, quotient);
    Value result = builder.create<arith::SelectOp>(loc, negative,
                                                   correctedQuotient, quotient);
    return result;
  }
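  // [Example] For b > 0 the above computes the identity
  //   floordiv(a, b) = a < 0 ? -1 - ((-1 - a) / b) : a / b
  // with truncating `/`. Scalar sketch (hypothetical helper):
  //   long loweredFloorDiv(long lhs, long rhs) {
  //     long dividend = lhs < 0 ? -1 - lhs : lhs;
  //     long quotient = dividend / rhs;                     // arith.divsi
  //     return lhs < 0 ? -1 - quotient : quotient;
  //   }
  // loweredFloorDiv(-1, 2) == -1 and loweredFloorDiv(-4, 2) == -2, matching
  // rounding towards negative infinity.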
  /// Ceiling division (rounds towards positive infinity). For positive
  /// divisors:
  ///   a ceildiv b =
  ///     let negative = a <= 0 in
  ///     let absolute = negative ? -a : a - 1 in
  ///     let quotient = absolute / b in
  ///       negative ? -quotient : quotient + 1.
  Value visitCeilDivExpr(AffineBinaryOpExpr expr) {
    if (auto rhsConst = dyn_cast<AffineConstantExpr>(expr.getRHS())) {
      if (rhsConst.getValue() <= 0) {
        emitError(loc, "division by non-positive value is not supported");
        return nullptr;
      }
    }
    auto lhs = visit(expr.getLHS());
    auto rhs = visit(expr.getRHS());
    assert(lhs && rhs && "unexpected affine expr lowering failure");

    Value zeroCst = builder.create<arith::ConstantIndexOp>(loc, 0);
    Value oneCst = builder.create<arith::ConstantIndexOp>(loc, 1);
    Value nonPositive = builder.create<arith::CmpIOp>(
        loc, arith::CmpIPredicate::sle, lhs, zeroCst);
    Value negated = builder.create<arith::SubIOp>(loc, zeroCst, lhs);
    Value decremented = builder.create<arith::SubIOp>(loc, lhs, oneCst);
    Value dividend =
        builder.create<arith::SelectOp>(loc, nonPositive, negated, decremented);
    Value quotient = builder.create<arith::DivSIOp>(loc, dividend, rhs);
    Value negatedQuotient =
        builder.create<arith::SubIOp>(loc, zeroCst, quotient);
    Value incrementedQuotient =
        builder.create<arith::AddIOp>(loc, quotient, oneCst);
    Value result = builder.create<arith::SelectOp>(
        loc, nonPositive, negatedQuotient, incrementedQuotient);
    return result;
  }
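  // [Example] For b > 0 the above computes the identity
  //   ceildiv(a, b) = a <= 0 ? -((-a) / b) : ((a - 1) / b) + 1
  // with truncating `/`. Scalar sketch (hypothetical helper):
  //   long loweredCeilDiv(long lhs, long rhs) {
  //     long dividend = lhs <= 0 ? -lhs : lhs - 1;
  //     long quotient = dividend / rhs;                     // arith.divsi
  //     return lhs <= 0 ? -quotient : quotient + 1;
  //   }
  // loweredCeilDiv(5, 2) == 3, loweredCeilDiv(-5, 2) == -2, and
  // loweredCeilDiv(0, 2) == 0, matching rounding towards positive infinity.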
  Value visitConstantExpr(AffineConstantExpr expr) {
    auto op = builder.create<arith::ConstantIndexOp>(loc, expr.getValue());
    return op.getResult();
  }

  Value visitDimExpr(AffineDimExpr expr) {
    assert(expr.getPosition() < dimValues.size() &&
           "affine dim position out of range");
    return dimValues[expr.getPosition()];
  }

  Value visitSymbolExpr(AffineSymbolExpr expr) {
    assert(expr.getPosition() < symbolValues.size() &&
           "symbol dim position out of range");
    return symbolValues[expr.getPosition()];
  }
  // ... (builder, dimValues, symbolValues, loc members)
};

/// Emit code that computes the given affine expression using standard
/// arithmetic operations applied to the provided dimension and symbol values.
mlir::Value mlir::affine::expandAffineExpr(OpBuilder &builder, Location loc,
                                           AffineExpr expr,
                                           ValueRange dimValues,
                                           ValueRange symbolValues) {
  return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
}

/// Create a sequence of operations that implement the `affineMap` applied to
/// the given `operands` (as if it were an AffineApplyOp).
std::optional<SmallVector<Value, 8>>
mlir::affine::expandAffineMap(OpBuilder &builder, Location loc,
                              AffineMap affineMap, ValueRange operands) {
  auto numDims = affineMap.getNumDims();
  auto expanded = llvm::to_vector<8>(llvm::map_range(
      affineMap.getResults(), [numDims, &builder, loc, operands](
                                  AffineExpr expr) {
        return expandAffineExpr(builder, loc, expr,
                                operands.take_front(numDims),
                                operands.drop_front(numDims));
      }));
  if (llvm::all_of(expanded, [](Value v) { return v; }))
    return expanded;
  return std::nullopt;
}
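// [Example] A hedged usage sketch of expandAffineMap (the values `i`, `j`
// and this helper are hypothetical, not part of this file): expand the map
// (d0, d1) -> (d0 + d1, d0 floordiv 4) into arith ops at the insertion point.
static void expandAffineMapExample(OpBuilder &builder, Location loc, Value i,
                                   Value j) {
  AffineExpr d0, d1;
  bindDims(builder.getContext(), d0, d1);
  auto map = AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0,
                            {d0 + d1, d0.floorDiv(4)}, builder.getContext());
  SmallVector<Value, 2> operands{i, j};
  if (std::optional<SmallVector<Value, 8>> values =
          expandAffineMap(builder, loc, map, operands))
    (void)(*values)[0]; // one Value per map result; use as needed
}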
/// Promotes the `then` or the `else` block of `ifOp` (depending on whether
/// `elseBlock` is false or true) into `ifOp`'s containing block.
static void promoteIfBlock(AffineIfOp ifOp, bool elseBlock) {
  if (elseBlock)
    assert(ifOp.hasElse() && "else block expected");

  Block *destBlock = ifOp->getBlock();
  Block *srcBlock = elseBlock ? ifOp.getElseBlock() : ifOp.getThenBlock();
  destBlock->getOperations().splice(
      Block::iterator(ifOp), srcBlock->getOperations(), srcBlock->begin(),
      std::prev(srcBlock->end()));
  ifOp.erase();
}
// getOutermostInvariantForOp: walk up the parent chain; stop at the first
// loop whose IV appears in the if op's operands.
    if (auto forOp = dyn_cast<AffineForOp>(parentOp)) {
      if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
        break;
    } else if (auto parallelOp = dyn_cast<AffineParallelOp>(parentOp)) {
      if (llvm::any_of(parallelOp.getIVs(), [&](Value iv) {
            return llvm::is_contained(ifOperands, iv);
          }))
        break;
    } else if (!isa<AffineIfOp>(parentOp)) {
      // Won't walk up past anything other than affine.for/if ops.
      break;
    }
  // Nothing to hoist over.
  if (hoistOverOp == ifOp)
    return ifOp;
  // ...
  auto hoistedIfOp = b.create<AffineIfOp>(ifOp.getLoc(), ifOp.getIntegerSet(),
                                          ifOp.getOperands(),
                                          /*withElseRegion=*/true);
  // Create a unique attribute to identify the clone of `ifOp` inside the else
  // version of the hoisted-over op.
  StringAttr idForIfOp = b.getStringAttr("__mlir_if_hoisting");
  // ...
  hoistOverOpClone = b.clone(*hoistOverOp, operandMap);
  // Move the `then` version to the hoisted if op's then block.
  auto *thenBlock = hoistedIfOp.getThenBlock();
  thenBlock->getOperations().splice(thenBlock->begin(),
                                    hoistOverOp->getBlock()->getOperations(),
                                    Block::iterator(hoistOverOp));

  // Find the clone of the original affine.if op in the else version.
  AffineIfOp ifCloneInElse;
  hoistOverOpClone->walk([&](AffineIfOp ifClone) {
    if (!ifClone->getAttr(idForIfOp))
      return WalkResult::advance();
    ifCloneInElse = ifClone;
    return WalkResult::interrupt();
  });
  assert(ifCloneInElse && "if op clone should exist");
  // Promote the else block of the clone if the original had one; otherwise
  // erase the clone entirely.
  if (!ifCloneInElse.hasElse())
    ifCloneInElse.erase();
  else
    promoteIfBlock(ifCloneInElse, /*elseBlock=*/true);

  // Move the else version into the else block of the hoisted if op.
  auto *elseBlock = hoistedIfOp.getElseBlock();
  elseBlock->getOperations().splice(
      elseBlock->begin(), hoistOverOpClone->getBlock()->getOperations(),
      Block::iterator(hoistOverOpClone));
/// Replaces a parallel affine.for op with a 1-d affine.parallel op.
LogicalResult mlir::affine::affineParallelize(
    AffineForOp forOp, ArrayRef<LoopReduction> parallelReductions,
    AffineParallelOp *resOp) {
  // Fail early if there are iter arguments that are not reductions.
  unsigned numReductions = parallelReductions.size();
  if (numReductions != forOp.getNumIterOperands())
    return failure();

  Location loc = forOp.getLoc();
  OpBuilder outsideBuilder(forOp);
  AffineMap lowerBoundMap = forOp.getLowerBoundMap();
  ValueRange lowerBoundOperands = forOp.getLowerBoundOperands();
  AffineMap upperBoundMap = forOp.getUpperBoundMap();
  ValueRange upperBoundOperands = forOp.getUpperBoundOperands();

  // Create an empty 1-D affine.parallel op with the same bounds and step.
  auto reducedValues = llvm::to_vector<4>(llvm::map_range(
      parallelReductions, [](const LoopReduction &red) { return red.value; }));
  auto reductionKinds = llvm::to_vector<4>(llvm::map_range(
      parallelReductions, [](const LoopReduction &red) { return red.kind; }));
  AffineParallelOp newPloop = outsideBuilder.create<AffineParallelOp>(
      loc, ValueRange(reducedValues).getTypes(), reductionKinds,
      llvm::ArrayRef(lowerBoundMap), lowerBoundOperands,
      llvm::ArrayRef(upperBoundMap), upperBoundOperands,
      llvm::ArrayRef(forOp.getStepAsInt()));
  // Steal the body of the affine.for op.
  newPloop.getRegion().takeBody(forOp.getRegion());
  Operation *yieldOp = &newPloop.getBody()->back();

  // Handle the initial values of reductions: the parallel loop starts from
  // the reduction's neutral value, so fold the original init in afterwards.
  SmallVector<Value> newResults;
  newResults.reserve(numReductions);
  for (unsigned i = 0; i < numReductions; ++i) {
    Value init = forOp.getInits()[i];
    // Only single-op reductions are handled at the moment.
    Operation *reductionOp = yieldOp->getOperand(i).getDefiningOp();
    assert(reductionOp && "yielded value is expected to be produced by an op");
    // ...
    reductionOp->setOperands({init, newPloop->getResult(i)});
    forOp->getResult(i).replaceAllUsesWith(reductionOp->getResult(0));
  }

  // Drop the reduction block arguments; only the IV remains.
  unsigned numIVs = 1;
  // ...
  newPloop.getBody()->eraseArguments(numIVs, numReductions);

  forOp.erase();
  if (resOp)
    *resOp = newPloop;
  return success();
}
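// [Example] A hedged usage sketch: parallelize every affine.for in a
// function that analysis proves parallel (assumes `isLoopParallel` from the
// affine analysis utilities; this helper is hypothetical).
static void parallelizeAllExample(func::FuncOp f) {
  SmallVector<AffineForOp> loops;
  f.walk([&](AffineForOp forOp) { loops.push_back(forOp); });
  for (AffineForOp forOp : loops) {
    SmallVector<LoopReduction> reductions;
    if (isLoopParallel(forOp, &reductions))
      (void)affineParallelize(forOp, reductions);
  }
}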
  // Bail out early if the ifOp returns a result.
  if (ifOp.getNumResults() != 0)
    return failure();
  // ...
  AffineIfOp::getCanonicalizationPatterns(patterns, ifOp.getContext());
  // ...
  (void)applyOpPatternsGreedily(ifOp.getOperation(), frozenPatterns,
                                /*...*/);
  // ...
  assert(llvm::all_of(ifOp.getOperands(),
                      [](Value v) {
                        return isTopLevelValue(v) || isAffineInductionVar(v);
                      }) &&
         "operands not composed");
  // The same op back means there was nothing to hoist over.
  if (hoistedIfOp == ifOp)
    return failure();
  if (e == dim)
    return positivePath ? min : max;
  if (auto bin = dyn_cast<AffineBinaryOpExpr>(e)) {
    AffineExpr lhs = bin.getLHS();
    AffineExpr rhs = bin.getRHS();
    // ...
    auto c1 = dyn_cast<AffineConstantExpr>(bin.getLHS());
    auto c2 = dyn_cast<AffineConstantExpr>(bin.getRHS());
    // A negative constant flips the path: min and max swap roles below it.
    if (c1 && c1.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), c1, substWithMin(rhs, dim, min, max, !positivePath));
    if (c2 && c2.getValue() < 0)
      return getAffineBinaryOpExpr(
          bin.getKind(), substWithMin(lhs, dim, min, max, !positivePath), c2);
/// Normalizes an affine.parallel op so that lower bounds are 0 and steps are
/// 1.
void mlir::affine::normalizeAffineParallel(AffineParallelOp op) {
  // Loops with min/max in bounds are not normalized at the moment.
  if (op.hasMinMaxBounds())
    return;

  AffineMap lbMap = op.getLowerBoundsMap();
  SmallVector<int64_t, 8> steps = op.getSteps();
  // No need to do any work if the parallel op is already normalized.
  bool isAlreadyNormalized =
      llvm::all_of(llvm::zip(steps, lbMap.getResults()), [](auto tuple) {
        int64_t step = std::get<0>(tuple);
        auto lbExpr = dyn_cast<AffineConstantExpr>(std::get<1>(tuple));
        return lbExpr && lbExpr.getValue() == 0 && step == 1;
      });
  if (isAlreadyNormalized)
    return;

  // Compute the ranges ub - lb; the new upper bounds are range ceildiv step.
  AffineValueMap ranges;
  AffineValueMap::difference(op.getUpperBoundsValueMap(),
                             op.getLowerBoundsValueMap(), &ranges);
  auto builder = OpBuilder::atBlockBegin(op.getBody());
  auto zeroExpr = builder.getAffineConstantExpr(0);
  SmallVector<AffineExpr, 8> lbExprs;
  SmallVector<AffineExpr, 8> ubExprs;
  for (unsigned i = 0, e = steps.size(); i < e; ++i) {
    int64_t step = steps[i];

    // Adjust the lower bound to be 0.
    lbExprs.push_back(zeroExpr);

    // Adjust the upper bound expression: 'range ceildiv step'.
    AffineExpr ubExpr = ranges.getResult(i).ceilDiv(step);
    ubExprs.push_back(ubExpr);

    // Compute the replacement IV expression: 'lb + iv * step'.
    BlockArgument iv = op.getBody()->getArgument(i);
    AffineExpr lbExpr = lbMap.getResult(i);
    unsigned nDims = lbMap.getNumDims();
    auto expr = lbExpr + builder.getAffineDimExpr(nDims) * step;
    auto map = AffineMap::get(nDims + 1, lbMap.getNumSymbols(), expr);

    // Rewrite all uses of the original IV through an affine.apply that later
    // canonicalizations will simplify.
    OperandRange lbOperands = op.getLowerBoundsOperands();
    OperandRange dimOperands = lbOperands.take_front(nDims);
    OperandRange symbolOperands = lbOperands.drop_front(nDims);
    SmallVector<Value, 8> applyOperands{dimOperands};
    applyOperands.push_back(iv);
    applyOperands.append(symbolOperands.begin(), symbolOperands.end());
    auto apply = builder.create<AffineApplyOp>(op.getLoc(), map, applyOperands);
    iv.replaceAllUsesExcept(apply, apply);
  }

  SmallVector<int64_t, 8> newSteps(op.getNumDims(), 1);
  op.setSteps(newSteps);
  auto newLowerMap = AffineMap::get(/*dimCount=*/0, /*symbolCount=*/0,
                                    lbExprs, op.getContext());
  op.setLowerBounds({}, newLowerMap);
  auto newUpperMap = AffineMap::get(ranges.getNumDims(), ranges.getNumSymbols(),
                                    ubExprs, op.getContext());
  op.setUpperBounds(ranges.getOperands(), newUpperMap);
}
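// [Example] The rewrite above is the classic normalization
//   for (i = lb; i < ub; i += step)
//     ==> for (j = 0; j < ceildiv(ub - lb, step); ++j) with i = lb + j * step.
// A scalar sketch of the trip-count identity (hypothetical helper):
//   long normalizedTripCount(long lb, long ub, long step) {
//     long range = ub - lb;
//     return range <= 0 ? 0 : (range + step - 1) / step; // ceildiv
//   }
// normalizedTripCount(2, 11, 3) == 3, visiting i = 2, 5, 8.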
/// Normalizes an affine.for op: zero lower bound, unit step.
LogicalResult mlir::affine::normalizeAffineFor(AffineForOp op,
                                               bool promoteSingleIter) {
  if (promoteSingleIter && succeeded(promoteIfSingleIteration(op)))
    return success();

  // Check if the loop is already normalized.
  if (op.hasConstantLowerBound() && (op.getConstantLowerBound() == 0) &&
      (op.getStepAsInt() == 1))
    return success();

  // A lower bound with multiple results implies a `max`; not supported.
  if (op.getLowerBoundMap().getNumResults() != 1)
    return failure();

  Location loc = op.getLoc();
  OpBuilder opBuilder(op);
  int64_t origLoopStep = op.getStepAsInt();

  // Construct the new upper bound as (ub - lb) ceildiv step: pad the lower
  // bound map so its dims/symbols line up with the upper bound map's, keeping
  // the single result op.getLowerBoundMap().getResult(0).
  AffineMap oldLbMap = op.getLowerBoundMap();
  // ...
  AffineValueMap paddedLbValueMap(paddedLbMap, op.getLowerBoundOperands());
  AffineValueMap ubValueMap(op.getUpperBoundMap(), op.getUpperBoundOperands());
  AffineValueMap newUbValueMap;
  AffineValueMap::difference(ubValueMap, paddedLbValueMap, &newUbValueMap);
  // Each difference expression is taken ceildiv the original step.
  for (unsigned i = 0; i < numResult; ++i)
    newUbExprs.push_back(newUbValueMap.getResult(i).ceilDiv(origLoopStep));
  // ...
  op.setUpperBound(newUbValueMap.getOperands(), newUbMap);

  // Compute the value of the old IV as 'lb + newIV * step' via a
  // canonicalized affine.apply and substitute it everywhere but the apply.
  // ...
  (void)newIvToOldIvMap.canonicalize();
  auto newIV = opBuilder.create<AffineApplyOp>(
      loc, newIvToOldIvMap.getAffineMap(), newIvToOldIvMap.getOperands());
  op.getInductionVar().replaceAllUsesExcept(newIV->getResult(0), newIV);
  return success();
}
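// [Example] A hedged usage sketch: normalize every affine.for in a function,
// promoting single-iteration loops along the way (hypothetical helper).
static void normalizeAllForOpsExample(func::FuncOp f) {
  SmallVector<AffineForOp> loops;
  f.walk([&](AffineForOp op) { loops.push_back(op); });
  for (AffineForOp op : loops)
    (void)normalizeAffineFor(op, /*promoteSingleIter=*/true);
}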
/// Returns true if `srcMemOp` may have an effect on `destMemOp` within the
/// scope of the outermost `minSurroundingLoops` loops that surround them.
static bool mayHaveEffect(Operation *srcMemOp, Operation *destMemOp,
                          unsigned minSurroundingLoops) {
  // ...
  for (unsigned d = nsLoops + 1; d > minSurroundingLoops; d--) {
    DependenceResult result = checkMemrefAccessDependence(
        srcAccess, destAccess, d, &dependenceConstraints,
        /*dependenceComponents=*/nullptr);
/// Ensure that all operations that could be executed after `start`
/// (non-inclusive) and prior to `memOp` (e.g., on a control-flow/op path
/// between the operations) do not have the effect `EffectType` on `memOp`.
template <typename EffectType, typename T>
bool mlir::affine::hasNoInterveningEffect(
    Operation *start, T memOp,
    llvm::function_ref<bool(Value, Value)> mayAlias) {
  // Whether an intervening operation could have impacted memOp.
  bool hasSideEffect = false;

  // Check whether the effect on memOp can be caused by a given operation op.
  Value memref = memOp.getMemRef();
  std::function<void(Operation *)> checkOperation = [&](Operation *op) {
    // If the effect has already been found, early exit.
    if (hasSideEffect)
      return;

    if (auto memEffect = dyn_cast<MemoryEffectOpInterface>(op)) {
      SmallVector<MemoryEffects::EffectInstance, 1> effects;
      memEffect.getEffects(effects);

      bool opMayHaveEffect = false;
      for (auto effect : effects) {
        // If op causes EffectType on a potentially aliasing location for
        // memOp, mark as having the effect.
        if (isa<EffectType>(effect.getEffect())) {
          if (effect.getValue() && effect.getValue() != memref &&
              !mayAlias(effect.getValue(), memref))
            continue;
          opMayHaveEffect = true;
          break;
        }
      }

      if (!opMayHaveEffect)
        return;

      // If the side effect comes from an affine read or write, try to prove
      // that the side-effecting `op` cannot reach `memOp`.
      if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
        // Only stores sharing fewer than minSurroundingLoops common loops
        // with both ops can intervene between `start` and `memOp`.
        unsigned minSurroundingLoops =
            getNumCommonSurroundingLoops(*start, *memOp);
        if (mayHaveEffect(op, memOp, minSurroundingLoops))
          hasSideEffect = true;
        return;
      }

      // An op with a memory effect that we cannot reason about further.
      hasSideEffect = true;
      return;
    }

    if (op->hasTrait<OpTrait::HasRecursiveMemoryEffects>()) {
      // Recurse into the regions and check the nested operations.
      for (Region &region : op->getRegions())
        for (Block &block : region)
          for (Operation &nested : block)
            checkOperation(&nested);
      return;
    }

    // Otherwise, conservatively assume a side effect.
    hasSideEffect = true;
  };

  // Check all paths from ancestor op `parent` to the operation `to`;
  // conservatively, the entire parent op is checked.
  auto until = [&](Operation *parent, Operation *to) {
    assert(parent->isAncestor(to));
    checkOperation(parent);
  };

  // Check all paths from operation `from` to operation `untilOp`.
  std::function<void(Operation *, Operation *)> recur =
      [&](Operation *from, Operation *untilOp) {
        assert(
            from->getParentRegion()->isAncestor(untilOp->getParentRegion()) &&
            "Checking for side effect between two operations without a common "
            "ancestor");

        // If in different regions, recurse to the parent of `untilOp` and
        // then descend from that parent to `untilOp` itself.
        if (from->getParentRegion() != untilOp->getParentRegion()) {
          recur(from, untilOp->getParentOp());
          until(untilOp->getParentOp(), untilOp);
          return;
        }

        // Same region: first scan the remainder of `from`'s block, then
        // traverse successor blocks until hitting `untilOp`.
        SmallVector<Block *, 2> todoBlocks;
        for (auto iter = ++from->getIterator(), end = from->getBlock()->end();
             iter != end && &*iter != untilOp; ++iter) {
          checkOperation(&*iter);
        }
        if (untilOp->getBlock() != from->getBlock())
          for (Block *succ : from->getBlock()->getSuccessors())
            todoBlocks.push_back(succ);

        SmallPtrSet<Block *, 4> done;
        while (!todoBlocks.empty()) {
          Block *blk = todoBlocks.pop_back_val();
          if (!done.insert(blk).second)
            continue;
          for (auto &op : *blk) {
            if (&op == untilOp)
              break;
            checkOperation(&op);
            if (&op == blk->getTerminator())
              for (Block *succ : blk->getSuccessors())
                todoBlocks.push_back(succ);
          }
        }
      };
  recur(start, memOp);
  return !hasSideEffect;
}
static void forwardStoreToLoad(
    AffineReadOpInterface loadOp, SmallVectorImpl<Operation *> &loadOpsToErase,
    SmallPtrSetImpl<Value> &memrefsToErase, DominanceInfo &domInfo,
    llvm::function_ref<bool(Value, Value)> mayAlias) {
  // The unique store candidate satisfying all forwarding conditions, if any.
  Operation *lastWriteStoreOp = nullptr;
  for (auto *user : loadOp.getMemRef().getUsers()) {
    auto storeOp = dyn_cast<AffineWriteOpInterface>(user);
    if (!storeOp)
      continue;
    MemRefAccess srcAccess(storeOp);
    MemRefAccess destAccess(loadOp);
    // The store must access the same element, dominate the load, reach it at
    // the innermost common loop depth, and see no intervening write.
    if (srcAccess != destAccess)
      continue;
    if (!domInfo.dominates(storeOp, loadOp))
      continue;
    if (!mustReachAtInnermost(srcAccess, destAccess))
      continue;
    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp,
                                                              mayAlias))
      continue;
    assert(lastWriteStoreOp == nullptr &&
           "multiple simultaneous replacement stores");
    lastWriteStoreOp = storeOp;
  }
  if (!lastWriteStoreOp)
    return;

  // Perform the actual store-to-load forwarding; the value types must match
  // (affine vector loads and stores may differ in shape).
  Value storeVal =
      cast<AffineWriteOpInterface>(lastWriteStoreOp).getValueToStore();
  if (storeVal.getType() != loadOp.getValue().getType())
    return;
  loadOp.getValue().replaceAllUsesWith(storeVal);
  // Record the memref and the load op for later erasure sweeps.
  memrefsToErase.insert(loadOp.getMemRef());
  loadOpsToErase.push_back(loadOp);
}
    auto writeB = dyn_cast<AffineWriteOpInterface>(user);
    if (!writeB)
      continue;
    // writeB must be a distinct store in the same region, to the same
    // element, with no read of the memref in between.
    if (writeB == writeA)
      continue;
    if (writeB->getParentRegion() != writeA->getParentRegion())
      continue;
    if (srcAccess != destAccess)
      continue;
    // ...
    if (!affine::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB,
                                                             mayAlias))
      continue;
    opsToErase.push_back(writeA);
static void loadCSE(AffineReadOpInterface loadA,
                    SmallVectorImpl<Operation *> &loadOpsToErase,
                    DominanceInfo &domInfo,
                    llvm::function_ref<bool(Value, Value)> mayAlias) {
  SmallVector<AffineReadOpInterface, 4> loadCandidates;
  for (auto *user : loadA.getMemRef().getUsers()) {
    auto loadB = dyn_cast<AffineReadOpInterface>(user);
    if (!loadB || loadB == loadA)
      continue;
    MemRefAccess srcAccess(loadB);
    MemRefAccess destAccess(loadA);
    // loadB must access the same element, dominate loadA, see no intervening
    // write, and produce a value of the same type.
    if (srcAccess != destAccess) {
      continue;
    }
    if (!domInfo.dominates(loadB, loadA))
      continue;
    if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(
            loadB.getOperation(), loadA, mayAlias))
      continue;
    if (loadB.getValue().getType() != loadA.getValue().getType())
      continue;
    loadCandidates.push_back(loadB);
  }

  // Of the legal candidates, use the one that dominates all others, to
  // minimize the need for subsequent CSE rounds.
  Value loadB;
  for (AffineReadOpInterface option : loadCandidates) {
    if (llvm::all_of(loadCandidates, [&](AffineReadOpInterface depStore) {
          return depStore == option ||
                 domInfo.dominates(option.getOperation(),
                                   depStore.getOperation());
        })) {
      loadB = option.getValue();
      break;
    }
  }

  if (loadB) {
    loadA.getValue().replaceAllUsesWith(loadB);
    loadOpsToErase.push_back(loadA);
  }
}
  auto mayAlias = [&](Value val1, Value val2) -> bool {
    return !aliasAnalysis.alias(val1, val2).isNo();
  };

  // Walk all loads and perform store-to-load forwarding.
  f.walk([&](AffineReadOpInterface loadOp) {
    forwardStoreToLoad(loadOp, opsToErase, memrefsToErase, domInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
  opsToErase.clear();

  // Walk all stores and perform unused store elimination.
  f.walk([&](AffineWriteOpInterface storeOp) {
    findUnusedStore(storeOp, opsToErase, postDomInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
  opsToErase.clear();

  // Check if the store-forwarded memrefs are now left with only stores and
  // deallocs, and if so, delete them entirely.
  for (auto memref : memrefsToErase) {
    // If the memref hasn't been locally alloc'ed, skip.
    Operation *defOp = memref.getDefiningOp();
    if (!defOp || !hasSingleEffect<MemoryEffects::Allocate>(defOp, memref))
      continue;
    if (llvm::any_of(memref.getUsers(), [&](Operation *ownerOp) {
          return !isa<AffineWriteOpInterface>(ownerOp) &&
                 !hasSingleEffect<MemoryEffects::Free>(ownerOp, memref);
        }))
      continue;

    // Erase all stores, the dealloc, and the alloc on the memref.
    for (auto *user : llvm::make_early_inc_range(memref.getUsers()))
      user->erase();
    defOp->erase();
  }

  // Run load CSE after store elimination; otherwise some stores are wrongly
  // seen as having an intervening effect.
  f.walk([&](AffineReadOpInterface loadOp) {
    loadCSE(loadOp, opsToErase, domInfo, mayAlias);
  });
  for (auto *op : opsToErase)
    op->erase();
}
static LogicalResult transformMemRefLoadWithReducedRank(
    Operation *op, Value oldMemRef, Value newMemRef, unsigned memRefOperandPos,
    ArrayRef<Value> extraIndices, ArrayRef<Value> extraOperands,
    ArrayRef<Value> symbolOperands, AffineMap indexRemap) {
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  unsigned oldMapNumInputs = oldMemRefRank;
  SmallVector<Value, 4> oldMapOperands(
      op->operand_begin() + memRefOperandPos + 1,
      op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
  SmallVector<Value, 4> oldMemRefOperands;
  oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());
  SmallVector<Value, 4> remapOperands;
  remapOperands.reserve(extraOperands.size() + oldMemRefRank +
                        symbolOperands.size());
  remapOperands.append(extraOperands.begin(), extraOperands.end());
  remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
  remapOperands.append(symbolOperands.begin(), symbolOperands.end());

  SmallVector<Value, 4> remapOutputs;
  remapOutputs.reserve(oldMemRefRank);
  SmallVector<Value, 4> affineApplyOps;

  OpBuilder builder(op);
  if (indexRemap &&
      indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
    // Remapped indices: one single-result affine.apply per result expr.
    for (auto resultExpr : indexRemap.getResults()) {
      auto singleResMap = AffineMap::get(
          indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                remapOperands);
      remapOutputs.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    // No remapping specified.
    remapOutputs.assign(remapOperands.begin(), remapOperands.end());
  }

  SmallVector<Value, 4> newMapOperands;
  newMapOperands.reserve(newMemRefRank);

  // Prepend 'extraIndices' in 'newMapOperands'.
  for (Value extraIndex : extraIndices) {
    assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
           "invalid memory op index");
    newMapOperands.push_back(extraIndex);
  }

  // Append 'remapOutputs' to 'newMapOperands'.
  newMapOperands.append(remapOutputs.begin(), remapOutputs.end());
  assert(newMapOperands.size() == newMemRefRank);

  // Construct the new operation using the new memref and map operands.
  OperationState state(op->getLoc(), op->getName());
  state.operands.reserve(newMapOperands.size() + extraIndices.size());
  state.operands.push_back(newMemRef);
  state.operands.append(newMapOperands.begin(), newMapOperands.end());

  state.types.reserve(op->getNumResults());
  for (auto result : op->getResults())
    state.types.push_back(result.getType());

  // Copy over the attributes from the old operation to the new one.
  for (auto namedAttr : op->getAttrs()) {
    state.attributes.push_back(namedAttr);
  }

  // Create the new operation and replace the old one.
  auto *repOp = builder.create(state);
  op->replaceAllUsesWith(repOp);
  op->erase();
  return success();
}
LogicalResult mlir::affine::replaceAllMemRefUsesWith(
    Value oldMemRef, Value newMemRef, Operation *op,
    ArrayRef<Value> extraIndices, AffineMap indexRemap,
    ArrayRef<Value> extraOperands, ArrayRef<Value> symbolOperands,
    bool allowNonDereferencingOps) {
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  (void)newMemRefRank; // unused in opt mode
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  (void)oldMemRefRank; // unused in opt mode
  if (indexRemap) {
    assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
           "symbolic operand count mismatch");
    assert(indexRemap.getNumInputs() ==
           extraOperands.size() + oldMemRefRank + symbolOperands.size());
    assert(indexRemap.getNumResults() + extraIndices.size() == newMemRefRank);
  } else {
    assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
  }

  // Assert same elemental type.
  assert(cast<MemRefType>(oldMemRef.getType()).getElementType() ==
         cast<MemRefType>(newMemRef.getType()).getElementType());

  SmallVector<unsigned, 2> usePositions;
  for (const auto &opEntry : llvm::enumerate(op->getOperands())) {
    if (opEntry.value() == oldMemRef)
      usePositions.push_back(opEntry.index());
  }

  // If the memref doesn't appear, nothing to do.
  if (usePositions.empty())
    return success();

  if (usePositions.size() > 1) {
    // TODO: extend this for the (rare) multiple-use case when needed.
    assert(false && "multiple dereferencing uses in a single op not supported");
    return failure();
  }

  unsigned memRefOperandPos = usePositions.front();

  OpBuilder builder(op);
  // Check whether op dereferences the memref through an affine access map.
  auto affMapAccInterface = dyn_cast<AffineMapAccessInterface>(op);
  if (!affMapAccInterface) {
    if (!allowNonDereferencingOps) {
      // The memref is used in a non-dereferencing context (potentially
      // escapes); no replacement in these cases.
      return failure();
    }

    // Check if it is a memref.load with a rank-reducing remap.
    auto memrefLoad = dyn_cast<memref::LoadOp>(op);
    bool isReductionLike =
        indexRemap.getNumResults() < indexRemap.getNumInputs();
    if (!memrefLoad || !isReductionLike) {
      op->setOperand(memRefOperandPos, newMemRef);
      return success();
    }

    return transformMemRefLoadWithReducedRank(
        op, oldMemRef, newMemRef, memRefOperandPos, extraIndices, extraOperands,
        symbolOperands, indexRemap);
  }
  // Perform index remapping for affine dereferencing ops.
  NamedAttribute oldMapAttrPair =
      affMapAccInterface.getAffineMapAttrForMemRef(oldMemRef);
  AffineMap oldMap = cast<AffineMapAttr>(oldMapAttrPair.getValue()).getValue();
  unsigned oldMapNumInputs = oldMap.getNumInputs();
  SmallVector<Value, 4> oldMapOperands(
      op->operand_begin() + memRefOperandPos + 1,
      op->operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
  // Expand the old access map applied to its operands into single-result
  // affine.apply ops, unless it is the identity.
  SmallVector<Value, 4> oldMemRefOperands;
  SmallVector<Value, 4> affineApplyOps;
  oldMemRefOperands.reserve(oldMemRefRank);
  if (oldMap != builder.getMultiDimIdentityMap(oldMap.getNumDims())) {
    for (auto resultExpr : oldMap.getResults()) {
      auto singleResMap = AffineMap::get(oldMap.getNumDims(),
                                         oldMap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                oldMapOperands);
      oldMemRefOperands.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());
  }

  // Construct new indices as a remap of the old ones if a remapping has been
  // provided.
  SmallVector<Value, 4> remapOperands;
  remapOperands.reserve(extraOperands.size() + oldMemRefRank +
                        symbolOperands.size());
  remapOperands.append(extraOperands.begin(), extraOperands.end());
  remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
  remapOperands.append(symbolOperands.begin(), symbolOperands.end());

  SmallVector<Value, 4> remapOutputs;
  remapOutputs.reserve(oldMemRefRank);
  if (indexRemap &&
      indexRemap != builder.getMultiDimIdentityMap(indexRemap.getNumDims())) {
    // Remapped indices.
    for (auto resultExpr : indexRemap.getResults()) {
      auto singleResMap = AffineMap::get(
          indexRemap.getNumDims(), indexRemap.getNumSymbols(), resultExpr);
      auto afOp = builder.create<AffineApplyOp>(op->getLoc(), singleResMap,
                                                remapOperands);
      remapOutputs.push_back(afOp);
      affineApplyOps.push_back(afOp);
    }
  } else {
    // No remapping specified.
    remapOutputs.assign(remapOperands.begin(), remapOperands.end());
  }

  SmallVector<Value, 4> newMapOperands;
  newMapOperands.reserve(newMemRefRank);

  // Prepend 'extraIndices' in 'newMapOperands'.
  for (Value extraIndex : extraIndices) {
    assert((isValidDim(extraIndex) || isValidSymbol(extraIndex)) &&
           "invalid memory op index");
    newMapOperands.push_back(extraIndex);
  }

  // Append 'remapOutputs' to 'newMapOperands'.
  newMapOperands.append(remapOutputs.begin(), remapOutputs.end());

  // Create a new fully composed AffineMap for the new op to be created.
  assert(newMapOperands.size() == newMemRefRank);
  auto newMap = builder.getMultiDimIdentityMap(newMemRefRank);
  fullyComposeAffineMapAndOperands(&newMap, &newMapOperands);
  newMap = simplifyAffineMap(newMap);
  canonicalizeMapAndOperands(&newMap, &newMapOperands);
  // Remove any affine.apply's that became dead as a result of composition.
  for (Value value : affineApplyOps)
    if (value.use_empty())
      value.getDefiningOp()->erase();

  OperationState state(op->getLoc(), op->getName());
  // Construct the new operation using this memref.
  state.operands.reserve(op->getNumOperands() + extraIndices.size());
  // Insert the non-memref operands.
  state.operands.append(op->operand_begin(),
                        op->operand_begin() + memRefOperandPos);
  // Insert the new memref value.
  state.operands.push_back(newMemRef);

  // Insert the new memref map operands.
  state.operands.append(newMapOperands.begin(), newMapOperands.end());

  // Insert the remaining operands unmodified.
  state.operands.append(op->operand_begin() + memRefOperandPos + 1 +
                            oldMapNumInputs,
                        op->operand_end());

  // Result types don't change. Both memrefs are of the same elemental type.
  state.types.reserve(op->getNumResults());
  for (auto result : op->getResults())
    state.types.push_back(result.getType());

  // Add the attribute for 'newMap'; other attributes do not change.
  auto newMapAttr = AffineMapAttr::get(newMap);
  for (auto namedAttr : op->getAttrs()) {
    if (namedAttr.getName() == oldMapAttrPair.getName())
      state.attributes.push_back({namedAttr.getName(), newMapAttr});
    else
      state.attributes.push_back(namedAttr);
  }

  // Create the new operation and replace the old one.
  auto *repOp = builder.create(state);
  op->replaceAllUsesWith(repOp);
  op->erase();
  return success();
}
LogicalResult mlir::affine::replaceAllMemRefUsesWith(
    Value oldMemRef, Value newMemRef, ArrayRef<Value> extraIndices,
    AffineMap indexRemap, ArrayRef<Value> extraOperands,
    ArrayRef<Value> symbolOperands, Operation *domOpFilter,
    Operation *postDomOpFilter, bool allowNonDereferencingOps,
    bool replaceInDeallocOp) {
  unsigned newMemRefRank = cast<MemRefType>(newMemRef.getType()).getRank();
  (void)newMemRefRank; // unused in opt mode
  unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.getType()).getRank();
  (void)oldMemRefRank;
  if (indexRemap) {
    assert(indexRemap.getNumSymbols() == symbolOperands.size() &&
           "symbol operand count mismatch");
    assert(indexRemap.getNumInputs() ==
           extraOperands.size() + oldMemRefRank + symbolOperands.size());
    assert(indexRemap.getNumResults() + extraIndices.size() == newMemRefRank);
  } else {
    assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
  }

  // Assert same elemental type.
  assert(cast<MemRefType>(oldMemRef.getType()).getElementType() ==
         cast<MemRefType>(newMemRef.getType()).getElementType());

  std::unique_ptr<DominanceInfo> domInfo;
  std::unique_ptr<PostDominanceInfo> postDomInfo;
  if (domOpFilter)
    domInfo = std::make_unique<DominanceInfo>(
        domOpFilter->getParentOfType<func::FuncOp>());

  if (postDomOpFilter)
    postDomInfo = std::make_unique<PostDominanceInfo>(
        postDomOpFilter->getParentOfType<func::FuncOp>());

  // Walk all uses of the old memref; collect ops to replace first, since
  // replacement erases ops (which could be the filters themselves).
  DenseSet<Operation *> opsToReplace;
  for (auto *op : oldMemRef.getUsers()) {
    // Skip this use if it's not dominated by domOpFilter.
    if (domOpFilter && !domInfo->dominates(domOpFilter, op))
      continue;

    // Skip this use if it's not post-dominated by postDomOpFilter.
    if (postDomOpFilter && !postDomInfo->postDominates(postDomOpFilter, op))
      continue;

    // Skip deallocs unless requested - no replacement is necessary, and a
    // replacement at other uses doesn't hurt them.
    if (hasSingleEffect<MemoryEffects::Free>(op, oldMemRef) &&
        !replaceInDeallocOp)
      continue;

    // Check if the memref is used in a non-dereferencing context.
    if (!isa<AffineMapAccessInterface>(*op)) {
      if (!allowNonDereferencingOps) {
        LLVM_DEBUG(llvm::dbgs()
                   << "Memref replacement failed: non-dereferencing memref "
                      "op: \n"
                   << *op << '\n');
        return failure();
      }
      // Non-dereferencing ops are only supported if they are normalizable.
      if (!op->hasTrait<OpTrait::MemRefsNormalizable>()) {
        LLVM_DEBUG(llvm::dbgs() << "Memref replacement failed: use without a "
                                   "memrefs normalizable trait: \n"
                                << *op << '\n');
        return failure();
      }
    }

    opsToReplace.insert(op);
  }

  for (auto *op : opsToReplace) {
    if (failed(replaceAllMemRefUsesWith(
            oldMemRef, newMemRef, op, extraIndices, indexRemap, extraOperands,
            symbolOperands, allowNonDereferencingOps)))
      llvm_unreachable("memref replacement guaranteed to succeed here");
  }

  return success();
}
/// Given an operation, inserts one or more single-result affine.apply ops
/// whose results are exclusively used by that operation.
void mlir::affine::createAffineComputationSlice(
    Operation *opInst, SmallVectorImpl<AffineApplyOp> *sliceOps) {
  // Collect all operands that are results of affine apply ops.
  SmallVector<Value, 4> subOperands;
  subOperands.reserve(opInst->getNumOperands());
  for (auto operand : opInst->getOperands())
    if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
      subOperands.push_back(operand);

  // Gather the sequence of AffineApplyOps reachable from 'subOperands'.
  SmallVector<Operation *, 4> affineApplyOps;
  getReachableAffineApplyOps(subOperands, affineApplyOps);
  // Skip transforming if there are no affine maps to compose.
  if (affineApplyOps.empty())
    return;

  // If all uses of the affine apply ops lie only in this op, there is
  // nothing to do.
  bool localized = true;
  for (auto *op : affineApplyOps) {
    for (auto result : op->getResults()) {
      for (auto *user : result.getUsers()) {
        if (user != opInst) {
          localized = false;
          break;
        }
      }
    }
  }
  if (localized)
    return;

  OpBuilder builder(opInst);
  SmallVector<Value, 4> composedOpOperands(subOperands);
  auto composedMap = builder.getMultiDimIdentityMap(composedOpOperands.size());
  fullyComposeAffineMapAndOperands(&composedMap, &composedOpOperands);

  // Create an affine.apply for each of the map results.
  sliceOps->reserve(composedMap.getNumResults());
  for (auto resultExpr : composedMap.getResults()) {
    auto singleResMap = AffineMap::get(composedMap.getNumDims(),
                                       composedMap.getNumSymbols(), resultExpr);
    sliceOps->push_back(builder.create<AffineApplyOp>(
        opInst->getLoc(), singleResMap, composedOpOperands));
  }

  // Construct the new operands: they differ from opInst's operands only for
  // those operands in 'subOperands', which are replaced by the slice results.
  SmallVector<Value, 4> newOperands(opInst->getOperands());
  for (Value &operand : newOperands) {
    unsigned j, f;
    for (j = 0, f = subOperands.size(); j < f; j++) {
      if (operand == subOperands[j])
        break;
    }
    if (j < subOperands.size())
      operand = (*sliceOps)[j];
  }
  for (unsigned idx = 0, e = newOperands.size(); idx < e; idx++)
    opInst->setOperand(idx, newOperands[idx]);
}
/// Check if `map` is a tiled layout: each `floordiv` result must pair with a
/// `mod` result that has the same LHS and RHS (the RHS being the tile size).
static LogicalResult getTileSizePos(
    AffineMap map,
    SmallVectorImpl<std::tuple<AffineExpr, unsigned, unsigned>> &tileSizePos) {
  // Collect (LHS, RHS, position) for every `floordiv` by a constant.
  SmallVector<std::tuple<AffineExpr, AffineExpr, unsigned>, 4> floordivExprs;
  // ...
      if (isa<AffineConstantExpr>(binaryExpr.getRHS()))
        floordivExprs.emplace_back(
            std::make_tuple(binaryExpr.getLHS(), binaryExpr.getRHS(), pos));
  // ...
  // Not a tiled layout if there is no `floordiv`.
  if (floordivExprs.empty()) {
    tileSizePos = SmallVector<std::tuple<AffineExpr, unsigned, unsigned>>{};
    return success();
  }

  // For each `floordiv`, look for a matching `mod` among the other results.
  for (std::tuple<AffineExpr, AffineExpr, unsigned> fexpr : floordivExprs) {
    AffineExpr floordivExprLHS = std::get<0>(fexpr);
    AffineExpr floordivExprRHS = std::get<1>(fexpr);
    unsigned floordivPos = std::get<2>(fexpr);
    // ...
      bool notTiled = false;
      if (pos != floordivPos) {
        expr.walk([&](AffineExpr e) {
          if (e == floordivExprLHS) {
            if (expr.getKind() == AffineExprKind::Mod) {
              AffineBinaryOpExpr binaryExpr = cast<AffineBinaryOpExpr>(expr);
              if (floordivExprLHS == binaryExpr.getLHS() &&
                  floordivExprRHS == binaryExpr.getRHS()) {
                // Save the tile size (RHS of `mod`) with the positions of
                // the `floordiv` and the `mod`.
                tileSizePos.emplace_back(
                    std::make_tuple(binaryExpr.getRHS(), floordivPos, pos));
              }
  // ...
// isNormalizedMemRefDynamicDim: a dimension is dynamic after normalization
// if its layout expression involves any dynamic input dimension.
        if (isa<AffineDimExpr>(e) &&
            llvm::any_of(inMemrefTypeDynDims, [&](unsigned dim) {
              return e == getAffineDimExpr(dim, context);
            }))
          return WalkResult::interrupt();
  // ...
  // createDimSizeExprForTiledLayout: for `mod` take the RHS (the tile size);
  // for `floordiv` produce LHS ceildiv RHS; otherwise pass through unchanged.
  case TileExprPattern::TileMod:
    binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
    newMapOutput = binaryExpr.getRHS();
    break;
  case TileExprPattern::TileFloorDiv:
    binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
    newMapOutput = getAffineBinaryOpExpr(
        AffineExprKind::CeilDiv, binaryExpr.getLHS(), binaryExpr.getRHS());
    break;
  default:
    newMapOutput = oldMapOutput;
  }
  return newMapOutput;
/// Creates new maps to calculate each dimension size of `newMemRefType`, and
/// builds `newDynamicSizes` from them by an AffineApplyOp.
template <typename AllocLikeOp>
static void createNewDynamicSizes(MemRefType oldMemRefType,
                                  MemRefType newMemRefType, AffineMap map,
                                  AllocLikeOp allocOp, OpBuilder b,
                                  SmallVectorImpl<Value> &newDynamicSizes) {
  // Create inputs for the AffineApplyOp: dynamic sizes come from the alloc
  // op; static sizes become index constants.
  SmallVector<Value, 4> inAffineApply;
  ArrayRef<int64_t> oldMemRefShape = oldMemRefType.getShape();
  unsigned dynIdx = 0;
  for (unsigned d = 0; d < oldMemRefType.getRank(); ++d) {
    if (oldMemRefShape[d] < 0) {
      inAffineApply.emplace_back(allocOp.getDynamicSizes()[dynIdx]);
      dynIdx++;
    } else {
      auto constantAttr = b.getIntegerAttr(b.getIndexType(), oldMemRefShape[d]);
      inAffineApply.emplace_back(
          b.create<arith::ConstantOp>(allocOp.getLoc(), constantAttr));
    }
  }

  // For each dynamic dimension of `newMemRefType`, create a map computing its
  // size from the original map output.
  unsigned newDimIdx = 0;
  ArrayRef<int64_t> newMemRefShape = newMemRefType.getShape();
  SmallVector<std::tuple<AffineExpr, unsigned, unsigned>> tileSizePos;
  (void)getTileSizePos(map, tileSizePos);
  for (AffineExpr expr : map.getResults()) {
    if (newMemRefShape[newDimIdx] < 0) {
      enum TileExprPattern pat = TileExprPattern::TileNone;
      for (auto pos : tileSizePos) {
        if (newDimIdx == std::get<1>(pos))
          pat = TileExprPattern::TileFloorDiv;
        else if (newDimIdx == std::get<2>(pos))
          pat = TileExprPattern::TileMod;
      }
      AffineExpr newMapOutput = createDimSizeExprForTiledLayout(expr, pat);
      AffineMap newMap = AffineMap::get(map.getNumInputs(), map.getNumSymbols(),
                                        newMapOutput);
      Value affineApp =
          b.create<AffineApplyOp>(allocOp.getLoc(), newMap, inAffineApply);
      newDynamicSizes.emplace_back(affineApp);
    }
    newDimIdx++;
  }
}
/// Rewrites the memref defined by this alloc-like op to have an identity
/// layout map and updates all its indexing uses.
template <typename AllocLikeOp>
LogicalResult mlir::affine::normalizeMemRef(AllocLikeOp allocOp) {
  MemRefType memrefType = allocOp.getType();
  OpBuilder b(allocOp);

  // Fetch a new memref type after normalizing the old one to an identity
  // layout.
  MemRefType newMemRefType = normalizeMemRefType(memrefType);
  if (newMemRefType == memrefType)
    // Either the type already had an identity map or it couldn't be
    // transformed into one.
    return failure();

  Value oldMemRef = allocOp.getResult();

  SmallVector<Value, 4> symbolOperands(allocOp.getSymbolOperands());
  AffineMap layoutMap = memrefType.getLayout().getAffineMap();
  AllocLikeOp newAlloc;
  // If `layoutMap` is a tiled layout, dynamic dimensions need freshly
  // computed sizes.
  SmallVector<std::tuple<AffineExpr, unsigned, unsigned>> tileSizePos;
  (void)getTileSizePos(layoutMap, tileSizePos);
  if (newMemRefType.getNumDynamicDims() > 0 && !tileSizePos.empty()) {
    auto oldMemRefType = cast<MemRefType>(oldMemRef.getType());
    SmallVector<Value, 4> newDynamicSizes;
    createNewDynamicSizes(oldMemRefType, newMemRefType, layoutMap, allocOp, b,
                          newDynamicSizes);
    newAlloc =
        b.create<AllocLikeOp>(allocOp.getLoc(), newMemRefType, newDynamicSizes,
                              allocOp.getAlignmentAttr());
  } else {
    newAlloc = b.create<AllocLikeOp>(allocOp.getLoc(), newMemRefType,
                                     allocOp.getAlignmentAttr());
  }
  // Replace all uses of the old memref, remapping indices by the old layout
  // map; bail out on failure (e.g., if the memref escapes).
  // ...
  // All remaining uses must be deallocs; the replacement above would have
  // failed otherwise.
  assert(llvm::all_of(oldMemRef.getUsers(), [&](Operation *op) {
    return hasSingleEffect<MemoryEffects::Free>(op, oldMemRef);
  }));
  oldMemRef.replaceAllUsesWith(newAlloc);
  allocOp.erase();
  return success();
}

template LogicalResult
mlir::affine::normalizeMemRef<memref::AllocaOp>(memref::AllocaOp op);
template LogicalResult
mlir::affine::normalizeMemRef<memref::AllocOp>(memref::AllocOp op);
/// Normalizes `memrefType` so that its affine layout map becomes an identity
/// map with a new shape; returns the old type if normalization isn't possible.
MemRefType mlir::affine::normalizeMemRefType(MemRefType memrefType) {
  unsigned rank = memrefType.getRank();
  if (rank == 0)
    return memrefType;

  if (memrefType.getLayout().isIdentity()) {
    // Either no layout map is associated with this memref or it is trivial.
    return memrefType;
  }
  AffineMap layoutMap = memrefType.getLayout().getAffineMap();

  // Normalize only static memrefs and dynamic memrefs with a tiled-layout
  // map for now.
  SmallVector<std::tuple<AffineExpr, unsigned, unsigned>> tileSizePos;
  (void)getTileSizePos(layoutMap, tileSizePos);
  if (memrefType.getNumDynamicDims() > 0 && tileSizePos.empty())
    return memrefType;

  // Bound the original index space by the shape, compose the layout map, and
  // derive the new shape from constant bounds on each result dimension.
  ArrayRef<int64_t> shape = memrefType.getShape();
  FlatAffineValueConstraints fac(rank, layoutMap.getNumSymbols());
  SmallVector<unsigned, 4> memrefTypeDynDims;
  for (unsigned d = 0; d < rank; ++d) {
    if (shape[d] > 0) {
      fac.addBound(BoundType::LB, d, 0);
      fac.addBound(BoundType::UB, d, shape[d] - 1);
    } else {
      memrefTypeDynDims.emplace_back(d);
    }
  }
  unsigned newRank = layoutMap.getNumResults();
  if (failed(fac.composeMatchingMap(layoutMap)))
    return memrefType;
  fac.projectOut(newRank, fac.getNumVars() - newRank - fac.getNumLocalVars());

  SmallVector<int64_t, 4> newShape(newRank);
  for (unsigned d = 0; d < newRank; ++d) {
    // Dimensions derived from a dynamic input dimension remain dynamic.
    if (isNormalizedMemRefDynamicDim(d, layoutMap, memrefTypeDynDims)) {
      newShape[d] = ShapedType::kDynamic;
      continue;
    }
    // The lower bound is always zero; the extent is ub + 1.
    std::optional<int64_t> ubConst = fac.getConstantBound64(BoundType::UB, d);
    if (!ubConst.has_value() || *ubConst < 0) {
      LLVM_DEBUG(llvm::dbgs()
                 << "can't normalize map due to unknown/invalid upper bound");
      return memrefType;
    }
    newShape[d] = *ubConst + 1;
  }

  // Create the new memref type after trivializing the old layout map.
  auto newMemRefType =
      MemRefType::Builder(memrefType)
          .setShape(newShape)
          .setLayout(AffineMapAttr::get(AffineMap::getMultiDimIdentityMap(
              newRank, memrefType.getContext())));
  return newMemRefType;
}
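// [Example] For a tiled layout, the computation above turns, e.g.,
//   memref<64x128xf32, (d0, d1) -> (d0 floordiv 16, d1, d0 mod 16)>
// into memref<4x128x16xf32>: each new extent is the constant upper bound of
// the corresponding layout result plus one (63 floordiv 16 == 3, hence 4;
// d1 carries over as 128; d0 mod 16 spans 16).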
FailureOr<SmallVector<Value>>
mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
                               ArrayRef<Value> basis, bool hasOuterBound) {
  if (hasOuterBound)
    basis = basis.drop_front();

  // Compute the suffix products of the basis; these are the divisors.
  SmallVector<Value> divisors;
  OpFoldResult basisProd = b.getIndexAttr(1);
  for (OpFoldResult basisElem : llvm::reverse(basis)) {
    FailureOr<OpFoldResult> nextProd =
        composedAffineMultiply(b, loc, basisElem, basisProd);
    if (failed(nextProd))
      return failure();
    basisProd = *nextProd;
    divisors.push_back(getValueOrCreateConstantIndexOp(b, loc, basisProd));
  }

  // Repeated div/mod peels off one coordinate per divisor; the final residual
  // is the innermost coordinate.
  SmallVector<Value> results;
  results.reserve(divisors.size() + 1);
  Value residual = linearIndex;
  for (Value divisor : llvm::reverse(divisors)) {
    DivModValue divMod = getDivMod(b, loc, residual, divisor);
    results.push_back(divMod.quotient);
    residual = divMod.remainder;
  }
  results.push_back(residual);
  return results;
}

// The ArrayRef<OpFoldResult> overload is identical except for the basis
// element type.
FailureOr<SmallVector<Value>>
mlir::affine::delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex,
                               ArrayRef<OpFoldResult> basis,
                               bool hasOuterBound) {
  if (hasOuterBound)
    basis = basis.drop_front();
  // ... (same divisor construction and div/mod loop as above)
  results.push_back(residual);
  return results;
}
OpFoldResult mlir::affine::linearizeIndex(OpBuilder &builder, Location loc,
                                          ArrayRef<OpFoldResult> multiIndex,
                                          ArrayRef<OpFoldResult> basis) {
  assert(multiIndex.size() == basis.size() ||
         multiIndex.size() == basis.size() + 1);
  SmallVector<AffineExpr> basisAffine;

  // Add a fake initial size to make the linearization line up when no outer
  // bound is provided.
  if (multiIndex.size() == basis.size() + 1)
    basisAffine.push_back(getAffineConstantExpr(1, builder.getContext()));

  for (size_t i = 0; i < basis.size(); ++i)
    basisAffine.push_back(getAffineSymbolExpr(i, builder.getContext()));

  SmallVector<AffineExpr> stridesAffine = computeStrides(basisAffine);
  SmallVector<OpFoldResult> strides;
  strides.reserve(stridesAffine.size());
  llvm::transform(stridesAffine, std::back_inserter(strides),
                  [&builder, &basis, loc](AffineExpr strideExpr) {
                    return makeComposedFoldedAffineApply(builder, loc,
                                                         strideExpr, basis);
                  });

  auto [linearIndexExpr, multiIndexAndStrides] = computeLinearIndex(
      OpFoldResult(builder.getIndexAttr(0)), strides, multiIndex);
  return makeComposedFoldedAffineApply(builder, loc, linearIndexExpr,
                                       multiIndexAndStrides);
}
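// [Example] The scalar identity behind delinearizeIndex/linearizeIndex:
// strides are suffix products of the basis, delinearization is repeated
// div/mod by each stride, and linearization is the dot product with the
// strides. A standalone sketch (hypothetical helper; assumes <cassert>):
//   void delinearizeRoundTrip() {
//     const long strides[3] = {15, 5, 1}; // suffix products of basis {4,3,5}
//     long linear = 53, multi[3], residual = linear;
//     for (int i = 0; i < 3; ++i) {
//       multi[i] = residual / strides[i]; // DivModValue::quotient
//       residual = residual % strides[i]; // DivModValue::remainder
//     }
//     long back = multi[0] * 15 + multi[1] * 5 + multi[2] * 1;
//     assert(multi[0] == 3 && multi[1] == 1 && multi[2] == 3 && back == 53);
//   }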