31 #define DEBUG_TYPE "affine-utils"
34 using namespace affine;
35 using namespace presburger;
41 class AffineApplyExpander
48 : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
51 template <
typename OpTy>
53 arith::IntegerOverflowFlags overflowFlags =
54 arith::IntegerOverflowFlags::none) {
59 auto op = OpTy::create(builder, loc, lhs, rhs, overflowFlags);
60 return op.getResult();
64 return buildBinaryExpr<arith::AddIOp>(expr);
68 return buildBinaryExpr<arith::MulIOp>(expr,
69 arith::IntegerOverflowFlags::nsw);
82 if (
auto rhsConst = dyn_cast<AffineConstantExpr>(expr.
getRHS())) {
83 if (rhsConst.getValue() <= 0) {
84 emitError(loc,
"modulo by non-positive value is not supported");
91 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
93 Value remainder = arith::RemSIOp::create(builder, loc, lhs, rhs);
95 Value isRemainderNegative = arith::CmpIOp::create(
96 builder, loc, arith::CmpIPredicate::slt, remainder, zeroCst);
97 Value correctedRemainder =
98 arith::AddIOp::create(builder, loc, remainder, rhs);
99 Value result = arith::SelectOp::create(builder, loc, isRemainderNegative,
100 correctedRemainder, remainder);
122 if (
auto rhsConst = dyn_cast<AffineConstantExpr>(expr.
getRHS())) {
123 if (rhsConst.getValue() <= 0) {
124 emitError(loc,
"division by non-positive value is not supported");
130 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
134 Value negative = arith::CmpIOp::create(
135 builder, loc, arith::CmpIPredicate::slt, lhs, zeroCst);
136 Value negatedDecremented =
137 arith::SubIOp::create(builder, loc, noneCst, lhs);
138 Value dividend = arith::SelectOp::create(builder, loc, negative,
139 negatedDecremented, lhs);
140 Value quotient = arith::DivSIOp::create(builder, loc, dividend, rhs);
141 Value correctedQuotient =
142 arith::SubIOp::create(builder, loc, noneCst, quotient);
143 Value result = arith::SelectOp::create(builder, loc, negative,
144 correctedQuotient, quotient);
162 if (
auto rhsConst = dyn_cast<AffineConstantExpr>(expr.
getRHS())) {
163 if (rhsConst.getValue() <= 0) {
164 emitError(loc,
"division by non-positive value is not supported");
170 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
174 Value nonPositive = arith::CmpIOp::create(
175 builder, loc, arith::CmpIPredicate::sle, lhs, zeroCst);
176 Value negated = arith::SubIOp::create(builder, loc, zeroCst, lhs);
177 Value decremented = arith::SubIOp::create(builder, loc, lhs, oneCst);
178 Value dividend = arith::SelectOp::create(builder, loc, nonPositive, negated,
180 Value quotient = arith::DivSIOp::create(builder, loc, dividend, rhs);
181 Value negatedQuotient =
182 arith::SubIOp::create(builder, loc, zeroCst, quotient);
183 Value incrementedQuotient =
184 arith::AddIOp::create(builder, loc, quotient, oneCst);
185 Value result = arith::SelectOp::create(
186 builder, loc, nonPositive, negatedQuotient, incrementedQuotient);
192 return op.getResult();
197 "affine dim position out of range");
203 "symbol dim position out of range");
222 return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
227 std::optional<SmallVector<Value, 8>>
231 auto expanded = llvm::to_vector<8>(
233 [numDims, &builder, loc, operands](
AffineExpr expr) {
234 return expandAffineExpr(builder, loc, expr,
235 operands.take_front(numDims),
236 operands.drop_front(numDims));
238 if (llvm::all_of(expanded, [](
Value v) {
return v; }))
248 assert(ifOp.hasElse() &&
"else block expected");
250 Block *destBlock = ifOp->getBlock();
251 Block *srcBlock = elseBlock ? ifOp.getElseBlock() : ifOp.getThenBlock();
254 std::prev(srcBlock->
end()));
268 if (
auto forOp = dyn_cast<AffineForOp>(parentOp)) {
269 if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
271 }
else if (
auto parallelOp = dyn_cast<AffineParallelOp>(parentOp)) {
272 if (llvm::any_of(parallelOp.getIVs(), [&](
Value iv) {
273 return llvm::is_contained(ifOperands, iv);
276 }
else if (!isa<AffineIfOp>(parentOp)) {
291 if (hoistOverOp == ifOp)
301 auto hoistedIfOp = AffineIfOp::create(b, ifOp.getLoc(), ifOp.getIntegerSet(),
310 StringAttr idForIfOp = b.
getStringAttr(
"__mlir_if_hoisting");
315 hoistOverOpClone = b.
clone(*hoistOverOp, operandMap);
321 auto *thenBlock = hoistedIfOp.getThenBlock();
322 thenBlock->getOperations().splice(thenBlock->begin(),
327 AffineIfOp ifCloneInElse;
328 hoistOverOpClone->
walk([&](AffineIfOp ifClone) {
329 if (!ifClone->getAttr(idForIfOp))
331 ifCloneInElse = ifClone;
334 assert(ifCloneInElse &&
"if op clone should exist");
337 if (!ifCloneInElse.hasElse())
338 ifCloneInElse.erase();
343 auto *elseBlock = hoistedIfOp.getElseBlock();
344 elseBlock->getOperations().splice(
354 AffineParallelOp *resOp) {
356 unsigned numReductions = parallelReductions.size();
357 if (numReductions != forOp.getNumIterOperands())
362 AffineMap lowerBoundMap = forOp.getLowerBoundMap();
363 ValueRange lowerBoundOperands = forOp.getLowerBoundOperands();
364 AffineMap upperBoundMap = forOp.getUpperBoundMap();
365 ValueRange upperBoundOperands = forOp.getUpperBoundOperands();
368 auto reducedValues = llvm::to_vector<4>(llvm::map_range(
370 auto reductionKinds = llvm::to_vector<4>(llvm::map_range(
372 AffineParallelOp newPloop = AffineParallelOp::create(
373 outsideBuilder, loc,
ValueRange(reducedValues).getTypes(), reductionKinds,
378 newPloop.getRegion().takeBody(forOp.getRegion());
379 Operation *yieldOp = &newPloop.getBody()->back();
384 newResults.reserve(numReductions);
385 for (
unsigned i = 0; i < numReductions; ++i) {
386 Value init = forOp.getInits()[i];
391 assert(reductionOp &&
"yielded value is expected to be produced by an op");
395 reductionOp->
setOperands({init, newPloop->getResult(i)});
396 forOp->getResult(i).replaceAllUsesWith(reductionOp->
getResult(0));
405 newPloop.getBody()->eraseArguments(numIVs, numReductions);
417 if (ifOp.getNumResults() != 0)
426 AffineIfOp::getCanonicalizationPatterns(
patterns, ifOp.getContext());
430 ifOp.getOperation(), frozenPatterns,
442 assert(llvm::all_of(ifOp.getOperands(),
444 return isTopLevelValue(v) || isAffineInductionVar(v);
446 "operands not composed");
454 if (hoistedIfOp == ifOp)
471 return positivePath ?
min :
max;
472 if (
auto bin = dyn_cast<AffineBinaryOpExpr>(e)) {
479 auto c1 = dyn_cast<AffineConstantExpr>(bin.getLHS());
480 auto c2 = dyn_cast<AffineConstantExpr>(bin.getRHS());
481 if (c1 && c1.getValue() < 0)
484 if (c2 && c2.getValue() < 0)
496 if (op.hasMinMaxBounds())
499 AffineMap lbMap = op.getLowerBoundsMap();
502 bool isAlreadyNormalized =
503 llvm::all_of(llvm::zip(steps, lbMap.
getResults()), [](
auto tuple) {
504 int64_t step = std::get<0>(tuple);
505 auto lbExpr = dyn_cast<AffineConstantExpr>(std::get<1>(tuple));
506 return lbExpr && lbExpr.getValue() == 0 && step == 1;
508 if (isAlreadyNormalized)
513 op.getLowerBoundsValueMap(), &ranges);
515 auto zeroExpr = builder.getAffineConstantExpr(0);
518 for (
unsigned i = 0, e = steps.size(); i < e; ++i) {
519 int64_t step = steps[i];
522 lbExprs.push_back(zeroExpr);
526 ubExprs.push_back(ubExpr);
532 auto expr = lbExpr + builder.getAffineDimExpr(nDims) * step;
539 OperandRange dimOperands = lbOperands.take_front(nDims);
540 OperandRange symbolOperands = lbOperands.drop_front(nDims);
542 applyOperands.push_back(iv);
543 applyOperands.append(symbolOperands.begin(), symbolOperands.end());
545 AffineApplyOp::create(builder, op.getLoc(), map, applyOperands);
550 op.setSteps(newSteps);
552 0, 0, lbExprs, op.getContext());
553 op.setLowerBounds({}, newLowerMap);
555 ubExprs, op.getContext());
556 op.setUpperBounds(ranges.
getOperands(), newUpperMap);
560 bool promoteSingleIter) {
565 if (op.hasConstantLowerBound() && (op.getConstantLowerBound() == 0) &&
573 if (op.getLowerBoundMap().getNumResults() != 1)
578 int64_t origLoopStep = op.getStepAsInt();
581 AffineMap oldLbMap = op.getLowerBoundMap();
588 op.getLowerBoundMap().getResult(0));
593 AffineValueMap paddedLbValueMap(paddedLbMap, op.getLowerBoundOperands());
594 AffineValueMap ubValueMap(op.getUpperBoundMap(), op.getUpperBoundOperands());
603 for (
unsigned i = 0; i < numResult; ++i)
612 op.setUpperBound(newUbValueMap.
getOperands(), newUbMap);
625 (void)newIvToOldIvMap.canonicalize();
627 AffineApplyOp::create(opBuilder, loc, newIvToOldIvMap.getAffineMap(),
628 newIvToOldIvMap.getOperands());
629 op.getInductionVar().replaceAllUsesExcept(newIV->getResult(0), newIV);
655 unsigned minSurroundingLoops) {
669 for (
unsigned d = nsLoops + 1; d > minSurroundingLoops; d--) {
671 srcAccess, destAccess, d, &dependenceConstraints,
686 template <
typename EffectType,
typename T>
692 bool hasSideEffect =
false;
695 Value memref = memOp.getMemRef();
701 if (
auto memEffect = dyn_cast<MemoryEffectOpInterface>(op)) {
703 memEffect.getEffects(effects);
705 bool opMayHaveEffect =
false;
706 for (
auto effect : effects) {
709 if (isa<EffectType>(effect.getEffect())) {
710 if (effect.getValue() && effect.getValue() != memref &&
711 !
mayAlias(effect.getValue(), memref))
713 opMayHaveEffect =
true;
718 if (!opMayHaveEffect)
723 if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
730 unsigned minSurroundingLoops =
733 hasSideEffect =
true;
739 hasSideEffect =
true;
746 for (
Region ®ion : op->getRegions())
747 for (
Block &block : region)
755 hasSideEffect =
true;
766 checkOperation(parent);
775 "Checking for side effect between two operations without a common "
783 until(untilOp->getParentOp(), untilOp);
795 for (
auto iter = ++from->getIterator(), end = from->
getBlock()->
end();
796 iter != end && &*iter != untilOp; ++iter) {
797 checkOperation(&*iter);
802 if (untilOp->getBlock() != from->
getBlock())
804 todoBlocks.push_back(succ);
809 while (!todoBlocks.empty()) {
810 Block *blk = todoBlocks.pop_back_val();
814 for (
auto &op : *blk) {
820 todoBlocks.push_back(succ);
825 return !hasSideEffect;
844 for (
auto *user : loadOp.getMemRef().getUsers()) {
845 auto storeOp = dyn_cast<AffineWriteOpInterface>(user);
859 if (srcAccess != destAccess)
874 if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp,
879 assert(lastWriteStoreOp ==
nullptr &&
880 "multiple simultaneous replacement stores");
881 lastWriteStoreOp = storeOp;
884 if (!lastWriteStoreOp)
889 cast<AffineWriteOpInterface>(lastWriteStoreOp).getValueToStore();
892 if (storeVal.
getType() != loadOp.getValue().getType())
896 memrefsToErase.insert(loadOp.getMemRef());
898 loadOpsToErase.push_back(loadOp);
903 affine::AffineReadOpInterface>(
919 auto writeB = dyn_cast<AffineWriteOpInterface>(user);
924 if (writeB == writeA)
928 if (writeB->getParentRegion() != writeA->getParentRegion())
935 if (srcAccess != destAccess)
944 if (!affine::hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB,
948 opsToErase.push_back(writeA);
959 static void loadCSE(AffineReadOpInterface loadA,
964 for (
auto *user : loadA.getMemRef().getUsers()) {
965 auto loadB = dyn_cast<AffineReadOpInterface>(user);
966 if (!loadB || loadB == loadA)
973 if (srcAccess != destAccess) {
982 if (!affine::hasNoInterveningEffect<MemoryEffects::Write>(
983 loadB.getOperation(), loadA,
mayAlias))
988 if (loadB.getValue().getType() != loadA.getValue().getType())
991 loadCandidates.push_back(loadB);
997 for (AffineReadOpInterface option : loadCandidates) {
998 if (llvm::all_of(loadCandidates, [&](AffineReadOpInterface depStore) {
999 return depStore == option ||
1000 domInfo.
dominates(option.getOperation(),
1001 depStore.getOperation());
1003 loadB = option.getValue();
1011 loadOpsToErase.push_back(loadA);
1050 return !aliasAnalysis.
alias(val1, val2).
isNo();
1054 f.walk([&](AffineReadOpInterface loadOp) {
1057 for (
auto *op : opsToErase)
1062 f.walk([&](AffineWriteOpInterface storeOp) {
1065 for (
auto *op : opsToErase)
1073 for (
auto memref : memrefsToErase) {
1075 Operation *defOp = memref.getDefiningOp();
1076 if (!defOp || !hasSingleEffect<MemoryEffects::Allocate>(defOp, memref))
1080 if (llvm::any_of(memref.getUsers(), [&](
Operation *ownerOp) {
1081 return !isa<AffineWriteOpInterface>(ownerOp) &&
1082 !hasSingleEffect<MemoryEffects::Free>(ownerOp, memref);
1087 for (
auto *user : llvm::make_early_inc_range(memref.getUsers()))
1095 f.walk([&](AffineReadOpInterface loadOp) {
1098 for (
auto *op : opsToErase)
1105 return isa<AffineMapAccessInterface, memref::LoadOp, memref::StoreOp>(op);
1113 bool allowNonDereferencingOps) {
1114 unsigned newMemRefRank = cast<MemRefType>(newMemRef.
getType()).getRank();
1115 (void)newMemRefRank;
1116 unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.
getType()).getRank();
1117 (void)oldMemRefRank;
1119 assert(indexRemap.
getNumSymbols() == symbolOperands.size() &&
1120 "symbolic operand count mismatch");
1122 extraOperands.size() + oldMemRefRank + symbolOperands.size());
1123 assert(indexRemap.
getNumResults() + extraIndices.size() == newMemRefRank);
1125 assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
1129 assert(cast<MemRefType>(oldMemRef.
getType()).getElementType() ==
1130 cast<MemRefType>(newMemRef.
getType()).getElementType());
1134 if (opEntry.value() == oldMemRef)
1135 usePositions.push_back(opEntry.index());
1139 if (usePositions.empty())
1142 unsigned memRefOperandPos = usePositions.front();
1148 if (!allowNonDereferencingOps) {
1154 for (
unsigned pos : usePositions)
1159 if (usePositions.size() > 1) {
1161 LLVM_DEBUG(llvm::dbgs()
1162 <<
"multiple dereferencing uses in a single op not supported");
1169 unsigned oldMemRefNumIndices = oldMemRefRank;
1171 auto affMapAccInterface = dyn_cast<AffineMapAccessInterface>(op);
1172 if (affMapAccInterface) {
1177 affMapAccInterface.getAffineMapAttrForMemRef(oldMemRef);
1178 oldMap = cast<AffineMapAttr>(oldMapAttrPair.
getValue()).getValue();
1181 oldMapOperands.assign(startIdx, startIdx + oldMemRefNumIndices);
1186 oldMemRefOperands.reserve(oldMemRefRank);
1187 if (affMapAccInterface &&
1189 for (
auto resultExpr : oldMap.
getResults()) {
1192 auto afOp = AffineApplyOp::create(builder, op->
getLoc(), singleResMap,
1194 oldMemRefOperands.push_back(afOp);
1195 affineApplyOps.push_back(afOp);
1198 oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());
1205 remapOperands.reserve(extraOperands.size() + oldMemRefRank +
1206 symbolOperands.size());
1207 remapOperands.append(extraOperands.begin(), extraOperands.end());
1208 remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
1209 remapOperands.append(symbolOperands.begin(), symbolOperands.end());
1212 remapOutputs.reserve(oldMemRefRank);
1216 for (
auto resultExpr : indexRemap.
getResults()) {
1219 auto afOp = AffineApplyOp::create(builder, op->
getLoc(), singleResMap,
1221 remapOutputs.push_back(afOp);
1222 affineApplyOps.push_back(afOp);
1226 remapOutputs.assign(remapOperands.begin(), remapOperands.end());
1229 newMapOperands.reserve(newMemRefRank);
1232 for (
Value extraIndex : extraIndices) {
1234 "invalid memory op index");
1235 newMapOperands.push_back(extraIndex);
1239 newMapOperands.append(remapOutputs.begin(), remapOutputs.end());
1242 assert(newMapOperands.size() == newMemRefRank);
1248 for (
Value value : affineApplyOps)
1249 if (value.use_empty())
1250 value.getDefiningOp()->erase();
1254 state.operands.reserve(op->
getNumOperands() + extraIndices.size());
1259 state.operands.push_back(newMemRef);
1262 if (affMapAccInterface) {
1263 state.operands.append(newMapOperands.begin(), newMapOperands.end());
1268 for (
unsigned i = 0; i < newMemRefRank; i++) {
1269 state.operands.push_back(AffineApplyOp::create(
1272 newMap.getResult(i)),
1278 unsigned oldMapNumInputs = oldMapOperands.size();
1279 state.operands.append(op->
operand_begin() + memRefOperandPos + 1 +
1285 state.types.push_back(result.getType());
1289 for (
auto namedAttr : op->
getAttrs()) {
1290 if (affMapAccInterface &&
1291 namedAttr.getName() ==
1292 affMapAccInterface.getAffineMapAttrForMemRef(oldMemRef).getName())
1293 state.attributes.push_back({namedAttr.getName(), newMapAttr});
1295 state.attributes.push_back(namedAttr);
1299 auto *repOp = builder.
create(state);
1311 bool allowNonDereferencingOps,
bool replaceInDeallocOp) {
1312 unsigned newMemRefRank = cast<MemRefType>(newMemRef.
getType()).getRank();
1313 (void)newMemRefRank;
1314 unsigned oldMemRefRank = cast<MemRefType>(oldMemRef.
getType()).getRank();
1315 (void)oldMemRefRank;
1317 assert(indexRemap.
getNumSymbols() == symbolOperands.size() &&
1318 "symbol operand count mismatch");
1320 extraOperands.size() + oldMemRefRank + symbolOperands.size());
1321 assert(indexRemap.
getNumResults() + extraIndices.size() == newMemRefRank);
1323 assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
1327 assert(cast<MemRefType>(oldMemRef.
getType()).getElementType() ==
1328 cast<MemRefType>(newMemRef.
getType()).getElementType());
1330 std::unique_ptr<DominanceInfo> domInfo;
1331 std::unique_ptr<PostDominanceInfo> postDomInfo;
1337 for (
auto *user : oldMemRef.
getUsers()) {
1339 if (userFilterFn && !userFilterFn(user))
1344 if (hasSingleEffect<MemoryEffects::Free>(user, oldMemRef) &&
1345 !replaceInDeallocOp)
1351 if (!isa<AffineMapAccessInterface>(*user)) {
1352 if (!allowNonDereferencingOps) {
1355 <<
"Memref replacement failed: non-deferencing memref user: \n"
1362 LLVM_DEBUG(llvm::dbgs() <<
"Memref replacement failed: use without a "
1363 "memrefs normalizable trait: \n"
1372 opsToReplace.insert(user);
1375 for (
auto *user : opsToReplace) {
1377 oldMemRef, newMemRef, user, extraIndices, indexRemap, extraOperands,
1378 symbolOperands, allowNonDereferencingOps)))
1379 llvm_unreachable(
"memref replacement guaranteed to succeed here");
1419 if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
1420 subOperands.push_back(operand);
1426 if (affineApplyOps.empty())
1431 bool localized =
true;
1432 for (
auto *op : affineApplyOps) {
1433 for (
auto result : op->getResults()) {
1434 for (
auto *user : result.getUsers()) {
1435 if (user != opInst) {
1451 sliceOps->reserve(composedMap.getNumResults());
1452 for (
auto resultExpr : composedMap.getResults()) {
1454 composedMap.getNumSymbols(), resultExpr);
1455 sliceOps->push_back(AffineApplyOp::create(
1456 builder, opInst->
getLoc(), singleResMap, composedOpOperands));
1464 for (
Value &operand : newOperands) {
1467 for (
j = 0, f = subOperands.size();
j < f;
j++) {
1468 if (operand == subOperands[
j])
1471 if (
j < subOperands.size())
1472 operand = (*sliceOps)[
j];
1474 for (
unsigned idx = 0, e = newOperands.size(); idx < e; idx++)
1497 SmallVectorImpl<std::tuple<AffineExpr, unsigned, unsigned>> &tileSizePos) {
1508 if (isa<AffineConstantExpr>(binaryExpr.
getRHS()))
1509 floordivExprs.emplace_back(
1510 std::make_tuple(binaryExpr.
getLHS(), binaryExpr.
getRHS(), pos));
1515 if (floordivExprs.empty()) {
1522 for (std::tuple<AffineExpr, AffineExpr, unsigned> fexpr : floordivExprs) {
1523 AffineExpr floordivExprLHS = std::get<0>(fexpr);
1524 AffineExpr floordivExprRHS = std::get<1>(fexpr);
1525 unsigned floordivPos = std::get<2>(fexpr);
1537 bool notTiled =
false;
1538 if (pos != floordivPos) {
1540 if (e == floordivExprLHS) {
1542 AffineBinaryOpExpr binaryExpr = cast<AffineBinaryOpExpr>(expr);
1544 if (floordivExprLHS == binaryExpr.getLHS() &&
1545 floordivExprRHS == binaryExpr.getRHS()) {
1549 tileSizePos.emplace_back(
1550 std::make_tuple(binaryExpr.getRHS(), floordivPos, pos));
1602 if (isa<AffineDimExpr>(e) &&
1603 llvm::any_of(inMemrefTypeDynDims, [&](
unsigned dim) {
1622 binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
1623 newMapOutput = binaryExpr.
getRHS();
1626 binaryExpr = cast<AffineBinaryOpExpr>(oldMapOutput);
1631 newMapOutput = oldMapOutput;
1633 return newMapOutput;
1668 template <
typename AllocLikeOp>
1670 MemRefType newMemRefType,
AffineMap map,
1676 unsigned dynIdx = 0;
1677 for (
unsigned d = 0; d < oldMemRefType.getRank(); ++d) {
1678 if (oldMemRefShape[d] < 0) {
1680 inAffineApply.emplace_back(allocOp.getDynamicSizes()[dynIdx]);
1685 inAffineApply.emplace_back(
1686 arith::ConstantOp::create(b, allocOp.getLoc(), constantAttr));
1692 unsigned newDimIdx = 0;
1697 if (newMemRefShape[newDimIdx] < 0) {
1700 for (
auto pos : tileSizePos) {
1701 if (newDimIdx == std::get<1>(pos))
1703 else if (newDimIdx == std::get<2>(pos))
1710 AffineApplyOp::create(b, allocOp.getLoc(), newMap, inAffineApply);
1711 newDynamicSizes.emplace_back(affineApp);
1717 template <
typename AllocLikeOp>
1719 MemRefType memrefType = allocOp.getType();
1725 if (newMemRefType == memrefType)
1730 Value oldMemRef = allocOp.getResult();
1733 AffineMap layoutMap = memrefType.getLayout().getAffineMap();
1734 AllocLikeOp newAlloc;
1739 if (newMemRefType.getNumDynamicDims() > 0 && !tileSizePos.empty()) {
1740 auto oldMemRefType = cast<MemRefType>(oldMemRef.
getType());
1745 newAlloc = AllocLikeOp::create(b, allocOp.getLoc(), newMemRefType,
1746 newDynamicSizes, allocOp.getAlignmentAttr());
1748 newAlloc = AllocLikeOp::create(b, allocOp.getLoc(), newMemRefType,
1749 allocOp.getAlignmentAttr());
1766 return hasSingleEffect<MemoryEffects::Free>(op, oldMemRef);
1775 MemRefType memrefType = reinterpretCastOp.getType();
1776 AffineMap oldLayoutMap = memrefType.getLayout().getAffineMap();
1777 Value oldMemRef = reinterpretCastOp.getResult();
1786 if (newMemRefType == memrefType)
1790 uint64_t newRank = newMemRefType.getRank();
1794 Location loc = reinterpretCastOp.getLoc();
1799 ValueRange oldSizes = reinterpretCastOp.getSizes();
1804 for (
unsigned i = 0, e = memrefType.getRank(); i < e; i++) {
1805 if (memrefType.isDynamicDim(i))
1807 arith::SubIOp::create(b, loc, oldSizes[0].
getType(), oldSizes[idx++],
1812 for (
unsigned i = 0, e = oldStrides.size(); i < e; i++)
1813 mapOperands[memrefType.getRank() + i] = oldStrides[i];
1817 for (
unsigned i = 0; i < newRank; i++) {
1818 if (!newMemRefType.isDynamicDim(i))
1820 newSizes.push_back(AffineApplyOp::create(
1826 for (
unsigned i = 0, e = newSizes.size(); i < e; i++) {
1828 arith::AddIOp::create(b, loc, newSizes[i].
getType(), newSizes[i],
1832 auto newReinterpretCast = memref::ReinterpretCastOp::create(
1833 b, loc, newMemRefType, reinterpretCastOp.getSource(),
1850 newReinterpretCast.erase();
1855 reinterpretCastOp.erase();
1859 template LogicalResult
1860 mlir::affine::normalizeMemRef<memref::AllocaOp>(memref::AllocaOp op);
1861 template LogicalResult
1862 mlir::affine::normalizeMemRef<memref::AllocOp>(memref::AllocOp op);
1865 unsigned rank = memrefType.getRank();
1869 if (memrefType.getLayout().isIdentity()) {
1874 AffineMap layoutMap = memrefType.getLayout().getAffineMap();
1885 if (memrefType.getNumDynamicDims() > 0 && tileSizePos.empty())
1894 for (
unsigned d = 0; d < rank; ++d) {
1898 fac.
addBound(BoundType::UB, d, shape[d] - 1);
1900 memrefTypeDynDims.emplace_back(d);
1913 for (
unsigned d = 0; d < newRank; ++d) {
1916 newShape[d] = ShapedType::kDynamic;
1925 if (!ubConst.has_value() || *ubConst < 0) {
1926 LLVM_DEBUG(llvm::dbgs()
1927 <<
"can't normalize map due to unknown/invalid upper bound");
1931 newShape[d] = *ubConst + 1;
1935 auto newMemRefType =
1940 return newMemRefType;
1966 FailureOr<SmallVector<Value>>
1970 basis = basis.drop_front();
1976 FailureOr<OpFoldResult> nextProd =
1980 basisProd = *nextProd;
1985 results.reserve(divisors.size() + 1);
1986 Value residual = linearIndex;
1987 for (
Value divisor : llvm::reverse(divisors)) {
1989 results.push_back(divMod.
quotient);
1992 results.push_back(residual);
1996 FailureOr<SmallVector<Value>>
1999 bool hasOuterBound) {
2001 basis = basis.drop_front();
2007 FailureOr<OpFoldResult> nextProd =
2011 basisProd = *nextProd;
2016 results.reserve(divisors.size() + 1);
2017 Value residual = linearIndex;
2018 for (
Value divisor : llvm::reverse(divisors)) {
2020 results.push_back(divMod.
quotient);
2023 results.push_back(residual);
2036 assert(multiIndex.size() == basis.size() ||
2037 multiIndex.size() == basis.size() + 1);
2042 if (multiIndex.size() == basis.size() + 1)
2045 for (
size_t i = 0; i < basis.size(); ++i) {
2051 strides.reserve(stridesAffine.size());
2052 llvm::transform(stridesAffine, std::back_inserter(strides),
2053 [&builder, &basis, loc](
AffineExpr strideExpr) {
2055 builder, loc, strideExpr, basis);
2061 multiIndexAndStrides);
static bool mayHaveEffect(Operation *srcMemOp, Operation *destMemOp, unsigned minSurroundingLoops)
Returns true if srcMemOp may have an effect on destMemOp within the scope of the outermost minSurroundingLoops loops that surround them.
static void createNewDynamicSizes(MemRefType oldMemRefType, MemRefType newMemRefType, AffineMap map, AllocLikeOp allocOp, OpBuilder b, SmallVectorImpl< Value > &newDynamicSizes)
Create new maps to calculate each dimension size of newMemRefType, and create newDynamicSizes from them by using AffineApplyOp.
static bool isDereferencingOp(Operation *op)
static LogicalResult getTileSizePos(AffineMap map, SmallVectorImpl< std::tuple< AffineExpr, unsigned, unsigned >> &tileSizePos)
Check if map is a tiled layout.
TileExprPattern
Enum to set patterns of affine expr in tiled-layout map.
static void promoteIfBlock(AffineIfOp ifOp, bool elseBlock)
Promotes the then or the else block of ifOp (depending on whether elseBlock is false or true) into ifOp's containing block, and deletes ifOp.
static bool isNormalizedMemRefDynamicDim(unsigned dim, AffineMap layoutMap, SmallVectorImpl< unsigned > &inMemrefTypeDynDims)
Check if dim dimension of memrefType with layoutMap becomes dynamic after normalization.
static FailureOr< OpFoldResult > composedAffineMultiply(OpBuilder &b, Location loc, OpFoldResult lhs, OpFoldResult rhs)
Create an affine map that computes lhs * rhs, composing in any other affine maps.
static void loadCSE(AffineReadOpInterface loadA, SmallVectorImpl< Operation * > &loadOpsToErase, DominanceInfo &domInfo, llvm::function_ref< bool(Value, Value)> mayAlias)
static AffineExpr createDimSizeExprForTiledLayout(AffineExpr oldMapOutput, TileExprPattern pat)
Create affine expr to calculate dimension size for a tiled-layout map.
static Operation * getOutermostInvariantForOp(AffineIfOp ifOp)
Returns the outermost affine.for/parallel op that the ifOp is invariant on.
static void findUnusedStore(AffineWriteOpInterface writeA, SmallVectorImpl< Operation * > &opsToErase, PostDominanceInfo &postDominanceInfo, llvm::function_ref< bool(Value, Value)> mayAlias)
static bool mustReachAtInnermost(const MemRefAccess &srcAccess, const MemRefAccess &destAccess)
Returns true if the memory operation of destAccess depends on srcAccess inside of the innermost common surrounding affine loop between the two accesses.
static void forwardStoreToLoad(AffineReadOpInterface loadOp, SmallVectorImpl< Operation * > &loadOpsToErase, SmallPtrSetImpl< Value > &memrefsToErase, DominanceInfo &domInfo, llvm::function_ref< bool(Value, Value)> mayAlias)
Attempt to eliminate loadOp by replacing it with a value stored into memory which the load is guaranteed to retrieve.
static void visit(Operation *op, DenseSet< Operation * > &visited)
Visits all the pdl.operand(s), pdl.result(s), and pdl.operation(s) connected to the given operation.
static bool mayAlias(Value first, Value second)
Returns true if two values may be referencing aliasing memory.
static Value max(ImplicitLocOpBuilder &builder, Value value, Value bound)
static Value min(ImplicitLocOpBuilder &builder, Value value, Value bound)
Affine binary operation expression.
AffineExpr getLHS() const
AffineExpr getRHS() const
An integer constant appearing in affine expression.
A dimensional identifier appearing in an affine expression.
unsigned getPosition() const
See documentation for AffineExprVisitorBase.
Base type for affine expression.
AffineExpr floorDiv(uint64_t v) const
RetT walk(FnT &&callback) const
Walk all of the AffineExpr's in this expression in postorder.
AffineExpr ceilDiv(uint64_t v) const
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued.
MLIRContext * getContext() const
static AffineMap getMultiDimIdentityMap(unsigned numDims, MLIRContext *context)
Returns an AffineMap with 'numDims' identity result dim exprs.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
unsigned getNumSymbols() const
unsigned getNumDims() const
ArrayRef< AffineExpr > getResults() const
unsigned getNumResults() const
unsigned getNumInputs() const
AffineExpr getResult(unsigned idx) const
AffineMap compose(AffineMap map) const
Returns the AffineMap resulting from composing this with map.
bool isIdentity() const
Returns true if this affine map is an identity affine map.
A symbolic identifier appearing in an affine expression.
unsigned getPosition() const
This class represents the main alias analysis interface in MLIR.
AliasResult alias(Value lhs, Value rhs)
Given two values, return their aliasing behavior.
bool isNo() const
Returns if this result indicates no possibility of aliasing.
This class represents an argument of a Block.
Block represents an ordered list of Operations.
OpListType::iterator iterator
SuccessorRange getSuccessors()
Operation * getTerminator()
Get the terminator operation of this block.
OpListType & getOperations()
IntegerAttr getIndexAttr(int64_t value)
IntegerAttr getIntegerAttr(Type type, int64_t value)
AffineMap getMultiDimIdentityMap(unsigned rank)
BoolAttr getBoolAttr(bool value)
StringAttr getStringAttr(const Twine &bytes)
AffineExpr getAffineDimExpr(unsigned position)
AffineMap getConstantAffineMap(int64_t val)
Returns a single constant result affine map with 0 dimensions and 0 symbols.
MLIRContext * getContext() const
A class for computing basic dominance information.
bool dominates(Operation *a, Operation *b) const
Return true if operation A dominates operation B, i.e.
LogicalResult composeMatchingMap(AffineMap other)
Composes an affine map whose dimensions and symbols match one to one with the dimensions and symbols ...
void projectOut(Value val)
Projects out the variable that is associate with Value.
This class represents a frozen set of patterns that can be processed by a pattern applicator.
This class allows control over how the GreedyPatternRewriteDriver works.
GreedyRewriteConfig & setStrictness(GreedyRewriteStrictness mode)
This is a utility class for mapping one set of IR entities to another.
void clear()
Clears all mappings held by the mapper.
ImplicitLocOpBuilder maintains a 'current location', allowing use of the create<> method without specifying the location.
Location getLoc() const
Accessors for the implied location.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
MLIRContext is the top-level object for a collection of MLIR operations.
This is a builder type that keeps local references to arguments.
Builder & setLayout(MemRefLayoutAttrInterface newLayout)
Builder & setShape(ArrayRef< int64_t > newShape)
NamedAttribute represents a combination of a name and an Attribute value.
Attribute getValue() const
Return the value of the attribute.
This class helps build Operations.
static OpBuilder atBlockBegin(Block *block, Listener *listener=nullptr)
Create a builder and set the insertion point to before the first operation in the block but still inside the block.
Block::iterator getInsertionPoint() const
Returns the current insertion point of the builder.
Operation * clone(Operation &op, IRMapping &mapper)
Creates a deep copy of the specified operation, remapping any operands that use values outside of the operation using the map that is provided.
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent insertions to go right after it.
Block * getInsertionBlock() const
Return the block the current insertion point belongs to.
This class represents a single result from folding an operation.
This trait indicates that the memory effects of an operation includes the effects of operations nested within it.
This class provides the API for ops that are known to be isolated from above.
This class implements the operand iterators for the Operation class.
Operation is the basic unit of execution within MLIR.
Value getOperand(unsigned idx)
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
void setOperand(unsigned idx, Value value)
operand_iterator operand_begin()
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
std::enable_if_t< llvm::function_traits< std::decay_t< FnT > >::num_args==1, RetT > walk(FnT &&callback)
Walk the operation by calling the callback for each nested operation (including this one),...
Location getLoc()
The source location the operation was defined or derived from.
unsigned getNumOperands()
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
ArrayRef< NamedAttribute > getAttrs()
Return all of the attributes on this operation.
Block * getBlock()
Returns the operation block that contains this operation.
operand_iterator operand_end()
OperationName getName()
The name of an operation is the key identifier for it.
operand_range getOperands()
Returns an iterator on the underlying Value's.
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
void replaceAllUsesWith(ValuesT &&values)
Replace all uses of results of this operation with the provided 'values'.
void setOperands(ValueRange operands)
Replace the current operands of this operation with the ones provided in 'operands'.
user_range getUsers()
Returns a range of all users.
Region * getParentRegion()
Returns the region to which the instruction belongs.
result_range getResults()
void erase()
Remove this operation from its parent block and delete it.
unsigned getNumResults()
Return the number of results held by this operation.
A class for computing basic postdominance information.
bool postDominates(Operation *a, Operation *b) const
Return true if operation A postdominates operation B.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Region * getParentRegion()
Return the region containing this region or nullptr if the region is attached to a top-level operatio...
bool isAncestor(Region *other)
Return true if this region is ancestor of the other region.
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
void replaceAllUsesExcept(Value newValue, const SmallPtrSetImpl< Operation * > &exceptions)
Replace all uses of 'this' value with 'newValue', updating anything in the IR that uses 'this' to use...
void replaceAllUsesWith(Value newValue)
Replace all uses of 'this' value with the new value, updating anything in the IR that uses 'this' to ...
user_range getUsers() const
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
static WalkResult advance()
static WalkResult interrupt()
An AffineValueMap is an affine map plus its ML value operands and results for analysis purposes.
LogicalResult canonicalize()
Attempts to canonicalize the map and operands.
unsigned getNumSymbols() const
ArrayRef< Value > getOperands() const
unsigned getNumDims() const
AffineExpr getResult(unsigned i)
AffineMap getAffineMap() const
unsigned getNumResults() const
static void difference(const AffineValueMap &a, const AffineValueMap &b, AffineValueMap *res)
Return the value map that is the difference of value maps 'a' and 'b', represented as an affine map a...
FlatAffineValueConstraints is an extension of FlatLinearValueConstraints with helper functions for Af...
LogicalResult addBound(presburger::BoundType type, unsigned pos, AffineMap boundMap, ValueRange operands)
Adds a bound for the variable at the specified position with constraints being drawn from the specifi...
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
std::optional< int64_t > getConstantBound64(BoundType type, unsigned pos) const
The same, but casts to int64_t.
unsigned getNumVars() const
unsigned getNumLocalVars() const
std::optional< SmallVector< Value, 8 > > expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap, ValueRange operands)
Create a sequence of operations that implement the affineMap applied to the given operands (as if it ...
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands, bool composeAffineMin=false)
Returns a composed AffineApplyOp by composing map and operands with other AffineApplyOps supplying th...
void affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo, PostDominanceInfo &postDomInfo, AliasAnalysis &analysis)
Replace affine store and load accesses by scalars by forwarding stores to loads and eliminate invaria...
LogicalResult promoteIfSingleIteration(AffineForOp forOp)
Promotes the loop body of a AffineForOp to its containing block if the loop was known to have a singl...
bool isValidDim(Value value)
Returns true if the given Value can be used as a dimension id in the region of the closest surroundin...
Value expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr, ValueRange dimValues, ValueRange symbolValues)
Emit code that computes the given affine expression using standard arithmetic operations applied to t...
unsigned getNumCommonSurroundingLoops(Operation &a, Operation &b)
Returns the number of surrounding loops common to both A and B.
void normalizeAffineParallel(AffineParallelOp op)
Normalize an affine.parallel op so that lower bounds are 0 and steps are 1.
DependenceResult checkMemrefAccessDependence(const MemRefAccess &srcAccess, const MemRefAccess &dstAccess, unsigned loopDepth, FlatAffineValueConstraints *dependenceConstraints=nullptr, SmallVector< DependenceComponent, 2 > *dependenceComponents=nullptr, bool allowRAR=false)
LogicalResult affineParallelize(AffineForOp forOp, ArrayRef< LoopReduction > parallelReductions={}, AffineParallelOp *resOp=nullptr)
Replaces a parallel affine.for op with a 1-d affine.parallel op.
void canonicalizeMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands)
Modifies both map and operands in-place so as to:
void getReachableAffineApplyOps(ArrayRef< Value > operands, SmallVectorImpl< Operation * > &affineApplyOps)
Returns in affineApplyOps, the sequence of those AffineApplyOp Operations that are reachable via a se...
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands, bool composeAffineMin=false)
Constructs an AffineApplyOp that applies map to operands after composing the map with the maps of any...
LogicalResult normalizeAffineFor(AffineForOp op, bool promoteSingleIter=false)
Normalize an affine.for op.
Region * getAffineAnalysisScope(Operation *op)
Returns the closest region enclosing op that is held by a non-affine operation; nullptr if there is n...
void fullyComposeAffineMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands, bool composeAffineMin=false)
Given an affine map map and its input operands, this method composes into map, maps of AffineApplyOps...
bool isValidSymbol(Value value)
Returns true if the given value can be used as a symbol in the region of the closest surrounding op t...
bool hasDependence(DependenceResult result)
Utility function that returns true if the provided DependenceResult corresponds to a dependence resul...
MemRefType normalizeMemRefType(MemRefType memrefType)
Normalizes memrefType so that the affine layout map of the memref is transformed to an identity map w...
LogicalResult normalizeMemRef(AllocLikeOp op)
Rewrites the memref defined by alloc or reinterpret_cast op to have an identity layout map and update...
FailureOr< SmallVector< Value > > delinearizeIndex(OpBuilder &b, Location loc, Value linearIndex, ArrayRef< Value > basis, bool hasOuterBound=true)
Generate the IR to delinearize linearIndex given the basis and return the multi-index.
OpFoldResult linearizeIndex(ArrayRef< OpFoldResult > multiIndex, ArrayRef< OpFoldResult > basis, ImplicitLocOpBuilder &builder)
DivModValue getDivMod(OpBuilder &b, Location loc, Value lhs, Value rhs)
Create IR to calculate (div lhs, rhs) and (mod lhs, rhs).
bool hasNoInterveningEffect(Operation *start, T memOp, llvm::function_ref< bool(Value, Value)> mayAlias)
Ensure that all operations that could be executed after start (noninclusive) and prior to memOp (e....
void createAffineComputationSlice(Operation *opInst, SmallVectorImpl< AffineApplyOp > *sliceOps)
Given an operation, inserts one or more single result affine apply operations, results of which are e...
LogicalResult hoistAffineIfOp(AffineIfOp ifOp, bool *folded=nullptr)
Hoists out affine.if/else to as high as possible, i.e., past all invariant affine....
bool noDependence(DependenceResult result)
Returns true if the provided DependenceResult corresponds to the absence of a dependence.
LogicalResult replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, ArrayRef< Value > extraIndices={}, AffineMap indexRemap=AffineMap(), ArrayRef< Value > extraOperands={}, ArrayRef< Value > symbolOperands={}, llvm::function_ref< bool(Operation *)> userFilterFn=nullptr, bool allowNonDereferencingOps=false, bool replaceInDeallocOp=false)
Replaces all "dereferencing" uses of oldMemRef with newMemRef while optionally remapping the old memr...
AffineExpr substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min, AffineExpr max, bool positivePath=true)
Traverse e and return an AffineExpr where all occurrences of dim have been replaced by either:
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Include the generated interface declarations.
AffineMap simplifyAffineMap(AffineMap map)
Simplifies an affine map by simplifying its underlying AffineExpr results.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
void bindDims(MLIRContext *ctx, AffineExprTy &...exprs)
Bind a list of AffineExpr references to DimExpr at positions: [0 .
std::pair< AffineExpr, SmallVector< OpFoldResult > > computeLinearIndex(OpFoldResult sourceOffset, ArrayRef< OpFoldResult > strides, ArrayRef< OpFoldResult > indices)
Compute linear index from provided strides and indices, assuming strided layout.
LogicalResult applyPatternsGreedily(Region &region, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig(), bool *changed=nullptr)
Rewrite ops in the given region, which must be isolated from above, by repeatedly applying the highes...
SmallVector< int64_t > computeStrides(ArrayRef< int64_t > sizes)
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
LogicalResult applyOpPatternsGreedily(ArrayRef< Operation * > ops, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig(), bool *changed=nullptr, bool *allErased=nullptr)
Rewrite the specified ops by repeatedly applying the highest benefit patterns in a greedy worklist dr...
@ CeilDiv
RHS of ceildiv is always a constant or a symbolic expression.
@ Mod
RHS of mod is always a constant or a symbolic expression with a positive value.
@ FloorDiv
RHS of floordiv is always a constant or a symbolic expression.
AffineExpr getAffineBinaryOpExpr(AffineExprKind kind, AffineExpr lhs, AffineExpr rhs)
const FrozenRewritePatternSet & patterns
void bindSymbols(MLIRContext *ctx, AffineExprTy &...exprs)
Bind a list of AffineExpr references to SymbolExpr at positions: [0 .
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
@ ExistingOps
Only pre-existing ops are processed.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.
AffineExpr getAffineSymbolExpr(unsigned position, MLIRContext *context)
The following effect indicates that the operation reads from some resource.
This represents an operation in an abstracted form, suitable for use with the builder APIs.
Checks whether two accesses to the same memref access the same element.
Holds the result of (div a, b) and (mod a, b).
A description of a (parallelizable) reduction in an affine loop.
arith::AtomicRMWKind kind
Reduction kind.
Value value
The value being reduced.
Encapsulates a memref load or store access information.
Eliminates variable at the specified position using Fourier-Motzkin variable elimination.