28 #define DEBUG_TYPE "affine-utils" 31 using namespace presburger;
37 class AffineApplyExpander
44 : builder(builder), dimValues(dimValues), symbolValues(symbolValues),
47 template <
typename OpTy>
53 auto op = builder.
create<OpTy>(loc, lhs, rhs);
58 return buildBinaryExpr<arith::AddIOp>(expr);
62 return buildBinaryExpr<arith::MulIOp>(expr);
79 "semi-affine expressions (modulo by non-const) are not supported");
82 if (rhsConst.getValue() <= 0) {
83 emitError(loc,
"modulo by non-positive value is not supported");
89 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
91 Value remainder = builder.
create<arith::RemSIOp>(loc, lhs, rhs);
93 Value isRemainderNegative = builder.
create<arith::CmpIOp>(
94 loc, arith::CmpIPredicate::slt, remainder, zeroCst);
95 Value correctedRemainder =
96 builder.
create<arith::AddIOp>(loc, remainder, rhs);
98 loc, isRemainderNegative, correctedRemainder, remainder);
117 "semi-affine expressions (division by non-const) are not supported");
120 if (rhsConst.getValue() <= 0) {
121 emitError(loc,
"division by non-positive value is not supported");
127 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
132 loc, arith::CmpIPredicate::slt, lhs, zeroCst);
133 Value negatedDecremented = builder.
create<arith::SubIOp>(loc, noneCst, lhs);
135 builder.
create<arith::SelectOp>(loc, negative, negatedDecremented, lhs);
136 Value quotient = builder.
create<arith::DivSIOp>(loc, dividend, rhs);
137 Value correctedQuotient =
138 builder.
create<arith::SubIOp>(loc, noneCst, quotient);
139 Value result = builder.
create<arith::SelectOp>(loc, negative,
140 correctedQuotient, quotient);
157 emitError(loc) <<
"semi-affine expressions (division by non-const) are " 161 if (rhsConst.getValue() <= 0) {
162 emitError(loc,
"division by non-positive value is not supported");
167 assert(lhs && rhs &&
"unexpected affine expr lowering failure");
172 loc, arith::CmpIPredicate::sle, lhs, zeroCst);
173 Value negated = builder.
create<arith::SubIOp>(loc, zeroCst, lhs);
174 Value decremented = builder.
create<arith::SubIOp>(loc, lhs, oneCst);
176 builder.
create<arith::SelectOp>(loc, nonPositive, negated, decremented);
177 Value quotient = builder.
create<arith::DivSIOp>(loc, dividend, rhs);
178 Value negatedQuotient =
179 builder.
create<arith::SubIOp>(loc, zeroCst, quotient);
180 Value incrementedQuotient =
181 builder.
create<arith::AddIOp>(loc, quotient, oneCst);
183 loc, nonPositive, negatedQuotient, incrementedQuotient);
189 return op.getResult();
194 "affine dim position out of range");
200 "symbol dim position out of range");
218 return AffineApplyExpander(builder, dimValues, symbolValues, loc).visit(expr);
228 auto expanded = llvm::to_vector<8>(
230 [numDims, &builder, loc, operands](
AffineExpr expr) {
232 operands.take_front(numDims),
233 operands.drop_front(numDims));
235 if (llvm::all_of(expanded, [](
Value v) {
return v; }))
245 assert(ifOp.hasElse() &&
"else block expected");
247 Block *destBlock = ifOp->getBlock();
248 Block *srcBlock = elseBlock ? ifOp.getElseBlock() : ifOp.getThenBlock();
251 std::prev(srcBlock->
end()));
262 auto *res = ifOp.getOperation();
263 while (!isa<func::FuncOp>(res->getParentOp())) {
264 auto *parentOp = res->getParentOp();
265 if (
auto forOp = dyn_cast<AffineForOp>(parentOp)) {
266 if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
268 }
else if (
auto parallelOp = dyn_cast<AffineParallelOp>(parentOp)) {
269 for (
auto iv : parallelOp.getIVs())
270 if (llvm::is_contained(ifOperands, iv))
272 }
else if (!isa<AffineIfOp>(parentOp)) {
287 if (hoistOverOp == ifOp)
297 auto hoistedIfOp = b.
create<AffineIfOp>(ifOp.getLoc(), ifOp.getIntegerSet(),
306 StringAttr idForIfOp = b.
getStringAttr(
"__mlir_if_hoisting");
311 hoistOverOpClone = b.
clone(*hoistOverOp, operandMap);
317 auto *thenBlock = hoistedIfOp.getThenBlock();
318 thenBlock->getOperations().splice(thenBlock->begin(),
323 AffineIfOp ifCloneInElse;
324 hoistOverOpClone->walk([&](AffineIfOp ifClone) {
325 if (!ifClone->getAttr(idForIfOp))
327 ifCloneInElse = ifClone;
330 assert(ifCloneInElse &&
"if op clone should exist");
333 if (!ifCloneInElse.hasElse())
334 ifCloneInElse.erase();
339 auto *elseBlock = hoistedIfOp.getElseBlock();
340 elseBlock->getOperations().splice(
341 elseBlock->begin(), hoistOverOpClone->getBlock()->getOperations(),
351 unsigned numReductions = parallelReductions.size();
352 if (numReductions != forOp.getNumIterOperands())
357 AffineMap lowerBoundMap = forOp.getLowerBoundMap();
358 ValueRange lowerBoundOperands = forOp.getLowerBoundOperands();
359 AffineMap upperBoundMap = forOp.getUpperBoundMap();
360 ValueRange upperBoundOperands = forOp.getUpperBoundOperands();
363 auto reducedValues = llvm::to_vector<4>(llvm::map_range(
365 auto reductionKinds = llvm::to_vector<4>(llvm::map_range(
367 AffineParallelOp newPloop = outsideBuilder.
create<AffineParallelOp>(
369 llvm::makeArrayRef(lowerBoundMap), lowerBoundOperands,
370 llvm::makeArrayRef(upperBoundMap), upperBoundOperands,
371 llvm::makeArrayRef(forOp.getStep()));
373 newPloop.getRegion().takeBody(forOp.getRegion());
374 Operation *yieldOp = &newPloop.getBody()->back();
379 newResults.reserve(numReductions);
380 for (
unsigned i = 0; i < numReductions; ++i) {
381 Value init = forOp.getIterOperands()[i];
386 assert(reductionOp &&
"yielded value is expected to be produced by an op");
390 reductionOp->
setOperands({init, newPloop->getResult(i)});
391 forOp->getResult(i).replaceAllUsesWith(reductionOp->
getResult(0));
400 newPloop.getBody()->eraseArguments(
401 llvm::to_vector<4>(llvm::seq<unsigned>(numIVs, numReductions + numIVs)));
411 if (ifOp.getNumResults() != 0)
420 AffineIfOp::getCanonicalizationPatterns(patterns, ifOp.getContext());
434 assert(llvm::all_of(ifOp.getOperands(),
438 "operands not composed");
446 if (hoistedIfOp == ifOp)
462 return positivePath ?
min :
max;
472 if (c1 && c1.getValue() < 0)
474 bin.getKind(), c1,
substWithMin(rhs, dim, min, max, !positivePath));
477 bin.getKind(),
substWithMin(lhs, dim, min, max, !positivePath), c2);
479 bin.getKind(),
substWithMin(lhs, dim, min, max, positivePath),
487 if (op.hasMinMaxBounds())
490 AffineMap lbMap = op.getLowerBoundsMap();
493 bool isAlreadyNormalized =
494 llvm::all_of(llvm::zip(steps, lbMap.
getResults()), [](
auto tuple) {
495 int64_t step = std::get<0>(tuple);
497 std::get<1>(tuple).
template dyn_cast<AffineConstantExpr>();
498 return lbExpr && lbExpr.getValue() == 0 && step == 1;
500 if (isAlreadyNormalized)
505 op.getLowerBoundsValueMap(), &ranges);
510 for (
unsigned i = 0, e = steps.size(); i < e; ++i) {
511 int64_t step = steps[i];
514 lbExprs.push_back(zeroExpr);
518 ubExprs.push_back(ubExpr);
524 auto expr = lbExpr + builder.getAffineDimExpr(nDims) * step;
531 OperandRange dimOperands = lbOperands.take_front(nDims);
532 OperandRange symbolOperands = lbOperands.drop_front(nDims);
534 applyOperands.push_back(iv);
535 applyOperands.append(symbolOperands.begin(), symbolOperands.end());
536 auto apply = builder.create<AffineApplyOp>(op.getLoc(), map, applyOperands);
541 op.setSteps(newSteps);
543 0, 0, lbExprs, op.getContext());
544 op.setLowerBounds({}, newLowerMap);
546 ubExprs, op.getContext());
547 op.setUpperBounds(ranges.
getOperands(), newUpperMap);
561 if (op.hasConstantLowerBound() && (op.getConstantLowerBound() == 0) &&
569 if (op.getLowerBoundMap().getNumResults() != 1)
574 int64_t origLoopStep = op.getStep();
613 for (
unsigned i = 0, e = origUbExprs.size(); i < e; ++i) {
614 newUbExprs.push_back(
615 (origUbExprs[i] - origLbExprs[0]).
ceilDiv(origLoopStep));
630 op.setUpperBound(ubOperands, newUbMap);
638 lbOperands.push_back(op.getInductionVar());
648 Operation *newIV = opBuilder.
create<AffineApplyOp>(loc, ivMap, lbOperands);
649 op.getInductionVar().replaceAllUsesExcept(newIV->
getResult(0), newIV);
660 template <
typename EffectType,
typename T>
662 Value memref = memOp.getMemRef();
663 bool isOriginalAllocation = memref.
getDefiningOp<memref::AllocaOp>() ||
668 bool hasSideEffect =
false;
671 std::function<void(Operation *)> checkOperation = [&](
Operation *op) {
676 if (
auto memEffect = dyn_cast<MemoryEffectOpInterface>(op)) {
678 memEffect.getEffects(effects);
680 bool opMayHaveEffect =
false;
681 for (
auto effect : effects) {
684 if (isa<EffectType>(effect.getEffect())) {
685 if (isOriginalAllocation && effect.getValue() &&
686 (effect.getValue().getDefiningOp<memref::AllocaOp>() ||
687 effect.getValue().getDefiningOp<memref::AllocOp>())) {
688 if (effect.getValue() != memref)
691 opMayHaveEffect =
true;
696 if (!opMayHaveEffect)
701 if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
710 unsigned minSurroundingLoops =
725 for (d = nsLoops + 1; d > minSurroundingLoops; d--) {
727 srcAccess, destAccess, d, &dependenceConstraints,
730 hasSideEffect =
true;
739 hasSideEffect =
true;
746 for (
Region ®ion : op->getRegions())
747 for (
Block &block : region)
755 hasSideEffect =
true;
766 checkOperation(parent);
771 std::function<void(Operation *, Operation *)> recur =
775 "Checking for side effect between two operations without a common " 783 until(untilOp->getParentOp(), untilOp);
795 for (
auto iter = ++from->getIterator(), end = from->
getBlock()->
end();
796 iter != end && &*iter != untilOp; ++iter) {
797 checkOperation(&*iter);
802 if (untilOp->getBlock() != from->
getBlock())
804 todoBlocks.push_back(succ);
809 while (!todoBlocks.empty()) {
810 Block *blk = todoBlocks.pop_back_val();
814 for (
auto &op : *blk) {
818 if (&op == blk->getTerminator())
820 todoBlocks.push_back(succ);
825 return !hasSideEffect;
858 if (srcAccess != destAccess)
867 if (!hasNoInterveningEffect<MemoryEffects::Write>(storeOp, loadOp))
871 assert(lastWriteStoreOp ==
nullptr &&
872 "multiple simulataneous replacement stores");
873 lastWriteStoreOp = storeOp;
876 if (!lastWriteStoreOp)
881 cast<AffineWriteOpInterface>(lastWriteStoreOp).getValueToStore();
888 memrefsToErase.insert(loadOp.
getMemRef());
890 loadOpsToErase.push_back(loadOp);
910 if (writeB == writeA)
914 if (writeB->getParentRegion() != writeA->getParentRegion())
921 if (srcAccess != destAccess)
930 if (!hasNoInterveningEffect<MemoryEffects::Read>(writeA, writeB))
933 opsToErase.push_back(writeA);
950 if (!loadB || loadB == loadA)
957 if (srcAccess != destAccess) {
966 if (!hasNoInterveningEffect<MemoryEffects::Write>(loadB.getOperation(),
975 loadCandidates.push_back(loadB);
983 return depStore == option ||
987 loadB = option.getValue();
995 loadOpsToErase.push_back(loadA);
1036 loadCSE(loadOp, opsToErase, domInfo);
1041 for (
auto *op : opsToErase)
1050 for (
auto *op : opsToErase)
1056 for (
auto memref : memrefsToErase) {
1058 Operation *defOp = memref.getDefiningOp();
1059 if (!defOp || !isa<memref::AllocOp>(defOp))
1063 if (llvm::any_of(memref.getUsers(), [&](
Operation *ownerOp) {
1064 return !isa<AffineWriteOpInterface, memref::DeallocOp>(ownerOp);
1069 for (
auto *user : llvm::make_early_inc_range(memref.getUsers()))
1082 bool allowNonDereferencingOps) {
1083 unsigned newMemRefRank = newMemRef.
getType().
cast<MemRefType>().getRank();
1084 (
void)newMemRefRank;
1085 unsigned oldMemRefRank = oldMemRef.
getType().
cast<MemRefType>().getRank();
1086 (
void)oldMemRefRank;
1088 assert(indexRemap.
getNumSymbols() == symbolOperands.size() &&
1089 "symbolic operand count mismatch");
1091 extraOperands.size() + oldMemRefRank + symbolOperands.size());
1092 assert(indexRemap.
getNumResults() + extraIndices.size() == newMemRefRank);
1094 assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
1103 if (opEntry.value() == oldMemRef)
1104 usePositions.push_back(opEntry.index());
1108 if (usePositions.empty())
1111 if (usePositions.size() > 1) {
1113 assert(
false &&
"multiple dereferencing uses in a single op not supported");
1117 unsigned memRefOperandPos = usePositions.front();
1123 if (!affMapAccInterface) {
1124 if (!allowNonDereferencingOps) {
1135 affMapAccInterface.getAffineMapAttrForMemRef(oldMemRef);
1140 op->
operand_begin() + memRefOperandPos + 1 + oldMapNumInputs);
1145 oldMemRefOperands.reserve(oldMemRefRank);
1147 for (
auto resultExpr : oldMap.
getResults()) {
1150 auto afOp = builder.
create<AffineApplyOp>(op->
getLoc(), singleResMap,
1152 oldMemRefOperands.push_back(afOp);
1153 affineApplyOps.push_back(afOp);
1156 oldMemRefOperands.assign(oldMapOperands.begin(), oldMapOperands.end());
1163 remapOperands.reserve(extraOperands.size() + oldMemRefRank +
1164 symbolOperands.size());
1165 remapOperands.append(extraOperands.begin(), extraOperands.end());
1166 remapOperands.append(oldMemRefOperands.begin(), oldMemRefOperands.end());
1167 remapOperands.append(symbolOperands.begin(), symbolOperands.end());
1170 remapOutputs.reserve(oldMemRefRank);
1175 for (
auto resultExpr : indexRemap.
getResults()) {
1178 auto afOp = builder.
create<AffineApplyOp>(op->
getLoc(), singleResMap,
1180 remapOutputs.push_back(afOp);
1181 affineApplyOps.push_back(afOp);
1185 remapOutputs.assign(remapOperands.begin(), remapOperands.end());
1189 newMapOperands.reserve(newMemRefRank);
1192 for (
Value extraIndex : extraIndices) {
1193 assert(extraIndex.getDefiningOp()->getNumResults() == 1 &&
1194 "single result op's expected to generate these indices");
1196 "invalid memory op index");
1197 newMapOperands.push_back(extraIndex);
1201 newMapOperands.append(remapOutputs.begin(), remapOutputs.end());
1204 assert(newMapOperands.size() == newMemRefRank);
1212 if (
value.use_empty())
1213 value.getDefiningOp()->erase();
1222 state.operands.push_back(newMemRef);
1225 state.operands.append(newMapOperands.begin(), newMapOperands.end());
1228 state.operands.append(op->
operand_begin() + memRefOperandPos + 1 +
1235 state.types.push_back(result.getType());
1238 auto newMapAttr = AffineMapAttr::get(newMap);
1239 for (
auto namedAttr : op->
getAttrs()) {
1240 if (namedAttr.getName() == oldMapAttrPair.
getName())
1241 state.attributes.push_back({namedAttr.getName(), newMapAttr});
1243 state.attributes.push_back(namedAttr);
1247 auto *repOp = builder.
create(state);
1258 Operation *postDomOpFilter,
bool allowNonDereferencingOps,
1259 bool replaceInDeallocOp) {
1260 unsigned newMemRefRank = newMemRef.
getType().
cast<MemRefType>().getRank();
1261 (
void)newMemRefRank;
1262 unsigned oldMemRefRank = oldMemRef.
getType().
cast<MemRefType>().getRank();
1263 (
void)oldMemRefRank;
1265 assert(indexRemap.
getNumSymbols() == symbolOperands.size() &&
1266 "symbol operand count mismatch");
1268 extraOperands.size() + oldMemRefRank + symbolOperands.size());
1269 assert(indexRemap.
getNumResults() + extraIndices.size() == newMemRefRank);
1271 assert(oldMemRefRank + extraIndices.size() == newMemRefRank);
1278 std::unique_ptr<DominanceInfo> domInfo;
1279 std::unique_ptr<PostDominanceInfo> postDomInfo;
1281 domInfo = std::make_unique<DominanceInfo>(
1284 if (postDomOpFilter)
1285 postDomInfo = std::make_unique<PostDominanceInfo>(
1292 for (
auto *op : oldMemRef.
getUsers()) {
1294 if (domOpFilter && !domInfo->dominates(domOpFilter, op))
1298 if (postDomOpFilter && !postDomInfo->postDominates(postDomOpFilter, op))
1303 if (isa<memref::DeallocOp>(op) && !replaceInDeallocOp)
1309 if (!isa<AffineMapAccessInterface>(*op)) {
1310 if (!allowNonDereferencingOps) {
1311 LLVM_DEBUG(llvm::dbgs()
1312 <<
"Memref replacement failed: non-deferencing memref op: \n" 1319 LLVM_DEBUG(llvm::dbgs() <<
"Memref replacement failed: use without a " 1320 "memrefs normalizable trait: \n" 1328 opsToReplace.insert(op);
1331 for (
auto *op : opsToReplace) {
1333 oldMemRef, newMemRef, op, extraIndices, indexRemap, extraOperands,
1334 symbolOperands, allowNonDereferencingOps)))
1335 llvm_unreachable(
"memref replacement guaranteed to succeed here");
1375 if (isa_and_nonnull<AffineApplyOp>(operand.getDefiningOp()))
1376 subOperands.push_back(operand);
1382 if (affineApplyOps.empty())
1387 bool localized =
true;
1388 for (
auto *op : affineApplyOps) {
1389 for (
auto result : op->getResults()) {
1390 for (
auto *user : result.getUsers()) {
1391 if (user != opInst) {
1407 sliceOps->reserve(composedMap.getNumResults());
1408 for (
auto resultExpr : composedMap.getResults()) {
1410 composedMap.getNumSymbols(), resultExpr);
1411 sliceOps->push_back(builder.
create<AffineApplyOp>(
1412 opInst->
getLoc(), singleResMap, composedOpOperands));
1420 for (
unsigned i = 0, e = newOperands.size(); i < e; i++) {
1423 for (j = 0, f = subOperands.size(); j < f; j++) {
1424 if (newOperands[i] == subOperands[j])
1427 if (j < subOperands.size()) {
1428 newOperands[i] = (*sliceOps)[j];
1431 for (
unsigned idx = 0, e = newOperands.size(); idx < e; idx++) {
1455 SmallVectorImpl<std::tuple<AffineExpr, unsigned, unsigned>> &tileSizePos) {
1467 floordivExprs.emplace_back(
1468 std::make_tuple(binaryExpr.
getLHS(), binaryExpr.
getRHS(), pos));
1473 if (floordivExprs.empty()) {
1480 for (std::tuple<AffineExpr, AffineExpr, unsigned> fexpr : floordivExprs) {
1481 AffineExpr floordivExprLHS = std::get<0>(fexpr);
1482 AffineExpr floordivExprRHS = std::get<1>(fexpr);
1483 unsigned floordivPos = std::get<2>(fexpr);
1495 bool notTiled =
false;
1496 if (pos != floordivPos) {
1498 if (e == floordivExprLHS) {
1502 if (floordivExprLHS == binaryExpr.
getLHS() &&
1503 floordivExprRHS == binaryExpr.
getRHS()) {
1507 tileSizePos.emplace_back(
1508 std::make_tuple(binaryExpr.
getRHS(), floordivPos, pos));
1555 bool isDynamicDim =
false;
1559 expr.
walk([&inMemrefTypeDynDims, &isDynamicDim, &context](
AffineExpr e) {
1561 for (unsigned dm : inMemrefTypeDynDims) {
1562 if (e == getAffineDimExpr(dm, context)) {
1563 isDynamicDim = true;
1568 return isDynamicDim;
1582 newMapOutput = binaryExpr.
getRHS();
1590 newMapOutput = oldMapOutput;
1592 return newMapOutput;
1628 MemRefType newMemRefType,
AffineMap map,
1634 unsigned dynIdx = 0;
1635 for (
unsigned d = 0; d < oldMemRefType.getRank(); ++d) {
1636 if (oldMemRefShape[d] < 0) {
1638 inAffineApply.emplace_back(allocOp->dynamicSizes()[dynIdx]);
1644 inAffineApply.emplace_back(
1645 b.
create<arith::ConstantOp>(allocOp->getLoc(), constantAttr));
1651 unsigned newDimIdx = 0;
1656 if (newMemRefShape[newDimIdx] < 0) {
1659 for (
auto pos : tileSizePos) {
1660 if (newDimIdx == std::get<1>(pos))
1662 else if (newDimIdx == std::get<2>(pos))
1669 b.
create<AffineApplyOp>(allocOp->getLoc(), newMap, inAffineApply);
1670 newDynamicSizes.emplace_back(affineApp);
1678 MemRefType memrefType = allocOp->getType();
1683 MemRefType newMemRefType =
1685 if (newMemRefType == memrefType)
1690 Value oldMemRef = allocOp->getResult();
1693 AffineMap layoutMap = memrefType.getLayout().getAffineMap();
1694 memref::AllocOp newAlloc;
1699 if (newMemRefType.getNumDynamicDims() > 0 && !tileSizePos.empty()) {
1700 MemRefType oldMemRefType = oldMemRef.
getType().
cast<MemRefType>();
1706 b.
create<memref::AllocOp>(allocOp->getLoc(), newMemRefType,
1707 newDynamicSizes, allocOp->alignmentAttr());
1709 newAlloc = b.
create<memref::AllocOp>(allocOp->getLoc(), newMemRefType,
1710 allocOp->alignmentAttr());
1728 return isa<memref::DeallocOp>(op);
1736 unsigned numSymbolicOperands) {
1737 unsigned rank = memrefType.getRank();
1741 if (memrefType.getLayout().isIdentity()) {
1746 AffineMap layoutMap = memrefType.getLayout().getAffineMap();
1756 if (memrefType.getNumDynamicDims() > 0 && tileSizePos.empty())
1765 for (
unsigned d = 0; d < rank; ++d) {
1771 memrefTypeDynDims.emplace_back(d);
1783 for (
unsigned d = 0; d < newRank; ++d) {
1786 d, layoutMap, memrefTypeDynDims, b.
getContext());
1794 assert(ubConst &&
"should always have an upper bound");
1795 if (ubConst.getValue() < 0)
1799 newShape[d] = ubConst.getValue() + 1;
1804 MemRefType newMemRefType =
1809 return newMemRefType;
Affine binary operation expression.
DependenceResult checkMemrefAccessDependence(const MemRefAccess &srcAccess, const MemRefAccess &dstAccess, unsigned loopDepth, FlatAffineValueConstraints *dependenceConstraints, SmallVector< DependenceComponent, 2 > *dependenceComponents, bool allowRAR=false)
TODO: Remove this file when SCCP and integer range analysis have been ported to the new framework...
unsigned getNumLocalVars() const
This class contains a list of basic blocks and a link to the parent operation it is attached to...
MLIRContext * getContext() const
Optional< SmallVector< Value, 8 > > expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap, ValueRange operands)
Create a sequence of operations that implement the affineMap applied to the given operands (as it it ...
RHS of mod is always a constant or a symbolic expression with a positive value.
AffineMap getMultiDimIdentityMap(unsigned rank)
Block * getInsertionBlock() const
Return the block the current insertion point belongs to.
Operation is a basic unit of execution within MLIR.
static AffineExpr createDimSizeExprForTiledLayout(AffineExpr oldMapOutput, TileExprPattern pat)
Create affine expr to calculate dimension size for a tiled-layout map.
Attribute getValue() const
Return the value of the attribute.
unsigned getNumSymbols() const
bool isAncestor(Operation *other)
Return true if this operation is an ancestor of the other operation.
unsigned getNumDims() const
operand_range getOperands()
Returns an iterator on the underlying Value's.
void setOperands(ValueRange operands)
Replace the current operands of this operation with the ones provided in 'operands'.
This class represents a frozen set of patterns that can be processed by a pattern applicator...
Block represents an ordered list of Operations.
Value getOperand(unsigned idx)
LogicalResult applyOpPatternsAndFold(Operation *op, const FrozenRewritePatternSet &patterns, bool *erased=nullptr)
Applies the specified patterns on op alone while also trying to fold it, by selecting the highest ben...
static void findUnusedStore(AffineWriteOpInterface writeA, SmallVectorImpl< Operation *> &opsToErase, PostDominanceInfo &postDominanceInfo)
Operation * clone(Operation &op, BlockAndValueMapping &mapper)
Creates a deep copy of the specified operation, remapping any operands that use values outside of the...
ArrayRef< NamedAttribute > getAttrs()
Return all of the attributes on this operation.
Value getOperand(unsigned idx)
OpListType & getOperations()
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value...
unsigned getNumOperands()
static Type getElementType(Type type, ArrayRef< int32_t > indices, function_ref< InFlightDiagnostic(StringRef)> emitErrorFn)
Walks the given type hierarchy with the given indices, potentially down to component granularity...
AffineExpr getResult(unsigned i)
LogicalResult promoteIfSingleIteration(AffineForOp forOp)
Promotes the loop body of a AffineForOp to its containing block if the loop was known to have a singl...
A class for computing basic dominance information.
bool succeeded(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a success value...
Operation * getOperation()
Return the operation that this refers to.
This trait indicates that the side effects of an operation includes the effects of operations nested ...
unsigned getPosition() const
Block * getBlock()
Returns the operation block that contains this operation.
static void difference(const AffineValueMap &a, const AffineValueMap &b, AffineValueMap *res)
Return the value map that is the difference of value maps 'a' and 'b', represented as an affine map a...
Checks whether two accesses to the same memref access the same element.
user_range getUsers() const
unsigned getNumCommonSurroundingLoops(Operation &a, Operation &b)
Returns the number of surrounding loops common to both A and B.
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
An integer constant appearing in affine expression.
LogicalResult normalizeMemRef(memref::AllocOp *op)
Rewrites the memref defined by this alloc op to have an identity layout map and updates all its index...
void replaceAllUsesWith(Value newValue) const
Replace all uses of 'this' value with the new value, updating anything in the IR that uses 'this' to ...
static constexpr const bool value
void erase()
Remove this operation from its parent block and delete it.
static OpBuilder atBlockBegin(Block *block, Listener *listener=nullptr)
Create a builder and set the insertion point to before the first operation in the block but still ins...
SmallVector< Value, 4 > operands
void walk(std::function< void(AffineExpr)> callback) const
Walk all of the AffineExpr's in this expression in postorder.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
AffineExpr getRHS() const
NamedAttribute represents a combination of a name and an Attribute value.
AffineExpr getResult(unsigned idx) const
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent inser...
Base class for AffineExpr visitors/walkers.
static void promoteIfBlock(AffineIfOp ifOp, bool elseBlock)
Promotes the then or the else block of ifOp (depending on whether elseBlock is false or true) into if...
unsigned getNumInputs() const
Region * getParentRegion()
Return the region containing this region or nullptr if the region is attached to a top-level operatio...
TileExprPattern
Enum to set patterns of affine expr in tiled-layout map.
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
This class represents an efficient way to signal success or failure.
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
bool isValidDim(Value value)
Returns true if the given Value can be used as a dimension id in the region of the closest surroundin...
unsigned getNumOperands()
OpListType::iterator iterator
void clear()
Clears all mappings held by the mapper.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
arith::AtomicRMWKind kind
Reduction kind.
AffineExpr getLHS() const
IntegerAttr getIntegerAttr(Type type, int64_t value)
bool hasDependence(DependenceResult result)
Utility function that returns true if the provided DependenceResult corresponds to a dependence resul...
bool isValidSymbol(Value value)
Returns true if the given value can be used as a symbol in the region of the closest surrounding op t...
Attributes are known-constant values of operations.
unsigned getNumVars() const
int64_t ceilDiv(int64_t lhs, int64_t rhs)
Returns the result of MLIR's ceildiv operation on constants.
void fullyComposeAffineMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands)
Given an affine map map and its input operands, this method composes into map, maps of AffineApplyOps...
AffineExpr substWithMin(AffineExpr e, AffineExpr dim, AffineExpr min, AffineExpr max, bool positivePath=true)
Traverse e and return an AffineExpr where all occurrences of dim have been replaced by either: ...
static void createNewDynamicSizes(MemRefType oldMemRefType, MemRefType newMemRefType, AffineMap map, memref::AllocOp *allocOp, OpBuilder b, SmallVectorImpl< Value > &newDynamicSizes)
Create new maps to calculate each dimension size of newMemRefType, and create newDynamicSizes from th...
Operation * getParentOp()
Returns the closest surrounding operation that contains this operation or nullptr if this is a top-le...
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
void replaceAllUsesWith(ValuesT &&values)
Replace all uses of results of this operation with the provided 'values'.
StringAttr getName() const
Return the name of the attribute.
Base type for affine expression.
void canonicalizeMapAndOperands(AffineMap *map, SmallVectorImpl< Value > *operands)
Modifies both map and operands in-place so as to:
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
static WalkResult advance()
unsigned getNumResults() const
Location getLoc()
The source location the operation was defined or derived from.
This represents an operation in an abstracted form, suitable for use with the builder APIs...
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued...
bool isForInductionVar(Value val)
Returns true if the provided value is the induction variable of a AffineForOp.
static WalkResult interrupt()
AffineBound represents a lower or upper bound in the for operation.
This class represents an argument of a Block.
RHS of floordiv is always a constant or a symbolic expression.
AffineExpr ceilDiv(uint64_t v) const
ArrayRef< AffineExpr > getResults() const
Eliminates variable at the specified position using Fourier-Motzkin variable elimination.
void setOperand(unsigned idx, Value value)
AffineExpr getAffineBinaryOpExpr(AffineExprKind kind, AffineExpr lhs, AffineExpr rhs)
static LogicalResult getTileSizePos(AffineMap map, SmallVectorImpl< std::tuple< AffineExpr, unsigned, unsigned >> &tileSizePos)
Check if map is a tiled layout.
LogicalResult hoistAffineIfOp(AffineIfOp ifOp, bool *folded=nullptr)
Hoists out affine.if/else to as high as possible, i.e., past all invariant affine.fors/parallel's.
RHS of ceildiv is always a constant or a symbolic expression.
static Value min(ImplicitLocOpBuilder &builder, Value value, Value bound)
void projectOut(Value val)
Projects out the variable that is associate with Value.
unsigned getPosition() const
LogicalResult normalizeAffineFor(AffineForOp op)
Normalize an affine.for op.
unsigned getNumDims() const
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
MemRefType normalizeMemRefType(MemRefType memrefType, OpBuilder builder, unsigned numSymbolicOperands)
Uses the old memref type map layout and computes the new memref type to have a new shape and a layout...
static void loadCSE(AffineReadOpInterface loadA, SmallVectorImpl< Operation *> &loadOpsToErase, DominanceInfo &domInfo)
static LogicalResult forwardStoreToLoad(AffineReadOpInterface loadOp, SmallVectorImpl< Operation *> &loadOpsToErase, SmallPtrSetImpl< Value > &memrefsToErase, DominanceInfo &domInfo)
Attempt to eliminate loadOp by replacing it with a value stored into memory which the load is guarant...
bool isTopLevelValue(Value value)
TODO: These should be renamed if they are on the mlir namespace.
operand_iterator operand_begin()
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
AffineExpr getAffineConstantExpr(int64_t constant)
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
LogicalResult affineParallelize(AffineForOp forOp, ArrayRef< LoopReduction > parallelReductions={})
Replaces a parallel affine.for op with a 1-d affine.parallel op.
AffineMap simplifyAffineMap(AffineMap map)
Simplifies an affine map by simplifying its underlying AffineExpr results.
Type getType() const
Return the type of this value.
FlatAffineValueConstraints represents an extension of IntegerPolyhedron where each non-local variable...
This class provides the API for ops that are known to be isolated from above.
static bool hasNoInterveningEffect(Operation *start, T memOp)
Ensure that all operations that could be executed after start (noninclusive) and prior to memOp (e...
This is a builder type that keeps local references to arguments.
type_range getTypes() const
A dimensional identifier appearing in an affine expression.
Specialization of arith.constant op that returns an integer of index type.
BoolAttr getBoolAttr(bool value)
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
bool dominates(Operation *a, Operation *b) const
Return true if operation A dominates operation B, i.e.
MLIRContext is the top-level object for a collection of MLIR operations.
AffineExpr getAffineDimExpr(unsigned position)
This class implements the operand iterators for the Operation class.
Encapsulates a memref load or store access information.
void createAffineComputationSlice(Operation *opInst, SmallVectorImpl< AffineApplyOp > *sliceOps)
Given an operation, inserts one or more single result affine apply operations, results of which are e...
Region * getParentRegion()
Returns the region to which the instruction belongs.
unsigned getNumSymbols() const
AffineMap getConstantAffineMap(int64_t val)
Returns a single constant result affine map with 0 dimensions and 0 symbols.
void normalizeAffineParallel(AffineParallelOp op)
Normalize an affine.parallel op so that lower bounds are 0 and steps are 1.
void affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo, PostDominanceInfo &postDomInfo)
Replace affine store and load accesses by scalars by forwarding stores to loads and eliminate invaria...
A description of a (parallelizable) reduction in an affine loop.
unsigned getNumResults()
Return the number of results held by this operation.
An AffineValueMap is an affine map plus its ML value operands and results for analysis purposes...
static bool isNormalizedMemRefDynamicDim(unsigned dim, AffineMap layoutMap, SmallVectorImpl< unsigned > &inMemrefTypeDynDims, MLIRContext *context)
Check whether dimension dim of memrefType with layout map layoutMap becomes dynamic after normalization.
LogicalResult composeMatchingMap(AffineMap other)
Composes an affine map whose dimensions and symbols match one to one with the dimensions and symbols ...
Value value
The value being reduced.
void getReachableAffineApplyOps(ArrayRef< Value > operands, SmallVectorImpl< Operation *> &affineApplyOps)
Returns in affineApplyOps, the sequence of those AffineApplyOp Operations that are reachable via a se...
Builder & setLayout(MemRefLayoutAttrInterface newLayout)
bool postDominates(Operation *a, Operation *b)
Return true if operation A postdominates operation B.
operand_iterator operand_end()
SuccessorRange getSuccessors()
Block::iterator getInsertionPoint() const
Returns the current insertion point of the builder.
OperationName getName()
The name of an operation is the key identifier for it.
Value expandAffineExpr(OpBuilder &builder, Location loc, AffineExpr expr, ValueRange dimValues, ValueRange symbolValues)
Emit code that computes the given affine expression using standard arithmetic operations applied to t...
static void visit(Operation *op, DenseSet< Operation *> &visited)
Visits all the pdl.operand(s), pdl.result(s), and pdl.operation(s) connected to the given operation...
ArrayRef< Value > getOperands() const
LogicalResult applyPatternsAndFoldGreedily(MutableArrayRef< Region > regions, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig())
Rewrite the regions of the specified operation, which must be isolated from above, by repeatedly applying the highest benefit patterns in a greedy work-list driven manner.
result_range getResults()
static Operation * getOutermostInvariantForOp(AffineIfOp ifOp)
Returns the outermost affine.for/parallel op that the ifOp is invariant on.
This class helps build Operations.
This class provides an abstraction over the different types of ranges over Values.
StringAttr getStringAttr(const Twine &bytes)
Builder & setShape(ArrayRef< int64_t > newShape)
A class for computing basic postdominance information.
Optional< int64_t > getConstantBound(BoundType type, unsigned pos) const
Returns the constant bound for the pos^th variable if there is one; None otherwise.
operand_range getOperands()
bool isAncestor(Region *other)
Return true if this region is ancestor of the other region.
static Value max(ImplicitLocOpBuilder &builder, Value value, Value bound)
A symbolic identifier appearing in an affine expression.
LogicalResult addBound(BoundType type, unsigned pos, AffineMap boundMap, bool isClosedBound)
Adds a bound for the variable at the specified position with constraints being drawn from the specifi...
LogicalResult replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, ArrayRef< Value > extraIndices={}, AffineMap indexRemap=AffineMap(), ArrayRef< Value > extraOperands={}, ArrayRef< Value > symbolOperands={}, Operation *domOpFilter=nullptr, Operation *postDomOpFilter=nullptr, bool allowNonDereferencingOps=false, bool replaceInDeallocOp=false)
Replaces all "dereferencing" uses of oldMemRef with newMemRef while optionally remapping the old memr...
void replaceAllUsesExcept(Value newValue, const SmallPtrSetImpl< Operation *> &exceptions) const
Replace all uses of 'this' value with 'newValue', updating anything in the IR that uses 'this' to use...