#include "llvm/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
#include <functional>
#include <limits>
#include <numeric>
#include <optional>
#include <type_traits>

using namespace mlir;
using namespace mlir::affine;

#define DEBUG_TYPE "affine-loop-analysis"
namespace {

/// A directed graph to model relationships between MLIR operations.
class DirectedOpGraph {
public:
  /// Adds a node to the graph.
  void addNode(Operation *op) {
    assert(!hasNode(op) && "node already added");
    nodes.emplace_back(op);
    edges[op] = {};
  }

  /// Adds an edge from `src` to `dest`.
  void addEdge(Operation *src, Operation *dest) {
    // This is a multi-graph.
    assert(hasNode(src) && "src node does not exist in graph");
    assert(hasNode(dest) && "dest node does not exist in graph");
    edges[src].push_back(getNode(dest));
  }

  /// Returns true if the graph has a (directed) cycle.
  bool hasCycle() { return dfs(/*cycleCheck=*/true); }

  void printEdges() {
    for (auto &en : edges) {
      llvm::dbgs() << *en.first << " (" << en.first << ")"
                   << " has " << en.second.size() << " edges:\n";
      for (auto *node : en.second)
        llvm::dbgs() << '\t' << *node->op << '\n';
    }
  }

private:
  /// A node of this directed graph.
  struct DGNode {
    DGNode(Operation *op) : op(op) {}
    Operation *op;

    /// Start visit number (0 means not yet visited).
    int vn = -1;

    /// Finish visit number (-1 until the node's visit completes).
    int fn = -1;
  };

  /// Returns the internal node corresponding to `op`.
  DGNode *getNode(Operation *op) {
    auto *value =
        llvm::find_if(nodes, [&](const DGNode &node) { return node.op == op; });
    assert(value != nodes.end() && "node doesn't exist in graph");
    return &*value;
  }

  /// Returns true if `key` is a node in the graph.
  bool hasNode(Operation *key) const {
    return llvm::find_if(nodes, [&](const DGNode &node) {
             return node.op == key;
           }) != nodes.end();
  }

  /// Performs a depth-first traversal, assigning start and finish visit
  /// numbers. If `cycleCheck` is set, returns true as soon as the first cycle
  /// is detected and false if there is none; otherwise, the return value has
  /// no meaning.
  bool dfs(bool cycleCheck = false) {
    for (DGNode &node : nodes) {
      node.vn = 0;
      node.fn = -1;
    }
    unsigned time = 0;
    for (DGNode &node : nodes) {
      if (node.vn == 0) {
        bool ret = dfsNode(node, cycleCheck, time);
        // Check if a cycle was already found.
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && node.fn == -1) {
        // A node whose visit has started but not finished: we have a cycle.
        return true;
      }
    }
    return false;
  }

  /// Performs a depth-first traversal starting at `node`, returning true as
  /// soon as a cycle is found if `cycleCheck` is set. Updates `time`.
  bool dfsNode(DGNode &node, bool cycleCheck, unsigned &time) const {
    auto nodeEdges = edges.find(node.op);
    assert(nodeEdges != edges.end() && "missing node in graph");
    node.vn = ++time;

    for (auto &neighbour : nodeEdges->second) {
      if (neighbour->vn == 0) {
        bool ret = dfsNode(*neighbour, cycleCheck, time);
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && neighbour->fn == -1) {
        // A neighbour whose visit has started but not finished: a cycle.
        return true;
      }
    }

    // Set the finish visit number.
    node.fn = ++time;
    return false;
  }

  /// The list of nodes; storage is owned by this class.
  SmallVector<DGNode> nodes;

  /// Edges as an adjacency list.
  DenseMap<Operation *, SmallVector<DGNode *>> edges;
};

} // namespace
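// Illustrative usage sketch (not part of the original source): detecting a
// two-node cycle, assuming `opA` and `opB` are Operation pointers already
// known to the caller.
//
//   DirectedOpGraph graph;
//   graph.addNode(opA);
//   graph.addNode(opB);
//   graph.addEdge(opA, opB);
//   graph.addEdge(opB, opA);
//   assert(graph.hasCycle() && "opA <-> opB forms a 2-cycle");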
/// Returns the trip count of the loop as an affine map, along with its
/// corresponding operands, if the count is expressible as an affine
/// expression. The trip count expression is simplified before returning.
void mlir::affine::getTripCountMapAndOperands(
    AffineForOp forOp, AffineMap *tripCountMap,
    SmallVectorImpl<Value> *tripCountOperands) {
  MLIRContext *context = forOp.getContext();
  int64_t step = forOp.getStepAsInt();
  if (forOp.hasConstantBounds()) {
    int64_t lb = forOp.getConstantLowerBound();
    int64_t ub = forOp.getConstantUpperBound();
    int64_t loopSpan = ub - lb;
    if (loopSpan < 0)
      loopSpan = 0;
    *tripCountMap = AffineMap::getConstantMap(
        llvm::divideCeilSigned(loopSpan, step), context);
    tripCountOperands->clear();
    return;
  }
  auto lbMap = forOp.getLowerBoundMap();
  auto ubMap = forOp.getUpperBoundMap();
  if (lbMap.getNumResults() != 1) {
    *tripCountMap = AffineMap();
    return;
  }

  // The difference of each upper bound expression from the single lower bound
  // expression (divided by the step) provides the expressions for the trip
  // count map.
  AffineValueMap ubValueMap(ubMap, forOp.getUpperBoundOperands());

  auto lbSplatExpr = lbMap.getResult(0);
  auto lbMapSplat = AffineMap::get(lbMap.getNumDims(), lbMap.getNumSymbols(),
                                   lbSplatExpr, context);
  AffineValueMap lbSplatValueMap(lbMapSplat, forOp.getLowerBoundOperands());

  AffineValueMap tripCountValueMap;
  AffineValueMap::difference(ubValueMap, lbSplatValueMap, &tripCountValueMap);
  for (unsigned i = 0, e = tripCountValueMap.getNumResults(); i < e; ++i)
    tripCountValueMap.setResult(i,
                                tripCountValueMap.getResult(i).ceilDiv(step));

  *tripCountMap = tripCountValueMap.getAffineMap();
  tripCountOperands->assign(tripCountValueMap.getOperands().begin(),
                            tripCountValueMap.getOperands().end());
}
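// Worked example (illustrative): for `affine.for %i = 0 to 9 step 2`, the
// bounds are constant, so the trip count map is the constant map () -> (5),
// i.e., ceildiv(9 - 0, 2), with no operands. For `affine.for %i = 0 to %n`,
// the map is (d0) -> (d0) with the single operand %n: the upper bound minus
// the zero lower bound, ceil-divided by the unit step.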
/// Returns the trip count of the loop if it's a constant, std::nullopt
/// otherwise.
std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
  SmallVector<Value, 4> operands;
  AffineMap map;
  getTripCountMapAndOperands(forOp, &map, &operands);
  if (!map)
    return std::nullopt;

  // Take the min if all trip counts are constant.
  std::optional<uint64_t> tripCount;
  for (auto resultExpr : map.getResults()) {
    if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
      if (tripCount.has_value())
        tripCount =
            std::min(*tripCount, static_cast<uint64_t>(constExpr.getValue()));
      else
        tripCount = constExpr.getValue();
    } else {
      return std::nullopt;
    }
  }
  return tripCount;
}
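// Usage sketch (illustrative): callers typically branch on whether the trip
// count is statically known, e.g. in an unrolling heuristic (`unrollFactor`
// is a hypothetical parameter):
//
//   if (std::optional<uint64_t> tc = getConstantTripCount(forOp))
//     if (*tc <= unrollFactor)
//       ...;  // e.g., fully unroll this loop.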
/// Returns the greatest known integral divisor of the trip count. Affine
/// expression analysis is used (indirectly through getTripCount), so this
/// method can determine non-trivial divisors.
uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
  SmallVector<Value, 4> operands;
  AffineMap map;
  getTripCountMapAndOperands(forOp, &map, &operands);
  if (!map)
    return 1;

  // The largest divisor of the trip count is the GCD of the individual
  // largest divisors.
  assert(map.getNumResults() >= 1 && "expected one or more results");
  std::optional<uint64_t> gcd;
  for (auto resultExpr : map.getResults()) {
    uint64_t thisGcd;
    if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
      uint64_t tripCount = constExpr.getValue();
      // 0-iteration loops (greatest divisor is 2^64 - 1).
      if (tripCount == 0)
        thisGcd = std::numeric_limits<uint64_t>::max();
      else
        // The greatest divisor is the trip count.
        thisGcd = tripCount;
    } else {
      // Trip count is not a known constant; return its largest known divisor.
      thisGcd = resultExpr.getLargestKnownDivisor();
    }
    if (gcd.has_value())
      gcd = std::gcd(*gcd, thisGcd);
    else
      gcd = thisGcd;
  }
  assert(gcd.has_value() && "value expected per above logic");
  return *gcd;
}
/// Given an affine.for `iv` and an access `index` of 'index' type, returns
/// true if `index` is independent of `iv` and false otherwise.
static bool isAccessIndexInvariant(Value iv, Value index) {
  assert(isAffineForInductionVar(iv) && "iv must be an affine.for iv");
  assert(isa<IndexType>(index.getType()) && "index must be of 'index' type");
  auto map = AffineMap::getMultiDimIdentityMap(/*numDims=*/1, iv.getContext());
  SmallVector<Value> operands = {index};
  AffineValueMap avm(map, operands);
  avm.composeSimplifyAndCanonicalize();
  return !avm.isFunctionOf(/*idx=*/0, iv);
}
/// Returns true if the memory access of `memOp` is invariant on `forOp`'s
/// induction variable. Pre-requisite: loop bounds in canonical form.
template <typename LoadOrStoreOp>
bool mlir::affine::isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp) {
  AffineValueMap avm(memOp.getAffineMap(), memOp.getMapOperands());
  avm.composeSimplifyAndCanonicalize();
  return !llvm::is_contained(avm.getOperands(), forOp.getInductionVar());
}
/// Given an induction variable `iv` and `indices` of 'index' type, returns
/// the set of `indices` that are independent of `iv`.
DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
                                                   ArrayRef<Value> indices) {
  DenseSet<Value> res;
  for (Value index : indices) {
    if (isAccessIndexInvariant(iv, index))
      res.insert(index);
  }
  return res;
}
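// Example (illustrative): for an `affine.for %i` whose body contains
// `affine.load %A[%i, %j]`, getInvariantAccesses(%i, {%i, %j}) returns the
// set {%j}, since %j does not depend on the induction variable %i.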
/// Given the induction variable `iv` of an affine.for and a `memoryOp`,
/// determines whether the access is contiguous along `iv`: either invariant
/// on `iv` or varying along a single unique memref dimension. On success,
/// that dimension (counted from the innermost, or -1 for an invariant
/// access) is written to `*memRefDim`.
// TODO: check access stride.
template <typename LoadOrStoreOp>
bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
                                      int *memRefDim) {
  static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
                                AffineWriteOpInterface>::value,
                "Must be called on either an affine read or write op");
  assert(memRefDim && "memRefDim == nullptr");
  auto memRefType = memoryOp.getMemRefType();

  if (!memRefType.getLayout().isIdentity())
    return memoryOp.emitError("NYI: non-trivial layout map"), false;

  int uniqueVaryingIndexAlongIv = -1;
  auto accessMap = memoryOp.getAffineMap();
  SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
  unsigned numDims = accessMap.getNumDims();
  for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
    // Gather map operands used in result expression 'i' in 'exprOperands'.
    SmallVector<Value, 4> exprOperands;
    auto resultExpr = accessMap.getResult(i);
    resultExpr.walk([&](AffineExpr expr) {
      if (auto dimExpr = dyn_cast<AffineDimExpr>(expr))
        exprOperands.push_back(mapOperands[dimExpr.getPosition()]);
      else if (auto symExpr = dyn_cast<AffineSymbolExpr>(expr))
        exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
    });
    // Check access invariance of each operand in 'exprOperands'.
    for (Value exprOperand : exprOperands) {
      if (!isAccessIndexInvariant(iv, exprOperand)) {
        if (uniqueVaryingIndexAlongIv != -1) {
          // 2+ varying indices -> do not vectorize along iv.
          return false;
        }
        uniqueVaryingIndexAlongIv = i;
      }
    }
  }

  if (uniqueVaryingIndexAlongIv == -1)
    *memRefDim = -1;
  else
    *memRefDim = memRefType.getRank() - (uniqueVaryingIndexAlongIv + 1);
  return true;
}
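// Example (illustrative): inside `affine.for %i` over a memref<4x8xf32> %A,
// an access %A[%j, %i] varies with %i only in the innermost (fastest-varying)
// dimension, so *memRefDim is set to 2 - (1 + 1) = 0; %A[%i, %j] yields
// *memRefDim = 1; and %A[%i, %i] returns false since two indices vary with
// %i.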
template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineReadOpInterface loadOp,
                                               int *memRefDim);
template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineWriteOpInterface storeOp,
                                               int *memRefDim);
template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) {
  auto memRefType = memoryOp.getMemRefType();
  return isa<VectorType>(memRefType.getElementType());
}

using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;
static bool
isVectorizableLoopBodyWithOpCond(AffineForOp loop,
                                 const VectorizableOpFun &isVectorizableOp,
                                 NestedPattern &vectorTransferMatcher) {
  auto *forOp = loop.getOperation();

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  SmallVector<NestedMatch, 8> conditionalsMatched;
  conditionals.match(forOp, &conditionalsMatched);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // No vectorization for ops with operand or result types that are not
  // vectorizable.
  auto types = matcher::Op([](Operation &op) -> bool {
    if (llvm::any_of(op.getOperandTypes(), [](Type type) {
          if (MemRefType t = dyn_cast<MemRefType>(type))
            return !VectorType::isValidElementType(t.getElementType());
          return !VectorType::isValidElementType(type);
        }))
      return true;
    return llvm::any_of(op.getResultTypes(), [](Type type) {
      return !VectorType::isValidElementType(type);
    });
  });
  SmallVector<NestedMatch, 8> opsMatched;
  types.match(forOp, &opsMatched);
  if (!opsMatched.empty()) {
    return false;
  }

  // No vectorization across unknown regions.
  auto regions = matcher::Op([](Operation &op) -> bool {
    return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
  });
  SmallVector<NestedMatch, 8> regionsMatched;
  regions.match(forOp, &regionsMatched);
  if (!regionsMatched.empty()) {
    return false;
  }

  // No vectorization if the loop already contains vector transfers.
  SmallVector<NestedMatch, 8> vectorTransfersMatched;
  vectorTransferMatcher.match(forOp, &vectorTransfersMatched);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  SmallVector<NestedMatch, 8> loadAndStoresMatched;
  loadAndStores.match(forOp, &loadAndStoresMatched);
  for (auto ls : loadAndStoresMatched) {
    auto *op = ls.getMatchedOperation();
    auto load = dyn_cast<AffineLoadOp>(op);
    auto store = dyn_cast<AffineStoreOp>(op);
    // Only scalar element types are considered vectorizable; all loads and
    // stores must be vectorizable for the loop to qualify.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    if (isVectorizableOp && !isVectorizableOp(loop, *op)) {
      return false;
    }
  }
  return true;
}
/// Checks whether the loop is structurally vectorizable and, on success, sets
/// `*memRefDim` to the unique memref dimension all accesses vary along.
bool mlir::affine::isVectorizableLoopBody(
    AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
  *memRefDim = -1;
  VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
    auto load = dyn_cast<AffineLoadOp>(op);
    auto store = dyn_cast<AffineStoreOp>(op);
    int thisOpMemRefDim = -1;
    bool isContiguous =
        load ? isContiguousAccess(loop.getInductionVar(),
                                  cast<AffineReadOpInterface>(*load),
                                  &thisOpMemRefDim)
             : isContiguousAccess(loop.getInductionVar(),
                                  cast<AffineWriteOpInterface>(*store),
                                  &thisOpMemRefDim);
    if (thisOpMemRefDim != -1) {
      // If memory accesses vary across different dimensions then the loop is
      // not vectorizable.
      if (*memRefDim != -1 && *memRefDim != thisOpMemRefDim)
        return false;
      *memRefDim = thisOpMemRefDim;
    }
    return isContiguous;
  });
  return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
}

bool mlir::affine::isVectorizableLoopBody(
    AffineForOp loop, NestedPattern &vectorTransferMatcher) {
  return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
}
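// Usage sketch (illustrative): gating vectorization on contiguity along the
// fastest-varying dimension (`pattern` is a hypothetical NestedPattern that
// matches pre-existing vector transfer ops):
//
//   int memRefDim = -1;
//   if (isVectorizableLoopBody(loop, &memRefDim, pattern) && memRefDim == 0)
//     ...;  // all accesses contiguous along `loop`; safe to vectorize.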
/// Checks whether SSA dominance would be violated if a for op's body
/// operations are shifted by the specified shifts: a 'def' and all its uses
/// must have the same shift factor.
bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
                                      ArrayRef<uint64_t> shifts) {
  auto *forBody = forOp.getBody();
  assert(shifts.size() == forBody->getOperations().size());

  // Work backwards over the body of the block so that the shift of a use's
  // ancestor operation in the block gets recorded before it's looked up.
  DenseMap<Operation *, uint64_t> forBodyShift;
  for (const auto &it :
       llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
    auto &op = it.value();

    // Get the index of the current operation; we iterate in reverse, so fix
    // it up.
    size_t index = shifts.size() - it.index() - 1;

    // Remember the shift of this operation.
    uint64_t shift = shifts[index];
    forBodyShift.try_emplace(&op, shift);

    // Validate the results of this operation if it were to be shifted.
    for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
      Value result = op.getResult(i);
      for (auto *user : result.getUsers()) {
        // If an ancestor operation doesn't lie in the block of forOp, there
        // is no shift to check.
        if (auto *ancOp = forBody->findAncestorOpInBlock(*user)) {
          assert(forBodyShift.count(ancOp) > 0 && "ancestor expected in map");
          if (shift != forBodyShift[ancOp])
            return false;
        }
      }
    }
  }
  return true;
}
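// Example (illustrative): if one body op defines %v and another body op uses
// %v, both must receive the same shift; e.g. shifting the def by 0 and the
// use by 1 is rejected, while shifting both by 1 is valid.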
/// Checks whether hyper-rectangular loop tiling of the nest represented by
/// `loops` is valid.
bool mlir::affine::isTilingValid(ArrayRef<AffineForOp> loops) {
  assert(!loops.empty() && "no original loops provided");

  // We first find out all dependences we intend to check.
  SmallVector<Operation *, 8> loadAndStoreOps;
  loops[0]->walk([&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
      loadAndStoreOps.push_back(op);
  });

  unsigned numOps = loadAndStoreOps.size();
  unsigned numLoops = loops.size();
  for (unsigned d = 1; d <= numLoops + 1; ++d) {
    for (unsigned i = 0; i < numOps; ++i) {
      Operation *srcOp = loadAndStoreOps[i];
      MemRefAccess srcAccess(srcOp);
      for (unsigned j = 0; j < numOps; ++j) {
        Operation *dstOp = loadAndStoreOps[j];
        MemRefAccess dstAccess(dstOp);

        SmallVector<DependenceComponent, 2> depComps;
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, d, /*dependenceConstraints=*/nullptr,
            &depComps);
        if (!hasDependence(result))
          continue;

        // Check whether there is any negative direction vector in the
        // dependence components found above, which means the dependence is
        // violated by the default hyper-rectangular tiling method.
        LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality violated "
                                   "for dependence at depth: "
                                << Twine(d) << " between:\n";);
        LLVM_DEBUG(srcAccess.opInst->dump());
        LLVM_DEBUG(dstAccess.opInst->dump());
        for (const DependenceComponent &depComp : depComps) {
          if (depComp.lb.has_value() && depComp.ub.has_value() &&
              *depComp.lb < *depComp.ub && *depComp.ub < 0) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Dependence component lb = " << Twine(*depComp.lb)
                       << " ub = " << Twine(*depComp.ub)
                       << " is negative at depth: " << Twine(d)
                       << " and thus violates the legality rule.\n");
            return false;
          }
        }
      }
    }
  }
  return true;
}
/// Returns true if the affine nest rooted at `root` has a cyclic dependence
/// among its affine memory accesses.
bool mlir::affine::hasCyclicDependence(AffineForOp root) {
  // Collect all the memory accesses in the nest rooted at `root`.
  DirectedOpGraph graph;
  SmallVector<MemRefAccess> accesses;
  root->walk([&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
      accesses.emplace_back(op);
      graph.addNode(op);
    }
  });

  // Construct the dependence graph for all the collected accesses.
  unsigned rootDepth = getNestingDepth(root);
  for (const auto &accA : accesses) {
    for (const auto &accB : accesses) {
      if (accA.memref != accB.memref)
        continue;
      // Perform the dependence check at all depths from just inside `root`
      // through the innermost common loop body.
      unsigned numCommonLoops =
          getNumCommonSurroundingLoops(*accA.opInst, *accB.opInst);
      for (unsigned d = rootDepth + 1; d <= numCommonLoops + 1; ++d) {
        if (!noDependence(checkMemrefAccessDependence(accA, accB, d)))
          graph.addEdge(accA.opInst, accB.opInst);
      }
    }
  }
  return graph.hasCycle();
}
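// Example (illustrative): an accumulation such as
//   affine.for %i = 0 to 100 {
//     %v = affine.load %A[0] : memref<1xf32>
//     ...
//     affine.store %u, %A[0] : memref<1xf32>
//   }
// has a read-after-write dependence from the store to the load of a later
// iteration and a write-after-read dependence from the load to the store,
// so the access graph contains a cycle and this function returns true.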