//===- LoopAnalysis.cpp - Misc loop analysis routines --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements miscellaneous loop analysis routines.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"

#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "llvm/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
#include <limits>
#include <numeric>
#include <optional>
#include <type_traits>

#define DEBUG_TYPE "affine-loop-analysis"

using namespace mlir;
using namespace mlir::affine;

namespace {

/// A directed graph to model relationships between MLIR Operations.
class DirectedOpGraph {
public:
  /// Add a node to the graph.
  void addNode(Operation *op) {
    assert(!hasNode(op) && "node already added");
    nodes.emplace_back(op);
    edges[op] = {};
  }

  /// Add an edge from `src` to `dest`.
  void addEdge(Operation *src, Operation *dest) {
    // This is a multi-graph.
    assert(hasNode(src) && "src node does not exist in graph");
    assert(hasNode(dest) && "dest node does not exist in graph");
    edges[src].push_back(getNode(dest));
  }

  /// Returns true if there is a (directed) cycle in the graph.
  bool hasCycle() { return dfs(/*cycleCheck=*/true); }

  void printEdges() {
    for (auto &en : edges) {
      llvm::dbgs() << *en.first << " (" << en.first << ")"
                   << " has " << en.second.size() << " edges:\n";
      for (auto *node : en.second) {
        llvm::dbgs() << '\t' << *node->op << '\n';
      }
    }
  }

private:
  /// A node of a directed graph between MLIR Operations to model various
  /// relationships. This is meant to be used internally.
  struct DGNode {
    DGNode(Operation *op) : op(op) {}
    Operation *op;

    // Start and finish visit numbers are standard in DFS to implement things
    // like finding strongly connected components. These numbers are modified
    // during analyses on the graph and so seemingly const API methods will be
    // non-const.

    /// Start visit number.
    int vn = -1;

    /// Finish visit number.
    int fn = -1;
  };

  /// Get the internal node corresponding to `op`.
  DGNode *getNode(Operation *op) {
    auto *value =
        llvm::find_if(nodes, [&](const DGNode &node) { return node.op == op; });
    assert(value != nodes.end() && "node doesn't exist in graph");
    return &*value;
  }

  /// Returns true if `key` is in the graph.
  bool hasNode(Operation *key) const {
    return llvm::find_if(nodes, [&](const DGNode &node) {
             return node.op == key;
           }) != nodes.end();
  }

  /// Perform a depth-first traversal of the graph setting visited and finished
  /// numbers. If `cycleCheck` is set, detects cycles and returns true as soon
  /// as the first cycle is detected, and false if there are no cycles. If
  /// `cycleCheck` is not set, completes the DFS and the `return` value doesn't
  /// have a meaning.
  bool dfs(bool cycleCheck = false) {
    for (DGNode &node : nodes) {
      node.vn = 0;
      node.fn = -1;
    }

    unsigned time = 0;
    for (DGNode &node : nodes) {
      if (node.vn == 0) {
        bool ret = dfsNode(node, cycleCheck, time);
        // Check if a cycle was already found.
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && node.fn == -1) {
        // We have encountered a node whose visit has started but it's not
        // finished. So we have a cycle.
        return true;
      }
    }
    return false;
  }

  /// Perform depth-first traversal starting at `node`. Return true
  /// as soon as a cycle is found if `cycleCheck` was set. Update `time`.
  bool dfsNode(DGNode &node, bool cycleCheck, unsigned &time) const {
    auto nodeEdges = edges.find(node.op);
    assert(nodeEdges != edges.end() && "missing node in graph");
    node.vn = ++time;

    for (auto &neighbour : nodeEdges->second) {
      if (neighbour->vn == 0) {
        bool ret = dfsNode(*neighbour, cycleCheck, time);
        if (cycleCheck && ret)
          return true;
      } else if (cycleCheck && neighbour->fn == -1) {
        // We have encountered a node whose visit has started but it's not
        // finished. So we have a cycle.
        return true;
      }
    }

    // Update finish time.
    node.fn = ++time;

    return false;
  }

  // The list of nodes. The storage is owned by this class.
  SmallVector<DGNode> nodes;

  // Edges as an adjacency list.
  DenseMap<Operation *, SmallVector<DGNode *>> edges;
};

} // namespace
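
// Editorial usage sketch (not part of the upstream source): the class above is
// consumed by hasCyclicDependence() further below, roughly as:
//
//   DirectedOpGraph graph;
//   graph.addNode(opA);
//   graph.addNode(opB);
//   graph.addEdge(opA, opB);
//   graph.addEdge(opB, opA);
//   bool cyclic = graph.hasCycle(); // true: opA -> opB -> opA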

/// Returns the trip count of the loop as an affine map along with its
/// operands, if the trip count is expressible as an affine expression, and a
/// null map otherwise. The trip count expression is simplified before
/// returning. This method only utilizes map composition to construct lower and
/// upper bounds before computing the trip count expressions.
void mlir::affine::getTripCountMapAndOperands(
    AffineForOp forOp, AffineMap *tripCountMap,
    SmallVectorImpl<Value> *tripCountOperands) {
  MLIRContext *context = forOp.getContext();
  int64_t step = forOp.getStepAsInt();
  int64_t loopSpan;
  if (forOp.hasConstantBounds()) {
    int64_t lb = forOp.getConstantLowerBound();
    int64_t ub = forOp.getConstantUpperBound();
    loopSpan = ub - lb;
    if (loopSpan < 0)
      loopSpan = 0;
    *tripCountMap = AffineMap::getConstantMap(
        llvm::divideCeilSigned(loopSpan, step), context);
    tripCountOperands->clear();
    return;
  }
  auto lbMap = forOp.getLowerBoundMap();
  auto ubMap = forOp.getUpperBoundMap();
  if (lbMap.getNumResults() != 1) {
    *tripCountMap = AffineMap();
    return;
  }

  // The difference of each upper bound expression from the single lower bound
  // expression (divided by the step) provides the expressions for the trip
  // count map.
  AffineValueMap ubValueMap(ubMap, forOp.getUpperBoundOperands());

  SmallVector<AffineExpr, 4> lbSplatExpr(ubValueMap.getNumResults(),
                                         lbMap.getResult(0));
  auto lbMapSplat = AffineMap::get(lbMap.getNumDims(), lbMap.getNumSymbols(),
                                   lbSplatExpr, context);
  AffineValueMap lbSplatValueMap(lbMapSplat, forOp.getLowerBoundOperands());

  AffineValueMap tripCountValueMap;
  AffineValueMap::difference(ubValueMap, lbSplatValueMap, &tripCountValueMap);
  for (unsigned i = 0, e = tripCountValueMap.getNumResults(); i < e; ++i)
    tripCountValueMap.setResult(i,
                                tripCountValueMap.getResult(i).ceilDiv(step));

  *tripCountMap = tripCountValueMap.getAffineMap();
  tripCountOperands->assign(tripCountValueMap.getOperands().begin(),
                            tripCountValueMap.getOperands().end());
}
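
// Editorial example (not part of the upstream source): for
//   affine.for %i = 5 to %n step 3 { ... }
// the computed trip count map is roughly the simplified form of
//   ()[s0] -> ((s0 - 5) ceildiv 3)
// with %n as its operand (whether %n binds as a dim or a symbol depends on how
// the bound map was constructed). For constant bounds such as 0 to 10 step 3,
// the map is simply the constant 4.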

/// Returns the trip count of the loop if it's a constant, std::nullopt
/// otherwise. This method uses affine expression analysis (in turn using
/// getTripCount) and is able to determine constant trip count in non-trivial
/// cases.
std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
  SmallVector<Value, 4> operands;
  AffineMap map;
  getTripCountMapAndOperands(forOp, &map, &operands);

  if (!map)
    return std::nullopt;

  // Take the min if all trip counts are constant.
  std::optional<uint64_t> tripCount;
  for (auto resultExpr : map.getResults()) {
    if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
      if (tripCount.has_value())
        tripCount =
            std::min(*tripCount, static_cast<uint64_t>(constExpr.getValue()));
      else
        tripCount = constExpr.getValue();
    } else {
      return std::nullopt;
    }
  }
  return tripCount;
}
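
// Editorial example (not part of the upstream source): with a min-style upper
// bound such as
//   affine.for %i = 0 to min affine_map<(d0) -> (d0, 128)>(%n) { ... }
// the trip count map has two results; since the result involving %n is not
// constant, this returns std::nullopt. If every result were constant, the
// minimum of the constants would be returned.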

/// Returns the greatest known integral divisor of the trip count. Affine
/// expression analysis is used (indirectly through getTripCount), and
/// this method is thus able to determine non-trivial divisors.
uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
  SmallVector<Value, 4> operands;
  AffineMap map;
  getTripCountMapAndOperands(forOp, &map, &operands);

  if (!map)
    return 1;

  // The largest divisor of the trip count is the GCD of the individual largest
  // divisors.
  assert(map.getNumResults() >= 1 && "expected one or more results");
  std::optional<uint64_t> gcd;
  for (auto resultExpr : map.getResults()) {
    uint64_t thisGcd;
    if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
      uint64_t tripCount = constExpr.getValue();
      // 0-iteration loops (greatest divisor is 2^64 - 1).
      if (tripCount == 0)
        thisGcd = std::numeric_limits<uint64_t>::max();
      else
        // The greatest divisor is the trip count.
        thisGcd = tripCount;
    } else {
      // Trip count is not a known constant; use its largest known divisor.
      thisGcd = resultExpr.getLargestKnownDivisor();
    }
    if (gcd.has_value())
      gcd = std::gcd(*gcd, thisGcd);
    else
      gcd = thisGcd;
  }
  assert(gcd.has_value() && "value expected per above logic");
  return *gcd;
}
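
// Editorial example (not part of the upstream source): if the trip count map
// simplifies to
//   ()[s0] -> (s0 * 4)
// the trip count itself is unknown, but AffineExpr::getLargestKnownDivisor()
// returns 4 for the result expression, so e.g. unrolling by a factor of 4 is
// known to leave no cleanup iterations.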

/// Given an affine.for `iv` and an access `index` of type index, returns
/// `true` if `index` is independent of `iv` and false otherwise.
///
/// Prerequisite: `iv` and `index` must be of the proper types.
static bool isAccessIndexInvariant(Value iv, Value index) {
  assert(isAffineForInductionVar(iv) && "iv must be an affine.for iv");
  assert(isa<IndexType>(index.getType()) && "index must be of 'index' type");
  auto map = AffineMap::getMultiDimIdentityMap(/*numDims=*/1, iv.getContext());
  SmallVector<Value> operands = {index};
  AffineValueMap avm(map, operands);
  avm.composeSimplifyAndCanonicalize();
  return !avm.isFunctionOf(0, iv);
}

// Pre-requisite: loop bounds should be in canonical form.
template <typename LoadOrStoreOp>
bool mlir::affine::isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp) {
  AffineValueMap avm(memOp.getAffineMap(), memOp.getMapOperands());
  avm.composeSimplifyAndCanonicalize();
  return !llvm::is_contained(avm.getOperands(), forOp.getInductionVar());
}

// Explicitly instantiate the template so that the compiler knows we need them.
template bool mlir::affine::isInvariantAccess(AffineReadOpInterface,
                                              AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineWriteOpInterface,
                                              AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineLoadOp, AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineStoreOp, AffineForOp);

DenseSet<Value, DenseMapInfo<Value>>
mlir::affine::getInvariantAccesses(Value iv, ArrayRef<Value> indices) {
  DenseSet<Value> res;
  for (Value index : indices) {
    if (isAccessIndexInvariant(iv, index))
      res.insert(index);
  }
  return res;
}
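
// Editorial usage sketch (not part of the upstream source): for an access such
// as
//   affine.load %A[%iv, %j]
// getInvariantAccesses(%iv, {%iv, %j}) returns {%j}, assuming %j is not itself
// derived from %iv: only the second index is independent of the induction
// variable.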

// TODO: check access stride.
template <typename LoadOrStoreOp>
bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
                                      int *memRefDim) {
  static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
                                AffineWriteOpInterface>::value,
                "Must be called on either an affine read or write op");
  assert(memRefDim && "memRefDim == nullptr");
  auto memRefType = memoryOp.getMemRefType();

  if (!memRefType.getLayout().isIdentity())
    return memoryOp.emitError("NYI: non-trivial layout map"), false;

  int uniqueVaryingIndexAlongIv = -1;
  auto accessMap = memoryOp.getAffineMap();
  SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
  unsigned numDims = accessMap.getNumDims();
  for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
    // Gather map operands used in result expr 'i' in 'exprOperands'.
    SmallVector<Value, 4> exprOperands;
    auto resultExpr = accessMap.getResult(i);
    resultExpr.walk([&](AffineExpr expr) {
      if (auto dimExpr = dyn_cast<AffineDimExpr>(expr))
        exprOperands.push_back(mapOperands[dimExpr.getPosition()]);
      else if (auto symExpr = dyn_cast<AffineSymbolExpr>(expr))
        exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
    });
    // Check access invariance of each operand in 'exprOperands'.
    for (Value exprOperand : exprOperands) {
      if (!isAccessIndexInvariant(iv, exprOperand)) {
        if (uniqueVaryingIndexAlongIv != -1) {
          // 2+ varying indices -> do not vectorize along iv.
          return false;
        }
        uniqueVaryingIndexAlongIv = i;
      }
    }
  }

  if (uniqueVaryingIndexAlongIv == -1)
    *memRefDim = -1;
  else
    *memRefDim = memRefType.getRank() - (uniqueVaryingIndexAlongIv + 1);
  return true;
}
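
// Editorial example (not part of the upstream source): for a loop over %iv and
// an access
//   affine.load %A[%j, %iv] : memref<?x?xf32>
// only the last (fastest-varying) index depends on %iv, so *memRefDim is set
// to 0 (dimensions are counted from the fastest-varying one). If %iv instead
// appeared only in the first index, *memRefDim would be rank - 1 = 1; if it
// appeared in both indices, the function would return false.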

template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineReadOpInterface loadOp,
                                               int *memRefDim);
template bool mlir::affine::isContiguousAccess(Value iv,
                                               AffineWriteOpInterface storeOp,
                                               int *memRefDim);

template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) {
  auto memRefType = memoryOp.getMemRefType();
  return isa<VectorType>(memRefType.getElementType());
}

using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;

static bool
isVectorizableLoopBodyWithOpCond(AffineForOp loop,
                                 const VectorizableOpFun &isVectorizableOp,
                                 NestedPattern &vectorTransferMatcher) {
  auto *forOp = loop.getOperation();

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  SmallVector<NestedMatch, 8> conditionalsMatched;
  conditionals.match(forOp, &conditionalsMatched);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // No vectorization for ops with operand or result types that are not
  // vectorizable.
  auto types = matcher::Op([](Operation &op) -> bool {
    if (llvm::any_of(op.getOperandTypes(), [](Type type) {
          if (MemRefType t = dyn_cast<MemRefType>(type))
            return !VectorType::isValidElementType(t.getElementType());
          return !VectorType::isValidElementType(type);
        }))
      return true;
    return llvm::any_of(op.getResultTypes(), [](Type type) {
      return !VectorType::isValidElementType(type);
    });
  });
  SmallVector<NestedMatch, 8> opsMatched;
  types.match(forOp, &opsMatched);
  if (!opsMatched.empty()) {
    return false;
  }

  // No vectorization across unknown regions.
  auto regions = matcher::Op([](Operation &op) -> bool {
    return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
  });
  SmallVector<NestedMatch, 8> regionsMatched;
  regions.match(forOp, &regionsMatched);
  if (!regionsMatched.empty()) {
    return false;
  }

  SmallVector<NestedMatch, 8> vectorTransfersMatched;
  vectorTransferMatcher.match(forOp, &vectorTransfersMatched);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  SmallVector<NestedMatch, 8> loadAndStoresMatched;
  loadAndStores.match(forOp, &loadAndStoresMatched);
  for (auto ls : loadAndStoresMatched) {
    auto *op = ls.getMatchedOperation();
    auto load = dyn_cast<AffineLoadOp>(op);
    auto store = dyn_cast<AffineStoreOp>(op);
    // Only scalar types are considered vectorizable; all loads/stores must be
    // vectorizable for a loop to qualify as vectorizable.
    // TODO: ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    if (isVectorizableOp && !isVectorizableOp(loop, *op)) {
      return false;
    }
  }
  return true;
}

bool mlir::affine::isVectorizableLoopBody(
    AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
  *memRefDim = -1;
  VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
    auto load = dyn_cast<AffineLoadOp>(op);
    auto store = dyn_cast<AffineStoreOp>(op);
    int thisOpMemRefDim = -1;
    bool isContiguous =
        load ? isContiguousAccess(loop.getInductionVar(),
                                  cast<AffineReadOpInterface>(*load),
                                  &thisOpMemRefDim)
             : isContiguousAccess(loop.getInductionVar(),
                                  cast<AffineWriteOpInterface>(*store),
                                  &thisOpMemRefDim);
    if (thisOpMemRefDim != -1) {
      // If memory accesses vary across different dimensions then the loop is
      // not vectorizable.
      if (*memRefDim != -1 && *memRefDim != thisOpMemRefDim)
        return false;
      *memRefDim = thisOpMemRefDim;
    }
    return isContiguous;
  });
  return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
}

bool mlir::affine::isVectorizableLoopBody(
    AffineForOp loop, NestedPattern &vectorTransferMatcher) {
  return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
}
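
// Editorial note (not part of the upstream source): taken together, the
// overload above that takes `memRefDim` accepts a loop only if its body has no
// affine.if, no ops with non-vectorizable operand/result types, no unknown
// regions, no ops matched by `vectorTransferMatcher`, and every affine
// load/store of a scalar element is either invariant or varies along a single
// memref dimension with respect to the loop's induction variable, with all
// accesses agreeing on that dimension.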

/// Checks whether SSA dominance would be violated if a for op's body
/// operations are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO: extend this to check for memory-based dependence violation when we
// have the support.
bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
                                      ArrayRef<uint64_t> shifts) {
  auto *forBody = forOp.getBody();
  assert(shifts.size() == forBody->getOperations().size());

  // Work backwards over the body of the block so that the shift of a use's
  // ancestor operation in the block gets recorded before it's looked up.
  DenseMap<Operation *, uint64_t> forBodyShift;
  for (const auto &it :
       llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
    auto &op = it.value();

    // Get the index of the current operation; note that we are iterating in
    // reverse so we need to fix it up.
    size_t index = shifts.size() - it.index() - 1;

    // Remember the shift of this operation.
    uint64_t shift = shifts[index];
    forBodyShift.try_emplace(&op, shift);

    // Validate the results of this operation if it were to be shifted.
    for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
      Value result = op.getResult(i);
      for (auto *user : result.getUsers()) {
        // If an ancestor operation doesn't lie in the block of forOp,
        // there is no shift to check.
        if (auto *ancOp = forBody->findAncestorOpInBlock(*user)) {
          assert(forBodyShift.count(ancOp) > 0 && "ancestor expected in map");
          if (shift != forBodyShift[ancOp])
            return false;
        }
      }
    }
  }
  return true;
}
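
// Editorial example (not part of the upstream source): for a body such as
//   %v = affine.load %A[%i]     // shift 0
//   affine.store %v, %A[%i]     // shift 1
// (terminator omitted for brevity), the store (a user of %v) would be shifted
// relative to the load that defines %v, so isOpwiseShiftValid returns false;
// giving both operations the same shift factor keeps the def and its use
// together and is accepted.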

bool mlir::affine::isTilingValid(ArrayRef<AffineForOp> loops) {
  assert(!loops.empty() && "no original loops provided");

  // We first find out all dependences we intend to check.
  SmallVector<Operation *, 8> loadAndStoreOps;
  loops[0]->walk([&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
      loadAndStoreOps.push_back(op);
  });

  unsigned numOps = loadAndStoreOps.size();
  unsigned numLoops = loops.size();
  for (unsigned d = 1; d <= numLoops + 1; ++d) {
    for (unsigned i = 0; i < numOps; ++i) {
      Operation *srcOp = loadAndStoreOps[i];
      MemRefAccess srcAccess(srcOp);
      for (unsigned j = 0; j < numOps; ++j) {
        Operation *dstOp = loadAndStoreOps[j];
        MemRefAccess dstAccess(dstOp);

        SmallVector<DependenceComponent, 2> depComps;
        DependenceResult result = checkMemrefAccessDependence(
            srcAccess, dstAccess, d, /*dependenceConstraints=*/nullptr,
            &depComps);

        // Skip if there is no dependence in this case.
        if (!hasDependence(result))
          continue;

        // Check whether there is any negative direction vector in the
        // dependence components found above, which means that the dependence
        // is violated by the default hyper-rectangular tiling method.
        LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality is "
                                   "violated for dependence at depth: "
                                << Twine(d) << " between:\n";);
        LLVM_DEBUG(srcAccess.opInst->dump());
        LLVM_DEBUG(dstAccess.opInst->dump());
        for (const DependenceComponent &depComp : depComps) {
          if (depComp.lb.has_value() && depComp.ub.has_value() &&
              *depComp.lb < *depComp.ub && *depComp.ub < 0) {
            LLVM_DEBUG(llvm::dbgs()
                       << "Dependence component lb = " << Twine(*depComp.lb)
                       << " ub = " << Twine(*depComp.ub)
                       << " is negative at depth: " << Twine(d)
                       << " and thus violates the legality rule.\n");
            return false;
          }
        }
      }
    }
  }

  return true;
}
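
// Editorial note (not part of the upstream source): the check above rejects
// tiling whenever some dependence has a component at depth `d` whose entire
// [lb, ub] range is strictly negative (lb < ub < 0), i.e. the dependence
// always points "backwards" along that loop; hyper-rectangular tiling could
// then reorder the dependent iterations illegally.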

bool mlir::affine::hasCyclicDependence(AffineForOp root) {
  // Collect all the memory accesses in the source nest grouped by their
  // immediate parent block.
  DirectedOpGraph graph;
  SmallVector<MemRefAccess> accesses;
  root->walk([&](Operation *op) {
    if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
      accesses.emplace_back(op);
      graph.addNode(op);
    }
  });

  // Construct the dependence graph for all the collected accesses.
  unsigned rootDepth = getNestingDepth(root);
  for (const auto &accA : accesses) {
    for (const auto &accB : accesses) {
      if (accA.memref != accB.memref)
        continue;
      // Perform the dependence check at all surrounding loop depths + the
      // body.
      unsigned numCommonLoops =
          getNumCommonSurroundingLoops(*accA.opInst, *accB.opInst);
      for (unsigned d = rootDepth + 1; d <= numCommonLoops + 1; ++d) {
        if (!noDependence(checkMemrefAccessDependence(accA, accB, d)))
          graph.addEdge(accA.opInst, accB.opInst);
      }
    }
  }
  return graph.hasCycle();
}
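
// Editorial note (not part of the upstream source): an edge is added from
// access `a` to access `b` whenever `b` depends on `a` at some depth, so a
// cycle means the accesses mutually constrain each other's order. For
// instance, a store to %A[%i] paired with a load of %A[%n - %i] can produce
// edges in both directions (the dependence points forward for some iterations
// and backward for others, assuming the loop range spans both cases), and the
// nest would then be reported as having a cyclic dependence.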