LoopAnalysis.cpp
1 //===- LoopAnalysis.cpp - Misc loop analysis routines --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements miscellaneous loop analysis routines.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
14 
15 #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
16 #include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
17 #include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
18 #include "mlir/Dialect/Affine/Analysis/Utils.h"
19 #include "mlir/Dialect/Affine/IR/AffineOps.h"
20 #include "mlir/Dialect/Affine/IR/AffineValueMap.h"
21 #include "llvm/Support/MathExtras.h"
22 
23 #include "llvm/ADT/DenseSet.h"
24 #include "llvm/ADT/SmallPtrSet.h"
25 #include "llvm/ADT/SmallString.h"
26 #include "llvm/Support/Debug.h"
27 #include <numeric>
28 #include <optional>
29 #include <type_traits>
30 
31 #define DEBUG_TYPE "affine-loop-analysis"
32 
33 using namespace mlir;
34 using namespace mlir::affine;
35 
36 namespace {
37 
38 /// A directed graph to model relationships between MLIR Operations.
39 class DirectedOpGraph {
40 public:
41  /// Add a node to the graph.
42  void addNode(Operation *op) {
43  assert(!hasNode(op) && "node already added");
44  nodes.emplace_back(op);
45  edges[op] = {};
46  }
47 
48  /// Add an edge from `src` to `dest`.
49  void addEdge(Operation *src, Operation *dest) {
50  // This is a multi-graph.
51  assert(hasNode(src) && "src node does not exist in graph");
52  assert(hasNode(dest) && "dest node does not exist in graph");
53  edges[src].push_back(getNode(dest));
54  }
55 
56  /// Returns true if there is a (directed) cycle in the graph.
57  bool hasCycle() { return dfs(/*cycleCheck=*/true); }
58 
59  void printEdges() {
60  for (auto &en : edges) {
61  llvm::dbgs() << *en.first << " (" << en.first << ")"
62  << " has " << en.second.size() << " edges:\n";
63  for (auto *node : en.second) {
64  llvm::dbgs() << '\t' << *node->op << '\n';
65  }
66  }
67  }
68 
69 private:
70  /// A node of a directed graph between MLIR Operations to model various
71  /// relationships. This is meant to be used internally.
72  struct DGNode {
73  DGNode(Operation *op) : op(op) {};
74  Operation *op;
75 
76  // Start and finish visit numbers are standard in DFS to implement things
77  // like finding strongly connected components. These numbers are modified
78  // during analyses on the graph and so seemingly const API methods will be
79  // non-const.
80 
81  /// Start visit number.
82  int vn = -1;
83 
84  /// Finish visit number.
85  int fn = -1;
86  };
87 
88  /// Get internal node corresponding to `op`.
89  DGNode *getNode(Operation *op) {
90  auto *value =
91  llvm::find_if(nodes, [&](const DGNode &node) { return node.op == op; });
92  assert(value != nodes.end() && "node doesn't exist in graph");
93  return &*value;
94  }
95 
96  /// Returns true if `key` is in the graph.
97  bool hasNode(Operation *key) const {
98  return llvm::find_if(nodes, [&](const DGNode &node) {
99  return node.op == key;
100  }) != nodes.end();
101  }
102 
103  /// Perform a depth-first traversal of the graph setting visited and finished
104  /// numbers. If `cycleCheck` is set, detects cycles and returns true as soon
105  /// as the first cycle is detected, and false if there are no cycles. If
 106  /// `cycleCheck` is not set, the DFS is completed and the return value is
 107  /// meaningless.
108  bool dfs(bool cycleCheck = false) {
109  for (DGNode &node : nodes) {
110  node.vn = 0;
111  node.fn = -1;
112  }
113 
114  unsigned time = 0;
115  for (DGNode &node : nodes) {
116  if (node.vn == 0) {
117  bool ret = dfsNode(node, cycleCheck, time);
118  // Check if a cycle was already found.
119  if (cycleCheck && ret)
120  return true;
121  } else if (cycleCheck && node.fn == -1) {
122  // We have encountered a node whose visit has started but it's not
123  // finished. So we have a cycle.
124  return true;
125  }
126  }
127  return false;
128  }
129 
130  /// Perform depth-first traversal starting at `node`. Return true
131  /// as soon as a cycle is found if `cycleCheck` was set. Update `time`.
132  bool dfsNode(DGNode &node, bool cycleCheck, unsigned &time) const {
133  auto nodeEdges = edges.find(node.op);
134  assert(nodeEdges != edges.end() && "missing node in graph");
135  node.vn = ++time;
136 
137  for (auto &neighbour : nodeEdges->second) {
138  if (neighbour->vn == 0) {
139  bool ret = dfsNode(*neighbour, cycleCheck, time);
140  if (cycleCheck && ret)
141  return true;
142  } else if (cycleCheck && neighbour->fn == -1) {
143  // We have encountered a node whose visit has started but it's not
144  // finished. So we have a cycle.
145  return true;
146  }
147  }
148 
149  // Update finish time.
150  node.fn = ++time;
151 
152  return false;
153  }
154 
155  // The list of nodes. The storage is owned by this class.
156  SmallVector<DGNode> nodes;
157 
 158  // Edges as an adjacency list.
 159  DenseMap<Operation *, SmallVector<DGNode *>> edges;
 160 };
161 
162 } // namespace
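To make the helper's contract concrete, here is a minimal sketch of how the class above would be driven; `opA` and `opB` are assumed operations with a mutual dependence (compare `hasCyclicDependence` further down, which is the real in-file user).

static bool hasTwoOpCycle(Operation *opA, Operation *opB) {
  DirectedOpGraph graph;
  graph.addNode(opA);
  graph.addNode(opB);
  graph.addEdge(opA, opB); // opB depends on opA ...
  graph.addEdge(opB, opA); // ... and opA depends on opB.
  // The DFS re-reaches opA while its visit is started but unfinished, so a
  // cycle is reported.
  return graph.hasCycle();
}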
163 
164 /// Returns the trip count of the loop as an affine map (with its operands) if
165 /// it is expressible as an affine expression, and a null map otherwise. The
166 /// trip count expression is simplified before returning. This method only
167 /// utilizes map composition to construct lower and upper bounds before
168 /// computing the trip count expressions.
169 void mlir::affine::getTripCountMapAndOperands(
170  AffineForOp forOp, AffineMap *tripCountMap,
171  SmallVectorImpl<Value> *tripCountOperands) {
172  MLIRContext *context = forOp.getContext();
173  int64_t step = forOp.getStepAsInt();
174  int64_t loopSpan;
175  if (forOp.hasConstantBounds()) {
176  int64_t lb = forOp.getConstantLowerBound();
177  int64_t ub = forOp.getConstantUpperBound();
178  loopSpan = ub - lb;
179  if (loopSpan < 0)
180  loopSpan = 0;
181  *tripCountMap = AffineMap::getConstantMap(
182  llvm::divideCeilSigned(loopSpan, step), context);
183  tripCountOperands->clear();
184  return;
185  }
186  auto lbMap = forOp.getLowerBoundMap();
187  auto ubMap = forOp.getUpperBoundMap();
188  if (lbMap.getNumResults() != 1) {
189  *tripCountMap = AffineMap();
190  return;
191  }
192 
193  // Difference of each upper bound expression from the single lower bound
194  // expression (divided by the step) provides the expressions for the trip
195  // count map.
196  AffineValueMap ubValueMap(ubMap, forOp.getUpperBoundOperands());
197 
198  SmallVector<AffineExpr, 4> lbSplatExpr(ubValueMap.getNumResults(),
199  lbMap.getResult(0));
200  auto lbMapSplat = AffineMap::get(lbMap.getNumDims(), lbMap.getNumSymbols(),
201  lbSplatExpr, context);
202  AffineValueMap lbSplatValueMap(lbMapSplat, forOp.getLowerBoundOperands());
203 
204  AffineValueMap tripCountValueMap;
205  AffineValueMap::difference(ubValueMap, lbSplatValueMap, &tripCountValueMap);
206  for (unsigned i = 0, e = tripCountValueMap.getNumResults(); i < e; ++i)
207  tripCountValueMap.setResult(i,
208  tripCountValueMap.getResult(i).ceilDiv(step));
209 
210  *tripCountMap = tripCountValueMap.getAffineMap();
211  tripCountOperands->assign(tripCountValueMap.getOperands().begin(),
212  tripCountValueMap.getOperands().end());
213 }
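For intuition, a hedged sketch of what the composition yields; the loop below is illustrative (not taken from a test) and `forOp` is assumed to refer to it.

// affine.for %i = 5 to %N step 2 { ... }
//   lower bound map: () -> (5)
//   upper bound map: ()[s0] -> (s0), with operand %N
// The difference (s0 - 5), ceil-divided by the step, gives:
//   *tripCountMap      == ()[s0] -> ((s0 - 5) ceildiv 2)
//   *tripCountOperands == {%N}
AffineMap tripCountMap;
SmallVector<Value, 4> tripCountOperands;
getTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);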
214 
215 /// Returns the trip count of the loop if it's a constant, std::nullopt
216 /// otherwise. This method uses affine expression analysis (in turn using
217 /// getTripCountMapAndOperands) and is able to determine the constant trip
218 /// count in non-trivial cases.
219 std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) {
220  SmallVector<Value, 4> operands;
221  AffineMap map;
222  getTripCountMapAndOperands(forOp, &map, &operands);
223 
224  if (!map)
225  return std::nullopt;
226 
227  // Take the min if all trip counts are constant.
228  std::optional<uint64_t> tripCount;
229  for (auto resultExpr : map.getResults()) {
230  if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
231  if (tripCount.has_value())
232  tripCount =
233  std::min(*tripCount, static_cast<uint64_t>(constExpr.getValue()));
234  else
235  tripCount = constExpr.getValue();
236  } else
237  return std::nullopt;
238  }
239  return tripCount;
240 }
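A brief usage sketch; the caller context and `forOp` are assumed, and the unrolling follow-up is only named as an example.

if (std::optional<uint64_t> tripCount = getConstantTripCount(forOp)) {
  // All results of the trip count map are constants; `*tripCount` is their
  // minimum, e.g. 8 for `affine.for %i = 0 to min affine_map<() -> (8, 16)>()`.
  if (*tripCount <= 4)
    ; // e.g. fully unroll the loop here.
} else {
  // At least one bound expression is symbolic: trip count unknown statically.
}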
241 
242 /// Returns the greatest known integral divisor of the trip count. Affine
243 /// expression analysis is used (indirectly through getTripCountMapAndOperands),
244 /// and this method is thus able to determine non-trivial divisors.
245 uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) {
246  SmallVector<Value, 4> operands;
247  AffineMap map;
248  getTripCountMapAndOperands(forOp, &map, &operands);
249 
250  if (!map)
251  return 1;
252 
253  // The largest divisor of the trip count is the GCD of the individual largest
254  // divisors.
255  assert(map.getNumResults() >= 1 && "expected one or more results");
256  std::optional<uint64_t> gcd;
257  for (auto resultExpr : map.getResults()) {
258  uint64_t thisGcd;
259  if (auto constExpr = dyn_cast<AffineConstantExpr>(resultExpr)) {
260  uint64_t tripCount = constExpr.getValue();
261  // 0 iteration loops (greatest divisor is 2^64 - 1).
 262  if (tripCount == 0)
 263  thisGcd = std::numeric_limits<uint64_t>::max();
 264  else
265  // The greatest divisor is the trip count.
266  thisGcd = tripCount;
267  } else {
268  // Trip count is not a known constant; return its largest known divisor.
269  thisGcd = resultExpr.getLargestKnownDivisor();
270  }
271  if (gcd.has_value())
272  gcd = std::gcd(*gcd, thisGcd);
273  else
274  gcd = thisGcd;
275  }
276  assert(gcd.has_value() && "value expected per above logic");
277  return *gcd;
278 }
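A small worked sketch (the loop shown is illustrative and `forOp` is assumed to refer to it): divisibility can be known even when the trip count itself is not.

// affine.for %i = 0 to affine_map<()[s0] -> (s0 * 4)>()[%n] { ... }
// The trip count expression is s0 * 4; its largest known divisor is 4, so the
// function returns 4 and, for instance, an unroll-by-4 needs no cleanup
// iterations. For a constant trip count of 12 it would return 12, and for a
// provably zero-trip loop it returns std::numeric_limits<uint64_t>::max().
uint64_t divisor = getLargestDivisorOfTripCount(forOp);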
279 
280 /// Given an affine.for `iv` and an access `index` of type index, returns `true`
281 /// if `index` is independent of `iv` and false otherwise.
282 ///
283 /// Prerequisite: `iv` must be an affine.for induction variable and `index`
284 /// must be of 'index' type.
284 static bool isAccessIndexInvariant(Value iv, Value index) {
285  assert(isAffineForInductionVar(iv) && "iv must be an affine.for iv");
286  assert(isa<IndexType>(index.getType()) && "index must be of 'index' type");
287  auto map = AffineMap::getMultiDimIdentityMap(/*numDims=*/1, iv.getContext());
288  SmallVector<Value> operands = {index};
 289  AffineValueMap avm(map, operands);
 290  avm.composeSimplifyAndCanonicalize();
 291  return !avm.isFunctionOf(0, iv);
292 }
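A hedged sketch of the intent; `iv`, `idx0`, and `idx1` are assumed SSA values in the caller, and the IR is illustrative.

// Inside `affine.for %i = 0 to 100`, with %j defined above the loop:
//   %idx0 = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
//   %idx1 = affine.apply affine_map<(d0) -> (d0)>(%j)
// After composing the affine.apply chains:
bool varies    = !isAccessIndexInvariant(iv, idx0); // true: %idx0 follows %i.
bool invariant =  isAccessIndexInvariant(iv, idx1); // true: %idx1 ignores %i.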
293 
294 // Pre-requisite: Loop bounds should be in canonical form.
295 template <typename LoadOrStoreOp>
296 bool mlir::affine::isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp) {
 297  AffineValueMap avm(memOp.getAffineMap(), memOp.getMapOperands());
 298  avm.composeSimplifyAndCanonicalize();
 299  return !llvm::is_contained(avm.getOperands(), forOp.getInductionVar());
300 }
301 
302 // Explicitly instantiate the template so that the compiler knows we need them.
303 template bool mlir::affine::isInvariantAccess(AffineReadOpInterface,
304  AffineForOp);
305 template bool mlir::affine::isInvariantAccess(AffineWriteOpInterface,
306  AffineForOp);
307 template bool mlir::affine::isInvariantAccess(AffineLoadOp, AffineForOp);
308 template bool mlir::affine::isInvariantAccess(AffineStoreOp, AffineForOp);
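A short sketch of the distinction this captures; `loadOp`, `iLoop`, and `jLoop` are assumed handles to the ops in the illustrative nest below.

// affine.for %i = 0 to 10 {        // `iLoop`
//   affine.for %j = 0 to 10 {      // `jLoop`
//     %v = affine.load %A[%j] : memref<10xf32>   // `loadOp`
//   }
// }
// The composed access operands are {%j}, so the load is invariant w.r.t. the
// outer loop but not the inner one:
bool invariantInI = isInvariantAccess(loadOp, iLoop); // true
bool invariantInJ = isInvariantAccess(loadOp, jLoop); // false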
309 
310 DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
311  ArrayRef<Value> indices) {
312  DenseSet<Value> res;
313  for (Value index : indices) {
314  if (isAccessIndexInvariant(iv, index))
315  res.insert(index);
316  }
317  return res;
318 }
319 
320 // TODO: check access stride.
321 template <typename LoadOrStoreOp>
322 bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
323  int *memRefDim) {
324  static_assert(llvm::is_one_of<LoadOrStoreOp, AffineReadOpInterface,
325  AffineWriteOpInterface>::value,
326  "Must be called on either an affine read or write op");
327  assert(memRefDim && "memRefDim == nullptr");
328  auto memRefType = memoryOp.getMemRefType();
329 
330  if (!memRefType.getLayout().isIdentity())
331  return memoryOp.emitError("NYI: non-trivial layout map"), false;
332 
333  int uniqueVaryingIndexAlongIv = -1;
334  auto accessMap = memoryOp.getAffineMap();
335  SmallVector<Value, 4> mapOperands(memoryOp.getMapOperands());
336  unsigned numDims = accessMap.getNumDims();
337  for (unsigned i = 0, e = memRefType.getRank(); i < e; ++i) {
338  // Gather map operands used in result expr 'i' in 'exprOperands'.
339  SmallVector<Value, 4> exprOperands;
340  auto resultExpr = accessMap.getResult(i);
341  resultExpr.walk([&](AffineExpr expr) {
342  if (auto dimExpr = dyn_cast<AffineDimExpr>(expr))
343  exprOperands.push_back(mapOperands[dimExpr.getPosition()]);
344  else if (auto symExpr = dyn_cast<AffineSymbolExpr>(expr))
345  exprOperands.push_back(mapOperands[numDims + symExpr.getPosition()]);
346  });
347  // Check access invariance of each operand in 'exprOperands'.
348  for (Value exprOperand : exprOperands) {
349  if (!isAccessIndexInvariant(iv, exprOperand)) {
350  if (uniqueVaryingIndexAlongIv != -1) {
351  // 2+ varying indices -> do not vectorize along iv.
352  return false;
353  }
354  uniqueVaryingIndexAlongIv = i;
355  }
356  }
357  }
358 
359  if (uniqueVaryingIndexAlongIv == -1)
360  *memRefDim = -1;
361  else
362  *memRefDim = memRefType.getRank() - (uniqueVaryingIndexAlongIv + 1);
363  return true;
364 }
365 
366 template bool mlir::affine::isContiguousAccess(Value iv,
367  AffineReadOpInterface loadOp,
368  int *memRefDim);
369 template bool mlir::affine::isContiguousAccess(Value iv,
370  AffineWriteOpInterface loadOp,
371  int *memRefDim);
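To make the `memRefDim` convention concrete, a sketch under assumed IR; `iv` is the loop's induction variable and `loadA`/`loadB` are assumed `AffineReadOpInterface` handles to the two loads.

// affine.for %i = 0 to 100 {
//   %a = affine.load %A[%c, %i] : memref<16x100xf32>  // IV in the last dim
//   %b = affine.load %B[%i, %c] : memref<100x16xf32>  // IV in the first dim
// }
int dim;
isContiguousAccess(iv, loadA, &dim);
// dim == 0: the fastest-varying (innermost) memref dimension follows %i, i.e.
// the access is contiguous along the loop.
isContiguousAccess(iv, loadB, &dim);
// dim == 1: an outer dimension follows %i, i.e. the access is strided.
// Both calls return true; false is reserved for 2+ varying dimensions or a
// non-identity layout.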
372 
373 template <typename LoadOrStoreOp>
374 static bool isVectorElement(LoadOrStoreOp memoryOp) {
375  auto memRefType = memoryOp.getMemRefType();
376  return isa<VectorType>(memRefType.getElementType());
377 }
378 
379 using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;
380 
381 static bool
382 isVectorizableLoopBodyWithOpCond(AffineForOp loop,
383  const VectorizableOpFun &isVectorizableOp,
384  NestedPattern &vectorTransferMatcher) {
385  auto *forOp = loop.getOperation();
386 
387  // No vectorization across conditionals for now.
388  auto conditionals = matcher::If();
389  SmallVector<NestedMatch, 8> conditionalsMatched;
390  conditionals.match(forOp, &conditionalsMatched);
391  if (!conditionalsMatched.empty()) {
392  return false;
393  }
394 
395  // No vectorization for ops with operand or result types that are not
396  // vectorizable.
397  auto types = matcher::Op([](Operation &op) -> bool {
398  if (llvm::any_of(op.getOperandTypes(), [](Type type) {
399  if (MemRefType t = dyn_cast<MemRefType>(type))
400  return !VectorType::isValidElementType(t.getElementType());
401  return !VectorType::isValidElementType(type);
402  }))
403  return true;
404  return llvm::any_of(op.getResultTypes(), [](Type type) {
405  return !VectorType::isValidElementType(type);
406  });
407  });
408  SmallVector<NestedMatch, 8> opsMatched;
409  types.match(forOp, &opsMatched);
410  if (!opsMatched.empty()) {
411  return false;
412  }
413 
414  // No vectorization across unknown regions.
415  auto regions = matcher::Op([](Operation &op) -> bool {
416  return op.getNumRegions() != 0 && !isa<AffineIfOp, AffineForOp>(op);
417  });
418  SmallVector<NestedMatch, 8> regionsMatched;
419  regions.match(forOp, &regionsMatched);
420  if (!regionsMatched.empty()) {
421  return false;
422  }
423 
424  SmallVector<NestedMatch, 8> vectorTransfersMatched;
425  vectorTransferMatcher.match(forOp, &vectorTransfersMatched);
426  if (!vectorTransfersMatched.empty()) {
427  return false;
428  }
429 
430  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
431  SmallVector<NestedMatch, 8> loadAndStoresMatched;
432  loadAndStores.match(forOp, &loadAndStoresMatched);
433  for (auto ls : loadAndStoresMatched) {
434  auto *op = ls.getMatchedOperation();
435  auto load = dyn_cast<AffineLoadOp>(op);
436  auto store = dyn_cast<AffineStoreOp>(op);
 437  // Only scalar element types are considered vectorizable; every load/store
 438  // must itself be vectorizable for the loop to qualify.
439  // TODO: ponder whether we want to be more general here.
440  bool vector = load ? isVectorElement(load) : isVectorElement(store);
441  if (vector) {
442  return false;
443  }
444  if (isVectorizableOp && !isVectorizableOp(loop, *op)) {
445  return false;
446  }
447  }
448  return true;
449 }
450 
451 bool mlir::affine::isVectorizableLoopBody(
452  AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) {
453  *memRefDim = -1;
454  VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
455  auto load = dyn_cast<AffineLoadOp>(op);
456  auto store = dyn_cast<AffineStoreOp>(op);
457  int thisOpMemRefDim = -1;
458  bool isContiguous =
459  load ? isContiguousAccess(loop.getInductionVar(),
460  cast<AffineReadOpInterface>(*load),
461  &thisOpMemRefDim)
462  : isContiguousAccess(loop.getInductionVar(),
463  cast<AffineWriteOpInterface>(*store),
464  &thisOpMemRefDim);
465  if (thisOpMemRefDim != -1) {
466  // If memory accesses vary across different dimensions then the loop is
467  // not vectorizable.
468  if (*memRefDim != -1 && *memRefDim != thisOpMemRefDim)
469  return false;
470  *memRefDim = thisOpMemRefDim;
471  }
472  return isContiguous;
473  });
474  return isVectorizableLoopBodyWithOpCond(loop, fun, vectorTransferMatcher);
475 }
476 
477 bool mlir::affine::isVectorizableLoopBody(
478  AffineForOp loop, NestedPattern &vectorTransferMatcher) {
479  return isVectorizableLoopBodyWithOpCond(loop, nullptr, vectorTransferMatcher);
480 }
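A hedged usage sketch; `loop` is an assumed AffineForOp, and `vectorTransferPattern` is assumed to be a NestedPattern matching already-vectorized transfer ops, as a supervectorization driver would build.

int memRefDim = -1;
if (isVectorizableLoopBody(loop, &memRefDim, vectorTransferPattern)) {
  // The loop has no affine.if, no unknown regions, no pre-existing vector
  // transfers, only vectorizable types, and every load/store is contiguous
  // along `loop`'s IV. `memRefDim` is their common fastest-varying memref
  // dimension (0 = innermost), or -1 if no access varies with the IV.
}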
481 
482 /// Checks whether SSA dominance would be violated if a for op's body
483 /// operations are shifted by the specified shifts. This method checks if a
484 /// 'def' and all its uses have the same shift factor.
485 // TODO: extend this to check for memory-based dependence violation when we have
486 // the support.
487 bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
488  ArrayRef<uint64_t> shifts) {
489  auto *forBody = forOp.getBody();
490  assert(shifts.size() == forBody->getOperations().size());
491 
492  // Work backwards over the body of the block so that the shift of a use's
493  // ancestor operation in the block gets recorded before it's looked up.
494  DenseMap<Operation *, uint64_t> forBodyShift;
495  for (const auto &it :
496  llvm::enumerate(llvm::reverse(forBody->getOperations()))) {
497  auto &op = it.value();
498 
499  // Get the index of the current operation, note that we are iterating in
500  // reverse so we need to fix it up.
501  size_t index = shifts.size() - it.index() - 1;
502 
503  // Remember the shift of this operation.
504  uint64_t shift = shifts[index];
505  forBodyShift.try_emplace(&op, shift);
506 
507  // Validate the results of this operation if it were to be shifted.
508  for (unsigned i = 0, e = op.getNumResults(); i < e; ++i) {
509  Value result = op.getResult(i);
510  for (auto *user : result.getUsers()) {
511  // If an ancestor operation doesn't lie in the block of forOp,
512  // there is no shift to check.
513  if (auto *ancOp = forBody->findAncestorOpInBlock(*user)) {
514  assert(forBodyShift.count(ancOp) > 0 && "ancestor expected in map");
515  if (shift != forBodyShift[ancOp])
516  return false;
517  }
518  }
519  }
520  }
521  return true;
522 }
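A small worked example; the body and the shift vectors are illustrative, with `forOp` assumed to be the loop shown. A def and all of its uses must receive the same shift.

// Body of `forOp` (three operations, including the terminator):
//   %v = affine.load %A[%i] : memref<100xf32>   // op #0
//   affine.store %v, %B[%i] : memref<100xf32>   // op #1
//   affine.yield                                 // op #2
bool ok  = isOpwiseShiftValid(forOp, /*shifts=*/{0, 0, 0}); // true
bool bad = isOpwiseShiftValid(forOp, /*shifts=*/{1, 0, 0}); // false: %v is
// shifted by 1 but its user (the store) is not, which would break dominance.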
523 
524 bool mlir::affine::isTilingValid(ArrayRef<AffineForOp> loops) {
525  assert(!loops.empty() && "no original loops provided");
526 
527  // We first find out all dependences we intend to check.
528  SmallVector<Operation *, 8> loadAndStoreOps;
529  loops[0]->walk([&](Operation *op) {
530  if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op))
531  loadAndStoreOps.push_back(op);
532  });
533 
534  unsigned numOps = loadAndStoreOps.size();
535  unsigned numLoops = loops.size();
536  for (unsigned d = 1; d <= numLoops + 1; ++d) {
537  for (unsigned i = 0; i < numOps; ++i) {
538  Operation *srcOp = loadAndStoreOps[i];
539  MemRefAccess srcAccess(srcOp);
540  for (unsigned j = 0; j < numOps; ++j) {
541  Operation *dstOp = loadAndStoreOps[j];
542  MemRefAccess dstAccess(dstOp);
543 
544  SmallVector<DependenceComponent, 2> depComps;
545  DependenceResult result = checkMemrefAccessDependence(
546  srcAccess, dstAccess, d, /*dependenceConstraints=*/nullptr,
547  &depComps);
548 
549  // Skip if there is no dependence in this case.
550  if (!hasDependence(result))
551  continue;
552 
553  // Check whether there is any negative direction vector in the
554  // dependence components found above, which means that dependence is
555  // violated by the default hyper-rect tiling method.
556  LLVM_DEBUG(llvm::dbgs() << "Checking whether tiling legality violated "
557  "for dependence at depth: "
558  << Twine(d) << " between:\n";);
559  LLVM_DEBUG(srcAccess.opInst->dump());
560  LLVM_DEBUG(dstAccess.opInst->dump());
561  for (const DependenceComponent &depComp : depComps) {
562  if (depComp.lb.has_value() && depComp.ub.has_value() &&
563  *depComp.lb < *depComp.ub && *depComp.ub < 0) {
564  LLVM_DEBUG(llvm::dbgs()
565  << "Dependence component lb = " << Twine(*depComp.lb)
566  << " ub = " << Twine(*depComp.ub)
567  << " is negative at depth: " << Twine(d)
568  << " and thus violates the legality rule.\n");
569  return false;
570  }
571  }
572  }
573  }
574  }
575 
576  return true;
577 }
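A brief caller sketch; `outermostLoop` is assumed, and the band-collection helper is assumed to be available from the affine loop utilities and is named here only for illustration.

SmallVector<AffineForOp, 6> band;
getPerfectlyNestedLoops(band, outermostLoop); // assumed helper from LoopUtils
if (!isTilingValid(band)) {
  // Some dependence has a strictly negative component at one of the checked
  // depths, so hyper-rectangular tiling of this band would read an element
  // before the iteration that writes it; skip tiling.
  return;
}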
578 
579 bool mlir::affine::hasCyclicDependence(AffineForOp root) {
 580  // Collect all the affine memory accesses (loads and stores) in the nest
 581  // rooted at `root`.
582  DirectedOpGraph graph;
583  SmallVector<MemRefAccess> accesses;
584  root->walk([&](Operation *op) {
585  if (isa<AffineReadOpInterface, AffineWriteOpInterface>(op)) {
586  accesses.emplace_back(op);
587  graph.addNode(op);
588  }
589  });
590 
 591  // Construct the dependence graph for all the collected accesses.
592  unsigned rootDepth = getNestingDepth(root);
593  for (const auto &accA : accesses) {
594  for (const auto &accB : accesses) {
595  if (accA.memref != accB.memref)
596  continue;
 597  // Perform the dependence check on all common surrounding loops + the body.
598  unsigned numCommonLoops =
599  getNumCommonSurroundingLoops(*accA.opInst, *accB.opInst);
600  for (unsigned d = rootDepth + 1; d <= numCommonLoops + 1; ++d) {
601  if (!noDependence(checkMemrefAccessDependence(accA, accB, d)))
602  graph.addEdge(accA.opInst, accB.opInst);
603  }
604  }
605  }
606  return graph.hasCycle();
607 }
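To illustrate what counts as a cycle, a sketch of a single-element accumulation loop (illustrative IR, with `forOp` assumed to be the loop shown):

// affine.for %i = 0 to 100 {
//   %v = affine.load %acc[0] : memref<1xf32>
//   %s = arith.addf %v, %x : f32
//   affine.store %s, %acc[0] : memref<1xf32>
// }
// The load and the store on %acc[0] depend on each other across iterations:
// the store feeds the next iteration's load, and each load must occur before
// the same element is overwritten. That gives a two-node cycle in the graph
// built above, so:
bool cyclic = hasCyclicDependence(forOp); // true for the nest above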