MLIR  22.0.0git
IndexingUtils.cpp
Go to the documentation of this file.
1 //===- IndexingUtils.cpp - Helpers related to index computations ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "mlir/Dialect/Utils/IndexingUtils.h"

#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "llvm/ADT/STLExtras.h"
#include <numeric>
#include <optional>
18 
19 using namespace mlir;
20 
21 template <typename ExprType>
23  ExprType unit) {
24  if (sizes.empty())
25  return {};
26  SmallVector<ExprType> strides(sizes.size(), unit);
27  for (int64_t r = static_cast<int64_t>(strides.size()) - 2; r >= 0; --r)
28  strides[r] = strides[r + 1] * sizes[r + 1];
29  return strides;
30 }
31 
32 template <typename ExprType>
34  ArrayRef<ExprType> v2) {
35  // Early exit if both are empty, let zip_equal fail if only 1 is empty.
36  if (v1.empty() && v2.empty())
37  return {};
38  SmallVector<ExprType> result;
39  for (auto it : llvm::zip_equal(v1, v2))
40  result.push_back(std::get<0>(it) * std::get<1>(it));
41  return result;
42 }
43 
44 template <typename ExprType>
46  ExprType zero) {
47  assert(offsets.size() == basis.size());
48  ExprType linearIndex = zero;
49  for (unsigned idx = 0, e = basis.size(); idx < e; ++idx)
50  linearIndex = linearIndex + offsets[idx] * basis[idx];
51  return linearIndex;
52 }
53 
54 template <typename ExprType, typename DivOpTy>
56  ArrayRef<ExprType> strides,
57  DivOpTy divOp) {
58  int64_t rank = strides.size();
59  SmallVector<ExprType> offsets(rank);
60  for (int64_t r = 0; r < rank; ++r) {
61  offsets[r] = divOp(linearIndex, strides[r]);
62  linearIndex = linearIndex % strides[r];
63  }
64  return offsets;
65 }
66 
67 //===----------------------------------------------------------------------===//
68 // Utils that operate on static integer values.
69 //===----------------------------------------------------------------------===//
70 
72  assert((sizes.empty() ||
73  llvm::all_of(sizes.drop_front(), [](int64_t s) { return s >= 0; })) &&
74  "sizes must be nonnegative");
75  int64_t unit = 1;
77 }
78 
80  ArrayRef<int64_t> v2) {
81  return computeElementwiseMulImpl(v1, v2);
82 }
83 
85  assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
86  "basis must be nonnegative");
87  if (basis.empty())
88  return 0;
89  return std::accumulate(basis.begin(), basis.end(), 1, std::plus<int64_t>());
90 }
91 
93  assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
94  "basis must be nonnegative");
95  if (basis.empty())
96  return 1;
97  return std::accumulate(basis.begin(), basis.end(), 1,
98  std::multiplies<int64_t>());
99 }
100 
102  assert(llvm::all_of(basis, [](int64_t s) { return s > 0; }) &&
103  "basis must be nonnegative");
104  int64_t zero = 0;
105  return linearizeImpl(offsets, basis, zero);
106 }
107 
109  ArrayRef<int64_t> strides) {
110  assert(llvm::all_of(strides, [](int64_t s) { return s > 0; }) &&
111  "strides must be nonnegative");
112  return delinearizeImpl(linearIndex, strides,
113  [](int64_t e1, int64_t e2) { return e1 / e2; });
114 }
115 
116 std::optional<SmallVector<int64_t>>
118  if (shape.size() < subShape.size())
119  return std::nullopt;
120  assert(llvm::all_of(shape, [](int64_t s) { return s > 0; }) &&
121  "shape must be nonnegative");
122  assert(llvm::all_of(subShape, [](int64_t s) { return s > 0; }) &&
123  "subShape must be nonnegative");
124 
125  // Starting from the end, compute the integer divisors.
126  std::vector<int64_t> result;
127  result.reserve(shape.size());
128  for (auto [size, subSize] :
129  llvm::zip(llvm::reverse(shape), llvm::reverse(subShape))) {
130  // If integral division does not occur, return and let the caller decide.
131  if (size % subSize != 0)
132  return std::nullopt;
133  result.push_back(size / subSize);
134  }
135  // At this point we computed the ratio (in reverse) for the common size.
136  // Fill with the remaining entries from the shape (still in reverse).
137  int commonSize = subShape.size();
138  std::copy(shape.rbegin() + commonSize, shape.rend(),
139  std::back_inserter(result));
140  // Reverse again to get it back in the proper order and return.
141  return SmallVector<int64_t>{result.rbegin(), result.rend()};
142 }
143 
144 //===----------------------------------------------------------------------===//
145 // Utils that operate on AffineExpr.
146 //===----------------------------------------------------------------------===//
147 
149  if (sizes.empty())
150  return {};
151  AffineExpr unit = getAffineConstantExpr(1, sizes.front().getContext());
153 }
154 
157  return computeElementwiseMulImpl(v1, v2);
158 }
159 
161  if (basis.empty())
162  return getAffineConstantExpr(0, ctx);
163  return std::accumulate(basis.begin(), basis.end(),
164  getAffineConstantExpr(0, ctx),
165  std::plus<AffineExpr>());
166 }
167 
169  if (basis.empty())
170  return getAffineConstantExpr(1, ctx);
171  return std::accumulate(basis.begin(), basis.end(),
172  getAffineConstantExpr(1, ctx),
173  std::multiplies<AffineExpr>());
174 }
175 
177  ArrayRef<AffineExpr> basis) {
178  AffineExpr zero = getAffineConstantExpr(0, ctx);
179  return linearizeImpl(offsets, basis, zero);
180 }
181 
183  ArrayRef<int64_t> basis) {
184 
185  return linearize(ctx, offsets, getAffineConstantExprs(basis, ctx));
186 }
187 
189  ArrayRef<AffineExpr> strides) {
190  return delinearizeImpl(
191  linearIndex, strides,
192  [](AffineExpr e1, AffineExpr e2) { return e1.floorDiv(e2); });
193 }
194 
196  ArrayRef<int64_t> strides) {
197  MLIRContext *ctx = linearIndex.getContext();
198  return delinearize(linearIndex, getAffineConstantExprs(strides, ctx));
199 }
200 
201 //===----------------------------------------------------------------------===//
202 // Permutation utils.
203 //===----------------------------------------------------------------------===//
204 
207  assert(llvm::all_of(permutation, [](int64_t s) { return s >= 0; }) &&
208  "permutation must be non-negative");
209  SmallVector<int64_t> inversion(permutation.size());
210  for (const auto &pos : llvm::enumerate(permutation)) {
211  inversion[pos.value()] = pos.index();
212  }
213  return inversion;
214 }
215 
217  for (auto i : llvm::seq<int64_t>(0, permutation.size()))
218  if (permutation[i] != i)
219  return false;
220  return true;
221 }
222 
224  llvm::SmallDenseSet<int64_t, 4> seenVals;
225  for (auto val : interchange) {
226  if (val < 0 || static_cast<uint64_t>(val) >= interchange.size())
227  return false;
228  if (seenVals.count(val))
229  return false;
230  seenVals.insert(val);
231  }
232  return seenVals.size() == interchange.size();
233 }
234 
237  ArrayRef<int64_t> desiredPositions) {
238  SmallVector<int64_t> res(permSize, -1);
239  DenseSet<int64_t> seen;
240  for (auto [pos, desiredPos] : llvm::zip_equal(positions, desiredPositions)) {
241  res[desiredPos] = pos;
242  seen.insert(pos);
243  }
244  int64_t nextPos = 0;
245  for (int64_t &entry : res) {
246  if (entry != -1)
247  continue;
248  while (seen.contains(nextPos))
249  ++nextPos;
250  entry = nextPos;
251  ++nextPos;
252  }
253  return res;
254 }
255 
257  ArrayRef<int64_t> dropPositions) {
258  assert(inputPerm.size() >= dropPositions.size() &&
259  "expect inputPerm size large than position to drop");
261  unsigned permSize = inputPerm.size();
262  for (unsigned inputIndex = 0; inputIndex < permSize; ++inputIndex) {
263  int64_t targetIndex = inputPerm[inputIndex];
264  bool shouldDrop = false;
265  unsigned dropSize = dropPositions.size();
266  for (unsigned dropIndex = 0; dropIndex < dropSize; dropIndex++) {
267  if (dropPositions[dropIndex] == inputPerm[inputIndex]) {
268  shouldDrop = true;
269  break;
270  }
271  if (dropPositions[dropIndex] < inputPerm[inputIndex]) {
272  targetIndex--;
273  }
274  }
275  if (!shouldDrop) {
276  res.push_back(targetIndex);
277  }
278  }
279  return res;
280 }
281 
283  unsigned dropFront,
284  unsigned dropBack) {
285  assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
286  auto range = arrayAttr.getAsRange<IntegerAttr>();
288  res.reserve(arrayAttr.size() - dropFront - dropBack);
289  for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
290  it != eit; ++it)
291  res.push_back((*it).getValue().getSExtValue());
292  return res;
293 }
294 
295 // TODO: do we have any common utily for this?
297  assert(val && "Invalid value");
298  if (auto attr = dyn_cast<Attribute>(val)) {
299  return attr.getContext();
300  }
301  return cast<Value>(val).getContext();
302 }
303 
304 std::pair<AffineExpr, SmallVector<OpFoldResult>>
306  ArrayRef<OpFoldResult> strides,
307  ArrayRef<OpFoldResult> indices) {
308  assert(strides.size() == indices.size());
309  auto sourceRank = static_cast<unsigned>(strides.size());
310 
311  // Hold the affine symbols and values for the computation of the offset.
312  SmallVector<OpFoldResult> values(2 * sourceRank + 1);
313  SmallVector<AffineExpr> symbols(2 * sourceRank + 1);
314 
315  bindSymbolsList(getContext(sourceOffset), MutableArrayRef{symbols});
316  AffineExpr expr = symbols.front();
317  values[0] = sourceOffset;
318 
319  for (unsigned i = 0; i < sourceRank; ++i) {
320  // Compute the stride.
321  OpFoldResult origStride = strides[i];
322 
323  // Build up the computation of the offset.
324  unsigned baseIdxForDim = 1 + 2 * i;
325  unsigned subOffsetForDim = baseIdxForDim;
326  unsigned origStrideForDim = baseIdxForDim + 1;
327  expr = expr + symbols[subOffsetForDim] * symbols[origStrideForDim];
328  values[subOffsetForDim] = indices[i];
329  values[origStrideForDim] = origStride;
330  }
331 
332  return {expr, values};
333 }
334 
335 std::pair<AffineExpr, SmallVector<OpFoldResult>>
337  ArrayRef<Value> indices) {
338  return computeLinearIndex(
339  sourceOffset, getAsIndexOpFoldResult(sourceOffset.getContext(), strides),
340  getAsOpFoldResult(ValueRange(indices)));
341 }
342 
343 //===----------------------------------------------------------------------===//
344 // TileOffsetRange
345 //===----------------------------------------------------------------------===//
346 
347 /// Apply left-padding by 1 to the tile shape if required.
349  unsigned paddedSize) {
350  assert(tileShape.size() <= paddedSize &&
351  "expected tileShape to <= paddedSize");
352  if (tileShape.size() == paddedSize)
353  return to_vector(tileShape);
354  SmallVector<int64_t> result(paddedSize - tileShape.size(), 1);
355  llvm::append_range(result, tileShape);
356  return result;
357 }
358 
360  ArrayRef<int64_t> shape, ArrayRef<int64_t> tileShape,
361  ArrayRef<int64_t> loopOrder)
362  : tileShape(padTileShapeToSize(tileShape, shape.size())),
363  inverseLoopOrder(invertPermutationVector(loopOrder)),
364  sliceStrides(shape.size()) {
365  // Divide the shape by the tile shape.
366  std::optional<SmallVector<int64_t>> shapeRatio =
367  mlir::computeShapeRatio(shape, tileShape);
368  assert(shapeRatio && shapeRatio->size() == shape.size() &&
369  "target shape does not evenly divide the original shape");
370  assert(isPermutationVector(loopOrder) && loopOrder.size() == shape.size() &&
371  "expected loop order to be a permutation of rank equal to outer "
372  "shape");
373 
374  maxLinearIndex = mlir::computeMaxLinearIndex(*shapeRatio);
375  mlir::applyPermutationToVector(*shapeRatio, loopOrder);
376  sliceStrides = mlir::computeStrides(*shapeRatio);
377 }
378 
380  int64_t linearIndex) const {
382  delinearize(linearIndex, sliceStrides), inverseLoopOrder);
383  return computeElementwiseMul(tileCoords, tileShape);
384 }
385 
388  AffineExpr linearIndex) const {
389  MLIRContext *ctx = linearIndex.getContext();
391  delinearize(linearIndex, sliceStrides), inverseLoopOrder);
392  return mlir::computeElementwiseMul(tileCoords,
393  getAffineConstantExprs(tileShape, ctx));
394 }
void dropFront(int64_t arr[N], int64_t *res)
Definition: CRunnerUtils.h:118
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
static MLIRContext * getContext(OpFoldResult val)
static SmallVector< int64_t > padTileShapeToSize(ArrayRef< int64_t > tileShape, unsigned paddedSize)
Apply left-padding by 1 to the tile shape if required.
SmallVector< ExprType > computeElementwiseMulImpl(ArrayRef< ExprType > v1, ArrayRef< ExprType > v2)
SmallVector< ExprType > computeSuffixProductImpl(ArrayRef< ExprType > sizes, ExprType unit)
ExprType linearizeImpl(ArrayRef< ExprType > offsets, ArrayRef< ExprType > basis, ExprType zero)
SmallVector< ExprType > delinearizeImpl(ExprType linearIndex, ArrayRef< ExprType > strides, DivOpTy divOp)
Base type for affine expression.
Definition: AffineExpr.h:68
AffineExpr floorDiv(uint64_t v) const
Definition: AffineExpr.cpp:959
MLIRContext * getContext() const
Definition: AffineExpr.cpp:31
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:63
This class represents a single result from folding an operation.
Definition: OpDefinition.h:272
MLIRContext * getContext() const
Definition: OpDefinition.h:278
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:387
SmallVector< int64_t > getStaticTileOffsets(int64_t linearIndex) const
TileOffsetRangeImpl(ArrayRef< int64_t > shape, ArrayRef< int64_t > tileShape, ArrayRef< int64_t > loopOrder)
SmallVector< AffineExpr > getDynamicTileOffsets(AffineExpr linearIndex) const
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:344
Include the generated interface declarations.
OpFoldResult getAsIndexOpFoldResult(MLIRContext *ctx, int64_t val)
Convert int64_t to integer attributes of index type and return them as OpFoldResult.
SmallVector< int64_t > computeElementwiseMul(ArrayRef< int64_t > v1, ArrayRef< int64_t > v2)
Return a vector containing llvm::zip_equal(v1, v2) multiplied elementwise.
std::pair< AffineExpr, SmallVector< OpFoldResult > > computeLinearIndex(OpFoldResult sourceOffset, ArrayRef< OpFoldResult > strides, ArrayRef< OpFoldResult > indices)
Compute linear index from provided strides and indices, assuming strided layout.
SmallVector< int64_t > computeStrides(ArrayRef< int64_t > sizes)
Definition: IndexingUtils.h:47
SmallVector< T > applyPermutation(ArrayRef< T > input, ArrayRef< int64_t > permutation)
SmallVector< int64_t > delinearize(int64_t linearIndex, ArrayRef< int64_t > strides)
Given the strides together with a linear index in the dimension space, return the vector-space offset...
int64_t computeProduct(ArrayRef< int64_t > basis)
Self-explicit.
bool isIdentityPermutation(ArrayRef< int64_t > permutation)
Returns true if permutation is an identity permutation.
SmallVector< int64_t > computePermutationVector(int64_t permSize, ArrayRef< int64_t > positions, ArrayRef< int64_t > desiredPositions)
Return a permutation vector of size permSize that would result in moving positions into desiredPositi...
SmallVector< int64_t > getI64SubArray(ArrayAttr arrayAttr, unsigned dropFront=0, unsigned dropBack=0)
Helper to return a subset of arrayAttr as a vector of int64_t.
SmallVector< int64_t > computeSuffixProduct(ArrayRef< int64_t > sizes)
Given a set of sizes, return the suffix product.
int64_t computeMaxLinearIndex(ArrayRef< int64_t > basis)
Return the number of elements of basis (i.e.
Definition: IndexingUtils.h:69
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
Definition: AffineExpr.cpp:643
OpFoldResult getAsOpFoldResult(Value val)
Given a value, try to extract a constant Attribute.
int64_t linearize(ArrayRef< int64_t > offsets, ArrayRef< int64_t > basis)
Return the linearized index of 'offsets' w.r.t.
SmallVector< AffineExpr > getAffineConstantExprs(ArrayRef< int64_t > constants, MLIRContext *context)
Definition: AffineExpr.cpp:653
std::optional< SmallVector< int64_t > > computeShapeRatio(ArrayRef< int64_t > shape, ArrayRef< int64_t > subShape)
Return the multi-dimensional integral ratio of subShape to the trailing dimensions of shape.
void applyPermutationToVector(SmallVector< T, N > &inVec, ArrayRef< int64_t > permutation)
Apply the permutation defined by permutation to inVec.
int64_t computeSum(ArrayRef< int64_t > basis)
Self-explicit.
SmallVector< int64_t > dropDims(ArrayRef< int64_t > inputPerm, ArrayRef< int64_t > dropPositions)
Returns a permutation vector that drop the input dims in dropPositions from inputPerm.
bool isPermutationVector(ArrayRef< int64_t > interchange)
Method to check if an interchange vector is a permutation.
void bindSymbolsList(MLIRContext *ctx, MutableArrayRef< AffineExprTy > exprs)
Definition: AffineExpr.h:330
SmallVector< int64_t > invertPermutationVector(ArrayRef< int64_t > permutation)
Helper method to apply to inverse a permutation.