MLIR  19.0.0git
VectorUtils.cpp
Go to the documentation of this file.
1 //===- VectorUtils.cpp - MLIR Utilities for VectorOps ------------------===//
2 //
3 // Part of the MLIR Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements utility methods for working with the Vector dialect.
10 //
11 //===----------------------------------------------------------------------===//
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"

#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/TypeSwitch.h"

33 using namespace mlir;
34 
35 /// Helper function that creates a memref::DimOp or tensor::DimOp depending on
36 /// the type of `source`.
38  int64_t dim) {
39  if (isa<UnrankedMemRefType, MemRefType>(source.getType()))
40  return b.createOrFold<memref::DimOp>(loc, source, dim);
41  if (isa<UnrankedTensorType, RankedTensorType>(source.getType()))
42  return b.createOrFold<tensor::DimOp>(loc, source, dim);
43  llvm_unreachable("Expected MemRefType or TensorType");
44 }
45 
46 /// Given the n-D transpose pattern 'transp', return true if 'dim0' and 'dim1'
47 /// should be transposed with each other within the context of their 2D
48 /// transposition slice.
49 ///
50 /// Example 1: dim0 = 0, dim1 = 2, transp = [2, 1, 0]
51 /// Return true: dim0 and dim1 are transposed within the context of their 2D
52 /// transposition slice ([1, 0]).
53 ///
54 /// Example 2: dim0 = 0, dim1 = 1, transp = [2, 1, 0]
55 /// Return true: dim0 and dim1 are transposed within the context of their 2D
56 /// transposition slice ([1, 0]). Paradoxically, note how dim1 (1) is *not*
57 /// transposed within the full context of the transposition.
58 ///
59 /// Example 3: dim0 = 0, dim1 = 1, transp = [2, 0, 1]
60 /// Return false: dim0 and dim1 are *not* transposed within the context of
61 /// their 2D transposition slice ([0, 1]). Paradoxically, note how dim0 (0)
62 /// and dim1 (1) are transposed within the full context of the of the
63 /// transposition.
64 static bool areDimsTransposedIn2DSlice(int64_t dim0, int64_t dim1,
65  ArrayRef<int64_t> transp) {
66  // Perform a linear scan along the dimensions of the transposed pattern. If
67  // dim0 is found first, dim0 and dim1 are not transposed within the context of
68  // their 2D slice. Otherwise, 'dim1' is found first and they are transposed.
69  for (int64_t permDim : transp) {
70  if (permDim == dim0)
71  return false;
72  if (permDim == dim1)
73  return true;
74  }
75 
76  llvm_unreachable("Ill-formed transpose pattern");
77 }
78 
80 mlir::vector::isTranspose2DSlice(vector::TransposeOp op) {
81  VectorType srcType = op.getSourceVectorType();
82  SmallVector<int64_t> srcGtOneDims;
83  for (auto [index, size] : llvm::enumerate(srcType.getShape()))
84  if (size > 1)
85  srcGtOneDims.push_back(index);
86 
87  if (srcGtOneDims.size() != 2)
88  return failure();
89 
90  // Check whether the two source vector dimensions that are greater than one
91  // must be transposed with each other so that we can apply one of the 2-D
92  // transpose pattens. Otherwise, these patterns are not applicable.
93  if (!areDimsTransposedIn2DSlice(srcGtOneDims[0], srcGtOneDims[1],
94  op.getPermutation()))
95  return failure();
96 
97  return std::pair<int, int>(srcGtOneDims[0], srcGtOneDims[1]);
98 }
99 
100 /// Constructs a permutation map from memref indices to vector dimension.
101 ///
102 /// The implementation uses the knowledge of the mapping of enclosing loop to
103 /// vector dimension. `enclosingLoopToVectorDim` carries this information as a
104 /// map with:
105 /// - keys representing "vectorized enclosing loops";
106 /// - values representing the corresponding vector dimension.
107 /// The algorithm traverses "vectorized enclosing loops" and extracts the
108 /// at-most-one MemRef index that is invariant along said loop. This index is
109 /// guaranteed to be at most one by construction: otherwise the MemRef is not
110 /// vectorizable.
111 /// If this invariant index is found, it is added to the permutation_map at the
112 /// proper vector dimension.
113 /// If no index is found to be invariant, 0 is added to the permutation_map and
114 /// corresponds to a vector broadcast along that dimension.
115 ///
116 /// Returns an empty AffineMap if `enclosingLoopToVectorDim` is empty,
117 /// signalling that no permutation map can be constructed given
118 /// `enclosingLoopToVectorDim`.
119 ///
120 /// Examples can be found in the documentation of `makePermutationMap`, in the
121 /// header file.
123  ArrayRef<Value> indices,
124  const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
125  if (enclosingLoopToVectorDim.empty())
126  return AffineMap();
127  MLIRContext *context =
128  enclosingLoopToVectorDim.begin()->getFirst()->getContext();
129  SmallVector<AffineExpr> perm(enclosingLoopToVectorDim.size(),
130  getAffineConstantExpr(0, context));
131 
132  for (auto kvp : enclosingLoopToVectorDim) {
133  assert(kvp.second < perm.size());
134  auto invariants = affine::getInvariantAccesses(
135  cast<affine::AffineForOp>(kvp.first).getInductionVar(), indices);
136  unsigned numIndices = indices.size();
137  unsigned countInvariantIndices = 0;
138  for (unsigned dim = 0; dim < numIndices; ++dim) {
139  if (!invariants.count(indices[dim])) {
140  assert(perm[kvp.second] == getAffineConstantExpr(0, context) &&
141  "permutationMap already has an entry along dim");
142  perm[kvp.second] = getAffineDimExpr(dim, context);
143  } else {
144  ++countInvariantIndices;
145  }
146  }
147  assert((countInvariantIndices == numIndices ||
148  countInvariantIndices == numIndices - 1) &&
149  "Vectorization prerequisite violated: at most 1 index may be "
150  "invariant wrt a vectorized loop");
151  (void)countInvariantIndices;
152  }
153  return AffineMap::get(indices.size(), 0, perm, context);
154 }
155 
156 /// Implementation detail that walks up the parents and records the ones with
157 /// the specified type.
158 /// TODO: could also be implemented as a collect parents followed by a
159 /// filter and made available outside this file.
160 template <typename T>
163  auto *current = block->getParentOp();
164  while (current) {
165  if ([[maybe_unused]] auto typedParent = dyn_cast<T>(current)) {
166  assert(res.count(current) == 0 && "Already inserted");
167  res.insert(current);
168  }
169  current = current->getParentOp();
170  }
171  return res;
172 }
173 
174 /// Returns the enclosing AffineForOp, from closest to farthest.
176  return getParentsOfType<affine::AffineForOp>(block);
177 }
178 
180  Block *insertPoint, ArrayRef<Value> indices,
181  const DenseMap<Operation *, unsigned> &loopToVectorDim) {
182  DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
183  auto enclosingLoops = getEnclosingforOps(insertPoint);
184  for (auto *forInst : enclosingLoops) {
185  auto it = loopToVectorDim.find(forInst);
186  if (it != loopToVectorDim.end()) {
187  enclosingLoopToVectorDim.insert(*it);
188  }
189  }
190  return ::makePermutationMap(indices, enclosingLoopToVectorDim);
191 }
192 
194  Operation *op, ArrayRef<Value> indices,
195  const DenseMap<Operation *, unsigned> &loopToVectorDim) {
196  return makePermutationMap(op->getBlock(), indices, loopToVectorDim);
197 }
198 
/// Returns true if `op` works on a super-vector whose shape has an integral
/// multi-dimensional ratio to `subVectorType`'s shape (see computeShapeRatio).
/// Emits an NYI error and returns false for 0-result non-return ops and for
/// multi-result ops.
bool matcher::operatesOnSuperVectorsOf(Operation &op,
                                       VectorType subVectorType) {
  // First, extract the vector type and distinguish between:
  // a. ops that *must* lower a super-vector (i.e. vector.transfer_read,
  // vector.transfer_write); and
  // b. ops that *may* lower a super-vector (all other ops).
  // The ops that *may* lower a super-vector only do so if the super-vector to
  // sub-vector ratio exists. The ops that *must* lower a super-vector are
  // explicitly checked for this property.
  /// TODO: there should be a single function for all ops to do this so we
  /// do not have to special case. Maybe a trait, or just a method, unclear atm.
  // `mustDivide` is only consumed by the assert below; the void-cast silences
  // unused-variable warnings in release builds.
  bool mustDivide = false;
  (void)mustDivide;
  VectorType superVectorType;
  if (auto transfer = dyn_cast<VectorTransferOpInterface>(op)) {
    superVectorType = transfer.getVectorType();
    mustDivide = true;
  } else if (op.getNumResults() == 0) {
    if (!isa<func::ReturnOp>(op)) {
      op.emitError("NYI: assuming only return operations can have 0 "
                   " results at this point");
    }
    // 0-result ops (including func.return) never produce a super-vector.
    return false;
  } else if (op.getNumResults() == 1) {
    if (auto v = dyn_cast<VectorType>(op.getResult(0).getType())) {
      superVectorType = v;
    } else {
      // Not a vector type.
      return false;
    }
  } else {
    // Not a vector.transfer and has more than 1 result, fail hard for now to
    // wake us up when something changes.
    op.emitError("NYI: operation has more than 1 result");
    return false;
  }

  // Get the ratio.
  auto ratio =
      computeShapeRatio(superVectorType.getShape(), subVectorType.getShape());

  // Sanity check.
  assert((ratio || !mustDivide) &&
         "vector.transfer operation in which super-vector size is not an"
         " integer multiple of sub-vector size");

  // This catches cases that are not strictly necessary to have multiplicity but
  // still aren't divisible by the sub-vector shape.
  // This could be useful information if we wanted to reshape at the level of
  // the vector type (but we would have to look at the compute and distinguish
  // between parallel, reduction and possibly other cases.
  return ratio.has_value();
}
252 
253 bool vector::isContiguousSlice(MemRefType memrefType, VectorType vectorType) {
254  if (vectorType.isScalable())
255  return false;
256 
257  ArrayRef<int64_t> vectorShape = vectorType.getShape();
258  auto vecRank = vectorType.getRank();
259 
260  if (!trailingNDimsContiguous(memrefType, vecRank))
261  return false;
262 
263  // Extract the trailing dims and strides of the input memref
264  auto memrefShape = memrefType.getShape().take_back(vecRank);
265 
266  // Compare the dims of `vectorType` against `memrefType` (in reverse).
267  // In the most basic case, all dims will match.
268  auto firstNonMatchingDim =
269  std::mismatch(vectorShape.rbegin(), vectorShape.rend(),
270  memrefShape.rbegin(), memrefShape.rend());
271  if (firstNonMatchingDim.first == vectorShape.rend())
272  return true;
273 
274  // One non-matching dim is still fine, however the remaining leading dims of
275  // `vectorType` need to be 1.
276  SmallVector<int64_t> leadingDims(++firstNonMatchingDim.first,
277  vectorShape.rend());
278 
279  return llvm::all_of(leadingDims, [](auto x) { return x == 1; });
280 }
281 
282 std::optional<StaticTileOffsetRange>
283 vector::createUnrollIterator(VectorType vType, int64_t targetRank) {
284  if (vType.getRank() <= targetRank)
285  return {};
286  // Attempt to unroll until targetRank or the first scalable dimension (which
287  // cannot be unrolled).
288  auto shapeToUnroll = vType.getShape().drop_back(targetRank);
289  auto scalableDimsToUnroll = vType.getScalableDims().drop_back(targetRank);
290  auto it =
291  std::find(scalableDimsToUnroll.begin(), scalableDimsToUnroll.end(), true);
292  auto firstScalableDim = it - scalableDimsToUnroll.begin();
293  if (firstScalableDim == 0)
294  return {};
295  // All scalable dimensions should be removed now.
296  scalableDimsToUnroll = scalableDimsToUnroll.slice(0, firstScalableDim);
297  assert(!llvm::is_contained(scalableDimsToUnroll, true) &&
298  "unexpected leading scalable dimension");
299  // Create an unroll iterator for leading dimensions.
300  shapeToUnroll = shapeToUnroll.slice(0, firstScalableDim);
301  return StaticTileOffsetRange(shapeToUnroll, /*unrollStep=*/1);
302 }
303 
305  Operation *xfer,
306  RewriterBase &rewriter) {
307  auto loc = xfer->getLoc();
308 
310  .Case<vector::TransferReadOp>(
311  [&](auto readOp) { return readOp.getSource(); })
312  .Case<vector::TransferWriteOp>(
313  [&](auto writeOp) { return writeOp.getOperand(1); });
314 
315  SmallVector<OpFoldResult> mixedSourceDims =
316  hasTensorSemantics ? tensor::getMixedSizes(rewriter, loc, base)
317  : memref::getMixedSizes(rewriter, loc, base);
318  return mixedSourceDims;
319 }
320 
321 bool vector::isLinearizableVector(VectorType type) {
322  auto numScalableDims = llvm::count(type.getScalableDims(), true);
323  return (type.getRank() > 1) && (numScalableDims <= 1);
324 }
static VectorShape vectorShape(Type type)
static bool areDimsTransposedIn2DSlice(int64_t dim0, int64_t dim1, ArrayRef< int64_t > transp)
Given the n-D transpose pattern 'transp', return true if 'dim0' and 'dim1' should be transposed with ...
Definition: VectorUtils.cpp:64
static SetVector< Operation * > getEnclosingforOps(Block *block)
Returns the enclosing AffineForOp, from closest to farthest.
static AffineMap makePermutationMap(ArrayRef< Value > indices, const DenseMap< Operation *, unsigned > &enclosingLoopToVectorDim)
Constructs a permutation map from memref indices to vector dimension.
static SetVector< Operation * > getParentsOfType(Block *block)
Implementation detail that walks up the parents and records the ones with the specified type.
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued.
Definition: AffineMap.h:47
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
Block represents an ordered list of Operations.
Definition: Block.h:30
Operation * getParentOp()
Returns the closest surrounding operation that contains this block.
Definition: Block.cpp:30
This class provides support for representing a failure result, or a valid value of type T.
Definition: LogicalResult.h:78
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:63
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:60
This class helps build Operations.
Definition: Builders.h:209
void createOrFold(SmallVectorImpl< Value > &results, Location location, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
Definition: Builders.h:522
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition: Operation.h:402
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:223
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Definition: Operation.cpp:268
Block * getBlock()
Returns the operation block that contains this operation.
Definition: Operation.h:213
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:399
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
Definition: PatternMatch.h:400
A range-style iterator that allows for iterating over the offsets of all potential tiles of size tile...
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:125
DenseSet< Value, DenseMapInfo< Value > > getInvariantAccesses(Value iv, ArrayRef< Value > indices)
Given an induction variable iv of type AffineForOp and indices of type IndexType, returns the set of ...
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:285
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given memref value.
Definition: MemRefOps.cpp:77
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given tensor value.
Definition: TensorOps.cpp:61
bool isContiguousSlice(MemRefType memrefType, VectorType vectorType)
Return true if vectorType is a contiguous slice of memrefType.
FailureOr< std::pair< int, int > > isTranspose2DSlice(vector::TransposeOp op)
Returns two dims that are greater than one if the transposition is applied on a 2D slice.
Definition: VectorUtils.cpp:80
std::optional< StaticTileOffsetRange > createUnrollIterator(VectorType vType, int64_t targetRank=1)
Returns an iterator for all positions in the leading dimensions of vType up to the targetRank.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim)
Helper function that creates a memref::DimOp or tensor::DimOp depending on the type of source.
Definition: VectorUtils.cpp:37
bool isLinearizableVector(VectorType type)
Returns true if the input Vector type can be linearized.
SmallVector< OpFoldResult > getMixedSizesXfer(bool hasTensorSemantics, Operation *xfer, RewriterBase &rewriter)
A wrapper for getMixedSizes for vector.transfer_read and vector.transfer_write Ops (for source and de...
Include the generated interface declarations.
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:62
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
Definition: AffineExpr.cpp:623
std::optional< SmallVector< int64_t > > computeShapeRatio(ArrayRef< int64_t > shape, ArrayRef< int64_t > subShape)
Return the multi-dimensional integral ratio of subShape to the trailing dimensions of shape.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.
Definition: AffineExpr.cpp:599
bool trailingNDimsContiguous(MemRefType type, int64_t n)
Return "true" if the last N dimensions of the given type are contiguous.