//===- VectorTransforms.cpp - Conversion within the Vector dialect --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent rewrites as 1->N patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"

#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "vector-to-vector"

using namespace mlir;
using namespace mlir::vector;

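// Casts the integer elements of `arrayAttr` to the requested integer type and
// returns them as a SmallVector.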
template <typename IntType>
static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(llvm::map_range(
      arrayAttr.getAsRange<IntegerAttr>(),
      [](IntegerAttr attr) { return static_cast<IntType>(attr.getInt()); }));
}

// Returns the position of the result in `map` that is exactly the dimension
// `index`, if present.
static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      return i;
  }
  return std::nullopt;
}

namespace {

/// Convert MulIOp/MulFOp + MultiDimReductionOp<add> into ContractionOp.
/// Ex:
/// ```
///   %0 = arith.mulf %arg0, %arg1 : vector<8x32x16xf32>
///   %1 = vector.multi_reduction add, %0 [1]
///     : vector<8x32x16xf32> to vector<8x16xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d2)>],
///        iterator_types = ["parallel", "reduction", "parallel"],
///        kind = add} %arg0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x16xf32>
/// ```
struct MultiReduceToContract
    : public OpRewritePattern<vector::MultiDimReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
                                PatternRewriter &rewriter) const override {
    if (reduceOp.getKind() != vector::CombiningKind::ADD)
      return failure();
    Operation *mulOp = reduceOp.getSource().getDefiningOp();
    if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
      return failure();
    SmallVector<bool> reductionMask = reduceOp.getReductionMask();
    auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
    SmallVector<AffineExpr> exprs;
    SmallVector<vector::IteratorType> iteratorTypes;
    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
      if (!isReduceDim.value()) {
        iteratorTypes.push_back(vector::IteratorType::parallel);
        exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
      } else {
        iteratorTypes.push_back(vector::IteratorType::reduction);
      }
    }
    auto dstMap =
        AffineMap::get(/*dimCount=*/reductionMask.size(),
                       /*symbolCount=*/0, exprs, reduceOp.getContext());
    rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
        reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
        rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
        rewriter.getArrayAttr(llvm::to_vector(llvm::map_range(
            iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
              return IteratorTypeAttr::get(rewriter.getContext(), t);
            }))));
    return success();
  }
};

/// Merge LHS/RHS (A/B) TransposeOp into ContractionOp user.
/// Ex:
/// ```
///   %0 = vector.transpose %arg0, [2, 0, 1]
///     : vector<32x16x8xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1)>],
///        iterator_types = ["parallel", "parallel", "reduction"],
///        kind = add} %0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d1, d2, d0)>,
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1)>],
///        iterator_types = ["parallel", "parallel", "reduction"],
///        kind = add} %arg0, %arg1, %cst_f0
///     : vector<32x16x8xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractABTranspose final
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto transposeOp = operand->getDefiningOp<vector::TransposeOp>();
      if (!transposeOp)
        continue;
      AffineMap permutationMap = AffineMap::getPermutationMap(
          transposeOp.getPermutation(), contractOp.getContext());
      map = inversePermutation(permutationMap).compose(map);
      *operand = transposeOp.getVector();
      changed = true;
    }
    if (!changed)
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merges accumulator and result transposes into contract.
///
/// For example:
/// ```mlir
/// %accT = vector.transpose %acc, [0, 2, 1]
///   : vector<2x8x4xf32> to vector<2x4x8xf32>
/// %contract = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %accT
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x4x8xf32>
/// %0 = vector.transpose %contract, [0, 2, 1]
///   : vector<2x4x8xf32> to vector<2x8x4xf32>
/// ```
/// Becomes:
/// ```mlir
/// %0 = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d2, d1)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %acc
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x8x4xf32>
/// ```
struct CombineContractResultTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp resTOp,
                                PatternRewriter &rewriter) const override {
    auto contractOp = resTOp.getVector().getDefiningOp<vector::ContractionOp>();
    if (!contractOp || !contractOp->hasOneUse())
      return failure();

    auto accTOp = contractOp.getAcc().getDefiningOp<vector::TransposeOp>();
    if (!accTOp)
      return failure();

    MLIRContext *context = contractOp.getContext();
    auto maps = llvm::to_vector<3>(contractOp.getIndexingMapsArray());
    AffineMap contractMap = maps.back();

    // Accumulator transpose performs f(A) -> B. Contract performs g(C) -> B.
    // To index into A in contract, we need revert(f)(g(C)) -> A.
    auto accTMap =
        AffineMap::getPermutationMap(accTOp.getPermutation(), context);

    // Contract performs g(C) -> D. Result transpose performs h(D) -> E.
    // To index into E in contract, we need h(g(C)) -> E.
    auto resTMap =
        AffineMap::getPermutationMap(resTOp.getPermutation(), context);
    auto combinedResMap = resTMap.compose(contractMap);

    // The accumulator and result share the same indexing map, so they must
    // agree for the merge to be valid. This means combinedResMap must equal
    // inversePermutation(accTMap).compose(contractMap), i.e. the result
    // transpose must be the inverse of the accumulator transpose.
    if (inversePermutation(accTMap) != resTMap)
      return failure();
    maps.back() = combinedResMap;

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        resTOp, contractOp.getLhs(), contractOp.getRhs(), accTOp.getVector(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merge BroadcastOp into ContractionOp user.
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xf32> to vector<8x32x16xf32>
///   %1 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1)>],
///        iterator_types = ["parallel", "parallel", "reduction"],
///        kind = add} %0, %arg1, %cst_f0
///     : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
///   %1 = vector.contract {indexing_maps = [
///          affine_map<(d0, d1, d2) -> (d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///          affine_map<(d0, d1, d2) -> (d0, d1)>],
///        iterator_types = ["parallel", "parallel", "reduction"],
///        kind = add} %arg0, %arg1, %cst_f0
///     : vector<32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
///
/// For masked vector.contract, the mask requires updating when a dimension is
/// dropped. In such cases, the dropped dimensions must correspond to the
/// mask's leading unit dimensions. More generic cases (e.g. non-unit dims) are
/// not yet supported.
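///
/// Ex (mask update; an illustrative sketch): when a leading unit dim
/// introduced by the broadcast is dropped, the corresponding mask dim is
/// dropped with a shape cast, e.g.:
/// ```
///   %new_mask = vector.shape_cast %mask
///     : vector<1x8x16xi1> to vector<8x16xi1>
/// ```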
FailureOr<Value> combineContractAndBroadcast(vector::ContractionOp contractOp,
                                             MaskingOpInterface maskingOp,
                                             PatternRewriter &rewriter) {
  SmallVector<AffineMap> maps =
      llvm::to_vector<4>(contractOp.getIndexingMapsArray());
  Value lhs = contractOp.getLhs();
  Value rhs = contractOp.getRhs();
  size_t index = 0;
  bool changed = false;
  for (Value *operand : {&lhs, &rhs}) {
    AffineMap &map = maps[index++];
    auto broadcast = operand->getDefiningOp<vector::BroadcastOp>();
    if (!broadcast)
      continue;
    // ContractionOp can only take vectors as operands.
    auto srcType = dyn_cast<VectorType>(broadcast.getSourceType());
    if (!srcType ||
        srcType.getRank() == broadcast.getResultVectorType().getRank())
      continue;
    int64_t rankDiff =
        broadcast.getResultVectorType().getRank() - srcType.getRank();
    bool innerDimBroadcast = false;
    SmallVector<AffineExpr> originalDims;
    for (const auto &dim : llvm::enumerate(srcType.getShape())) {
      if (dim.value() !=
          broadcast.getResultVectorType().getDimSize(rankDiff + dim.index())) {
        innerDimBroadcast = true;
        break;
      }
      originalDims.push_back(rewriter.getAffineDimExpr(dim.index() + rankDiff));
    }
    // Contract doesn't support inner dimension broadcast. Once this is
    // relaxed we can remove this case.
    if (innerDimBroadcast)
      continue;

    // It would be incorrect to fold a broadcast onto a reduction dimension
    // of non-unit size.
    bool nonUnitDimReductionBroadcast = false;
    for (int64_t i = 0; i < rankDiff; ++i) {
      if (broadcast.getResultVectorType().getDimSize(i) != 1 &&
          isReductionIterator(contractOp.getIteratorTypes()
                                  .getValue()[map.getDimPosition(i)])) {
        nonUnitDimReductionBroadcast = true;
        break;
      }
    }
    if (nonUnitDimReductionBroadcast)
      continue;

    AffineMap broadcastMap =
        AffineMap::get(broadcast.getResultVectorType().getRank(), 0,
                       originalDims, contractOp.getContext());
    map = broadcastMap.compose(map);
    *operand = broadcast.getSource();
    changed = true;
  }

  if (!changed)
    return failure();

  // Determine which dims are unused, now that the maps have been composed
  // with the broadcast maps.
  llvm::SmallBitVector unusedDimsBitVector = getUnusedDimsBitVector(maps);
  // Compress unused dims.
  for (auto &m : maps)
    m = compressDims(m, unusedDimsBitVector);
  // Compute the combined iterators.
  SmallVector<Attribute> iterators;
  for (unsigned i = 0, e = unusedDimsBitVector.size(); i < e; ++i) {
    if (!unusedDimsBitVector.test(i))
      iterators.push_back(contractOp.getIteratorTypes().getValue()[i]);
  }

  // Check whether any of the unused dims is non-unit, e.g.:
  //  * vector.broadcast %arg0 : vector<8x4xi32> to vector<2x8x4xi32>
  // This is only required when collapsing a mask. If there is no mask, skip.
  VectorType oldMaskType;
  bool isAnyUnusedDimNonUnit = false;
  if (maskingOp) {
    oldMaskType = cast<VectorType>(maskingOp.getMask().getType());
    for (unsigned i = 0, e = unusedDimsBitVector.size(); i < e; ++i) {
      if (unusedDimsBitVector.test(i) && oldMaskType.getShape()[i] != 1) {
        isAnyUnusedDimNonUnit = true;
        break;
      }
    }
  }

  // Check that compressing unused dims isn't removing all reduction dimension
  // pairs. For example, if the vector.contract had only one reduction
  // iterator and that was a unit-dimension created by a broadcast,
  // then we should bail here, otherwise we would create a contract without
  // a reduction dimension pair.
  bool hasReductionIteratorApplyingOnBothSides = false;
  for (unsigned i = 0; i < iterators.size(); ++i) {
    if (!isReductionIterator(iterators[i]))
      continue;
    if (getResultIndex(maps[0], i) && getResultIndex(maps[1], i)) {
      hasReductionIteratorApplyingOnBothSides = true;
      break;
    }
  }
  if (!hasReductionIteratorApplyingOnBothSides)
    return failure();

  // If the compressed maps have a dimension that is not used by either LHS or
  // RHS then the ContractionOp verifier would fail.
  if (getUnusedDimsBitVector({maps[0], maps[1]}).any())
    return failure();

  Operation *newOp = vector::ContractionOp::create(
      rewriter, contractOp.getLoc(), lhs, rhs, contractOp.getAcc(),
      rewriter.getAffineMapArrayAttr(maps), rewriter.getArrayAttr(iterators));

  // Handle the mask.
  if (maskingOp) {
    if (isAnyUnusedDimNonUnit)
      return rewriter.notifyMatchFailure(contractOp,
                                         "Cannot drop non-unit mask dim.");
    assert(unusedDimsBitVector.size() ==
               static_cast<size_t>(oldMaskType.getRank()) &&
           "The mask rank is incorrect!");

    // If a dimension has been dropped, update the mask accordingly. Otherwise,
    // keep it as is.
    Value mask = maskingOp.getMask();
    if (unusedDimsBitVector.count() != 0) {
      // At this point, two assumptions are made:
      //  * The unused dimensions are the leading mask dimensions
      //    (vector.contract does not support inner dim broadcasting).
      //  * The unused dimensions are all unit.
      // These conditions are effectively verified in the blocks preceding this
      // one.
      auto newShape =
          oldMaskType.getShape().drop_front(unusedDimsBitVector.count());
      auto newShapeScalableDims =
          oldMaskType.getScalableDims().drop_front(unusedDimsBitVector.count());
      VectorType maskOpType =
          VectorType::get(newShape, rewriter.getI1Type(), newShapeScalableDims);
      mask = vector::ShapeCastOp::create(rewriter, contractOp.getLoc(),
                                         maskOpType, maskingOp.getMask())
                 .getResult();
    }

    newOp = mlir::vector::maskOperation(rewriter, newOp, mask);
  }
  return newOp->getResult(0);
}

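/// Folds broadcasts on the LHS/RHS of a (possibly masked) vector.contract,
/// delegating the rewrite to `combineContractAndBroadcast` above.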
struct CombineContractBroadcastMask
    : public MaskableOpRewritePattern<vector::ContractionOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<Value>
  matchAndRewriteMaskableOp(vector::ContractionOp contractOp,
                            MaskingOpInterface maskingOp,
                            PatternRewriter &rewriter) const override {
    return combineContractAndBroadcast(contractOp, maskingOp, rewriter);
  }
};

/// Reorders cast(broadcast) to broadcast(cast). This makes broadcast ops and
/// contraction ops closer, which kicks in the CombineContractBroadcast pattern
/// when casting ops are around these operations.
/// Ex:
/// ```
///   %0 = vector.broadcast %arg0 : vector<32x16xi8> to vector<8x32x16xi8>
///   %1 = arith.extsi %0 : vector<8x32x16xi8> to vector<8x32x16xi32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.extsi %arg0 : vector<32x16xi8> to vector<32x16xi32>
///   %1 = vector.broadcast %0 : vector<32x16xi32> to vector<8x32x16xi32>
/// ```
struct ReorderCastOpsOnBroadcast
    : public OpInterfaceRewritePattern<CastOpInterface> {
  using OpInterfaceRewritePattern::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(CastOpInterface op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumOperands() != 1)
      return failure();
    auto bcastOp = op->getOperand(0).getDefiningOp<vector::BroadcastOp>();
    if (!bcastOp)
      return failure();

    Type castResTy = getElementTypeOrSelf(op->getResult(0));
    if (auto vecTy = dyn_cast<VectorType>(bcastOp.getSourceType()))
      castResTy = vecTy.clone(castResTy);
    auto *castOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(),
                        bcastOp.getSource(), castResTy, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, op->getResult(0).getType(), castOp->getResult(0));
    return success();
  }
};

/// Reorders elementwise(transpose) to transpose(elementwise). This makes
/// transpose ops and contraction ops closer, which kicks in the
/// CombineContractABTranspose pattern when elementwise ops are between these
/// operations. Ex:
/// ```
///   %at = vector.transpose %a, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %bt = vector.transpose %b, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
///   %r = arith.addf %at, %bt : vector<2x4xf32>
/// ```
/// Gets converted to:
/// ```
///   %0 = arith.addf %a, %b : vector<4x2xf32>
///   %r = vector.transpose %0, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// ```
struct ReorderElementwiseOpsOnTranspose final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    // Make sure all operands are transpose/constant ops and collect their
    // transposition maps.
    SmallVector<ArrayRef<int64_t>> transposeMaps;
    transposeMaps.reserve(op->getNumOperands());
    // Record the initial type before transposition. We'll use its shape later.
    // Any type will do here as we will check all transpose maps are the same.
    VectorType srcType;
    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        transposeMaps.push_back(transposeOp.getPermutation());
        srcType = transposeOp.getSourceVectorType();
      } else if (!matchPattern(operand, m_Constant())) {
        return failure();
      }
    }
    if (transposeMaps.empty())
      return failure();
    // This is an elementwise op, so all transposed operands should have the
    // same type. We need to additionally check that all transposes use the
    // same map.
    if (!llvm::all_equal(transposeMaps))
      return rewriter.notifyMatchFailure(op, "different transpose map");

    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());

    // If there are constant operands, we need to insert inverse transposes for
    // them. Calculate the inverse order first.
    auto order = transposeMaps.front();
    SmallVector<int64_t> invOrder(order.size());
    for (int i = 0, e = order.size(); i < e; ++i)
      invOrder[order[i]] = i;

    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        srcValues.push_back(transposeOp.getVector());
      } else {
        // This is a constant. Create a reverse transpose op for it.
        auto vectorType =
            srcType.clone(cast<VectorType>(operand.getType()).getElementType());
        srcValues.push_back(vector::TransposeOp::create(
            rewriter, operand.getLoc(), vectorType, operand, invOrder));
      }
    }

    auto vectorType = srcType.clone(
        cast<VectorType>(op->getResultTypes()[0]).getElementType());
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        vectorType, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::TransposeOp>(
        op, op->getResultTypes()[0], elementwiseOp->getResult(0),
        transposeMaps.front());
    return success();
  }
};

// Returns the values in `arrayAttr` as an integer vector.
static SmallVector<int64_t> getIntValueVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(
      llvm::map_range(arrayAttr.getAsRange<IntegerAttr>(),
                      [](IntegerAttr attr) { return attr.getInt(); }));
}

// Shuffles vector.bitcast op after vector.extract op.
//
// This transforms IR like:
//   %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %1 = vector.extract %0[3] : f16 from vector<8xf16>
// Into:
//   %0 = vector.extract %src[1] : f32 from vector<4xf32>
//   %1 = vector.bitcast %0 : vector<1xf32> to vector<2xf16>
//   %2 = vector.extract %1[1] : f16 from vector<2xf16>
struct BubbleDownVectorBitCastForExtract
    : public OpRewritePattern<vector::ExtractOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
                                PatternRewriter &rewriter) const override {
    // Only support extracting scalars for now.
    if (extractOp.getSourceVectorType().getRank() != 1)
      return failure();

    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Fail to match if we only have one element in the cast op source.
    // This is to avoid infinite loop given that this pattern can generate
    // such cases.
    if (castSrcType.getNumElements() == 1)
      return failure();

    // Only support casting to a larger number of elements for now.
    // E.g., vector<4xf32> -> vector<8xf16>.
    if (castSrcType.getNumElements() > castDstType.getNumElements())
      return failure();

    unsigned expandRatio =
        castDstType.getNumElements() / castSrcType.getNumElements();

    // Get the first element of the mixed position as integer.
    auto mixedPos = extractOp.getMixedPosition();
    if (!mixedPos.empty() && !isa<Attribute>(mixedPos[0]))
      return failure();
    uint64_t index = cast<IntegerAttr>(cast<Attribute>(mixedPos[0])).getInt();

    // Get the single scalar (as a vector) in the source value that packs the
    // desired scalar. E.g. extract vector<1xf32> from vector<4xf32>.
    Location loc = extractOp.getLoc();
    Value packedValue = vector::ExtractOp::create(
        rewriter, loc, castOp.getSource(), index / expandRatio);
    Type packedVecType = VectorType::get(/*shape=*/{1}, packedValue.getType());
    Value zero = arith::ConstantOp::create(rewriter, loc, packedVecType,
                                           rewriter.getZeroAttr(packedVecType));
    packedValue = vector::InsertOp::create(rewriter, loc, packedValue, zero,
                                           /*position=*/0);

    // Cast it to a vector with the desired scalar's type.
    // E.g. f32 -> vector<2xf16>.
    VectorType packedType =
        VectorType::get({expandRatio}, castDstType.getElementType());
    Value castedValue =
        vector::BitCastOp::create(rewriter, loc, packedType, packedValue);

    // Finally extract the desired scalar.
    rewriter.replaceOpWithNewOp<vector::ExtractOp>(extractOp, castedValue,
                                                   index % expandRatio);
    return success();
  }
};

// Shuffles vector.bitcast op after vector.extract_strided_slice op.
//
// This transforms IR like:
//   %cast = vector.bitcast %arg0 : vector<4xf32> to vector<8xf16>
//   %0 = vector.extract_strided_slice %cast {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
// Into:
//   %0 = vector.extract_strided_slice %arg0 {
//          offsets = [2], sizes = [2], strides = [1]
//        } : vector<4xf32> to vector<2xf32>
//   %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
    : public OpRewritePattern<vector::ExtractStridedSliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to more elements for now; other cases to be implemented.
    if (castSrcLastDim > castDstLastDim)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(extractOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = extractOp.getSourceVectorType().getRank();
    assert(castDstLastDim % castSrcLastDim == 0);
    int64_t expandRatio = castDstLastDim / castSrcLastDim;

    // If we have fewer offsets than the rank, then implicitly we are selecting
    // the full range for the last bitcasted dimension; other dimensions aren't
    // affected. Otherwise, we need to scale down the last dimension's offset
    // given we are extracting from fewer elements now.
    ArrayAttr newOffsets = extractOp.getOffsets();
    if (newOffsets.size() == rank) {
      SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
      if (offsets.back() % expandRatio != 0)
        return failure();
      offsets.back() = offsets.back() / expandRatio;
      newOffsets = rewriter.getI64ArrayAttr(offsets);
    }

    // Similarly for sizes.
    ArrayAttr newSizes = extractOp.getSizes();
    if (newSizes.size() == rank) {
      SmallVector<int64_t> sizes = getIntValueVector(newSizes);
      if (sizes.back() % expandRatio != 0)
        return failure();
      sizes.back() = sizes.back() / expandRatio;
      newSizes = rewriter.getI64ArrayAttr(sizes);
    }

    SmallVector<int64_t> dims =
        llvm::to_vector<4>(cast<VectorType>(extractOp.getType()).getShape());
    dims.back() = dims.back() / expandRatio;
    VectorType newExtractType =
        VectorType::get(dims, castSrcType.getElementType());

    auto newExtractOp = vector::ExtractStridedSliceOp::create(
        rewriter, extractOp.getLoc(), newExtractType, castOp.getSource(),
        newOffsets, newSizes, extractOp.getStrides());

    rewriter.replaceOpWithNewOp<vector::BitCastOp>(
        extractOp, extractOp.getType(), newExtractOp);

    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert op.
//
// This transforms IR like:
//   %0 = vector.insert %val, %dst[4] : vector<32xi4> into vector<8x32xi4>
//   %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
// Into:
//   %0 = vector.bitcast %val : vector<32xi4> to vector<16xi8>
//   %1 = vector.bitcast %dst : vector<8x32xi4> to vector<8x16xi8>
//   %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8>
//
struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();

    // 0-D and scalable vectors are not supported yet.
    if (castSrcType.getRank() == 0 || castSrcType.isScalable() ||
        castDstType.isScalable())
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    bool isNumElemsShrink = castSrcLastDim >= castDstLastDim;
    int64_t ratio;
    if (isNumElemsShrink) {
      assert(castSrcLastDim % castDstLastDim == 0);
      ratio = castSrcLastDim / castDstLastDim;
    } else {
      assert(castDstLastDim % castSrcLastDim == 0);
      ratio = castDstLastDim / castSrcLastDim;
    }

    auto insertOp = bitcastOp.getSource().getDefiningOp<vector::InsertOp>();
    if (!insertOp)
      return failure();

    // Only vector sources are supported for now.
    auto insertSrcType = dyn_cast<VectorType>(insertOp.getValueToStoreType());
    if (!insertSrcType)
      return failure();

    // Bitcast the source.
    SmallVector<int64_t> srcDims(insertSrcType.getShape());
    srcDims.back() =
        isNumElemsShrink ? srcDims.back() / ratio : srcDims.back() * ratio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());
    auto newCastSrcOp =
        vector::BitCastOp::create(rewriter, bitcastOp.getLoc(), newCastSrcType,
                                  insertOp.getValueToStore());

    SmallVector<int64_t> dstDims(insertOp.getDestVectorType().getShape());
    dstDims.back() =
        isNumElemsShrink ? dstDims.back() / ratio : dstDims.back() * ratio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    // Bitcast the destination.
    auto newCastDstOp = vector::BitCastOp::create(
        rewriter, bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    // Generate the new insert.
    rewriter.replaceOpWithNewOp<vector::InsertOp>(
        bitcastOp, newCastSrcOp, newCastDstOp, insertOp.getMixedPosition());
    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
//   %0 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %0 = vector.bitcast %src : vector<4xf16> to vector<2xf32>
//   %1 = vector.bitcast %dst : vector<8xf16> to vector<4xf32>
//   %2 = vector.insert_strided_slice %0, %1 {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
    : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());
    // Skip 0-D vectors, which will never come from InsertStridedSliceOp.
    if (castSrcType.getRank() == 0)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;

    auto insertOp =
        bitcastOp.getSource().getDefiningOp<vector::InsertStridedSliceOp>();
    if (!insertOp)
      return failure();

    // Only accept all-one strides for now.
    if (llvm::any_of(insertOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = insertOp.getSourceVectorType().getRank();
    // Require the insert op to have the same rank for the source and
    // destination vector; other cases to be implemented.
    if (rank != insertOp.getDestVectorType().getRank())
      return failure();

    // Require that the shape of the insert op's source is castable to the
    // destination type.
    unsigned sourceWidth = castSrcType.getElementType().getIntOrFloatBitWidth();
    unsigned destinationWidth =
        castDstType.getElementType().getIntOrFloatBitWidth();
    unsigned numElements = destinationWidth / sourceWidth;
    if (insertOp.getSourceVectorType().getNumElements() % numElements != 0)
      return failure();

    ArrayAttr newOffsets = insertOp.getOffsets();
    assert(newOffsets.size() == rank);
    SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
    if (offsets.back() % shrinkRatio != 0)
      return failure();
    offsets.back() = offsets.back() / shrinkRatio;
    newOffsets = rewriter.getI64ArrayAttr(offsets);

    SmallVector<int64_t> srcDims =
        llvm::to_vector<4>(insertOp.getSourceVectorType().getShape());
    srcDims.back() = srcDims.back() / shrinkRatio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());

    auto newCastSrcOp =
        vector::BitCastOp::create(rewriter, bitcastOp.getLoc(), newCastSrcType,
                                  insertOp.getValueToStore());

    SmallVector<int64_t> dstDims =
        llvm::to_vector<4>(insertOp.getDestVectorType().getShape());
    dstDims.back() = dstDims.back() / shrinkRatio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    auto newCastDstOp = vector::BitCastOp::create(
        rewriter, bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    rewriter.replaceOpWithNewOp<vector::InsertStridedSliceOp>(
        bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets,
        insertOp.getStrides());

    return success();
  }
};

// Breaks down vector.bitcast op.
//
// This transforms IR like:
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %cst = vector.broadcast %c0_f32 : f32 to vector<4xf32>
//   %1 = vector.extract_strided_slice %0 {
//          offsets = [0], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %2 = vector.bitcast %1 : vector<4xf16> to vector<2xf32>
//   %4 = vector.insert_strided_slice %2, %cst {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
//   %5 = vector.extract_strided_slice %0 {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %6 = vector.bitcast %5 : vector<4xf16> to vector<2xf32>
//   %7 = vector.insert_strided_slice %6, %4 {
//          offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

public:
  BreakDownVectorBitCast(MLIRContext *context,
                         std::function<bool(vector::BitCastOp)> controlFn,
                         PatternBenefit benefit)
      : OpRewritePattern(context, benefit), controlFn(std::move(controlFn)) {}

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {

    if (controlFn && !controlFn(bitcastOp))
      return failure();

    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // This transformation builds on top of
    // vector.{extract|insert}_strided_slice, which do not support
    // extracting/inserting "scalable sub-vectors". Bail out.
    if (castSrcType.isScalable())
      return rewriter.notifyMatchFailure(bitcastOp,
                                         "Scalable vectors are not supported");

    // Only support rank 1 case for now.
    if (castSrcType.getRank() != 1)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;
    // Nothing to do if it is already bitcasting to a single element.
    if (castSrcLastDim == shrinkRatio)
      return failure();

    Location loc = bitcastOp.getLoc();
    Type elemType = castDstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    Value zero = arith::ConstantOp::create(rewriter, loc, elemType,
                                           rewriter.getZeroAttr(elemType));
    Value res = BroadcastOp::create(rewriter, loc, castDstType, zero);

    SmallVector<int64_t> sliceShape = {castDstLastDim};
    SmallVector<int64_t> strides = {1};
    VectorType newCastDstType =
        VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
                        castDstType.getElementType());

    for (int i = 0, e = shrinkRatio; i < e; ++i) {
      Value extracted = ExtractStridedSliceOp::create(
          rewriter, loc, bitcastOp.getSource(),
          ArrayRef<int64_t>{i * castDstLastDim}, sliceShape, strides);
      Value bitcast =
          BitCastOp::create(rewriter, loc, newCastDstType, extracted);
      res = InsertStridedSliceOp::create(
          rewriter, loc, bitcast, res,
          ArrayRef<int64_t>{i * castDstLastDim / shrinkRatio}, strides);
    }
    rewriter.replaceOp(bitcastOp, res);
    return success();
  }

private:
  std::function<bool(BitCastOp)> controlFn;
};

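/// Returns true if neither type is a vector, or if both are vectors with
/// identical shapes and scalable-dim flags.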
static bool haveSameShapeAndScaling(Type t, Type u) {
  auto tVec = dyn_cast<VectorType>(t);
  auto uVec = dyn_cast<VectorType>(u);
  if (!tVec)
    return !uVec;
  if (!uVec)
    return false;
  return tVec.getShape() == uVec.getShape() &&
         tVec.getScalableDims() == uVec.getScalableDims();
}

/// If `type` is shaped, clone it with `newElementType`. Otherwise,
/// return `newElementType`.
static Type cloneOrReplace(Type type, Type newElementType) {
  if (auto shapedType = dyn_cast<ShapedType>(type))
    return shapedType.clone(newElementType);
  return newElementType;
}

/// If `value` is the result of a splat or broadcast operation, return the
/// input of the splat/broadcast operation.
static Value getBroadcastLikeSource(Value value) {
  Operation *op = value.getDefiningOp();
  if (!op)
    return {};

  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcast.getSource();

  if (auto splat = dyn_cast<vector::SplatOp>(op))
    return splat.getInput();

  return {};
}

/// Reorders elementwise(broadcast/splat) to broadcast(elementwise).
///
/// Example:
/// ```
/// %a = vector.broadcast %arg1 : index to vector<1x4xindex>
/// %b = vector.broadcast %arg2 : index to vector<1x4xindex>
/// %r = arith.addi %a, %b : vector<1x4xindex>
/// ```
/// Gets converted to:
/// ```
/// %r = arith.addi %arg1, %arg2 : index
/// %b = vector.broadcast %r : index to vector<1x4xindex>
/// ```
///
/// Both `vector.broadcast` and `vector.splat` are supported as broadcasting
/// ops.
struct ReorderElementwiseOpsOnBroadcast final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1)
      return failure();
    auto resultType = dyn_cast<VectorType>(op->getResult(0).getType());
    if (!resultType)
      return failure();
    if (!OpTrait::hasElementwiseMappableTraits(op))
      return rewriter.notifyMatchFailure(
          op, "Op doesn't have ElementwiseMappableTraits");
    if (op->getNumOperands() == 0)
      return failure();
    if (isa<vector::FMAOp>(op)) {
      return rewriter.notifyMatchFailure(
          op,
          "Op only accepts vector types - not supported as broadcast source "
          "might be a scalar");
    }

    Type resultElemType = resultType.getElementType();

    // Get the broadcast source of the first non-constant operand.
    Value splatSource;
    for (Value operand : op->getOperands()) {
      Operation *definingOp = operand.getDefiningOp();
      if (!definingOp)
        return failure();
      if (definingOp->hasTrait<OpTrait::ConstantLike>())
        continue;
      splatSource = getBroadcastLikeSource(operand);
      break;
    }
    if (!splatSource)
      return failure();
    Type unbroadcastResultType =
        cloneOrReplace(splatSource.getType(), resultElemType);

    // Make sure that all operands are broadcast from identically-shaped types:
    //  * scalar (`vector.broadcast` + `vector.splat`), or
    //  * vector (`vector.broadcast`).
    // Otherwise the re-ordering wouldn't be safe.
    if (!llvm::all_of(op->getOperands(), [splatSource](Value val) {
          if (auto source = getBroadcastLikeSource(val))
            return haveSameShapeAndScaling(source.getType(),
                                           splatSource.getType());
          SplatElementsAttr splatConst;
          return matchPattern(val, m_Constant(&splatConst));
        })) {
      return rewriter.notifyMatchFailure(
          op,
          "not all operands are constants or broadcasts from the same type");
    }

    // Collect the source values before broadcasting.
    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());
    for (Value operand : op->getOperands()) {
      SplatElementsAttr splatConst;
      if (matchPattern(operand, m_Constant(&splatConst))) {
        Attribute newConst;
        Type elementType = getElementTypeOrSelf(operand.getType());
        Type newType = cloneOrReplace(unbroadcastResultType, elementType);
        if (auto newTypeShaped = dyn_cast<ShapedType>(newType)) {
          newConst = splatConst.resizeSplat(newTypeShaped);
        } else {
          newConst = splatConst.getSplatValue<Attribute>();
        }
        Operation *newConstOp =
            operand.getDefiningOp()->getDialect()->materializeConstant(
                rewriter, newConst, newType, operand.getLoc());
        srcValues.push_back(newConstOp->getResult(0));
      } else {
        srcValues.push_back(operand.getDefiningOp()->getOperand(0));
      }
    }

    // Create the "elementwise" op on the unbroadcast operands.
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        unbroadcastResultType, op->getAttrs());

    // Replace the original op with the elementwise op followed by a broadcast.
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, resultType, elementwiseOp->getResults());

    return success();
  }
};

/// Pattern to rewrite an ExtractOp(Elementwise) -> Elementwise(ExtractOp).
/// This may result in cleaner code when extracting a single value
/// from a multi-element vector, and also helps canonicalize 1-element vectors
/// to scalars.
///
/// Example:
/// ```
/// %0 = arith.addf %arg0, %arg1 : vector<4xf32>
/// %1 = vector.extract %0[1] : f32 from vector<4xf32>
/// ```
/// Gets converted to:
/// ```
/// %0 = vector.extract %arg0[1] : f32 from vector<4xf32>
/// %1 = vector.extract %arg1[1] : f32 from vector<4xf32>
/// %2 = arith.addf %0, %1 : f32
/// ```
class ExtractOpFromElementwise final
    : public OpRewritePattern<vector::ExtractOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp op,
                                PatternRewriter &rewriter) const override {
    Operation *eltwise = op.getVector().getDefiningOp();

    // TODO: vector::FMAOp is not ElementwiseMappable even if it claims to be,
    // as it doesn't support scalars.
    if (!eltwise || !OpTrait::hasElementwiseMappableTraits(eltwise) ||
        isa<vector::FMAOp>(eltwise))
      return rewriter.notifyMatchFailure(op, "not an elementwise op");

    if (eltwise->getNumResults() != 1)
      return rewriter.notifyMatchFailure(op, "expected single result");

    if (!eltwise->hasOneUse())
      return rewriter.notifyMatchFailure(op, "expected single op use");

    if (!llvm::all_equal(eltwise->getOperandTypes()))
      return rewriter.notifyMatchFailure(op, "operand types are different");

    // Dynamic position can cause dominance issues, so conservatively fail for
    // now.
    if (!op.getDynamicPosition().empty())
      return rewriter.notifyMatchFailure(
          op, "dynamic position not yet implemented");

    Type dstType = op.getType();

    OpBuilder::InsertionGuard g(rewriter);
    rewriter.setInsertionPoint(eltwise);

    IRMapping mapping;
    Location loc = eltwise->getLoc();
    SmallVector<OpFoldResult> pos = op.getMixedPosition();
    for (Value arg : eltwise->getOperands()) {
      Value newArg = vector::ExtractOp::create(rewriter, loc, arg, pos);
      mapping.map(arg, newArg);
    }

    Operation *newEltwise = rewriter.clone(*eltwise, mapping);
    newEltwise->getResult(0).setType(dstType);

    rewriter.replaceOp(op, newEltwise);
    rewriter.eraseOp(eltwise);
    return success();
  }
};

/// Check if the element type is suitable for vector.load/store sinking.
/// Element type must be index or byte-aligned integer or floating-point type.
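/// For example, `index`, `i8`, and `f32` qualify, while `i1` and `i4` do not.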
static bool isSupportedMemSinkElementType(Type type) {
  if (isa<IndexType>(type))
    return true;

  return type.isIntOrFloat() && type.getIntOrFloatBitWidth() % 8 == 0;
}

/// Pattern to rewrite `vector.extract(vector.load)` -> `vector/memref.load`.
/// Only index and byte-aligned integer and floating-point element types are
/// supported for now.
///
/// Example:
/// ```
/// vector.load %arg0[%arg1] : memref<?xf32>, vector<4xf32>
/// vector.extract %0[1] : f32 from vector<4xf32>
/// ```
/// Gets converted to:
/// ```
/// %c1 = arith.constant 1 : index
/// %0 = arith.addi %arg1, %c1 overflow<nsw> : index
/// %1 = memref.load %arg0[%0] : memref<?xf32>
/// ```
class ExtractOpFromLoad final : public OpRewritePattern<vector::ExtractOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp op,
                                PatternRewriter &rewriter) const override {
    auto loadOp = op.getVector().getDefiningOp<vector::LoadOp>();
    if (!loadOp)
      return rewriter.notifyMatchFailure(op, "expected a load op");

    // Checking for single use so we won't duplicate load ops.
    if (!loadOp->hasOneUse())
      return rewriter.notifyMatchFailure(op, "expected single op use");

    VectorType loadVecType = loadOp.getVectorType();
    if (loadVecType.isScalable())
      return rewriter.notifyMatchFailure(op,
                                         "scalable vectors are not supported");

    MemRefType memType = loadOp.getMemRefType();

    // Non-byte-aligned types are tricky and may require special handling,
    // ignore them for now.
    if (!isSupportedMemSinkElementType(memType.getElementType()))
      return rewriter.notifyMatchFailure(op, "unsupported element type");

    int64_t rankOffset = memType.getRank() - loadVecType.getRank();
    if (rankOffset < 0)
      return rewriter.notifyMatchFailure(op, "unsupported ranks combination");

    auto extractVecType = dyn_cast<VectorType>(op.getResult().getType());
    int64_t finalRank = 0;
    if (extractVecType)
      finalRank = extractVecType.getRank();

    SmallVector<Value> indices = loadOp.getIndices();
    SmallVector<OpFoldResult> extractPos = op.getMixedPosition();

    // There may be memory stores between the load and the extract op, so we
    // need to make sure that the new load op is inserted at the same place as
    // the original load op.
    OpBuilder::InsertionGuard g(rewriter);
    rewriter.setInsertionPoint(loadOp);
    Location loc = loadOp.getLoc();
    ArithIndexingBuilder idxBuilderf(rewriter, loc);
    for (auto i : llvm::seq<int64_t>(rankOffset, indices.size() - finalRank)) {
      OpFoldResult pos = extractPos[i - rankOffset];
      if (isZeroInteger(pos))
        continue;

      Value offset = getValueOrCreateConstantIndexOp(rewriter, loc, pos);
      indices[i] = idxBuilderf.add(indices[i], offset);
    }

    Value base = loadOp.getBase();
    if (extractVecType) {
      rewriter.replaceOpWithNewOp<vector::LoadOp>(op, extractVecType, base,
                                                  indices);
    } else {
      rewriter.replaceOpWithNewOp<memref::LoadOp>(op, base, indices);
    }
    // We checked for single use so we can safely erase the load op.
    rewriter.eraseOp(loadOp);
    return success();
  }
};

/// Pattern to rewrite vector.store(vector.splat/broadcast) ->
/// vector/memref.store.
///
/// Example:
/// ```
/// %0 = vector.splat %arg2 : vector<1xf32>
/// vector.store %0, %arg0[%arg1] : memref<?xf32>, vector<1xf32>
/// ```
/// Gets converted to:
/// ```
/// memref.store %arg2, %arg0[%arg1] : memref<?xf32>
/// ```
class StoreOpFromSplatOrBroadcast final
    : public OpRewritePattern<vector::StoreOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::StoreOp op,
                                PatternRewriter &rewriter) const override {
    VectorType vecType = op.getVectorType();
    if (vecType.isScalable())
      return rewriter.notifyMatchFailure(op,
                                         "scalable vectors are not supported");

    if (isa<VectorType>(op.getMemRefType().getElementType()))
      return rewriter.notifyMatchFailure(
          op, "memrefs of vectors are not supported");

    if (vecType.getNumElements() != 1)
      return rewriter.notifyMatchFailure(
          op, "only 1-element vectors are supported");

    Value toStore = op.getValueToStore();
    Value source = getBroadcastLikeSource(toStore);
    if (!source)
      return rewriter.notifyMatchFailure(
          op, "value to store is not from a broadcast");

    // Checking for single use so we can remove the splat.
    Operation *splat = toStore.getDefiningOp();
    if (!splat->hasOneUse())
      return rewriter.notifyMatchFailure(op, "expected single op use");

    Value base = op.getBase();
    ValueRange indices = op.getIndices();

    if (isa<VectorType>(source.getType())) {
      rewriter.replaceOpWithNewOp<vector::StoreOp>(op, source, base, indices);
    } else {
      rewriter.replaceOpWithNewOp<memref::StoreOp>(op, source, base, indices);
    }
    rewriter.eraseOp(splat);
    return success();
  }
};

// Helper that returns a vector comparison that constructs a mask:
//   mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// If `dim == 0` then the result will be a 0-D vector.
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
// much more compact, IR for this operation, but LLVM eventually
// generates more elaborate instructions for this intrinsic since it
// is very conservative on the boundary conditions.
static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
                                   bool force32BitVectorIndices, int64_t dim,
                                   Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Type idxType =
      force32BitVectorIndices ? rewriter.getI32Type() : rewriter.getI64Type();
  DenseIntElementsAttr indicesAttr;
  if (dim == 0 && force32BitVectorIndices) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int32_t>{0});
  } else if (dim == 0) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int64_t>{0});
  } else if (force32BitVectorIndices) {
    indicesAttr = rewriter.getI32VectorAttr(
        llvm::to_vector<4>(llvm::seq<int32_t>(0, dim)));
  } else {
    indicesAttr = rewriter.getI64VectorAttr(
        llvm::to_vector<4>(llvm::seq<int64_t>(0, dim)));
  }
  Value indices = arith::ConstantOp::create(rewriter, loc, indicesAttr);
  // Add in an offset if requested.
  if (off) {
    Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = vector::BroadcastOp::create(rewriter, loc, indices.getType(), o);
    indices = arith::AddIOp::create(rewriter, loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds =
      vector::BroadcastOp::create(rewriter, loc, indices.getType(), bound);
  return arith::CmpIOp::create(rewriter, loc, arith::CmpIPredicate::slt,
                               indices, bounds);
}

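/// For transfer ops with an out-of-bounds dimension, materialize the
/// out-of-bounds behavior as an explicit mask: compute an in-bounds mask from
/// the accessed dimension size and the transfer offset, intersect it with any
/// existing mask, and mark the transfer as in-bounds.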
template <typename ConcreteOp>
struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
public:
  explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt,
                                   PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<ConcreteOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                PatternRewriter &rewriter) const override {
    if (!xferOp.hasOutOfBoundsDim())
      return failure();

    if (xferOp.getVectorType().getRank() > 1 || xferOp.getIndices().empty())
      return failure();

    Location loc = xferOp->getLoc();
    VectorType vtp = xferOp.getVectorType();

    // Create the in-bounds mask with all elements between [0 .. dim - offset)
    // set and [dim - offset .. vector_length) unset.
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    // dimensions here.
    unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
    Value off = xferOp.getIndices()[lastIndex];
    Value dim =
        vector::createOrFoldDimOp(rewriter, loc, xferOp.getBase(), lastIndex);
    Value b = arith::SubIOp::create(rewriter, loc, dim.getType(), dim, off);
    Value mask = vector::CreateMaskOp::create(
        rewriter, loc,
        VectorType::get(vtp.getShape(), rewriter.getI1Type(),
                        vtp.getScalableDims()),
        b);
    if (xferOp.getMask()) {
      // Intersect the in-bounds mask with the mask specified as an op
      // parameter.
      mask = arith::AndIOp::create(rewriter, loc, mask, xferOp.getMask());
    }

    rewriter.modifyOpInPlace(xferOp, [&]() {
      xferOp.getMaskMutable().assign(mask);
      xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true}));
    });

    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Conversion pattern for a `vector.create_mask` (0-D and 1-D only).
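///
/// Ex (an illustrative sketch, assuming force32BitVectorIndices is set):
/// ```
/// %m = vector.create_mask %b : vector<4xi1>
/// ```
/// becomes, roughly, a compare against a constant index vector:
/// ```
/// %indices = arith.constant dense<[0, 1, 2, 3]> : vector<4xi32>
/// %bounds = vector.broadcast %b_i32 : i32 to vector<4xi32>
/// %m = arith.cmpi slt, %indices, %bounds : vector<4xi32>
/// ```
/// where %b_i32 is %b cast to i32 (see buildVectorComparison above).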
class VectorCreateMaskOpConversion
    : public OpRewritePattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        bool enableIndexOpt,
                                        PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<vector::CreateMaskOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    if (cast<VectorType>(dstType).isScalable())
      return failure();
    int64_t rank = dstType.getRank();
    if (rank > 1)
      return failure();
    rewriter.replaceOp(
        op, buildVectorComparison(rewriter, op, force32BitVectorIndices,
                                  rank == 0 ? 0 : dstType.getDimSize(0),
                                  op.getOperand(0)));
    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Returns true if all the `i1` elements of `constantOp` are set to `value`.
static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) {
  auto denseAttr = dyn_cast<DenseIntElementsAttr>(constantOp.getValue());
  // TODO: Support non-dense constant.
  if (!denseAttr)
    return false;

  assert(denseAttr.getElementType().isInteger(1) && "Unexpected type");
  return denseAttr.isSplat() && denseAttr.getSplatValue<bool>() == value;
}
1466 
1467 /// Folds a select operation between an all-true and all-false vector. For now,
1468 /// only single element vectors (i.e., vector<1xi1>) are supported. That is:
1469 ///
1470 /// %true = arith.constant dense<true> : vector<1xi1>
1471 /// %false = arith.constant dense<false> : vector<1xi1>
1472 /// %result = arith.select %cond, %true, %false : i1, vector<1xi1>
1473 /// =>
1474 /// %result = vector.broadcast %cond : i1 to vector<1xi1>
1475 ///
1476 /// InstCombine seems to handle vectors with multiple elements but not the
1477 /// single element ones.
1478 struct FoldI1Select : public OpRewritePattern<arith::SelectOp> {
1479  using OpRewritePattern::OpRewritePattern;
1480 
1481  LogicalResult matchAndRewrite(arith::SelectOp selectOp,
1482  PatternRewriter &rewriter) const override {
1483  auto vecType = dyn_cast<VectorType>(selectOp.getType());
1484  if (!vecType || !vecType.getElementType().isInteger(1))
1485  return failure();
1486 
1487  // Only scalar conditions can be folded.
1488  Value cond = selectOp.getCondition();
1489  if (isa<VectorType>(cond.getType()))
1490  return failure();
1491 
1492  // TODO: Support n-D and scalable vectors.
1493  if (vecType.getRank() != 1 || vecType.isScalable())
1494  return failure();
1495 
1496  // TODO: Support vectors with multiple elements.
1497  if (vecType.getShape()[0] != 1)
1498  return failure();
1499 
1500  auto trueConst = selectOp.getTrueValue().getDefiningOp<arith::ConstantOp>();
1501  if (!trueConst || !allI1ConstantValuesSetTo(trueConst, true))
1502  return failure();
1503 
1504  auto falseConst =
1505  selectOp.getFalseValue().getDefiningOp<arith::ConstantOp>();
1506  if (!falseConst || !allI1ConstantValuesSetTo(falseConst, false))
1507  return failure();
1508 
1509  // Replace select with its condition broadcasted to single element vector.
1510  auto elemType = rewriter.getIntegerType(vecType.getNumElements());
1511  auto bcastType = VectorType::get(/*shape=*/{1}, elemType);
1512  rewriter.replaceOpWithNewOp<vector::BroadcastOp>(selectOp, bcastType, cond);
1513  return success();
1514  }
1515 };
1516 
1517 /// Returns the number of dims that can be folded away from transfer ops. It
1518 /// returns a failure if it cannot determine the number of dims to be folded.
1519 ///
1520 /// Ex 1: returns "2" if `srcType` is memref<512x16x1x1xf32> and
1521 /// `vectorType` is vector<16x16x1x1xf32>
1522 /// (the two innermost dims can be dropped by memref.subview ops)
1523 ///
1524 /// Ex 2: returns "1" if `srcType` is memref<512x16x1x1xf32> with
1525 /// [8192, 16, 8, 1] strides and `vectorType` is vector<16x16x1x1xf32>
1526 /// (only the innermost unit dim of `srcType` can be dropped)
1527 ///
1528 /// Ex 3: returns "0" if `srcType` is memref<512x16x1x1xf32> and
1529 /// `vectorType` is vector<16x16x1x[1]xf32>
1530 /// (the innermost dim in `vectorType` is not a unit dim; it's a
1531 /// "scalable unit" dim)
1532 static FailureOr<size_t>
1533 getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
1534  SmallVector<int64_t> srcStrides;
1535  int64_t srcOffset;
1536  if (failed(srcType.getStridesAndOffset(srcStrides, srcOffset)))
1537  return failure();
1538 
1539  auto isUnitDim = [](VectorType type, int dim) {
1540  return type.getDimSize(dim) == 1 && !type.getScalableDims()[dim];
1541  };
1542 
1543  // According to vector.transfer_read/write semantics, the vector can be a
1544  // slice. Thus, we have to offset the check index with `rankDiff` in
1545  // `srcStrides` and source dim sizes.
1546  size_t result = 0;
1547  int rankDiff = srcType.getRank() - vectorType.getRank();
1548  for (int64_t i = 0, e = vectorType.getRank(); i < e; ++i) {
1549  // Check that the inner dim size is 1 for both memref type and vector slice.
1550  // It can be folded only if they are 1 and the stride is 1.
1551  int dim = vectorType.getRank() - i - 1;
1552  if (srcStrides[dim + rankDiff] != 1 ||
1553  srcType.getDimSize(dim + rankDiff) != 1 || !isUnitDim(vectorType, dim))
1554  break;
1555  result++;
1556  }
1557  return result;
1558 }
1559 
1560 /// Drop innermost contiguous unit dimensions from transfer_read operand.
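/// E.g. (an illustrative example mirroring the transfer_write case below):
///
///   %0 = vector.transfer_read %arg0[%c0, %arg1, %c0, %c0, %c0], %pad
///     {in_bounds = [true, true, true, true, true]}
///     : memref<1x512x16x1x1xf32>, vector<1x16x16x1x1xf32>
///
/// is rewritten to:
///
///   %subview = memref.subview %arg0
///     [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
///     : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
///   %1 = vector.transfer_read %subview[%c0, %arg1, %c0], %pad
///     {in_bounds = [true, true, true]}
///     : memref<1x512x16xf32>, vector<1x16x16xf32>
///   %0 = vector.shape_cast %1 : vector<1x16x16xf32> to vector<1x16x16x1x1xf32>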
1561 class DropInnerMostUnitDimsTransferRead
1562  : public OpRewritePattern<vector::TransferReadOp> {
1563  using OpRewritePattern::OpRewritePattern;
1564 
1565  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
1566  PatternRewriter &rewriter) const override {
1567  // TODO: support 0-d corner case.
1568  if (readOp.getTransferRank() == 0)
1569  return failure();
1570 
1571  // TODO: support mask.
1572  if (readOp.getMask())
1573  return failure();
1574 
1575  auto srcType = dyn_cast<MemRefType>(readOp.getBase().getType());
1576  if (!srcType)
1577  return failure();
1578 
1579  if (!readOp.getPermutationMap().isMinorIdentity())
1580  return failure();
1581 
1582  auto targetType = readOp.getVectorType();
1583  if (targetType.getRank() <= 1)
1584  return failure();
1585 
1586  FailureOr<size_t> maybeDimsToDrop =
1587  getTransferFoldableInnerUnitDims(srcType, targetType);
1588  if (failed(maybeDimsToDrop))
1589  return failure();
1590 
1591  size_t dimsToDrop = maybeDimsToDrop.value();
1592  if (dimsToDrop == 0)
1593  return failure();
1594 
1595  auto inBounds = readOp.getInBoundsValues();
1596  auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
1597  if (llvm::is_contained(droppedInBounds, false))
1598  return failure();
1599 
1600  auto resultTargetVecType =
1601  VectorType::get(targetType.getShape().drop_back(dimsToDrop),
1602  targetType.getElementType(),
1603  targetType.getScalableDims().drop_back(dimsToDrop));
1604 
1605  auto loc = readOp.getLoc();
1606  SmallVector<OpFoldResult> sizes =
1607  memref::getMixedSizes(rewriter, loc, readOp.getBase());
1608  SmallVector<OpFoldResult> offsets(srcType.getRank(),
1609  rewriter.getIndexAttr(0));
1610  SmallVector<OpFoldResult> strides(srcType.getRank(),
1611  rewriter.getIndexAttr(1));
1612  MemRefType resultMemrefType = memref::SubViewOp::inferRankReducedResultType(
1613  srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
1614  strides);
1615  ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
1616  readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
1617  Value rankedReducedView =
1618  memref::SubViewOp::create(rewriter, loc, resultMemrefType,
1619  readOp.getBase(), offsets, sizes, strides);
1620  auto permMap = getTransferMinorIdentityMap(
1621  cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
1622  Value result = vector::TransferReadOp::create(
1623  rewriter, loc, resultTargetVecType, rankedReducedView,
1624  readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
1625  readOp.getPadding(),
1626  // TODO: support mask.
1627  /*mask=*/Value(), inBoundsAttr);
1628  rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(readOp, targetType,
1629  result);
1630  return success();
1631  }
1632 };
1633 
1634 /// Drop innermost contiguous unit dimensions from transfer_write operand.
1635 /// E.g.,
1636 /// vector.transfer_write %arg1, %arg0[%c0, %arg2, %c0, %c0, %c0]
1637 /// {in_bounds = [true, true, true, true, true]}
1638 /// : vector<1x16x16x1x1xf32>, memref<1x512x16x1x1xf32>
1639 ///
1640 /// will be replaced with
1641 ///
1642 /// %subview = memref.subview %arg0
1643 /// [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
1644 /// : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
1645 /// %0 = vector.shape_cast %arg1 : vector<1x16x16x1x1xf32>
1646 /// to vector<1x16x16xf32>
1647 /// vector.transfer_write %0, %subview[%c0, %arg2, %c0]
1648 /// {in_bounds = [true, true, true]}
1649 /// : vector<1x16x16xf32>, memref<1x512x16xf32>
1650 ///
1651 /// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`).
1652 class DropInnerMostUnitDimsTransferWrite
1653  : public OpRewritePattern<vector::TransferWriteOp> {
1654  using OpRewritePattern::OpRewritePattern;
1655 
1656  LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
1657  PatternRewriter &rewriter) const override {
1658  // TODO: support 0-d corner case.
1659  if (writeOp.getTransferRank() == 0)
1660  return failure();
1661 
1662  // TODO: support mask.
1663  if (writeOp.getMask())
1664  return failure();
1665 
1666  auto srcType = dyn_cast<MemRefType>(writeOp.getBase().getType());
1667  if (!srcType)
1668  return failure();
1669 
1670  if (!writeOp.getPermutationMap().isMinorIdentity())
1671  return failure();
1672 
1673  auto targetType = writeOp.getVectorType();
1674  if (targetType.getRank() <= 1)
1675  return failure();
1676 
1677  FailureOr<size_t> maybeDimsToDrop =
1678  getTransferFoldableInnerUnitDims(srcType, targetType);
1679  if (failed(maybeDimsToDrop))
1680  return failure();
1681 
1682  size_t dimsToDrop = maybeDimsToDrop.value();
1683  if (dimsToDrop == 0)
1684  return failure();
1685 
1686  auto inBounds = writeOp.getInBoundsValues();
1687  auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
1688  if (llvm::is_contained(droppedInBounds, false))
1689  return failure();
1690 
1691  auto resultTargetVecType =
1692  VectorType::get(targetType.getShape().drop_back(dimsToDrop),
1693  targetType.getElementType(),
1694  targetType.getScalableDims().drop_back(dimsToDrop));
1695 
1696  Location loc = writeOp.getLoc();
1697  SmallVector<OpFoldResult> sizes =
1698  memref::getMixedSizes(rewriter, loc, writeOp.getBase());
1699  SmallVector<OpFoldResult> offsets(srcType.getRank(),
1700  rewriter.getIndexAttr(0));
1701  SmallVector<OpFoldResult> strides(srcType.getRank(),
1702  rewriter.getIndexAttr(1));
1703  MemRefType resultMemrefType = memref::SubViewOp::inferRankReducedResultType(
1704  srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
1705  strides);
1706  ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
1707  writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
1708 
1709  Value rankedReducedView =
1710  memref::SubViewOp::create(rewriter, loc, resultMemrefType,
1711  writeOp.getBase(), offsets, sizes, strides);
1712  auto permMap = getTransferMinorIdentityMap(
1713  cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
1714 
1715  auto shapeCast = rewriter.createOrFold<vector::ShapeCastOp>(
1716  loc, resultTargetVecType, writeOp.getVector());
1717  rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
1718  writeOp, shapeCast, rankedReducedView,
1719  writeOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
1720  // TODO: support mask.
1721  /*mask=*/Value(), inBoundsAttr);
1722  return success();
1723  }
1724 };
1725 
1726 /// Canonicalization of a `vector.contraction %a, %b, %c` with row-major matmul
1727 /// semantics to a contraction suitable for MMT (matrix matrix multiplication
1728 /// with the RHS transposed) lowering.
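/// For instance (a sketch, with indexing maps abbreviated to their result
/// exprs), the plain row-major matmul form
///   indexing_maps = [(m, k), (k, n), (m, n)]
/// is brought to the canonical MMT form
///   indexing_maps = [(m, k), (n, k), (m, n)]
/// by inserting `%rhs_t = vector.transpose %rhs, [1, 0]` on the RHS.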
1729 struct CanonicalizeContractMatmulToMMT final
1730  : OpRewritePattern<vector::ContractionOp> {
1731  using OpRewritePattern::OpRewritePattern;
1732 
1733  using FilterConstraintType =
1734  std::function<LogicalResult(vector::ContractionOp op)>;
1735 
1736  CanonicalizeContractMatmulToMMT(MLIRContext *context, PatternBenefit benefit,
1737  FilterConstraintType constraint)
1738  : OpRewritePattern<vector::ContractionOp>(context, benefit),
1739  filter(std::move(constraint)) {}
1740 
1741  LogicalResult matchAndRewrite(vector::ContractionOp op,
1742  PatternRewriter &rewriter) const override {
1743  if (failed(filter(op)))
1744  return failure();
1745 
1746  Location loc = op.getLoc();
1747  Value lhs = op.getLhs();
1748  Value rhs = op.getRhs();
1749  Value res = op.getAcc();
1750 
1751  // Set up the parallel/reduction structure in right form.
1752  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
1753  auto infer = [&](MapList m) {
1754  return AffineMap::inferFromExprList(m, op.getContext());
1755  };
1756  AffineExpr m;
1757  AffineExpr n;
1758  AffineExpr k;
1759  bindDims(rewriter.getContext(), m, n, k);
1760  static constexpr std::array<int64_t, 2> perm = {1, 0};
1761  auto iteratorTypes = op.getIteratorTypes().getValue();
1762  SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
1763  if (iteratorTypes.size() != 3 ||
1764  !vector::isParallelIterator(iteratorTypes[0]) ||
1765  !vector::isParallelIterator(iteratorTypes[1]) ||
1766  !vector::isReductionIterator(iteratorTypes[2]))
1767  return rewriter.notifyMatchFailure(op, "contraction is not a gemm");
1768 
1769  // The canonical form is "TNT" = A row-major, B col-major, C row-major.
1770  const auto canonicalForm = infer({{m, k}, {n, k}, {m, n}});
1771  if (maps == canonicalForm)
1772  return rewriter.notifyMatchFailure(op, "already in the canonical form");
1773 
1774  // Create a vector transpose making sure to emit zero/sign-extend at the
1775  // end.
1776  auto createTranspose = [&rewriter, loc](Value mat) -> Value {
1777  if (auto sext = mat.getDefiningOp<arith::ExtSIOp>()) {
1778  Value trans =
1779  vector::TransposeOp::create(rewriter, loc, sext.getIn(), perm);
1780  VectorType newType =
1781  cast<VectorType>(trans.getType())
1782  .clone(cast<VectorType>(mat.getType()).getElementType());
1783  return arith::ExtSIOp::create(rewriter, loc, newType, trans);
1784  }
1785  if (auto zext = mat.getDefiningOp<arith::ExtUIOp>()) {
1786  Value trans =
1787  vector::TransposeOp::create(rewriter, loc, zext.getIn(), perm);
1788  VectorType newType =
1789  VectorType::get(cast<VectorType>(trans.getType()).getShape(),
1790  cast<VectorType>(mat.getType()).getElementType());
1791  return arith::ExtUIOp::create(rewriter, loc, newType, trans);
1792  }
1793  return vector::TransposeOp::create(rewriter, loc, mat, perm);
1794  };
1795 
1796  if (maps == infer({{m, k}, {k, n}, {m, n}})) {
1797  rhs = createTranspose(rhs);
1798  } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
1799  lhs = createTranspose(lhs);
1800  } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
1801  rhs = createTranspose(rhs);
1802  lhs = createTranspose(lhs);
1803  } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
1804  std::swap(rhs, lhs);
1805  rhs = createTranspose(rhs);
1806  lhs = createTranspose(lhs);
1807  } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
1808  std::swap(rhs, lhs);
1809  rhs = createTranspose(rhs);
1810  } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
1811  std::swap(lhs, rhs);
1812  lhs = createTranspose(lhs);
1813  } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
1814  std::swap(lhs, rhs);
1815  } else {
1816  return rewriter.notifyMatchFailure(op, "unhandled contraction form");
1817  }
1818  rewriter.replaceOpWithNewOp<vector::ContractionOp>(
1819  op, lhs, rhs, res, rewriter.getAffineMapArrayAttr(canonicalForm),
1820  op.getIteratorTypes());
1821  return success();
1822  };
1823 
1824 private:
1825  FilterConstraintType filter;
1826 };
1827 
1828 /// Pattern to fold arithmetic extensions on floating point data types into
1829 /// vector contraction operations. linalg.matmul introduces arithmetic
1830 /// extensions on its operands. See the MLIR snippets below for more details.
1831 /// ```mlir
1832 /// "linalg.matmul"(%lhs, %rhs, %acc) ({
1833 /// ^bb0(%arg1: f16, %arg2: f16, %arg3: f32):
1834 /// %lhs_f32 = "arith.extf"(%arg1) : (f16) -> f32
1835 /// %rhs_f32 = "arith.extf"(%arg2) : (f16) -> f32
1836 /// %mul = "arith.mulf"(%lhs_f32, %rhs_f32) : (f32, f32) -> f32
1837 /// %acc = "arith.addf"(%arg3, %mul) : (f32, f32) -> f32
1838 /// "linalg.yield"(%acc) : (f32) -> ()
1839 /// })
1840 /// ```
1841 /// This restricts the native usage of mixed precision NVIDIA Ampere Tensor
1842 /// Cores, i.e., `mma.sync.*.f32.f16.f16.f32` and `mma.sync.*.f32.bf16.bf16.f32`.
1843 /// This pattern folds the arithmetic extensions into the vector contraction and
1844 /// enables the usage of native mixed precision Tensor Core instructions.
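/// At the vector level, the fold amounts to (an illustrative sketch):
///   %lhs32 = arith.extf %lhs : vector<16x16xf16> to vector<16x16xf32>
///   %rhs32 = arith.extf %rhs : vector<16x16xf16> to vector<16x16xf32>
///   %res = vector.contract {...} %lhs32, %rhs32, %acc : ... into vector<16x16xf32>
/// ==>
///   %res = vector.contract {...} %lhs, %rhs, %acc : ... into vector<16x16xf32>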
1845 template <typename ExtOp>
1846 struct FoldArithExtIntoContractionOp
1847  : public OpRewritePattern<vector::ContractionOp> {
1848  using OpRewritePattern::OpRewritePattern;
1849 
1850  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
1851  PatternRewriter &rewriter) const override {
1852 
1853  auto lhsDefOp = contractOp.getLhs().getDefiningOp<ExtOp>();
1854  auto rhsDefOp = contractOp.getRhs().getDefiningOp<ExtOp>();
1855 
1856  if (!lhsDefOp || !rhsDefOp) {
1857  return rewriter.notifyMatchFailure(contractOp,
1858  "no defining op on contract operands");
1859  }
1860 
1861  rewriter.replaceOpWithNewOp<vector::ContractionOp>(
1862  contractOp, lhsDefOp->getOperand(0), rhsDefOp->getOperand(0),
1863  contractOp.getAcc(), contractOp.getIndexingMapsAttr(),
1864  contractOp.getIteratorTypesAttr());
1865 
1866  return success();
1867  }
1868 };
1869 
1870 /// Pattern to fold a chained reduction into a series of vector additions and
1871 /// a final reduction. This form should require fewer subgroup operations.
1872 ///
1873 /// ```mlir
1874 /// %a = vector.reduction <add> %x, %acc
1875 /// %b = vector.reduction <add> %y, %a
1876 /// ==>
1877 /// %a = arith.addf %x, %y
1878 /// %b = vector.reduction <add> %a, %acc
1879 /// ```
1880 struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
1881  using OpRewritePattern::OpRewritePattern;
1882 
1883  LogicalResult matchAndRewrite(vector::ReductionOp op,
1884  PatternRewriter &rewriter) const override {
1885  // TODO: Handle other combining kinds.
1886  if (op.getKind() != vector::CombiningKind::ADD)
1887  return failure();
1888 
1889  // Accumulator is optional.
1890  Value acc = op.getAcc();
1891  if (!acc)
1892  return failure();
1893 
1894  if (!acc.getType().isIntOrFloat())
1895  return failure();
1896 
1897  auto parentReduction = acc.getDefiningOp<vector::ReductionOp>();
1898  if (!parentReduction)
1899  return failure();
1900 
1901  Location loc = op.getLoc();
1902  Value vAdd;
1903  if (isa<IntegerType>(acc.getType())) {
1904  vAdd = rewriter.createOrFold<arith::AddIOp>(
1905  loc, parentReduction.getVector(), op.getVector());
1906  } else {
1907  vAdd = arith::AddFOp::create(rewriter, loc, parentReduction.getVector(),
1908  op.getVector());
1909  }
1910  rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), vAdd,
1911  parentReduction.getAcc());
1912  return success();
1913  }
1914 };
1915 
1916 // Helper function that drops non-scalable unit dimensions from a VectorType,
1917 // keeping at least 1 dimension to avoid generating 0-D vectors. Scalable unit
1918 // dimensions are not dropped. Folding such dimensions would require "shifting"
1919 // the scalable flag onto some other fixed-width dim (e.g. vector<[1]x4xf32> ->
1920 // vector<[4]xf32>). This could be implemented in the future.
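// A few illustrative input/output pairs:
//   vector<1x4x1xf32> -> vector<4xf32>     (both unit dims dropped)
//   vector<[1]x4xf32> -> vector<[1]x4xf32> (scalable unit dim kept)
//   vector<1x1xf32>   -> vector<1xf32>     (one dim kept; no 0-D vectors)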
1921 static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
1922  auto inVecShape = inVecTy.getShape();
1923  SmallVector<int64_t> newShape;
1924  SmallVector<bool> newScalableDims;
1925  for (auto [dim, isScalable] :
1926  llvm::zip_equal(inVecShape, inVecTy.getScalableDims())) {
1927  if (dim == 1 && !isScalable)
1928  continue;
1929 
1930  newShape.push_back(dim);
1931  newScalableDims.push_back(isScalable);
1932  }
1933  // All dims have been dropped, return vector<1xeType>.
1934  if (newShape.empty()) {
1935  newShape.push_back(1);
1936  newScalableDims.push_back(false);
1937  }
1938 
1939  return VectorType::get(newShape, inVecTy.getElementType(), newScalableDims);
1940 }
1941 
1942 /// For vectors with at least one unit dim, replaces:
1943 /// elementwise(a, b)
1944 /// with:
1945 /// sc_a = shape_cast(a)
1946 /// sc_b = shape_cast(b)
1947 /// res = elementwise(sc_a, sc_b)
1948 /// return shape_cast(res)
1949 /// The newly inserted shape_cast Ops fold (before elementwise Op) and then
1950 /// restore (after elementwise Op) the unit dim. Vectors `a` and `b` are
1951 /// required to be rank > 1.
1952 ///
1953 /// Ex:
1954 /// %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
1955 /// %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
1956 ///
1957 /// gets converted to:
1958 ///
1959 /// %B_row_sc = vector.shape_cast %B_row : vector<1x[4]xf32> to vector<[4]xf32>
1960 /// %A_row_sc = vector.shape_cast %A_row : vector<1x[4]xf32> to vector<[4]xf32>
1961 /// %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
1962 /// %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
1963 /// %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
1964 ///
1965 /// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
1966 /// `%cast`.
1967 struct DropUnitDimFromElementwiseOps final
1968  : public OpTraitRewritePattern<OpTrait::Elementwise> {
1969  using OpTraitRewritePattern::OpTraitRewritePattern;
1970  LogicalResult matchAndRewrite(Operation *op,
1971  PatternRewriter &rewriter) const override {
1972  if (op->getNumResults() != 1 || op->getNumRegions() != 0)
1973  return failure();
1974 
1975  auto resultVectorType = dyn_cast<VectorType>(op->getResult(0).getType());
1976  if (!resultVectorType)
1977  return failure();
1978 
1979  // Check the operand pre-conditions. For `Elementwise` ops all operands are
1980  // guaranteed to have identical shapes (with some exceptions such as
1981  // `arith.select`) and it suffices to only check one of them.
1982  auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
1983  if (!sourceVectorType)
1984  return failure();
1985  if (sourceVectorType.getRank() < 2)
1986  return failure();
1987 
1988  SmallVector<Value> newOperands;
1989  auto loc = op->getLoc();
1990  for (auto operand : op->getOperands()) {
1991  auto opVectorType = cast<VectorType>(operand.getType());
1992  auto newVType = dropNonScalableUnitDimFromType(opVectorType);
1993  if (newVType == opVectorType)
1994  return rewriter.notifyMatchFailure(op, "No unit dimension to remove.");
1995 
1996  auto opSC = vector::ShapeCastOp::create(rewriter, loc, newVType, operand);
1997  newOperands.push_back(opSC);
1998  }
1999 
2000  VectorType newResultVectorType =
2001  dropNonScalableUnitDimFromType(resultVectorType);
2002  // Create an updated elementwise Op without unit dim.
2003  Operation *elementwiseOp =
2004  rewriter.create(loc, op->getName().getIdentifier(), newOperands,
2005  newResultVectorType, op->getAttrs());
2006 
2007  // Restore the unit dim by applying vector.shape_cast to the result.
2008  rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
2009  elementwiseOp->getResult(0));
2010 
2011  return success();
2012  }
2013 };
2014 
2015 /// A pattern to drop unit dims from vector.transpose.
2016 ///
2017 /// Example:
2018 ///
2019 /// BEFORE:
2020 /// ```mlir
2021 /// %transpose = vector.transpose %vector, [3, 0, 1, 2]
2022 /// : vector<1x1x4x[4]xf32> to vector<[4]x1x1x4xf32>
2023 /// ```
2024 ///
2025 /// AFTER:
2026 /// ```mlir
2027 /// %dropDims = vector.shape_cast %vector
2028 /// : vector<1x1x4x[4]xf32> to vector<4x[4]xf32>
2029 /// %transpose = vector.transpose %0, [1, 0]
2030 /// : vector<4x[4]xf32> to vector<[4]x4xf32>
2031 /// %restoreDims = vector.shape_cast %transpose
2032 /// : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
2033 /// ```
2034 struct DropUnitDimsFromTransposeOp final
2035  : OpRewritePattern<vector::TransposeOp> {
2036  using OpRewritePattern::OpRewritePattern;
2037 
2038  LogicalResult matchAndRewrite(vector::TransposeOp op,
2039  PatternRewriter &rewriter) const override {
2040  VectorType sourceType = op.getSourceVectorType();
2041  VectorType sourceTypeWithoutUnitDims =
2042  dropNonScalableUnitDimFromType(sourceType);
2043 
2044  if (sourceType == sourceTypeWithoutUnitDims)
2045  return failure();
2046 
2047  // Construct a map from dimIdx -> number of dims dropped before dimIdx.
2048  auto sourceDims = llvm::to_vector(vector::getDims(sourceType));
2049  SmallVector<int64_t> droppedDimsBefore(sourceType.getRank());
2050  int64_t droppedDims = 0;
2051  for (auto [i, dim] : llvm::enumerate(sourceDims)) {
2052  droppedDimsBefore[i] = droppedDims;
2053  if (dim == std::make_tuple(1, false))
2054  ++droppedDims;
2055  }
2056 
2057  // Drop unit dims from transpose permutation.
2058  ArrayRef<int64_t> perm = op.getPermutation();
2059  SmallVector<int64_t> newPerm;
2060  for (int64_t idx : perm) {
2061  if (sourceDims[idx] == std::make_tuple(1, false))
2062  continue;
2063  newPerm.push_back(idx - droppedDimsBefore[idx]);
2064  }
2065 
2066  // Fixup for `newPerm`. `sourceTypeWithoutUnitDims` could be a vector<1xT>
2067  // type when all the dimensions are unit dimensions. In this case, `newPerm`
2068  // should be [0].
2069  if (newPerm.empty()) {
2070  newPerm.push_back(0);
2071  }
2072 
2073  Location loc = op.getLoc();
2074  // Drop the unit dims via shape_cast.
2075  auto dropDimsShapeCast = vector::ShapeCastOp::create(
2076  rewriter, loc, sourceTypeWithoutUnitDims, op.getVector());
2077  // Create the new transpose.
2078  auto transposeWithoutUnitDims =
2079  vector::TransposeOp::create(rewriter, loc, dropDimsShapeCast, newPerm);
2080  // Restore the unit dims via shape cast.
2081  rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(
2082  op, op.getResultVectorType(), transposeWithoutUnitDims);
2083 
2084  return success();
2085  }
2086 };
2087 
2088 /// A pattern to drop unit dims from the iter_args of an scf.for.
2089 ///
2090 /// Example:
2091 ///
2092 /// BEFORE:
2093 /// ```mlir
2094 /// %res = scf.for ... iter_args(%iter = %init) -> vector<[4]x1x1x4xf32> {
2095 /// ...
2096 /// scf.yield %
2097 /// }
2098 /// ```
2099 ///
2100 /// AFTER:
2101 /// ```mlir
2102 /// %drop = vector.shape_cast %init
2103 /// : vector<[4]x1x1x4xf32> to vector<[4]x4xf32>
2104 /// %new_loop = scf.for ... iter_args(%iter = %drop) -> vector<[4]x4xf32> {
2105 /// %new_iter = vector.shape_cast %iter
2106 /// : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
2107 /// ...
2108 /// }
2109 /// %res = vector.shape_cast %new_loop
2110 /// : vector<[4]x4xf32> to vector<[4]x1x1x4xf32>
2111 /// ```
2112 struct DropUnitDimsFromScfForOp final : OpRewritePattern<scf::ForOp> {
2113  using OpRewritePattern::OpRewritePattern;
2114 
2115  LogicalResult matchAndRewrite(scf::ForOp forOp,
2116  PatternRewriter &rewriter) const override {
2117  /// Find the first iter_arg with droppable unit dims. Further applications
2118  /// of this pattern will apply to later arguments.
2119  for (OpOperand &operand : forOp.getInitArgsMutable()) {
2120  auto vectorType = dyn_cast<VectorType>(operand.get().getType());
2121  if (!vectorType)
2122  continue;
2123 
2124  VectorType newVectorType = dropNonScalableUnitDimFromType(vectorType);
2125  if (vectorType == newVectorType)
2126  continue;
2127 
2128  // Create a new ForOp with that iter operand replaced.
2129  auto castFn = [](OpBuilder &b, Location loc, Type type, Value source) {
2130  return vector::ShapeCastOp::create(b, loc, type, source);
2131  };
2132 
2133  Value replacement =
2134  castFn(rewriter, forOp.getLoc(), newVectorType, operand.get());
2135  rewriter.replaceOp(forOp,
2136  replaceAndCastForOpIterArg(rewriter, forOp, operand,
2137  replacement, castFn));
2138  return success();
2139  }
2140  return failure();
2141  }
2142 };
2143 
2144 /// Pattern to eliminate redundant zero-constants added to reduction operands.
2145 /// It's enough for there to be one initial zero value, so we can eliminate the
2146 /// extra ones that feed into `vector.reduction <add>`. These get created by the
2147 /// `ChainedReduction` pattern.
2148 ///
2149 /// ```mlir
2150 /// %a = arith.addf %x, %zero
2151 /// %b = arith.addf %a, %y
2152 /// %c = vector.reduction <add> %b, %acc
2153 /// ==>
2154 /// %b = arith.addf %a, %y
2155 /// %c = vector.reduction <add> %b, %acc
2156 /// ```
2157 struct ReduceRedundantZero final : OpRewritePattern<vector::ReductionOp> {
2158  using OpRewritePattern::OpRewritePattern;
2159 
2160  LogicalResult matchAndRewrite(vector::ReductionOp op,
2161  PatternRewriter &rewriter) const override {
2162  // TODO: Handle other reduction kinds and their identity values.
2163  if (op.getKind() != vector::CombiningKind::ADD)
2164  return failure();
2165 
2166  Type elemType = op.getSourceVectorType().getElementType();
2167  // The integer case should be handled by `arith.addi` folders, only check
2168  // for floats here.
2169  if (!isa<FloatType>(elemType))
2170  return failure();
2171 
2172  auto vAdd = op.getVector().getDefiningOp<arith::AddFOp>();
2173  if (!vAdd)
2174  return failure();
2175  auto addLhs = vAdd.getLhs().getDefiningOp<arith::AddFOp>();
2176  if (!addLhs)
2177  return failure();
2178 
2179  if (!matchPattern(addLhs.getRhs(), m_AnyZeroFloat()))
2180  return failure();
2181 
2182  auto newAdd = arith::AddFOp::create(rewriter, vAdd.getLoc(),
2183  addLhs.getLhs(), vAdd.getRhs());
2184  rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), newAdd,
2185  op.getAcc());
2186  return success();
2187  }
2188 };
2189 
2190 /// Example:
2191 /// ```
2192 /// %a = vector.reduction <add> %x : vector<2xf32> into f32
2193 /// ```
2194 /// is transformed into:
2195 /// ```
2196 /// %y = vector.extract %x[0] : f32 from vector<2xf32>
2197 /// %z = vector.extract %x[1] : f32 from vector<2xf32>
2198 /// %a = arith.addf %y, %z : f32
2199 /// ```
2200 struct BreakDownVectorReduction final : OpRewritePattern<vector::ReductionOp> {
2201  BreakDownVectorReduction(MLIRContext *context,
2202  unsigned maxNumElementsToExtract,
2203  PatternBenefit benefit)
2204  : OpRewritePattern(context, benefit),
2205  maxNumElementsToExtract(maxNumElementsToExtract) {}
2206 
2207  LogicalResult matchAndRewrite(vector::ReductionOp op,
2208  PatternRewriter &rewriter) const override {
2209  VectorType type = op.getSourceVectorType();
2210  if (type.isScalable() || op.isMasked())
2211  return failure();
2212  assert(type.getRank() == 1 && "Expected a 1-d vector");
2213 
2214  int64_t numElems = type.getNumElements();
2215  if (numElems > maxNumElementsToExtract) {
2216  return rewriter.notifyMatchFailure(
2217  op, llvm::formatv("has too many vector elements ({0}) to break down "
2218  "(max allowed: {1})",
2219  numElems, maxNumElementsToExtract));
2220  }
2221 
2222  Location loc = op.getLoc();
2223  SmallVector<Value> extracted(numElems, nullptr);
2224  for (auto [idx, extractedElem] : llvm::enumerate(extracted))
2225  extractedElem = vector::ExtractOp::create(rewriter, loc, op.getVector(),
2226  static_cast<int64_t>(idx));
2227 
2228  Value res = extracted.front();
2229  for (auto extractedElem : llvm::drop_begin(extracted))
2230  res = vector::makeArithReduction(rewriter, loc, op.getKind(), res,
2231  extractedElem, op.getFastmathAttr());
2232  if (Value acc = op.getAcc())
2233  res = vector::makeArithReduction(rewriter, loc, op.getKind(), res, acc,
2234  op.getFastmathAttr());
2235 
2236  rewriter.replaceOp(op, res);
2237  return success();
2238  }
2239 
2240 private:
2241  unsigned maxNumElementsToExtract = 0;
2242 };
2243 
2244 /// Fold `mulf(tr(broadcast(A)), broadcast(B))` into `vector.outerproduct(A,
2245 /// B)`.
2246 /// Example:
2247 /// %lhsBcast = vector.broadcast %lhs : vector<4xi32> to vector<4x4xi32>
2248 /// %lhsT = vector.transpose %lhsBcast, [1, 0] : vector<4x4xi32> to vector<4x4xi32>
2249 /// %rhsBcast = vector.broadcast %rhs : vector<4xi32> to vector<4x4xi32>
2250 /// %mul = arith.muli %lhsT, %rhsBcast : vector<4x4xi32>
2251 ///
2252 /// Becomes:
2253 ///
2254 /// %res = vector.outerproduct %lhs, %rhs : vector<4xi32>, vector<4xi32>
2255 ///
2256 /// Supports only 1D-to-2D broadcasts. The following cases are not supported.
2257 /// %ex1 = vector.broadcast %lhsCast : vector<1x4xf32> to vector<4x4xf32>
2258 /// %ex2 = vector.broadcast %lhsCast : f32 to vector<4x4xf32>
2259 /// %ex3 = vector.broadcast %lhsCast : vector<1x1xf32> to vector<4x4xf32>
2260 template <typename MulOpType>
2261 struct FoldArithToVectorOuterProduct : public OpRewritePattern<MulOpType> {
2262  using OpRewritePattern<MulOpType>::OpRewritePattern;
2263  // Returns whether a vector.broadcast matches the requirements for an
2264  // outerproduct pattern, i.e., a 1D-to-2D broadcast without broadcasted unit dims.
2265  bool isValidBroadcastSource(vector::BroadcastOp broadcastOp) const {
2266  // Fail if this is not a 1D-to-2D broadcast, to avoid generating
2267  // shape_casts/broadcasts that do not belong in this pattern.
2268  if (!broadcastOp.computeBroadcastedUnitDims().empty())
2269  return false;
2270  // Avoid broadcast like f32 or vector<f32> -> ResType
2271  auto srcType = dyn_cast<VectorType>(broadcastOp.getSourceType());
2272  return srcType && srcType.getRank() != 2;
2273  }
2274 
2275  LogicalResult matchAndRewrite(MulOpType mulOp,
2276  PatternRewriter &rewriter) const override {
2277  auto resType = llvm::dyn_cast<VectorType>(mulOp.getResult().getType());
2278  if (!resType)
2279  return failure();
2280  if (resType.getRank() != 2)
2281  return failure();
2282  /// If operandA can be written as tr(broadcast(A)) and operandB as
2283  /// broadcast(B) where broadcasts are 1D-to-2D, create and return
2284  /// vector.outerproduct(A, B). Returns failure() otherwise.
2285  auto matchOuterProduct =
2286  [&](Value operandA,
2287  Value operandB) -> FailureOr<vector::OuterProductOp> {
2288  auto transposedLhs = operandA.getDefiningOp<vector::TransposeOp>();
2289  if (!transposedLhs)
2290  return failure();
2291  // Fail unless this is a true 2-D matrix transpose.
2292  ArrayRef<int64_t> permutation = transposedLhs.getPermutation();
2293  if (permutation.size() != 2 || permutation[0] != 1 || permutation[1] != 0)
2294  return failure();
2295 
2296  auto broadcastedLhs =
2297  transposedLhs.getVector().getDefiningOp<vector::BroadcastOp>();
2298  if (!broadcastedLhs || !isValidBroadcastSource(broadcastedLhs))
2299  return failure();
2300 
2301  auto broadcastedRhs = operandB.getDefiningOp<vector::BroadcastOp>();
2302  if (!broadcastedRhs || !isValidBroadcastSource(broadcastedRhs))
2303  return failure();
2304 
2305  return vector::OuterProductOp::create(
2306  rewriter, mulOp->getLoc(), resType, broadcastedLhs.getSource(),
2307  broadcastedRhs.getSource(), Value(), vector::CombiningKind::ADD);
2308  };
2309 
2310  Value lhs = mulOp->getOperand(0), rhs = mulOp->getOperand(1);
2311  auto maybeOuterP = matchOuterProduct(lhs, rhs);
2312  // Handle commutativity, the transposed op is the outerproduct LHS.
2313  if (failed(maybeOuterP))
2314  maybeOuterP = matchOuterProduct(rhs, lhs);
2315  if (failed(maybeOuterP))
2316  return failure();
2317  rewriter.replaceOp(mulOp, maybeOuterP->getResult());
2318  return success();
2319  }
2320 };
2321 
2322 } // namespace
2323 
2324 void mlir::vector::populateFoldArithExtensionPatterns(
2325  RewritePatternSet &patterns) {
2326  patterns.add<FoldArithExtIntoContractionOp<arith::ExtFOp>,
2327  FoldArithExtIntoContractionOp<arith::ExtSIOp>>(
2328  patterns.getContext());
2329 }
2330 
2331 void mlir::vector::populateVectorMaskMaterializationPatterns(
2332  RewritePatternSet &patterns, bool force32BitVectorIndices,
2333  PatternBenefit benefit) {
2334  patterns.add<VectorCreateMaskOpConversion,
2335  MaterializeTransferMask<vector::TransferReadOp>,
2336  MaterializeTransferMask<vector::TransferWriteOp>>(
2337  patterns.getContext(), force32BitVectorIndices, benefit);
2338  patterns.add<FoldI1Select>(patterns.getContext(), benefit);
2339 }
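// Minimal usage sketch (illustrative; `funcOp` and the greedy-driver call are
// assumptions, not part of this file):
//
//   RewritePatternSet patterns(funcOp.getContext());
//   vector::populateVectorMaskMaterializationPatterns(
//       patterns, /*force32BitVectorIndices=*/true);
//   (void)applyPatternsGreedily(funcOp, std::move(patterns));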
2340 
2341 void mlir::vector::populateDropUnitDimWithShapeCastPatterns(
2342  RewritePatternSet &patterns, PatternBenefit benefit) {
2343  patterns.add<DropUnitDimFromElementwiseOps, DropUnitDimsFromScfForOp,
2344  DropUnitDimsFromTransposeOp>(patterns.getContext(), benefit);
2345 }
2346 
2347 void mlir::vector::populateBubbleVectorBitCastOpPatterns(
2348  RewritePatternSet &patterns, PatternBenefit benefit) {
2349  patterns.add<BubbleDownVectorBitCastForExtract,
2350  BubbleDownBitCastForStridedSliceExtract,
2351  BubbleUpBitCastForInsert, BubbleUpBitCastForStridedSliceInsert>(
2352  patterns.getContext(), benefit);
2353 }
2354 
2355 void mlir::vector::populateBreakDownVectorBitCastOpPatterns(
2356  RewritePatternSet &patterns,
2357  std::function<bool(vector::BitCastOp)> controlFn, PatternBenefit benefit) {
2358  patterns.add<BreakDownVectorBitCast>(patterns.getContext(),
2359  std::move(controlFn), benefit);
2360 }
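// Illustrative controlFn sketch (an assumption, not from this file): restrict
// the breakdown to bitcasts with more than 4 result elements:
//
//   vector::populateBreakDownVectorBitCastOpPatterns(
//       patterns, [](vector::BitCastOp op) {
//         return op.getResultVectorType().getNumElements() > 4;
//       });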
2361 
2362 void mlir::vector::populateVectorContractCanonicalizeMatmulToMMT(
2363  RewritePatternSet &patterns,
2364  std::function<LogicalResult(vector::ContractionOp)> constraint,
2365  PatternBenefit benefit) {
2366  patterns.add<CanonicalizeContractMatmulToMMT>(patterns.getContext(), benefit,
2367  std::move(constraint));
2368 }
2369 
2370 void mlir::vector::populateVectorReductionToContractPatterns(
2371  RewritePatternSet &patterns, PatternBenefit benefit) {
2372  patterns.add<MultiReduceToContract, CombineContractBroadcastMask,
2373  CombineContractABTranspose, CombineContractResultTranspose>(
2374  patterns.getContext(), benefit);
2375 }
2376 
2377 void mlir::vector::populateDropInnerMostUnitDimsXferOpPatterns(
2378  RewritePatternSet &patterns, PatternBenefit benefit) {
2379  patterns.add<DropInnerMostUnitDimsTransferRead,
2380  DropInnerMostUnitDimsTransferWrite>(patterns.getContext(),
2381  benefit);
2382 }
2383 
2384 void mlir::vector::populateSinkVectorOpsPatterns(RewritePatternSet &patterns,
2385  PatternBenefit benefit) {
2386  patterns.add<ReorderElementwiseOpsOnTranspose, ReorderCastOpsOnBroadcast,
2387  ReorderElementwiseOpsOnBroadcast, ExtractOpFromElementwise>(
2388  patterns.getContext(), benefit);
2389 }
2390 
2391 void mlir::vector::populateSinkVectorMemOpsPatterns(RewritePatternSet &patterns,
2392  PatternBenefit benefit) {
2393  // TODO: Consider converting these patterns to canonicalizations.
2394  patterns.add<ExtractOpFromLoad, StoreOpFromSplatOrBroadcast>(
2395  patterns.getContext(), benefit);
2396 }
2397 
2398 void mlir::vector::populateChainedVectorReductionFoldingPatterns(
2399  RewritePatternSet &patterns, PatternBenefit benefit) {
2400  patterns.add<ChainedReduction>(patterns.getContext(), benefit);
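  // Note: ReduceRedundantZero is registered with a higher benefit so that the
  // greedy driver tries it first, letting it clean up the zero-adds that
  // ChainedReduction introduces.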
2401  patterns.add<ReduceRedundantZero>(patterns.getContext(),
2402  PatternBenefit(benefit.getBenefit() + 1));
2403 }
2404 
2405 void mlir::vector::populateBreakDownVectorReductionPatterns(
2406  RewritePatternSet &patterns, unsigned maxNumElementsToExtract,
2407  PatternBenefit benefit) {
2408  patterns.add<BreakDownVectorReduction>(patterns.getContext(),
2409  maxNumElementsToExtract, benefit);
2410 }
2411 
2412 void mlir::vector::populateElementwiseToVectorOpsPatterns(
2413  RewritePatternSet &patterns) {
2414  patterns.add<FoldArithToVectorOuterProduct<arith::MulFOp>,
2415  FoldArithToVectorOuterProduct<arith::MulIOp>>(
2416  patterns.getContext());
2417 }
2418 
2419 //===----------------------------------------------------------------------===//
2420 // TableGen'd enum attribute definitions
2421 //===----------------------------------------------------------------------===//
2422 
2423 #include "mlir/Dialect/Vector/Transforms/VectorTransformsEnums.cpp.inc"