//===- VectorTransforms.cpp - Conversion within the Vector dialect --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements target-independent rewrites as 1->N patterns.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"

#include <cassert>
#include <cstdint>
#include <functional>
#include <optional>
#include <type_traits>

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Interfaces/VectorInterfaces.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "vector-to-vector"

using namespace mlir;
using namespace mlir::vector;

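// Helper to pull the integer values out of an ArrayAttr of IntegerAttrs, e.g.
// (an illustrative sketch, not from the source):
//   extractVector<int64_t>(rewriter.getI64ArrayAttr({2, 3})) yields {2, 3}.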
template <typename IntType>
static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(llvm::map_range(
      arrayAttr.getAsRange<IntegerAttr>(),
      [](IntegerAttr attr) { return static_cast<IntType>(attr.getInt()); }));
}

// Helper to find an index in an affine map.
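// For example, for the map (d0, d1) -> (d1, d0), getResultIndex(map, 0)
// returns 1, since d0 appears as the map's second result.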
static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
  for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
    int64_t idx = map.getDimPosition(i);
    if (idx == index)
      return i;
  }
  return std::nullopt;
}

namespace {

/// ShapeCastOpFolder folds cancelling ShapeCastOps away.
//
// Example:
//
//  The following MLIR with cancelling ShapeCastOps:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = shape_cast %0 : vector<5x4x2xf32> to vector<20x2xf32>
//   %2 = shape_cast %1 : vector<20x2xf32> to vector<5x4x2xf32>
//   %3 = user %2 : vector<5x4x2xf32>
//
//  Should canonicalize to the following:
//
//   %0 = source : vector<5x4x2xf32>
//   %1 = user %0 : vector<5x4x2xf32>
//
struct ShapeCastOpFolder : public OpRewritePattern<vector::ShapeCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ShapeCastOp shapeCastOp,
                                PatternRewriter &rewriter) const override {
    // Check if 'shapeCastOp' has vector source/result type.
    auto sourceVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getSource().getType());
    auto resultVectorType =
        dyn_cast_or_null<VectorType>(shapeCastOp.getResult().getType());
    if (!sourceVectorType || !resultVectorType)
      return failure();

    // Check if shape cast op source operand is also a shape cast op.
    auto sourceShapeCastOp = dyn_cast_or_null<vector::ShapeCastOp>(
        shapeCastOp.getSource().getDefiningOp());
    if (!sourceShapeCastOp)
      return failure();
    auto operandSourceVectorType =
        cast<VectorType>(sourceShapeCastOp.getSource().getType());
    auto operandResultVectorType = sourceShapeCastOp.getType();

    // Check if shape cast operations invert each other.
    if (operandSourceVectorType != resultVectorType ||
        operandResultVectorType != sourceVectorType)
      return failure();

    rewriter.replaceOp(shapeCastOp, sourceShapeCastOp.getSource());
    return success();
  }
};

/// Convert MulIOp/MulFOp + MultiDimReductionOp<add> into ContractionOp.
/// Ex:
/// ```
/// %0 = arith.mulf %arg0, %arg1 : vector<8x32x16xf32>
/// %1 = vector.multi_reduction add, %0 [1]
///   : vector<8x32x16xf32> to vector<8x16xf32>
/// ```
/// Gets converted to:
/// ```
/// %1 = vector.contract {indexing_maps = [
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d2)>],
///    iterator_types = ["parallel", "reduction", "parallel"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x16xf32>
/// ```
struct MultiReduceToContract
    : public OpRewritePattern<vector::MultiDimReductionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::MultiDimReductionOp reduceOp,
                                PatternRewriter &rewriter) const override {
    if (reduceOp.getKind() != vector::CombiningKind::ADD)
      return failure();
    Operation *mulOp = reduceOp.getSource().getDefiningOp();
    if (!mulOp || !isa<arith::MulIOp, arith::MulFOp>(mulOp))
      return failure();
    SmallVector<bool> reductionMask = reduceOp.getReductionMask();
    auto srcMap = rewriter.getMultiDimIdentityMap(reductionMask.size());
    SmallVector<AffineExpr> exprs;
    SmallVector<vector::IteratorType> iteratorTypes;
    for (const auto &isReduceDim : llvm::enumerate(reductionMask)) {
      if (!isReduceDim.value()) {
        iteratorTypes.push_back(vector::IteratorType::parallel);
        exprs.push_back(rewriter.getAffineDimExpr(isReduceDim.index()));
      } else {
        iteratorTypes.push_back(vector::IteratorType::reduction);
      }
    }
    auto dstMap =
        AffineMap::get(/*dimCount=*/reductionMask.size(),
                       /*symbolCount=*/0, exprs, reduceOp.getContext());
    rewriter.replaceOpWithNewOp<mlir::vector::ContractionOp>(
        reduceOp, mulOp->getOperand(0), mulOp->getOperand(1), reduceOp.getAcc(),
        rewriter.getAffineMapArrayAttr({srcMap, srcMap, dstMap}),
        rewriter.getArrayAttr(llvm::to_vector(llvm::map_range(
            iteratorTypes, [&](IteratorType t) -> mlir::Attribute {
              return IteratorTypeAttr::get(rewriter.getContext(), t);
            }))));
    return success();
  }
};

/// Merge LHS/RHS (A/B) TransposeOp into ContractionOp user.
/// Ex:
/// ```
/// %0 = vector.transpose %arg0, [2, 0, 1]
///   : vector<32x16x8xf32> to vector<8x32x16xf32>
/// %1 = vector.contract {indexing_maps = [
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
/// %1 = vector.contract {indexing_maps = [
///        affine_map<(d0, d1, d2) -> (d1, d2, d0)>,
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16x8xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractABTranspose final
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto transposeOp = operand->getDefiningOp<vector::TransposeOp>();
      if (!transposeOp)
        continue;
      AffineMap permutationMap = AffineMap::getPermutationMap(
          transposeOp.getPermutation(), contractOp.getContext());
      map = inversePermutation(permutationMap).compose(map);
      *operand = transposeOp.getVector();
      changed = true;
    }
    if (!changed)
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merges accumulator and result transposes into contract.
///
/// For example:
/// ```mlir
/// %accT = vector.transpose %acc, [0, 2, 1]
///   : vector<2x8x4xf32> to vector<2x4x8xf32>
/// %contract = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %accT
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x4x8xf32>
/// %0 = vector.transpose %contract, [0, 2, 1]
///   : vector<2x4x8xf32> to vector<2x8x4xf32>
/// ```
/// Becomes:
/// ```mlir
/// %0 = vector.contract {
///   indexing_maps = [
///     affine_map<(d0, d1, d2, d3) -> (d0, d3, d1)>,
///     affine_map<(d0, d1, d2, d3) -> (d3, d2)>,
///     affine_map<(d0, d1, d2, d3) -> (d0, d2, d1)>
///   ],
///   iterator_types = ["parallel", "parallel", "parallel", "reduction"],
///   kind = #vector.kind<add>
/// } %lhs, %rhs, %acc
///   : vector<2x4x4xf32>, vector<4x8xf32> into vector<2x8x4xf32>
/// ```
struct CombineContractResultTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp resTOp,
                                PatternRewriter &rewriter) const override {
    auto contractOp = resTOp.getVector().getDefiningOp<vector::ContractionOp>();
    if (!contractOp || !contractOp->hasOneUse())
      return failure();

    auto accTOp = contractOp.getAcc().getDefiningOp<vector::TransposeOp>();
    if (!accTOp)
      return failure();

    MLIRContext *context = contractOp.getContext();
    auto maps = llvm::to_vector<3>(contractOp.getIndexingMapsArray());
    AffineMap contractMap = maps.back();

    // Accumulator transpose performs f(A) -> B. Contract performs g(C) -> B.
    // To index into A in contract, we need revert(f)(g(C)) -> A.
    auto accTMap =
        AffineMap::getPermutationMap(accTOp.getPermutation(), context);

    // Contract performs g(C) -> D. Result transpose performs h(D) -> E.
    // To index into E in contract, we need h(g(C)) -> E.
    auto resTMap =
        AffineMap::getPermutationMap(resTOp.getPermutation(), context);
    auto combinedResMap = resTMap.compose(contractMap);

    // The accumulator and result share the same indexing map, so they must
    // agree for the merge to be valid. That is, combinedResMap must equal
    // inversePermutation(accTMap).compose(contractMap), i.e. resTMap must be
    // the inverse of accTMap.
    if (inversePermutation(accTMap) != resTMap)
      return failure();
    maps.back() = combinedResMap;

    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        resTOp, contractOp.getLhs(), contractOp.getRhs(), accTOp.getVector(),
        rewriter.getAffineMapArrayAttr(maps), contractOp.getIteratorTypes());
    return success();
  }
};

/// Merge BroadcastOp into ContractionOp user.
/// Ex:
/// ```
/// %0 = vector.broadcast %arg0 : vector<32x16xf32> to vector<8x32x16xf32>
/// %1 = vector.contract {indexing_maps = [
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %0, %arg1, %cst_f0
///    : vector<8x32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
/// Gets converted to:
/// ```
/// %1 = vector.contract {indexing_maps = [
///        affine_map<(d0, d1, d2) -> (d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1, d2)>,
///        affine_map<(d0, d1, d2) -> (d0, d1)>],
///    iterator_types = ["parallel", "parallel", "reduction"],
///    kind = add} %arg0, %arg1, %cst_f0
///    : vector<32x16xf32>, vector<8x32x16xf32> into vector<8x32xf32>
/// ```
struct CombineContractBroadcast
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
                                PatternRewriter &rewriter) const override {
    SmallVector<AffineMap, 4> maps =
        llvm::to_vector<4>(contractOp.getIndexingMapsArray());
    Value lhs = contractOp.getLhs();
    Value rhs = contractOp.getRhs();
    size_t index = 0;
    bool changed = false;
    for (Value *operand : {&lhs, &rhs}) {
      AffineMap &map = maps[index++];
      auto broadcast = operand->getDefiningOp<vector::BroadcastOp>();
      if (!broadcast)
        continue;
      // contractionOp can only take vector as operands.
      auto srcType = dyn_cast<VectorType>(broadcast.getSourceType());
      if (!srcType ||
          srcType.getRank() == broadcast.getResultVectorType().getRank())
        continue;
      int64_t rankDiff =
          broadcast.getResultVectorType().getRank() - srcType.getRank();
      bool innerDimBroadcast = false;
      SmallVector<AffineExpr> originalDims;
      for (const auto &dim : llvm::enumerate(srcType.getShape())) {
        if (dim.value() != broadcast.getResultVectorType().getDimSize(
                               rankDiff + dim.index())) {
          innerDimBroadcast = true;
          break;
        }
        originalDims.push_back(
            rewriter.getAffineDimExpr(dim.index() + rankDiff));
      }
      // Contract doesn't support inner dimension broadcast. Once this is
      // relaxed we can remove this case.
      if (innerDimBroadcast)
        continue;

      // It would be incorrect to fold a broadcast onto a reduction dimension
      // of non-unit size.
      bool nonUnitDimReductionBroadcast = false;
      for (int64_t i = 0; i < rankDiff; ++i) {
        if (broadcast.getResultVectorType().getDimSize(i) != 1 &&
            isReductionIterator(contractOp.getIteratorTypes()
                                    .getValue()[map.getDimPosition(i)])) {
          nonUnitDimReductionBroadcast = true;
          break;
        }
      }
      if (nonUnitDimReductionBroadcast)
        continue;

      AffineMap broadcastMap =
          AffineMap::get(broadcast.getResultVectorType().getRank(), 0,
                         originalDims, contractOp.getContext());
      map = broadcastMap.compose(map);
      *operand = broadcast.getSource();
      changed = true;
    }

    if (!changed)
      return failure();

    // Determine which dims are unused, now that the maps have been composed
    // with the broadcast maps.
    llvm::SmallBitVector unusedDimsBitVector = getUnusedDimsBitVector(maps);
    // Compress unused dims.
    for (auto &m : maps)
      m = compressDims(m, unusedDimsBitVector);
    // Compute the combined iterators.
    SmallVector<Attribute> iterators;
    for (unsigned i = 0; i < unusedDimsBitVector.size(); ++i) {
      if (!unusedDimsBitVector.test(i))
        iterators.push_back(contractOp.getIteratorTypes().getValue()[i]);
    }
    // Check that compressing unused dims isn't removing all reduction
    // dimension pairs. For example, if the vector.contract had only one
    // reduction iterator and that was a unit-dimension created by a broadcast,
    // then we should bail here, otherwise we would create a contract without
    // a reduction dimension pair.
    bool hasReductionIteratorApplyingOnBothSides = false;
    for (unsigned i = 0; i < iterators.size(); ++i) {
      if (!isReductionIterator(iterators[i]))
        continue;
      if (getResultIndex(maps[0], i) && getResultIndex(maps[1], i)) {
        hasReductionIteratorApplyingOnBothSides = true;
        break;
      }
    }
    if (!hasReductionIteratorApplyingOnBothSides)
      return failure();

    // If the compressed maps have a dimension that is not used by either
    // LHS or RHS then the ContractionOp verifier would fail.
    if (getUnusedDimsBitVector({maps[0], maps[1]}).any())
      return failure();
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        contractOp, lhs, rhs, contractOp.getAcc(),
        rewriter.getAffineMapArrayAttr(maps), rewriter.getArrayAttr(iterators));
    return success();
  }
};

/// Reorders cast(broadcast) to broadcast(cast). This moves broadcast ops and
/// contraction ops closer together, which lets the CombineContractBroadcast
/// pattern kick in when casting ops sit between these operations.
/// Ex:
/// ```
/// %0 = vector.broadcast %arg0 : vector<32x16xi8> to vector<8x32x16xi8>
/// %1 = arith.extsi %0 : vector<8x32x16xi8> to vector<8x32x16xi32>
/// ```
/// Gets converted to:
/// ```
/// %0 = arith.extsi %arg0 : vector<32x16xi8> to vector<32x16xi32>
/// %1 = vector.broadcast %0 : vector<32x16xi32> to vector<8x32x16xi32>
/// ```
struct ReorderCastOpsOnBroadcast
    : public OpInterfaceRewritePattern<CastOpInterface> {
  using OpInterfaceRewritePattern::OpInterfaceRewritePattern;

  LogicalResult matchAndRewrite(CastOpInterface op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumOperands() != 1)
      return failure();
    auto bcastOp = op->getOperand(0).getDefiningOp<vector::BroadcastOp>();
    if (!bcastOp)
      return failure();

    Type castResTy = getElementTypeOrSelf(op->getResult(0));
    if (auto vecTy = dyn_cast<VectorType>(bcastOp.getSourceType()))
      castResTy = vecTy.clone(castResTy);
    auto *castOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(),
                        bcastOp.getSource(), castResTy, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, op->getResult(0).getType(), castOp->getResult(0));
    return success();
  }
};

/// Reorders elementwise(transpose) to transpose(elementwise). This moves
/// transpose ops and contraction ops closer together, which lets the
/// CombineContractABTranspose pattern kick in when elementwise ops sit between
/// these operations. Ex:
/// ```
/// %at = vector.transpose %a, [1, 0]: vector<4x2xf32> to vector<2x4xf32>
/// %bt = vector.transpose %b, [1, 0]: vector<4x2xf32> to vector<2x4xf32>
/// %r = arith.addf %at, %bt : vector<2x4xf32>
/// ```
/// Gets converted to:
/// ```
/// %0 = arith.addf %a, %b : vector<4x2xf32>
/// %r = vector.transpose %0, [1, 0] : vector<4x2xf32> to vector<2x4xf32>
/// ```
struct ReorderElementwiseOpsOnTranspose final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1 || op->getNumRegions() != 0)
      return failure();

    // Make sure all operands are transpose/constant ops and collect their
    // transposition maps.
    SmallVector<ArrayRef<int64_t>> transposeMaps;
    transposeMaps.reserve(op->getNumOperands());
    // Record the initial type before transposition. We'll use its shape later.
    // Any type will do here as we will check all transpose maps are the same.
    VectorType srcType;
    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        transposeMaps.push_back(transposeOp.getPermutation());
        srcType = transposeOp.getSourceVectorType();
      } else if (!matchPattern(operand, m_Constant())) {
        return failure();
      }
    }
    if (transposeMaps.empty())
      return failure();
    // This is an elementwise op, so all transposed operands should have the
    // same type. We need to additionally check that all transposes use the
    // same map.
    if (!llvm::all_equal(transposeMaps))
      return rewriter.notifyMatchFailure(op, "different transpose map");

    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());

    // If there are constant operands, we need to insert inverse transposes for
    // them. Calculate the inverse order first.
    auto order = transposeMaps.front();
    SmallVector<int64_t> invOrder(order.size());
    for (int i = 0, e = order.size(); i < e; ++i)
      invOrder[order[i]] = i;

    for (Value operand : op->getOperands()) {
      auto transposeOp = operand.getDefiningOp<vector::TransposeOp>();
      if (transposeOp) {
        srcValues.push_back(transposeOp.getVector());
      } else {
        // This is a constant. Create a reverse transpose op for it.
        auto vectorType =
            srcType.clone(cast<VectorType>(operand.getType()).getElementType());
        srcValues.push_back(rewriter.create<vector::TransposeOp>(
            operand.getLoc(), vectorType, operand, invOrder));
      }
    }

    auto vectorType = srcType.clone(
        cast<VectorType>(op->getResultTypes()[0]).getElementType());
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        vectorType, op->getAttrs());
    rewriter.replaceOpWithNewOp<vector::TransposeOp>(
        op, op->getResultTypes()[0], elementwiseOp->getResult(0),
        transposeMaps.front());
    return success();
  }
};

// Returns the values in `arrayAttr` as an integer vector.
static SmallVector<int64_t> getIntValueVector(ArrayAttr arrayAttr) {
  return llvm::to_vector<4>(
      llvm::map_range(arrayAttr.getAsRange<IntegerAttr>(),
                      [](IntegerAttr attr) { return attr.getInt(); }));
}

// Shuffles vector.bitcast op after vector.extract op.
//
// This transforms IR like:
//   %0 = vector.bitcast %src : vector<4xf32> to vector<8xf16>
//   %1 = vector.extract %0[3] : f16 from vector<8xf16>
// Into:
//   %0 = vector.extract %src[1] : f32 from vector<4xf32>
//   %1 = vector.bitcast %0 : vector<1xf32> to vector<2xf16>
//   %2 = vector.extract %1[1] : f16 from vector<2xf16>
struct BubbleDownVectorBitCastForExtract
    : public OpRewritePattern<vector::ExtractOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractOp extractOp,
                                PatternRewriter &rewriter) const override {
    // Only support extracting scalars for now.
    if (extractOp.getSourceVectorType().getRank() != 1)
      return failure();

    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Fail to match if we only have one element in the cast op source.
    // This is to avoid infinite loop given that this pattern can generate
    // such cases.
    if (castSrcType.getNumElements() == 1)
      return failure();

    // Only support casting to a larger number of elements for now.
    // E.g., vector<4xf32> -> vector<8xf16>.
    if (castSrcType.getNumElements() > castDstType.getNumElements())
      return failure();

    unsigned expandRatio =
        castDstType.getNumElements() / castSrcType.getNumElements();

    auto getFirstIntValue = [](ArrayRef<OpFoldResult> values) -> uint64_t {
      assert(values[0].is<Attribute>() && "Unexpected non-constant index");
      return cast<IntegerAttr>(values[0].get<Attribute>()).getInt();
    };

    uint64_t index = getFirstIntValue(extractOp.getMixedPosition());

    // Get the single scalar (as a vector) in the source value that packs the
    // desired scalar. E.g. extract vector<1xf32> from vector<4xf32>.
    Location loc = extractOp.getLoc();
    Value packedValue = rewriter.create<vector::ExtractOp>(
        loc, castOp.getSource(), index / expandRatio);
    Type packedVecType = VectorType::get(/*shape=*/{1}, packedValue.getType());
    Value zero = rewriter.create<arith::ConstantOp>(
        loc, packedVecType, rewriter.getZeroAttr(packedVecType));
    packedValue = rewriter.create<vector::InsertOp>(loc, packedValue, zero,
                                                    /*position=*/0);

    // Cast it to a vector with the desired scalar's type.
    // E.g. f32 -> vector<2xf16>.
    VectorType packedType =
        VectorType::get({expandRatio}, castDstType.getElementType());
    Value castedValue =
        rewriter.create<vector::BitCastOp>(loc, packedType, packedValue);

    // Finally extract the desired scalar.
    rewriter.replaceOpWithNewOp<vector::ExtractOp>(extractOp, castedValue,
                                                   index % expandRatio);
    return success();
  }
};

// Shuffles vector.bitcast op after vector.extract_strided_slice op.
//
// This transforms IR like:
//   %cast = vector.bitcast %arg0 : vector<4xf32> to vector<8xf16>
//   %0 = vector.extract_strided_slice %cast {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
// Into:
//   %0 = vector.extract_strided_slice %arg0 {
//          offsets = [2], sizes = [2], strides = [1]
//        } : vector<4xf32> to vector<2xf32>
//   %1 = vector.bitcast %0 : vector<2xf32> to vector<4xf16>
struct BubbleDownBitCastForStridedSliceExtract
    : public OpRewritePattern<vector::ExtractStridedSliceOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ExtractStridedSliceOp extractOp,
                                PatternRewriter &rewriter) const override {
    auto castOp = extractOp.getVector().getDefiningOp<vector::BitCastOp>();
    if (!castOp)
      return failure();

    VectorType castSrcType = castOp.getSourceVectorType();
    VectorType castDstType = castOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to more elements for now; other cases to be implemented.
    if (castSrcLastDim > castDstLastDim)
      return failure();

    // Only accept all one strides for now.
    if (llvm::any_of(extractOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = extractOp.getSourceVectorType().getRank();
    assert(castDstLastDim % castSrcLastDim == 0);
    int64_t expandRatio = castDstLastDim / castSrcLastDim;

    // If we have fewer offsets than the rank, then implicitly we are selecting
    // the full range for the last bitcasted dimension; other dimensions aren't
    // affected. Otherwise, we need to scale down the last dimension's offset
    // given we are extracting from fewer elements now.
    ArrayAttr newOffsets = extractOp.getOffsets();
    if (newOffsets.size() == rank) {
      SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
      if (offsets.back() % expandRatio != 0)
        return failure();
      offsets.back() = offsets.back() / expandRatio;
      newOffsets = rewriter.getI64ArrayAttr(offsets);
    }

    // Similarly for sizes.
    ArrayAttr newSizes = extractOp.getSizes();
    if (newSizes.size() == rank) {
      SmallVector<int64_t> sizes = getIntValueVector(newSizes);
      if (sizes.back() % expandRatio != 0)
        return failure();
      sizes.back() = sizes.back() / expandRatio;
      newSizes = rewriter.getI64ArrayAttr(sizes);
    }

    SmallVector<int64_t> dims =
        llvm::to_vector<4>(cast<VectorType>(extractOp.getType()).getShape());
    dims.back() = dims.back() / expandRatio;
    VectorType newExtractType =
        VectorType::get(dims, castSrcType.getElementType());

    auto newExtractOp = rewriter.create<vector::ExtractStridedSliceOp>(
        extractOp.getLoc(), newExtractType, castOp.getSource(), newOffsets,
        newSizes, extractOp.getStrides());

    rewriter.replaceOpWithNewOp<vector::BitCastOp>(
        extractOp, extractOp.getType(), newExtractOp);

    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert op.
//
// This transforms IR like:
//   %0 = vector.insert %val, %dst[4] : vector<32xi4> into vector<8x32xi4>
//   %1 = vector.bitcast %0 : vector<8x32xi4> to vector<8x16xi8>
// Into:
//   %0 = vector.bitcast %val : vector<32xi4> to vector<16xi8>
//   %1 = vector.bitcast %dst : vector<8x32xi4> to vector<8x16xi8>
//   %2 = vector.insert %0, %1 [4] : vector<16xi8> into vector<8x16xi8>
//
struct BubbleUpBitCastForInsert : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();

    // 0-D and scalable vectors are not supported yet.
    if (castSrcType.getRank() == 0 || castSrcType.isScalable() ||
        castDstType.isScalable())
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    bool isNumElemsShrink = castSrcLastDim >= castDstLastDim;
    int64_t ratio;
    if (isNumElemsShrink) {
      assert(castSrcLastDim % castDstLastDim == 0);
      ratio = castSrcLastDim / castDstLastDim;
    } else {
      assert(castDstLastDim % castSrcLastDim == 0);
      ratio = castDstLastDim / castSrcLastDim;
    }

    auto insertOp = bitcastOp.getSource().getDefiningOp<vector::InsertOp>();
    if (!insertOp)
      return failure();

    // Only vector sources are supported for now.
    auto insertSrcType = dyn_cast<VectorType>(insertOp.getSourceType());
    if (!insertSrcType)
      return failure();

    // Bitcast the source.
    SmallVector<int64_t> srcDims(insertSrcType.getShape());
    srcDims.back() =
        isNumElemsShrink ? srcDims.back() / ratio : srcDims.back() * ratio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());
    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims(insertOp.getDestVectorType().getShape());
    dstDims.back() =
        isNumElemsShrink ? dstDims.back() / ratio : dstDims.back() * ratio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    // Bitcast the destination.
    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    // Generate new insert.
    rewriter.replaceOpWithNewOp<vector::InsertOp>(
        bitcastOp, newCastSrcOp, newCastDstOp, insertOp.getMixedPosition());
    return success();
  }
};

// Shuffles vector.bitcast op before vector.insert_strided_slice op.
//
// This transforms IR like:
//   %0 = vector.insert_strided_slice %src, %dst {
//          offsets = [0], strides = [1]} : vector<4xf16> into vector<8xf16>
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %0 = vector.bitcast %src : vector<4xf16> to vector<2xf32>
//   %1 = vector.bitcast %dst : vector<8xf16> to vector<4xf32>
//   %2 = vector.insert_strided_slice %0, %1 {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BubbleUpBitCastForStridedSliceInsert
    : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {
    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());
    // Skip 0-D vectors, which cannot come from InsertStridedSliceOp.
    if (castSrcType.getRank() == 0)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;

    auto insertOp =
        bitcastOp.getSource().getDefiningOp<vector::InsertStridedSliceOp>();
    if (!insertOp)
      return failure();

    // Only accept all one strides for now.
    if (llvm::any_of(insertOp.getStrides().getAsValueRange<IntegerAttr>(),
                     [](const APInt &val) { return !val.isOne(); }))
      return failure();

    unsigned rank = insertOp.getSourceVectorType().getRank();
    // Require insert op to have the same rank for the source and destination
    // vector; other cases to be implemented.
    if (rank != insertOp.getDestVectorType().getRank())
      return failure();

    // Requires that shape of insert op src is castable to dstType.
    unsigned sourceWidth = castSrcType.getElementType().getIntOrFloatBitWidth();
    unsigned destinationWidth =
        castDstType.getElementType().getIntOrFloatBitWidth();
    unsigned numElements = destinationWidth / sourceWidth;
    if (insertOp.getSourceVectorType().getNumElements() % numElements != 0)
      return failure();

    ArrayAttr newOffsets = insertOp.getOffsets();
    assert(newOffsets.size() == rank);
    SmallVector<int64_t> offsets = getIntValueVector(newOffsets);
    if (offsets.back() % shrinkRatio != 0)
      return failure();
    offsets.back() = offsets.back() / shrinkRatio;
    newOffsets = rewriter.getI64ArrayAttr(offsets);

    SmallVector<int64_t> srcDims =
        llvm::to_vector<4>(insertOp.getSourceVectorType().getShape());
    srcDims.back() = srcDims.back() / shrinkRatio;
    VectorType newCastSrcType =
        VectorType::get(srcDims, castDstType.getElementType());

    auto newCastSrcOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastSrcType, insertOp.getSource());

    SmallVector<int64_t> dstDims =
        llvm::to_vector<4>(insertOp.getDestVectorType().getShape());
    dstDims.back() = dstDims.back() / shrinkRatio;
    VectorType newCastDstType =
        VectorType::get(dstDims, castDstType.getElementType());

    auto newCastDstOp = rewriter.create<vector::BitCastOp>(
        bitcastOp.getLoc(), newCastDstType, insertOp.getDest());

    rewriter.replaceOpWithNewOp<vector::InsertStridedSliceOp>(
        bitcastOp, bitcastOp.getType(), newCastSrcOp, newCastDstOp, newOffsets,
        insertOp.getStrides());

    return success();
  }
};

// Breaks down vector.bitcast op.
//
// This transforms IR like:
//   %1 = vector.bitcast %0 : vector<8xf16> to vector<4xf32>
// Into:
//   %cst = vector.splat %c0_f32 : vector<4xf32>
//   %1 = vector.extract_strided_slice %0 {
//          offsets = [0], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %2 = vector.bitcast %1 : vector<4xf16> to vector<2xf32>
//   %4 = vector.insert_strided_slice %2, %cst {
//          offsets = [0], strides = [1]} : vector<2xf32> into vector<4xf32>
//   %5 = vector.extract_strided_slice %0 {
//          offsets = [4], sizes = [4], strides = [1]
//        } : vector<8xf16> to vector<4xf16>
//   %6 = vector.bitcast %5 : vector<4xf16> to vector<2xf32>
//   %7 = vector.insert_strided_slice %6, %4 {
//          offsets = [2], strides = [1]} : vector<2xf32> into vector<4xf32>
struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
  using OpRewritePattern::OpRewritePattern;

public:
  BreakDownVectorBitCast(MLIRContext *context,
                         std::function<bool(vector::BitCastOp)> controlFn,
                         PatternBenefit benefit)
      : OpRewritePattern(context, benefit), controlFn(std::move(controlFn)) {}

  LogicalResult matchAndRewrite(vector::BitCastOp bitcastOp,
                                PatternRewriter &rewriter) const override {

    if (controlFn && !controlFn(bitcastOp))
      return failure();

    VectorType castSrcType = bitcastOp.getSourceVectorType();
    VectorType castDstType = bitcastOp.getResultVectorType();
    assert(castSrcType.getRank() == castDstType.getRank());

    // Only support rank 1 case for now.
    if (castSrcType.getRank() != 1)
      return failure();

    int64_t castSrcLastDim = castSrcType.getShape().back();
    int64_t castDstLastDim = castDstType.getShape().back();
    // Require casting to fewer elements for now; other cases to be
    // implemented.
    if (castSrcLastDim < castDstLastDim)
      return failure();

    assert(castSrcLastDim % castDstLastDim == 0);
    int64_t shrinkRatio = castSrcLastDim / castDstLastDim;
    // Nothing to do if it is already bitcasting to a single element.
    if (castSrcLastDim == shrinkRatio)
      return failure();

    Location loc = bitcastOp.getLoc();
    Type elemType = castDstType.getElementType();
    assert(elemType.isSignlessIntOrIndexOrFloat());

    Value zero = rewriter.create<arith::ConstantOp>(
        loc, elemType, rewriter.getZeroAttr(elemType));
    Value res = rewriter.create<SplatOp>(loc, castDstType, zero);

    SmallVector<int64_t> sliceShape{castDstLastDim};
    SmallVector<int64_t> strides{1};
    VectorType newCastDstType =
        VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
                        castDstType.getElementType());

    for (int i = 0, e = shrinkRatio; i < e; ++i) {
      Value extracted = rewriter.create<ExtractStridedSliceOp>(
          loc, bitcastOp.getSource(), ArrayRef<int64_t>{i * castDstLastDim},
          sliceShape, strides);
      Value bitcast =
          rewriter.create<BitCastOp>(loc, newCastDstType, extracted);
      res = rewriter.create<InsertStridedSliceOp>(
          loc, bitcast, res,
          ArrayRef<int64_t>{i * castDstLastDim / shrinkRatio}, strides);
    }
    rewriter.replaceOp(bitcastOp, res);
    return success();
  }

private:
  std::function<bool(BitCastOp)> controlFn;
};

/// Reorders elementwise(broadcast/splat) to broadcast(elementwise). Ex:
/// ```
/// %a = vector.broadcast %arg1 : index to vector<1x4xindex>
/// %b = vector.broadcast %arg2 : index to vector<1x4xindex>
/// %r = arith.addi %a, %b : vector<1x4xindex>
/// ```
/// Gets converted to:
/// ```
/// %r = arith.addi %arg1, %arg2 : index
/// %b = vector.broadcast %r : index to vector<1x4xindex>
/// ```
///
/// Both `vector.broadcast` and `vector.splat` are supported as broadcasting
/// ops.
struct ReorderElementwiseOpsOnBroadcast final
    : public OpTraitRewritePattern<OpTrait::Elementwise> {
  using OpTraitRewritePattern::OpTraitRewritePattern;
  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    if (op->getNumResults() != 1)
      return failure();
    if (!llvm::isa<ShapedType>(op->getResults()[0].getType()))
      return failure();
    if (!OpTrait::hasElementwiseMappableTraits(op))
      return failure();
    if (op->getNumOperands() == 0 ||
        op->getResults()[0].getType() != op->getOperand(0).getType()) {
      return failure();
    }
    // Avoid operations that only accept vector types, since broadcast
    // source might be scalar types.
    if (isa<vector::FMAOp>(op)) {
      return failure();
    }

    // Get the type of the lhs operand.
    auto *lhsBcastOrSplat = op->getOperand(0).getDefiningOp();
    if (!lhsBcastOrSplat ||
        !isa<vector::BroadcastOp, vector::SplatOp>(*lhsBcastOrSplat))
      return failure();
    auto lhsBcastOrSplatType = lhsBcastOrSplat->getOperand(0).getType();

    // Make sure that all operands are broadcast from identical types:
    //  * scalar (`vector.broadcast` + `vector.splat`), or
    //  * vector (`vector.broadcast`).
    // Otherwise the re-ordering wouldn't be safe.
    if (!llvm::all_of(op->getOperands(), [&lhsBcastOrSplatType](Value val) {
          auto bcast = val.getDefiningOp<vector::BroadcastOp>();
          if (bcast)
            return (bcast.getOperand().getType() == lhsBcastOrSplatType);
          auto splat = val.getDefiningOp<vector::SplatOp>();
          if (splat)
            return (splat.getOperand().getType() == lhsBcastOrSplatType);
          return false;
        })) {
      return failure();
    }

    // Collect the source values before broadcasting.
    SmallVector<Value> srcValues;
    srcValues.reserve(op->getNumOperands());
    for (Value operand : op->getOperands()) {
      srcValues.push_back(operand.getDefiningOp()->getOperand(0));
    }

    // Create the "elementwise" Op.
    Operation *elementwiseOp =
        rewriter.create(op->getLoc(), op->getName().getIdentifier(), srcValues,
                        lhsBcastOrSplatType, op->getAttrs());

    // Replace the original Op with the elementwise Op.
    auto vectorType = op->getResultTypes()[0];
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(
        op, vectorType, elementwiseOp->getResults());

    return success();
  }
};

// Helper that returns a vector comparison that constructs a mask:
//     mask = [0,1,..,n-1] + [o,o,..,o] < [b,b,..,b]
//
// If `dim == 0` then the result will be a 0-D vector.
//
// NOTE: The LLVM::GetActiveLaneMaskOp intrinsic would provide an alternative,
// much more compact, IR for this operation, but LLVM eventually
// generates more elaborate instructions for this intrinsic since it
// is very conservative on the boundary conditions.
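//
// Worked example (illustrative values, not from the source): with dim = 4,
// *off = 2 and b = 5, the mask is
//     [0,1,2,3] + [2,2,2,2] = [2,3,4,5] < [5,5,5,5] = [1,1,1,0].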
static Value buildVectorComparison(PatternRewriter &rewriter, Operation *op,
                                   bool force32BitVectorIndices, int64_t dim,
                                   Value b, Value *off = nullptr) {
  auto loc = op->getLoc();
  // If we can assume all indices fit in 32-bit, we perform the vector
  // comparison in 32-bit to get a higher degree of SIMD parallelism.
  // Otherwise we perform the vector comparison using 64-bit indices.
  Type idxType =
      force32BitVectorIndices ? rewriter.getI32Type() : rewriter.getI64Type();
  DenseIntElementsAttr indicesAttr;
  if (dim == 0 && force32BitVectorIndices) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int32_t>{0});
  } else if (dim == 0) {
    indicesAttr = DenseIntElementsAttr::get(
        VectorType::get(ArrayRef<int64_t>{}, idxType), ArrayRef<int64_t>{0});
  } else if (force32BitVectorIndices) {
    indicesAttr = rewriter.getI32VectorAttr(
        llvm::to_vector<4>(llvm::seq<int32_t>(0, dim)));
  } else {
    indicesAttr = rewriter.getI64VectorAttr(
        llvm::to_vector<4>(llvm::seq<int64_t>(0, dim)));
  }
  Value indices = rewriter.create<arith::ConstantOp>(loc, indicesAttr);
  // Add in an offset if requested.
  if (off) {
    Value o = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, *off);
    Value ov = rewriter.create<vector::SplatOp>(loc, indices.getType(), o);
    indices = rewriter.create<arith::AddIOp>(loc, ov, indices);
  }
  // Construct the vector comparison.
  Value bound = getValueOrCreateCastToIndexLike(rewriter, loc, idxType, b);
  Value bounds =
      rewriter.create<vector::SplatOp>(loc, indices.getType(), bound);
  return rewriter.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt, indices,
                                        bounds);
}

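/// Materializes a mask for a 1-D vector transfer with a potentially
/// out-of-bounds access. E.g. (an illustrative sketch, not from the source):
/// a read of vector<8xf32> starting at index %i of a memref whose trailing
/// dimension is %d gets the mask `vector.create_mask (%d - %i) : vector<8xi1>`
/// attached, and its in_bounds attribute is then set to true.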
template <typename ConcreteOp>
struct MaterializeTransferMask : public OpRewritePattern<ConcreteOp> {
public:
  explicit MaterializeTransferMask(MLIRContext *context, bool enableIndexOpt,
                                   PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<ConcreteOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(ConcreteOp xferOp,
                                PatternRewriter &rewriter) const override {
    if (!xferOp.hasOutOfBoundsDim())
      return failure();

    if (xferOp.getVectorType().getRank() > 1 || xferOp.getIndices().empty())
      return failure();

    Location loc = xferOp->getLoc();
    VectorType vtp = xferOp.getVectorType();

    // Create the in-bounds mask with all elements between [0 .. dim - offset)
    // set and [dim - offset .. vector_length) unset.
    //
    // TODO: when the leaf transfer rank is k > 1, we need the last `k`
    //       dimensions here.
    unsigned lastIndex = llvm::size(xferOp.getIndices()) - 1;
    Value off = xferOp.getIndices()[lastIndex];
    Value dim =
        vector::createOrFoldDimOp(rewriter, loc, xferOp.getSource(), lastIndex);
    Value b = rewriter.create<arith::SubIOp>(loc, dim.getType(), dim, off);
    Value mask = rewriter.create<vector::CreateMaskOp>(
        loc,
        VectorType::get(vtp.getShape(), rewriter.getI1Type(),
                        vtp.getScalableDims()),
        b);
    if (xferOp.getMask()) {
      // Intersect the in-bounds with the mask specified as an op parameter.
      mask = rewriter.create<arith::AndIOp>(loc, mask, xferOp.getMask());
    }

    rewriter.modifyOpInPlace(xferOp, [&]() {
      xferOp.getMaskMutable().assign(mask);
      xferOp.setInBoundsAttr(rewriter.getBoolArrayAttr({true}));
    });

    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Conversion pattern for a `vector.create_mask` (0-D and 1-D only).
class VectorCreateMaskOpConversion
    : public OpRewritePattern<vector::CreateMaskOp> {
public:
  explicit VectorCreateMaskOpConversion(MLIRContext *context,
                                        bool enableIndexOpt,
                                        PatternBenefit benefit = 1)
      : mlir::OpRewritePattern<vector::CreateMaskOp>(context, benefit),
        force32BitVectorIndices(enableIndexOpt) {}

  LogicalResult matchAndRewrite(vector::CreateMaskOp op,
                                PatternRewriter &rewriter) const override {
    auto dstType = op.getType();
    if (cast<VectorType>(dstType).isScalable())
      return failure();
    int64_t rank = dstType.getRank();
    if (rank > 1)
      return failure();
    rewriter.replaceOp(
        op, buildVectorComparison(rewriter, op, force32BitVectorIndices,
                                  rank == 0 ? 0 : dstType.getDimSize(0),
                                  op.getOperand(0)));
    return success();
  }

private:
  const bool force32BitVectorIndices;
};

/// Returns true if all the `i1` elements of `constantOp` are set to `value`.
static bool allI1ConstantValuesSetTo(arith::ConstantOp constantOp, bool value) {
  auto denseAttr = dyn_cast<DenseIntElementsAttr>(constantOp.getValue());
  // TODO: Support non-dense constant.
  if (!denseAttr)
    return false;

  assert(denseAttr.getElementType().isInteger(1) && "Unexpected type");
  return denseAttr.isSplat() && denseAttr.getSplatValue<bool>() == value;
}

/// Folds a select operation between an all-true and all-false vector. For now,
/// only single element vectors (i.e., vector<1xi1>) are supported. That is:
///
///   %true = arith.constant dense<true> : vector<1xi1>
///   %false = arith.constant dense<false> : vector<1xi1>
///   %result = arith.select %cond, %true, %false : i1, vector<1xi1>
///   =>
///   %result = vector.broadcast %cond : i1 to vector<1xi1>
///
/// InstCombine seems to handle vectors with multiple elements but not the
/// single element ones.
struct FoldI1Select : public OpRewritePattern<arith::SelectOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(arith::SelectOp selectOp,
                                PatternRewriter &rewriter) const override {
    auto vecType = dyn_cast<VectorType>(selectOp.getType());
    if (!vecType || !vecType.getElementType().isInteger(1))
      return failure();

    // Only scalar conditions can be folded.
    Value cond = selectOp.getCondition();
    if (isa<VectorType>(cond.getType()))
      return failure();

    // TODO: Support n-D and scalable vectors.
    if (vecType.getRank() != 1 || vecType.isScalable())
      return failure();

    // TODO: Support vectors with multiple elements.
    if (vecType.getShape()[0] != 1)
      return failure();

    auto trueConst = selectOp.getTrueValue().getDefiningOp<arith::ConstantOp>();
    if (!trueConst || !allI1ConstantValuesSetTo(trueConst, true))
      return failure();

    auto falseConst =
        selectOp.getFalseValue().getDefiningOp<arith::ConstantOp>();
    if (!falseConst || !allI1ConstantValuesSetTo(falseConst, false))
      return failure();

    // Replace select with its condition broadcasted to single element vector.
    auto elemType = rewriter.getIntegerType(vecType.getNumElements());
    auto bcastType = VectorType::get(/*shape=*/{1}, elemType);
    rewriter.replaceOpWithNewOp<vector::BroadcastOp>(selectOp, bcastType, cond);
    return success();
  }
};

/// Returns the number of dims that can be folded away from transfer ops. It
/// returns a failure if it cannot determine the number of dims to be folded.
///
/// Ex 1: returns "2" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x1xf32>
///       (the two innermost dims can be dropped by memref.subview ops)
///
/// Ex 2: returns "1" if `srcType` is memref<512x16x1x1xf32> with
///       [8192, 16, 8, 1] strides and `vectorType` is vector<16x16x1x1xf32>
///       (only the innermost unit dim of `srcType` can be dropped)
///
/// Ex 3: returns "0" if `srcType` is memref<512x16x1x1xf32> and
///       `vectorType` is vector<16x16x1x[1]xf32>
///       (the innermost dim of `vectorType` is not a unit dim; it's a
///       "scalable unit" dim)
static FailureOr<size_t>
getTransferFoldableInnerUnitDims(MemRefType srcType, VectorType vectorType) {
  SmallVector<int64_t> srcStrides;
  int64_t srcOffset;
  if (failed(getStridesAndOffset(srcType, srcStrides, srcOffset)))
    return failure();

  auto isUnitDim = [](VectorType type, int dim) {
    return type.getDimSize(dim) == 1 && !type.getScalableDims()[dim];
  };

  // According to vector.transfer_read/write semantics, the vector can be a
  // slice. Thus, we have to offset the check index with `rankDiff` in
  // `srcStrides` and source dim sizes.
  size_t result = 0;
  int rankDiff = srcType.getRank() - vectorType.getRank();
  for (int64_t i = 0, e = vectorType.getRank(); i < e; ++i) {
    // Check that the inner dim size is 1 for both memref type and vector
    // slice. It can be folded only if they are 1 and the stride is 1.
    int dim = vectorType.getRank() - i - 1;
    if (srcStrides[dim + rankDiff] != 1 ||
        srcType.getDimSize(dim + rankDiff) != 1 || !isUnitDim(vectorType, dim))
      break;
    result++;
  }
  return result;
}

/// Drop innermost contiguous unit dimensions from transfer_read operand.
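/// E.g. (an illustrative sketch, not from the source):
///
///    %0 = vector.transfer_read %arg0[%c0, %c0, %c0], %cst
///      {in_bounds = [true, true, true]}
///      : memref<8x8x1xf32>, vector<4x8x1xf32>
///
/// is rewritten as a transfer_read of vector<4x8xf32> from a rank-reduced
/// memref.subview (memref<8x8xf32>), followed by a vector.shape_cast back to
/// vector<4x8x1xf32>.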
class DropInnerMostUnitDimsTransferRead
    : public OpRewritePattern<vector::TransferReadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (readOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (readOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(readOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!readOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = readOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    auto inBounds = readOp.getInBoundsValues();
    auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
    if (llvm::is_contained(droppedInBounds, false))
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    auto loc = readOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, readOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
        readOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));
    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, readOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);
    Value result = rewriter.create<vector::TransferReadOp>(
        loc, resultTargetVecType, rankedReducedView,
        readOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        readOp.getPadding(),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    rewriter.replaceOpWithNewOp<vector::ShapeCastOp>(readOp, targetType,
                                                     result);
    return success();
  }
};

/// Drop innermost contiguous unit dimensions from transfer_write operand.
/// E.g.,
///        vector.transfer_write %arg1, %arg0[%c0, %arg2, %c0, %c0, %c0]
///          {in_bounds = [true, true, true, true, true]}
///          : vector<1x16x16x1x1xf32>, memref<1x512x16x1x1xf32>
///
/// will be replaced with
///
///        %subview = memref.subview %arg0
///          [0, 0, 0, 0, 0] [1, 512, 16, 1, 1] [1, 1, 1, 1, 1]
///          : memref<1x512x16x1x1xf32> to memref<1x512x16xf32>
///        %0 = vector.shape_cast %arg1 : vector<1x16x16x1x1xf32>
///          to vector<1x16x16xf32>
///        vector.transfer_write %0, %subview[%c0, %arg2, %c0]
///          {in_bounds = [true, true, true]}
///          : vector<1x16x16xf32>, memref<1x512x16xf32>
///
/// Note, this pattern will not collapse "scalable unit" dims (i.e. `[1]`).
class DropInnerMostUnitDimsTransferWrite
    : public OpRewritePattern<vector::TransferWriteOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
                                PatternRewriter &rewriter) const override {
    // TODO: support 0-d corner case.
    if (writeOp.getTransferRank() == 0)
      return failure();

    // TODO: support mask.
    if (writeOp.getMask())
      return failure();

    auto srcType = dyn_cast<MemRefType>(writeOp.getSource().getType());
    if (!srcType)
      return failure();

    if (!writeOp.getPermutationMap().isMinorIdentity())
      return failure();

    auto targetType = writeOp.getVectorType();
    if (targetType.getRank() <= 1)
      return failure();

    FailureOr<size_t> maybeDimsToDrop =
        getTransferFoldableInnerUnitDims(srcType, targetType);
    if (failed(maybeDimsToDrop))
      return failure();

    size_t dimsToDrop = maybeDimsToDrop.value();
    if (dimsToDrop == 0)
      return failure();

    auto inBounds = writeOp.getInBoundsValues();
    auto droppedInBounds = ArrayRef<bool>(inBounds).take_back(dimsToDrop);
    if (llvm::is_contained(droppedInBounds, false))
      return failure();

    auto resultTargetVecType =
        VectorType::get(targetType.getShape().drop_back(dimsToDrop),
                        targetType.getElementType(),
                        targetType.getScalableDims().drop_back(dimsToDrop));

    Location loc = writeOp.getLoc();
    SmallVector<OpFoldResult> sizes =
        memref::getMixedSizes(rewriter, loc, writeOp.getSource());
    SmallVector<OpFoldResult> offsets(srcType.getRank(),
                                      rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(srcType.getRank(),
                                      rewriter.getIndexAttr(1));
    auto resultMemrefType =
        cast<MemRefType>(memref::SubViewOp::inferRankReducedResultType(
            srcType.getShape().drop_back(dimsToDrop), srcType, offsets, sizes,
            strides));
    ArrayAttr inBoundsAttr = rewriter.getArrayAttr(
        writeOp.getInBoundsAttr().getValue().drop_back(dimsToDrop));

    Value rankedReducedView = rewriter.create<memref::SubViewOp>(
        loc, resultMemrefType, writeOp.getSource(), offsets, sizes, strides);
    auto permMap = getTransferMinorIdentityMap(
        cast<ShapedType>(rankedReducedView.getType()), resultTargetVecType);

    auto shapeCast = rewriter.createOrFold<vector::ShapeCastOp>(
        loc, resultTargetVecType, writeOp.getVector());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        writeOp, shapeCast, rankedReducedView,
        writeOp.getIndices().drop_back(dimsToDrop), AffineMapAttr::get(permMap),
        // TODO: support mask.
        /*mask=*/Value(), inBoundsAttr);
    return success();
  }
};

/// Canonicalization of a `vector.contraction %a, %b, %c` with row-major matmul
/// semantics to a contraction suitable for MMT (matrix matrix multiplication
/// with the RHS transposed) lowering.
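///
/// For example (illustrative): a plain row-major matmul with indexing maps
/// [(m, k), (k, n), (m, n)] is rewritten to the canonical MMT form
/// [(m, k), (n, k), (m, n)] by inserting a vector.transpose on the RHS.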
struct CanonicalizeContractMatmulToMMT final
    : OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern::OpRewritePattern;

1441  using FilterConstraintType =
1442  std::function<LogicalResult(vector::ContractionOp op)>;
1443 
1444  CanonicalizeContractMatmulToMMT(MLIRContext *context, PatternBenefit benefit,
1445  FilterConstraintType constraint)
1446  : OpRewritePattern<vector::ContractionOp>(context, benefit),
1447  filter(std::move(constraint)) {}
1448 
1449  LogicalResult matchAndRewrite(vector::ContractionOp op,
1450  PatternRewriter &rewriter) const override {
1451  if (failed(filter(op)))
1452  return failure();
1453 
1454  Location loc = op.getLoc();
1455  Value lhs = op.getLhs();
1456  Value rhs = op.getRhs();
1457  Value res = op.getAcc();
1458 
1459  // Set up the parallel/reduction structure in the right form.
1460  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
1461  auto infer = [&](MapList m) {
1462  return AffineMap::inferFromExprList(m, op.getContext());
1463  };
1464  AffineExpr m;
1465  AffineExpr n;
1466  AffineExpr k;
1467  bindDims(rewriter.getContext(), m, n, k);
1468  static constexpr std::array<int64_t, 2> perm = {1, 0};
1469  auto iteratorTypes = op.getIteratorTypes().getValue();
1470  SmallVector<AffineMap, 4> maps = op.getIndexingMapsArray();
1471  if (iteratorTypes.size() != 3 ||
1472  !vector::isParallelIterator(iteratorTypes[0]) ||
1473  !vector::isParallelIterator(iteratorTypes[1]) ||
1474  !vector::isReductionIterator(iteratorTypes[2]))
1475  return rewriter.notifyMatchFailure(op, "contraction is not a gemm");
1476 
1477  // The canonical form is "TNT" = A row-major, B col-major, C row-major.
1478  const auto canonicalForm = infer({{m, k}, {n, k}, {m, n}});
1479  if (maps == canonicalForm)
1480  return rewriter.notifyMatchFailure(op, "already in the canonical form");
1481 
1482  // Create a vector transpose making sure to emit zero/sign-extend at the
1483  // end.
1484  auto createTranspose = [&rewriter, loc](Value mat) -> Value {
1485  if (auto sext = mat.getDefiningOp<arith::ExtSIOp>()) {
1486  Value trans =
1487  rewriter.create<vector::TransposeOp>(loc, sext.getIn(), perm);
1488  VectorType newType =
1489  cast<VectorType>(trans.getType())
1490  .clone(cast<VectorType>(mat.getType()).getElementType());
1491  return rewriter.create<arith::ExtSIOp>(loc, newType, trans);
1492  }
1493  if (auto zext = mat.getDefiningOp<arith::ExtUIOp>()) {
1494  Value trans =
1495  rewriter.create<vector::TransposeOp>(loc, zext.getIn(), perm);
1496  VectorType newType =
1497  VectorType::get(cast<VectorType>(trans.getType()).getShape(),
1498  cast<VectorType>(mat.getType()).getElementType());
1499  return rewriter.create<arith::ExtUIOp>(loc, newType, trans);
1500  }
1501  return rewriter.create<vector::TransposeOp>(loc, mat, perm);
1502  };
1503 
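    // Dispatch over the remaining indexing-map combinations: every supported
    // non-canonical GEMM form is brought into the "TNT" canonical form by
    // transposing and/or swapping the operands before the contraction is
    // rebuilt with the canonical maps.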
1504  if (maps == infer({{m, k}, {k, n}, {m, n}})) {
1505  rhs = createTranspose(rhs);
1506  } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
1507  lhs = createTranspose(lhs);
1508  } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
1509  rhs = createTranspose(rhs);
1510  lhs = createTranspose(lhs);
1511  } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
1512  std::swap(rhs, lhs);
1513  rhs = createTranspose(rhs);
1514  lhs = createTranspose(lhs);
1515  } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
1516  std::swap(rhs, lhs);
1517  rhs = createTranspose(rhs);
1518  } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
1519  std::swap(lhs, rhs);
1520  lhs = createTranspose(lhs);
1521  } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
1522  std::swap(lhs, rhs);
1523  } else {
1524  return rewriter.notifyMatchFailure(op, "unhandled contraction form");
1525  }
1526  rewriter.replaceOpWithNewOp<vector::ContractionOp>(
1527  op, lhs, rhs, res, rewriter.getAffineMapArrayAttr(canonicalForm),
1528  op.getIteratorTypes());
1529  return success();
1530  }
1531 
1532 private:
1533  FilterConstraintType filter;
1534 };
1535 
1536 /// Pattern to fold arithmetic extensions on floating point data types into
1537 /// vector contraction operations. linalg.matmul introduces arithmetic
1538 /// extensions on its operands. Please see the MLIR snippet below for details.
1539 /// ```mlir
1540 /// "linalg.matmul"(%lhs, %rhs, %acc) ({
1541 /// ^bb0(%arg1: f16, %arg2: f16, %arg3: f32):
1542 /// %lhs_f32 = "arith.extf"(%arg1) : (f16) -> f32
1543 /// %rhs_f32 = "arith.extf"(%arg2) : (f16) -> f32
1544 /// %mul = "arith.mulf"(%lhs_f32, %rhs_f32) : (f32, f32) -> f32
1545 /// %acc = "arith.addf"(%arg3, %mul) : (f32, f32) -> f32
1546 /// "linalg.yield"(%acc) : (f32) -> ()
1547 /// })
1548 /// ```
1549 /// This restricts the native usage of mixed-precision NVIDIA Ampere Tensor
1550 /// Cores, i.e., `mma.sync.*.f32.f16.f16.f32` and `mma.sync.*.f32.bf16.bf16.f32`.
1551 /// This pattern folds the arithmetic extensions into the vector contraction
1552 /// and enables the use of native mixed-precision Tensor Core instructions.
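///
/// At the vector level the fold is, schematically (operand names and vector
/// types here are illustrative):
/// ```mlir
/// %lhs_f32 = arith.extf %lhs : vector<4x8xf16> to vector<4x8xf32>
/// %rhs_f32 = arith.extf %rhs : vector<8x4xf16> to vector<8x4xf32>
/// %res = vector.contract {...} %lhs_f32, %rhs_f32, %acc : ...
/// ==>
/// %res = vector.contract {...} %lhs, %rhs, %acc : ...
/// ```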
1553 template <typename ExtOp>
1554 struct FoldArithExtIntoContractionOp
1555  : public OpRewritePattern<vector::ContractionOp> {
1556  using OpRewritePattern::OpRewritePattern;
1557 
1558  LogicalResult matchAndRewrite(vector::ContractionOp contractOp,
1559  PatternRewriter &rewriter) const override {
1560 
1561  auto lhsDefOp = contractOp.getLhs().getDefiningOp<ExtOp>();
1562  auto rhsDefOp = contractOp.getRhs().getDefiningOp<ExtOp>();
1563 
1564  if (!lhsDefOp || !rhsDefOp) {
1565  return rewriter.notifyMatchFailure(contractOp,
1566  "no defining op on contract operands");
1567  }
1568 
1569  rewriter.replaceOpWithNewOp<vector::ContractionOp>(
1570  contractOp, lhsDefOp->getOperand(0), rhsDefOp->getOperand(0),
1571  contractOp.getAcc(), contractOp.getIndexingMapsAttr(),
1572  contractOp.getIteratorTypesAttr());
1573 
1574  return success();
1575  }
1576 };
1577 
1578 /// Pattern to fold a chained reduction into a series of vector additions and
1579 /// a final reduction. This form should require fewer subgroup operations.
1580 ///
1581 /// ```mlir
1582 /// %a = vector.reduction <add> %x, %acc
1583 /// %b = vector.reduction <add> %y, %a
1584 /// ==>
1585 /// %a = arith.addf %x, %y
1586 /// %b = vector.reduction <add> %a, %acc
1587 /// ```
1588 struct ChainedReduction final : OpRewritePattern<vector::ReductionOp> {
1589  using OpRewritePattern::OpRewritePattern;
1590 
1591  LogicalResult matchAndRewrite(vector::ReductionOp op,
1592  PatternRewriter &rewriter) const override {
1593  // TODO: Handle other combining kinds.
1594  if (op.getKind() != vector::CombiningKind::ADD)
1595  return failure();
1596 
1597  // Accumulator is optional.
1598  Value acc = op.getAcc();
1599  if (!acc)
1600  return failure();
1601 
1602  if (!acc.getType().isIntOrFloat())
1603  return failure();
1604 
1605  auto parentReduction = acc.getDefiningOp<vector::ReductionOp>();
1606  if (!parentReduction)
1607  return failure();
1608 
1609  Location loc = op.getLoc();
1610  Value vAdd;
1611  if (isa<IntegerType>(acc.getType())) {
1612  vAdd = rewriter.createOrFold<arith::AddIOp>(
1613  loc, parentReduction.getVector(), op.getVector());
1614  } else {
1615  vAdd = rewriter.create<arith::AddFOp>(loc, parentReduction.getVector(),
1616  op.getVector());
1617  }
1618  rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), vAdd,
1619  parentReduction.getAcc());
1620  return success();
1621  }
1622 };
1623 
1624 // Helper function that drops all non-scalable unit dimensions from a
1625 // VectorType, keeping at least one dimension to avoid generating 0-D vectors.
1626 // Scalable unit dimensions are not dropped. Folding such dimensions would
1627 // require "shifting" the scalable flag onto some other fixed-width dim (e.g.
1628 // vector<[1]x4xf32> -> vector<[4]xf32>). This could be implemented in the future.
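// For instance (illustrative): vector<1x[4]x1xf32> -> vector<[4]xf32> and
// vector<1x1xf32> -> vector<1xf32>, while vector<[1]x4xf32> is left unchanged
// because its unit dim is scalable.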
1629 static VectorType dropNonScalableUnitDimFromType(VectorType inVecTy) {
1630  auto inVecShape = inVecTy.getShape();
1631  SmallVector<int64_t> newShape;
1632  SmallVector<bool> newScalableDims;
1633  for (auto [dim, isScalable] :
1634  llvm::zip_equal(inVecShape, inVecTy.getScalableDims())) {
1635  if (dim == 1 && !isScalable)
1636  continue;
1637 
1638  newShape.push_back(dim);
1639  newScalableDims.push_back(isScalable);
1640  }
1641  // All dims have been dropped, return vector<1xeType>.
1642  if (newShape.empty()) {
1643  newShape.push_back(1);
1644  newScalableDims.push_back(false);
1645  }
1646 
1647  return VectorType::get(newShape, inVecTy.getElementType(), newScalableDims);
1648 }
1649 
1650 /// For vectors with at least one unit dim, replaces:
1651 /// elementwise(a, b)
1652 /// with:
1653 /// sc_a = shape_cast(a)
1654 /// sc_b = shape_cast(b)
1655 /// res = elementwise(sc_a, sc_b)
1656 /// return shape_cast(res)
1657 /// The newly inserted shape_cast ops drop the unit dim before the elementwise
1658 /// op and restore it afterwards. Vectors `a` and `b` are required to have
1659 /// rank > 1.
1660 ///
1661 /// Ex:
1662 /// %mul = arith.mulf %B_row, %A_row : vector<1x[4]xf32>
1663 /// %cast = vector.shape_cast %mul : vector<1x[4]xf32> to vector<[4]xf32>
1664 ///
1665 /// gets converted to:
1666 ///
1667 /// %B_row_sc = vector.shape_cast %B_row : vector<1x[4]xf32> to vector<[4]xf32>
1668 /// %A_row_sc = vector.shape_cast %A_row : vector<1x[4]xf32> to vector<[4]xf32>
1669 /// %mul = arith.mulf %B_row_sc, %A_row_sc : vector<[4]xf32>
1670 /// %cast_new = vector.shape_cast %mul : vector<[4]xf32> to vector<1x[4]xf32>
1671 /// %cast = vector.shape_cast %cast_new : vector<1x[4]xf32> to vector<[4]xf32>
1672 ///
1673 /// Patterns for folding shape_casts should instantly eliminate `%cast_new` and
1674 /// `%cast`.
1675 struct DropUnitDimFromElementwiseOps final
1676  : public OpTraitRewritePattern<OpTrait::Elementwise> {
1677  using OpTraitRewritePattern::OpTraitRewritePattern;
1678  LogicalResult matchAndRewrite(Operation *op,
1679  PatternRewriter &rewriter) const override {
1680  if (op->getNumResults() != 1 || op->getNumRegions() != 0)
1681  return failure();
1682 
1683  auto resultVectorType = dyn_cast<VectorType>(op->getResult(0).getType());
1684  if (!resultVectorType)
1685  return failure();
1686 
1687  // Check the operand pre-conditions. For `Elementwise` ops all operands are
1688  // guaranteed to have identical shapes (with some exceptions such as
1689  // `arith.select`) and it suffices to only check one of them.
1690  auto sourceVectorType = dyn_cast<VectorType>(op->getOperand(0).getType());
1691  if (!sourceVectorType)
1692  return failure();
1693  if (sourceVectorType.getRank() < 2)
1694  return failure();
1695 
1696  SmallVector<Value> newOperands;
1697  auto loc = op->getLoc();
1698  for (auto operand : op->getOperands()) {
1699  auto opVectorType = cast<VectorType>(operand.getType());
1700  auto newVType = dropNonScalableUnitDimFromType(opVectorType);
1701  if (newVType == opVectorType)
1702  return rewriter.notifyMatchFailure(op, "No unit dimension to remove.");
1703 
1704  auto opSC = rewriter.create<vector::ShapeCastOp>(loc, newVType, operand);
1705  newOperands.push_back(opSC);
1706  }
1707 
1708  VectorType newResultVectorType =
1709  dropNonScalableUnitDimFromType(resultVectorType);
1710  // Create an updated elementwise Op without unit dim.
1711  Operation *elementwiseOp =
1712  rewriter.create(loc, op->getName().getIdentifier(), newOperands,
1713  newResultVectorType, op->getAttrs());
1714 
1715  // Restore the unit dim by applying vector.shape_cast to the result.
1716  rewriter.replaceOpWithNewOp<ShapeCastOp>(op, resultVectorType,
1717  elementwiseOp->getResult(0));
1718 
1719  return success();
1720  }
1721 };
1722 
1723 /// Pattern to eliminate redundant zero-constants added to reduction operands.
1724 /// It's enough for there to be one initial zero value, so we can eliminate the
1725 /// extra ones that feed into `vector.reduction <add>`. These get created by the
1726 /// `ChainedReduction` pattern.
1727 ///
1728 /// ```mlir
1729 /// %a = arith.addf %x, %zero
1730 /// %b = arith.addf %a, %y
1731 /// %c = vector.reduction <add> %b, %acc
1732 /// ==>
1733 /// %b = arith.addf %a, %y
1734 /// %c = vector.reduction <add> %b, %acc
1735 /// ```
1736 struct ReduceRedundantZero final : OpRewritePattern<vector::ReductionOp> {
1737  using OpRewritePattern::OpRewritePattern;
1738 
1739  LogicalResult matchAndRewrite(vector::ReductionOp op,
1740  PatternRewriter &rewriter) const override {
1741  // TODO: Handle other reduction kinds and their identity values.
1742  if (op.getKind() != vector::CombiningKind::ADD)
1743  return failure();
1744 
1745  Type elemType = op.getSourceVectorType().getElementType();
1746  // The integer case should be handled by `arith.addi` folders; only check
1747  // for floats here.
1748  if (!isa<FloatType>(elemType))
1749  return failure();
1750 
1751  auto vAdd = op.getVector().getDefiningOp<arith::AddFOp>();
1752  if (!vAdd)
1753  return failure();
1754  auto addLhs = vAdd.getLhs().getDefiningOp<arith::AddFOp>();
1755  if (!addLhs)
1756  return failure();
1757 
1758  if (!matchPattern(addLhs.getRhs(), m_AnyZeroFloat()))
1759  return failure();
1760 
1761  auto newAdd = rewriter.create<arith::AddFOp>(vAdd.getLoc(), addLhs.getLhs(),
1762  vAdd.getRhs());
1763  rewriter.replaceOpWithNewOp<vector::ReductionOp>(op, op.getKind(), newAdd,
1764  op.getAcc());
1765  return success();
1766  }
1767 };
1768 
1769 /// Example:
1770 /// ```
1771 /// %a = vector.reduction <add> %x : vector<2xf32> into f32
1772 /// ```
1773 /// is transformed into:
1774 /// ```
1775 /// %y = vector.extract %x[0] : f32 from vector<2xf32>
1776 /// %z = vector.extract %x[1] : f32 from vector<2xf32>
1777 /// %a = arith.addf %y, %z : f32
1778 /// ```
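/// If an accumulator is present, it is folded in last. A sketch under the same
/// assumptions (value names are hypothetical):
/// ```
/// %a = vector.reduction <add> %x, %acc : vector<2xf32> into f32
/// ==>
/// %y = vector.extract %x[0] : f32 from vector<2xf32>
/// %z = vector.extract %x[1] : f32 from vector<2xf32>
/// %s = arith.addf %y, %z : f32
/// %a = arith.addf %s, %acc : f32
/// ```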
1779 struct BreakDownVectorReduction final : OpRewritePattern<vector::ReductionOp> {
1780  BreakDownVectorReduction(MLIRContext *context,
1781  unsigned maxNumElementsToExtract,
1782  PatternBenefit benefit)
1783  : OpRewritePattern(context, benefit),
1784  maxNumElementsToExtract(maxNumElementsToExtract) {}
1785 
1786  LogicalResult matchAndRewrite(vector::ReductionOp op,
1787  PatternRewriter &rewriter) const override {
1788  VectorType type = op.getSourceVectorType();
1789  if (type.isScalable() || op.isMasked())
1790  return failure();
1791  assert(type.getRank() == 1 && "Expected a 1-d vector");
1792 
1793  int64_t numElems = type.getNumElements();
1794  if (numElems > maxNumElementsToExtract) {
1795  return rewriter.notifyMatchFailure(
1796  op, llvm::formatv("has too many vector elements ({0}) to break down "
1797  "(max allowed: {1})",
1798  numElems, maxNumElementsToExtract));
1799  }
1800 
1801  Location loc = op.getLoc();
1802  SmallVector<Value> extracted(numElems, nullptr);
1803  for (auto [idx, extractedElem] : llvm::enumerate(extracted))
1804  extractedElem = rewriter.create<vector::ExtractOp>(
1805  loc, op.getVector(), static_cast<int64_t>(idx));
1806 
1807  Value res = extracted.front();
1808  for (auto extractedElem : llvm::drop_begin(extracted))
1809  res = vector::makeArithReduction(rewriter, loc, op.getKind(), res,
1810  extractedElem, op.getFastmathAttr());
1811  if (Value acc = op.getAcc())
1812  res = vector::makeArithReduction(rewriter, loc, op.getKind(), res, acc,
1813  op.getFastmathAttr());
1814 
1815  rewriter.replaceOp(op, res);
1816  return success();
1817  }
1818 
1819 private:
1820  unsigned maxNumElementsToExtract = 0;
1821 };
1822 
1823 /// Fold `mulf(tr(broadcast(A)), broadcast(B))` into `vector.outerproduct(A,
1824 /// B)`.
1825 /// Example:
1826 /// %lhsBcast = vector.broadcast %lhs : vector<4xi32> to vector<4x4xi32>
1827 /// %lhsT = vector.transpose %lhsBcast, [1, 0] : vector<4x4xi32> to vector<4x4xi32>
1828 /// %rhsBcast = vector.broadcast %rhs : vector<4xi32> to vector<4x4xi32>
1829 /// %mul = arith.muli %lhsT, %rhsBcast : vector<4x4xi32>
1830 ///
1831 /// Becomes :
1832 ///
1833 /// %res = vector.outerproduct %lhs, %rhs : vector<4xi32>, vector<4xi32>
1834 ///
1835 /// Supports only 1D-to-2D broadcasts. The following cases are not supported:
1836 /// %ex1 = vector.broadcast %lhsCast : vector<1x4xf32> to vector<4x4xf32>
1837 /// %ex2 = vector.broadcast %lhsCast : f32 to vector<4x4xf32>
1838 /// %ex3 = vector.broadcast %lhsCast : vector<1x1xf32> to vector<4x4xf32>
1839 template <typename MulOpType>
1840 struct FoldArithToVectorOuterProduct : public OpRewritePattern<MulOpType> {
1841  using OpRewritePattern<MulOpType>::OpRewritePattern;
1842  // Returns whether a vector.broadcast matches the requirements for an outer-
1843  // product pattern, i.e. a 1D-to-2D broadcast without broadcasted unit dims.
1844  bool isValidBroadcastSource(vector::BroadcastOp broadcastOp) const {
1845  // Fail unless this is a 1D-to-2D broadcast, so that we avoid generating
1846  // shape_casts/broadcasts that do not belong in this pattern.
1847  if (!broadcastOp.computeBroadcastedUnitDims().empty())
1848  return false;
1849  // Avoid broadcasts like f32 or vector<f32> -> ResType.
1850  auto srcType = dyn_cast<VectorType>(broadcastOp.getSourceType());
1851  return srcType && srcType.getRank() != 2;
1852  }
1853 
1854  LogicalResult matchAndRewrite(MulOpType mulOp,
1855  PatternRewriter &rewriter) const override {
1856  // Use dyn_cast so that the null check below is meaningful.
1856  auto resType = llvm::dyn_cast<VectorType>(mulOp.getResult().getType());
1857  if (!resType)
1858  return failure();
1859  if (resType.getRank() != 2)
1860  return failure();
1861  /// If operandA can be written as tr(broadcast(A)) and operandB as
1862  /// broadcast(B) where broadcasts are 1D-to-2D, create and return
1863  /// vector.outerproduct(A, B). Returns failure() otherwise.
1864  auto matchOuterProduct =
1865  [&](Value operandA,
1866  Value operandB) -> FailureOr<vector::OuterProductOp> {
1867  auto transposedLhs = operandA.getDefiningOp<vector::TransposeOp>();
1868  if (!transposedLhs)
1869  return failure();
1870  // Fail unless this is a true 2-D matrix transpose.
1871  ArrayRef<int64_t> permutation = transposedLhs.getPermutation();
1872  if (permutation.size() != 2 || permutation[0] != 1 || permutation[1] != 0)
1873  return failure();
1874 
1875  auto broadcastedLhs =
1876  transposedLhs.getVector().getDefiningOp<vector::BroadcastOp>();
1877  if (!broadcastedLhs || !isValidBroadcastSource(broadcastedLhs))
1878  return failure();
1879 
1880  auto broadcastedRhs = operandB.getDefiningOp<vector::BroadcastOp>();
1881  if (!broadcastedRhs || !isValidBroadcastSource(broadcastedRhs))
1882  return failure();
1883 
1884  return rewriter.create<vector::OuterProductOp>(
1885  mulOp->getLoc(), resType, broadcastedLhs.getSource(),
1886  broadcastedRhs.getSource(), Value(), vector::CombiningKind::ADD);
1887  };
1888 
1889  Value lhs = mulOp->getOperand(0), rhs = mulOp->getOperand(1);
1890  auto maybeOuterP = matchOuterProduct(lhs, rhs);
1891  // Handle commutativity: the transposed op is the outerproduct LHS.
1892  if (failed(maybeOuterP))
1893  maybeOuterP = matchOuterProduct(rhs, lhs);
1894  if (failed(maybeOuterP))
1895  return failure();
1896  rewriter.replaceOp(mulOp, maybeOuterP->getResult());
1897  return success();
1898  }
1899 };
1900 
1901 } // namespace
1902 
1903 void mlir::vector::populateFoldArithExtensionPatterns(
1904  RewritePatternSet &patterns) {
1905  patterns.add<FoldArithExtIntoContractionOp<arith::ExtFOp>,
1906  FoldArithExtIntoContractionOp<arith::ExtSIOp>>(
1907  patterns.getContext());
1908 }
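
// A minimal usage sketch for these populate entry points (illustrative only;
// `ctx` and `op` stand for the MLIRContext and the root operation):
//
//   RewritePatternSet patterns(ctx);
//   vector::populateFoldArithExtensionPatterns(patterns);
//   (void)applyPatternsAndFoldGreedily(op, std::move(patterns));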
1909 
1910 void mlir::vector::populateVectorMaskMaterializationPatterns(
1911  RewritePatternSet &patterns, bool force32BitVectorIndices,
1912  PatternBenefit benefit) {
1913  patterns.add<VectorCreateMaskOpConversion,
1914  MaterializeTransferMask<vector::TransferReadOp>,
1915  MaterializeTransferMask<vector::TransferWriteOp>>(
1916  patterns.getContext(), force32BitVectorIndices, benefit);
1917  patterns.add<FoldI1Select>(patterns.getContext(), benefit);
1918 }
1919 
1920 void mlir::vector::populateShapeCastFoldingPatterns(RewritePatternSet &patterns,
1921  PatternBenefit benefit) {
1922  patterns.add<ShapeCastOpFolder>(patterns.getContext(), benefit);
1923 }
1924 
1925 void mlir::vector::populateDropUnitDimWithShapeCastPatterns(
1926  RewritePatternSet &patterns, PatternBenefit benefit) {
1927  patterns.add<DropUnitDimFromElementwiseOps, ShapeCastOpFolder>(
1928  patterns.getContext(), benefit);
1929 }
1930 
1931 void mlir::vector::populateBubbleVectorBitCastOpPatterns(
1932  RewritePatternSet &patterns, PatternBenefit benefit) {
1933  patterns.add<BubbleDownVectorBitCastForExtract,
1934  BubbleDownBitCastForStridedSliceExtract,
1935  BubbleUpBitCastForInsert, BubbleUpBitCastForStridedSliceInsert>(
1936  patterns.getContext(), benefit);
1937 }
1938 
1939 void mlir::vector::populateBreakDownVectorBitCastOpPatterns(
1940  RewritePatternSet &patterns,
1941  std::function<bool(vector::BitCastOp)> controlFn, PatternBenefit benefit) {
1942  patterns.add<BreakDownVectorBitCast>(patterns.getContext(),
1943  std::move(controlFn), benefit);
1944 }
1945 
1946 void mlir::vector::populateVectorContractCanonicalizeMatmulToMMT(
1947  RewritePatternSet &patterns,
1948  std::function<LogicalResult(vector::ContractionOp)> constraint,
1949  PatternBenefit benefit) {
1950  patterns.add<CanonicalizeContractMatmulToMMT>(patterns.getContext(), benefit,
1951  std::move(constraint));
1952 }
1953 
1954 void mlir::vector::populateVectorReductionToContractPatterns(
1955  RewritePatternSet &patterns, PatternBenefit benefit) {
1956  patterns.add<MultiReduceToContract, CombineContractBroadcast,
1957  CombineContractABTranspose, CombineContractResultTranspose,
1958  ReorderCastOpsOnBroadcast, ReorderElementwiseOpsOnTranspose>(
1959  patterns.getContext(), benefit);
1960 }
1961 
1962 void mlir::vector::
1963     populateVectorTransferCollapseInnerMostContiguousDimsPatterns(
1964  RewritePatternSet &patterns, PatternBenefit benefit) {
1965  patterns.add<DropInnerMostUnitDimsTransferRead,
1966  DropInnerMostUnitDimsTransferWrite>(patterns.getContext(),
1967  benefit);
1968 }
1969 
1970 void mlir::vector::populateSinkVectorBroadcastPatterns(
1971  RewritePatternSet &patterns, PatternBenefit benefit) {
1972  patterns.add<ReorderCastOpsOnBroadcast, ReorderElementwiseOpsOnBroadcast>(
1973  patterns.getContext(), benefit);
1974 }
1975 
1976 void mlir::vector::populateChainedVectorReductionFoldingPatterns(
1977  RewritePatternSet &patterns, PatternBenefit benefit) {
1978  patterns.add<ChainedReduction>(patterns.getContext(), benefit);
1979  patterns.add<ReduceRedundantZero>(patterns.getContext(),
1980  PatternBenefit(benefit.getBenefit() + 1));
1981 }
1982 
1983 void mlir::vector::populateBreakDownVectorReductionPatterns(
1984  RewritePatternSet &patterns, unsigned maxNumElementsToExtract,
1985  PatternBenefit benefit) {
1986  patterns.add<BreakDownVectorReduction>(patterns.getContext(),
1987  maxNumElementsToExtract, benefit);
1988 }
1989 
1990 void mlir::vector::populateElementwiseToVectorOpsPatterns(
1991  RewritePatternSet &patterns) {
1992  patterns.add<FoldArithToVectorOuterProduct<arith::MulFOp>,
1993  FoldArithToVectorOuterProduct<arith::MulIOp>>(
1994  patterns.getContext());
1995 }
1996 
1997 //===----------------------------------------------------------------------===//
1998 // TableGen'd enum attribute definitions
1999 //===----------------------------------------------------------------------===//
2000 
2001 #include "mlir/Dialect/Vector/Transforms/VectorTransformsEnums.cpp.inc"