//===- Transforms.h - Linalg transformations as patterns --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_LINALG_TRANSFORMS_TRANSFORMS_H
#define MLIR_DIALECT_LINALG_TRANSFORMS_TRANSFORMS_H

#include <utility>

#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/TilingInterface.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"

namespace mlir {
namespace bufferization {
class AllocTensorOp;
class OneShotAnalysisState;
} // namespace bufferization

namespace linalg {

class LinalgOp;

//===----------------------------------------------------------------------===//
// Utils.
//===----------------------------------------------------------------------===//

/// Return vector::CombiningKind for the given op.
std::optional<vector::CombiningKind> getCombinerOpKind(Operation *combinerOp);

//===----------------------------------------------------------------------===//
// Bufferization-related transforms.
//===----------------------------------------------------------------------===//

struct BufferizeToAllocationOptions {
  enum class AllocOp { MemrefAlloc = 0, MemrefAlloca = 1 };
  AllocOp allocOp = AllocOp::MemrefAlloc;

  enum class MemcpyOp {
    MaterializeInDestination = 0,
    MemrefCopy = 1,
    LinalgCopy = 2
  };
  MemcpyOp memcpyOp = MemcpyOp::MaterializeInDestination;

  /// If set to "true", only the destination tensor operands are bufferized to
  /// a new allocation (and wrapped in "bufferization.to_tensor"), but not the
  /// targeted op itself.
  bool bufferizeDestinationOnly = false;

  /// If set to "true", a `memref.dealloc` operation will be emitted for each
  /// allocated buffer. Otherwise, the memory is leaked, which is useful if
  /// the buffer deallocation pipeline should be run after bufferization is
  /// done.
  bool emitDealloc = false;
};

/// Materialize a buffer allocation for the given tensor.pad op and lower the
/// op to linalg.fill/linalg.generic + bufferization.materialize_in_destination.
/// E.g.:
///
/// %0 = tensor.pad low[%l] high[%h] %t ...
///
/// is lowered to:
///
/// %alloc = memref.alloc
/// linalg.fill ... outs(%alloc)
/// %subview = memref.subview %alloc [%l] [...] [1]
/// bufferization.materialize_in_destination %t in %subview
/// %0 = bufferization.to_tensor %alloc restrict writable
///
/// In addition to rewriting the IR as shown above, this function returns the
/// newly allocated buffer. The `insertionPoint` parameter can be used to
/// specify a custom insertion point for the buffer allocation.
Value bufferizeToAllocation(RewriterBase &rewriter,
                            const BufferizeToAllocationOptions &options,
                            tensor::PadOp padOp, Attribute memorySpace = {},
                            Operation *insertionPoint = nullptr);
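
// A minimal usage sketch (editor's illustration, not part of the upstream
// header); assumes `rewriter` and `padOp` are already in hand:
// ```c++
// BufferizeToAllocationOptions options;
// options.emitDealloc = true;
// Value buffer = bufferizeToAllocation(rewriter, options, padOp);
// ```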

/// Materialize a buffer allocation for the given vector.mask op and bufferize
/// the op, including its region. E.g.:
///
/// %0 = vector.mask {
///   vector.transfer_write %v, %t : vector<16xf32>, tensor<?xf32>
/// } : vector<16xi1> -> tensor<?xf32>
///
/// is lowered to:
///
/// %alloc = memref.alloc
/// bufferization.materialize_in_destination %t in %alloc
/// vector.mask {
///   vector.transfer_write %arg0, %alloc : vector<16xf32>, memref<?xf32>
/// } : vector<16xi1>
/// %0 = bufferization.to_tensor %alloc restrict writable
///
/// In addition to rewriting the IR as shown above, this function returns the
/// newly allocated buffer. The `insertionPoint` parameter can be used to
/// specify a custom insertion point for the buffer allocation.
Value bufferizeToAllocation(RewriterBase &rewriter,
                            const BufferizeToAllocationOptions &options,
                            vector::MaskOp maskOp, Attribute memorySpace = {},
                            Operation *insertionPoint = nullptr);

/// Materialize a buffer allocation for the given bufferization.alloc_tensor op
/// and lower the op to memref.alloc + memref.tensor_store.
///
/// In addition to rewriting the IR, this function returns the newly allocated
/// buffer. The `insertionPoint` parameter can be used to specify a custom
/// insertion point for the buffer allocation.
Value bufferizeToAllocation(RewriterBase &rewriter,
                            const BufferizeToAllocationOptions &options,
                            bufferization::AllocTensorOp allocTensorOp,
                            Attribute memorySpace = {},
                            Operation *insertionPoint = nullptr);

/// Bufferize the given op with tensor semantics and materialize the result in
/// a newly allocated buffer.
///
/// Only bufferizable ops that bufferize to a memory write or have an
/// aliasing OpOperand (and do not themselves bufferize to an allocation) are
/// supported. They are bufferized using their BufferizableOpInterface
/// implementation.
///
/// Selected ops that bufferize to an allocation (or need special handling) are
/// also supported:
/// - tensor.pad
/// - vector.mask
///
/// This function returns the newly allocated buffer. The `insertionPoint`
/// parameter can be used to specify a custom insertion point for the buffer
/// allocation.
Value bufferizeToAllocation(RewriterBase &rewriter,
                            const BufferizeToAllocationOptions &options,
                            Operation *op, Attribute memorySpace = {},
                            Operation *insertionPoint = nullptr);

/// Try to eliminate tensor::EmptyOps inside `op` that are anchored on a
/// LinalgOp. This transform looks for LinalgOps that have an unused output
/// operand and an input operand that is rooted in a tensor::EmptyOp. The
/// tensor::EmptyOp uses are replaced with the output operand and the two
/// operands of the LinalgOp are swapped.
///
/// Example:
/// %0 = tensor.empty()
/// %1 = linalg.matmul ins(...) outs(%0)
/// %2 = linalg.generic ins(%1) outs(%dest) {
///   ^bb0(%in: f32, %out: f32):
///   // out not used
/// }
///
/// The IR is transformed as follows:
/// %0 = tensor.empty()
/// %1 = linalg.matmul ins(...) outs(%dest)
/// %2 = linalg.generic ins(%0) outs(%1) {
///   ^bb0(%in: f32, %out: f32):
///   // Use %out instead of %in
/// }
///
/// The "ins" operand has no uses inside the body of the LinalgOp and can be
/// folded away with existing cleanup patterns. Afterwards, the tensor::EmptyOp
/// can also fold away.
LogicalResult linalgOpAnchoredEmptyTensorEliminationStep(
    RewriterBase &rewriter, Operation *op,
    bufferization::OneShotAnalysisState &state);

//===----------------------------------------------------------------------===//
// Structs that configure the behavior of various transformations.
//===----------------------------------------------------------------------===//

using TileSizeComputationFunction =
    std::function<SmallVector<Value, 4>(OpBuilder &, Operation *)>;

struct LinalgTilingOptions {
  /// Computation function that returns the tile sizes for each operation.
  /// Delayed construction of constant tile sizes should occur to interoperate
  /// with folding.
  TileSizeComputationFunction tileSizeComputationFunction = nullptr;

  LinalgTilingOptions &
  setTileSizeComputationFunction(TileSizeComputationFunction fun) {
    tileSizeComputationFunction = std::move(fun);
    return *this;
  }
  /// Set the `tileSizeComputationFunction` to return the values `ts`. The
  /// values must not fold away when tiling. Otherwise, use a more robust
  /// `tileSizeComputationFunction`.
  LinalgTilingOptions &setTileSizes(const SmallVector<Value, 4> &ts) {
    tileSizeComputationFunction = [=](OpBuilder &, Operation *) { return ts; };
    return *this;
  }
  /// Convenience function to set the `tileSizeComputationFunction` to a
  /// function that computes tile sizes at the point they are needed. Allows
  /// proper interaction with folding.
  LinalgTilingOptions &setTileSizes(ArrayRef<int64_t> ts);

  /// Tile all dynamic dimensions by 1. I.e., scalarize those dimensions.
  /// Note: `scalarizeDynamicDims` and `setTileSizes` cannot be used together.
  LinalgTilingOptions &scalarizeDynamicDims();

  /// The interchange vector to reorder the tiled loops.
  SmallVector<unsigned, 4> interchangeVector = {};

  LinalgTilingOptions &setInterchange(ArrayRef<unsigned> interchange) {
    interchangeVector.assign(interchange.begin(), interchange.end());
    return *this;
  }

  /// The type of tile loops to generate.
  LinalgTilingLoopType loopType = LinalgTilingLoopType::Loops;

  LinalgTilingOptions &setLoopType(LinalgTilingLoopType lt) {
    loopType = lt;
    return *this;
  }

  /// When specified, specifies distribution of generated tile loops to
  /// processors.
  std::optional<LinalgLoopDistributionOptions> distribution;

  LinalgTilingOptions &
  setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
    distribution = std::move(distributionOptions);
    return *this;
  }

  /// Specification markers of how to distribute the `linalg.tiled_loop`.
  SmallVector<StringRef, 2> distributionTypes = {};

  LinalgTilingOptions &setDistributionTypes(ArrayRef<StringRef> types) {
    distributionTypes.assign(types.begin(), types.end());
    return *this;
  }

  /// Peel the specified loops.
  SmallVector<int64_t> peeledLoops;

  LinalgTilingOptions &setPeeledLoops(ArrayRef<int64_t> loops) {
    peeledLoops.clear();
    peeledLoops.append(loops.begin(), loops.end());
    return *this;
  }
};

struct LinalgTilingAndFusionOptions {
  /// Tile sizes used to tile the root operation.
  SmallVector<int64_t> tileSizes;
  LinalgTilingAndFusionOptions &setTileSizes(ArrayRef<int64_t> ts) {
    tileSizes.assign(ts.begin(), ts.end());
    return *this;
  }
  /// Tile interchange used to permute the tile loops.
  SmallVector<int64_t> tileInterchange;
  /// When specified, specifies distribution of generated tile loops to
  /// processors.
  std::optional<LinalgLoopDistributionOptions> tileDistribution;
  LinalgTilingAndFusionOptions &
  setDistributionOptions(LinalgLoopDistributionOptions distributionOptions) {
    tileDistribution = std::move(distributionOptions);
    return *this;
  }
};

struct LinalgPaddingOptions {
  /// A padding value for every operand.
  SmallVector<Attribute> paddingValues;
  LinalgPaddingOptions &setPaddingValues(ArrayRef<Attribute> pv) {
    paddingValues.assign(pv.begin(), pv.end());
    return *this;
  }
  /// A list of iterator dimensions to pad.
  SmallVector<int64_t> paddingDimensions;
  LinalgPaddingOptions &setPaddingDimensions(ArrayRef<int64_t> pd) {
    paddingDimensions.assign(pd.begin(), pd.end());
    return *this;
  }
  /// A list of multiples to which each padding dimension should be padded.
  std::optional<SmallVector<int64_t>> padToMultipleOf;
  LinalgPaddingOptions &setPadToMultipleOf(ArrayRef<int64_t> m) {
    padToMultipleOf.emplace(m.begin(), m.end());
    return *this;
  }
  /// A flag for every operand to mark the PadOp as nofold which enables
  /// packing for statically shaped operands.
  SmallVector<bool> packPaddings;
  LinalgPaddingOptions &setPackPaddings(ArrayRef<bool> pp) {
    packPaddings.assign(pp.begin(), pp.end());
    return *this;
  }
  /// A number of loops to hoist the PadOp out for every operand.
  SmallVector<int64_t> hoistPaddings;
  LinalgPaddingOptions &setHoistPaddings(ArrayRef<int64_t> hp) {
    hoistPaddings.assign(hp.begin(), hp.end());
    return *this;
  }
  /// A permutation vector for every operand used to transpose the packed
  /// PadOp results.
  SmallVector<SmallVector<int64_t>> transposePaddings;
  LinalgPaddingOptions &
  setTransposePaddings(ArrayRef<SmallVector<int64_t>> tp) {
    transposePaddings.assign(tp.begin(), tp.end());
    return *this;
  }
  enum class CopyBackOp : int8_t {
    None = 0,
    BufferizationMaterializeInDestination = 1,
    LinalgCopy = 2
  };
  /// The op to be used for copying the padded result to the original
  /// destination tensor.
  CopyBackOp copyBackOp = CopyBackOp::BufferizationMaterializeInDestination;
  LinalgPaddingOptions &setCopyBackOp(CopyBackOp op) {
    copyBackOp = op;
    return *this;
  }
};
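
// A hypothetical configuration sketch (editor's illustration, not part of the
// upstream header); the concrete dimensions and multiples are illustrative:
// ```c++
// LinalgPaddingOptions options;
// options.setPaddingDimensions({0, 1})
//     .setPadToMultipleOf({16, 16})
//     .setCopyBackOp(LinalgPaddingOptions::CopyBackOp::LinalgCopy);
// ```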

/// Callback function type used to perform the allocation for the promoted
/// `subView`. In `boundingSubViewSize` a best attempt is made to find the
/// smallest constant value for the size of the buffer needed for each
/// dimension. If that is not possible, contains the dynamic size of the
/// subview. The callback should return the buffer to use.
using AllocBufferCallbackFn = std::function<std::optional<Value>(
    OpBuilder &b, memref::SubViewOp subView,
    ArrayRef<Value> boundingSubViewSize, DataLayout &layout)>;

/// Callback function type used to deallocate the buffers used to hold the
/// promoted subview.
using DeallocBufferCallbackFn =
    std::function<LogicalResult(OpBuilder &b, Value buffer)>;

/// Callback function type used to insert copy from original subview to
/// subview of the promoted region for the read operands/subview of promoted
/// region to original subview for the results. The copy has to happen from
/// `src` to `dst`.
using CopyCallbackFn =
    std::function<LogicalResult(OpBuilder &b, Value src, Value dst)>;

struct LinalgPromotionOptions {
  /// Indices of subViews to promote. If `std::nullopt`, try to promote all
  /// operands.
  std::optional<DenseSet<unsigned>> operandsToPromote;
  LinalgPromotionOptions &setOperandsToPromote(ArrayRef<int64_t> operands) {
    operandsToPromote = DenseSet<unsigned>();
    operandsToPromote->insert(operands.begin(), operands.end());
    return *this;
  }
  /// If ith element of `useFullTiles` is true the full view should be used
  /// for the promoted buffer of the ith operand in `operandsToPromote`.
  /// Otherwise the partial view will be used. The decision is defaulted to
  /// `useFullTileBuffersDefault` when `useFullTileBuffers` is std::nullopt and
  /// for operands missing from `useFullTileBuffers`.
  std::optional<llvm::SmallBitVector> useFullTileBuffers;
  LinalgPromotionOptions &setUseFullTileBuffers(ArrayRef<bool> useFullTiles) {
    unsigned size = useFullTiles.size();
    llvm::SmallBitVector tmp(size, false);
    for (unsigned i = 0; i < size; ++i)
      tmp[i] = useFullTiles[i];
    useFullTileBuffers = tmp;
    return *this;
  }
  /// If true all operands unspecified by `useFullTileBuffers` will use the
  /// full view, otherwise the partial view.
  bool useFullTileBuffersDefault = false;
  LinalgPromotionOptions &setUseFullTileBuffersByDefault(bool use) {
    useFullTileBuffersDefault = use;
    return *this;
  }
  /// Alignment of promoted buffer. If `std::nullopt` do not specify alignment.
  std::optional<unsigned> alignment;
  LinalgPromotionOptions &setAlignment(unsigned align) {
    alignment = align;
    return *this;
  }
  /// Memory space of promoted buffer. If `std::nullopt` do not specify memory
  /// space.
  std::optional<Attribute> memorySpace;
  LinalgPromotionOptions &setMemorySpace(Attribute memorySpc) {
    memorySpace = memorySpc;
    return *this;
  }
  /// Use alloca with the default allocation scheme.
  bool useAlloca = false;
  LinalgPromotionOptions &setUseAlloca(bool use) {
    useAlloca = use;
    return *this;
  }
  /// Callback function to do the allocation of the promoted buffer. If
  /// std::nullopt, then the default allocation scheme of allocating a
  /// memref<?xi8> buffer followed by a view operation is used.
  std::optional<AllocBufferCallbackFn> allocationFn;
  std::optional<DeallocBufferCallbackFn> deallocationFn;
  LinalgPromotionOptions &
  setAllocationDeallocationFns(AllocBufferCallbackFn const &allocFn,
                               DeallocBufferCallbackFn const &deallocFn) {
    allocationFn = allocFn;
    deallocationFn = deallocFn;
    return *this;
  }
  /// Callback function to do the copy of data to and from the promoted
  /// subview. If std::nullopt then a memref.copy is used.
  std::optional<CopyCallbackFn> copyInFn;
  std::optional<CopyCallbackFn> copyOutFn;
  LinalgPromotionOptions &setCopyInOutFns(CopyCallbackFn const &copyIn,
                                          CopyCallbackFn const &copyOut) {
    copyInFn = copyIn;
    copyOutFn = copyOut;
    return *this;
  }
};
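
// A hypothetical configuration sketch (editor's illustration, not part of the
// upstream header); operand indices and alignment are illustrative:
// ```c++
// LinalgPromotionOptions options;
// options.setOperandsToPromote({0, 1})
//     .setUseFullTileBuffersByDefault(true)
//     .setAlignment(16);
// ```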

/// Split Reduction options.
struct SplitReductionOptions {
  // Ratio used to split the reduction dimension. If the ratio is <= 1,
  // nothing will be done.
  int64_t ratio = 0;
  // Index where the extra dimension is added to the intermediate tensor
  // shape.
  unsigned index = 0;
  // If the inner dimension after splitting is parallel or reduction.
  bool innerParallel = false;
};

/// Function signature to control reduction splitting. This returns
/// `SplitReductionOptions`.
// TODO: don't use unsigned unless doing bit manipulation.
using ControlSplitReductionFn =
    std::function<SplitReductionOptions(LinalgOp op)>;

//===----------------------------------------------------------------------===//
// Preconditions that ensure the corresponding transformation succeeds and can
// be applied as a rewrite pattern.
//===----------------------------------------------------------------------===//

/// Return true if two `linalg.generic` operations with producer/consumer
/// relationship through `fusedOperand` can be fused using elementwise op
/// fusion.
bool areElementwiseOpsFusable(OpOperand *fusedOperand);

/// Promote memref.subviews feeding linalg-on-buffers operations.
LogicalResult promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options);

/// Return success if the operation can be vectorized.
LogicalResult vectorizeOpPrecondition(Operation *op,
                                      ArrayRef<int64_t> inputVectorSizes = {},
                                      ArrayRef<bool> inputScalableVecDims = {},
                                      bool vectorizeNDExtract = false,
                                      bool flatten1DDepthwiseConv = false);

//===----------------------------------------------------------------------===//
// Transformations exposed as functional-style API calls.
//===----------------------------------------------------------------------===//

using LinalgLoops = SmallVector<Operation *, 4>;

/// Transformation to drop unit-extent dimensions from `linalg.generic`
/// operations.
struct ControlDropUnitDims {
  enum class RankReductionStrategy {
    ReassociativeReshape,
    ExtractInsertSlice
  };

  RankReductionStrategy rankReductionStrategy =
      RankReductionStrategy::ReassociativeReshape;

  using ControlFnTy = std::function<SmallVector<unsigned>(Operation *)>;
  ControlFnTy controlFn = [](Operation *op) {
    if (auto genericOp = dyn_cast_or_null<GenericOp>(op)) {
      return llvm::to_vector(llvm::seq<unsigned>(0, genericOp.getNumLoops()));
    }
    if (auto padOp = dyn_cast_or_null<tensor::PadOp>(op)) {
      return llvm::to_vector(
          llvm::seq<unsigned>(0, padOp.getSourceType().getRank()));
    }
    return SmallVector<unsigned>{};
  };
};
LogicalResult dropUnitDims(RewriterBase &rewriter, GenericOp genericOp,
                           const ControlDropUnitDims &options);
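
// A minimal usage sketch (editor's illustration, not part of the upstream
// header): drop unit dims using extract/insert_slice instead of reshapes.
// ```c++
// ControlDropUnitDims options;
// options.rankReductionStrategy =
//     ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice;
// if (failed(dropUnitDims(rewriter, genericOp, options)))
//   return failure();
// ```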

/// Fuse two `linalg.generic` operations that have a producer-consumer
/// relationship captured through `fusedOperand`. The method expects
/// that `areElementwiseOpsFusable` returns true for the given `fusedOperand`.
struct ElementwiseOpFusionResult {
  Operation *fusedOp;
  llvm::DenseMap<Value, Value> replacements;
  static llvm::SmallDenseSet<int>
  getPreservedProducerResults(GenericOp producer, GenericOp consumer);
};
FailureOr<ElementwiseOpFusionResult>
fuseElementwiseOps(RewriterBase &rewriter, OpOperand *fusedOperand);

/// Try to peel and canonicalize loop `op` and return the new result.
/// Also applies affine_min/max bounds simplification on the fly where relevant.
// TODO: Add support for scf.parallel and affine.for loops.
SmallVector<Value> peelLoop(RewriterBase &rewriter, Operation *op);

/// Peel `loops` and apply affine_min/max bounds simplification on the fly
/// where relevant.
void peelLoops(RewriterBase &rewriter, ArrayRef<scf::ForOp> loops);

/// Pad the iterator dimensions `paddingDimensions` of all `opToPad` operands
/// to a static bounding box. The original `opToPad` is cloned and operates on
/// the padded tensors.
///
/// * "options.padToMultipleOf" indicates that each padding dimension should be
///   padded to the specified multiple.
/// * Use "options.paddingValues" and "options.packPaddings" to set padding
///   value and nofold attribute of the created tensor::PadOps, respectively.
/// * The unpadded results (extracted slice of the cloned operation) are
///   returned via `replacements`.
/// * The tensor::PadOps are returned via `padOps`.
/// * "options.copyBackOp" specifies the op type for copying back the unpadded
///   result to the original destination tensor.
LogicalResult rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
                                const LinalgPaddingOptions &options,
                                LinalgOp &paddedOp,
                                SmallVector<Value> &replacements,
                                SmallVector<tensor::PadOp> &padOps);

namespace detail {

/// Helper struct to hold the results of building a packing loop nest.
struct PackingResult {
  SmallVector<OpFoldResult> offsets, sizes, strides;
  SmallVector<Value> clonedLoopIvs, leadingPackedTensorIndexings;
  GenericOp maybeTransposeOp;
  tensor::PadOp hoistedPadOp;
};

/// Build the packing loop nest required to hoist `opToHoist` above
/// `outermostEnclosingForOp`.
/// The loop nest is built just before `outermostEnclosingForOp`.
FailureOr<PackingResult>
buildPackingLoopNest(RewriterBase &rewriter, tensor::PadOp opToHoist,
                     scf::ForOp outermostEnclosingForOp,
                     ArrayRef<int64_t> transposeVector);

} // namespace detail

/// Mechanically hoist padding operations on tensors by `numLoops` into a new,
/// generally larger tensor. This achieves packing of multiple padding ops into
/// a larger tensor. On success, `opToHoist` is replaced by the cloned version
/// in the packing loop so the caller can continue reasoning about the padding
/// operation. If `transposeVector` is non-empty, hoist padding introduces a
/// GenericOp to transpose the padded tensor before inserting it into the packed
/// tensor. A `transposeVector` can change the storage order of the padded
/// tensor but does not change the order of the pack or compute loops.
///
/// TODO: In the future, we should consider rewriting as a tensor.pack after
/// hoisting since this abstraction is now available.
///
/// Example in pseudo-mlir:
/// =======================
///
/// If hoistPaddingOnTensors is called with `numLoops` = 2 on the following IR.
/// ```
///    scf.for (%i, %j, %k)
///      %st0 = tensor.extract_slice f(%i, %k) : ... to tensor<?x?xf32>
///      %0 = tensor.pad %st0 low[0, 0] high[...] {
///      ^bb0( ... ):
///        linalg.yield %pad
///      } : tensor<?x?xf32> to tensor<4x8xf32>
///      compute(%0)
/// ```
///
/// IR resembling the following is produced:
///
/// ```
///    scf.for (%i) {
///      %packed_init = tensor.empty range(%j) : tensor<?x4x8xf32>
///      %packed = scf.for (%k) iter_args(%p : %packed_init) {
///        %st0 = tensor.extract_slice f(%i, %k) : ... to tensor<?x?xf32>
///        %0 = tensor.pad %st0 low[0, 0] high[...] {
///        ^bb0( ... ):
///          linalg.yield %pad
///        } : tensor<?x?xf32> to tensor<4x8xf32>
///        %1 = tensor.insert_slice %0 ...
///            : tensor<4x8xf32> to tensor<?x4x8xf32>
///        scf.yield %1: tensor<?x4x8xf32>
///      } -> tensor<?x4x8xf32>
///      scf.for (%j, %k) {
///        %st0 = tensor.extract_slice %packed [%k, 0, 0][1, 4, 8][1, 1, 1] :
///                 tensor<?x4x8xf32> to tensor<4x8xf32>
///        compute(%st0)
///      }
///    }
/// ```
FailureOr<Value>
hoistPaddingOnTensors(RewriterBase &rewriter, tensor::PadOp opToHoist,
                      int64_t numLoops, ArrayRef<int64_t> transposeVector,
                      tensor::PadOp &hoistedOp,
                      SmallVectorImpl<GenericOp> &transposeOps);
/// Calls into `hoistPaddingOnTensors` with a local IRRewriter.
FailureOr<Value>
hoistPaddingOnTensors(tensor::PadOp opToHoist, int64_t numLoops,
                      ArrayRef<int64_t> transposeVector,
                      tensor::PadOp &hoistedOp,
                      SmallVectorImpl<GenericOp> &transposeOps);

/// Apply padding and hoisting to `linalgOp` according to the configuration
/// specified in `options`.
FailureOr<LinalgOp> padAndHoistLinalgOp(RewriterBase &rewriter,
                                        LinalgOp linalgOp,
                                        const LinalgPaddingOptions &options);

/// Split the given `op` into two parts along the given iteration space
/// `dimension` at the specified `splitPoint`, and return the two parts.
/// If the second part is statically known to be empty, do not create it
/// and return nullptr instead. Error state is signalled by returning
/// a pair of nullptrs.
///
/// For example, the following op:
///
///   linalg.matmul ins(%0, %1 : tensor<128x32xf32>, tensor<32x64xf32>)
///                 outs(%2 : tensor<128x64xf32>)
///
/// split along the first dimension at position 42 will result in:
///
///   %3 = tensor.extract_slice %0[0, 0][42, 32][1, 1]
///   %4 = tensor.extract_slice %2[0, 0][42, 64][1, 1]
///   %5 = linalg.matmul ins(%3, %1 : tensor<42x32xf32>, tensor<32x64xf32>)
///                      outs(%4 : tensor<42x64xf32>)
///   %6 = tensor.insert_slice %5 into %2[0, 0][42, 64][1, 1]
///
///   %7 = tensor.extract_slice %0[42, 0][86, 32][1, 1]
///   %8 = tensor.extract_slice %6[42, 0][86, 64][1, 1]
///   %9 = linalg.matmul ins(%7, %1 : tensor<86x32xf32>, tensor<32x64xf32>)
///                      outs(%8 : tensor<86x64xf32>)
///   tensor.insert_slice %9 into %6[42, 0][86, 64][1, 1]
///
/// Note that there is no simplification other than constant propagation applied
/// to slice extraction and insertion.
std::pair<TilingInterface, TilingInterface> splitOp(RewriterBase &rewriter,
                                                    TilingInterface op,
                                                    unsigned dimension,
                                                    OpFoldResult splitPoint);

/// Perform standalone tiling of a single LinalgOp by `tileSizes` and permute
/// the loop nest according to `interchangeVector`.
/// The permutation is expressed as a list of integers that specify
/// the new ordering of the loop nest. The length of `interchangeVector`
/// must be equal to the length of `tileSizes`.
/// An empty vector is interpreted as the identity permutation and the
/// transformation returns early.
///
/// Return a struct containing the tiled loops in the specified order
/// and the cloned op if successful, std::nullopt otherwise.
///
/// E.g. the permutation `(i,j,k) -> (j,k,i)` is expressed by
/// `interchangeVector = [1,2,0]`. All values in `interchangeVector` must be
/// integers, in the range 0..`tileSizes.size()` without duplications
/// (i.e. `[1,1,2]` is an invalid permutation).
struct TiledLinalgOp {
  LinalgOp op;
  SmallVector<Operation *, 8> loops;
  SmallVector<Value> tensorResults;
};
FailureOr<TiledLinalgOp> tileLinalgOp(RewriterBase &b, LinalgOp op,
                                      const LinalgTilingOptions &options);

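// A minimal usage sketch (editor's illustration, not part of the upstream
// header); tile sizes and interchange are illustrative:
// ```c++
// LinalgTilingOptions options;
// options.setTileSizes({8, 16, 0}).setInterchange({1, 0, 2});
// FailureOr<TiledLinalgOp> tiled = tileLinalgOp(rewriter, linalgOp, options);
// if (failed(tiled))
//   return failure();
// ```
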
/// Interchange the `iterator_types` and `indexing_maps` dimensions and adapt
/// the index accesses of `op`. This is an in-place transformation controlled
/// by `interchangeVector`. An empty vector is interpreted as the identity
/// permutation and the transformation returns early.
///
/// E.g. the permutation `(i,j,k) -> (j,k,i)` is expressed with
/// `interchangeVector = [1,2,0]`. All values in `interchangeVector` must be
/// integers, in the range 0..`op.rank` without duplications
/// (i.e. `[1,1,2]` is an invalid permutation).
///
/// Return failure if the permutation is not valid.
FailureOr<GenericOp> interchangeGenericOp(RewriterBase &rewriter,
                                          GenericOp genericOp,
                                          ArrayRef<unsigned> interchangeVector);

/// Create a GenericOp from the given named operation `namedOp` and replace
/// namedOp.
/// Return failure if `namedOp` is a GenericOp or misses a region builder.
FailureOr<GenericOp> generalizeNamedOp(RewriterBase &rewriter,
                                       LinalgOp namedOp);

/// Create a namedOp from the given GenericOp and replace the GenericOp.
/// Currently we can specialize only trivial linalg copy operations.
FailureOr<LinalgOp> specializeGenericOp(RewriterBase &rewriter,
                                        GenericOp genericOp);

/// Create a new buffer using the `allocationFn` provided. The size of this
/// buffer is the smallest constant bounding size along each dimension that
/// can be computed for the size of the result of `subView`. Returns the
/// allocated buffer as `fullLocalView` and the view that matches the size of
/// the result of subview operation as `partialLocalView`.
struct PromotionInfo {
  Value fullLocalView;
  Value partialLocalView;
};
FailureOr<PromotionInfo>
promoteSubviewAsNewBuffer(OpBuilder &b, Location loc, memref::SubViewOp subView,
                          const AllocBufferCallbackFn &allocationFn,
                          DataLayout &layout);

/// Promote the `subViews` into a new buffer allocated at the insertion point
/// `b`. Promotion occurs in 3 steps:
///   1. Create a new buffer for a full tile (i.e. not clipped at the
///      boundary).
///   2. Take a full view on the buffer.
///   3. Take a partial slice of the full view in step 2. and copy into it.
///
/// Return the modified linalg op (the modification happens in place) as well
/// as all the copy ops created.
FailureOr<LinalgOp> promoteSubViews(OpBuilder &b, LinalgOp op,
                                    const LinalgPromotionOptions &options);

/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> allocateWorkgroupMemory(OpBuilder &builder,
                                             memref::SubViewOp subview,
                                             ArrayRef<Value> sizeBounds,
                                             DataLayout &);

/// In case of GPU group memory there is no need to deallocate.
LogicalResult deallocateWorkgroupMemory(OpBuilder &, Value /*buffer*/);

/// Create Memref copy operations and add gpu barrier guards before and after
/// the copy operation to ensure data integrity.
LogicalResult copyToWorkgroupMemory(OpBuilder &b, Value src, Value dst);

/// Allocate the subview in the GPU private memory.
std::optional<Value> allocateGPUPrivateMemory(OpBuilder &builder,
                                              memref::SubViewOp subview,
                                              ArrayRef<Value> sizeBounds,
                                              DataLayout &);

/// Normal copy between src and dst.
LogicalResult copyToGPUPrivateMemory(OpBuilder &b, Value src, Value dst);

/// In case of GPU private memory there is no need to deallocate since the
/// memory is freed when going outside of the scope.
LogicalResult deallocateGPUPrivateMemory(OpBuilder &, Value /*buffer*/);

/// Emit a suitable vector form for an operation. If provided,
/// `inputVectorSizes` are used to vectorize this operation. `inputVectorSizes`
/// must match the rank of the iteration space of the operation and the sizes
/// must be smaller than or equal to their counterpart iteration space sizes,
/// if static. `inputVectorSizes` also allows the vectorization of operations
/// with dynamic shapes.
LogicalResult vectorize(RewriterBase &rewriter, Operation *op,
                        ArrayRef<int64_t> inputVectorSizes = {},
                        ArrayRef<bool> inputScalableVecDims = {},
                        bool vectorizeNDExtract = false,
                        bool flatten1DDepthwiseConv = false);
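
// A minimal usage sketch (editor's illustration, not part of the upstream
// header); the vector sizes are illustrative:
// ```c++
// SmallVector<int64_t> vectorSizes = {8, 32};
// SmallVector<bool> scalableDims = {false, false};
// if (succeeded(vectorizeOpPrecondition(op, vectorSizes, scalableDims)))
//   (void)vectorize(rewriter, op, vectorSizes, scalableDims);
// ```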

/// Emit a suitable vector form for a Copy op with fully static shape.
LogicalResult vectorizeCopy(RewriterBase &builder, memref::CopyOp copyOp);

/// Emit a loop nest of `scf.for` with the proper body for `linalgOp`.
FailureOr<LinalgLoops> linalgOpToLoops(RewriterBase &rewriter,
                                       LinalgOp linalgOp);

/// Emit a loop nest of `scf.parallel` with the proper body for `linalgOp`.
FailureOr<LinalgLoops> linalgOpToParallelLoops(RewriterBase &rewriter,
                                               LinalgOp linalgOp);

/// Emit a loop nest of `affine.for` with the proper body for `linalgOp`.
FailureOr<LinalgLoops> linalgOpToAffineLoops(RewriterBase &rewriter,
                                             LinalgOp linalgOp);

/// Creates a number of ranges equal to the number of non-zero entries in
/// `tileSizes`, one for each loop of the LinalgOp that is tiled. The
/// `tileSizes` argument has one entry per surrounding loop. It uses zero as
/// the convention that a particular loop is not tiled. This convention
/// simplifies implementations by avoiding affine map manipulations. The
/// returned ranges correspond to the loop ranges, in the proper order, that
/// are tiled and for which new loops will be created. Also the function
/// returns a map from loop indices of the LinalgOp to the corresponding
/// non-empty range indices of newly created loops.
using LoopIndexToRangeIndexMap = DenseMap<int, int>;
std::tuple<SmallVector<Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(RewriterBase &b, Location loc, AffineMap map,
                    ArrayRef<OpFoldResult> allShapeSizes,
                    ArrayRef<OpFoldResult> allTileSizes);

namespace detail {
template <typename T>
struct MultiSizeSpecificationBase {
  /// Tile sizes.
  T lowTileSize, highTileSize;
  /// Number of tiles associated with each size.
  T lowTripCount, highTripCount;
};

template <typename T>
struct ContinuousTileSizeSpecificationBase {
  /// Tile sizes.
  SmallVector<T> tileSizes;
  /// Number of tiles associated with each size.
  SmallVector<T> tripCounts;
};

} // namespace detail

/// A description of a multi-size tiling comprising tile sizes and numbers of
/// tiles, expressed as Values which may or may not be constant. Multi-size
/// currently means two-size.
struct MultiSizeSpecification
    : public detail::MultiSizeSpecificationBase<Value> {};
struct StaticMultiSizeSpecification
    : public detail::MultiSizeSpecificationBase<int64_t> {};

struct ContinuousTileSizeSpecification
    : public detail::ContinuousTileSizeSpecificationBase<Value> {};
struct StaticContinuousTileSizeSpecification
    : public detail::ContinuousTileSizeSpecificationBase<int64_t> {};

/// Emits the IR computing the multi-sized tiling specification with two tile
/// sizes not exceeding `targetSize`, each divisible by `sizeDivisor`, such
/// that there exist numbers of tiles with these sizes that fully cover the
/// given iteration space `dimension` of the structured `op`.
///
/// The computation is as follows:
///
///   b = originalTripCount floordiv sizeDivisor
///   t = (targetSize + sizeDivisor - 1) floordiv sizeDivisor
///   d = (b + t - 1) floordiv t
///   s = (b floordiv d) * sizeDivisor
///   v = b % d
///   u = d - v
///
/// where the tile sizes are `s` and `s` + `sizeDivisor`, and the numbers of
/// the corresponding tiles are `u` and `v`, respectively. Alternatively,
///
///   s * u + (s + sizeDivisor) * v == original size,
///   where s mod sizeDivisor = 0.
///
/// Expects all values to be positive. In some cases with the target tile size
/// sufficiently close to the dimension shape and non-unit divisor, it is
/// impossible to compute such sizes. If `emitAssertion` is set, also emit the
/// assertion that size computation succeeded.
///
/// Returns the specification consisting of both tile values and the number of
/// tiles of each size.
FailureOr<MultiSizeSpecification>
computeMultiTileSizes(OpBuilder &builder, LinalgOp op, unsigned dimension,
                      OpFoldResult targetSize, OpFoldResult divisor,
                      bool emitAssertions = true);
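
// Worked example of the computation above (editor's illustration): for an
// original trip count of 130, targetSize = 32 and sizeDivisor = 2:
//   b = 130 floordiv 2 = 65
//   t = (32 + 2 - 1) floordiv 2 = 16
//   d = (65 + 16 - 1) floordiv 16 = 5
//   s = (65 floordiv 5) * 2 = 26
//   v = 65 mod 5 = 0,  u = 5 - 0 = 5
// i.e. five tiles of size 26 and zero tiles of size 28 cover 5 * 26 = 130.
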
FailureOr<StaticMultiSizeSpecification>
computeStaticMultiTileSizes(LinalgOp op, unsigned dimension, int64_t targetSize,
                            int64_t divisor);

FailureOr<StaticContinuousTileSizeSpecification>
computeStaticContinuousTileSizes(LinalgOp op, unsigned dimension,
                                 unsigned targetSize);
FailureOr<ContinuousTileSizeSpecification>
computeContinuousTileSizes(OpBuilder &builder, TilingInterface op,
                           unsigned dimension, OpFoldResult targetSize,
                           bool emitAssertions);
/// Rewrite a TilingInterface `op` to a tiled `scf.forall`, applying
/// tiling by `numThreads`.
/// If non-empty, the `mapping` is added as an attribute to the
/// resulting `scf.forall`.
/// Zero tile sizes indicate that the dimension is not tiled, and can be
/// thought of as tiling by the full size of data. It is the user's
/// responsibility to ensure that `numThreads` is a valid tiling specification
/// (i.e. that it only tiles parallel dimensions, e.g. in the Linalg case).
struct ForallTilingResult {
  Operation *tileOp;
  Operation *tiledOp;
};
FailureOr<ForallTilingResult> tileToForallOp(RewriterBase &builder,
                                             TilingInterface op,
                                             ArrayRef<OpFoldResult> numThreads,
                                             std::optional<ArrayAttr> mapping);

/// Same as `tileToForallOp`, but calculate the number of threads
/// required using the given tileSizes.
FailureOr<ForallTilingResult>
tileToForallOpUsingTileSizes(RewriterBase &builder, TilingInterface op,
                             ArrayRef<OpFoldResult> tileSizes,
                             std::optional<ArrayAttr> mapping);

/// Transformation information returned after reduction tiling.
struct ForallReductionTilingResult {
  /// The partial reduction tiled op generated.
  SmallVector<Operation *> parallelTiledOps;
  /// The final reduction operation merging all the partial reductions.
  SmallVector<Operation *> mergeOps;
  /// Initial values used for partial reductions.
  SmallVector<Value> initialValues;
  /// The `scf.forall` operation that iterates over the tiles.
  scf::ForallOp loops;
};

/// Method to tile a reduction to parallel iterations computing partial
/// reductions. After the loop all the partial reductions are merged into a
/// final reduction. For example for the following sequence
///
/// ```mlir
/// %0 = linalg.generic %in ["parallel", "reduction"]
///   : tensor<7x9xf32> -> tensor<7xf32>
/// ```
///
/// into:
///
/// ```mlir
/// %0 = linalg.fill ... : tensor<7x4xf32>
/// %1 = scf.forall (%iv) in (%c4) shared_outs(%arg0 = %0)
///   -> (tensor<7x4xf32>) {
///   %2 = tensor.extract_slice %arg3 : tensor<7x4xf32> to tensor<7xf32>
///   %3 = tensor.extract_slice %in : tensor<7x9xf32> -> tensor<7x?xf32>
///   %4 = linalg.generic %2, %3 ["parallel", "reduction"]
///     : tensor<7x?xf32> -> tensor<7xf32>
///   %5 = tensor.insert_slice %3, %arg0[0, %iv] : tensor<7x4xf32>
/// }
/// %6 = linalg.generic %1 ["parallel", "reduction"]
///   : tensor<7x4xf32> -> tensor<7xf32>
/// ```
FailureOr<ForallReductionTilingResult>
tileReductionUsingForall(RewriterBase &b, PartialReductionOpInterface op,
                         ArrayRef<OpFoldResult> numThreads,
                         ArrayRef<OpFoldResult> tileSizes = {},
                         std::optional<ArrayAttr> mapping = std::nullopt);

/// All indices returned by IndexOp should be invariant with respect to
/// tiling. Therefore, if an operation is tiled, we have to transform the
/// indices accordingly, i.e. offset them by the values of the corresponding
/// induction variables that are captured implicitly in the body of the op.
///
/// Example. `linalg.generic` before tiling:
///
/// #id_2d = (i, j) -> (i, j)
/// #pointwise_2d_trait = {
///   indexing_maps = [#id_2d, #id_2d],
///   iterator_types = ["parallel", "parallel"]
/// }
/// linalg.generic #pointwise_2d_trait %operand, %result {
///   ^bb0(%operand_in: f32, %result_in: f32):
///     %i = linalg.index 0 : index
///     %j = linalg.index 1 : index
///     <some operations that use %i, %j>
/// }: memref<50x100xf32>, memref<50x100xf32>
///
/// After tiling pass with tile sizes 10 and 25:
///
/// #strided = (i, j)[s0, s1, s2] -> (i * s1 + s0 + j * s2)
///
/// %c1 = arith.constant 1 : index
/// %c0 = arith.constant 0 : index
/// %c25 = arith.constant 25 : index
/// %c10 = arith.constant 10 : index
/// operand_dim_0 = dim %operand, 0 : memref<50x100xf32>
/// operand_dim_1 = dim %operand, 1 : memref<50x100xf32>
/// scf.for %k = %c0 to operand_dim_0 step %c10 {
///   scf.for %l = %c0 to operand_dim_1 step %c25 {
///     %4 = memref.subview %operand[%k, %l][%c10, %c25][%c1, %c1]
///       : memref<50x100xf32> to memref<?x?xf32, #strided>
///     %5 = memref.subview %result[%k, %l][%c10, %c25][%c1, %c1]
///       : memref<50x100xf32> to memref<?x?xf32, #strided>
///     linalg.generic pointwise_2d_trait %4, %5 {
///       ^bb0(%operand_in: f32, %result_in: f32):
///         %i = linalg.index 0 : index
///         %j = linalg.index 1 : index
///         // Indices `k` and `l` are implicitly captured in the body.
///         %transformed_i = arith.addi %i, %k : index // index `i` offset by %k
///         %transformed_j = arith.addi %j, %l : index // index `j` offset by %l
///         // Every use of %i, %j is replaced with %transformed_i,
///         // %transformed_j.
///         <some operations that use %transformed_i, %transformed_j>
///     }: memref<?x?xf32, #strided>, memref<?x?xf32, #strided>
///   }
/// }
///
/// TODO: Investigate whether mixing implicit and explicit indices
/// does not lead to losing information.
void transformIndexOps(RewriterBase &b, LinalgOp op,
                       SmallVectorImpl<Value> &ivs,
                       const LoopIndexToRangeIndexMap &loopIndexToRangeIndex);

/// Apply transformation to split the single linalg op reduction into a
/// parallel and reduction dimension. Then create a new linalg.generic op
/// doing the rest of the reduction. Return the new linalg op with an extra
/// parallel dimension or failure if the transformation didn't happen.
///
/// Example:
/// ```
///  %r = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
///                                        affine_map<(d0) -> ()>],
///       iterator_types = ["reduction"]}
///  ins(%in : tensor<32xf32>)
///  outs(%out : tensor<f32>) {
///  ^bb0(%arg1: f32, %arg2: f32):
///    %y = arith.addf %arg1, %arg2 : f32
///    linalg.yield %y : f32
///  } -> tensor<f32>
/// ```
/// To:
/// ```
///  %cst = arith.constant 0.000000e+00 : f32
///  %0 = tensor.expand_shape %in [[0, 1]] : tensor<32xf32> into tensor<4x8xf32>
///  %1 = tensor.empty [4] : tensor<4xf32>
///  %2 = linalg.fill ins(%cst : f32) outs(%1 : tensor<4xf32>) -> tensor<4xf32>
///  %3 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
///                                        affine_map<(d0, d1) -> (d0)>],
///    iterator_types = ["parallel", "reduction"]}
///    ins(%0 : tensor<4x8xf32>) outs(%2 : tensor<4xf32>) {
///    ^bb0(%arg3: f32, %arg4: f32):
///      %5 = arith.addf %arg3, %arg4 : f32
///      linalg.yield %5 : f32
///    } -> tensor<4xf32>
///  %r = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>,
///                                        affine_map<(d0) -> ()>],
///    iterator_types = ["reduction"]}
///    ins(%3 : tensor<4xf32>) outs(%out : tensor<f32>) {
///    ^bb0(%arg3: f32, %arg4: f32):
///      %5 = arith.addf %arg3, %arg4 : f32
///      linalg.yield %5 : f32
///    } -> tensor<f32>
/// ```
struct SplitReductionResult {
  Operation *initOrAlloc;
  FillOp fillOp;
  LinalgOp splitLinalgOp;
  LinalgOp resultCombiningLinalgOp;
};
FailureOr<SplitReductionResult>
splitReduction(RewriterBase &b, LinalgOp op,
               const ControlSplitReductionFn &controlSplitReductionFn,
               bool useAlloc = false);
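
// A minimal usage sketch (editor's illustration, not part of the upstream
// header): split every reduction by an illustrative ratio of 4, inserting the
// extra parallel dimension at index 0.
// ```c++
// ControlSplitReductionFn control = [](LinalgOp op) {
//   return SplitReductionOptions{/*ratio=*/4, /*index=*/0,
//                                /*innerParallel=*/false};
// };
// FailureOr<SplitReductionResult> result =
//     splitReduction(rewriter, linalgOp, control);
// ```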

/// Scaling-based implementation of the split reduction transformation.
/// Instead of introducing an ExpandShapeOp, this rewrites a reduction
/// dimension `k` into `k * scale + kk`.
///
/// Example:
/// ```
///  %0 = linalg.matmul ins(%A, %B: tensor<16x256xf32>, tensor<256x32xf32>)
///    outs(%C: tensor<16x32xf32>) -> tensor<16x32xf32>
/// ```
///
/// Is transformed to:
///
/// ```
///  #map0 = affine_map<(d0, d1, d2, d3) -> (d0, d2 * 4 + d3)>
///  #map1 = affine_map<(d0, d1, d2, d3) -> (d2 * 4 + d3, d1)>
///  #map2 = affine_map<(d0, d1, d2, d3) -> (d2, d3)>
///  #map3 = affine_map<(d0, d1, d2, d3) -> (d0, d1, d2)>
///  #map4 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
///  #map5 = affine_map<(d0, d1, d2) -> (d0, d1)>
///  %0 = tensor.empty [16, 32, 64] : tensor<16x32x64xf32>
///  %cst = arith.constant 0.000000e+00 : f32
///  %1 = linalg.fill ins(%cst : f32) outs(%0 : tensor<16x32x64xf32>) ->
///     tensor<16x32x64xf32>
///  %2 = tensor.empty [64, 4] : tensor<64x4xi1>
///
///  %3 = linalg.generic {indexing_maps = [#map0, #map1, #map2, #map3],
///    iterator_types = ["parallel", "parallel", "parallel", "reduction"]}
///    ins(%A, %B, %2 : tensor<16x256xf32>, tensor<256x32xf32>,
///    tensor<64x4xi1>)
///    outs(%1 : tensor<16x32x64xf32>) {
///      ^bb0(%arg3: f32, %arg4: f32, %arg5: i1, %arg6: f32):
///        %5 = arith.mulf %arg3, %arg4 : f32
///        %6 = arith.addf %arg6, %5 : f32
///        linalg.yield %6 : f32
///  } -> tensor<16x32x64xf32>
///
///  %4 = linalg.generic {indexing_maps = [#map4, #map5],
///    iterator_types = ["parallel", "parallel", "reduction"]}
///    ins(%3 : tensor<16x32x64xf32>)
///    outs(%C : tensor<16x32xf32>) {
///      ^bb0(%arg3: f32, %arg4: f32):
///        %5 = arith.addf %arg3, %arg4 : f32
///        linalg.yield %5 : f32
///  } -> tensor<16x32xf32>
///
///  return %4 : tensor<16x32xf32>
/// ```
FailureOr<SplitReductionResult>
splitReductionByScaling(RewriterBase &b, LinalgOp op,
                        const ControlSplitReductionFn &controlSplitReductionFn,
                        bool useAlloc = false);

/// Return `true` if a given sequence of dimensions are contiguous in the
/// range of the specified indexing map.
bool isDimSequencePreserved(AffineMap map, ReassociationIndicesRef dimSequence);
/// Return `true` if all sequences of dimensions specified in `dimSequences` are
/// contiguous in all the ranges of the `maps`.
bool areDimSequencesPreserved(ArrayRef<AffineMap> maps,
                              ArrayRef<ReassociationIndices> dimSequences);

struct CollapseResult {
  SmallVector<Value> results;
  LinalgOp collapsedOp;
};

/// Collapses dimensions of linalg.generic/linalg.copy operation. A precondition
/// to calling this method is that for each list in `foldedIterationDims`, the
/// sequence of dimensions is contiguous in domains of all `indexing_maps` of
/// the `linalgOp`. This can be checked using the `areDimSequencesPreserved`
/// method. When valid, the method also collapses the operands of the op.
/// Returns replacement values of the results of the original `linalgOp` by
/// inserting reshapes to get back values of compatible types.
FailureOr<CollapseResult>
collapseOpIterationDims(LinalgOp op,
                        ArrayRef<ReassociationIndices> foldedIterationDims,
                        RewriterBase &rewriter);

struct LowerPackResult {
  tensor::PadOp padOp;
  tensor::ExpandShapeOp expandShapeOp;
  linalg::TransposeOp transposeOp;
};

/// Rewrite pack as pad + reshape + transpose.
FailureOr<LowerPackResult> lowerPack(RewriterBase &rewriter,
                                     tensor::PackOp packOp);

struct LowerUnPackOpResult {
  tensor::EmptyOp emptyOp;
  linalg::TransposeOp transposeOp;
  tensor::CollapseShapeOp collapseShapeOp;
  tensor::ExtractSliceOp extractSliceOp;
};

/// Rewrite unpack as empty + transpose + reshape + extract_slice.
FailureOr<LowerUnPackOpResult> lowerUnPack(RewriterBase &rewriter,
                                           tensor::UnPackOp unPackOp);

/// Struct to hold the result of a `pack` call.
struct PackResult {
  SmallVector<tensor::PackOp> packOps;
  linalg::LinalgOp packedLinalgOp;
  SmallVector<tensor::UnPackOp> unPackOps;
};
/// Implement packing of a single LinalgOp by `packedSizes`.
/// There must be one packedSizes entry per `linalgOp` iterator.
/// Return the packed Linalg op on success, failure otherwise.
FailureOr<PackResult> pack(RewriterBase &rewriter, linalg::LinalgOp linalgOp,
                           ArrayRef<OpFoldResult> packedSizes);
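
// A minimal usage sketch (editor's illustration, not part of the upstream
// header): pack a 3-iterator op with one illustrative size per iterator.
// ```c++
// SmallVector<OpFoldResult> packedSizes = getAsIndexOpFoldResult(
//     rewriter.getContext(), ArrayRef<int64_t>{8, 16, 32});
// FailureOr<PackResult> packed = pack(rewriter, linalgOp, packedSizes);
// ```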

/// Struct to hold the result of a `packTranspose` call.
struct PackTransposeResult {
  tensor::PackOp transposedPackOp;
  linalg::LinalgOp transposedLinalgOp;
  tensor::UnPackOp transposedUnPackOp;
};
/// Transpose a single PackOp -> LinalgOp -> UnPackOp chain and return the
/// transposed PackOp -> LinalgOp -> UnPackOp chain after replacements.
/// Return failure if either:
///   1. the `packOp` does not have the `linalgOp` as its unique use.
///   2. the `maybeUnPackOp`, if specified must be a consumer of the result tied
///      to the unique `packOp` use.
///   3. `outerPerm` (resp. `innerPerm`) must be valid permutations of
///      `packOp.getOuterDimsPerm` (resp. `packOp.getInnerDimsPerm`) or empty.
FailureOr<PackTransposeResult>
packTranspose(RewriterBase &rewriter, tensor::PackOp packOp,
              linalg::LinalgOp linalgOp, tensor::UnPackOp maybeUnPackOp,
              ArrayRef<int64_t> outerPerm, ArrayRef<int64_t> innerPerm);

/// Pack a LinalgOp by greedily inferring matmul dimensions (m, n, k) where m
/// and n are proper parallel dimensions and k is a proper reduction
/// dimension. Packing occurs by rewriting the op as a linalg.generic and
/// calling linalg::pack by `mnkPackedSizes`. The order of the packed
/// dimensions is customizable: the `mnkOrder` is a permutation of {0, 1, 2}
/// to reorder {m, n, k} into one of the 8 possible forms. The outer
/// dimensions of the operands are not permuted at this time, this is left for
/// future work.
FailureOr<PackResult>
packMatmulGreedily(RewriterBase &rewriter, LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> mnkPackedSizes,
                   ArrayRef<int64_t> mnkPaddedSizesNextMultipleOf,
                   ArrayRef<int64_t> mnkOrder);

struct BlockPackMatmulOptions {
  /// Minor block factors (mb, nb, kb) for packing relayout where mb, nb are
  /// the parallel dimensions and kb is the reduction dimension.
  SmallVector<int64_t, 3> blockFactors;

  /// If true, allows packing of dimensions that only partially fit into the
  /// block factors.
  bool allowPadding = true;

  /// Next multiples of the packing sizes.
  SmallVector<int64_t, 3> mnkPaddedSizesNextMultipleOf;

  /// Permutation of matmul (M, N, K) dimensions order.
  SmallVector<int64_t, 3> mnkOrder = {0, 1, 2};

  /// Transpose LHS outer block layout [MB][KB] -> [KB][MB].
  bool lhsTransposeOuterBlocks = false;

  /// Transpose LHS inner block layout [mb][kb] -> [kb][mb].
  bool lhsTransposeInnerBlocks = false;

  /// Transpose RHS outer block layout [KB][NB] -> [NB][KB].
  bool rhsTransposeOuterBlocks = true;

  /// Transpose RHS inner block layout [kb][nb] -> [nb][kb].
  bool rhsTransposeInnerBlocks = true;
};

/// Function type which is used to control matmul packing.
/// It is expected to return valid packing configuration for each operation.
/// Lack of packing options indicates that no valid configuration could be
/// assigned and the operation will not be packed.
using ControlBlockPackMatmulFn =
    std::function<std::optional<BlockPackMatmulOptions>(linalg::LinalgOp)>;

/// Pack a matmul operation into blocked 4D layout.
///
/// Relayout a matmul operation into blocked layout with two levels of
/// subdivision:
///   - major 2D blocks - outer dimensions, consist of minor blocks
///   - minor 2D blocks - inner dimensions, consist of scalar elements
///
/// A 2D matmul MxNxK gets reshaped into blocked 4D representation
/// as: [MB][NB][mb][nb] += [MB][KB][mb][kb] * [NB][KB][nb][kb]
/// where the (MB, NB, KB) dimensions represent the major blocks,
/// and the (mb, nb, kb) are the minor blocks of their respective
/// original 2D dimensions (M, N, K).
///
/// Depending on the initial operands' data layout and the specified
/// packing options, the major blocks dimensions might get transposed
/// e.g., [MB][KB] -> [KB][MB]. The minor blocks can also be transposed
/// e.g., [mb][kb] -> [kb][mb].
/// Any present batch dimensions remain unchanged.
/// The final result is unpacked back to the original shape.
///
/// Return failure if no valid packing options are provided.
FailureOr<PackResult>
blockPackMatmul(RewriterBase &rewriter, linalg::LinalgOp linalgOp,
                const ControlBlockPackMatmulFn &controlPackMatmul);
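
// A minimal usage sketch (editor's illustration, not part of the upstream
// header): return one fixed, illustrative blocking for every candidate op.
// ```c++
// ControlBlockPackMatmulFn control =
//     [](linalg::LinalgOp op) -> std::optional<BlockPackMatmulOptions> {
//   BlockPackMatmulOptions options;
//   options.blockFactors = {32, 32, 64}; // mb, nb, kb
//   return options;
// };
// FailureOr<PackResult> packed = blockPackMatmul(rewriter, linalgOp, control);
// ```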

/// Rewrite tensor.from_elements to linalg.generic.
FailureOr<Operation *>
rewriteInDestinationPassingStyle(RewriterBase &rewriter,
                                 tensor::FromElementsOp fromElementsOp);

/// Rewrite tensor.generate to linalg.generic.
FailureOr<Operation *>
rewriteInDestinationPassingStyle(RewriterBase &rewriter,
                                 tensor::GenerateOp generateOp);

/// Rewrite tensor.pad to linalg.generic + tensor.insert_slice.
FailureOr<Operation *> rewriteInDestinationPassingStyle(RewriterBase &rewriter,
                                                        tensor::PadOp padOp);

/// Convert linalg.conv_2d_nhwc_hwcf into linalg.generic (for img2col packing)
/// and linalg.matmul.
///
/// A convolution operation can be written as a matrix-matrix multiplication by
/// unfolding the cross-correlation between input and filter and explicitly
/// copying the overlapped sliding window inputs.
///
/// Consider 2D input X with single channel input and output and 2x2 filter W:
/// [x(0, 0)  , x(0, 1)  , ...,   x(0, n)  ]
/// [x(1, 0)  , x(1, 1)  , ...,   x(1, n)  ]
/// [.        , .        , .,     .        ]            [w(0, 0), w(0, 1)]
/// [.        , .        , .,     .        ]    (conv)  [w(1, 0), w(1, 1)]
/// [.        , .        , .,     .        ]
/// [x(n-1, 0), x(n-1, 1), ..., x(n-1, n-1)]
///
/// The packed input data (img2col) is a matrix with |rows| = output spatial
/// size, |columns| = filter spatial size. To compute the output Y(i, j) we need
/// to calculate the dot product between filter window at input X(x, y) and the
/// filter which will look like the following where r.h.s is the img2col matrix
/// and l.h.s is the flattened filter:
///
/// [x(0,0), x(0,1), x(1,0), x(1,1)]
/// [x(0,1), x(1,1), x(0,2), x(1,2)] (matmul) [w(0,0), w(0,1), w(1,0), w(1,1)]
/// [x(0,1), x(1,1), x(0,2), x(1,2)]
/// [  .   ,   .   ,   .   ,   .   ]
///
/// In general for 2D case with (N, H, W, C) input and (Kh, Kw, C, D) filter
/// and output (N, Ho, Wo, D) the convolution is the following matrix-matrix
/// multiplication (Ho x Wo, Kh x Kw x C) * (Kh x Kw x C, D) for each input in
/// the N input. For the case where N > 1 it's a batched matrix-matrix
/// multiplication.
///
/// On success, return both the operation that produces the img2col tensor and
/// the final operation of the sequence that replaces the original convolution.
FailureOr<std::pair<Operation *, Operation *>>
rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp);

/// Same as the above but for Fhwc channel orderings in the filter. In this case
/// the matrix multiplication is actually a row-wise dot-product rather than a
/// row-column dot-product. This is to avoid transposing the filter matrix which
/// would be required for a regular matrix multiplication to produce the correct
/// output dimensions.
FailureOr<std::pair<Operation *, Operation *>>
rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp);

/// Similar to rewriteInIm2Col with linalg::Conv2DNhwcHwcfOp except there is no
/// reduction among the input channels, so each convolution can be a
/// matrix-vector product and, by transposing both the input and the filter so
/// that channels are outermost, the computation is a batched matrix-vector
/// product.
FailureOr<std::pair<Operation *, Operation *>>
rewriteInIm2Col(RewriterBase &rewriter,
                linalg::DepthwiseConv2DNhwcHwcOp convOp);

/// Similar to rewriteInIm2Col with linalg::Conv2DNhwcHwcfOp except because the
/// channels are to the left of the image shape dimensions, the position of the
/// contraction dimension in the resulting matmul is reversed. This swaps the
/// LHS and RHS of the matmul when compared with nhwc (i.e. (D, C x Kh x Kw) *
/// (C x Kh x Kw, Ho x Wo))
FailureOr<std::pair<Operation *, Operation *>>
rewriteInIm2Col(RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp);

/// Convert linalg.conv_2d_nhwc_fhwc(_q) to linalg.conv_2d_nhwc_hwcf(_q) by
/// materializing transpose.
FailureOr<Operation *> transposeConv2D(RewriterBase &rewriter,
                                       linalg::Conv2DNhwcFhwcOp op);
FailureOr<Operation *> transposeConv2D(RewriterBase &rewriter,
                                       linalg::Conv2DNhwcFhwcQOp op);

/// Convert Linalg matmul ops to transposed variants.
FailureOr<Operation *> transposeMatmul(RewriterBase &rewriter,
                                       linalg::MatmulOp op,
                                       bool transposeLHS = true);
FailureOr<Operation *> transposeBatchMatmul(RewriterBase &rewriter,
                                            linalg::BatchMatmulOp op,
                                            bool transposeLHS = true);

/// Convert linalg.conv_2d_nhwc_fhwc to Winograd Conv2D algorithm
/// F(m x m, r x r). m is the dimension size of output and r is the dimension
/// size of filter.
FailureOr<Operation *> winogradConv2D(RewriterBase &rewriter,
                                      linalg::Conv2DNhwcFhwcOp op, int64_t m,
                                      int64_t r);

//===----------------------------------------------------------------------===//
// Rewrite patterns wrapping transformations.
// TODO: every single such pattern should be a close to noop wrapper around a
// functional-style API call.
//===----------------------------------------------------------------------===//

/// Rewrites 2-D convolution ops with size-1 window dimensions into 1-D
/// convolution ops.
template <typename Conv2DOp, typename Conv1DOp>
struct DownscaleSizeOneWindowed2DConvolution final
    : public OpRewritePattern<Conv2DOp> {
  using OpRewritePattern<Conv2DOp>::OpRewritePattern;

  FailureOr<Conv1DOp> returningMatchAndRewrite(Conv2DOp convOp,
                                               PatternRewriter &rewriter) const;

  LogicalResult matchAndRewrite(Conv2DOp convOp,
                                PatternRewriter &rewriter) const override {
    return returningMatchAndRewrite(convOp, rewriter);
  }
};

extern template struct DownscaleSizeOneWindowed2DConvolution<Conv2DNhwcHwcfOp,
                                                             Conv1DNwcWcfOp>;
extern template struct DownscaleSizeOneWindowed2DConvolution<Conv2DNchwFchwOp,
                                                             Conv1DNcwFcwOp>;

/// Rewrites 2-D depthwise convolution ops with size-1 (w, kw) or (h, kh)
/// dimensions into 1-D depthwise convolution ops.
struct DownscaleDepthwiseConv2DNhwcHwcOp final
    : public OpRewritePattern<DepthwiseConv2DNhwcHwcOp> {
  DownscaleDepthwiseConv2DNhwcHwcOp(MLIRContext *context,
                                    PatternBenefit benefit = 1)
      : OpRewritePattern<DepthwiseConv2DNhwcHwcOp>(context, benefit) {}

  FailureOr<DepthwiseConv1DNwcWcOp>
  returningMatchAndRewrite(DepthwiseConv2DNhwcHwcOp convOp,
                           PatternRewriter &rewriter) const;

  LogicalResult matchAndRewrite(DepthwiseConv2DNhwcHwcOp convOp,
                                PatternRewriter &rewriter) const override {
    return returningMatchAndRewrite(convOp, rewriter);
  }
};

struct DownscaleConv2DOp final : public OpRewritePattern<Conv2DOp> {
  DownscaleConv2DOp(MLIRContext *context, PatternBenefit benefit = 1)
      : OpRewritePattern<Conv2DOp>(context, benefit) {}

  FailureOr<Conv1DOp> returningMatchAndRewrite(Conv2DOp convOp,
                                               PatternRewriter &rewriter) const;

  LogicalResult matchAndRewrite(Conv2DOp convOp,
                                PatternRewriter &rewriter) const override {
    return returningMatchAndRewrite(convOp, rewriter);
  }
};

///
/// Linalg generalization pattern.
///
/// Apply the `generalization` transformation as a pattern.
/// See `generalization` for more details.
//
// TODO: Automatic default pattern class that just unwraps a function
// returning FailureOr<GenericOp>.
struct LinalgGeneralizationPattern
    : public OpInterfaceRewritePattern<LinalgOp> {
  using OpInterfaceRewritePattern<LinalgOp>::OpInterfaceRewritePattern;

  /// `matchAndRewrite` implementation that returns the significant
  /// transformed pieces of IR.
  FailureOr<GenericOp>
  returningMatchAndRewrite(LinalgOp op, PatternRewriter &rewriter) const {
    return generalizeNamedOp(rewriter, op);
  }

  LogicalResult matchAndRewrite(LinalgOp op,
                                PatternRewriter &rewriter) const override {
    return returningMatchAndRewrite(op, rewriter);
  }
};

struct LinalgSpecializationPattern : public OpRewritePattern<GenericOp> {
  using OpRewritePattern<GenericOp>::OpRewritePattern;

  FailureOr<GenericOp>
  returningMatchAndRewrite(GenericOp op, PatternRewriter &rewriter) const {
    return specializeGenericOp(rewriter, op);
  }

  LogicalResult matchAndRewrite(GenericOp op,
                                PatternRewriter &rewriter) const override {
    return returningMatchAndRewrite(op, rewriter);
  }
};

/// Vectorization pattern for memref::CopyOp.
struct CopyVectorizationPattern : public OpRewritePattern<memref::CopyOp> {
  using OpRewritePattern<memref::CopyOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(memref::CopyOp copyOp,
                                PatternRewriter &rewriter) const override;
};
1446 
1448  std::function<LogicalResult(RewriterBase &, tensor::PadOp, Value)>;
1449 
1450 /// Rewrite a tensor::PadOp into a sequence of EmptyOp, FillOp and
1451 /// InsertSliceOp. For now, only constant padding values are supported.
1452 /// `OptimizeCopyFn` can be used to customize copying step optimization.
1453 struct GeneralizePadOpPattern : public OpRewritePattern<tensor::PadOp> {
1455  OptimizeCopyFn optimizeCopyFn = nullptr,
1456  PatternBenefit benefit = 1)
1457  : OpRewritePattern<tensor::PadOp>(context, benefit),
1458  optimizeCopyFn(std::move(optimizeCopyFn)) {}
1459  LogicalResult matchAndRewrite(tensor::PadOp padOp,
1460  PatternRewriter &rewriter) const override;
1461 
1462 protected:
1463  /// Filling `dest` using FillOp constant padding value if possible.
1464  Value createFillOrGenerateOp(RewriterBase &rewriter, tensor::PadOp padOp,
1465  Value dest,
1466  const SmallVector<Value> &dynSizes) const;
1467 };
1468 
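// Illustrative IR, as a rough sketch with shapes and attributes elided,
// assuming a constant pad value %cst:
//
//   %0 = tensor.pad %src low[%l0, %l1] high[%h0, %h1] { tensor.yield %cst }
//
// becomes approximately:
//
//   %empty = tensor.empty(...) : tensor<?x?xf32>
//   %fill = linalg.fill ins(%cst : f32) outs(%empty : tensor<?x?xf32>)
//   %0 = tensor.insert_slice %src into %fill[%l0, %l1] [...] [1, 1]
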
1469 /// Rewrites a tensor::PackOp into a sequence of tensor.pad + linalg.transpose +
1470 /// tensor.insert_slice ops, where all outer dims of the tensor::PackOp are
1471 /// 1s.
1472 struct GeneralizeOuterUnitDimsPackOpPattern
1473  : public OpRewritePattern<tensor::PackOp> {
1474  using OpRewritePattern<tensor::PackOp>::OpRewritePattern;
1475  LogicalResult matchAndRewrite(tensor::PackOp packOp,
1476  PatternRewriter &rewriter) const override;
1477 };
1478 
1479 /// Rewrites a tensor::UnPackOp into a sequence of rank-reduced extract_slice op
1480 /// + transpose op + insert_slice op, where all outer dims of the
1481 /// tensor::UnPackOp are 1s.
1482 struct GeneralizeOuterUnitDimsUnPackOpPattern
1483  : public OpRewritePattern<tensor::UnPackOp> {
1484  using OpRewritePattern<tensor::UnPackOp>::OpRewritePattern;
1485  LogicalResult matchAndRewrite(tensor::UnPackOp unpackOp,
1486  PatternRewriter &rewriter) const override;
1487 };
1488 
1489 /// Match and rewrite for the pattern:
1490 /// ```
1491 /// %alloc = ...
1492 /// [optional] %view = memref.view %alloc ...
1493 /// %subView = subview %allocOrView ...
1494 /// [optional] linalg.fill(%allocOrView, %cst) ...
1495 /// ...
1496 /// memref.copy(%in, %subView) ...
1497 /// vector.transfer_read %allocOrView[...], %cst ...
1498 /// ```
1499 /// into
1500 /// ```
1501 /// [unchanged] %alloc = ...
1502 /// [unchanged] [optional] %view = memref.view %alloc ...
1503 /// [unchanged] %subView = subview %allocOrView ...
1504 /// ...
1505 /// vector.transfer_read %in[...], %cst ...
1506 /// ```
1507 /// Where there is no interleaved use between memref.copy and transfer_read as
1508 /// well as no interleaved use between linalg.fill and memref.copy (if
1509 /// linalg.fill is specified).
1510 /// This is a custom rewrite to forward partial reads (with optional fills) to
1511 /// vector.transfer_read.
1512 struct LinalgCopyVTRForwardingPattern
1513  : public OpRewritePattern<vector::TransferReadOp> {
1514  using OpRewritePattern<vector::TransferReadOp>::OpRewritePattern;
1515 
1516  LogicalResult matchAndRewrite(vector::TransferReadOp xferOp,
1517  PatternRewriter &rewriter) const override;
1518 };
1519 
1520 /// Match and rewrite for the pattern:
1521 /// ```
1522 /// %alloc = ...
1523 /// [optional] %view = memref.view %alloc ...
1524 /// %subView = subview %allocOrView...
1525 /// ...
1526 /// vector.transfer_write %..., %allocOrView[...]
1527 /// memref.copy(%subView, %out)
1528 /// ```
1529 /// into
1530 /// ```
1531 /// [unchanged] %alloc = ...
1532 /// [unchanged] [optional] %view = memref.view %alloc ...
1533 /// [unchanged] %subView = subview %allocOrView...
1534 /// ...
1535 /// vector.transfer_write %..., %out[...]
1536 /// ```
1537 /// Where there is no interleaved use between transfer_write and memref.copy.
1538 /// This is a custom rewrite to forward partial writes to
1539 /// vector.transfer_write.
1540 struct LinalgCopyVTWForwardingPattern
1541  : public OpRewritePattern<vector::TransferWriteOp> {
1542  using OpRewritePattern<vector::TransferWriteOp>::OpRewritePattern;
1543 
1544  LogicalResult matchAndRewrite(vector::TransferWriteOp xferOp,
1545  PatternRewriter &rewriter) const override;
1546 };
1547 
1548 /// Rewrite extract_slice(tensor.pad(x)) into tensor.pad(extract_slice(x)).
1549 struct ExtractSliceOfPadTensorSwapPattern
1550  : public OpRewritePattern<tensor::ExtractSliceOp> {
1551  /// A function to control pattern application and rewrite logic.
1552  ///
1553  /// The function will be given the slice op and should return:
1554  /// - std::nullopt: to fail the match and not apply the pattern;
1555  /// - true: to apply the pattern with zero slice guard;
1556  /// - false: to apply the pattern without zero slice guard.
1557  ///
1558  /// See the documentation for tensor::bubbleUpPadSlice regarding zero slice
1559  /// guard.
1560  using ControlFn = std::function<std::optional<bool>(tensor::ExtractSliceOp)>;
1561 
1562  ExtractSliceOfPadTensorSwapPattern(MLIRContext *context,
1563  ControlFn controlFn = nullptr,
1564  PatternBenefit benefit = 1)
1565  : OpRewritePattern(context, benefit), controlFn(std::move(controlFn)) {}
1566 
1567  LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
1568  PatternRewriter &rewriter) const override;
1569 
1570 private:
1571  ControlFn controlFn;
1572 };
1573 
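// For illustration, a minimal sketch of a control function; the rank-2
// restriction is arbitrary, chosen only for the example, and `patterns` and
// `ctx` are assumed:
//
//   ExtractSliceOfPadTensorSwapPattern::ControlFn controlFn =
//       [](tensor::ExtractSliceOp sliceOp) -> std::optional<bool> {
//     if (sliceOp.getType().getRank() != 2)
//       return std::nullopt; // Fail the match.
//     return true;           // Apply the pattern with the zero slice guard.
//   };
//   patterns.add<ExtractSliceOfPadTensorSwapPattern>(ctx, controlFn);
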
1574 //===----------------------------------------------------------------------===//
1575 // Populate functions.
1576 //===----------------------------------------------------------------------===//
1577 
1578 /// Canonicalization patterns relevant to apply after tiling patterns. These
1579 /// are applied automatically by the tiling pass but need to be applied
1580 /// manually when tiling is called programmatically.
1581 RewritePatternSet getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx);
1582 void populateLinalgTilingCanonicalizationPatterns(RewritePatternSet &patterns);
1583 
1584 /// Linalg generalization patterns
1585 
1586 /// Populates `patterns` with patterns to convert spec-generated named ops to
1587 /// linalg.generic ops.
1588 void populateLinalgNamedOpsGeneralizationPatterns(RewritePatternSet &patterns);
1589 
1590 /// Populates `patterns` with patterns to convert linalg.generic ops to named
1591 /// ops where possible. A linalg.generic can represent a wide range of complex
1592 /// computations for which an equivalent linalg named op may not exist, e.g. a
1593 /// linalg.generic that takes a tensor and computes a polynomial such as:
1594 /// p(x) = a_n*x^n + ... + a_1*x + a_0
1595 /// has no equivalent named op to convert to. Many such cases exist.
1596 void populateLinalgGenericOpsSpecializationPatterns(
1597  RewritePatternSet &patterns);
1598 
1599 /// Linalg decompose convolutions patterns
1600 
1601 /// Populates patterns to decompose high-D convolution ops into low-D ones.
1602 /// This is a step in the progressive lowering of convolution ops; afterwards,
1603 /// the low-D convolution ops can be vectorized.
1604 void populateDecomposeConvolutionPatterns(RewritePatternSet &patterns,
1605  PatternBenefit benefit = 1);
1606 
1607 /// Populates patterns to transform linalg.conv_2d_xxx operations into
1608 /// linalg.generic (for img2col packing) and linalg.matmul.
1609 /// \see rewriteInIm2Col for more details.
1610 void populateConvertConv2DToImg2ColPatterns(RewritePatternSet &patterns);
1611 
1612 /// Populates `patterns` with patterns that vectorize tensor.pad.
1613 /// These patterns are meant to apply in a complementary fashion. Benefits
1614 /// are used to encode a certain ordering of pattern application. To avoid
1615 /// scattering magic constants throughout the code base, the patterns must be
1616 /// added with this function. `baseBenefit` can be used to offset the benefit
1617 /// of all tensor::PadOp vectorization patterns by a certain value.
1618 void populatePadOpVectorizationPatterns(RewritePatternSet &patterns,
1619  PatternBenefit baseBenefit = 1);
1620 
1621 /// Populate patterns for splitting a `LinalgOp` with multiple statements within
1622 /// its payload into multiple `GenericOp`s that each have a single statement.
1623 /// The option `removeDeadArgsAndResults` adds patterns to remove dead arguments
1624 /// and results from the generated decomposed ops. It defaults to `true` since
1625 /// the core decomposition patterns rely on these cleanup patterns. It is set
1626 /// to `false` only for testing purposes.
1627 void populateDecomposeLinalgOpsPattern(RewritePatternSet &patterns,
1628  bool removeDeadArgsAndResults = true);
1629 
1630 /// Populate patterns that convert non-destination-style ops to destination
1631 /// style ops.
1632 void populateConvertToDestinationStylePatterns(RewritePatternSet &patterns);
1633 
1634 /// Populate patterns for vectorizing low-D convolution ops. This is a step in
1635 /// the progressive lowering of convolution ops; it assumes high-D convolution
1636 /// ops have already been decomposed.
1637 void populateConvolutionVectorizationPatterns(RewritePatternSet &patterns,
1638  PatternBenefit benefit = 1);
1639 
1640 /// Populate patterns that convert `ElementwiseMappable` ops to linalg
1641 /// parallel loops.
1642 void populateElementwiseToLinalgConversionPatterns(RewritePatternSet &patterns);
1643 
1644 /// Populate patterns that are only useful in the context of sparse tensors.
1645 void populateSparseTensorRewriting(RewritePatternSet &patterns);
1646 
1647 /// Function type which is used to control when to stop fusion. It is expected
1648 /// that OpOperand is not modified in the callback. The OpOperand is not marked
1649 /// as const to allow callers to use non-const methods.
1650 using ControlFusionFn = std::function<bool(OpOperand *fusedOperand)>;
1651 
1652 /// Patterns for fusing linalg operation on tensors.
1653 
1654 /// Pattern to fuse `linalg.generic` -> `linalg.generic` operations
1655 /// when both operations are fusable elementwise operations.
1656 void populateElementwiseOpsFusionPatterns(
1657  RewritePatternSet &patterns,
1658  const ControlFusionFn &controlElementwiseOpFusion);
1659 
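// For illustration, a minimal sketch of a fusion control function that only
// fuses when the producer has a single use (`patterns` is assumed):
//
//   ControlFusionFn controlFn = [](OpOperand *fusedOperand) {
//     Operation *producer = fusedOperand->get().getDefiningOp();
//     return producer && producer->hasOneUse();
//   };
//   populateElementwiseOpsFusionPatterns(patterns, controlFn);
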
1660 /// Function type which is used to control propagation of tensor.pack/unpack
1661 /// ops.
1662 using ControlPropagationFn = std::function<bool(OpOperand *opOperand)>;
1663 
1664 /// Patterns to bubble up or down data layout ops across other operations.
1665 void populateDataLayoutPropagationPatterns(
1666  RewritePatternSet &patterns,
1667  const ControlPropagationFn &controlPackUnPackPropagation);
1668 
1669 /// Pattern to remove dead operands and results of `linalg.generic` operations.
1670 /// This is effectively DCE for a linalg op.
1671 void populateEraseUnusedOperandsAndResultsPatterns(RewritePatternSet &patterns);
1672 
1673 /// Patterns to promote inputs to outputs and remove unused inputs of
1674 /// `linalg.generic` ops.
1675 void populateEraseUnnecessaryInputsPatterns(RewritePatternSet &patterns);
1676 
1677 /// Function type to control generic op dimension collapsing. It is expected
1678 /// to return an array of `ReassociationIndices` representing dimensions that
1679 /// should be merged.
1680 using GetCollapsableDimensionsFn =
1681  std::function<SmallVector<ReassociationIndices>(linalg::LinalgOp)>;
1682 
1683 /// Pattern to collapse dimensions in a linalg.generic op. This will collapse
1684 /// tensor operands when needed and expand back the result tensors.
1685 void populateCollapseDimensions(
1686  RewritePatternSet &patterns,
1687  const GetCollapsableDimensionsFn &controlCollapseDimensions);
1688 
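// For illustration, a minimal sketch of a control function that proposes
// collapsing all loops of an op into a single dimension (`patterns` is
// assumed):
//
//   GetCollapsableDimensionsFn control = [](linalg::LinalgOp op) {
//     ReassociationIndices allLoops;
//     for (unsigned i = 0, e = op.getNumLoops(); i < e; ++i)
//       allLoops.push_back(i);
//     return SmallVector<ReassociationIndices>{allLoops};
//   };
//   populateCollapseDimensions(patterns, control);
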
1689 /// Patterns to fold an expanding (collapsing) tensor_reshape operation with its
1690 /// producer (consumer) generic operation by expanding the dimensionality of the
1691 /// loop in the generic op.
1692 void populateFoldReshapeOpsByExpansionPatterns(
1693  RewritePatternSet &patterns, const ControlFusionFn &controlFoldingReshapes);
1694 
1695 /// Patterns to fold an expanding tensor.expand_shape operation with its
1696 /// producer generic operation by collapsing the dimensions of the generic op.
1697 void populateFoldReshapeOpsByCollapsingPatterns(
1698  RewritePatternSet &patterns, const ControlFusionFn &controlFoldingReshapes);
1699 
1700 /// Patterns to constant fold Linalg operations.
1701 void populateConstantFoldLinalgOperations(RewritePatternSet &patterns,
1702  const ControlFusionFn &controlFn);
1703 
1704 /// Pattern to fuse a `tensor.pad` operation with the producer of its source,
1705 /// if the producer is a `linalg` operation with all parallel iterator types.
1706 void populateFuseTensorPadWithProducerLinalgOpPatterns(
1707  RewritePatternSet &patterns);
1708 
1709 /// Patterns to convert from one named op to another. These can be seen as
1710 /// canonicalizations of named ops into another named op.
1711 void populateLinalgNamedOpConversionPatterns(RewritePatternSet &patterns);
1712 
1713 /// Patterns to fold unit-extent dimensions in operands/results of linalg ops on
1714 /// tensors via reassociative reshape ops.
1715 void populateFoldUnitExtentDimsPatterns(RewritePatternSet &patterns,
1716     ControlDropUnitDims &options);
1717 
1718 /// A pattern that converts init operands to input operands.
1719 void populateMoveInitOperandsToInputPattern(RewritePatternSet &patterns);
1720 
1721 /// Patterns that are used to inline constant operands into linalg generic ops.
1722 void populateInlineConstantOperandsPatterns(RewritePatternSet &patterns);
1723 
1724 /// Patterns that are used to bubble up extract slice op above linalg op.
1725 void populateBubbleUpExtractSliceOpPatterns(RewritePatternSet &patterns);
1726 
1727 /// Adds patterns that swap tensor.extract_slice(linalg.fill(%cst, %init)) into
1728 /// linalg.fill(%cst, tensor.extract_slice(%init)).
1729 void populateSwapExtractSliceWithFillPatterns(RewritePatternSet &patterns);
1730 
1731 /// Patterns to apply `splitReduction` below.
1732 void populateSplitReductionPattern(
1733  RewritePatternSet &patterns,
1734  const ControlSplitReductionFn &controlSplitReductionFn,
1735  bool useAlloc = false);
1736 
1737 /// Patterns to convert Linalg matmul ops to transposed variants.
1738 void populateTransposeMatmulPatterns(RewritePatternSet &patterns,
1739  bool transposeLHS = true);
1740 
1741 /// Patterns to block pack Linalg matmul ops.
1742 void populateBlockPackMatmulPatterns(RewritePatternSet &patterns,
1743  const ControlBlockPackMatmulFn &controlFn);
1744 
1745 /// Patterns to apply Winograd Conv2D algorithm F(m x m, r x r).
1746 void populateWinogradConv2DPatterns(RewritePatternSet &patterns, int64_t m,
1747  int64_t r);
1748 
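// For example, to apply F(4 x 4, 3 x 3), i.e. 4x4 output tiles for 3x3
// filters, one would call (a sketch; `patterns` is assumed):
//
//   populateWinogradConv2DPatterns(patterns, /*m=*/4, /*r=*/3);
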
1749 /// Patterns to decompose Winograd operators.
1750 void populateDecomposeWinogradOpsPatterns(RewritePatternSet &patterns);
1751 
1752 /// Adds patterns that reduce the rank of named contraction ops that have
1753 /// unit dimensions in their operand(s) by converting to a sequence of
1754 /// `collapse_shape`, `<corresponding linalg named op>`, `expand_shape` (if on
1755 /// tensors). For example, a `linalg.batch_matmul` with unit batch size converts
1756 /// to `linalg.matmul`, and a `linalg.matvec` with a unit spatial lhs dim converts to a `linalg.dot`.
1757 void populateContractionOpRankReducingPatterns(RewritePatternSet &patterns);
1758 
1759 } // namespace linalg
1760 } // namespace mlir
1761 
1762 #endif // MLIR_DIALECT_LINALG_TRANSFORMS_TRANSFORMS_H