MLIR  21.0.0git
ReshapePatterns.cpp
Go to the documentation of this file.
1 //===- RankReductionPatterns.cpp - Patterns related to rank reductions ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LogicalResult.h"
17 
18 using namespace mlir;
19 using namespace mlir::tensor;
20 
21 namespace {
22 /// Fold expand_shape(extract_slice) ops that cancel itself out.
23 struct FoldExpandOfRankReducingExtract
24  : public OpRewritePattern<ExpandShapeOp> {
26 
27  LogicalResult matchAndRewrite(ExpandShapeOp expandShapeOp,
28  PatternRewriter &rewriter) const override {
29  RankedTensorType resultType = expandShapeOp.getResultType();
30  auto extractSliceOp =
31  expandShapeOp.getSrc().getDefiningOp<ExtractSliceOp>();
32  if (!extractSliceOp)
33  return failure();
34  RankedTensorType srcType = extractSliceOp.getSourceType();
35 
36  // Only cases where the ExpandShapeOp can be folded away entirely are
37  // supported. Moreover, only simple cases where the resulting ExtractSliceOp
38  // has no rank-reduction anymore are supported at the moment.
39  RankedTensorType nonReducingExtractType = ExtractSliceOp::inferResultType(
40  srcType, extractSliceOp.getStaticOffsets(),
41  extractSliceOp.getStaticSizes(), extractSliceOp.getStaticStrides());
42  if (nonReducingExtractType != resultType)
43  return failure();
44 
45  SmallVector<OpFoldResult> mixedOffsets = extractSliceOp.getMixedOffsets();
46  SmallVector<OpFoldResult> mixedSizes = extractSliceOp.getMixedSizes();
47  SmallVector<OpFoldResult> mixedStrides = extractSliceOp.getMixedStrides();
48  rewriter.replaceOpWithNewOp<tensor::ExtractSliceOp>(
49  expandShapeOp, extractSliceOp.getSource(), mixedOffsets, mixedSizes,
50  mixedStrides);
51  return success();
52  }
53 };
54 
55 /// Fold collapse_shape which only removes static dimensions of size `1`
56 /// into extract_slice.
57 struct FoldUnPaddingCollapseIntoExtract
58  : public OpRewritePattern<tensor::CollapseShapeOp> {
60 
61  LogicalResult matchAndRewrite(tensor::CollapseShapeOp collapseShapeOp,
62  PatternRewriter &rewriter) const override {
63  auto extractSliceOp =
64  collapseShapeOp.getSrc().getDefiningOp<tensor::ExtractSliceOp>();
65  // Collapse cannot be folded away with multiple users of the extract slice
66  // and it is not necessarily beneficial to only convert the collapse into
67  // another extract slice.
68  if (!extractSliceOp || !extractSliceOp->hasOneUse())
69  return failure();
70 
71  // Only fold away simple collapse where all removed dimensions have static
72  // size `1`.
74  collapseShapeOp.getSrcType(), collapseShapeOp.getResultType());
76  return rewriter.notifyMatchFailure(collapseShapeOp,
77  "expected unpadding collapse");
78 
79  Value unPaddedExtractSlice = rewriter.create<tensor::ExtractSliceOp>(
80  extractSliceOp.getLoc(), collapseShapeOp.getResultType(),
81  extractSliceOp.getSource(), extractSliceOp.getMixedOffsets(),
82  extractSliceOp.getMixedSizes(), extractSliceOp.getMixedStrides());
83  rewriter.replaceOp(collapseShapeOp, unPaddedExtractSlice);
84  return success();
85  }
86 };
87 
88 /// Fold insert_slice(collapse_shape) ops that cancel itself out.
89 template <typename OpTy>
90 struct FoldInsertOfRankReducingInsert : public OpRewritePattern<OpTy> {
92 
93  LogicalResult matchAndRewrite(OpTy insertSliceOp,
94  PatternRewriter &rewriter) const override {
95  auto collapseShapeOp =
96  insertSliceOp.getSource().template getDefiningOp<CollapseShapeOp>();
97  if (!collapseShapeOp)
98  return failure();
99  RankedTensorType srcType = collapseShapeOp.getSrcType();
100 
101  // Only cases where the CollapseShapeOp can be folded away entirely are
102  // supported. Moreover, only simple cases where the resulting InsertSliceOp
103  // has no rank-reduction anymore are supported at the moment.
104  RankedTensorType nonReducingInsertType =
105  RankedTensorType::get(insertSliceOp.getStaticSizes(),
106  insertSliceOp.getDestType().getElementType());
107  if (nonReducingInsertType != srcType)
108  return failure();
109 
110  SmallVector<OpFoldResult> mixedOffsets = insertSliceOp.getMixedOffsets();
111  SmallVector<OpFoldResult> mixedSizes = insertSliceOp.getMixedSizes();
112  SmallVector<OpFoldResult> mixedStrides = insertSliceOp.getMixedStrides();
113  rewriter.replaceOpWithNewOp<OpTy>(insertSliceOp, collapseShapeOp.getSrc(),
114  insertSliceOp.getDest(), mixedOffsets,
115  mixedSizes, mixedStrides);
116  return success();
117  }
118 };
119 
120 /// Fold expand_shape which only adds static dimensions of size `1`
121 /// into insert_slice.
122 template <typename OpTy>
123 struct FoldPaddingExpandIntoInsert : public OpRewritePattern<OpTy> {
125 
126  LogicalResult matchAndRewrite(OpTy insertSliceOp,
127  PatternRewriter &rewriter) const override {
128  auto expandShapeOp = insertSliceOp.getSource()
129  .template getDefiningOp<tensor::ExpandShapeOp>();
130  if (!expandShapeOp)
131  return failure();
132 
133  // Only fold away simple expansion where all added dimensions have static
134  // size `1`.
136  expandShapeOp.getResultType(), expandShapeOp.getSrcType());
138  return rewriter.notifyMatchFailure(insertSliceOp,
139  "expected rank increasing expansion");
140 
141  rewriter.modifyOpInPlace(insertSliceOp, [&]() {
142  insertSliceOp.getSourceMutable().assign(expandShapeOp.getSrc());
143  });
144  return success();
145  }
146 };
147 
148 /// Pattern to bubble up a tensor.expand_shape op through a producer
149 /// tensor.collapse_shape op that has non intersecting reassociations.
150 struct BubbleUpExpandThroughParallelCollapse
151  : public OpRewritePattern<tensor::ExpandShapeOp> {
153 
154  LogicalResult matchAndRewrite(tensor::ExpandShapeOp expandOp,
155  PatternRewriter &rewriter) const override {
156  auto collapseOp =
157  expandOp.getSrc().getDefiningOp<tensor::CollapseShapeOp>();
158  if (!collapseOp)
159  return failure();
160  auto expandReInds = expandOp.getReassociationIndices();
161  auto collapseReInds = collapseOp.getReassociationIndices();
162 
163  // Special case where the collapsed tensor to expand is a 0-D tensor,
164  // then the reassociation maps will be empty and not produce valid results.
165  if (expandReInds.size() == 0) {
166  return failure();
167  }
168 
169  // Reshapes are parallel to each other if none of the reassociation indices
170  // have greater than 1 index for both reshapes.
171  for (auto [expandReassociation, collapseReassociation] :
172  llvm::zip_equal(expandReInds, collapseReInds)) {
173  if (collapseReassociation.size() != 1 && expandReassociation.size() != 1)
174  return failure();
175  }
176 
177  // Compute new reassociation indices and expanded/collaped shapes.
178  SmallVector<ReassociationIndices> newExpandReInds, newCollapseReInds;
179  Location loc = expandOp->getLoc();
180  SmallVector<OpFoldResult> collapseSizes =
181  tensor::getMixedSizes(rewriter, loc, collapseOp.getSrc());
183  expandOp.getStaticOutputShape(), expandOp.getOutputShape(), rewriter));
184  SmallVector<OpFoldResult> newExpandSizes;
185  int64_t index = 0, expandIndex = 0, collapseIndex = 0;
186  for (auto [idx, collapseReassociation] : llvm::enumerate(collapseReInds)) {
187  if (collapseReassociation.size() != 1) {
188  ReassociationIndices newCollapseReassociation;
189  for (size_t i = 0; i < collapseReassociation.size(); ++i) {
190  newCollapseReassociation.push_back(index);
191  newExpandReInds.push_back({index++});
192  newExpandSizes.push_back(collapseSizes[collapseIndex++]);
193  }
194  newCollapseReInds.push_back(newCollapseReassociation);
195  expandIndex++;
196  continue;
197  }
198  ReassociationIndices newExpandReassociation;
199  auto expandReassociation = expandReInds[idx];
200  for (size_t i = 0; i < expandReassociation.size(); ++i) {
201  newExpandReassociation.push_back(index);
202  newCollapseReInds.push_back({index++});
203  newExpandSizes.push_back(expandSizes[expandIndex++]);
204  }
205  newExpandReInds.push_back(newExpandReassociation);
206  collapseIndex++;
207  }
208 
209  // Swap reshape order.
210  SmallVector<Value> dynamicSizes;
211  SmallVector<int64_t> staticSizes;
212  dispatchIndexOpFoldResults(newExpandSizes, dynamicSizes, staticSizes);
213  auto expandResultType = expandOp.getResultType().clone(staticSizes);
214  auto newExpand = rewriter.create<tensor::ExpandShapeOp>(
215  loc, expandResultType, collapseOp.getSrc(), newExpandReInds,
216  newExpandSizes);
217  rewriter.replaceOpWithNewOp<tensor::CollapseShapeOp>(
218  expandOp, newExpand.getResult(), newCollapseReInds);
219  return success();
220  }
221 };
222 
223 /// Converts `tensor.extract_slice(tensor.expand_shape)` to
224 /// `tensor.expand_shape(tensor.extract_slice)`.
225 ///
226 /// For this transformation to be possible, the slice must be fully contiguous
227 /// within each reassociation group of the expand_shape. A slice is defined as
228 /// fully contiguous within a reassociation group if after flattening the
229 /// reassociation group to a single 1D range, then the slice taken out of the
230 /// group could be defined as a single contiguous subrange within that range.
231 ///
232 /// Rank reducing slices are not supported.
233 ///
234 /// Example:
235 /// The transformation is possible because each reassociation group has a
236 /// contiguous slice (i.e., [2x4->2x4], [2x8->1x5], [4x2x4->1x1x4]).
237 /// ```
238 /// BEFORE:
239 /// %reshape = tensor.expand_shape %in [[0, 1], [2, 3], [4, 5, 6]]
240 /// tensor<8x16x32xf32> to tensor<2x4x2x8x4x2x4xf32>
241 /// %slice = tensor.extract_slice %reshape ...
242 /// tensor<2x4x2x8x4x2x4xf32> to tensor<2x4x1x5x1x1x4xf32>
243 ///
244 /// AFTER:
245 /// %slice = tensor.extract_slice %in ...
246 /// tensor<8x16x32xf32> to tensor<8x5x4xf32>
247 /// %reshape = tensor.expand_shape %slice [[0, 1], [2, 3], [4, 5, 6]]
248 /// tensor<8x5x4xf32> to tensor<2x4x1x5x1x1x4xf32>
249 /// ```
250 ///
251 /// Note - this pattern could be extended to be a swap pattern between
252 /// `tensor.expand_shape` and `tensor.extract_slice`, but is currently
253 /// implemented only as a bubble up pattern for `tensor.extract_slice`.
254 struct BubbleUpExpandShapeThroughExtractSlice
255  : public OpRewritePattern<tensor::ExtractSliceOp> {
257 
258  LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
259  PatternRewriter &rewriter) const override {
260  auto expandShapeOp =
261  sliceOp.getSource().getDefiningOp<tensor::ExpandShapeOp>();
262 
263  if (checkPreconditionForBubbleUpExtractSlice(sliceOp, expandShapeOp,
264  rewriter)
265  .failed())
266  return failure();
267 
268  // The tensor.extract_slice before applying the pattern works on the result
269  // of the tensor.expand_shape, so variables (i.e. inputs for ExtractSliceOp)
270  // referring to the state before applying the pattern are named with the
271  // prefix "expanded", and ones referring to the state after applying the
272  // pattern are named with the prefix "collapsed".
273  SmallVector<OpFoldResult> expandedOffsets = sliceOp.getMixedOffsets();
274  SmallVector<OpFoldResult> expandedSizes = sliceOp.getMixedSizes();
275  SmallVector<OpFoldResult> expandedShape =
276  getMixedValues(expandShapeOp.getStaticOutputShape(),
277  expandShapeOp.getOutputShape(), rewriter);
278 
279  // Helper variables and function for accumulating the size values.
280  Location loc = expandShapeOp->getLoc();
281  AffineExpr d0, d1, d2;
282  bindDims(rewriter.getContext(), d0, d1, d2);
283  // Multiply two integers.
284  auto mul = [&](OpFoldResult v1, OpFoldResult v2) {
285  auto mulMap = AffineMap::get(2, 0, {d0 * d1});
286  return affine::makeComposedFoldedAffineApply(rewriter, loc, mulMap,
287  {v1, v2});
288  };
289 
290  // Compute new offsets, sizes, and strides for tensor.extract_slice.
291  // The new tensor.extract_slice will work on a tensor that has has a rank of
292  // ReassociationIndices.size(). In the loop a single offset, size, and
293  // stride value is computed per reassociation group.
294  SmallVector<OpFoldResult> collapsedOffsets, collapsedSizes,
295  collapsedStrides;
296  for (const ReassociationIndices &indices :
297  expandShapeOp.getReassociationIndices()) {
298  // collapsedSize will hold the size of the single dim that represents the
299  // reassociation group in the non expanded tensor.
300  OpFoldResult collapsedSize = rewriter.getIndexAttr(1);
301  // The reassocGroupSizes and reassocGroupOffsets are used to create an
302  // affine.linearize_index op to linearize the single offset value required
303  // for this reassociation group.
304  SmallVector<OpFoldResult> reassocGroupSizes, reassocGroupOffsets;
305 
306  for (long expandedDim : indices) {
307  // reassocGroupSizes and reassocGroupOffsets can be obtained directly
308  // from the expanded state, but the collapsed size requires calculation
309  // as it did not previously exist.
310  reassocGroupSizes.push_back(expandedShape[expandedDim]);
311  reassocGroupOffsets.push_back(expandedOffsets[expandedDim]);
312  collapsedSize = mul(collapsedSize, expandedSizes[expandedDim]);
313  }
314 
315  SmallVector<Value> offsetVals =
316  llvm::map_to_vector(reassocGroupOffsets, [&](OpFoldResult ofr) {
317  return getValueOrCreateConstantIndexOp(rewriter, loc, ofr);
318  });
319  OpFoldResult collapsedOffset =
320  rewriter
321  .create<affine::AffineLinearizeIndexOp>(loc, offsetVals,
322  reassocGroupSizes,
323  /*disjoint=*/true)
324  .getResult();
325  collapsedOffsets.push_back(collapsedOffset);
326  collapsedSizes.push_back(collapsedSize);
327 
328  // Only unit stride is supported.
329  collapsedStrides.push_back(rewriter.getIndexAttr(1));
330  }
331 
332  // The shape of the result can be obtained from the sizes passed in.
333  SmallVector<Value> dynDims;
334  SmallVector<int64_t> shape;
335  dispatchIndexOpFoldResults(expandedSizes, dynDims, shape);
336  RankedTensorType resultType = RankedTensorType::get(
337  shape, expandShapeOp.getResultType().getElementType());
338 
339  // Create a new ExtractSliceOp and ExpandShapeOp.
340  Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>(
341  loc, expandShapeOp.getSrc(), collapsedOffsets, collapsedSizes,
342  collapsedStrides);
343  rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
344  sliceOp, resultType, newSliceOp,
345  expandShapeOp.getReassociationIndices(), expandedSizes);
346  return success();
347  }
348 
349  // Helper function to check if all the required conditions for the
350  // tensor.extract_slice to be bubbled up through the tensor.expand_shape are
351  // met.
352  LogicalResult
353  checkPreconditionForBubbleUpExtractSlice(tensor::ExtractSliceOp sliceOp,
354  tensor::ExpandShapeOp expandShapeOp,
355  PatternRewriter &rewriter) const {
356 
357  if (!expandShapeOp) {
358  return rewriter.notifyMatchFailure(
359  sliceOp, "tensor.extract_slice source not produced by expand_shape");
360  }
361 
362  if (!sliceOp.hasUnitStride()) {
363  return rewriter.notifyMatchFailure(
364  sliceOp, "unsupported: non-unit stride. Only contiguous slices can "
365  "be supported in this transformation.");
366  }
367 
368  SmallVector<OpFoldResult> offsets = sliceOp.getMixedOffsets();
369  SmallVector<OpFoldResult> sizes = sliceOp.getMixedSizes();
370 
371  if (static_cast<size_t>(sliceOp.getResultType().getRank()) !=
372  sizes.size()) {
373  return rewriter.notifyMatchFailure(sliceOp,
374  "unimplemented: rank reducing slice");
375  }
376 
377  SmallVector<OpFoldResult> outputShape =
378  getMixedValues(expandShapeOp.getStaticOutputShape(),
379  expandShapeOp.getOutputShape(), rewriter);
380 
381  std::function<bool(OpFoldResult, OpFoldResult, OpFoldResult)>
382  isZeroOffsetAndFullSize =
383  [](OpFoldResult offset, OpFoldResult sliceSize, OpFoldResult size) {
384  if (!isConstantIntValue(offset, 0))
385  return false;
386  FailureOr<bool> maybeEqual =
387  ValueBoundsConstraintSet::areEqual(sliceSize, size);
388  return llvm::succeeded(maybeEqual) && maybeEqual.value();
389  };
390 
391  // Check that the slice is contiguous within each reassociation group.
392  // The slice is contiguous only if after the first dimension where a non
393  // unit slice is taken, the slice size on all subsequent dimensions of the
394  // group is equal to the entire size of the dimension.
395  // Examples of contiguous slices:
396  // full sizes: [8, 8, 10] slice offsets: [0, 0, 0] slice sizes: [1, 1, 10]
397  // full sizes: [5, 10] slice offsets: [3, 0] slice sizes: [2, 10]
398  // Examples of non contiguous slices:
399  // full sizes: [8, 8, 10] slice offsets: [0, 0, 0] slice sizes: [1, 2, 5]
400  // full sizes: [5, 10] slice offsets: [0, 4] slice sizes: [2, 5]
401  for (const ReassociationIndices &indices :
402  expandShapeOp.getReassociationIndices()) {
403  int64_t i = 0;
404  int64_t e = indices.size();
405  // Find the first expanded dim after the first dim with non-unit extracted
406  // size.
407  for (; i < e; ++i) {
408  if (!isConstantIntValue(sizes[indices[i]], 1)) {
409  // +1 to skip the first non-unit size dim.
410  i++;
411  break;
412  }
413  }
414 
415  // Verify that all subsequent dimensions extract the full size of the
416  // source tensor.
417  for (; i < e; ++i) {
418  int64_t expandedDim = indices[i];
419  if (!isZeroOffsetAndFullSize(offsets[expandedDim], sizes[expandedDim],
420  outputShape[expandedDim])) {
421  return rewriter.notifyMatchFailure(
422  sliceOp, "Not a contiguous slice of the expanded tensor.");
423  }
424  }
425  }
426 
427  return success();
428  }
429 };
430 
431 } // namespace
432 
435  patterns
436  .add<FoldExpandOfRankReducingExtract, FoldUnPaddingCollapseIntoExtract,
437  FoldInsertOfRankReducingInsert<tensor::InsertSliceOp>,
438  FoldInsertOfRankReducingInsert<tensor::ParallelInsertSliceOp>,
439  FoldPaddingExpandIntoInsert<tensor::InsertSliceOp>,
440  FoldPaddingExpandIntoInsert<tensor::ParallelInsertSliceOp>>(
441  patterns.getContext());
442 }
443 
446  patterns.add<BubbleUpExpandThroughParallelCollapse>(patterns.getContext());
447 }
448 
451  patterns.add<BubbleUpExpandShapeThroughExtractSlice>(patterns.getContext());
452 }
Base type for affine expression.
Definition: AffineExpr.h:68
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
IntegerAttr getIndexAttr(int64_t value)
Definition: Builders.cpp:104
MLIRContext * getContext() const
Definition: Builders.h:56
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:66
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:453
This class represents a single result from folding an operation.
Definition: OpDefinition.h:271
A special type of RewriterBase that coordinates the application of a rewrite pattern on the current I...
Definition: PatternMatch.h:803
std::enable_if_t<!std::is_convertible< CallbackT, Twine >::value, LogicalResult > notifyMatchFailure(Location loc, CallbackT &&reasonCallback)
Used to notify the listener that the IR failed to be rewritten because of a match failure,...
Definition: PatternMatch.h:736
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
void modifyOpInPlace(Operation *root, CallableT &&callable)
This method is a utility wrapper around an in-place modification of an operation.
Definition: PatternMatch.h:648
OpTy replaceOpWithNewOp(Operation *op, Args &&...args)
Replace the results of the given (original) op with a new op that is created without verification (re...
Definition: PatternMatch.h:554
static FailureOr< bool > areEqual(const Variable &var1, const Variable &var2)
Compute whether the given variables are equal.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > operands)
Constructs an AffineApplyOp that applies map to operands after composing the map with the maps of any...
Definition: AffineOps.cpp:1208
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:344
void populateReassociativeReshapeFoldingPatterns(RewritePatternSet &patterns)
Populates patterns with patterns that fold tensor.expand_shape and tensor.collapse_shape into other o...
void populateBubbleUpExtractSliceOpPatterns(RewritePatternSet &patterns)
Appends patterns that are used to bubble up tensor.extract slice op above its producer.
void populateBubbleUpExpandShapePatterns(RewritePatternSet &patterns)
Populates patterns with patterns that bubble up tensor.expand_shape through tensor....
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given tensor value.
Definition: TensorOps.cpp:70
Include the generated interface declarations.
bool isConstantIntValue(OpFoldResult ofr, int64_t value)
Return true if ofr is constant integer equal to value.
SliceVerificationResult
Enum that captures information related to verifier error conditions on slice insert/extract type of o...
Definition: BuiltinTypes.h:340
void bindDims(MLIRContext *ctx, AffineExprTy &...exprs)
Bind a list of AffineExpr references to DimExpr at positions: [0 .
Definition: AffineExpr.h:348
const FrozenRewritePatternSet & patterns
void dispatchIndexOpFoldResults(ArrayRef< OpFoldResult > ofrs, SmallVectorImpl< Value > &dynamicVec, SmallVectorImpl< int64_t > &staticVec)
Helper function to dispatch multiple OpFoldResults according to the behavior of dispatchIndexOpFoldRe...
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
Definition: Utils.cpp:112
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
SmallVector< OpFoldResult > getMixedValues(ArrayRef< int64_t > staticValues, ValueRange dynamicValues, MLIRContext *context)
Return a vector of OpFoldResults with the same size a staticValues, but all elements for which Shaped...
SliceVerificationResult isRankReducedType(ShapedType originalType, ShapedType candidateReducedType)
Check if originalType can be rank reduced to candidateReducedType type by dropping some dimensions wi...
OpRewritePattern is a wrapper around RewritePattern that allows for matching and rewriting against an...
Definition: PatternMatch.h:358