//===- DropUnitDims.cpp - Pass to drop use of unit-extent for broadcasting ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements patterns/pass to remove usage of unit-extent dimensions
// to specify broadcasting in favor of a more canonical representation of the
// computation.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Passes.h"

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Transforms/Transforms.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Tensor/Utils/Utils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

namespace mlir {
#define GEN_PASS_DEF_LINALGFOLDUNITEXTENTDIMSPASS
#include "mlir/Dialect/Linalg/Passes.h.inc"
} // namespace mlir

#define DEBUG_TYPE "linalg-drop-unit-dims"

using namespace mlir;
using namespace mlir::linalg;

namespace {
/// Pattern to move init operands to ins when all the loops are parallel and
/// the block argument corresponding to the init is used in the region. This
/// is a fix-up for when unit reduction dimensions have all been folded away;
/// in that context the op becomes an elementwise generic op. E.g., it converts
///
///   %0 = tensor.empty() : tensor<1x1xf32>
///   %1 = linalg.fill
///     ins(%cst : f32)
///     outs(%0 : tensor<1x1xf32>) -> tensor<1x1xf32>
///   %2 = linalg.generic {indexing_maps = [affine_map<(d0) -> (0, d0, 0, 0)>,
///                                         affine_map<(d0) -> (0, d0)>],
///                        iterator_types = ["parallel"]}
///     ins(%arg0 : tensor<1x?x1x1xf32>)
///     outs(%1 : tensor<1x1xf32>) {
///   ^bb0(%in: f32, %out: f32):
///     %3 = arith.addf %in, %out : f32
///     linalg.yield %3 : f32
///   } -> tensor<1x1xf32>
///
/// into
///
///   %0 = tensor.empty() : tensor<1x1xf32>
///   %1 = linalg.fill
///     ins(%cst : f32)
///     outs(%0 : tensor<1x1xf32>) -> tensor<1x1xf32>
///   %2 = tensor.empty() : tensor<1x1xf32>
///   %3 = linalg.generic {indexing_maps = [affine_map<(d0) -> (0, d0, 0, 0)>,
///                                         affine_map<(d0) -> (0, d0)>,
///                                         affine_map<(d0) -> (0, d0)>],
///                        iterator_types = ["parallel"]}
///     ins(%arg0, %1 : tensor<1x?x1x1xf32>, tensor<1x1xf32>)
///     outs(%2 : tensor<1x1xf32>) {
///   ^bb0(%in: f32, %in_0: f32, %out: f32):
///     %4 = arith.addf %in, %in_0 : f32
///     linalg.yield %4 : f32
///   } -> tensor<1x1xf32>
struct MoveInitOperandsToInput : public OpRewritePattern<GenericOp> {
  using OpRewritePattern<GenericOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(GenericOp genericOp,
                                PatternRewriter &rewriter) const override {
    if (!genericOp.hasPureTensorSemantics())
      return failure();
    if (genericOp.getNumParallelLoops() != genericOp.getNumLoops())
      return failure();

    auto outputOperands = genericOp.getDpsInitsMutable();
    SetVector<OpOperand *> candidates;
    for (OpOperand &op : outputOperands) {
      if (genericOp.getMatchingBlockArgument(&op).use_empty())
        continue;
      candidates.insert(&op);
    }

    if (candidates.empty())
      return failure();

    // Compute the modified indexing maps.
    int64_t origNumInput = genericOp.getNumDpsInputs();
    SmallVector<Value> newInputOperands = genericOp.getDpsInputs();
    SmallVector<AffineMap> indexingMaps = genericOp.getIndexingMapsArray();
    SmallVector<AffineMap> newIndexingMaps;
    newIndexingMaps.append(indexingMaps.begin(),
                           std::next(indexingMaps.begin(), origNumInput));
    for (OpOperand *op : candidates) {
      newInputOperands.push_back(op->get());
      newIndexingMaps.push_back(genericOp.getMatchingIndexingMap(op));
    }
    newIndexingMaps.append(std::next(indexingMaps.begin(), origNumInput),
                           indexingMaps.end());

    Location loc = genericOp.getLoc();
    SmallVector<Value> newOutputOperands =
        llvm::to_vector(genericOp.getDpsInits());
    for (OpOperand *op : candidates) {
      OpBuilder::InsertionGuard guard(rewriter);
      rewriter.setInsertionPointAfterValue(op->get());
      auto elemType = cast<ShapedType>(op->get().getType()).getElementType();
      auto empty = rewriter.create<tensor::EmptyOp>(
          loc, tensor::getMixedSizes(rewriter, loc, op->get()), elemType);

      unsigned start = genericOp.getDpsInits().getBeginOperandIndex();
      newOutputOperands[op->getOperandNumber() - start] = empty.getResult();
    }

    auto newOp = rewriter.create<GenericOp>(
        loc, genericOp.getResultTypes(), newInputOperands, newOutputOperands,
        newIndexingMaps, genericOp.getIteratorTypesArray(),
        /*bodyBuild=*/nullptr, linalg::getPrunedAttributeList(genericOp));

    OpBuilder::InsertionGuard guard(rewriter);
    Region &region = newOp.getRegion();
    Block *block = rewriter.createBlock(&region);
    IRMapping mapper;
    for (auto bbarg : genericOp.getRegionInputArgs())
      mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));

    for (OpOperand *op : candidates) {
      BlockArgument bbarg = genericOp.getMatchingBlockArgument(op);
      mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));
    }

    for (OpOperand &op : outputOperands) {
      BlockArgument bbarg = genericOp.getMatchingBlockArgument(&op);
      if (candidates.count(&op))
        block->addArgument(bbarg.getType(), loc);
      else
        mapper.map(bbarg, block->addArgument(bbarg.getType(), loc));
    }

    for (auto &op : genericOp.getBody()->getOperations()) {
      rewriter.clone(op, mapper);
    }
    rewriter.replaceOp(genericOp, newOp.getResults());

    return success();
  }
};
} // namespace

//===---------------------------------------------------------------------===//
// Drop loops that are unit-extents within Linalg operations.
//===---------------------------------------------------------------------===//

/// Implements a pass that canonicalizes the uses of unit-extent dimensions for
/// broadcasting. For example,
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (0, d1)>,
///   affine_map<(d0, d1) -> (d0, 0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
///     tensor<5x5xf32>
/// {
///   %0 = linalg.tensor_reshape %arg0 [affine_map<(d0, d1) -> (d0, d1)>] :
///     tensor<5xf32> into tensor<1x5xf32>
///   %1 = linalg.tensor_reshape %arg1 [affine_map<(d0, d1) -> (d0, d1)>] :
///     tensor<5xf32> into tensor<5x1xf32>
///   %2 = linalg.generic #trait %0, %1 {
///   ^bb0(%arg2: f32, %arg3: f32):
///     %3 = arith.addf %arg2, %arg3 : f32
///     linalg.yield %3 : f32
///   } : tensor<1x5xf32>, tensor<5x1xf32> -> tensor<5x5xf32>
///   return %2 : tensor<5x5xf32>
/// }
/// ```
///
/// would canonicalize to
///
/// ```mlir
/// #accesses = [
///   affine_map<(d0, d1) -> (d1)>,
///   affine_map<(d0, d1) -> (d0)>,
///   affine_map<(d0, d1) -> (d0, d1)>
/// ]
///
/// #trait = {
///   args_in = 2,
///   args_out = 1,
///   indexing_maps = #accesses,
///   iterator_types = ["parallel", "parallel"],
///   library_call = "some_external_fn"
/// }
///
/// func @broadcast_test(%arg0 : tensor<5xf32>, %arg1 : tensor<5xf32>) ->
///     tensor<5x5xf32>
/// {
///   %0 = linalg.generic #trait %arg0, %arg1 {
///   ^bb0(%arg2: f32, %arg3: f32):
///     %3 = arith.addf %arg2, %arg3 : f32
///     linalg.yield %3 : f32
///   } : tensor<5xf32>, tensor<5xf32> -> tensor<5x5xf32>
///   return %0 : tensor<5x5xf32>
/// }
/// ```

/// Update the index accesses of linalg operations having index semantics.
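/// For example (an illustrative sketch, not lifted from a test): if unit loop
/// dimension 1 of a three-loop op is dropped, then inside the body
///   %a = linalg.index 1 : index   // is replaced by a constant 0
///   %b = linalg.index 2 : index   // is replaced by linalg.index 1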
static void
replaceUnitDimIndexOps(GenericOp genericOp,
                       const llvm::SmallDenseSet<unsigned> &unitDims,
                       RewriterBase &rewriter) {
  for (IndexOp indexOp :
       llvm::make_early_inc_range(genericOp.getBody()->getOps<IndexOp>())) {
    OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPoint(indexOp);
    if (unitDims.count(indexOp.getDim()) != 0) {
      rewriter.replaceOpWithNewOp<arith::ConstantIndexOp>(indexOp, 0);
    } else {
      // Update the dimension of the index operation if needed.
      unsigned droppedDims = llvm::count_if(
          unitDims, [&](unsigned dim) { return dim < indexOp.getDim(); });
      if (droppedDims != 0)
        rewriter.replaceOpWithNewOp<IndexOp>(indexOp,
                                             indexOp.getDim() - droppedDims);
    }
  }
}

/// Expand the given `value` so that the type matches the type of `origDest`.
/// The `reassociation` is used when `rankReductionStrategy` is set to
/// `RankReductionStrategy::ReassociativeReshape`.
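/// E.g. (an illustrative sketch): with the reshape strategy, a tensor<5xf32>
/// `result` whose original destination has type tensor<1x5xf32> is expanded
/// via tensor.expand_shape with reassociation [[0, 1]]; with the slice
/// strategy it is instead written into `origDest` via tensor.insert_slice
/// with offsets [0, 0], sizes [1, 5], and strides [1, 1].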
static Value
expandValue(RewriterBase &rewriter, Location loc, Value result, Value origDest,
            ArrayRef<ReassociationIndices> reassociation,
            ControlDropUnitDims::RankReductionStrategy rankReductionStrategy) {
  // There are no results for memref outputs.
  auto origResultType = cast<RankedTensorType>(origDest.getType());
  if (rankReductionStrategy ==
      ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice) {
    unsigned rank = origResultType.getRank();
    SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> sizes =
        tensor::getMixedSizes(rewriter, loc, origDest);
    SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
    return rewriter.createOrFold<tensor::InsertSliceOp>(
        loc, result, origDest, offsets, sizes, strides);
  }

  assert(rankReductionStrategy ==
             ControlDropUnitDims::RankReductionStrategy::ReassociativeReshape &&
         "unknown rank reduction strategy");
  return rewriter.create<tensor::ExpandShapeOp>(loc, origResultType, result,
                                                reassociation);
}

/// Collapse the given `value` so that the type matches the type of
/// `origOutput`. The `reassociation` is used when `rankReductionStrategy` is
/// set to `RankReductionStrategy::ReassociativeReshape`.
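/// E.g. (an illustrative sketch): a tensor<1x5xf32> operand with target shape
/// [5] is collapsed to tensor<5xf32> via tensor.collapse_shape with
/// reassociation [[0, 1]], or, with the slice strategy, via a rank-reducing
/// tensor.extract_slice (memref.subview for memref operands).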
static Value collapseValue(
    RewriterBase &rewriter, Location loc, Value operand,
    ArrayRef<int64_t> targetShape,
    ArrayRef<ReassociationIndices> reassociation,
    ControlDropUnitDims::RankReductionStrategy rankReductionStrategy) {
  if (auto memrefType = dyn_cast<MemRefType>(operand.getType())) {
    if (rankReductionStrategy ==
        ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice) {
      FailureOr<Value> rankReducingExtract =
          memref::SubViewOp::rankReduceIfNeeded(rewriter, loc, operand,
                                                targetShape);
      assert(succeeded(rankReducingExtract) && "not a unit-extent collapse");
      return *rankReducingExtract;
    }

    assert(
        rankReductionStrategy ==
            ControlDropUnitDims::RankReductionStrategy::ReassociativeReshape &&
        "unknown rank reduction strategy");
    MemRefLayoutAttrInterface layout;
    auto targetType = MemRefType::get(targetShape, memrefType.getElementType(),
                                      layout, memrefType.getMemorySpace());
    return rewriter.create<memref::CollapseShapeOp>(loc, targetType, operand,
                                                    reassociation);
  }
  if (auto tensorType = dyn_cast<RankedTensorType>(operand.getType())) {
    if (rankReductionStrategy ==
        ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice) {
      FailureOr<Value> rankReducingExtract =
          tensor::ExtractSliceOp::rankReduceIfNeeded(rewriter, loc, operand,
                                                     targetShape);
      assert(succeeded(rankReducingExtract) && "not a unit-extent collapse");
      return *rankReducingExtract;
    }

    assert(
        rankReductionStrategy ==
            ControlDropUnitDims::RankReductionStrategy::ReassociativeReshape &&
        "unknown rank reduction strategy");
    auto targetType =
        RankedTensorType::get(targetShape, tensorType.getElementType());
    return rewriter.create<tensor::CollapseShapeOp>(loc, targetType, operand,
                                                    reassociation);
  }
  llvm_unreachable("unsupported operand type");
}

/// Compute the modified metadata for an operand of an operation whose unit
/// dims are being dropped. Return the new indexing map to use, the shape of
/// the operand in the replacement op, and the `reassociation` to use to go
/// from the original operand shape to the modified operand shape.
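/// E.g. (an illustrative sketch): for an operand of type tensor<1x?x1x4xf32>
/// accessed via (d0, d1) -> (0, d0, 0, d1), with both loops kept, the new
/// indexing map is (d0, d1) -> (d0, d1), the target shape is [?, 4], and the
/// reassociation is [[0, 1, 2], [3]].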
struct UnitExtentReplacementInfo {
  AffineMap indexMap;
  SmallVector<ReassociationIndices> reassociation;
  SmallVector<int64_t> targetShape;
};

static UnitExtentReplacementInfo dropUnitExtentFromOperandMetadata(
    MLIRContext *context, GenericOp genericOp, OpOperand *opOperand,
    llvm::SmallDenseMap<unsigned, unsigned> &oldDimsToNewDimsMap,
    ArrayRef<AffineExpr> dimReplacements) {
  UnitExtentReplacementInfo info;
  ReassociationIndices reassociationGroup;
  SmallVector<AffineExpr> newIndexExprs;
  AffineMap indexingMap = genericOp.getMatchingIndexingMap(opOperand);
  ArrayRef<int64_t> operandShape = genericOp.getShape(opOperand);
  ArrayRef<AffineExpr> exprs = indexingMap.getResults();

  auto isUnitDim = [&](unsigned dim) {
    if (auto dimExpr = dyn_cast<AffineDimExpr>(exprs[dim])) {
      unsigned oldPosition = dimExpr.getPosition();
      return !oldDimsToNewDimsMap.count(oldPosition);
    }
    // Handle the other case where the shape is 1, and is accessed using a
    // constant 0.
    if (operandShape[dim] == 1) {
      auto constAffineExpr = dyn_cast<AffineConstantExpr>(exprs[dim]);
      return constAffineExpr && constAffineExpr.getValue() == 0;
    }
    return false;
  };

  unsigned dim = 0;
  while (dim < operandShape.size() && isUnitDim(dim))
    reassociationGroup.push_back(dim++);
  while (dim < operandShape.size()) {
    assert(!isUnitDim(dim) && "expected non unit-extent");
    reassociationGroup.push_back(dim);
    AffineExpr newExpr = exprs[dim].replaceDims(dimReplacements);
    newIndexExprs.push_back(newExpr);
    info.targetShape.push_back(operandShape[dim]);
    ++dim;
    // Fold all following dimensions that are unit-extent.
    while (dim < operandShape.size() && isUnitDim(dim)) {
      reassociationGroup.push_back(dim++);
    }
    info.reassociation.push_back(reassociationGroup);
    reassociationGroup.clear();
  }
  info.indexMap =
      AffineMap::get(oldDimsToNewDimsMap.size(), indexingMap.getNumSymbols(),
                     newIndexExprs, context);
  return info;
}

LogicalResult linalg::dropUnitDims(RewriterBase &rewriter, GenericOp genericOp,
                                   const ControlDropUnitDims &options) {
  SmallVector<AffineMap> indexingMaps = genericOp.getIndexingMapsArray();
  if (indexingMaps.empty())
    return failure();

  // 1. Check if any of the iteration dimensions are unit-trip count. They will
  //    end up being unit-trip count if they are used to index into a unit-dim
  //    tensor/memref.
  AffineMap invertedMap = inversePermutation(concatAffineMaps(indexingMaps));
  if (!invertedMap) {
    return rewriter.notifyMatchFailure(genericOp,
                                       "invalid indexing maps for operation");
  }
  SmallVector<int64_t> dims = genericOp.getStaticShape();

  // 1a. Get the allowed list of dimensions to drop from the `options`.
  SmallVector<unsigned> allowedUnitDims = options.controlFn(genericOp);
  if (allowedUnitDims.empty()) {
    return rewriter.notifyMatchFailure(
        genericOp, "control function returns no allowed unit dims to prune");
  }
  llvm::SmallDenseSet<unsigned> unitDimsFilter(allowedUnitDims.begin(),
                                               allowedUnitDims.end());
  llvm::SmallDenseSet<unsigned> unitDims;
  for (const auto &expr : enumerate(invertedMap.getResults())) {
    if (AffineDimExpr dimExpr = dyn_cast<AffineDimExpr>(expr.value())) {
      if (dims[dimExpr.getPosition()] == 1 &&
          unitDimsFilter.count(expr.index()))
        unitDims.insert(expr.index());
    }
  }

  // 2. Compute the iterator types of the modified op by dropping the one-trip
  //    count loops.
  SmallVector<utils::IteratorType> newIteratorTypes;
  llvm::SmallDenseMap<unsigned, unsigned> oldDimToNewDimMap;
  SmallVector<AffineExpr> dimReplacements;
  unsigned newDims = 0;
  for (auto [index, attr] :
       llvm::enumerate(genericOp.getIteratorTypesArray())) {
    if (unitDims.count(index)) {
      dimReplacements.push_back(
          getAffineConstantExpr(0, rewriter.getContext()));
    } else {
      newIteratorTypes.push_back(attr);
      oldDimToNewDimMap[index] = newDims;
      dimReplacements.push_back(
          getAffineDimExpr(newDims, rewriter.getContext()));
      newDims++;
    }
  }

  // 3. For each of the operands, find the
  //    - modified affine map to use,
  //    - shape of the operand after the unit dims are dropped,
  //    - reassociation indices used to convert from the original operand type
  //      to the modified operand type (needed only when using reshapes for the
  //      rank reduction strategy).
  //    Note that the indexing maps might need changing even if no unit
  //    dimensions are dropped, to handle cases where `0` is used to access a
  //    unit-extent tensor (see the sketch below). Consider moving this out of
  //    this specific transformation as a stand-alone transformation. Kept here
  //    right now due to legacy.
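  // E.g. (an illustrative sketch): an operand of type tensor<1x?xf32> indexed
  // by (d0) -> (0, d0) keeps its loop, but the leading unit dimension is still
  // collapsed, so the map becomes (d0) -> (d0) on tensor<?xf32>.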
  SmallVector<AffineMap> newIndexingMaps;
  SmallVector<SmallVector<ReassociationIndices>> reassociations;
  SmallVector<SmallVector<int64_t>> targetShapes;
  SmallVector<bool> collapsed;
  auto hasCollapsibleType = [](OpOperand &operand) {
    Type operandType = operand.get().getType();
    if (auto memrefOperandType = dyn_cast_or_null<MemRefType>(operandType)) {
      return memrefOperandType.getLayout().isIdentity();
    }
    if (auto tensorOperandType = dyn_cast<RankedTensorType>(operandType)) {
      return tensorOperandType.getEncoding() == nullptr;
    }
    return false;
  };
  for (OpOperand &opOperand : genericOp->getOpOperands()) {
    auto indexingMap = genericOp.getMatchingIndexingMap(&opOperand);
    ArrayRef<int64_t> shape = genericOp.getShape(&opOperand);
    if (!hasCollapsibleType(opOperand)) {
      AffineMap newIndexingMap = indexingMap.replaceDimsAndSymbols(
          dimReplacements, ArrayRef<AffineExpr>{}, oldDimToNewDimMap.size(), 0);
      newIndexingMaps.push_back(newIndexingMap);
      targetShapes.push_back(llvm::to_vector(shape));
      collapsed.push_back(false);
      reassociations.push_back({});
      continue;
    }
    auto replacementInfo = dropUnitExtentFromOperandMetadata(
        rewriter.getContext(), genericOp, &opOperand, oldDimToNewDimMap,
        dimReplacements);
    reassociations.push_back(replacementInfo.reassociation);
    newIndexingMaps.push_back(replacementInfo.indexMap);
    targetShapes.push_back(replacementInfo.targetShape);
    collapsed.push_back(replacementInfo.indexMap.getNumResults() !=
                        indexingMap.getNumResults());
  }

  // Abort if the indexing maps of the result operation are not invertible
  // (i.e. not legal) or if no dimension was reduced.
  if (newIndexingMaps == indexingMaps ||
      !inversePermutation(concatAffineMaps(newIndexingMaps)))
    return failure();

  Location loc = genericOp.getLoc();
  // 4. For each of the operands, collapse the operand to convert from the
  //    original shape to the shape in the modified operation if needed, either
  //    through use of reshapes or rank-reducing slices as specified in
  //    `options`.
  SmallVector<Value> newOperands;
  for (OpOperand &opOperand : genericOp->getOpOperands()) {
    int64_t idx = opOperand.getOperandNumber();
    if (!collapsed[idx]) {
      newOperands.push_back(opOperand.get());
      continue;
    }
    newOperands.push_back(collapseValue(rewriter, loc, opOperand.get(),
                                        targetShapes[idx], reassociations[idx],
                                        options.rankReductionStrategy));
  }

  // 5. Create the `linalg.generic` operation with the new operands,
  //    indexing maps, iterator types and result types.
  ArrayRef<Value> newInputs =
      ArrayRef<Value>(newOperands).take_front(genericOp.getNumDpsInputs());
  ArrayRef<Value> newOutputs =
      ArrayRef<Value>(newOperands).take_back(genericOp.getNumDpsInits());
  SmallVector<Type> resultTypes;
  resultTypes.reserve(genericOp.getNumResults());
  for (unsigned i : llvm::seq<unsigned>(0, genericOp.getNumResults()))
    resultTypes.push_back(newOutputs[i].getType());
  GenericOp replacementOp =
      rewriter.create<GenericOp>(loc, resultTypes, newInputs, newOutputs,
                                 newIndexingMaps, newIteratorTypes);
  rewriter.inlineRegionBefore(genericOp.getRegion(), replacementOp.getRegion(),
                              replacementOp.getRegion().begin());
  // 5a. Replace `linalg.index` operations that refer to the dropped unit
  //     dimensions.
  replaceUnitDimIndexOps(replacementOp, unitDims, rewriter);

  // 6. If any result type changes, insert a reshape/slice to convert from the
  //    original type to the new type.
  SmallVector<Value> resultReplacements;
  for (auto [index, result] : llvm::enumerate(replacementOp.getResults())) {
    unsigned opOperandIndex = index + replacementOp.getNumDpsInputs();
    Value origDest = genericOp.getDpsInitOperand(index)->get();
    if (!collapsed[opOperandIndex]) {
      resultReplacements.push_back(result);
      continue;
    }
    resultReplacements.push_back(expandValue(rewriter, loc, result, origDest,
                                             reassociations[opOperandIndex],
                                             options.rankReductionStrategy));
  }

  rewriter.replaceOp(genericOp, resultReplacements);
  return success();
}

namespace {
struct DropUnitDims : public OpRewritePattern<GenericOp> {
  DropUnitDims(MLIRContext *context, ControlDropUnitDims options = {},
               PatternBenefit benefit = 1)
      : OpRewritePattern(context, benefit), options(std::move(options)) {}

  LogicalResult matchAndRewrite(GenericOp genericOp,
                                PatternRewriter &rewriter) const override {
    return dropUnitDims(rewriter, genericOp, options);
  }

private:
  ControlDropUnitDims options;
};
} // namespace

//===---------------------------------------------------------------------===//
// Drop dimensions that are unit-extents within tensor operations.
//===---------------------------------------------------------------------===//

namespace {
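/// Drop unit-extent dims from a `tensor.pad` when those dims are not actually
/// padded. E.g. (an illustrative sketch, assuming the reshape strategy):
///
///   %p = tensor.pad %arg0 low[0, 1] high[0, 1] { ... }
///       : tensor<1x5xf32> to tensor<1x7xf32>
///
/// becomes a tensor.collapse_shape of the source to tensor<5xf32>, a 1-D
/// tensor.pad low[1] high[1] producing tensor<7xf32>, and an expansion back
/// to tensor<1x7xf32>.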
struct DropPadUnitDims : public OpRewritePattern<tensor::PadOp> {
  DropPadUnitDims(MLIRContext *context, ControlDropUnitDims options = {},
                  PatternBenefit benefit = 1)
      : OpRewritePattern(context, benefit), options(std::move(options)) {}

  LogicalResult matchAndRewrite(tensor::PadOp padOp,
                                PatternRewriter &rewriter) const override {
    // 1a. Get the allowed list of dimensions to drop from the `options`.
    SmallVector<unsigned> allowedUnitDims = options.controlFn(padOp);
    if (allowedUnitDims.empty()) {
      return rewriter.notifyMatchFailure(
          padOp, "control function returns no allowed unit dims to prune");
    }

    if (padOp.getSourceType().getEncoding()) {
      return rewriter.notifyMatchFailure(
          padOp, "cannot collapse dims of tensor with encoding");
    }

    // Fail for non-constant padding values. The body of the pad could depend
    // on the padding indices and/or properties of the padded tensor, so we
    // conservatively fail here.
    // TODO: Support non-constant padding values.
    Value paddingVal = padOp.getConstantPaddingValue();
    if (!paddingVal) {
      return rewriter.notifyMatchFailure(
          padOp, "unimplemented: non-constant padding value");
    }

    ArrayRef<int64_t> sourceShape = padOp.getSourceType().getShape();
    int64_t padRank = sourceShape.size();

    auto isStaticZero = [](OpFoldResult f) {
      std::optional<int64_t> maybeInt = getConstantIntValue(f);
      return maybeInt && *maybeInt == 0;
    };

    llvm::SmallDenseSet<unsigned> unitDimsFilter(allowedUnitDims.begin(),
                                                 allowedUnitDims.end());
    llvm::SmallDenseSet<unsigned> unitDims;
    SmallVector<int64_t> newShape;
    SmallVector<OpFoldResult> newLowPad;
    SmallVector<OpFoldResult> newHighPad;
    for (const auto [dim, size, low, high] :
         zip_equal(llvm::seq(static_cast<int64_t>(0), padRank), sourceShape,
                   padOp.getMixedLowPad(), padOp.getMixedHighPad())) {
      if (unitDimsFilter.contains(dim) && size == 1 && isStaticZero(low) &&
          isStaticZero(high)) {
        unitDims.insert(dim);
      } else {
        newShape.push_back(size);
        newLowPad.push_back(low);
        newHighPad.push_back(high);
      }
    }

    if (unitDims.empty()) {
      return rewriter.notifyMatchFailure(padOp, "no unit dims to collapse");
    }

    ReassociationIndices reassociationGroup;
    SmallVector<ReassociationIndices> reassociationMap;
    int64_t dim = 0;
    while (dim < padRank && unitDims.contains(dim))
      reassociationGroup.push_back(dim++);
    while (dim < padRank) {
      assert(!unitDims.contains(dim) && "expected non unit-extent");
      reassociationGroup.push_back(dim);
      dim++;
      // Fold all following dimensions that are unit-extent.
      while (dim < padRank && unitDims.contains(dim))
        reassociationGroup.push_back(dim++);
      reassociationMap.push_back(reassociationGroup);
      reassociationGroup.clear();
    }

    Value collapsedSource =
        collapseValue(rewriter, padOp.getLoc(), padOp.getSource(), newShape,
                      reassociationMap, options.rankReductionStrategy);

    auto newPadOp = rewriter.create<tensor::PadOp>(
        padOp.getLoc(), /*result=*/Type(), collapsedSource, newLowPad,
        newHighPad, paddingVal, padOp.getNofold());

    Value dest = padOp.getResult();
    if (options.rankReductionStrategy ==
        ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice) {
      SmallVector<OpFoldResult> expandedSizes;
      int64_t numUnitDims = 0;
      for (auto dim : llvm::seq(static_cast<int64_t>(0), padRank)) {
        if (unitDims.contains(dim)) {
          expandedSizes.push_back(rewriter.getIndexAttr(1));
          numUnitDims++;
          continue;
        }
        expandedSizes.push_back(tensor::getMixedSize(
            rewriter, padOp.getLoc(), newPadOp, dim - numUnitDims));
      }
      dest = rewriter.create<tensor::EmptyOp>(
          padOp.getLoc(), expandedSizes,
          padOp.getResultType().getElementType());
    }

    Value expandedValue =
        expandValue(rewriter, padOp.getLoc(), newPadOp.getResult(), dest,
                    reassociationMap, options.rankReductionStrategy);
    rewriter.replaceOp(padOp, expandedValue);
    return success();
  }

private:
  ControlDropUnitDims options;
};
} // namespace

namespace {
/// Convert `extract_slice` operations to rank-reduced versions.
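/// E.g. (an illustrative sketch):
///
///   %0 = tensor.extract_slice %t[0, 0] [1, 16] [1, 1]
///       : tensor<8x16xf32> to tensor<1x16xf32>
///
/// is rewritten into a rank-reduced extract_slice producing tensor<16xf32>,
/// followed by a tensor.expand_shape back to tensor<1x16xf32>.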
struct RankReducedExtractSliceOp
    : public OpRewritePattern<tensor::ExtractSliceOp> {
  using OpRewritePattern<tensor::ExtractSliceOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType resultType = sliceOp.getType();
    SmallVector<OpFoldResult> targetShape;
    for (auto size : resultType.getShape())
      targetShape.push_back(rewriter.getIndexAttr(size));
    auto reassociation = getReassociationMapForFoldingUnitDims(targetShape);
    if (!reassociation ||
        reassociation->size() == static_cast<size_t>(resultType.getRank()))
      return failure();

    SmallVector<OpFoldResult> offsets = sliceOp.getMixedOffsets();
    SmallVector<OpFoldResult> strides = sliceOp.getMixedStrides();
    SmallVector<OpFoldResult> sizes = sliceOp.getMixedSizes();
    auto rankReducedType = cast<RankedTensorType>(
        tensor::ExtractSliceOp::inferCanonicalRankReducedResultType(
            reassociation->size(), sliceOp.getSourceType(), offsets, sizes,
            strides));

    Location loc = sliceOp.getLoc();
    Value newSlice = rewriter.create<tensor::ExtractSliceOp>(
        loc, rankReducedType, sliceOp.getSource(), offsets, sizes, strides);
    rewriter.replaceOpWithNewOp<tensor::ExpandShapeOp>(
        sliceOp, resultType, newSlice, *reassociation);
    return success();
  }
};

/// Convert `insert_slice` operations to rank-reduced versions.
/// This pattern works with both InsertSliceOp and ParallelInsertSliceOp.
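/// E.g. (an illustrative sketch): inserting a tensor<1x16xf32> source is
/// rewritten to tensor.collapse_shape the source to tensor<16xf32> first and
/// then insert it with the original offsets/sizes/strides, relying on the
/// op's rank-reducing semantics.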
template <typename InsertOpTy>
struct RankReducedInsertSliceOp : public OpRewritePattern<InsertOpTy> {
  using OpRewritePattern<InsertOpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(InsertOpTy insertSliceOp,
                                PatternRewriter &rewriter) const override {
    RankedTensorType sourceType = insertSliceOp.getSourceType();
    SmallVector<OpFoldResult> targetShape;
    for (auto size : sourceType.getShape())
      targetShape.push_back(rewriter.getIndexAttr(size));
    auto reassociation = getReassociationMapForFoldingUnitDims(targetShape);
    if (!reassociation ||
        reassociation->size() == static_cast<size_t>(sourceType.getRank()))
      return failure();

    Location loc = insertSliceOp.getLoc();
    tensor::CollapseShapeOp reshapedSource;
    {
      OpBuilder::InsertionGuard g(rewriter);
      // The only difference between InsertSliceOp and ParallelInsertSliceOp
      // is that the insertion point is just before the ParallelCombiningOp in
      // the parallel case.
      if (std::is_same<InsertOpTy, tensor::ParallelInsertSliceOp>::value)
        rewriter.setInsertionPoint(insertSliceOp->getParentOp());
      reshapedSource = rewriter.create<tensor::CollapseShapeOp>(
          loc, insertSliceOp.getSource(), *reassociation);
    }
    rewriter.replaceOpWithNewOp<InsertOpTy>(
        insertSliceOp, reshapedSource, insertSliceOp.getDest(),
        insertSliceOp.getMixedOffsets(), insertSliceOp.getMixedSizes(),
        insertSliceOp.getMixedStrides());
    return success();
  }
};
} // namespace

/// Patterns that are used to canonicalize the use of unit-extent dims for
/// broadcasting.
static void
populateFoldUnitExtentDimsViaReshapesPatterns(RewritePatternSet &patterns,
                                              ControlDropUnitDims &options) {
  auto *context = patterns.getContext();
  patterns.add<DropUnitDims>(context, options);
  patterns.add<DropPadUnitDims>(context, options);
  // TODO: Patterns unrelated to unit dim folding should be factored out.
  patterns.add<RankReducedExtractSliceOp,
               RankReducedInsertSliceOp<tensor::InsertSliceOp>,
               RankReducedInsertSliceOp<tensor::ParallelInsertSliceOp>>(
      context);
  linalg::FillOp::getCanonicalizationPatterns(patterns, context);
  tensor::CollapseShapeOp::getCanonicalizationPatterns(patterns, context);
  tensor::EmptyOp::getCanonicalizationPatterns(patterns, context);
  tensor::ExpandShapeOp::getCanonicalizationPatterns(patterns, context);
  tensor::populateFoldTensorEmptyPatterns(patterns);
  memref::populateResolveRankedShapedTypeResultDimsPatterns(patterns);
  memref::populateResolveShapedTypeResultDimsPatterns(patterns);
}

static void
populateFoldUnitExtentDimsViaSlicesPatterns(RewritePatternSet &patterns,
                                            ControlDropUnitDims &options) {
  auto *context = patterns.getContext();
  options.rankReductionStrategy =
      ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice;
  patterns.add<DropUnitDims>(context, options);
  patterns.add<DropPadUnitDims>(context, options);
  // TODO: Patterns unrelated to unit dim folding should be factored out.
  linalg::FillOp::getCanonicalizationPatterns(patterns, context);
  tensor::EmptyOp::getCanonicalizationPatterns(patterns, context);
  tensor::populateFoldTensorEmptyPatterns(patterns);
  memref::populateResolveRankedShapedTypeResultDimsPatterns(patterns);
  memref::populateResolveShapedTypeResultDimsPatterns(patterns);
}

void mlir::linalg::populateFoldUnitExtentDimsPatterns(
    RewritePatternSet &patterns, linalg::ControlDropUnitDims &options) {
  if (options.rankReductionStrategy ==
      linalg::ControlDropUnitDims::RankReductionStrategy::ExtractInsertSlice) {
    populateFoldUnitExtentDimsViaSlicesPatterns(patterns, options);
  } else if (options.rankReductionStrategy ==
             linalg::ControlDropUnitDims::RankReductionStrategy::
                 ReassociativeReshape) {
    populateFoldUnitExtentDimsViaReshapesPatterns(patterns, options);
  }
}

void mlir::linalg::populateMoveInitOperandsToInputPattern(
    RewritePatternSet &patterns) {
  patterns.add<MoveInitOperandsToInput>(patterns.getContext());
}

namespace {
/// Pass that removes unit-extent dims within generic ops.
struct LinalgFoldUnitExtentDimsPass
    : public impl::LinalgFoldUnitExtentDimsPassBase<
          LinalgFoldUnitExtentDimsPass> {
  using impl::LinalgFoldUnitExtentDimsPassBase<
      LinalgFoldUnitExtentDimsPass>::LinalgFoldUnitExtentDimsPassBase;
  void runOnOperation() override {
    Operation *op = getOperation();
    MLIRContext *context = op->getContext();
    RewritePatternSet patterns(context);
    ControlDropUnitDims options;
    if (useRankReducingSlices) {
      options.rankReductionStrategy = linalg::ControlDropUnitDims::
          RankReductionStrategy::ExtractInsertSlice;
    }
    linalg::populateFoldUnitExtentDimsPatterns(patterns, options);
    populateMoveInitOperandsToInputPattern(patterns);
    (void)applyPatternsAndFoldGreedily(op, std::move(patterns));
  }
};
} // namespace