Utils.cpp
//===- Utils.cpp - Utilities to support the Tensor dialect ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for the Tensor dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tensor/Utils/Utils.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "llvm/ADT/SmallBitVector.h"

using namespace mlir;
using namespace mlir::tensor;

PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
                                    Value pad, bool nofold, Location loc,
                                    OpBuilder &b, ValueRange dynOutDims) {

  // This assumption simplifies the following logic without limiting what's
  // required _today_. If needed, we can relax it in the future.
  assert(((resType.getNumDynamicDims() == dynOutDims.size()) ||
          dynOutDims.empty()) &&
         "Either none or all output dynamic dims must be specified!");

  // Init "low" and "high" padding values ("low" is kept as is, "high" is
  // computed below).
  SmallVector<OpFoldResult> low(resType.getRank(), b.getIndexAttr(0));
  SmallVector<OpFoldResult> high(resType.getRank(), b.getIndexAttr(0));

  size_t outDimIdx = 0;

  for (const auto [idx, val] : enumerate(resType.getShape())) {
    bool isDimDynamic = ShapedType::isDynamic(val);
    bool updatePadHigh = !isDimDynamic || !dynOutDims.empty();

    // Keep the default padding width (i.e. "0") when the output dim is dynamic
    // and no actual output sizes have been provided.
    if (!updatePadHigh)
      continue;

    // Compute the padding width: resDim - sourceDim.
    AffineExpr d0, d1;
    bindDims(b.getContext(), d0, d1);
    OpFoldResult sourceDim = tensor::getMixedSize(b, loc, source, idx);
    OpFoldResult outDim = isDimDynamic ? OpFoldResult(dynOutDims[outDimIdx++])
                                       : OpFoldResult(b.getIndexAttr(val));

    high[idx] = affine::makeComposedFoldedAffineApply(b, loc, d0 - d1,
                                                      {outDim, sourceDim});
  }
  return PadOp::create(b, loc, resType, source, low, high, pad, nofold);
}
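
// For example, padding a `tensor<4x?xf32>` source to a `tensor<8x?xf32>`
// result with dynOutDims = {%d} yields low = [0, 0] and
// high = [4, %d - tensor.dim(%source, 1)]; the dynamic entry is built (and
// folded where possible) by makeComposedFoldedAffineApply. The SSA names used
// here are illustrative only.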

SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
                                                         Location loc,
                                                         Value rankedTensor) {
  auto tensorTy = cast<RankedTensorType>(rankedTensor.getType());
  SmallVector<Value> dynamicDims;
  for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
    if (en.value() == ShapedType::kDynamic)
      dynamicDims.push_back(
          tensor::DimOp::create(b, loc, rankedTensor, en.index()));
  }
  return dynamicDims;
}
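
// For example, given a value of type `tensor<?x8x?xf32>`, this returns the
// results of two newly created `tensor.dim` ops, for dimensions 0 and 2.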

FailureOr<RankedTensorType>
mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
                                    ArrayRef<int64_t> transposeVector) {
  if (transposeVector.empty())
    return rankedTensorType;

  if (!isPermutationVector(transposeVector) ||
      transposeVector.size() != static_cast<size_t>(rankedTensorType.getRank()))
    return failure();

  SmallVector<int64_t> transposedShape(rankedTensorType.getShape());
  applyPermutationToVector(transposedShape, transposeVector);

  using RTTBuilder = RankedTensorType::Builder;
  RankedTensorType transposedTensorType =
      RTTBuilder(rankedTensorType).setShape(transposedShape);
  return transposedTensorType;
}
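
// For example, `tensor<2x3x4xf32>` with transposeVector = [1, 0, 2] becomes
// `tensor<3x2x4xf32>`, while a vector that is not a permutation of [0, rank)
// (or has the wrong length) yields failure().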

CollapseShapeOp
mlir::tensor::dropGivenUnitDims(OpBuilder &b, Location loc, Value src,
                                const llvm::SmallBitVector &dropDims) {
  auto srcType = cast<ShapedType>(src.getType());
  int64_t rank = srcType.getRank();
  assert(rank == static_cast<int64_t>(dropDims.size()) &&
         "dropDims dimension does not match src tensor rank");
  assert(llvm::all_of(
             dropDims.set_bits(),
             [&](unsigned dim) { return srcType.getShape()[dim] == 1; }) &&
         "Dropping non unit dimension");
  // Computed reassociation map for the corresponding tensor.collapse_shape.
  SmallVector<ReassociationIndices> reassocMaps;
  // Current reassociation group to add dropped dimension to.

  int64_t nextDimToGroup = 0;
  llvm::SmallBitVector keptDims(dropDims);
  keptDims.flip();
  int64_t lastSetBit = keptDims.find_last();
  for (int64_t setBit : keptDims.set_bits()) {
    // Group consecutive dropped dimension with the next non-dropped dimension.
    // If this is the last set dimension, also group all subsequent dropped
    // dimension, if any.
    int64_t upTo = setBit == lastSetBit ? rank - 1 : setBit;
    auto seq = llvm::seq_inclusive(nextDimToGroup, upTo);
    reassocMaps.emplace_back(llvm::make_range(seq.begin(), seq.end()));
    nextDimToGroup = setBit + 1;
  }
  return tensor::CollapseShapeOp::create(b, loc, src, reassocMaps);
}
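
// For example, dropping the unit dims {0, 2} of a `tensor<1x8x1x16xf32>`
// source produces the reassociation groups [[0, 1], [2, 3]] and a
// tensor.collapse_shape whose result type is `tensor<8x16xf32>`.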

bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t srcDim = 0;
  RankedTensorType resultType = op.getDestType();
  // Source dims and destination dims (apart from dropped dims) must have the
  // same size.
  for (int64_t resultDim = 0; resultDim < resultType.getRank(); ++resultDim) {
    if (droppedDims.test(resultDim)) {
      // InsertSlice may expand unit dimensions that result from inserting a
      // size-1 slice into a non-size-1 result dimension.
      if (resultType.getDimSize(resultDim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), srcDim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++srcDim;
  }

  return true;
}
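
// For example, inserting a `tensor<16xf32>` slice into a `tensor<1x16xf32>`
// destination with offsets [0, 0], sizes [1, 16] and unit strides is
// cast-like (it merely rank-extends the source by a unit dim); inserting the
// same slice into a `tensor<1x32xf32>` destination is not, since the
// non-dropped sizes differ.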

bool mlir::tensor::isCastLikeExtractSliceOp(ExtractSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t resultDim = 0;
  // Source dims and result dims (apart from dropped dims) must have the same
  // size.
  RankedTensorType sourceType = op.getSourceType();
  for (int64_t dim = 0, e = sourceType.getRank(); dim < e; ++dim) {
    if (droppedDims.test(dim)) {
      // ExtractSlice may drop unit dimensions that result from taking a size-1
      // slice from a non-size-1 source dimension.
      if (sourceType.getDimSize(dim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), dim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++resultDim;
  }

  return true;
}
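
// For example,
//   %r = tensor.extract_slice %src[0, 0] [1, 16] [1, 1]
//        : tensor<1x16xf32> to tensor<16xf32>
// is cast-like (it only drops the unit dim), whereas extracting sizes [1, 8]
// from the same source is not, because the kept dimension shrinks from 16 to
// 8. The SSA names are illustrative only.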