Utils.cpp
//===- Utils.cpp - Utilities to support the Tensor dialect ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for the Tensor dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Tensor/Utils/Utils.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"

using namespace mlir;
using namespace mlir::tensor;

PadOp mlir::tensor::createPadHighOp(RankedTensorType resType, Value source,
                                    Value pad, bool nofold, Location loc,
                                    OpBuilder &b, ValueRange dynOutDims) {

  // This assumption simplifies the following logic without limiting what's
  // required _today_. If needed, we can relax it in the future.
  assert(((resType.getNumDynamicDims() == dynOutDims.size()) ||
          dynOutDims.empty()) &&
         "Either none or all output dynamic dims must be specified!");

  // Init "low" and "high" padding values ("low" is kept as is, "high" is
  // computed below).
  SmallVector<OpFoldResult> low(resType.getRank(), b.getIndexAttr(0));
  SmallVector<OpFoldResult> high(resType.getRank(), b.getIndexAttr(0));

  size_t outDimIdx = 0;

  for (const auto [idx, val] : enumerate(resType.getShape())) {
    bool isDimDynamic = ShapedType::isDynamic(val);
    bool updatePadHigh = !isDimDynamic || !dynOutDims.empty();

    // Keep the default padding width (i.e. "0") when the output dim is dynamic
    // and no actual output sizes have been provided.
    if (!updatePadHigh)
      continue;

    // Compute the padding width: resDim - sourceDim.
    AffineExpr d0, d1;
    bindDims(b.getContext(), d0, d1);
    OpFoldResult sourceDim = tensor::getMixedSize(b, loc, source, idx);
    OpFoldResult outDim = isDimDynamic ? OpFoldResult(dynOutDims[outDimIdx++])
                                       : OpFoldResult(b.getIndexAttr(val));

    high[idx] = affine::makeComposedFoldedAffineApply(b, loc, d0 - d1,
                                                      {outDim, sourceDim});
  }
  return b.create<PadOp>(loc, resType, source, low, high, pad, nofold);
}
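// Illustrative sketch (not part of the upstream file): for a static source of
// type tensor<3x4xf32> padded to the static result type tensor<5x8xf32>, the
// "high" padding folds to [2, 4] and the builder above produces IR roughly
// equivalent to the following, where %source and %pad_value are hypothetical
// SSA names for the `source` and `pad` operands:
//
//   %padded = tensor.pad %source low[0, 0] high[2, 4] {
//   ^bb0(%i: index, %j: index):
//     tensor.yield %pad_value : f32
//   } : tensor<3x4xf32> to tensor<5x8xf32>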

SmallVector<Value> mlir::tensor::createDynamicDimValues(OpBuilder &b,
                                                        Location loc,
                                                        Value rankedTensor) {
  auto tensorTy = cast<RankedTensorType>(rankedTensor.getType());
  SmallVector<Value> dynamicDims;
  for (const auto &en : llvm::enumerate(tensorTy.getShape())) {
    if (en.value() == ShapedType::kDynamic)
      dynamicDims.push_back(
          b.create<tensor::DimOp>(loc, rankedTensor, en.index()));
  }
  return dynamicDims;
}
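// Illustrative sketch (not part of the upstream file): for a value %t of type
// tensor<4x?x?x8xf32>, this helper returns two values, materialized roughly
// as:
//
//   %d1 = tensor.dim %t, %c1 : tensor<4x?x?x8xf32>
//   %d2 = tensor.dim %t, %c2 : tensor<4x?x?x8xf32>
//
// where %c1 and %c2 are index constants for the dynamic dimensions 1 and 2.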

FailureOr<RankedTensorType>
mlir::tensor::computeTransposedType(RankedTensorType rankedTensorType,
                                    ArrayRef<int64_t> transposeVector) {
  if (transposeVector.empty())
    return rankedTensorType;

  if (!isPermutationVector(transposeVector) ||
      transposeVector.size() !=
          static_cast<size_t>(rankedTensorType.getRank()))
    return failure();

  SmallVector<int64_t> transposedShape(rankedTensorType.getShape());
  applyPermutationToVector(transposedShape, transposeVector);

  using RTTBuilder = RankedTensorType::Builder;
  RankedTensorType transposedTensorType =
      RTTBuilder(rankedTensorType).setShape(transposedShape);
  return transposedTensorType;
}
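// Illustrative sketch (not part of the upstream file): with
// rankedTensorType = tensor<2x3x?xf32> and transposeVector = {1, 0, 2}, the
// permuted shape is {shape[1], shape[0], shape[2]}, so the returned type is
// tensor<3x2x?xf32>. A non-permutation vector such as {0, 0, 1}, or one whose
// length differs from the rank, yields failure().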

CollapseShapeOp
mlir::tensor::dropGivenUnitDims(OpBuilder &b, Location loc, Value src,
                                const llvm::SmallBitVector &dropDims) {
  auto srcType = cast<ShapedType>(src.getType());
  int64_t rank = srcType.getRank();
  assert(rank == static_cast<int64_t>(dropDims.size()) &&
         "dropDims dimension does not match src tensor rank");
  assert(llvm::all_of(
             dropDims.set_bits(),
             [&](unsigned dim) { return srcType.getShape()[dim] == 1; }) &&
         "Dropping non unit dimension");
  // Computed reassociation map for the corresponding tensor.collapse_shape.
  SmallVector<ReassociationIndices> reassocMaps;
  // Current reassociation group to add dropped dimension to.

  int64_t nextDimToGroup = 0;
  llvm::SmallBitVector keptDims(dropDims);
  keptDims.flip();
  int64_t lastSetBit = keptDims.find_last();
  for (int64_t setBit : keptDims.set_bits()) {
    // Group consecutive dropped dimensions with the next non-dropped dimension.
    // If this is the last set dimension, also group all subsequent dropped
    // dimensions, if any.
    int64_t upTo = setBit == lastSetBit ? rank - 1 : setBit;
    auto seq = llvm::seq_inclusive(nextDimToGroup, upTo);
    reassocMaps.emplace_back(llvm::make_range(seq.begin(), seq.end()));
    nextDimToGroup = setBit + 1;
  }
  return b.create<tensor::CollapseShapeOp>(loc, src, reassocMaps);
}
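// Illustrative sketch (not part of the upstream file): for src of type
// tensor<1x4x1x8x1xf32> and dropDims marking dimensions {0, 2, 4}, the kept
// dimensions are {1, 3}. Dropped dimension 0 groups with kept dimension 1, and
// the trailing dropped dimension 4 joins the last group, giving the
// reassociation [[0, 1], [2, 3, 4]] and IR roughly equivalent to:
//
//   %collapsed = tensor.collapse_shape %src [[0, 1], [2, 3, 4]]
//       : tensor<1x4x1x8x1xf32> into tensor<4x8xf32>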

bool mlir::tensor::isCastLikeInsertSliceOp(InsertSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t srcDim = 0;
  RankedTensorType resultType = op.getDestType();
  // Source dims and destination dims (apart from dropped dims) must have the
  // same size.
  for (int64_t resultDim = 0; resultDim < resultType.getRank(); ++resultDim) {
    if (droppedDims.test(resultDim)) {
      // InsertSlice may expand unit dimensions that result from inserting a
      // size-1 slice into a non-size-1 result dimension.
      if (resultType.getDimSize(resultDim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), srcDim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++srcDim;
  }

  return true;
}
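// Illustrative sketch (not part of the upstream file): the following
// insert_slice only rank-extends the source with a leading unit dimension, so
// the helper above would classify it as cast-like:
//
//   %r = tensor.insert_slice %src into %dest[0, 0, 0] [1, 5, 6] [1, 1, 1]
//       : tensor<5x6xf32> into tensor<1x5x6xf32>
//
// If %dest were tensor<2x5x6xf32> instead, the dropped destination dimension
// would have size 2 and the helper would return false.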

bool mlir::tensor::isCastLikeExtractSliceOp(ExtractSliceOp op) {
  llvm::SmallBitVector droppedDims = op.getDroppedDims();
  int64_t resultDim = 0;
  // Source dims and result dims (apart from dropped dims) must have the same
  // size.
  RankedTensorType sourceType = op.getSourceType();
  for (int64_t dim = 0, e = sourceType.getRank(); dim < e; ++dim) {
    if (droppedDims.test(dim)) {
      // ExtractSlice may drop unit dimensions that result from taking a size-1
      // slice from a non-size-1 source dimension.
      if (sourceType.getDimSize(dim) != 1)
        return false;
      continue;
    }
    FailureOr<bool> equalDimSize = ValueBoundsConstraintSet::areEqual(
        {op.getSource(), dim}, {op.getResult(), resultDim});
    if (failed(equalDimSize) || !*equalDimSize)
      return false;
    ++resultDim;
  }

  return true;
}
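// Illustrative sketch (not part of the upstream file): the following
// extract_slice only drops a leading unit dimension of the source, so the
// helper above would classify it as cast-like:
//
//   %r = tensor.extract_slice %src[0, 0, 0] [1, 5, 6] [1, 1, 1]
//       : tensor<1x5x6xf32> to tensor<5x6xf32>
//
// Extracting the same [1, 5, 6] slice from a tensor<4x5x6xf32> source would
// not be cast-like, because the dropped source dimension has size 4.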