//===- SparseTensorType.h - Wrapper around RankedTensorType -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines the `SparseTensorType` wrapper class.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_
#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_

#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"

namespace mlir {
namespace sparse_tensor {

//===----------------------------------------------------------------------===//
/// A wrapper around `RankedTensorType`, which has three goals:
///
/// (1) To provide a uniform API for querying aspects of sparse-tensor
/// types; in particular, to make the "dimension" vs "level" distinction
/// overt (i.e., explicit everywhere). Thus, throughout the sparsifier
/// this class should be preferred over using `RankedTensorType` or
/// `ShapedType` directly, since the methods of the latter do not make
/// the "dimension" vs "level" distinction overt.
///
/// (2) To provide a uniform abstraction over both sparse-tensor
/// types (i.e., `RankedTensorType` with `SparseTensorEncodingAttr`)
/// and dense-tensor types (i.e., `RankedTensorType` without an encoding).
/// That is, we want to manipulate dense-tensor types using the same API
/// that we use for manipulating sparse-tensor types; both to keep the
/// "dimension" vs "level" distinction overt, and to avoid needing to
/// handle certain cases specially in the sparsifier.
///
/// (3) To provide uniform handling of "defaults". In particular
/// this means that dense-tensors should always return the same answers
/// as sparse-tensors with a default encoding. But it additionally means
/// that the answers should be normalized, so that there's no way to
/// distinguish between non-provided data (which is filled in by default)
/// vs explicitly-provided data which equals the defaults.
///
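/// For example (illustrative): given a CSC-style encoding whose dimToLvl
/// map is `(i, j) -> (j, i)`, a tensor of dimension-shape 3x4 has
/// level-shape 4x3; the "dim" methods below answer questions about the
/// former, while the "lvl" methods answer questions about the latter.
///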
class SparseTensorType {
public:
  // We memoize `lvlRank`, `dimToLvl`, and `lvlToDim` to avoid repeating
  // the conditionals throughout the rest of the class.
  SparseTensorType(RankedTensorType rtp)
      : rtp(rtp), enc(getSparseTensorEncoding(rtp)),
        lvlRank(enc ? enc.getLvlRank() : getDimRank()),
        dimToLvl(enc.isIdentity() ? AffineMap() : enc.getDimToLvl()),
        lvlToDim(enc.isIdentity() ? AffineMap() : enc.getLvlToDim()) {
    assert(rtp && "got null RankedTensorType");
    assert((!isIdentity() || getDimRank() == lvlRank) && "Rank mismatch");
  }

  SparseTensorType(ShapedType stp, SparseTensorEncodingAttr enc)
      : SparseTensorType(
            RankedTensorType::get(stp.getShape(), stp.getElementType(), enc)) {}

  // TODO: remove?
  SparseTensorType(SparseTensorEncodingAttr enc)
      : SparseTensorType(RankedTensorType::get(
            SmallVector<Size>(enc.getDimRank(), ShapedType::kDynamic),
            Float32Type::get(enc.getContext()), enc)) {}

  SparseTensorType &operator=(const SparseTensorType &) = delete;
  SparseTensorType(const SparseTensorType &) = default;

  //
  // Factory methods to construct a new `SparseTensorType`
  // with the same dimension-shape and element type.
  //
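  // For instance (illustrative), given an `stt` that carries a sparse
  // encoding, these can be chained to derive a simplified variant which
  // keeps the dimension-shape and element type but drops the custom
  // mapping and the overhead bitwidths:
  //   SparseTensorType simplified = stt.withoutDimToLvl().withoutBitWidths();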

  SparseTensorType withEncoding(SparseTensorEncodingAttr newEnc) const {
    return SparseTensorType(rtp, newEnc);
  }

  SparseTensorType withDimToLvl(AffineMap dimToLvl) const {
    return withEncoding(enc.withDimToLvl(dimToLvl));
  }

  SparseTensorType withDimToLvl(SparseTensorEncodingAttr dimToLvlEnc) const {
    return withEncoding(enc.withDimToLvl(dimToLvlEnc));
  }

  SparseTensorType withDimToLvl(const SparseTensorType &dimToLvlSTT) const {
    return withDimToLvl(dimToLvlSTT.getEncoding());
  }

  SparseTensorType withoutDimToLvl() const {
    return withEncoding(enc.withoutDimToLvl());
  }

  SparseTensorType withBitWidths(unsigned posWidth, unsigned crdWidth) const {
    return withEncoding(enc.withBitWidths(posWidth, crdWidth));
  }

  SparseTensorType withoutBitWidths() const {
    return withEncoding(enc.withoutBitWidths());
  }

  SparseTensorType
  withDimSlices(ArrayRef<SparseTensorDimSliceAttr> dimSlices) const {
    return withEncoding(enc.withDimSlices(dimSlices));
  }

  SparseTensorType withoutDimSlices() const {
    return withEncoding(enc.withoutDimSlices());
  }

  /// Allow implicit conversion to `RankedTensorType`, `ShapedType`,
  /// and `Type`. These are implicit to help alleviate the impedance
  /// mismatch for code that has not been converted to use `SparseTensorType`
  /// directly. Once more uses have been converted to `SparseTensorType`,
  /// we may want to make these explicit instead.
  ///
  /// WARNING: This user-defined-conversion method causes overload
  /// ambiguity whenever passing a `SparseTensorType` directly to a
  /// function which is overloaded to accept either `Type` or `TypeRange`.
  /// In particular, this includes `RewriterBase::replaceOpWithNewOp<OpTy>`
  /// and `OpBuilder::create<OpTy>` whenever the `OpTy::build` is overloaded
  /// thus. This happens because the `TypeRange<T>(T&&)` ctor is implicit
  /// as well, and there's no SFINAE we can add to this method that would
  /// block subsequent application of that ctor. The only way to fix the
  /// overload ambiguity is to avoid *implicit* conversion at the callsite:
  /// e.g., by using `static_cast` to make the conversion explicit, by
  /// assigning the `SparseTensorType` to a temporary variable of the
  /// desired type, etc.
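  /// For example (illustrative), an otherwise-ambiguous callsite can be
  /// written as:
  ///   rewriter.replaceOpWithNewOp<SomeOp>(
  ///       op, static_cast<RankedTensorType>(stt), adaptor.getOperands());
  /// where `SomeOp`, `stt`, and `adaptor` are placeholders for the actual
  /// op, wrapper, and conversion adaptor at hand.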
  //
  // NOTE: We implement this as a single templated user-defined-conversion
  // function to avoid ambiguity problems when the desired result is `Type`
  // (since both `RankedTensorType` and `ShapedType` can be implicitly
  // converted to `Type`).
  template <typename T, typename = std::enable_if_t<
                            std::is_convertible_v<RankedTensorType, T>>>
  /*implicit*/ operator T() const {
    return rtp;
  }

  /// Explicitly convert to `RankedTensorType`. This method is
  /// a convenience for resolving overload-ambiguity issues with
  /// implicit conversion.
  RankedTensorType getRankedTensorType() const { return rtp; }

  bool operator==(const SparseTensorType &other) const {
    // All other fields are derived from `rtp` and therefore don't need
    // to be checked.
    return rtp == other.rtp;
  }

  bool operator!=(const SparseTensorType &other) const {
    return !(*this == other);
  }

  MLIRContext *getContext() const { return rtp.getContext(); }

  Type getElementType() const { return rtp.getElementType(); }

  SparseTensorEncodingAttr getEncoding() const { return enc; }

  //
  // SparseTensorEncodingAttr delegators
  //

  /// Returns true for tensors which have an encoding, and false for
  /// those which do not. Therefore tensors with an all-dense encoding
  /// return true.
  bool hasEncoding() const { return static_cast<bool>(enc); }

  /// Returns true for tensors where every level is dense.
  /// (This is always true for dense-tensors.)
  bool isAllDense() const { return enc.isAllDense(); }

  /// Returns true for tensors where every level is ordered.
  /// (This is always true for dense-tensors.)
  bool isAllOrdered() const { return enc.isAllOrdered(); }

  /// Translates between level / dimension coordinate space.
  ValueRange translateCrds(OpBuilder &builder, Location loc, ValueRange crds,
                           CrdTransDirectionKind dir) const {
    return enc.translateCrds(builder, loc, crds, dir);
  }

  /// Returns true if the dimToLvl mapping is a permutation.
  /// (This is always true for dense-tensors.)
  bool isPermutation() const { return enc.isPermutation(); }

  /// Returns true if the dimToLvl mapping is the identity.
  /// (This is always true for dense-tensors.)
  bool isIdentity() const { return enc.isIdentity(); }

  //
  // Other methods.
  //

  /// Returns the dimToLvl mapping (or the null-map for the identity).
  /// If you intend to compare the results of this method for equality,
  /// see `hasSameDimToLvl` instead.
  AffineMap getDimToLvl() const { return dimToLvl; }

  /// Returns the lvlToDim mapping (or the null-map for the identity).
  AffineMap getLvlToDim() const { return lvlToDim; }

  /// Returns the dimToLvl mapping, where the identity map is expanded out
  /// into a full `AffineMap`. This method is provided as a convenience,
  /// but for most purposes other methods (`isIdentity`, `getDimToLvl`,
  /// etc) will be more helpful.
  AffineMap getExpandedDimToLvl() const {
    return dimToLvl
               ? dimToLvl
               : AffineMap::getMultiDimIdentityMap(getDimRank(), getContext());
  }

  /// Returns true iff the two types have the same mapping. This method
  /// takes care to handle identity maps properly, so it should be preferred
  /// over using `getDimToLvl` followed by `AffineMap::operator==`.
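  /// E.g. (illustrative): a 2-D and a 3-D type that both use the identity
  /// mapping both return the null map from `getDimToLvl`, so comparing those
  /// maps directly would wrongly report a match; this method also checks
  /// the ranks.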
  bool hasSameDimToLvl(const SparseTensorType &other) const {
    // If the maps are the identity, then we need to check the rank
    // to be sure they're the same size identity. (And since identity
    // means dimRank==lvlRank, we use lvlRank as a minor optimization.)
    return isIdentity() ? (other.isIdentity() && lvlRank == other.lvlRank)
                        : (dimToLvl == other.dimToLvl);
  }

  /// Returns the dimension-rank.
  Dimension getDimRank() const { return rtp.getRank(); }

  /// Returns the level-rank.
  Level getLvlRank() const { return lvlRank; }

  /// Returns the dimension-shape.
  ArrayRef<Size> getDimShape() const { return rtp.getShape(); }

  /// Returns the Level-shape.
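  /// For example (illustrative): under a 2x2 block-sparse (BSR-like)
  /// mapping `(i, j) -> (i floordiv 2, j floordiv 2, i mod 2, j mod 2)`,
  /// a dimension-shape of 8x8 translates to the level-shape 4x4x2x2.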
  SmallVector<Size> getLvlShape() const {
    return getEncoding().tranlateShape(getDimShape(),
                                       CrdTransDirectionKind::dim2lvl);
  }

  /// Returns the type with an identity mapping.
  RankedTensorType getDemappedType() const {
    return RankedTensorType::get(getLvlShape(), getElementType(),
                                 enc.withoutDimToLvl());
  }

  /// Safely looks up the requested dimension-DynSize. If you intend
  /// to check the result with `ShapedType::isDynamic`, then see the
  /// `getStaticDimSize` method instead.
  Size getDynamicDimSize(Dimension d) const {
    assert(d < getDimRank() && "Dimension is out of bounds");
    return getDimShape()[d];
  }

  /// Returns true if no dimension has dynamic size.
  bool hasStaticDimShape() const { return rtp.hasStaticShape(); }

  /// Returns true if any dimension has dynamic size.
  bool hasDynamicDimShape() const { return !hasStaticDimShape(); }

  /// Returns true if the given dimension has dynamic size. If you
  /// intend to call `getDynamicDimSize` based on the result, then see
  /// the `getStaticDimSize` method instead.
  bool isDynamicDim(Dimension d) const {
    // We don't use `rtp.isDynamicDim(d)` because we want the
    // OOB error message to be consistent with `getDynamicDimSize`.
    return ShapedType::isDynamic(getDynamicDimSize(d));
  }

  /// Returns the number of dimensions which have dynamic sizes.
  /// The return type is `int64_t` to maintain consistency with
  /// `ShapedType::Trait<T>::getNumDynamicDims`.
  int64_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); }

  ArrayRef<LevelType> getLvlTypes() const { return enc.getLvlTypes(); }
  LevelType getLvlType(Level l) const {
    // This OOB check is for dense-tensors, since this class knows
    // their lvlRank (whereas STEA::getLvlType will/can only check
    // OOB for sparse-tensors).
    assert(l < lvlRank && "Level out of bounds");
    return enc.getLvlType(l);
  }

  // We can't just delegate these, since we want to use this class's
  // `getLvlType` method instead of STEA's.
  bool isDenseLvl(Level l) const { return isDenseLT(getLvlType(l)); }
  bool isCompressedLvl(Level l) const { return isCompressedLT(getLvlType(l)); }
  bool isLooseCompressedLvl(Level l) const {
    return isLooseCompressedLT(getLvlType(l));
  }
  bool isSingletonLvl(Level l) const { return isSingletonLT(getLvlType(l)); }
  bool is2OutOf4Lvl(Level l) const { return is2OutOf4LT(getLvlType(l)); }
  bool isOrderedLvl(Level l) const { return isOrderedLT(getLvlType(l)); }
  bool isUniqueLvl(Level l) const { return isUniqueLT(getLvlType(l)); }
  bool isWithPos(Level l) const { return isWithPosLT(getLvlType(l)); }
  bool isWithCrd(Level l) const { return isWithCrdLT(getLvlType(l)); }

  /// Returns the coordinate-overhead bitwidth, defaulting to zero.
  unsigned getCrdWidth() const { return enc ? enc.getCrdWidth() : 0; }

  /// Returns the position-overhead bitwidth, defaulting to zero.
  unsigned getPosWidth() const { return enc ? enc.getPosWidth() : 0; }

  /// Returns the coordinate-overhead MLIR type, defaulting to `IndexType`.
  Type getCrdType() const {
    if (getCrdWidth())
      return IntegerType::get(getContext(), getCrdWidth());
    return IndexType::get(getContext());
  }

  /// Returns the position-overhead MLIR type, defaulting to `IndexType`.
  Type getPosType() const {
    if (getPosWidth())
      return IntegerType::get(getContext(), getPosWidth());
    return IndexType::get(getContext());
  }
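  // E.g. (illustrative): with `crdWidth = 32` and `posWidth = 0`,
  // `getCrdType()` returns `i32` while `getPosType()` returns `index`.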

  /// Returns true iff this sparse tensor type has a trailing
  /// COO region starting at the given level. By default, it
  /// tests for a unique COO type at top level.
  bool isCOOType(Level startLvl = 0, bool isUnique = true) const;

  /// Returns the starting level of this sparse tensor type for a
  /// trailing COO region that spans **at least** two levels. If
  /// no such COO region is found, then returns the level-rank.
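  /// For example (illustrative): for a 2-D type with levels
  /// (compressed(nonunique), singleton), the trailing COO region starts at
  /// level 0, so this returns 0; for a CSR-like type (dense, compressed),
  /// there is no such region and this returns the level-rank.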
  Level getCOOStart() const;

  /// Returns [un]ordered COO type for this sparse tensor type.
  RankedTensorType getCOOType(bool ordered) const;

private:
  // These two must be const, to ensure coherence of the memoized fields.
  const RankedTensorType rtp;
  const SparseTensorEncodingAttr enc;
  // Memoized to avoid frequent redundant conditionals.
  const Level lvlRank;
  const AffineMap dimToLvl;
  const AffineMap lvlToDim;
};

/// Convenience methods to obtain a SparseTensorType from a Value.
inline SparseTensorType getSparseTensorType(Value val) {
  return SparseTensorType(cast<RankedTensorType>(val.getType()));
}
inline std::optional<SparseTensorType> tryGetSparseTensorType(Value val) {
  if (auto rtp = dyn_cast<RankedTensorType>(val.getType()))
    return SparseTensorType(rtp);
  return std::nullopt;
}
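// Example usage (illustrative): `val` may be any SSA value; prefer the `try`
// variant when it is not statically known to be a ranked tensor:
//   if (auto stt = tryGetSparseTensorType(val); stt && stt->hasEncoding())
//     /*sparse-specific handling*/;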

} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_