//===- SparseTensorType.h - Wrapper around RankedTensorType -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This header defines the `SparseTensorType` wrapper class.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_
#define MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_

#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"

namespace mlir {
namespace sparse_tensor {

/// A simple structure that encodes a range of levels in a sparse tensor
/// that forms a COO segment.
struct COOSegment {
  std::pair<Level, Level> lvlRange; // [low, high)
  bool isSoA;

  bool isAoS() const { return !isSoA; }
  bool isSegmentStart(Level l) const { return l == lvlRange.first; }
  bool inSegment(Level l) const {
    return l >= lvlRange.first && l < lvlRange.second;
  }
};
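
// Example (illustrative sketch): how a COOSegment covering levels [1, 3)
// answers the queries above; the values are assumptions chosen only for
// this example.
//
//   COOSegment seg{/*lvlRange=*/{1, 3}, /*isSoA=*/false};
//   assert(seg.isAoS());            // AoS, since not SoA.
//   assert(seg.isSegmentStart(1));  // Level 1 opens the segment.
//   assert(seg.inSegment(2));       // Level 2 lies within [1, 3).
//   assert(!seg.inSegment(3));      // The upper bound is exclusive.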

//===----------------------------------------------------------------------===//
/// A wrapper around `RankedTensorType`, which has three goals:
///
/// (1) To provide a uniform API for querying aspects of sparse-tensor
/// types; in particular, to make the "dimension" vs "level" distinction
/// overt (i.e., explicit everywhere). Thus, throughout the sparsifier
/// this class should be preferred over using `RankedTensorType` or
/// `ShapedType` directly, since the methods of the latter do not make
/// the "dimension" vs "level" distinction overt.
///
/// (2) To provide a uniform abstraction over both sparse-tensor
/// types (i.e., `RankedTensorType` with `SparseTensorEncodingAttr`)
/// and dense-tensor types (i.e., `RankedTensorType` without an encoding).
/// That is, we want to manipulate dense-tensor types using the same API
/// that we use for manipulating sparse-tensor types; both to keep the
/// "dimension" vs "level" distinction overt, and to avoid needing to
/// handle certain cases specially in the sparsifier.
///
/// (3) To provide uniform handling of "defaults". In particular
/// this means that dense-tensors should always return the same answers
/// as sparse-tensors with a default encoding. But it additionally means
/// that the answers should be normalized, so that there's no way to
/// distinguish between non-provided data (which is filled in by default)
/// vs explicitly-provided data which equals the defaults.
///
class SparseTensorType {
public:
  // We memoize `lvlRank`, `dimToLvl`, and `lvlToDim` to avoid repeating
  // the conditionals throughout the rest of the class.
  SparseTensorType(RankedTensorType rtp)
      : rtp(rtp), enc(getSparseTensorEncoding(rtp)),
        lvlRank(enc ? enc.getLvlRank() : getDimRank()),
        dimToLvl(enc.isIdentity() ? AffineMap() : enc.getDimToLvl()),
        lvlToDim(enc.isIdentity() ? AffineMap() : enc.getLvlToDim()) {
    assert(rtp && "got null RankedTensorType");
    assert((!isIdentity() || getDimRank() == lvlRank) && "Rank mismatch");
  }

  SparseTensorType(ShapedType stp, SparseTensorEncodingAttr enc)
      : SparseTensorType(
            RankedTensorType::get(stp.getShape(), stp.getElementType(), enc)) {}

  // TODO: remove?
  SparseTensorType(SparseTensorEncodingAttr enc)
      : SparseTensorType(RankedTensorType::get(
            SmallVector<Size>(enc.getDimRank(), ShapedType::kDynamic),
            Float32Type::get(enc.getContext()), enc)) {}

  SparseTensorType &operator=(const SparseTensorType &) = delete;
  SparseTensorType(const SparseTensorType &) = default;

  //
  // Factory methods to construct a new `SparseTensorType`
  // with the same dimension-shape and element type.
  //

  SparseTensorType withEncoding(SparseTensorEncodingAttr newEnc) const {
    return SparseTensorType(rtp, newEnc);
  }

  SparseTensorType withDimToLvl(AffineMap dimToLvl) const {
    return withEncoding(enc.withDimToLvl(dimToLvl));
  }

  SparseTensorType withDimToLvl(SparseTensorEncodingAttr dimToLvlEnc) const {
    return withEncoding(enc.withDimToLvl(dimToLvlEnc));
  }

  SparseTensorType withDimToLvl(const SparseTensorType &dimToLvlSTT) const {
    return withDimToLvl(dimToLvlSTT.getEncoding());
  }

  SparseTensorType withoutDimToLvl() const {
    return withEncoding(enc.withoutDimToLvl());
  }

  SparseTensorType withBitWidths(unsigned posWidth, unsigned crdWidth) const {
    return withEncoding(enc.withBitWidths(posWidth, crdWidth));
  }

  SparseTensorType withoutBitWidths() const {
    return withEncoding(enc.withoutBitWidths());
  }

  SparseTensorType
  withDimSlices(ArrayRef<SparseTensorDimSliceAttr> dimSlices) const {
    return withEncoding(enc.withDimSlices(dimSlices));
  }

  SparseTensorType withoutDimSlices() const {
    return withEncoding(enc.withoutDimSlices());
  }
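
  // Example (illustrative sketch): the factory methods compose, so an
  // encoding can be "normalized" piece by piece. Here `stt` is an assumed
  // SparseTensorType that has an encoding.
  //
  //   SparseTensorType simplified =
  //       stt.withoutDimToLvl().withoutBitWidths().withoutDimSlices();
  //   assert(simplified.isIdentity());
  //   assert(simplified.getPosWidth() == 0 && simplified.getCrdWidth() == 0);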

  /// Allow implicit conversion to `RankedTensorType`, `ShapedType`,
  /// and `Type`. These are implicit to help alleviate the impedance
  /// mismatch for code that has not been converted to use `SparseTensorType`
  /// directly. Once more uses have been converted to `SparseTensorType`,
  /// we may want to make these explicit instead.
  ///
  /// WARNING: This user-defined-conversion method causes overload
  /// ambiguity whenever passing a `SparseTensorType` directly to a
  /// function which is overloaded to accept either `Type` or `TypeRange`.
  /// In particular, this includes `RewriterBase::replaceOpWithNewOp<OpTy>`
  /// and `OpBuilder::create<OpTy>` whenever the `OpTy::build` is overloaded
  /// thus. This happens because the `TypeRange<T>(T&&)` ctor is implicit
  /// as well, and there's no SFINAE we can add to this method that would
  /// block subsequent application of that ctor. The only way to fix the
  /// overload ambiguity is to avoid *implicit* conversion at the callsite:
  /// e.g., by using `static_cast` to make the conversion explicit, by
  /// assigning the `SparseTensorType` to a temporary variable of the
  /// desired type, etc.
  //
  // NOTE: We implement this as a single templated user-defined-conversion
  // function to avoid ambiguity problems when the desired result is `Type`
  // (since both `RankedTensorType` and `ShapedType` can be implicitly
  // converted to `Type`).
  template <typename T, typename = std::enable_if_t<
                            std::is_convertible_v<RankedTensorType, T>>>
  /*implicit*/ operator T() const {
    return rtp;
  }
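
  // Example (illustrative sketch): resolving the overload ambiguity described
  // in the warning above. `rewriter`, `op`, `operands`, and `SomeOp` are
  // assumed placeholders for a rewrite pattern's state and an op whose
  // `build` is overloaded on both `Type` and `TypeRange`.
  //
  //   // Ambiguous: the implicit conversion can match either overload.
  //   //   rewriter.replaceOpWithNewOp<SomeOp>(op, stt, operands);
  //   // Unambiguous: make the conversion explicit at the call site.
  //   rewriter.replaceOpWithNewOp<SomeOp>(op, stt.getRankedTensorType(),
  //                                       operands);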

  /// Explicitly convert to `RankedTensorType`. This method is
  /// a convenience for resolving overload-ambiguity issues with
  /// implicit conversion.
  RankedTensorType getRankedTensorType() const { return rtp; }

  bool operator==(const SparseTensorType &other) const {
    // All other fields are derived from `rtp` and therefore don't need
    // to be checked.
    return rtp == other.rtp;
  }

  bool operator!=(const SparseTensorType &other) const {
    return !(*this == other);
  }

  MLIRContext *getContext() const { return rtp.getContext(); }

  Type getElementType() const { return rtp.getElementType(); }

  SparseTensorEncodingAttr getEncoding() const { return enc; }

  //
  // SparseTensorEncodingAttr delegators
  //

  /// Returns true for tensors which have an encoding, and false for
  /// those which do not. Therefore tensors with an all-dense encoding
  /// return true.
  bool hasEncoding() const { return static_cast<bool>(enc); }

  /// Returns true for tensors where every level is dense.
  /// (This is always true for dense-tensors.)
  bool isAllDense() const { return enc.isAllDense(); }

  /// Returns true for tensors where every level is ordered.
  /// (This is always true for dense-tensors.)
  bool isAllOrdered() const { return enc.isAllOrdered(); }

  /// Translates between level / dimension coordinate space.
  ValueRange translateCrds(OpBuilder &builder, Location loc, ValueRange crds,
                           CrdTransDirectionKind dir) const {
    return enc.translateCrds(builder, loc, crds, dir);
  }

  /// Returns true if the dimToLvl mapping is a permutation.
  /// (This is always true for dense-tensors.)
  bool isPermutation() const { return enc.isPermutation(); }

  /// Returns true if the dimToLvl mapping is the identity.
  /// (This is always true for dense-tensors.)
  bool isIdentity() const { return enc.isIdentity(); }

  //
  // Other methods.
  //

  /// Returns the dimToLvl mapping (or the null-map for the identity).
  /// If you intend to compare the results of this method for equality,
  /// see `hasSameDimToLvl` instead.
  AffineMap getDimToLvl() const { return dimToLvl; }

  /// Returns the lvlToDim mapping (or the null-map for the identity).
  AffineMap getLvlToDim() const { return lvlToDim; }

  /// Returns the dimToLvl mapping, where the identity map is expanded out
  /// into a full `AffineMap`. This method is provided as a convenience,
  /// but for most purposes other methods (`isIdentity`, `getDimToLvl`,
  /// etc) will be more helpful.
  AffineMap getExpandedDimToLvl() const {
    return dimToLvl
               ? dimToLvl
               : AffineMap::getMultiDimIdentityMap(getDimRank(), getContext());
  }

  /// Returns true iff the two types have the same mapping. This method
  /// takes care to handle identity maps properly, so it should be preferred
  /// over using `getDimToLvl` followed by `AffineMap::operator==`.
  bool hasSameDimToLvl(const SparseTensorType &other) const {
    // If the maps are the identity, then we need to check the rank
    // to be sure they're the same size identity. (And since identity
    // means dimRank==lvlRank, we use lvlRank as a minor optimization.)
    return isIdentity() ? (other.isIdentity() && lvlRank == other.lvlRank)
                        : (dimToLvl == other.dimToLvl);
  }
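
  // Example (illustrative sketch): why `hasSameDimToLvl` is preferred over
  // comparing `getDimToLvl` results directly. Assume `a` and `b` are
  // SparseTensorTypes with identity mappings but different ranks (say 2-D
  // and 3-D): both return the null map, so the maps compare equal even
  // though the types do not agree.
  //
  //   assert(a.getDimToLvl() == b.getDimToLvl()); // Null maps compare equal.
  //   assert(!a.hasSameDimToLvl(b));              // Ranks differ: not same.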

  /// Returns the dimension-rank.
  Dimension getDimRank() const { return rtp.getRank(); }

  /// Returns the level-rank.
  Level getLvlRank() const { return lvlRank; }

  /// Returns the dimension-shape.
  ArrayRef<Size> getDimShape() const { return rtp.getShape(); }

  /// Returns the level-shape.
  SmallVector<Size> getLvlShape() const {
    return getEncoding().translateShape(getDimShape(),
                                        CrdTransDirectionKind::dim2lvl);
  }

  /// Returns the batched level-rank.
  unsigned getBatchLvlRank() const { return getEncoding().getBatchLvlRank(); }

  /// Returns the batched level-shape.
  SmallVector<Size> getBatchLvlShape() const {
    auto lvlShape = getEncoding().translateShape(
        getDimShape(), CrdTransDirectionKind::dim2lvl);
    lvlShape.truncate(getEncoding().getBatchLvlRank());
    return lvlShape;
  }

  /// Returns the type with an identity mapping.
  RankedTensorType getDemappedType() const {
    return RankedTensorType::get(getLvlShape(), getElementType(),
                                 enc.withoutDimToLvl());
  }

  /// Safely looks up the requested dimension-DynSize. If you intend
  /// to check the result with `ShapedType::isDynamic`, then see the
  /// `getStaticDimSize` method instead.
  Size getDynamicDimSize(Dimension d) const {
    assert(d < getDimRank() && "Dimension is out of bounds");
    return getDimShape()[d];
  }

  /// Returns true if no dimension has dynamic size.
  bool hasStaticDimShape() const { return rtp.hasStaticShape(); }

  /// Returns true if any dimension has dynamic size.
  bool hasDynamicDimShape() const { return !hasStaticDimShape(); }

  /// Returns true if the given dimension has dynamic size. If you
  /// intend to call `getDynamicDimSize` based on the result, then see
  /// the `getStaticDimSize` method instead.
  bool isDynamicDim(Dimension d) const {
    // We don't use `rtp.isDynamicDim(d)` because we want the
    // OOB error message to be consistent with `getDynamicDimSize`.
    return ShapedType::isDynamic(getDynamicDimSize(d));
  }

  /// Returns the number of dimensions which have dynamic sizes.
  /// The return type is `int64_t` to maintain consistency with
  /// `ShapedType::Trait<T>::getNumDynamicDims`.
  int64_t getNumDynamicDims() const { return rtp.getNumDynamicDims(); }

  ArrayRef<LevelType> getLvlTypes() const { return enc.getLvlTypes(); }
  LevelType getLvlType(Level l) const {
    // This OOB check is for dense-tensors, since this class knows
    // their lvlRank (whereas STEA::getLvlType will/can only check
    // OOB for sparse-tensors).
    assert(l < lvlRank && "Level out of bounds");
    return enc.getLvlType(l);
  }

  // We can't just delegate these, since we want to use this class's
  // `getLvlType` method instead of STEA's.
  bool isDenseLvl(Level l) const { return isDenseLT(getLvlType(l)); }
  bool isCompressedLvl(Level l) const { return isCompressedLT(getLvlType(l)); }
  bool isLooseCompressedLvl(Level l) const {
    return isLooseCompressedLT(getLvlType(l));
  }
  bool isSingletonLvl(Level l) const { return isSingletonLT(getLvlType(l)); }
  bool isNOutOfMLvl(Level l) const { return isNOutOfMLT(getLvlType(l)); }
  bool isOrderedLvl(Level l) const { return isOrderedLT(getLvlType(l)); }
  bool isUniqueLvl(Level l) const { return isUniqueLT(getLvlType(l)); }
  bool isWithPos(Level l) const { return isWithPosLT(getLvlType(l)); }
  bool isWithCrd(Level l) const { return isWithCrdLT(getLvlType(l)); }
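
  // Example (illustrative sketch): per-level queries on an assumed CSR-like
  // type `stt` (level 0 dense, level 1 compressed, both ordered and unique).
  //
  //   assert(stt.isDenseLvl(0));
  //   assert(stt.isCompressedLvl(1));
  //   assert(stt.isOrderedLvl(1) && stt.isUniqueLvl(1));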

  /// Returns the coordinate-overhead bitwidth, defaulting to zero.
  unsigned getCrdWidth() const { return enc ? enc.getCrdWidth() : 0; }

  /// Returns the position-overhead bitwidth, defaulting to zero.
  unsigned getPosWidth() const { return enc ? enc.getPosWidth() : 0; }

  /// Returns the coordinate-overhead MLIR type, defaulting to `IndexType`.
  Type getCrdType() const {
    if (getCrdWidth())
      return IntegerType::get(getContext(), getCrdWidth());
    return IndexType::get(getContext());
  }

  /// Returns the position-overhead MLIR type, defaulting to `IndexType`.
  Type getPosType() const {
    if (getPosWidth())
      return IntegerType::get(getContext(), getPosWidth());
    return IndexType::get(getContext());
  }

  /// Returns true iff this sparse tensor type has a trailing
  /// COO region starting at the given level. By default, it
  /// tests for a unique COO type at top level.
  bool isCOOType(Level startLvl = 0, bool isUnique = true) const;

  /// Returns the starting level of this sparse tensor type for a
  /// trailing COO region that spans **at least** two levels. If
  /// no such COO region is found, then returns the level-rank.
  ///
  /// DEPRECATED: use getCOOSegments instead.
  Level getAoSCOOStart() const;

  /// Returns the [un]ordered COO type for this sparse tensor type.
  RankedTensorType getCOOType(bool ordered) const;

  /// Returns a list of COO segments in the sparse tensor type.
  SmallVector<COOSegment> getCOOSegments() const;
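
  // Example (illustrative sketch): querying the trailing COO region of an
  // assumed SparseTensorType `stt`. For a classic AoS COO encoding
  // (compressed(nonunique) followed by singleton levels), `getAoSCOOStart`
  // returns 0 and a single segment spans all levels.
  //
  //   if (stt.isCOOType())
  //     for (const COOSegment &seg : stt.getCOOSegments())
  //       llvm::dbgs() << "COO segment [" << seg.lvlRange.first << ", "
  //                    << seg.lvlRange.second << ")\n";
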
private:
  // These two must be const, to ensure coherence of the memoized fields.
  const RankedTensorType rtp;
  const SparseTensorEncodingAttr enc;
  // Memoized to avoid frequent redundant conditionals.
  const Level lvlRank;
  const AffineMap dimToLvl;
  const AffineMap lvlToDim;
};

/// Convenience methods to obtain a SparseTensorType from a Value.
inline SparseTensorType getSparseTensorType(Value val) {
  return SparseTensorType(cast<RankedTensorType>(val.getType()));
}
inline std::optional<SparseTensorType> tryGetSparseTensorType(Value val) {
  if (auto rtp = dyn_cast<RankedTensorType>(val.getType()))
    return SparseTensorType(rtp);
  return std::nullopt;
}
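
// Example (illustrative sketch): typical use of the convenience functions
// above inside a pass or rewrite pattern, where `v` is an assumed Value of
// ranked tensor type.
//
//   if (std::optional<SparseTensorType> stt = tryGetSparseTensorType(v)) {
//     if (stt->hasEncoding() && !stt->isAllDense()) {
//       // Work in level-space; its rank/shape may differ from dim-space.
//       Level lvlRank = stt->getLvlRank();
//       (void)lvlRank;
//     }
//   }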

} // namespace sparse_tensor
} // namespace mlir

#endif // MLIR_DIALECT_SPARSETENSOR_IR_SPARSETENSORTYPE_H_