10 #include "TypeDetail.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/Sequence.h"
22 #include "llvm/ADT/Twine.h"
23 #include "llvm/ADT/TypeSwitch.h"
32 #define GET_TYPEDEF_CLASSES
33 #include "mlir/IR/BuiltinTypes.cpp.inc"
36 #include "mlir/IR/BuiltinTypeConstraints.cpp.inc"
43 void BuiltinDialect::registerTypes() {
45 #define GET_TYPEDEF_LIST
46 #include "mlir/IR/BuiltinTypes.cpp.inc"
58 return emitError() <<
"invalid element type for complex";
69 SignednessSemantics signedness) {
70 if (width > IntegerType::kMaxWidth) {
71 return emitError() <<
"integer bitwidth is limited to "
72 << IntegerType::kMaxWidth <<
" bits";
77 unsigned IntegerType::getWidth()
const {
return getImpl()->width; }
79 IntegerType::SignednessSemantics IntegerType::getSignedness()
const {
80 return getImpl()->signedness;
83 IntegerType IntegerType::scaleElementBitwidth(
unsigned scale) {
97 if (llvm::isa<FloatTF32Type>(*
this))
99 return APFloat::semanticsSizeInBits(getFloatSemantics());
104 if (llvm::isa<Float6E3M2FNType>(*
this))
105 return APFloat::Float6E3M2FN();
106 if (llvm::isa<Float8E5M2Type>(*
this))
107 return APFloat::Float8E5M2();
108 if (llvm::isa<Float8E4M3Type>(*
this))
109 return APFloat::Float8E4M3();
110 if (llvm::isa<Float8E4M3FNType>(*
this))
111 return APFloat::Float8E4M3FN();
112 if (llvm::isa<Float8E5M2FNUZType>(*
this))
113 return APFloat::Float8E5M2FNUZ();
114 if (llvm::isa<Float8E4M3FNUZType>(*
this))
115 return APFloat::Float8E4M3FNUZ();
116 if (llvm::isa<Float8E4M3B11FNUZType>(*
this))
117 return APFloat::Float8E4M3B11FNUZ();
118 if (llvm::isa<Float8E3M4Type>(*
this))
119 return APFloat::Float8E3M4();
120 if (llvm::isa<BFloat16Type>(*
this))
121 return APFloat::BFloat();
122 if (llvm::isa<Float16Type>(*
this))
123 return APFloat::IEEEhalf();
124 if (llvm::isa<FloatTF32Type>(*
this))
125 return APFloat::FloatTF32();
126 if (llvm::isa<Float32Type>(*
this))
127 return APFloat::IEEEsingle();
128 if (llvm::isa<Float64Type>(*
this))
129 return APFloat::IEEEdouble();
130 if (llvm::isa<Float80Type>(*
this))
131 return APFloat::x87DoubleExtended();
132 if (llvm::isa<Float128Type>(*
this))
133 return APFloat::IEEEquad();
134 llvm_unreachable(
"non-floating point type used");
141 if (isF16() || isBF16()) {
154 return APFloat::semanticsPrecision(getFloatSemantics());
161 unsigned FunctionType::getNumInputs()
const {
return getImpl()->numInputs; }
164 return getImpl()->getInputs();
167 unsigned FunctionType::getNumResults()
const {
return getImpl()->numResults; }
170 return getImpl()->getResults();
179 FunctionType FunctionType::getWithArgsAndResults(
186 insertTypesInto(getResults(), resultIndices, resultTypes, resultStorage);
187 return clone(newArgTypes, newResultTypes);
192 FunctionType::getWithoutArgsAndResults(
const BitVector &argIndices,
193 const BitVector &resultIndices) {
198 return clone(newArgTypes, newResultTypes);
207 StringAttr dialect, StringRef typeData) {
209 return emitError() <<
"invalid dialect namespace '" << dialect <<
"'";
216 <<
"`!" << dialect <<
"<\"" << typeData <<
"\">"
217 <<
"` type created with unregistered dialect. If this is "
218 "intended, please call allowUnregisteredDialects() on the "
219 "MLIRContext, or use -allow-unregistered-dialect with "
220 "the MLIR opt tool used";
230 bool VectorType::isValidElementType(
Type t) {
231 return isValidVectorTypeElementType(t);
237 if (!isValidElementType(elementType))
239 <<
"vector elements must be int/index/float type but got "
242 if (any_of(shape, [](int64_t i) {
return i <= 0; }))
244 <<
"vector types must have positive constant sizes but got "
247 if (scalableDims.size() != shape.size())
248 return emitError() <<
"number of dims must match, got "
249 << scalableDims.size() <<
" and " << shape.size();
254 VectorType VectorType::scaleElementBitwidth(
unsigned scale) {
258 if (
auto scaledEt = et.scaleElementBitwidth(scale))
261 if (
auto scaledEt = et.scaleElementBitwidth(scale))
267 Type elementType)
const {
278 .Case<RankedTensorType, UnrankedTensorType>(
279 [](
auto type) {
return type.getElementType(); });
283 return !llvm::isa<UnrankedTensorType>(*
this);
287 return llvm::cast<RankedTensorType>(*this).getShape();
291 Type elementType)
const {
292 if (llvm::dyn_cast<UnrankedTensorType>(*
this)) {
298 auto rankedTy = llvm::cast<RankedTensorType>(*
this);
301 rankedTy.getEncoding());
303 rankedTy.getEncoding());
307 Type elementType)
const {
308 return ::llvm::cast<RankedTensorType>(cloneWith(shape, elementType));
312 return ::llvm::cast<RankedTensorType>(cloneWith(shape,
getElementType()));
320 return emitError() <<
"invalid tensor element type: " << elementType;
329 return llvm::isa<ComplexType,
FloatType, IntegerType, OpaqueType, VectorType,
331 !llvm::isa<BuiltinDialect>(type.
getDialect());
342 for (int64_t s : shape)
343 if (s < 0 && !ShapedType::isDynamic(s))
344 return emitError() <<
"invalid tensor dimension size";
345 if (
auto v = llvm::dyn_cast_or_null<VerifiableTensorEncoding>(encoding))
346 if (failed(v.verifyEncoding(shape, elementType,
emitError)))
368 [](
auto type) {
return type.getElementType(); });
372 return !llvm::isa<UnrankedMemRefType>(*
this);
376 return llvm::cast<MemRefType>(*this).getShape();
380 Type elementType)
const {
381 if (llvm::dyn_cast<UnrankedMemRefType>(*
this)) {
397 Type elementType)
const {
398 return ::llvm::cast<MemRefType>(cloneWith(shape, elementType));
402 return ::llvm::cast<MemRefType>(cloneWith(shape,
getElementType()));
406 if (
auto rankedMemRefTy = llvm::dyn_cast<MemRefType>(*
this))
407 return rankedMemRefTy.getMemorySpace();
408 return llvm::cast<UnrankedMemRefType>(*this).getMemorySpace();
412 if (
auto rankedMemRefTy = llvm::dyn_cast<MemRefType>(*
this))
413 return rankedMemRefTy.getMemorySpaceAsInt();
414 return llvm::cast<UnrankedMemRefType>(*this).getMemorySpaceAsInt();
421 std::optional<llvm::SmallDenseSet<unsigned>>
425 size_t originalRank = originalShape.size(), reducedRank = reducedShape.size();
426 llvm::SmallDenseSet<unsigned> unusedDims;
427 unsigned reducedIdx = 0;
428 for (
unsigned originalIdx = 0; originalIdx < originalRank; ++originalIdx) {
430 int64_t origSize = originalShape[originalIdx];
432 if (matchDynamic && reducedIdx < reducedRank && origSize != 1 &&
433 (ShapedType::isDynamic(reducedShape[reducedIdx]) ||
434 ShapedType::isDynamic(origSize))) {
438 if (reducedIdx < reducedRank && origSize == reducedShape[reducedIdx]) {
443 unusedDims.insert(originalIdx);
450 if (reducedIdx != reducedRank)
457 ShapedType candidateReducedType) {
458 if (originalType == candidateReducedType)
461 ShapedType originalShapedType = llvm::cast<ShapedType>(originalType);
462 ShapedType candidateReducedShapedType =
463 llvm::cast<ShapedType>(candidateReducedType);
468 candidateReducedShapedType.getShape();
469 unsigned originalRank = originalShape.size(),
470 candidateReducedRank = candidateReducedShape.size();
471 if (candidateReducedRank > originalRank)
474 auto optionalUnusedDimsMask =
478 if (!optionalUnusedDimsMask)
481 if (originalShapedType.getElementType() !=
482 candidateReducedShapedType.getElementType())
494 if (llvm::isa<IntegerAttr, StringAttr, DictionaryAttr>(memorySpace))
498 if (!isa<BuiltinDialect>(memorySpace.
getDialect()))
506 if (memorySpace == 0)
513 IntegerAttr intMemorySpace = llvm::dyn_cast_or_null<IntegerAttr>(memorySpace);
514 if (intMemorySpace && intMemorySpace.getValue() == 0)
524 assert(llvm::isa<IntegerAttr>(memorySpace) &&
525 "Using `getMemorySpaceInteger` with non-Integer attribute");
527 return static_cast<unsigned>(llvm::cast<IntegerAttr>(memorySpace).getInt());
535 MemRefLayoutAttrInterface layout,
549 MemRefType MemRefType::getChecked(
551 Type elementType, MemRefLayoutAttrInterface layout,
Attribute memorySpace) {
561 return Base::getChecked(emitErrorFn, elementType.
getContext(), shape,
562 elementType, layout, memorySpace);
599 return Base::getChecked(emitErrorFn, elementType.
getContext(), shape,
600 elementType, layout, memorySpace);
604 AffineMap map,
unsigned memorySpaceInd) {
625 unsigned memorySpaceInd) {
639 return Base::getChecked(emitErrorFn, elementType.
getContext(), shape,
640 elementType, layout, memorySpace);
645 MemRefLayoutAttrInterface layout,
648 return emitError() <<
"invalid memref element type";
651 for (int64_t s : shape)
652 if (s < 0 && !ShapedType::isDynamic(s))
653 return emitError() <<
"invalid memref size";
655 assert(layout &&
"missing layout specification");
656 if (failed(layout.verifyLayout(shape,
emitError)))
660 return emitError() <<
"unsupported memory space Attribute";
677 return emitError() <<
"invalid memref element type";
680 return emitError() <<
"unsupported memory space Attribute";
691 if (
auto dim = dyn_cast<AffineDimExpr>(e))
692 strides[dim.getPosition()] =
693 strides[dim.getPosition()] + multiplicativeFactor;
695 offset = offset + e * multiplicativeFactor;
706 auto bin = dyn_cast<AffineBinaryOpExpr>(e);
718 auto dim = dyn_cast<AffineDimExpr>(bin.getLHS());
720 strides[dim.getPosition()] =
721 strides[dim.getPosition()] + bin.getRHS() * multiplicativeFactor;
728 if (bin.getLHS().isSymbolicOrConstant())
729 return extractStrides(bin.getRHS(), multiplicativeFactor * bin.getLHS(),
731 return extractStrides(bin.getLHS(), multiplicativeFactor * bin.getRHS(),
737 extractStrides(bin.getLHS(), multiplicativeFactor, strides, offset);
739 extractStrides(bin.getRHS(), multiplicativeFactor, strides, offset);
740 return success(succeeded(res1) && succeeded(res2));
743 llvm_unreachable(
"unexpected binary operation");
760 SmallVectorImpl<AffineExpr> &strides,
762 AffineMap m = t.getLayout().getAffineMap();
764 if (m.getNumResults() != 1 && !m.isIdentity())
770 strides.assign(t.getRank(), zero);
773 if (m.isIdentity()) {
775 if (t.getRank() == 0)
781 assert(
false &&
"unexpected failure: extract strides in canonical layout");
794 unsigned numDims = m.getNumDims();
795 unsigned numSymbols = m.getNumSymbols();
797 for (
auto &stride : strides)
821 if (
auto strided = llvm::dyn_cast<StridedLayoutAttr>(t.getLayout())) {
822 llvm::append_range(strides, strided.getStrides());
823 offset = strided.getOffset();
833 if (
auto cst = dyn_cast<AffineConstantExpr>(offsetExpr))
834 offset = cst.getValue();
836 offset = ShapedType::kDynamic;
837 for (
auto e : strideExprs) {
838 if (
auto c = dyn_cast<AffineConstantExpr>(e))
839 strides.push_back(c.getValue());
841 strides.push_back(ShapedType::kDynamic);
846 std::pair<SmallVector<int64_t>, int64_t>
852 assert(succeeded(status) &&
"Invalid use of check-free getStridesAndOffset");
853 return {strides, offset};
861 ArrayRef<Type> TupleType::getTypes()
const {
return getImpl()->getTypes(); }
868 for (
Type type : getTypes()) {
869 if (
auto nestedTuple = llvm::dyn_cast<TupleType>(type))
870 nestedTuple.getFlattenedTypes(types);
872 types.push_back(type);
877 size_t TupleType::size()
const {
return getImpl()->size(); }
889 AffineMap m = t.getLayout().getAffineMap();
896 if (m.getNumResults() > 1)
900 if (m.getNumDims() == 0 && m.getNumSymbols() == 0) {
901 if (
auto cst = dyn_cast<AffineConstantExpr>(m.getResult(0)))
902 if (cst.getValue() == 0)
910 if (t.getShape().empty())
918 auto simplifiedLayoutExpr =
920 if (expr != simplifiedLayoutExpr)
922 m.getNumDims(), m.getNumSymbols(), simplifiedLayoutExpr)));
933 assert(!exprs.empty() &&
"expected exprs");
935 assert(!maps.empty() &&
"Expected one non-empty map");
936 unsigned numDims = maps[0].getNumDims(), nSymbols = maps[0].getNumSymbols();
939 bool dynamicPoisonBit =
false;
940 int64_t runningSize = 1;
941 for (
auto en : llvm::zip(llvm::reverse(exprs), llvm::reverse(sizes))) {
942 int64_t size = std::get<1>(en);
947 expr = expr ? expr + dimExpr * stride : dimExpr * stride;
950 assert(runningSize > 0 &&
"integer overflow in size computation");
952 dynamicPoisonBit =
true;
961 exprs.reserve(sizes.size());
962 for (
auto dim : llvm::seq<unsigned>(0, sizes.size()))
971 return succeeded(res);
978 return succeeded(successStrides) && (strides.empty() || strides.back() == 1);
985 auto memrefShape = type.getShape().take_back(n);
986 if (ShapedType::isDynamicShape(memrefShape))
989 if (type.getLayout().isIdentity())
1003 auto dimProduct = 1;
1004 for (
auto dim : llvm::reverse(memrefShape.drop_front(1))) {
1006 flattenedDims.push_back(dimProduct);
1009 strides = strides.drop_back(1);
1010 return llvm::equal(strides, llvm::reverse(flattenedDims));
static LogicalResult checkTensorElementType(function_ref< InFlightDiagnostic()> emitError, Type elementType)
static LogicalResult extractStrides(AffineExpr e, AffineExpr multiplicativeFactor, MutableArrayRef< AffineExpr > strides, AffineExpr &offset)
Takes a single AffineExpr `e` and populates the `strides` array with the stride expressions for each dim position, accumulating the symbolic `offset` as it goes.
static void extractStridesFromTerm(AffineExpr e, AffineExpr multiplicativeFactor, MutableArrayRef< AffineExpr > strides, AffineExpr &offset)
static MLIRContext * getContext(OpFoldResult val)
static Type getElementType(Type type, ArrayRef< int32_t > indices, function_ref< InFlightDiagnostic(StringRef)> emitErrorFn)
Walks the given type hierarchy with the given indices, potentially down to component granularity, in order to select the element type at the given indices.
static ArrayRef< int64_t > getShape(Type type)
Returns the shape of the given type.
Base type for affine expression.
MLIRContext * getContext() const
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
static AffineMap getMultiDimIdentityMap(unsigned numDims, MLIRContext *context)
Returns an AffineMap with 'numDims' identity result dim exprs.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
static SmallVector< AffineMap, 4 > inferFromExprList(ArrayRef< ArrayRef< AffineExpr >> exprsList, MLIRContext *context)
Returns a vector of AffineMaps; each with as many results as exprs.size(), as many dims as the largest dim position used in any expression, and as many symbols as the largest symbol position used.
Attributes are known-constant values of operations.
Dialect & getDialect() const
Get the dialect this attribute is registered to.
This class provides a shared interface for ranked and unranked memref types.
ArrayRef< int64_t > getShape() const
Returns the shape of this memref type.
static bool isValidElementType(Type type)
Return true if the specified element type is ok in a memref.
Attribute getMemorySpace() const
Returns the memory space in which data referred to by this memref resides.
unsigned getMemorySpaceAsInt() const
[deprecated] Returns the memory space in old raw integer representation.
bool hasRank() const
Returns if this type is ranked, i.e. it has a known number of dimensions.
Type getElementType() const
Returns the element type of this memref type.
MemRefType clone(ArrayRef< int64_t > shape, Type elementType) const
Return a clone of this type with the given new shape and element type.
BaseMemRefType cloneWith(std::optional< ArrayRef< int64_t >> shape, Type elementType) const
Clone this type with the given shape and element type.
static bool isValidNamespace(StringRef str)
Utility function that returns if the given string is a valid dialect namespace.
static FloatType getF64(MLIRContext *ctx)
FloatType scaleElementBitwidth(unsigned scale)
Get or create a new FloatType with bitwidth scaled by scale.
const llvm::fltSemantics & getFloatSemantics()
Return the floating semantics of this float type.
unsigned getFPMantissaWidth()
Return the width of the mantissa of this type.
unsigned getWidth()
Return the bitwidth of this float type.
static FloatType getF32(MLIRContext *ctx)
This class represents a diagnostic that is inflight and set to be reported.
MLIRContext is the top-level object for a collection of MLIR operations.
Dialect * getLoadedDialect(StringRef name)
Get a registered IR dialect with the given namespace.
bool allowsUnregisteredDialects()
Return true if we allow to create operation for unregistered dialects.
This is a builder type that keeps local references to arguments.
Builder & setLayout(MemRefLayoutAttrInterface newLayout)
Builder & setElementType(Type newElementType)
Builder & setShape(ArrayRef< int64_t > newShape)
Builder & setMemorySpace(Attribute newMemorySpace)
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
TensorType cloneWith(std::optional< ArrayRef< int64_t >> shape, Type elementType) const
Clone this type with the given shape and element type.
static bool isValidElementType(Type type)
Return true if the specified element type is ok in a tensor.
ArrayRef< int64_t > getShape() const
Returns the shape of this tensor type.
bool hasRank() const
Returns if this type is ranked, i.e. it has a known number of dimensions.
RankedTensorType clone(ArrayRef< int64_t > shape, Type elementType) const
Return a clone of this type with the given new shape and element type.
Type getElementType() const
Returns the element type of this tensor type.
This class provides an abstraction over the various different ranges of value types.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
Dialect & getDialect() const
Get the dialect this type is registered to.
MLIRContext * getContext() const
Return the MLIRContext in which this type was uniqued.
bool isIntOrFloat() const
Return true if this is an integer (of any signedness) or a float type.
Attribute wrapIntegerMemorySpace(unsigned memorySpace, MLIRContext *ctx)
Wraps deprecated integer memory space to the new Attribute form.
unsigned getMemorySpaceAsInt(Attribute memorySpace)
[deprecated] Returns the memory space in old raw integer representation.
bool isSupportedMemorySpace(Attribute memorySpace)
Checks if the memorySpace has supported Attribute type.
Attribute skipDefaultMemorySpace(Attribute memorySpace)
Replaces default memorySpace (integer == 0) with empty Attribute.
Include the generated interface declarations.
SliceVerificationResult
Enum that captures information related to verifier error conditions on slice insert/extract type of operations.
bool isLastMemrefDimUnitStride(MemRefType type)
Return "true" if the last dimension of the given type has a static unit stride.
SmallVector< Type, 10 > getFlattenedTypes(TupleType t)
Get the types within a nested Tuple.
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
LogicalResult getStridesAndOffset(MemRefType t, SmallVectorImpl< int64_t > &strides, int64_t &offset)
Returns the strides of the MemRef if the layout map is in strided form.
TypeRange filterTypesOut(TypeRange types, const BitVector &indices, SmallVectorImpl< Type > &storage)
Filters out any elements referenced by indices.
@ CeilDiv
RHS of ceildiv is always a constant or a symbolic expression.
@ Mul
RHS of mul is always a constant or a symbolic expression.
@ Mod
RHS of mod is always a constant or a symbolic expression with a positive value.
@ FloorDiv
RHS of floordiv is always a constant or a symbolic expression.
MemRefType canonicalizeStridedLayout(MemRefType t)
Return a version of t with identity layout if it can be determined statically that the layout is the canonical contiguous strided layout.
Operation * clone(OpBuilder &b, Operation *op, TypeRange newResultTypes, ValueRange newOperands)
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
AffineExpr makeCanonicalStridedLayoutExpr(ArrayRef< int64_t > sizes, ArrayRef< AffineExpr > exprs, MLIRContext *context)
Given MemRef sizes that are either static or dynamic, returns the canonical "contiguous" strides Affi...
std::optional< llvm::SmallDenseSet< unsigned > > computeRankReductionMask(ArrayRef< int64_t > originalShape, ArrayRef< int64_t > reducedShape, bool matchDynamic=false)
Given an originalShape and a reducedShape assumed to be a subset of originalShape with some 1 entries erased, return the set of indices that specifies which of the entries of originalShape are dropped to obtain reducedShape.
AffineExpr simplifyAffineExpr(AffineExpr expr, unsigned numDims, unsigned numSymbols)
Simplify an affine expression by flattening and some amount of simple analysis.
bool isStrided(MemRefType t)
Return "true" if the layout for t is compatible with strided semantics.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.
SliceVerificationResult isRankReducedType(ShapedType originalType, ShapedType candidateReducedType)
Check if originalType can be rank reduced to candidateReducedType type by dropping some dimensions with static size 1.
LogicalResult verify(Operation *op, bool verifyRecursively=true)
Perform (potentially expensive) checks of invariants, used to detect compiler bugs,...
TypeRange insertTypesInto(TypeRange oldTypes, ArrayRef< unsigned > indices, TypeRange newTypes, SmallVectorImpl< Type > &storage)
Insert a set of newTypes into oldTypes at the given indices.
AffineExpr getAffineSymbolExpr(unsigned position, MLIRContext *context)
bool trailingNDimsContiguous(MemRefType type, int64_t n)
Return "true" if the last N dimensions of the given type are contiguous.