29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/TypeSwitch.h"
38 #include "mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc"
44 #include "mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc"
47 #include "mlir/Dialect/Tosa/IR/TosaDialectBytecode.cpp.inc"
68 return (isa<tosa::IfOp>(dest->getParentOp()) ||
69 isa<tosa::WhileOp>(dest->getParentOp()));
75 TosaDialectBytecodeInterface(Dialect *dialect)
85 LogicalResult writeAttribute(Attribute attr, DialectBytecodeWriter &writer) const override {
87 return ::writeAttribute(attr, writer);
97 LogicalResult writeType(Type type, DialectBytecodeWriter &writer) const override {
99 return ::writeType(type, writer);
106 std::unique_ptr<DialectVersion>
109 reader.emitError("Dialect does not support versioning");
113 LogicalResult upgradeFromVersion(Operation *topLevelOp, const DialectVersion &version) const final {
132 void TosaDialect::initialize() {
135 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
138 #define GET_ATTRDEF_LIST
139 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
141 addInterfaces<TosaDialectBytecodeInterface, TosaInlinerInterface>();
142 declarePromisedInterfaces<
143 mesh::ShardingInterface, ClampOp, SigmoidOp, TanhOp, AddOp,
144 ArithmeticRightShiftOp, BitwiseAndOp, BitwiseOrOp, BitwiseXorOp, IntDivOp,
145 LogicalAndOp, LogicalLeftShiftOp, LogicalRightShiftOp, LogicalOrOp,
146 LogicalXorOp, MaximumOp, MinimumOp, MulOp, PowOp, SubOp, AbsOp,
147 BitwiseNotOp, CeilOp, ClzOp, ExpOp, FloorOp, LogOp, LogicalNotOp,
148 NegateOp, ReciprocalOp, RsqrtOp, SelectOp, EqualOp, GreaterOp,
149 GreaterEqualOp, MatMulOp>();
156 if (llvm::isa<ElementsAttr>(value))
157 return builder.create<tosa::ConstOp>(loc, type,
158 llvm::cast<ElementsAttr>(value));
171 << "expected attribute";
173 if (auto typedAttr = dyn_cast<TypedAttr>(attr)) {
190 bool needsSpace = false;
191 auto typedAttr = dyn_cast_or_null<TypedAttr>(attr);
192 if (!typedAttr || typedAttr.getType() != type.getValue()) {
209 template <typename T>
212 auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
213 auto weightType = llvm::dyn_cast<RankedTensorType>(op.getWeight().getType());
217 op.emitOpError("expect a ranked tensor for input, got ") << op.getInput();
221 op.emitOpError("expect a ranked tensor for weight, got ") << op.getWeight();
225 auto inputEType = inputType.getElementType();
226 auto weightEType = weightType.getElementType();
228 bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
229 bool weightIsQuant = !llvm::isa<FloatType>(weightEType);
232 if (inputIsQuant != weightIsQuant) {
234 "expect both input and weight to be float or not together, got ")
235 << inputEType << " and " << weightEType;
241 if ((inputIsQuant && !op.getQuantizationInfo()) ||
242 (!inputIsQuant && op.getQuantizationInfo())) {
243 op.emitOpError("quantizationattr is required for quantized type, and not "
244 "allowed for float type");
252 auto attrType = llvm::dyn_cast<TensorType>(getValueAttr().getType());
253 auto outputType = llvm::dyn_cast<TensorType>(getOutput().getType());
255 if (!attrType || !outputType) {
256 emitOpError("expected tensors for attr/result type");
260 if (auto result = llvm::dyn_cast<mlir::quant::QuantizedType>(
261 outputType.getElementType())) {
262 if (result.getStorageType() == attrType.getElementType())
266 if (attrType.getElementType() != outputType.getElementType()) {
267 emitOpError("expected same attr/result element types");
276 const auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
277 if (!resultETy.isIntOrIndex())
278 return emitOpError("result tensor is not of integer type");
281 const auto inputType = llvm::cast<ShapedType>(getInput().getType());
282 const int64_t axis = getAxisAttr().getInt();
283 if (inputType.hasRank() && ((axis < 0) || axis >= inputType.getRank()))
284 return emitOpError("specified axis is outside the rank of the tensor");
290 auto inputType = llvm::cast<ShapedType>(getInput().getType());
292 auto inputETy = inputType.getElementType();
293 auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
296 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
297 inputETy = quantType.getStorageType();
300 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(resultETy))
301 resultETy = quantType.getStorageType();
303 auto accType = getAccType();
304 if (llvm::isa<IntegerType>(inputETy) && !accType.isInteger(32))
305 return emitOpError("accumulator type for integer tensor is not i32");
307 if (inputETy.isF16() && !(accType.isF16() || accType.isF32()))
308 return emitOpError("accumulator type for f16 tensor is not f16/f32");
310 if (inputETy.isBF16() && !accType.isF32())
311 return emitOpError("accumulator type for bf16 tensor is not f32");
313 if (inputETy.isF32() && !accType.isF32())
314 return emitOpError("accumulator type for f32 tensor is not f32");
316 if ((inputETy.isF32() && resultETy.isF32()) ||
317 (inputETy.isF16() && resultETy.isF16()) ||
318 (inputETy.isBF16() && resultETy.isBF16()) ||
319 (inputETy.isInteger(8) && resultETy.isInteger(8)) ||
320 (inputETy.isInteger(16) && resultETy.isInteger(16)))
323 return emitOpError("input/output element types are incompatible.");
328 llvm::cast<ShapedType>(getInput().getType()).getElementType();
330 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy)) {
331 inputETy = quantType.getStorageType();
333 mlir::Type maxFpType = getMaxFpAttr().getType();
334 mlir::Type minFpType = getMinFpAttr().getType();
336 llvm::cast<ShapedType>(getOutput().getType()).getElementType();
338 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputETy)) {
339 outputETy = quantType.getStorageType();
343 if (inputETy != outputETy)
344 return emitOpError("input/output element types are incompatible.");
349 if (!inputETy.isInteger(dataTypeBitWidth)) {
350 if (((maxFpType != minFpType) ||
353 return emitOpError("min/max attributes types are incompatible with "
354 "input/output element types.");
439 auto inputType = llvm::dyn_cast<ShapedType>(a.getType());
440 assert(inputType && "Input must be a shaped tensor type!");
442 auto inputQType = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(
443 inputType.getElementType());
444 assert(inputQType && "Tensor must have quantized datatype!");
446 unsigned inputBits = inputQType.getStorageTypeIntegralWidth();
448 auto outputShapedType = llvm::dyn_cast<ShapedType>(outputType);
449 assert(outputShapedType && "Output must be a shaped type");
451 IntegerType accElementType;
456 auto accType = outputShapedType.clone(accElementType);
469 DenseArrayAttr kernel, DenseArrayAttr stride,
470 DenseArrayAttr pad, TypeAttr accType) {
479 result.types.push_back(outputType);
492 result.types.push_back(outputType);
505 result.types.push_back(outputType);
519 result.types.push_back(outputType);
529 for (int i = 0, e = operands.size(); i != e; ++i) {
531 if (!shape.hasRank()) {
536 outRank = std::max<int64_t>(outRank, shape.getRank());
539 outShape.resize(outRank, 1);
541 for (int i = 0, e = operands.size(); i != e; ++i) {
543 auto rankDiff = outShape.size() - shape.getRank();
545 for (size_t i = 0, e = shape.getRank(); i < e; ++i) {
546 auto dim1 = outShape[i + rankDiff];
547 auto dim2 = shape.getDimSize(i);
548 auto resolvedDim = dim1;
552 } else if (dim2 == 1) {
554 } else if (dim1 != dim2) {
557 outShape[i + rankDiff] = resolvedDim;
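// Illustrative example of the broadcast rule above (assuming the elided branch
// resolves a size-1 dimension to the other operand's size): shapes [2, 1] and
// [1, 3] broadcast to [2, 3]; mismatched non-1 static sizes fail resolution.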
564 LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
565 MLIRContext *context, ::std::optional<Location> location,
566 ArgMaxOp::Adaptor adaptor,
569 IntegerAttr axis = adaptor.getProperties().axis;
570 int32_t axisVal = axis.getValue().getSExtValue();
572 if (!inputShape.hasRank()) {
578 outShape.reserve(inputShape.getRank() - 1);
579 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
582 outShape.push_back(inputShape.getDimSize(i));
589 LogicalResult tosa::RFFT2dOp::inferReturnTypeComponents(
590 MLIRContext *context, ::std::optional<Location> location,
591 RFFT2dOp::Adaptor adaptor,
595 if (!inputShape.hasRank())
599 outputShape.resize(3, ShapedType::kDynamic);
600 outputShape[0] = inputShape.getDimSize(0);
601 outputShape[1] = inputShape.getDimSize(1);
602 int64_t inWidth = inputShape.getDimSize(2);
606 if (inWidth != ShapedType::kDynamic)
607 outputShape[2] = inWidth / 2 + 1;
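// For an input of shape [N, H, W] with static W, both RFFT2d results (the real
// and imaginary parts) are inferred as [N, H, W / 2 + 1].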
615 LogicalResult tosa::FFT2dOp::inferReturnTypeComponents(
616 MLIRContext *context, ::std::optional<Location> location,
617 FFT2dOp::Adaptor adaptor,
619 inferredReturnShapes.push_back(
621 inferredReturnShapes.push_back(
626 LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
627 MLIRContext *context, ::std::optional<Location> location,
628 ConcatOp::Adaptor adaptor,
631 const Properties &prop = adaptor.getProperties();
632 int32_t axis = prop.axis.getValue().getSExtValue();
634 bool hasRankedInput = false;
635 for (auto operand : adaptor.getOperands()) {
637 if (!operandShape.hasRank())
642 outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);
645 for (int i = 0, s = operandShape.getRank(); i < s; i++) {
646 if (i == axis || operandShape.isDynamicDim(i))
648 if (outputShape[i] == ShapedType::kDynamic)
649 outputShape[i] = operandShape.getDimSize(i);
650 if (outputShape[i] != operandShape.getDimSize(i))
652 "Cannot concat tensors with different sizes"
653 " on the non-axis dimension ",
657 hasRankedInput = true;
660 llvm::cast<TensorType>(adaptor.getInput1().getType()[0]).getElementType();
661 if (!hasRankedInput) {
667 int64_t concatDimSize = 0;
668 for (auto operand : adaptor.getOperands()) {
673 if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
674 concatDimSize = ShapedType::kDynamic;
678 concatDimSize += operandShape.getDimSize(axis);
681 outputShape[axis] = concatDimSize;
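// Example: concatenating shapes [2, 3, 4] and [2, 5, 4] along axis 1 infers
// [2, 8, 4]; non-axis dimensions must agree, and the axis dimension is the sum
// (dynamic if any operand's axis dimension is dynamic or unranked).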
687 LogicalResult tosa::EqualOp::inferReturnTypeComponents(
688 MLIRContext *context, ::std::optional<Location> location,
705 if (l.size() != r.size() || l.size() != 1)
710 LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
711 MLIRContext *context, ::std::optional<Location> location,
712 FullyConnectedOp::Adaptor adaptor,
715 ShapeAdaptor weightShape(adaptor.getWeight().getType());
720 outShape.resize(2, ShapedType::kDynamic);
722 if (inputShape.hasRank()) {
723 outShape[0] = inputShape.getDimSize(0);
726 if (weightShape.hasRank()) {
727 outShape[1] = weightShape.getDimSize(0);
730 if (biasShape.hasRank()) {
731 outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
741 LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
742 MLIRContext *context, ::std::optional<Location> location,
743 MatMulOp::Adaptor adaptor,
750 outShape.resize(3, ShapedType::kDynamic);
752 if (lhsShape.hasRank()) {
753 outShape[0] = lhsShape.getDimSize(0);
754 outShape[1] = lhsShape.getDimSize(1);
757 if (rhsShape.hasRank()) {
758 outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
760 outShape[2] = rhsShape.getDimSize(2);
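// MatMul shape inference: lhs [N, H, C] times rhs [N, C, W] infers [N, H, W],
// taking each output dimension from whichever operand has it statically known.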
767 LogicalResult tosa::PadOp::inferReturnTypeComponents(
768 MLIRContext *context, ::std::optional<Location> location,
769 PadOp::Adaptor adaptor,
772 ShapeAdaptor paddingShape(adaptor.getPadding().getType());
777 if (!inputShape.hasRank() && !paddingShape.hasRank()) {
784 if (!inputShape.hasRank()) {
785 if (paddingShape.isDynamicDim(0)) {
790 outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
798 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
804 for (auto val : paddings) {
805 paddingValues.push_back(val.getSExtValue());
808 outputShape.reserve(inputShape.getRank());
809 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
810 if (inputShape.isDynamicDim(i)) {
811 outputShape.push_back(ShapedType::kDynamic);
815 outputShape.push_back(inputShape.getDimSize(i) + paddingValues[i * 2] +
816 paddingValues[i * 2 + 1]);
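// Each output dimension is the input dimension plus its before/after padding,
// e.g. an input dimension of 4 with paddings [1, 2] infers 7.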
824 RankedTensorType inputType = getInput1().getType();
825 RankedTensorType outputType = getOutput().getType();
826 TensorType paddingType = getPadding().getType();
828 if (inputType.getRank() != outputType.getRank())
829 return emitOpError() << "expect same input and output tensor rank.";
831 if (paddingType.hasRank() && paddingType.getRank() != 2)
832 return emitOpError() << "expect 'padding' tensor rank equal to 2.";
838 return to_vector(llvm::map_range(shape, [](int64_t dim) {
839 return dim == -1 ? ShapedType::kDynamic : dim;
843 LogicalResult tosa::SliceOp::inferReturnTypeComponents(
844 MLIRContext *context, ::std::optional<Location> location,
845 SliceOp::Adaptor adaptor,
847 inferredReturnShapes.push_back(
853 auto inputType = llvm::dyn_cast<RankedTensorType>(getInput1().getType());
857 if (static_cast<size_t>(inputType.getRank()) != getStart().size())
859 "length of start attribute is not equal rank of input shape");
861 if (static_cast<size_t>(inputType.getRank()) != getSize().size())
863 "length of size attribute is not equal rank of input shape");
869 Type elementTy = getInput1().getType().getElementType();
870 if (isa<FloatType>(elementTy) && getShift() != 0)
871 return emitOpError() << "require shift to be 0 for float type";
876 LogicalResult tosa::TableOp::inferReturnTypeComponents(
877 MLIRContext *context, ::std::optional<Location> location,
878 TableOp::Adaptor adaptor,
882 if (!inputShape.hasRank()) {
887 inferredReturnShapes.resize(1);
888 inputShape.getDims(inferredReturnShapes[0]);
894 TensorType outputType = getOutput().getType();
897 inputType.getRank() != outputType.getRank())
899 << "expected input tensor rank to equal result tensor rank";
901 auto inputDims = inputType.getShape();
902 auto outputDims = outputType.getShape();
904 int64_t dim = it.index();
905 auto [inputDim, outputDim] = it.value();
906 if (!ShapedType::isDynamic(outputDim) && outputDim != inputDim) {
907 return emitOpError() << "dim(result, " << dim << ") = " << outputDim
908 << " doesn't match dim(input, " << dim
909 << ") = " << inputDim;
915 LogicalResult tosa::TileOp::inferReturnTypeComponents(
916 MLIRContext *context, ::std::optional<Location> location,
917 TileOp::Adaptor adaptor,
922 if (!inputShape.hasRank()) {
923 outputShape.resize(multiples.size(), ShapedType::kDynamic);
926 } else if (static_cast<size_t>(inputShape.getRank()) != multiples.size())
930 outputShape.reserve(multiples.size());
931 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
932 int64_t dim = inputShape.getDimSize(i);
933 if (dim != ShapedType::kDynamic)
935 outputShape.push_back(dim);
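// On the elided line each static dimension is multiplied by its 'multiples'
// entry, e.g. an input of [3, ?, 5] with multiples [2, 2, 2] infers [6, ?, 10].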
943 ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
944 ShapedType outputType = llvm::cast<ShapedType>(getType());
945 auto multiples = getMultiples();
947 if (inputType.hasRank()) {
948 if (static_cast<size_t>(inputType.getRank()) != multiples.size())
949 return emitOpError("expect 'multiples' array to have length ")
950 << inputType.getRank() << " but got " << multiples.size() << ".";
951 if (outputType.hasRank() && inputType.getRank() != outputType.getRank())
952 return emitOpError("expect same input and output tensor rank.");
953 } else if (outputType.hasRank() &&
954 static_cast<size_t>(outputType.getRank()) != multiples.size())
955 return emitOpError("expect 'multiples' array to have length ")
956 << outputType.getRank() << " but got " << multiples.size() << ".";
958 if (llvm::any_of(multiples, [](int64_t v) { return v <= 0 && v != -1; }))
960 "expect element of 'multiples' to be positive integer or -1.");
966 if (l.size() != r.size() || l.size() != 1)
971 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
972 MLIRContext *context, ::std::optional<Location> location,
973 ReshapeOp::Adaptor adaptor,
982 if (!inputShape.hasRank() || !inputShape.hasStaticShape()) {
983 inferredReturnShapes.push_back(
991 int64_t numElements = inputShape.getNumElements();
992 int64_t staticMul = 1;
993 for (auto val : newShapeValue) {
994 if (!ShapedType::isDynamic(val)) {
1000 for (auto &val : newShapeValue) {
1001 if (ShapedType::isDynamic(val))
1002 val = numElements / staticMul;
1005 inferredReturnShapes.push_back(
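// A single dynamic (-1) entry in the new shape is replaced by the element count
// divided by the product of the static entries, e.g. reshaping 24 elements to
// [2, -1, 3] infers [2, 4, 3].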
1011 TensorType inputType = getInput1().getType();
1012 RankedTensorType outputType = getType();
1014 if ((int64_t)getNewShape().size() != outputType.getRank())
1015 return emitOpError() << "new shape does not match result rank";
1017 for (auto [newShapeDim, outputShapeDim] :
1018 zip(getNewShape(), outputType.getShape())) {
1019 if (newShapeDim != -1 && outputShapeDim != ShapedType::kDynamic &&
1020 newShapeDim != outputShapeDim)
1021 return emitOpError() << "new shape is inconsistent with result shape";
1023 if (newShapeDim != ShapedType::kDynamic && newShapeDim < -1)
1024 return emitOpError() << "new shape has invalid tensor dimension size "
1028 if (inputType.hasStaticShape()) {
1029 int64_t inputElementsNum = inputType.getNumElements();
1030 if (outputType.hasStaticShape()) {
1031 int64_t outputElementsNum = outputType.getNumElements();
1032 if (inputElementsNum != outputElementsNum) {
1033 return emitOpError() << "cannot reshape " << inputElementsNum
1034 << " elements into " << outputElementsNum;
1038 int64_t newShapeElementsNum = std::accumulate(
1039 getNewShape().begin(), getNewShape().end(), 1LL,
1040 [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; });
1041 bool isStaticNewShape =
1042 llvm::all_of(getNewShape(), [](int64_t s) { return s > 0; });
1043 if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) ||
1044 (!isStaticNewShape && newShapeElementsNum > inputElementsNum)) {
1045 return emitOpError() << "cannot reshape " << inputElementsNum
1046 << " elements into " << newShapeElementsNum;
1050 int missingDims = llvm::count(getNewShape(), -1);
1051 if (missingDims > 1)
1052 return emitOpError() << "expected at most one target dimension to be -1";
1054 return mlir::success();
1064 for (auto v : permsAttr.getValues<APInt>())
1065 perms.push_back(v.getSExtValue());
1070 LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
1071 MLIRContext *context, ::std::optional<Location> location,
1072 TransposeOp::Adaptor adaptor,
1074 ShapeAdaptor inputShape(adaptor.getInput1().getType());
1078 if (permsShape.hasRank() && permsShape.getRank() == 0)
1083 if (!inputShape.hasRank() || !permsShape.hasRank() ||
1084 permsShape.isDynamicDim(0)) {
1091 if (permsShape.getDimSize(0) != inputShape.getRank()) {
1097 if (inputShape.getRank() == 0) {
1103 bool allTheSame = true;
1104 for (int i = 1, s = inputShape.getRank(); i < s; i++) {
1105 if (inputShape.getDimSize(0) != inputShape.getDimSize(i)) {
1114 outputShape.resize(inputShape.getRank(), inputShape.getDimSize(0));
1119 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
1124 attr.getType().getRank() == 1) {
1127 if (inputShape.getRank() != permShape.getRank())
1129 "constant permutation must be the same length"
1130 " as the input rank");
1133 for (int i = 0, e = inputShape.getRank(); i < e; i++) {
1134 if (inputShape.getRank() <= permShape.getDimSize(i))
1138 outputShape.reserve(inputShape.getRank());
1139 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
1140 outputShape[i] = inputShape.getDimSize(permShape.getDimSize(i));
1149 TensorType inputType = getInput1().getType();
1151 TensorType outputType = getOutput().getType();
1153 if (permType.hasRank() && permType.getRank() != 1)
1154 return emitOpError()
1155 << "expected permutation tensor to be rank 1 but got rank "
1156 << permType.getRank();
1158 if (!permType.isDynamicDim(0) &&
1159 permType.getDimSize(0) != inputType.getRank())
1160 return emitOpError() << "expected permutation tensor dim 0 to have size "
1161 << inputType.getRank()
1162 << " (input rank) but got size "
1163 << permType.getDimSize(0);
1165 inputType.getRank() != outputType.getRank())
1166 return emitOpError()
1167 << "expected input tensor rank to equal result tensor rank";
1169 if (!permType.isDynamicDim(0) &&
1170 permType.getDimSize(0) != outputType.getRank())
1171 return emitOpError() << "expected permutation tensor dim 0 to have size "
1172 << outputType.getRank()
1173 << " (output rank) but got size "
1174 << permType.getDimSize(0);
1177 if (succeeded(getConstantPerms(constantPerms))) {
1181 "Unexpectedly found permutation tensor without rank");
1182 if (!llvm::all_of(constantPerms,
1183 [&constantPerms](int32_t s) {
1185 static_cast<size_t>(s) < constantPerms.size();
1188 constantPerms, [](int32_t v) -> int64_t { return v; }))))
1189 return emitOpError() << "expected valid permutation tensor";
1194 assert(constantPerms.size() == static_cast<size_t>(inputType.getRank()) &&
1195 inputType.getRank() == outputType.getRank());
1197 for (auto i = 0; i < outputType.getRank(); i++) {
1198 if (inputType.isDynamicDim(constantPerms[i]) ||
1199 outputType.isDynamicDim(i))
1202 if (inputType.getDimSize(constantPerms[i]) != outputType.getDimSize(i))
1203 return emitOpError()
1204 << "expected output tensor dim " << i << " to match "
1205 << "input dim " << constantPerms[i] << " with value of "
1206 << inputType.getDimSize(constantPerms[i]);
1217 if (getConstantPerms(transposePerms).failed())
1220 Value input = getInput1();
1221 auto inputType = cast<TensorType>(input.getType());
1224 for (auto dim : transposePerms) {
1225 int32_t dimInInput = transposePerms[dim];
1226 if (inputType.isDynamicDim(dimInInput))
1228 builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
1232 builder.getIndexAttr(inputType.getDimSize(dimInInput));
1235 reifiedReturnShapes.emplace_back(std::move(returnedDims));
1239 LogicalResult tosa::GatherOp::inferReturnTypeComponents(
1240 MLIRContext *context, ::std::optional<Location> location,
1241 GatherOp::Adaptor adaptor,
1244 outputShape.resize(3, ShapedType::kDynamic);
1246 ShapeAdaptor valuesShape(adaptor.getValues().getType());
1247 if (valuesShape.hasRank()) {
1248 outputShape[0] = valuesShape.getDimSize(0);
1249 outputShape[2] = valuesShape.getDimSize(2);
1252 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1253 if (indicesShape.hasRank()) {
1254 if (outputShape[0] == ShapedType::kDynamic)
1255 outputShape[0] = indicesShape.getDimSize(0);
1256 if (outputShape[1] == ShapedType::kDynamic)
1257 outputShape[1] = indicesShape.getDimSize(1);
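// Gather shape inference: values [N, K, C] and indices [N, W] infer an output
// of shape [N, W, C].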
1264 LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
1265 MLIRContext *context, ::std::optional<Location> location,
1266 ResizeOp::Adaptor adaptor,
1269 outputShape.resize(4, ShapedType::kDynamic);
1272 if (!inputShape.hasRank())
1275 outputShape[0] = inputShape.getDimSize(0);
1276 outputShape[3] = inputShape.getDimSize(3);
1277 int64_t inputHeight = inputShape.getDimSize(1);
1278 int64_t inputWidth = inputShape.getDimSize(2);
1280 if ((inputHeight == ShapedType::kDynamic) ||
1281 (inputWidth == ShapedType::kDynamic))
1290 (((inputHeight - 1) * scaleInt[0] - offsetInt[0] + borderInt[0]) /
1295 (((inputWidth - 1) * scaleInt[2] - offsetInt[1] + borderInt[1]) /
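// With the elided divisor and trailing +1, the output size per spatial axis is
// roughly ((in - 1) * scale_numerator - offset + border) / scale_denominator + 1,
// computed separately for height (y) and width (x).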
1303 LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
1304 MLIRContext *context, ::std::optional<Location> location,
1305 ScatterOp::Adaptor adaptor,
1308 outputShape.resize(3, ShapedType::kDynamic);
1310 ShapeAdaptor valuesInShape(adaptor.getValuesIn().getType());
1311 if (valuesInShape.hasRank()) {
1312 outputShape[0] = valuesInShape.getDimSize(0);
1313 outputShape[1] = valuesInShape.getDimSize(1);
1314 outputShape[2] = valuesInShape.getDimSize(2);
1317 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1318 if (indicesShape.hasRank()) {
1319 if (outputShape[0] == ShapedType::kDynamic)
1320 outputShape[0] = indicesShape.getDimSize(0);
1324 if (inputShape.hasRank()) {
1325 if (outputShape[0] == ShapedType::kDynamic)
1326 outputShape[0] = inputShape.getDimSize(0);
1327 if (outputShape[2] == ShapedType::kDynamic)
1328 outputShape[2] = inputShape.getDimSize(2);
1338 int64_t axisVal = axis.getValue().getSExtValue();
1339 if (!operandShape.hasRank() || operandShape.getRank() <= axisVal) {
1345 operandShape.getDims(outputShape);
1346 outputShape[axisVal] = 1;
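// Reduction ops keep the operand shape but set the reduced axis to size 1,
// e.g. reducing [2, 3, 4] along axis 1 infers [2, 1, 4].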
1351 #define COMPATIBLE_RETURN_TYPES(OP) \
1352 bool OP::isCompatibleReturnTypes(TypeRange l, TypeRange r) { \
1353 if (l.size() != r.size() || l.size() != 1) \
1355 if (getElementTypeOrSelf(l[0]) != getElementTypeOrSelf(r[0])) \
1357 return succeeded(verifyCompatibleShape(l[0], r[0])); \
1360 #define REDUCE_SHAPE_INFER(OP) \
1361 LogicalResult OP::inferReturnTypeComponents( \
1362 MLIRContext *context, ::std::optional<Location> location, \
1363 OP::Adaptor adaptor, \
1364 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1366 llvm::cast<TensorType>(adaptor.getInput().getType()).getElementType(); \
1367 ShapeAdaptor inputShape(adaptor.getInput().getType()); \
1368 const Properties &prop = adaptor.getProperties(); \
1369 return ReduceInferReturnTypes(inputShape, inputType, prop.axis, \
1370 inferredReturnShapes); \
1372 COMPATIBLE_RETURN_TYPES(OP)
1380 #undef REDUCE_SHAPE_INFER
1382 #undef COMPATIBLE_RETURN_TYPES
1384 template <typename T>
1387 TensorType inputType = op.getInput().getType();
1388 TensorType outputType = op.getOutput().getType();
1389 int32_t reduceAxis = op.getAxis();
1391 if (reduceAxis < 0) {
1392 op.emitOpError("reduce axis must not be negative");
1396 int64_t inputRank = inputType.getRank();
1399 if (reduceAxis >= inputRank && !(reduceAxis == 0 && inputRank == 0)) {
1400 op.emitOpError("expect input tensor rank (")
1401 << inputRank << ") to be larger than reduce axis (" << reduceAxis
1407 int64_t outputRank = outputType.getRank();
1408 if (inputType.hasRank() && outputRank != inputType.getRank()) {
1410 "expect output tensor rank to be equal to input tensor rank");
1413 if (reduceAxis >= outputRank && !(reduceAxis == 0 && outputRank == 0)) {
1414 op.emitOpError("expect output tensor rank (")
1415 << outputRank << ") to be larger than reduce axis (" << reduceAxis
1421 if (outputRank != 0) {
1422 auto outputShape = outputType.getShape();
1423 if (!outputType.isDynamicDim(reduceAxis) &&
1424 outputShape[reduceAxis] != 1) {
1425 op.emitOpError("expect reduced dimension size to be 1, got ")
1426 << outputShape[reduceAxis];
1453 #define NARY_SHAPE_INFER(OP) \
1454 LogicalResult OP::inferReturnTypeComponents( \
1455 MLIRContext *context, ::std::optional<Location> location, \
1456 ValueShapeRange operands, DictionaryAttr attributes, \
1457 OpaqueProperties properties, RegionRange regions, \
1458 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1459 return NAryInferReturnTypes(operands, inferredReturnShapes); \
1502 #undef PRED_SHAPE_INFER
1509 outputShape.resize(4, ShapedType::kDynamic);
1524 if (!ShapedType::isDynamic(height)) {
1525 int64_t padded = height + pad[0] + pad[1] - kernel[0];
1526 outputShape[1] = padded / stride[0] + 1;
1529 if (!ShapedType::isDynamic(width)) {
1530 int64_t padded = width + pad[2] + pad[3] - kernel[1];
1531 outputShape[2] = padded / stride[1] + 1;
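// Pooling output size: out = (in + pad_before + pad_after - kernel) / stride + 1,
// e.g. height 32 with pads [1, 1], kernel 3, stride 2 gives (32 + 2 - 3) / 2 + 1 = 16.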
1538 LogicalResult Conv2DOp::inferReturnTypeComponents(
1539 MLIRContext *context, ::std::optional<Location> location,
1540 Conv2DOp::Adaptor adaptor,
1544 int64_t inputWidth = ShapedType::kDynamic;
1545 int64_t inputHeight = ShapedType::kDynamic;
1546 int64_t weightWidth = ShapedType::kDynamic;
1547 int64_t weightHeight = ShapedType::kDynamic;
1552 if (inputShape.hasRank()) {
1553 outputShape[0] = inputShape.getDimSize(0);
1554 inputHeight = inputShape.getDimSize(1);
1555 inputWidth = inputShape.getDimSize(2);
1559 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1560 if (weightShape.hasRank()) {
1561 outputShape[3] = weightShape.getDimSize(0);
1562 weightHeight = weightShape.getDimSize(1);
1563 weightWidth = weightShape.getDimSize(2);
1568 if (biasShape.hasRank()) {
1569 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1570 ? biasShape.getDimSize(0)
1578 if (!ShapedType::isDynamic(inputHeight) &&
1579 !ShapedType::isDynamic(weightHeight)) {
1580 int64_t inputSize = inputHeight + padding[0] + padding[1];
1581 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1582 int64_t unstridedResult = inputSize - filterSize + 1;
1583 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1586 if (!ShapedType::isDynamic(inputWidth) &&
1587 !ShapedType::isDynamic(weightWidth)) {
1588 int64_t inputSize = inputWidth + padding[2] + padding[3];
1589 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1590 int64_t unstridedResult = inputSize - filterSize + 1;
1591 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
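// Convolution output size: with dilated filter size (kernel - 1) * dilation + 1,
// out = (in + pad_before + pad_after - dilated_filter) / stride + 1, e.g. height
// 32 with pads [1, 1], kernel 3, dilation 1, stride 1 stays 32.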
1600 LogicalResult Conv3DOp::inferReturnTypeComponents(
1601 MLIRContext *context, ::std::optional<Location> location,
1602 Conv3DOp::Adaptor adaptor,
1606 int64_t inputWidth = ShapedType::kDynamic;
1607 int64_t inputHeight = ShapedType::kDynamic;
1608 int64_t inputDepth = ShapedType::kDynamic;
1610 int64_t weightWidth = ShapedType::kDynamic;
1611 int64_t weightHeight = ShapedType::kDynamic;
1612 int64_t weightDepth = ShapedType::kDynamic;
1616 if (inputShape.hasRank()) {
1617 outputShape[0] = inputShape.getDimSize(0);
1618 inputDepth = inputShape.getDimSize(1);
1619 inputHeight = inputShape.getDimSize(2);
1620 inputWidth = inputShape.getDimSize(3);
1624 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1625 if (weightShape.hasRank()) {
1626 outputShape[4] = weightShape.getDimSize(0);
1627 weightDepth = weightShape.getDimSize(1);
1628 weightHeight = weightShape.getDimSize(2);
1629 weightWidth = weightShape.getDimSize(3);
1634 if (biasShape.hasRank() && ShapedType::isDynamic(outputShape[4])) {
1635 outputShape[4] = biasShape.getDimSize(0);
1642 if (!ShapedType::isDynamic(inputDepth) &&
1643 !ShapedType::isDynamic(weightDepth)) {
1644 int32_t inputSize = inputDepth + pad[0] + pad[1];
1645 int32_t filterSize = (weightDepth - 1) * dilation[0] + 1;
1646 int32_t unstridedResult = inputSize - filterSize + 1;
1647 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1650 if (!ShapedType::isDynamic(inputHeight) &&
1651 !ShapedType::isDynamic(weightHeight)) {
1652 int32_t inputSize = inputHeight + pad[2] + pad[3];
1653 int32_t filterSize = (weightHeight - 1) * dilation[1] + 1;
1654 int32_t unstridedResult = inputSize - filterSize + 1;
1655 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1658 if (!ShapedType::isDynamic(inputWidth) &&
1659 !ShapedType::isDynamic(weightWidth)) {
1660 int32_t inputSize = inputWidth + pad[4] + pad[5];
1661 int32_t filterSize = (weightWidth - 1) * dilation[2] + 1;
1662 int32_t unstridedResult = inputSize - filterSize + 1;
1663 outputShape[3] = (unstridedResult - 1) / stride[2] + 1;
1672 LogicalResult AvgPool2dOp::inferReturnTypeComponents(
1673 MLIRContext *context, ::std::optional<Location> location,
1674 AvgPool2dOp::Adaptor adaptor,
1677 const Properties &prop = adaptor.getProperties();
1679 inferredReturnShapes);
1682 LogicalResult MaxPool2dOp::inferReturnTypeComponents(
1683 MLIRContext *context, ::std::optional<Location> location,
1684 MaxPool2dOp::Adaptor adaptor,
1687 const Properties &prop = adaptor.getProperties();
1689 inferredReturnShapes);
1692 LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
1693 MLIRContext *context, ::std::optional<Location> location,
1694 DepthwiseConv2DOp::Adaptor adaptor,
1698 int64_t inputWidth = ShapedType::kDynamic;
1699 int64_t inputHeight = ShapedType::kDynamic;
1700 int64_t inputChannels = ShapedType::kDynamic;
1702 int64_t weightWidth = ShapedType::kDynamic;
1703 int64_t weightHeight = ShapedType::kDynamic;
1704 int64_t depthChannels = ShapedType::kDynamic;
1708 if (inputShape.hasRank()) {
1709 outputShape[0] = inputShape.getDimSize(0);
1710 inputHeight = inputShape.getDimSize(1);
1711 inputWidth = inputShape.getDimSize(2);
1712 inputChannels = inputShape.getDimSize(3);
1716 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1717 if (weightShape.hasRank()) {
1718 weightHeight = weightShape.getDimSize(0);
1719 weightWidth = weightShape.getDimSize(1);
1720 inputChannels = ShapedType::isDynamic(inputChannels)
1721 ? weightShape.getDimSize(2)
1723 depthChannels = weightShape.getDimSize(3);
1728 if (!ShapedType::isDynamic(inputChannels) &&
1729 !ShapedType::isDynamic(depthChannels)) {
1730 outputShape[3] = inputChannels * depthChannels;
1735 if (biasShape.hasRank()) {
1736 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1737 ? biasShape.getDimSize(0)
1745 if (!ShapedType::isDynamic(inputHeight) &&
1746 !ShapedType::isDynamic(weightHeight)) {
1747 int64_t inputSize = inputHeight + padding[0] + padding[1];
1748 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1749 int64_t unstridedResult = inputSize - filterSize + 1;
1750 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1753 if (!ShapedType::isDynamic(inputWidth) &&
1754 !ShapedType::isDynamic(weightWidth)) {
1755 int64_t inputSize = inputWidth + padding[2] + padding[3];
1756 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1757 int64_t unstridedResult = inputSize - filterSize + 1;
1758 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1767 LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
1768 MLIRContext *context, ::std::optional<Location> location,
1769 TransposeConv2DOp::Adaptor adaptor,
1775 int64_t inputWidth = ShapedType::kDynamic;
1776 int64_t inputHeight = ShapedType::kDynamic;
1777 int64_t weightWidth = ShapedType::kDynamic;
1778 int64_t weightHeight = ShapedType::kDynamic;
1782 if (inputShape.hasRank()) {
1783 outputShape[0] = ShapedType::isDynamic(outputShape[0])
1784 ? inputShape.getDimSize(0)
1786 inputHeight = inputShape.getDimSize(1);
1787 inputWidth = inputShape.getDimSize(2);
1791 ShapeAdaptor weightShape(adaptor.getFilter().getType());
1792 if (weightShape.hasRank()) {
1793 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1794 ? weightShape.getDimSize(0)
1796 weightHeight = weightShape.getDimSize(1);
1797 weightWidth = weightShape.getDimSize(2);
1802 if (biasShape.hasRank()) {
1803 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1804 ? biasShape.getDimSize(0)
1811 if (!ShapedType::isDynamic(inputHeight) &&
1812 !ShapedType::isDynamic(weightHeight)) {
1813 int64_t calculateSize =
1814 (inputHeight - 1) * stride[0] + padding[0] + padding[1] + weightHeight;
1816 ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
1819 if (!ShapedType::isDynamic(inputWidth) &&
1820 !ShapedType::isDynamic(weightWidth)) {
1821 int64_t calculateSize =
1822 (inputWidth - 1) * stride[1] + padding[2] + padding[3] + weightWidth;
1824 ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
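// Transpose convolution output size: out = (in - 1) * stride + pad_before +
// pad_after + kernel, e.g. height 16 with stride 2, pads [0, 0], kernel 3 gives 33.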
1831 LogicalResult IfOp::inferReturnTypeComponents(
1832 MLIRContext *context, ::std::optional<Location> location,
1833 IfOp::Adaptor adaptor,
1836 for (Region *region : adaptor.getRegions()) {
1837 for (auto &block : *region)
1838 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1839 yieldOps.push_back(returnOp);
1842 if (yieldOps.empty())
1847 resultKnowledge.reserve(yieldOps.front().getNumOperands());
1848 for (auto operand : yieldOps.front().getOperands()) {
1849 resultKnowledge.push_back(
1853 for (auto yieldOp : yieldOps) {
1854 if (resultKnowledge.size() != yieldOp.getNumOperands())
1858 int32_t index = it.index();
1860 resultKnowledge[index],
1864 resultKnowledge[index] = meet;
1869 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1875 LogicalResult WhileOp::inferReturnTypeComponents(
1876 MLIRContext *context, ::std::optional<Location> location,
1877 WhileOp::Adaptor adaptor,
1880 for (auto &block : adaptor.getBody())
1881 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1882 yieldOps.push_back(returnOp);
1886 if (yieldOps.empty())
1891 resultKnowledge.reserve(yieldOps.front().getNumOperands());
1892 for (auto operand : yieldOps.front().getOperands()) {
1893 resultKnowledge.push_back(
1897 for (auto yieldOp : yieldOps) {
1898 if (resultKnowledge.size() != yieldOp.getNumOperands())
1902 int32_t index = it.index();
1904 resultKnowledge[index],
1906 resultKnowledge[index] = meet;
1912 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1918 std::optional<SmallVector<int64_t, 4>> ApplyScaleOp::getShapeForUnroll() {
1919 if (auto vt = llvm::dyn_cast<VectorType>(getType()))
1920 return llvm::to_vector<4>(vt.getShape());
1921 return std::nullopt;
1958 bool printBlockTerminators = false;
1960 p << " " << getCond();
1961 if (!getResults().empty()) {
1962 p << " -> (" << getResultTypes() << ")";
1964 printBlockTerminators = true;
1969 printBlockTerminators);
1972 auto &elseRegion = getElseBranch();
1973 if (!elseRegion.empty()) {
1977 printBlockTerminators);
1984 TensorType inputType = getInput1().getType();
1985 TensorType outputType = getOutput().getType();
1986 int32_t reverseAxis = getAxis();
1988 if (reverseAxis < 0)
1989 return emitOpError("expected non-negative reverse axis");
1991 int64_t inputRank = inputType.getRank();
1994 if (reverseAxis >= inputRank && !(reverseAxis == 0 && inputRank == 0))
1995 return emitOpError("expect input tensor rank (")
1996 << inputRank << ") to be larger than reverse axis (" << reverseAxis
2000 int64_t outputRank = outputType.getRank();
2001 if (inputType.hasRank() && outputRank != inputType.getRank())
2003 "expect output tensor rank to be equal to input tensor rank");
2004 if (reverseAxis >= outputRank && !(reverseAxis == 0 && outputRank == 0))
2005 return emitOpError("expect output tensor rank (")
2006 << outputRank << ") to be larger than reverse axis ("
2007 << reverseAxis << ")";
2024 FunctionType functionType;
2029 result.addTypes(functionType.getResults());
2031 if (functionType.getNumInputs() != operands.size()) {
2033 << "expected as many input types as operands "
2034 << "(expected " << operands.size() << " got "
2035 << functionType.getNumInputs() << ")";
2045 for (size_t i = 0, e = regionArgs.size(); i != e; ++i)
2046 regionArgs[i].type = functionType.getInput(i);
2048 return failure(parser.parseRegion(*cond, regionArgs) ||
2056 StringRef prefix = "") {
2057 assert(blocksArgs.size() == initializers.size() &&
2058 "expected same length of arguments and initializers");
2059 if (initializers.empty())
2062 parser << prefix << '(';
2063 llvm::interleaveComma(
2064 llvm::zip(blocksArgs, initializers), parser,
2065 [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); });
2085 #define GET_ATTRDEF_CLASSES
2086 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
2092 #define GET_OP_CLASSES
2093 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"