29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/TypeSwitch.h"
36 #include "mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc"
42 #include "mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc"
45 #include "mlir/Dialect/Tosa/IR/TosaDialectBytecode.cpp.inc"
66 return (isa<tosa::IfOp>(dest->getParentOp()) ||
67 isa<tosa::WhileOp>(dest->getParentOp()));
73 TosaDialectBytecodeInterface(Dialect *dialect)
83 LogicalResult writeAttribute(Attribute attr,
85 return ::writeAttribute(attr, writer);
95 LogicalResult writeType(Type type,
97 return ::writeType(type, writer);
104 std::unique_ptr<DialectVersion>
107 reader.emitError("Dialect does not support versioning");
111 LogicalResult upgradeFromVersion(Operation *topLevelOp,
130 void TosaDialect::initialize() {
133 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
136 #define GET_ATTRDEF_LIST
137 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
139 addInterfaces<TosaDialectBytecodeInterface, TosaInlinerInterface>();
140 declarePromisedInterfaces<
141 mesh::ShardingInterface, ClampOp, SigmoidOp, TanhOp, AddOp,
142 ArithmeticRightShiftOp, BitwiseAndOp, BitwiseOrOp, BitwiseXorOp, IntDivOp,
143 LogicalAndOp, LogicalLeftShiftOp, LogicalRightShiftOp, LogicalOrOp,
144 LogicalXorOp, MaximumOp, MinimumOp, MulOp, PowOp, SubOp, AbsOp,
145 BitwiseNotOp, CeilOp, ClzOp, ExpOp, FloorOp, LogOp, LogicalNotOp,
146 NegateOp, ReciprocalOp, RsqrtOp, SelectOp, EqualOp, GreaterOp,
147 GreaterEqualOp, MatMulOp>();
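// Constant materialization hook: folded results arrive as ElementsAttr and are
// re-wrapped in tosa::ConstOp so the dialect owns its constants.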
154 if (llvm::isa<ElementsAttr>(value))
155 return builder.create<tosa::ConstOp>(loc, type,
156 llvm::cast<ElementsAttr>(value));
169 << "expected attribute";
171 if (auto typedAttr = dyn_cast<TypedAttr>(attr)) {
188 bool needsSpace = false;
189 auto typedAttr = dyn_cast_or_null<TypedAttr>(attr);
190 if (!typedAttr || typedAttr.getType() != type.getValue()) {
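// verifyConvOp<T>: shared verifier for convolution-style ops. It requires ranked
// input/weight tensors, both float or both quantized element types, and a
// quantization_info attribute exactly when the operands are quantized.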
207 template <typename T>
210 auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
211 auto weightType = llvm::dyn_cast<RankedTensorType>(op.getWeight().getType());
215 op.emitOpError("expect a ranked tensor for input, got ") << op.getInput();
219 op.emitOpError("expect a ranked tensor for weight, got ") << op.getWeight();
223 auto inputEType = inputType.getElementType();
224 auto weightEType = weightType.getElementType();
226 bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
227 bool weightIsQuant = !llvm::isa<FloatType>(weightEType);
230 if (inputIsQuant != weightIsQuant) {
232 "expect both input and weight to be float or not together, got ")
233 << inputEType << " and " << weightEType;
239 if ((inputIsQuant && !op.getQuantizationInfo()) ||
240 (!inputIsQuant && op.getQuantizationInfo())) {
241 op.emitOpError("quantizationattr is required for quantized type, and not "
242 "allowed for float type");
250 auto attrType = llvm::dyn_cast<TensorType>(getValueAttr().getType());
251 auto outputType = llvm::dyn_cast<TensorType>(getOutput().getType());
253 if (!attrType || !outputType) {
254 emitOpError("expected tensors for attr/result type");
258 if (auto result = llvm::dyn_cast<mlir::quant::QuantizedType>(
259 outputType.getElementType())) {
260 if (result.getStorageType() == attrType.getElementType())
264 if (attrType.getElementType() != outputType.getElementType()) {
265 emitOpError("expected same attr/result element types");
274 const auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
275 if (!resultETy.isIntOrIndex())
276 return emitOpError("result tensor is not of integer type");
279 const auto inputType = llvm::cast<ShapedType>(getInput().getType());
280 const int64_t axis = getAxisAttr().getInt();
281 if (inputType.hasRank() && ((axis < 0) || axis >= inputType.getRank()))
282 return emitOpError("specified axis is outside the rank of the tensor");
288 auto inputType = llvm::cast<ShapedType>(getInput().getType());
290 auto inputETy = inputType.getElementType();
291 auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
294 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
295 inputETy = quantType.getStorageType();
298 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(resultETy))
299 resultETy = quantType.getStorageType();
301 auto accType = getAccType();
302 if (llvm::isa<IntegerType>(inputETy) && !accType.isInteger(32))
303 return emitOpError("accumulator type for integer tensor is not i32");
305 if (inputETy.isF16() && !(accType.isF16() || accType.isF32()))
306 return emitOpError("accumulator type for f16 tensor is not f16/f32");
308 if (inputETy.isBF16() && !accType.isF32())
309 return emitOpError("accumulator type for bf16 tensor is not f32");
311 if (inputETy.isF32() && !accType.isF32())
312 return emitOpError("accumulator type for f32 tensor is not f32");
314 if ((inputETy.isF32() && resultETy.isF32()) ||
315 (inputETy.isF16() && resultETy.isF16()) ||
316 (inputETy.isBF16() && resultETy.isBF16()) ||
317 (inputETy.isInteger(8) && resultETy.isInteger(8)) ||
318 (inputETy.isInteger(16) && resultETy.isInteger(16)))
321 return emitOpError("input/output element types are incompatible.");
326 llvm::cast<ShapedType>(getInput().getType()).getElementType();
328 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy)) {
329 inputETy = quantType.getStorageType();
331 mlir::Type maxFpType = getMaxFpAttr().getType();
332 mlir::Type minFpType = getMinFpAttr().getType();
334 llvm::cast<ShapedType>(getOutput().getType()).getElementType();
336 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputETy)) {
337 outputETy = quantType.getStorageType();
341 if (inputETy != outputETy)
342 return emitOpError("input/output element types are incompatible.");
347 if (!inputETy.isInteger(dataTypeBitWidth)) {
348 if (((maxFpType != minFpType) ||
351 return emitOpError("min/max attributes types are incompatible with "
352 "input/output element types.");
437 auto inputType = llvm::dyn_cast<ShapedType>(a.getType());
438 assert(inputType && "Input must be a shaped tensor type!");
440 auto inputQType = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(
441 inputType.getElementType());
442 assert(inputQType && "Tensor must have quantized datatype!");
444 unsigned inputBits = inputQType.getStorageTypeIntegralWidth();
446 auto outputShapedType = llvm::dyn_cast<ShapedType>(outputType);
447 assert(outputShapedType && "Output must be a shaped type");
449 IntegerType accElementType;
454 auto accType = outputShapedType.clone(accElementType);
467 DenseArrayAttr kernel, DenseArrayAttr stride,
468 DenseArrayAttr pad, TypeAttr accType) {
477 result.types.push_back(outputType);
490 result.types.push_back(outputType);
503 result.types.push_back(outputType);
517 result.types.push_back(outputType);
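// resolveBroadcastShape: rank-align all operand shapes to the right and combine
// them dimension-wise; a size-1 dimension broadcasts against the other operand's
// size, and unranked operands leave the result dynamic.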
527 for (int i = 0, e = operands.size(); i != e; ++i) {
529 if (!shape.hasRank()) {
534 outRank = std::max<int64_t>(outRank, shape.getRank());
537 outShape.resize(outRank, 1);
539 for (int i = 0, e = operands.size(); i != e; ++i) {
541 auto rankDiff = outShape.size() - shape.getRank();
543 for (size_t i = 0, e = shape.getRank(); i < e; ++i) {
544 auto dim1 = outShape[i + rankDiff];
545 auto dim2 = shape.getDimSize(i);
546 auto resolvedDim = dim1;
550 } else if (dim2 == 1) {
552 } else if (dim1 != dim2) {
555 outShape[i + rankDiff] = resolvedDim;
562 LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
563 MLIRContext *context, ::std::optional<Location> location,
564 ArgMaxOp::Adaptor adaptor,
567 IntegerAttr axis = adaptor.getProperties().axis;
568 int32_t axisVal = axis.getValue().getSExtValue();
570 if (!inputShape.hasRank()) {
576 outShape.reserve(inputShape.getRank() - 1);
577 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
580 outShape.push_back(inputShape.getDimSize(i));
587 LogicalResult tosa::RFFT2dOp::inferReturnTypeComponents(
588 MLIRContext *context, ::std::optional<Location> location,
589 RFFT2dOp::Adaptor adaptor,
593 if (!inputShape.hasRank())
597 outputShape.resize(3, ShapedType::kDynamic);
598 outputShape[0] = inputShape.getDimSize(0);
599 outputShape[1] = inputShape.getDimSize(1);
600 int64_t inWidth = inputShape.getDimSize(2);
604 if (inWidth != ShapedType::kDynamic)
605 outputShape[2] = inWidth / 2 + 1;
613 LogicalResult tosa::FFT2dOp::inferReturnTypeComponents(
614 MLIRContext *context, ::std::optional<Location> location,
615 FFT2dOp::Adaptor adaptor,
617 inferredReturnShapes.push_back(
619 inferredReturnShapes.push_back(
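// ConcatOp shape inference: every ranked operand must match on the non-axis
// dimensions; the axis dimension is the sum of operand sizes, or dynamic if any
// contributing size is unknown.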
624 LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
625 MLIRContext *context, ::std::optional<Location> location,
626 ConcatOp::Adaptor adaptor,
629 const Properties &prop = adaptor.getProperties();
630 int32_t axis = prop.axis.getValue().getSExtValue();
632 bool hasRankedInput = false;
633 for (auto operand : adaptor.getOperands()) {
635 if (!operandShape.hasRank())
640 outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);
643 for (int i = 0, s = operandShape.getRank(); i < s; i++) {
644 if (i == axis || operandShape.isDynamicDim(i))
646 if (outputShape[i] == ShapedType::kDynamic)
647 outputShape[i] = operandShape.getDimSize(i);
648 if (outputShape[i] != operandShape.getDimSize(i))
650 "Cannot concat tensors with different sizes"
651 " on the non-axis dimension ",
655 hasRankedInput = true;
658 llvm::cast<TensorType>(adaptor.getInput1().getType()[0]).getElementType();
659 if (!hasRankedInput) {
665 int64_t concatDimSize = 0;
666 for (auto operand : adaptor.getOperands()) {
671 if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
672 concatDimSize = ShapedType::kDynamic;
676 concatDimSize += operandShape.getDimSize(axis);
679 outputShape[axis] = concatDimSize;
685 LogicalResult tosa::EqualOp::inferReturnTypeComponents(
686 MLIRContext *context, ::std::optional<Location> location,
703 if (l.size() != r.size() || l.size() != 1)
708 LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
709 MLIRContext *context, ::std::optional<Location> location,
710 FullyConnectedOp::Adaptor adaptor,
713 ShapeAdaptor weightShape(adaptor.getWeight().getType());
718 outShape.resize(2, ShapedType::kDynamic);
720 if (inputShape.hasRank()) {
721 outShape[0] = inputShape.getDimSize(0);
724 if (weightShape.hasRank()) {
725 outShape[1] = weightShape.getDimSize(0);
728 if (biasShape.hasRank()) {
729 outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
739 LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
740 MLIRContext *context, ::std::optional<Location> location,
741 MatMulOp::Adaptor adaptor,
748 outShape.resize(3, ShapedType::kDynamic);
750 if (lhsShape.hasRank()) {
751 outShape[0] = lhsShape.getDimSize(0);
752 outShape[1] = lhsShape.getDimSize(1);
755 if (rhsShape.hasRank()) {
756 outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
758 outShape[2] = rhsShape.getDimSize(2);
765 LogicalResult tosa::PadOp::inferReturnTypeComponents(
766 MLIRContext *context, ::std::optional<Location> location,
767 PadOp::Adaptor adaptor,
770 ShapeAdaptor paddingShape(adaptor.getPadding().getType());
775 if (!inputShape.hasRank() && !paddingShape.hasRank()) {
782 if (!inputShape.hasRank()) {
783 if (paddingShape.isDynamicDim(0)) {
788 outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
796 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
802 for (auto val : paddings) {
803 paddingValues.push_back(val.getSExtValue());
806 outputShape.reserve(inputShape.getRank());
807 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
808 if (inputShape.isDynamicDim(i)) {
809 outputShape.push_back(ShapedType::kDynamic);
813 outputShape.push_back(inputShape.getDimSize(i) + paddingValues[i * 2] +
814 paddingValues[i * 2 + 1]);
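// tosa.pad verifier: the input and output tensors must have the same rank, and a
// ranked 'padding' operand must be rank 2.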
822 RankedTensorType inputType = getInput1().getType();
823 RankedTensorType outputType = getOutput().getType();
824 TensorType paddingType = getPadding().getType();
826 if (inputType.getRank() != outputType.getRank())
827 return emitOpError() << "expect same input and output tensor rank.";
829 if (paddingType.hasRank() && paddingType.getRank() != 2)
830 return emitOpError() << "expect 'padding' tensor rank equal to 2.";
836 return to_vector(llvm::map_range(shape, [](int64_t dim) {
837 return dim == -1 ? ShapedType::kDynamic : dim;
841 LogicalResult tosa::SliceOp::inferReturnTypeComponents(
842 MLIRContext *context, ::std::optional<Location> location,
843 SliceOp::Adaptor adaptor,
845 inferredReturnShapes.push_back(
851 auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());
855 if (static_cast<size_t>(inputType.getRank()) != getStart().size())
857 "length of start attribute is not equal rank of input shape");
859 if (static_cast<size_t>(inputType.getRank()) != getSize().size())
861 "length of size attribute is not equal rank of input shape");
866 LogicalResult tosa::TableOp::inferReturnTypeComponents(
867 MLIRContext *context, ::std::optional<Location> location,
868 TableOp::Adaptor adaptor,
872 if (!inputShape.hasRank()) {
877 inferredReturnShapes.resize(1);
878 inputShape.getDims(inferredReturnShapes[0]);
884 TensorType outputType = getOutput().getType();
887 inputType.getRank() != outputType.getRank())
889 << "expected input tensor rank to equal result tensor rank";
891 auto inputDims = inputType.getShape();
892 auto outputDims = outputType.getShape();
894 int64_t dim = it.index();
895 auto [inputDim, outputDim] = it.value();
896 if (!ShapedType::isDynamic(outputDim) && outputDim != inputDim) {
897 return emitOpError() << "dim(result, " << dim << ") = " << outputDim
898 << " doesn't match dim(input, " << dim
899 << ") = " << inputDim;
905 LogicalResult tosa::TileOp::inferReturnTypeComponents(
906 MLIRContext *context, ::std::optional<Location> location,
907 TileOp::Adaptor adaptor,
912 if (!inputShape.hasRank()) {
913 outputShape.resize(multiples.size(), ShapedType::kDynamic);
916 } else if (static_cast<size_t>(inputShape.getRank()) != multiples.size())
920 outputShape.reserve(multiples.size());
921 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
922 int64_t dim = inputShape.getDimSize(i);
923 if (dim != ShapedType::kDynamic)
925 outputShape.push_back(dim);
933 ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
934 ShapedType outputType = llvm::cast<ShapedType>(getType());
935 auto multiples = getMultiples();
937 if (inputType.hasRank()) {
938 if (static_cast<size_t>(inputType.getRank()) != multiples.size())
939 return emitOpError("expect 'multiples' array to have length ")
940 << inputType.getRank() << " but got " << multiples.size() << ".";
941 if (outputType.hasRank() && inputType.getRank() != outputType.getRank())
942 return emitOpError("expect same input and output tensor rank.");
943 } else if (outputType.hasRank() &&
944 static_cast<size_t>(outputType.getRank()) != multiples.size())
945 return emitOpError("expect 'multiples' array to have length ")
946 << outputType.getRank() << " but got " << multiples.size() << ".";
948 if (llvm::any_of(multiples, [](int64_t v) { return v <= 0 && v != -1; }))
950 "expect element of 'multiples' to be positive integer or -1.");
956 if (l.size() != r.size() || l.size() != 1)
961 LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
962 MLIRContext *context, ::std::optional<Location> location,
963 ReshapeOp::Adaptor adaptor,
972 if (!inputShape.hasRank() || !inputShape.hasStaticShape()) {
973 inferredReturnShapes.push_back(
981 int64_t numElements = inputShape.getNumElements();
982 int64_t staticMul = 1;
983 for (auto val : newShapeValue) {
984 if (!ShapedType::isDynamic(val)) {
990 for (auto &val : newShapeValue) {
991 if (ShapedType::isDynamic(val))
992 val = numElements / staticMul;
995 inferredReturnShapes.push_back(
1001 TensorType inputType = getInput1().getType();
1002 RankedTensorType outputType = getType();
1004 if ((int64_t)getNewShape().size() != outputType.getRank())
1005 return emitOpError() << "new shape does not match result rank";
1007 for (auto [newShapeDim, outputShapeDim] :
1008 zip(getNewShape(), outputType.getShape())) {
1009 if (newShapeDim != -1 && outputShapeDim != ShapedType::kDynamic &&
1010 newShapeDim != outputShapeDim)
1011 return emitOpError() << "new shape is inconsistent with result shape";
1013 if (newShapeDim != ShapedType::kDynamic && newShapeDim < -1)
1014 return emitOpError() << "new shape has invalid tensor dimension size "
1018 if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
1019 int64_t inputElementsNum = inputType.getNumElements();
1020 int64_t outputElementsNum = outputType.getNumElements();
1021 if (inputElementsNum != outputElementsNum) {
1022 return emitOpError() << "cannot reshape " << inputElementsNum
1023 << " elements into " << outputElementsNum;
1027 int missingDims = llvm::count(getNewShape(), -1);
1028 if (missingDims > 1)
1029 return emitOpError() << "expected at most one target dimension to be -1";
1031 return mlir::success();
1041 for (auto v : permsAttr.getValues<APInt>())
1042 perms.push_back(v.getSExtValue());
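// TransposeOp shape inference: with a constant permutation the output dimensions
// are a reordering of the input dimensions; without one the result stays dynamic
// unless every input dimension has the same size.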
1047 LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
1048 MLIRContext *context, ::std::optional<Location> location,
1049 TransposeOp::Adaptor adaptor,
1051 ShapeAdaptor inputShape(adaptor.getInput1().getType());
1055 if (permsShape.hasRank() && permsShape.getRank() == 0)
1060 if (!inputShape.hasRank() || !permsShape.hasRank() ||
1061 permsShape.isDynamicDim(0)) {
1068 if (permsShape.getDimSize(0) != inputShape.getRank()) {
1074 if (inputShape.getRank() == 0) {
1080 bool allTheSame = true;
1081 for (int i = 1, s = inputShape.getRank(); i < s; i++) {
1082 if (inputShape.getDimSize(0) != inputShape.getDimSize(i)) {
1091 outputShape.resize(inputShape.getRank(), inputShape.getDimSize(0));
1096 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
1101 attr.getType().getRank() == 1) {
1104 if (inputShape.getRank() != permShape.getRank())
1106 "constant permutation must be the same length"
1107 " as the input rank");
1110 for (int i = 0, e = inputShape.getRank(); i < e; i++) {
1111 if (inputShape.getRank() <= permShape.getDimSize(i))
1115 outputShape.reserve(inputShape.getRank());
1116 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
1117 outputShape[i] = inputShape.getDimSize(permShape.getDimSize(i));
1126 TensorType inputType = getInput1().getType();
1128 TensorType outputType = getOutput().getType();
1130 if (permType.hasRank() && permType.getRank() != 1)
1131 return emitOpError()
1132 << "expected permutation tensor to be rank 1 but got rank "
1133 << permType.getRank();
1135 if (!permType.isDynamicDim(0) &&
1136 permType.getDimSize(0) != inputType.getRank())
1137 return emitOpError() << "expected permutation tensor dim 0 to have size "
1138 << inputType.getRank()
1139 << " (input rank) but got size "
1140 << permType.getDimSize(0);
1142 inputType.getRank() != outputType.getRank())
1143 return emitOpError()
1144 << "expected input tensor rank to equal result tensor rank";
1146 if (!permType.isDynamicDim(0) &&
1147 permType.getDimSize(0) != outputType.getRank())
1148 return emitOpError() << "expected permutation tensor dim 0 to have size "
1149 << outputType.getRank()
1150 << " (output rank) but got size "
1151 << permType.getDimSize(0);
1154 if (succeeded(getConstantPerms(constantPerms))) {
1158 "Unexpectedly found permutation tensor without rank");
1159 if (!llvm::all_of(constantPerms,
1160 [&constantPerms](int32_t s) {
1162 static_cast<size_t>(s) < constantPerms.size();
1165 constantPerms, [](int32_t v) -> int64_t { return v; }))))
1166 return emitOpError() << "expected valid permutation tensor";
1171 assert(constantPerms.size() == static_cast<size_t>(inputType.getRank()) &&
1172 inputType.getRank() == outputType.getRank());
1174 for (auto i = 0; i < outputType.getRank(); i++) {
1175 if (inputType.isDynamicDim(constantPerms[i]) ||
1176 outputType.isDynamicDim(i))
1179 if (inputType.getDimSize(constantPerms[i]) != outputType.getDimSize(i))
1180 return emitOpError()
1181 << "expected output tensor dim " << i << " to match "
1182 << "input dim " << constantPerms[i] << " with value of "
1183 << inputType.getDimSize(constantPerms[i]);
1194 if (getConstantPerms(transposePerms).failed())
1197 Value input = getInput1();
1198 auto inputType = cast<TensorType>(input.getType());
1201 for (auto dim : transposePerms) {
1202 int32_t dimInInput = transposePerms[dim];
1203 if (inputType.isDynamicDim(dimInInput))
1205 builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
1209 builder.getIndexAttr(inputType.getDimSize(dimInInput));
1212 reifiedReturnShapes.emplace_back(std::move(returnedDims));
1216 LogicalResult tosa::GatherOp::inferReturnTypeComponents(
1217 MLIRContext *context, ::std::optional<Location> location,
1218 GatherOp::Adaptor adaptor,
1221 outputShape.resize(3, ShapedType::kDynamic);
1223 ShapeAdaptor valuesShape(adaptor.getValues().getType());
1224 if (valuesShape.hasRank()) {
1225 outputShape[0] = valuesShape.getDimSize(0);
1226 outputShape[2] = valuesShape.getDimSize(2);
1229 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1230 if (indicesShape.hasRank()) {
1231 if (outputShape[0] == ShapedType::kDynamic)
1232 outputShape[0] = indicesShape.getDimSize(0);
1233 if (outputShape[1] == ShapedType::kDynamic)
1234 outputShape[1] = indicesShape.getDimSize(1);
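// ResizeOp spatial inference (when input height/width are static), with scale
// given as numerator/denominator pairs:
//   out = ((in - 1) * scale_n - offset + border) / scale_d + 1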
1241 LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
1242 MLIRContext *context, ::std::optional<Location> location,
1243 ResizeOp::Adaptor adaptor,
1246 outputShape.resize(4, ShapedType::kDynamic);
1249 if (!inputShape.hasRank())
1252 outputShape[0] = inputShape.getDimSize(0);
1253 outputShape[3] = inputShape.getDimSize(3);
1254 int64_t inputHeight = inputShape.getDimSize(1);
1255 int64_t inputWidth = inputShape.getDimSize(2);
1257 if ((inputHeight == ShapedType::kDynamic) ||
1258 (inputWidth == ShapedType::kDynamic))
1267 (((inputHeight - 1) * scaleInt[0] - offsetInt[0] + borderInt[0]) /
1272 (((inputWidth - 1) * scaleInt[2] - offsetInt[1] + borderInt[1]) /
1280 LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
1281 MLIRContext *context, ::std::optional<Location> location,
1282 ScatterOp::Adaptor adaptor,
1285 outputShape.resize(3, ShapedType::kDynamic);
1287 ShapeAdaptor valuesInShape(adaptor.getValuesIn().getType());
1288 if (valuesInShape.hasRank()) {
1289 outputShape[0] = valuesInShape.getDimSize(0);
1290 outputShape[1] = valuesInShape.getDimSize(1);
1291 outputShape[2] = valuesInShape.getDimSize(2);
1294 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1295 if (indicesShape.hasRank()) {
1296 if (outputShape[0] == ShapedType::kDynamic)
1297 outputShape[0] = indicesShape.getDimSize(0);
1301 if (inputShape.hasRank()) {
1302 if (outputShape[0] == ShapedType::kDynamic)
1303 outputShape[0] = inputShape.getDimSize(0);
1304 if (outputShape[2] == ShapedType::kDynamic)
1305 outputShape[2] = inputShape.getDimSize(2);
1315 int64_t axisVal = axis.getValue().getSExtValue();
1316 if (!operandShape.hasRank() || operandShape.getRank() <= axisVal) {
1322 operandShape.getDims(outputShape);
1323 outputShape[axisVal] = 1;
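// The two macros below stamp out isCompatibleReturnTypes and
// inferReturnTypeComponents for every reduction op: the reduced axis is forced to
// size 1 and result types must agree up to compatible shapes.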
1328 #define COMPATIBLE_RETURN_TYPES(OP) \
1329 bool OP::isCompatibleReturnTypes(TypeRange l, TypeRange r) { \
1330 if (l.size() != r.size() || l.size() != 1) \
1332 if (getElementTypeOrSelf(l[0]) != getElementTypeOrSelf(r[0])) \
1334 return succeeded(verifyCompatibleShape(l[0], r[0])); \
1337 #define REDUCE_SHAPE_INFER(OP) \
1338 LogicalResult OP::inferReturnTypeComponents( \
1339 MLIRContext *context, ::std::optional<Location> location, \
1340 OP::Adaptor adaptor, \
1341 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1343 llvm::cast<TensorType>(adaptor.getInput().getType()).getElementType(); \
1344 ShapeAdaptor inputShape(adaptor.getInput().getType()); \
1345 const Properties &prop = adaptor.getProperties(); \
1346 return ReduceInferReturnTypes(inputShape, inputType, prop.axis, \
1347 inferredReturnShapes); \
1349 COMPATIBLE_RETURN_TYPES(OP)
1357 #undef REDUCE_SHAPE_INFER
1359 #undef COMPATIBLE_RETURN_TYPES
1361 template <typename T>
1364 TensorType inputType = op.getInput().getType();
1365 TensorType outputType = op.getOutput().getType();
1366 int32_t reduceAxis = op.getAxis();
1368 if (reduceAxis < 0) {
1369 op.emitOpError("reduce axis must not be negative");
1373 int64_t inputRank = inputType.getRank();
1376 if (reduceAxis >= inputRank && !(reduceAxis == 0 && inputRank == 0)) {
1378 << inputRank << ") to be larger than reduce axis (" << reduceAxis
1384 int64_t outputRank = outputType.getRank();
1385 if (inputType.hasRank() && outputRank != inputType.getRank()) {
1387 "expect output tensor rank to be equal to input tensor rank");
1390 if (reduceAxis >= outputRank && !(reduceAxis == 0 && outputRank == 0)) {
1392 << outputRank << ") to be larger than reduce axis (" << reduceAxis
1398 if (outputRank != 0) {
1399 auto outputShape = outputType.getShape();
1400 if (!outputType.isDynamicDim(reduceAxis) &&
1401 outputShape[reduceAxis] != 1) {
1402 op.emitOpError("expect reduced dimension size to be 1, got ")
1403 << outputShape[reduceAxis];
1430 #define NARY_SHAPE_INFER(OP) \
1431 LogicalResult OP::inferReturnTypeComponents( \
1432 MLIRContext *context, ::std::optional<Location> location, \
1433 ValueShapeRange operands, DictionaryAttr attributes, \
1434 OpaqueProperties properties, RegionRange regions, \
1435 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1436 return NAryInferReturnTypes(operands, inferredReturnShapes); \
1479 #undef PRED_SHAPE_INFER
1486 outputShape.resize(4, ShapedType::kDynamic);
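// Pooling output size per spatial dimension:
//   out = (in + pad_before + pad_after - kernel) / stride + 1  (floor division)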
1501 if (!ShapedType::isDynamic(height)) {
1502 int64_t padded = height + pad[0] + pad[1] - kernel[0];
1503 outputShape[1] = padded / stride[0] + 1;
1506 if (!ShapedType::isDynamic(width)) {
1507 int64_t padded = width + pad[2] + pad[3] - kernel[1];
1508 outputShape[2] = padded / stride[1] + 1;
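// The Conv2D/Conv3D/DepthwiseConv2D inferences below share one spatial formula:
//   out = (in + pad_total - dilation * (kernel - 1) - 1) / stride + 1  (floor division)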
1515 LogicalResult Conv2DOp::inferReturnTypeComponents(
1516 MLIRContext *context, ::std::optional<Location> location,
1517 Conv2DOp::Adaptor adaptor,
1521 int64_t inputWidth = ShapedType::kDynamic;
1522 int64_t inputHeight = ShapedType::kDynamic;
1523 int64_t weightWidth = ShapedType::kDynamic;
1524 int64_t weightHeight = ShapedType::kDynamic;
1529 if (inputShape.hasRank()) {
1530 outputShape[0] = inputShape.getDimSize(0);
1531 inputHeight = inputShape.getDimSize(1);
1532 inputWidth = inputShape.getDimSize(2);
1536 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1537 if (weightShape.hasRank()) {
1538 outputShape[3] = weightShape.getDimSize(0);
1539 weightHeight = weightShape.getDimSize(1);
1540 weightWidth = weightShape.getDimSize(2);
1545 if (biasShape.hasRank()) {
1546 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1547 ? biasShape.getDimSize(0)
1555 if (!ShapedType::isDynamic(inputHeight) &&
1556 !ShapedType::isDynamic(weightHeight)) {
1557 int64_t inputSize = inputHeight + padding[0] + padding[1];
1558 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1559 int64_t unstridedResult = inputSize - filterSize + 1;
1560 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1563 if (!ShapedType::isDynamic(inputWidth) &&
1564 !ShapedType::isDynamic(weightWidth)) {
1565 int64_t inputSize = inputWidth + padding[2] + padding[3];
1566 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1567 int64_t unstridedResult = inputSize - filterSize + 1;
1568 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1577 LogicalResult Conv3DOp::inferReturnTypeComponents(
1578 MLIRContext *context, ::std::optional<Location> location,
1579 Conv3DOp::Adaptor adaptor,
1583 int64_t inputWidth = ShapedType::kDynamic;
1584 int64_t inputHeight = ShapedType::kDynamic;
1585 int64_t inputDepth = ShapedType::kDynamic;
1587 int64_t weightWidth = ShapedType::kDynamic;
1588 int64_t weightHeight = ShapedType::kDynamic;
1589 int64_t weightDepth = ShapedType::kDynamic;
1593 if (inputShape.hasRank()) {
1594 outputShape[0] = inputShape.getDimSize(0);
1595 inputDepth = inputShape.getDimSize(1);
1596 inputHeight = inputShape.getDimSize(2);
1597 inputWidth = inputShape.getDimSize(3);
1601 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1602 if (weightShape.hasRank()) {
1603 outputShape[4] = weightShape.getDimSize(0);
1604 weightDepth = weightShape.getDimSize(1);
1605 weightHeight = weightShape.getDimSize(2);
1606 weightWidth = weightShape.getDimSize(3);
1611 if (biasShape.hasRank() && ShapedType::isDynamic(outputShape[4])) {
1612 outputShape[4] = biasShape.getDimSize(0);
1619 if (!ShapedType::isDynamic(inputDepth) &&
1620 !ShapedType::isDynamic(weightDepth)) {
1621 int32_t inputSize = inputDepth + pad[0] + pad[1];
1622 int32_t filterSize = (weightDepth - 1) * dilation[0] + 1;
1623 int32_t unstridedResult = inputSize - filterSize + 1;
1624 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1627 if (!ShapedType::isDynamic(inputHeight) &&
1628 !ShapedType::isDynamic(weightHeight)) {
1629 int32_t inputSize = inputHeight + pad[2] + pad[3];
1630 int32_t filterSize = (weightHeight - 1) * dilation[1] + 1;
1631 int32_t unstridedResult = inputSize - filterSize + 1;
1632 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1635 if (!ShapedType::isDynamic(inputWidth) &&
1636 !ShapedType::isDynamic(weightWidth)) {
1637 int32_t inputSize = inputWidth + pad[4] + pad[5];
1638 int32_t filterSize = (weightWidth - 1) * dilation[2] + 1;
1639 int32_t unstridedResult = inputSize - filterSize + 1;
1640 outputShape[3] = (unstridedResult - 1) / stride[2] + 1;
1649 LogicalResult AvgPool2dOp::inferReturnTypeComponents(
1650 MLIRContext *context, ::std::optional<Location> location,
1651 AvgPool2dOp::Adaptor adaptor,
1654 const Properties &prop = adaptor.getProperties();
1656 inferredReturnShapes);
1659 LogicalResult MaxPool2dOp::inferReturnTypeComponents(
1660 MLIRContext *context, ::std::optional<Location> location,
1661 MaxPool2dOp::Adaptor adaptor,
1664 const Properties &prop = adaptor.getProperties();
1666 inferredReturnShapes);
1669 LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
1670 MLIRContext *context, ::std::optional<Location> location,
1671 DepthwiseConv2DOp::Adaptor adaptor,
1675 int64_t inputWidth = ShapedType::kDynamic;
1676 int64_t inputHeight = ShapedType::kDynamic;
1677 int64_t inputChannels = ShapedType::kDynamic;
1679 int64_t weightWidth = ShapedType::kDynamic;
1680 int64_t weightHeight = ShapedType::kDynamic;
1681 int64_t depthChannels = ShapedType::kDynamic;
1685 if (inputShape.hasRank()) {
1686 outputShape[0] = inputShape.getDimSize(0);
1687 inputHeight = inputShape.getDimSize(1);
1688 inputWidth = inputShape.getDimSize(2);
1689 inputChannels = inputShape.getDimSize(3);
1693 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1694 if (weightShape.hasRank()) {
1695 weightHeight = weightShape.getDimSize(0);
1696 weightWidth = weightShape.getDimSize(1);
1697 inputChannels = ShapedType::isDynamic(inputChannels)
1698 ? weightShape.getDimSize(2)
1700 depthChannels = weightShape.getDimSize(3);
1705 if (!ShapedType::isDynamic(inputChannels) &&
1706 !ShapedType::isDynamic(depthChannels)) {
1707 outputShape[3] = inputChannels * depthChannels;
1712 if (biasShape.hasRank()) {
1713 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1714 ? biasShape.getDimSize(0)
1722 if (!ShapedType::isDynamic(inputHeight) &&
1723 !ShapedType::isDynamic(weightHeight)) {
1724 int64_t inputSize = inputHeight + padding[0] + padding[1];
1725 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1726 int64_t unstridedResult = inputSize - filterSize + 1;
1727 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1730 if (!ShapedType::isDynamic(inputWidth) &&
1731 !ShapedType::isDynamic(weightWidth)) {
1732 int64_t inputSize = inputWidth + padding[2] + padding[3];
1733 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1734 int64_t unstridedResult = inputSize - filterSize + 1;
1735 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
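// TransposeConv2D inference: unless an explicit output size is already set, each
// spatial dimension is (in - 1) * stride + out_pad_total + kernel.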
1744 LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
1745 MLIRContext *context, ::std::optional<Location> location,
1746 TransposeConv2DOp::Adaptor adaptor,
1752 int64_t inputWidth = ShapedType::kDynamic;
1753 int64_t inputHeight = ShapedType::kDynamic;
1754 int64_t weightWidth = ShapedType::kDynamic;
1755 int64_t weightHeight = ShapedType::kDynamic;
1759 if (inputShape.hasRank()) {
1760 outputShape[0] = ShapedType::isDynamic(outputShape[0])
1761 ? inputShape.getDimSize(0)
1763 inputHeight = inputShape.getDimSize(1);
1764 inputWidth = inputShape.getDimSize(2);
1768 ShapeAdaptor weightShape(adaptor.getFilter().getType());
1769 if (weightShape.hasRank()) {
1770 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1771 ? weightShape.getDimSize(0)
1773 weightHeight = weightShape.getDimSize(1);
1774 weightWidth = weightShape.getDimSize(2);
1779 if (biasShape.hasRank()) {
1780 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1781 ? biasShape.getDimSize(0)
1788 if (!ShapedType::isDynamic(inputHeight) &&
1789 !ShapedType::isDynamic(weightHeight)) {
1790 int64_t calculateSize =
1791 (inputHeight - 1) * stride[0] + padding[0] + padding[1] + weightHeight;
1793 ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
1796 if (!ShapedType::isDynamic(inputWidth) &&
1797 !ShapedType::isDynamic(weightWidth)) {
1798 int64_t calculateSize =
1799 (inputWidth - 1) * stride[1] + padding[2] + padding[3] + weightWidth;
1801 ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
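// IfOp and WhileOp infer result shapes by taking the ValueKnowledge meet of the
// corresponding tosa.yield operands across their regions.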
1808 LogicalResult IfOp::inferReturnTypeComponents(
1809 MLIRContext *context, ::std::optional<Location> location,
1810 IfOp::Adaptor adaptor,
1813 for (Region *region : adaptor.getRegions()) {
1814 for (auto &block : *region)
1815 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1816 yieldOps.push_back(returnOp);
1819 if (yieldOps.empty())
1824 resultKnowledge.reserve(yieldOps.front().getNumOperands());
1825 for (auto operand : yieldOps.front().getOperands()) {
1826 resultKnowledge.push_back(
1830 for (auto yieldOp : yieldOps) {
1831 if (resultKnowledge.size() != yieldOp.getNumOperands())
1835 int32_t index = it.index();
1837 resultKnowledge[index],
1841 resultKnowledge[index] = meet;
1846 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1852 LogicalResult WhileOp::inferReturnTypeComponents(
1853 MLIRContext *context, ::std::optional<Location> location,
1854 WhileOp::Adaptor adaptor,
1857 for (auto &block : adaptor.getBody())
1858 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1859 yieldOps.push_back(returnOp);
1863 if (yieldOps.empty())
1868 resultKnowledge.reserve(yieldOps.front().getNumOperands());
1869 for (auto operand : yieldOps.front().getOperands()) {
1870 resultKnowledge.push_back(
1874 for (auto yieldOp : yieldOps) {
1875 if (resultKnowledge.size() != yieldOp.getNumOperands())
1879 int32_t index = it.index();
1881 resultKnowledge[index],
1883 resultKnowledge[index] = meet;
1889 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1895 std::optional<SmallVector<int64_t, 4>> ApplyScaleOp::getShapeForUnroll() {
1896 if (auto vt = llvm::dyn_cast<VectorType>(getType()))
1897 return llvm::to_vector<4>(vt.getShape());
1898 return std::nullopt;
1935 bool printBlockTerminators = false;
1937 p << " " << getCond();
1938 if (!getResults().empty()) {
1939 p << " -> (" << getResultTypes() << ")";
1941 printBlockTerminators = true;
1946 printBlockTerminators);
1949 auto &elseRegion = getElseBranch();
1950 if (!elseRegion.empty()) {
1954 printBlockTerminators);
1962 TensorType outputType = getOutput().getType();
1963 int32_t reverseAxis = getAxis();
1965 if (reverseAxis < 0)
1966 return emitOpError("expected non-negative reverse axis");
1968 int64_t inputRank = inputType.getRank();
1971 if (reverseAxis >= inputRank && !(reverseAxis == 0 && inputRank == 0))
1972 return emitOpError("expect input tensor rank (")
1973 << inputRank << ") to be larger than reverse axis (" << reverseAxis
1977 int64_t outputRank = outputType.getRank();
1978 if (inputType.hasRank() && outputRank != inputType.getRank())
1980 "expect output tensor rank to be equal to input tensor rank");
1981 if (reverseAxis >= outputRank && !(reverseAxis == 0 && outputRank == 0))
1982 return emitOpError("expect output tensor rank (")
1983 << outputRank << ") to be larger than reverse axis ("
1984 << reverseAxis << ")";
2001 FunctionType functionType;
2006 result.addTypes(functionType.getResults());
2008 if (functionType.getNumInputs() != operands.size()) {
2010 << "expected as many input types as operands "
2011 << "(expected " << operands.size() << " got "
2012 << functionType.getNumInputs() << ")";
2022 for (size_t i = 0, e = regionArgs.size(); i != e; ++i)
2023 regionArgs[i].type = functionType.getInput(i);
2025 return failure(parser.parseRegion(*cond, regionArgs) ||
2033 StringRef prefix = "") {
2034 assert(blocksArgs.size() == initializers.size() &&
2035 "expected same length of arguments and initializers");
2036 if (initializers.empty())
2039 parser << prefix << '(';
2040 llvm::interleaveComma(
2041 llvm::zip(blocksArgs, initializers), parser,
2042 [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); });
2062 #define GET_ATTRDEF_CLASSES
2063 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
2069 #define GET_OP_CLASSES
2070 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"