#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/TypeSwitch.h"

#include "mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc"

#include "mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc"

#include "mlir/Dialect/Tosa/IR/TosaDialectBytecode.cpp.inc"
    return (isa<tosa::IfOp>(dest->getParentOp()) ||
            isa<tosa::WhileOp>(dest->getParentOp()));
  TosaDialectBytecodeInterface(Dialect *dialect)
      : BytecodeDialectInterface(dialect) {}
  LogicalResult writeAttribute(Attribute attr,
                               DialectBytecodeWriter &writer) const override {
    return ::writeAttribute(attr, writer);
  }
  LogicalResult writeType(Type type,
                          DialectBytecodeWriter &writer) const override {
    return ::writeType(type, writer);
  }
  std::unique_ptr<DialectVersion>
  readVersion(DialectBytecodeReader &reader) const final {
    reader.emitError("Dialect does not support versioning");
    return nullptr;
  }
  LogicalResult upgradeFromVersion(Operation *topLevelOp,
                                   const DialectVersion &version) const final {
    return success();
  }
void TosaDialect::initialize() {
  addOperations<
#define GET_OP_LIST
#include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
      >();
  addAttributes<
#define GET_ATTRDEF_LIST
#include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
      >();
  addInterfaces<TosaDialectBytecodeInterface, TosaInlinerInterface>();
  declarePromisedInterfaces<
      mesh::ShardingInterface, ClampOp, SigmoidOp, TanhOp, AddOp,
      ArithmeticRightShiftOp, BitwiseAndOp, BitwiseOrOp, BitwiseXorOp, IntDivOp,
      LogicalAndOp, LogicalLeftShiftOp, LogicalRightShiftOp, LogicalOrOp,
      LogicalXorOp, MaximumOp, MinimumOp, MulOp, PowOp, SubOp, AbsOp,
      BitwiseNotOp, CeilOp, ClzOp, ExpOp, FloorOp, LogOp, LogicalNotOp,
      NegateOp, ReciprocalOp, RsqrtOp, SelectOp, EqualOp, GreaterOp,
      GreaterEqualOp, MatMulOp>();
}
  if (llvm::isa<ElementsAttr>(value))
    return builder.create<tosa::ConstOp>(loc, type,
                                         llvm::cast<ElementsAttr>(value));
      return parser.emitError(parser.getCurrentLocation())
             << "expected attribute";
    if (auto typedAttr = dyn_cast<TypedAttr>(attr)) {
      typeAttr = TypeAttr::get(typedAttr.getType());
    }
  bool needsSpace = false;
  auto typedAttr = dyn_cast_or_null<TypedAttr>(attr);
  if (!typedAttr || typedAttr.getType() != type.getValue()) {
  if (!shapedType.hasRank())
    return false;

  auto rank = shapedType.getRank();

  for (int i = 0; i < rank; i++) {
    if (shapedType.isDynamicDim(i))
      continue;
    if (shapedType.getDimSize(i) == 0)
      return true;
  }

  return false;
}
template <typename T>
static LogicalResult verifyConvOp(T op) {
  // All TOSA conv ops have an input() and weight().
  auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
  auto weightType = llvm::dyn_cast<RankedTensorType>(op.getWeight().getType());
  // Must be ranked tensor types.
  if (!inputType) {
    op.emitOpError("expect a ranked tensor for input, got ") << op.getInput();
    return failure();
  }
  if (!weightType) {
    op.emitOpError("expect a ranked tensor for weight, got ") << op.getWeight();
    return failure();
  }
    return op.emitOpError() << "tensor has a dimension with size zero. Each "
                               "dimension of a tensor must have size >= 1";
  auto inputEType = inputType.getElementType();
  auto weightEType = weightType.getElementType();

  bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
  bool weightIsQuant = !llvm::isa<FloatType>(weightEType);
  // Either both element types must be quantized or both must be float.
  if (inputIsQuant != weightIsQuant) {
    op.emitOpError(
        "expect both input and weight to be float or not together, got ")
        << inputEType << " and " << weightEType;
    return failure();
  }

  // A quantized type must carry the quantization attribute; a float type
  // must not.
  if ((inputIsQuant && !op.getQuantizationInfo()) ||
      (!inputIsQuant && op.getQuantizationInfo())) {
    op.emitOpError("quantizationattr is required for quantized type, and not "
                   "allowed for float type");
    return failure();
  }
  const auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
  if (!resultETy.isIntOrIndex())
    return emitOpError("result tensor is not of integer type");
  const auto inputType = llvm::cast<ShapedType>(getInput().getType());
  const int64_t axis = getAxisAttr().getInt();
  if (inputType.hasRank() && ((axis < 0) || axis >= inputType.getRank()))
    return emitOpError("specified axis is outside the rank of the tensor");
  auto inputType = llvm::cast<ShapedType>(getInput().getType());
  if (hasZeroDimension(inputType))
    return emitOpError() << "tensor has a dimension with size zero. Each "
                            "dimension of a tensor must have size >= 1";
  auto inputETy = inputType.getElementType();
  auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();

  if (auto quantType =
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
    inputETy = quantType.getStorageType();

  if (auto quantType =
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(resultETy))
    resultETy = quantType.getStorageType();
  auto accType = getAccType();
  if (llvm::isa<IntegerType>(inputETy) && !accType.isInteger(32))
    return emitOpError("accumulator type for integer tensor is not i32");

  if (inputETy.isF16() && !(accType.isF16() || accType.isF32()))
    return emitOpError("accumulator type for f16 tensor is not f16/f32");

  if (inputETy.isBF16() && !accType.isF32())
    return emitOpError("accumulator type for bf16 tensor is not f32");

  if (inputETy.isF32() && !accType.isF32())
    return emitOpError("accumulator type for f32 tensor is not f32");
  if ((inputETy.isF32() && resultETy.isF32()) ||
      (inputETy.isF16() && resultETy.isF16()) ||
      (inputETy.isBF16() && resultETy.isBF16()) ||
      (inputETy.isInteger(8) && resultETy.isInteger(8)) ||
      (inputETy.isInteger(16) && resultETy.isInteger(16)))
    return success();

  return emitOpError("input/output element types are incompatible.");
  mlir::Type inputETy =
      llvm::cast<ShapedType>(getInput().getType()).getElementType();
  if (auto quantType =
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy)) {
    inputETy = quantType.getStorageType();
  }
  mlir::Type maxFpType = getMaxFpAttr().getType();
  mlir::Type minFpType = getMinFpAttr().getType();
  mlir::Type outputETy =
      llvm::cast<ShapedType>(getOutput().getType()).getElementType();
  if (auto quantType =
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputETy)) {
    outputETy = quantType.getStorageType();
  }
  unsigned dataTypeBitWidth = inputETy.getIntOrFloatBitWidth();
  if (inputETy != outputETy)
    return emitOpError("input/output element types are incompatible.");
  // If the input datatype is float, check that the two min/max_fp attributes
  // share a type compatible with the input/output element type.
  if (!inputETy.isInteger(dataTypeBitWidth)) {
    if ((maxFpType != minFpType) ||
        (maxFpType != inputETy && maxFpType.getIntOrFloatBitWidth() !=
                                      inputETy.getIntOrFloatBitWidth()))
      return emitOpError("min/max attributes types are incompatible with "
                         "input/output element types.");
  }
    auto inputType = llvm::dyn_cast<ShapedType>(a.getType());
    assert(inputType && "Input must be a shaped tensor type!");

    auto inputQType = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(
        inputType.getElementType());
    assert(inputQType && "Tensor must have quantized datatype!");

    unsigned inputBits = inputQType.getStorageTypeIntegralWidth();

    auto outputShapedType = llvm::dyn_cast<ShapedType>(outputType);
    assert(outputShapedType && "Output must be a shaped type");

    IntegerType accElementType;
    if (inputBits == 16)
      accElementType = builder.getIntegerType(48);
    else
      accElementType = builder.getI32Type();
    auto accType = outputShapedType.clone(accElementType);
static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder,
                                          OperationState &result,
                                          Type outputType, Value input,
                                          DenseArrayAttr kernel,
                                          DenseArrayAttr stride,
                                          DenseArrayAttr pad,
                                          TypeAttr accType) {
  result.types.push_back(outputType);
  result.types.push_back(outputType);
  result.types.push_back(outputType);
  result.types.push_back(outputType);
  for (int i = 0, e = operands.size(); i != e; ++i) {
    auto shape = operands.getShape(i);
    if (!shape.hasRank()) {
      return failure();
    }
    outRank = std::max<int64_t>(outRank, shape.getRank());
  }

  outShape.resize(outRank, 1);

  for (int i = 0, e = operands.size(); i != e; ++i) {
    auto shape = operands.getShape(i);
    auto rankDiff = outShape.size() - shape.getRank();

    for (size_t i = 0, e = shape.getRank(); i < e; ++i) {
      auto dim1 = outShape[i + rankDiff];
      auto dim2 = shape.getDimSize(i);
      auto resolvedDim = dim1;

      if (dim1 == 1) {
        resolvedDim = dim2;
      } else if (dim2 == 1) {
        resolvedDim = dim1;
      } else if (dim1 != dim2) {
        return failure();
      }
      outShape[i + rankDiff] = resolvedDim;
    }
  }
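  // Worked example (illustrative, not from the original source): broadcasting
  // shapes [3, 1, 5] and [4, 1] aligns the shorter one on the right
  // (rankDiff = 1), then resolves per dimension: (3, -) -> 3, (1, 4) -> 4,
  // (5, 1) -> 5, giving outShape = [3, 4, 5]. Two disagreeing non-unit sizes
  // (e.g. 3 vs. 4) make the resolution fail.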
LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ArgMaxOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  IntegerAttr axis = adaptor.getProperties().axis;
  int32_t axisVal = axis.getValue().getSExtValue();

  if (!inputShape.hasRank()) {
    inferredReturnShapes.push_back(ShapedTypeComponents());
    return success();
  }

  SmallVector<int64_t> outShape;
  outShape.reserve(inputShape.getRank() - 1);
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
    if (i == axisVal)
      continue;
    outShape.push_back(inputShape.getDimSize(i));
  }
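  // Worked example (illustrative, not from the original source): for an input
  // of shape [2, 3, 4] with axis = 1, the loop skips dimension 1 and infers
  // an output shape of [2, 4].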
LogicalResult tosa::RFFT2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    RFFT2dOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());

  if (!inputShape.hasRank())
    return failure();

  llvm::SmallVector<int64_t> outputShape;
  outputShape.resize(3, ShapedType::kDynamic);
  outputShape[0] = inputShape.getDimSize(0);
  outputShape[1] = inputShape.getDimSize(1);
  int64_t inWidth = inputShape.getDimSize(2);

  // Note that we can support this calculation symbolically
  // in the future e.g. [x, y, z] -> [x, y, z / 2 + 1]
  if (inWidth != ShapedType::kDynamic)
    outputShape[2] = inWidth / 2 + 1;
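  // Worked example (illustrative, not from the original source): a real FFT
  // keeps only the non-redundant half of the spectrum, so an input of shape
  // [8, 16, 32] infers an output of shape [8, 16, 32 / 2 + 1] = [8, 16, 17].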
LogicalResult tosa::FFT2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    FFT2dOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  inferredReturnShapes.push_back(
      ShapedTypeComponents(ShapeAdaptor(adaptor.getInputReal().getType())));
  inferredReturnShapes.push_back(
      ShapedTypeComponents(ShapeAdaptor(adaptor.getInputImag().getType())));
LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ConcatOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  // Infer all dimension sizes by reducing based on inputs.
  const Properties &prop = adaptor.getProperties();
  int32_t axis = prop.axis.getValue().getSExtValue();
  llvm::SmallVector<int64_t> outputShape;
  bool hasRankedInput = false;
  for (auto operand : adaptor.getOperands()) {
    ShapeAdaptor operandShape(operand.getType());
    if (!operandShape.hasRank())
      continue;

    // Copy the operand's rank.
    if (!hasRankedInput)
      outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);

    // Copy shapes until the dim is non-dynamic.
    for (int i = 0, s = operandShape.getRank(); i < s; i++) {
      if (i == axis || operandShape.isDynamicDim(i))
        continue;
      if (outputShape[i] == ShapedType::kDynamic)
        outputShape[i] = operandShape.getDimSize(i);
      if (outputShape[i] != operandShape.getDimSize(i))
        return emitOptionalError(location,
                                 "Cannot concat tensors with different sizes"
                                 " on the non-axis dimension ",
                                 i);
    }

    hasRankedInput = true;
  }

  Type inputType =
      llvm::cast<TensorType>(adaptor.getInput1().getType()[0]).getElementType();
  if (!hasRankedInput) {
    inferredReturnShapes.push_back(ShapedTypeComponents(inputType));
    return success();
  }

  // Determine the dimension size along the concatenation axis.
  int64_t concatDimSize = 0;
  for (auto operand : adaptor.getOperands()) {
    ShapeAdaptor operandShape(operand.getType());

    // We need to know the length of the concatenation axis of all inputs to
    // determine the dimension size of the output shape.
    if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
      concatDimSize = ShapedType::kDynamic;
      break;
    }

    concatDimSize += operandShape.getDimSize(axis);
  }

  outputShape[axis] = concatDimSize;
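  // Worked example (illustrative, not from the original source):
  // concatenating tensor<2x3xf32> and tensor<2x5xf32> on axis = 1 checks
  // that the non-axis dimension (2) agrees, then sums the axis sizes:
  // 3 + 5 = 8, inferring a shape of [2, 8].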
LogicalResult tosa::EqualOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,

bool tosa::EqualOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
  if (l.size() != r.size() || l.size() != 1)
    return false;
LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    FullyConnectedOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  ShapeAdaptor biasShape(adaptor.getBias().getType());

  // All shapes are dynamic.
  SmallVector<int64_t> outShape;
  outShape.resize(2, ShapedType::kDynamic);

  if (inputShape.hasRank()) {
    outShape[0] = inputShape.getDimSize(0);
  }
  if (weightShape.hasRank()) {
    outShape[1] = weightShape.getDimSize(0);
  }
  if (biasShape.hasRank()) {
    outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
                                                      : outShape[1];
  }
LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    MatMulOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor lhsShape(adaptor.getA().getType());
  ShapeAdaptor rhsShape(adaptor.getB().getType());

  // All shapes are dynamic.
  SmallVector<int64_t> outShape;
  outShape.resize(3, ShapedType::kDynamic);

  if (lhsShape.hasRank()) {
    outShape[0] = lhsShape.getDimSize(0);
    outShape[1] = lhsShape.getDimSize(1);
  }
  if (rhsShape.hasRank()) {
    outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
                                                      : outShape[0];
    outShape[2] = rhsShape.getDimSize(2);
  }
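  // Worked example (illustrative, not from the original source): tosa.matmul
  // is batched, so A : [N, H, C] times B : [N, C, W] yields [N, H, W];
  // e.g. [2, 3, 4] x [2, 4, 5] infers [2, 3, 5].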
LogicalResult tosa::PadOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    PadOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput1().getType());
  ShapeAdaptor paddingShape(adaptor.getPadding().getType());
  SmallVector<int64_t> outputShape;

  // If both inputs have unknown shape, the output shape is unknown.
  if (!inputShape.hasRank() && !paddingShape.hasRank()) {
    inferredReturnShapes.push_back(ShapedTypeComponents());
    return success();
  }

  // If the input rank is unknown, infer the output rank from the padding
  // shape's first dimension.
  if (!inputShape.hasRank()) {
    if (paddingShape.isDynamicDim(0)) {
      inferredReturnShapes.push_back(ShapedTypeComponents());
      return success();
    }
    outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
    return success();
  }

  DenseIntElementsAttr paddings;
  // If the paddings value is not a constant, all dimensions must be dynamic.
  if (!matchPattern(adaptor.getPadding(), m_Constant(&paddings))) {
    outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
    return success();
  }

  SmallVector<int64_t> paddingValues;
  for (auto val : paddings) {
    paddingValues.push_back(val.getSExtValue());
  }

  outputShape.reserve(inputShape.getRank());
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
    if (inputShape.isDynamicDim(i)) {
      outputShape.push_back(ShapedType::kDynamic);
      continue;
    }
    outputShape.push_back(inputShape.getDimSize(i) + paddingValues[i * 2] +
                          paddingValues[i * 2 + 1]);
  }
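  // Worked example (illustrative, not from the original source): padding a
  // [3, 4] input with (before, after) pairs [[1, 1], [2, 0]] flattens to
  // paddingValues = [1, 1, 2, 0] and infers an output shape of
  // [3 + 1 + 1, 4 + 2 + 0] = [5, 6].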
  return to_vector(llvm::map_range(shape, [](int64_t dim) {
    return dim == -1 ? ShapedType::kDynamic : dim;
  }));
LogicalResult tosa::SliceOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    SliceOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  inferredReturnShapes.push_back(
      ShapedTypeComponents(convertToMlirShape(adaptor.getSize())));
LogicalResult tosa::SliceOp::verify() {
  auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());
  if (!inputType)
    return success();

  if (static_cast<size_t>(inputType.getRank()) != getStart().size())
    return emitOpError(
        "length of start attribute is not equal rank of input shape");

  if (static_cast<size_t>(inputType.getRank()) != getSize().size())
    return emitOpError(
        "length of size attribute is not equal rank of input shape");

  return success();
}
LogicalResult tosa::TableOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TableOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());

  if (!inputShape.hasRank()) {
    inferredReturnShapes.push_back(ShapedTypeComponents());
    return success();
  }

  inferredReturnShapes.resize(1);
  inputShape.getDims(inferredReturnShapes[0]);
  return success();
}
LogicalResult tosa::TileOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TileOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ArrayRef<int64_t> multiples = adaptor.getMultiples();
  ShapeAdaptor inputShape(adaptor.getInput1().getType());
  SmallVector<int64_t> outputShape;
  if (!inputShape.hasRank()) {
    outputShape.resize(multiples.size(), ShapedType::kDynamic);
    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
    return success();
  } else if (static_cast<size_t>(inputShape.getRank()) != multiples.size())
    return failure();

  // Any non-dynamic dimension can be multiplied to a known size.
  outputShape.reserve(multiples.size());
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
    int64_t dim = inputShape.getDimSize(i);
    if (dim != ShapedType::kDynamic)
      dim *= multiples[i];
    outputShape.push_back(dim);
  }
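  // Worked example (illustrative, not from the original source): tiling a
  // [2, 3] input with multiples = [2, 4] infers [2 * 2, 3 * 4] = [4, 12];
  // dynamic input dimensions stay dynamic.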
  ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
  ShapedType outputType = llvm::cast<ShapedType>(getType());
  auto multiples = getMultiples();

  if (inputType.hasRank()) {
    if (static_cast<size_t>(inputType.getRank()) != multiples.size())
      return emitOpError("expect 'multiples' array to have length ")
             << inputType.getRank() << " but got " << multiples.size() << ".";
    if (outputType.hasRank() && inputType.getRank() != outputType.getRank())
      return emitOpError("expect same input and output tensor rank.");
  } else if (outputType.hasRank() &&
             static_cast<size_t>(outputType.getRank()) != multiples.size())
    return emitOpError("expect 'multiples' array to have length ")
           << outputType.getRank() << " but got " << multiples.size() << ".";
  if (l.size() != r.size() || l.size() != 1)
    return false;
LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ReshapeOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput1().getType());
  Type inputType = getElementTypeOrSelf(adaptor.getInput1().getType());
  llvm::SmallVector<int64_t> newShapeValue =
      convertToMlirShape(adaptor.getNewShape());

  // We cannot infer from the total number of elements so we must take the
  // shape attribute as exact.
  if (!inputShape.hasRank() || !inputShape.hasStaticShape()) {
    inferredReturnShapes.push_back(
        ShapedTypeComponents(newShapeValue, inputType));
    return success();
  }

  // Count the elements covered by the static dimensions; this lets us infer
  // the length of the remaining dynamic dimension.
  int64_t numElements = inputShape.getNumElements();
  int64_t staticMul = 1;
  for (auto val : newShapeValue) {
    if (!ShapedType::isDynamic(val)) {
      staticMul *= val;
    }
  }

  // Determine the length of the dynamic dimension.
  for (auto &val : newShapeValue) {
    if (ShapedType::isDynamic(val))
      val = numElements / staticMul;
  }

  inferredReturnShapes.push_back(
      ShapedTypeComponents(newShapeValue, inputType));
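  // Worked example (illustrative, not from the original source): reshaping a
  // static [2, 3, 4] input (24 elements) with new_shape = [4, -1] computes
  // staticMul = 4 and resolves the -1 to 24 / 4 = 6, inferring [4, 6].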
LogicalResult tosa::ReshapeOp::verify() {
  TensorType inputType = getInput1().getType();
  RankedTensorType outputType = getType();

  if (hasZeroDimension(inputType) || hasZeroDimension(outputType))
    return emitOpError() << "tensor has a dimension with size zero. Each "
                            "dimension of a tensor must have size >= 1";

  if ((int64_t)getNewShape().size() != outputType.getRank())
    return emitOpError() << "new shape does not match result rank";

  for (auto [newShapeDim, outputShapeDim] :
       zip(getNewShape(), outputType.getShape()))
    if (newShapeDim != -1 && outputShapeDim != ShapedType::kDynamic &&
        newShapeDim != outputShapeDim)
      return emitOpError() << "new shape is inconsistent with result shape";

  if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
    int64_t inputElementsNum = inputType.getNumElements();
    int64_t outputElementsNum = outputType.getNumElements();
    if (inputElementsNum != outputElementsNum) {
      return emitOpError() << "cannot reshape " << inputElementsNum
                           << " elements into " << outputElementsNum;
    }
  }

  int missingDims = llvm::count(getNewShape(), -1);
  if (missingDims > 1)
    return emitOpError() << "expected at most one target dimension to be -1";

  return mlir::success();
}
  perms = llvm::to_vector(
      llvm::map_range(permsAttr.getValues<APInt>(),
                      [](const APInt &val) { return val.getSExtValue(); }));
LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TransposeOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput1().getType());
  ShapeAdaptor permsShape(adaptor.getPerms().getType());

  // We cannot infer anything from a rank-0 "permutation" tensor.
  if (permsShape.hasRank() && permsShape.getRank() == 0)
    return failure();

  // If input rank and permutation length are unknown, the output rank is
  // unknown.
  if (!inputShape.hasRank() || !permsShape.hasRank() ||
      permsShape.isDynamicDim(0)) {
    inferredReturnShapes.push_back(ShapedTypeComponents());
    return success();
  }

  // The number of permutations must match the rank of the input.
  if (permsShape.getDimSize(0) != inputShape.getRank()) {
    return failure();
  }

  SmallVector<int64_t> outputShape;
  // Rank-0 means no permutations matter.
  if (inputShape.getRank() == 0) {
    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
    return success();
  }

  // Check whether the input dimensions are all the same.
  bool allTheSame = true;
  for (int i = 1, s = inputShape.getRank(); i < s; i++) {
    if (inputShape.getDimSize(0) != inputShape.getDimSize(i)) {
      allTheSame = false;
      break;
    }
  }

  // If all input dimensions are the same, the permutation is irrelevant.
  if (allTheSame) {
    outputShape.resize(inputShape.getRank(), inputShape.getDimSize(0));
    inferredReturnShapes.push_back(ShapedTypeComponents(outputShape));
    return success();
  }

  outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
  // If the permutations are a constant we can directly determine the output
  // shape.
  DenseIntElementsAttr attr;
  if (matchPattern(adaptor.getPerms(), m_Constant(&attr)) &&
      attr.getType().getRank() == 1) {
    ShapeAdaptor permShape = attr;
    // Constant permutation must be the same length as the input rank.
    if (inputShape.getRank() != permShape.getRank())
      return emitOptionalError(location,
                               "constant permutation must be the same length"
                               " as the input rank");

    // Constant permutation values must be within the input rank.
    for (int i = 0, e = inputShape.getRank(); i < e; i++) {
      if (inputShape.getRank() <= permShape.getDimSize(i))
        return failure();
    }

    outputShape.reserve(inputShape.getRank());
    for (int i = 0, s = inputShape.getRank(); i < s; i++) {
      outputShape[i] = inputShape.getDimSize(permShape.getDimSize(i));
    }
  }
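  // Worked example (illustrative, not from the original source): with a
  // constant perms of [2, 0, 1] and an input of shape [1, 2, 3], each output
  // dim i takes inputShape[perms[i]], giving an output shape of [3, 1, 2].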
  TensorType inputType = getInput1().getType();
  TensorType permType = getPerms().getType();
  TensorType outputType = getOutput().getType();

  if (permType.hasRank() && permType.getRank() != 1)
    return emitOpError()
           << "expected permutation tensor to be rank 1 but got rank "
           << permType.getRank();
  if (inputType.hasRank() && permType.hasRank())
    if (!permType.isDynamicDim(0) &&
        permType.getDimSize(0) != inputType.getRank())
      return emitOpError() << "expected permutation tensor dim 0 to have size "
                           << inputType.getRank()
                           << " (input rank) but got size "
                           << permType.getDimSize(0);
  if (inputType.hasRank() && outputType.hasRank() &&
      inputType.getRank() != outputType.getRank())
    return emitOpError()
           << "expected input tensor rank to equal result tensor rank";
  if (outputType.hasRank() && permType.hasRank())
    if (!permType.isDynamicDim(0) &&
        permType.getDimSize(0) != outputType.getRank())
      return emitOpError() << "expected permutation tensor dim 0 to have size "
                           << outputType.getRank()
                           << " (output rank) but got size "
                           << permType.getDimSize(0);

  SmallVector<int64_t> constantPerms;
  if (succeeded(getConstantPerms(constantPerms))) {
    // The permutation tensor's rank was verified above.
    assert(permType.hasRank() &&
           "Unexpectedly found permutation tensor without rank");
    if (!isPermutationVector(constantPerms))
      return emitOpError() << "expected valid permutation tensor";
  }
  SmallVector<int64_t> transposePerms;
  if (getConstantPerms(transposePerms).failed())
    return failure();

  Value input = getInput1();
  auto inputType = cast<TensorType>(input.getType());

  SmallVector<OpFoldResult> returnedDims(inputType.getRank());
  for (auto dim : transposePerms) {
    int64_t dimInInput = transposePerms[dim];
    if (inputType.isDynamicDim(dimInInput))
      returnedDims[dim] =
          builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
              .getResult();
    else
      returnedDims[dim] =
          builder.getIndexAttr(inputType.getDimSize(dimInInput));
  }

  reifiedReturnShapes.emplace_back(std::move(returnedDims));
LogicalResult tosa::GatherOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    GatherOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t> outputShape;
  outputShape.resize(3, ShapedType::kDynamic);

  ShapeAdaptor valuesShape(adaptor.getValues().getType());
  if (valuesShape.hasRank()) {
    outputShape[0] = valuesShape.getDimSize(0);
    outputShape[2] = valuesShape.getDimSize(2);
  }

  ShapeAdaptor indicesShape(adaptor.getIndices().getType());
  if (indicesShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = indicesShape.getDimSize(0);
    if (outputShape[1] == ShapedType::kDynamic)
      outputShape[1] = indicesShape.getDimSize(1);
  }
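  // Worked example (illustrative, not from the original source): gather maps
  // values of shape [N, K, C] and indices of shape [N, W] to an output of
  // shape [N, W, C]; e.g. values [2, 10, 4] with indices [2, 6] infers
  // [2, 6, 4].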
LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ResizeOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t, 4> outputShape;
  outputShape.resize(4, ShapedType::kDynamic);

  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (!inputShape.hasRank())
    return failure();

  outputShape[0] = inputShape.getDimSize(0);
  outputShape[3] = inputShape.getDimSize(3);
  int64_t inputHeight = inputShape.getDimSize(1);
  int64_t inputWidth = inputShape.getDimSize(2);

  if ((inputHeight == ShapedType::kDynamic) ||
      (inputWidth == ShapedType::kDynamic))
    return failure();

  llvm::ArrayRef<int64_t> scaleInt = adaptor.getScale();
  llvm::ArrayRef<int64_t> offsetInt = adaptor.getOffset();
  llvm::ArrayRef<int64_t> borderInt = adaptor.getBorder();

  // Compute the output shape based on attributes: scale, offset, and border.
  outputShape[1] =
      (((inputHeight - 1) * scaleInt[0] - offsetInt[0] + borderInt[0]) /
       scaleInt[1]) +
      1;
  outputShape[2] =
      (((inputWidth - 1) * scaleInt[2] - offsetInt[1] + borderInt[1]) /
       scaleInt[3]) +
      1;
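  // Worked example (illustrative, not from the original source): scale is
  // laid out [y_n, y_d, x_n, x_d]. For inputHeight = 4, scale_y = 4/2,
  // offset = 0 and border = 0: ((4 - 1) * 4 - 0 + 0) / 2 + 1 = 7 output rows.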
LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ScatterOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t> outputShape;
  outputShape.resize(3, ShapedType::kDynamic);

  ShapeAdaptor valuesInShape(adaptor.getValuesIn().getType());
  if (valuesInShape.hasRank()) {
    outputShape[0] = valuesInShape.getDimSize(0);
    outputShape[1] = valuesInShape.getDimSize(1);
    outputShape[2] = valuesInShape.getDimSize(2);
  }

  ShapeAdaptor indicesShape(adaptor.getIndices().getType());
  if (indicesShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = indicesShape.getDimSize(0);
  }

  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (inputShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = inputShape.getDimSize(0);
    if (outputShape[2] == ShapedType::kDynamic)
      outputShape[2] = inputShape.getDimSize(2);
  }
  int64_t axisVal = axis.getValue().getSExtValue();
  if (!operandShape.hasRank() || operandShape.getRank() <= axisVal) {
    inferredReturnShapes.push_back(ShapedTypeComponents(inputType));
    return success();
  }

  SmallVector<int64_t> outputShape;
  operandShape.getDims(outputShape);
  outputShape[axisVal] = 1;
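  // Worked example (illustrative, not from the original source): a reduction
  // keeps the rank and sets the reduced axis to 1, so input [2, 3, 4] with
  // axis = 1 infers [2, 1, 4].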
#define COMPATIBLE_RETURN_TYPES(OP)                                            \
  bool OP::isCompatibleReturnTypes(TypeRange l, TypeRange r) {                 \
    if (l.size() != r.size() || l.size() != 1)                                 \
      return false;                                                            \
    if (getElementTypeOrSelf(l[0]) != getElementTypeOrSelf(r[0]))              \
      return false;                                                            \
    return succeeded(verifyCompatibleShape(l[0], r[0]));                       \
  }

#define REDUCE_SHAPE_INFER(OP)                                                 \
  LogicalResult OP::inferReturnTypeComponents(                                 \
      MLIRContext *context, ::std::optional<Location> location,                \
      OP::Adaptor adaptor,                                                     \
      SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
    Type inputType =                                                           \
        llvm::cast<TensorType>(adaptor.getInput().getType()).getElementType(); \
    ShapeAdaptor inputShape(adaptor.getInput().getType());                     \
    const Properties &prop = adaptor.getProperties();                          \
    return ReduceInferReturnTypes(inputShape, inputType, prop.axis,            \
                                  inferredReturnShapes);                       \
  }                                                                            \
  COMPATIBLE_RETURN_TYPES(OP)

#undef REDUCE_SHAPE_INFER
#undef COMPATIBLE_RETURN_TYPES
template <typename T>
static LogicalResult verifyReduceOp(T op) {
  // All TOSA reduce ops have input, output and axis.
  TensorType inputType = op.getInput().getType();
  TensorType outputType = op.getOutput().getType();
  int32_t reduceAxis = op.getAxis();

  if (reduceAxis < 0) {
    op.emitOpError("reduce axis must not be negative");
    return failure();
  }
  if (inputType.hasRank()) {
    int64_t inputRank = inputType.getRank();
    // We allow for a special case where the input/output shape has rank 0 and
    // axis is also 0.
    if (reduceAxis >= inputRank && !(reduceAxis == 0 && inputRank == 0)) {
      op.emitOpError("expect input tensor rank (")
          << inputRank << ") to be larger than reduce axis (" << reduceAxis
          << ")";
      return failure();
    }
  }
  if (outputType.hasRank()) {
    int64_t outputRank = outputType.getRank();
    if (inputType.hasRank() && outputRank != inputType.getRank()) {
      op.emitOpError(
          "expect output tensor rank to be equal to input tensor rank");
      return failure();
    }
    if (reduceAxis >= outputRank && !(reduceAxis == 0 && outputRank == 0)) {
      op.emitOpError("expect output tensor rank (")
          << outputRank << ") to be larger than reduce axis (" << reduceAxis
          << ")";
      return failure();
    }
    // We can only verify the reduced dimension size to be 1 if this is not
    // the special case of output rank == 0.
    if (outputRank != 0) {
      auto outputShape = outputType.getShape();
      if (!outputType.isDynamicDim(reduceAxis) &&
          outputShape[reduceAxis] != 1) {
        op.emitOpError("expect reduced dimension size to be 1, got ")
            << outputShape[reduceAxis];
        return failure();
      }
    }
  }
  return success();
}
#define NARY_SHAPE_INFER(OP)                                                   \
  LogicalResult OP::inferReturnTypeComponents(                                 \
      MLIRContext *context, ::std::optional<Location> location,                \
      ValueShapeRange operands, DictionaryAttr attributes,                     \
      OpaqueProperties properties, RegionRange regions,                        \
      SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
    return NAryInferReturnTypes(operands, inferredReturnShapes);               \
  }

#undef PRED_SHAPE_INFER
  outputShape.resize(4, ShapedType::kDynamic);

  if (!ShapedType::isDynamic(height)) {
    int64_t padded = height + pad[0] + pad[1] - kernel[0];
    outputShape[1] = padded / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(width)) {
    int64_t padded = width + pad[2] + pad[3] - kernel[1];
    outputShape[2] = padded / stride[1] + 1;
  }
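  // Worked example (illustrative, not from the original source): a 32-high
  // input with a 2x2 kernel, stride 2 and no padding gives
  // padded = 32 + 0 + 0 - 2 = 30 and outputShape[1] = 30 / 2 + 1 = 16.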
LogicalResult Conv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    Conv2DOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);
  }

  // Weight shape describes the filter width/height and the output channels.
  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    outputShape[3] = weightShape.getDimSize(0);
    weightHeight = weightShape.getDimSize(1);
    weightWidth = weightShape.getDimSize(2);
  }

  // Bias shape can describe the output channels.
  ShapeAdaptor biasShape(adaptor.getBias().getType());
  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)
                         : outputShape[3];
  }

  llvm::ArrayRef<int64_t> dilation = adaptor.getDilation();
  llvm::ArrayRef<int64_t> padding = adaptor.getPad();
  llvm::ArrayRef<int64_t> stride = adaptor.getStride();

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t inputSize = inputHeight + padding[0] + padding[1];
    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t inputSize = inputWidth + padding[2] + padding[3];
    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
  }
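  // Worked example (illustrative, not from the original source): for
  // inputHeight = 16, padding = {1, 1}, weightHeight = 3, dilation = 1 and
  // stride = 1: inputSize = 18, filterSize = 3, unstridedResult = 16, and
  // outputShape[1] = (16 - 1) / 1 + 1 = 16 (a "same"-sized convolution).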
LogicalResult Conv3DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    Conv3DOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t inputDepth = ShapedType::kDynamic;

  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;
  int64_t weightDepth = ShapedType::kDynamic;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputDepth = inputShape.getDimSize(1);
    inputHeight = inputShape.getDimSize(2);
    inputWidth = inputShape.getDimSize(3);
  }

  // Weight shape describes the filter width/height and the output channels.
  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    outputShape[4] = weightShape.getDimSize(0);
    weightDepth = weightShape.getDimSize(1);
    weightHeight = weightShape.getDimSize(2);
    weightWidth = weightShape.getDimSize(3);
  }

  // Bias shape can describe the output channels.
  ShapeAdaptor biasShape(adaptor.getBias().getType());
  if (biasShape.hasRank() && ShapedType::isDynamic(outputShape[4])) {
    outputShape[4] = biasShape.getDimSize(0);
  }

  llvm::ArrayRef<int64_t> dilation = adaptor.getDilation();
  llvm::ArrayRef<int64_t> pad = adaptor.getPad();
  llvm::ArrayRef<int64_t> stride = adaptor.getStride();

  if (!ShapedType::isDynamic(inputDepth) &&
      !ShapedType::isDynamic(weightDepth)) {
    int32_t inputSize = inputDepth + pad[0] + pad[1];
    int32_t filterSize = (weightDepth - 1) * dilation[0] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int32_t inputSize = inputHeight + pad[2] + pad[3];
    int32_t filterSize = (weightHeight - 1) * dilation[1] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int32_t inputSize = inputWidth + pad[4] + pad[5];
    int32_t filterSize = (weightWidth - 1) * dilation[2] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[3] = (unstridedResult - 1) / stride[2] + 1;
  }
LogicalResult AvgPool2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    AvgPool2dOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  const Properties &prop = adaptor.getProperties();
  return poolingInferReturnTypes(inputShape, prop.kernel, prop.stride,
                                 prop.pad, inferredReturnShapes);
}

LogicalResult MaxPool2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    MaxPool2dOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  const Properties &prop = adaptor.getProperties();
  return poolingInferReturnTypes(inputShape, prop.kernel, prop.stride,
                                 prop.pad, inferredReturnShapes);
}
LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    DepthwiseConv2DOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t inputChannels = ShapedType::kDynamic;

  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;
  int64_t depthChannels = ShapedType::kDynamic;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);
    inputChannels = inputShape.getDimSize(3);
  }

  // Weight shape describes the filter width/height and the channels.
  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    weightHeight = weightShape.getDimSize(0);
    weightWidth = weightShape.getDimSize(1);
    inputChannels = ShapedType::isDynamic(inputChannels)
                        ? weightShape.getDimSize(2)
                        : inputChannels;
    depthChannels = weightShape.getDimSize(3);
  }

  // If both inputChannels and depthChannels are available we can determine
  // the output channels.
  if (!ShapedType::isDynamic(inputChannels) &&
      !ShapedType::isDynamic(depthChannels)) {
    outputShape[3] = inputChannels * depthChannels;
  }

  // Bias shape can describe the output channels.
  ShapeAdaptor biasShape(adaptor.getBias().getType());
  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)
                         : outputShape[3];
  }

  llvm::ArrayRef<int64_t> dilation = adaptor.getDilation();
  llvm::ArrayRef<int64_t> padding = adaptor.getPad();
  llvm::ArrayRef<int64_t> stride = adaptor.getStride();

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t inputSize = inputHeight + padding[0] + padding[1];
    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t inputSize = inputWidth + padding[2] + padding[3];
    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
  }
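  // Worked example (illustrative, not from the original source): depthwise
  // weights are laid out [H, W, inChannels, multiplier], so inputChannels = 8
  // with depthChannels (the multiplier) = 2 infers
  // outputShape[3] = 8 * 2 = 16.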
LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TransposeConv2DOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  // outputShape is mutable.
  llvm::SmallVector<int64_t> outputShape =
      convertToMlirShape(adaptor.getOutShape());

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;

  // Input shape describes input width/height and batch.
  ShapeAdaptor inputShape(adaptor.getInput().getType());
  if (inputShape.hasRank()) {
    outputShape[0] = ShapedType::isDynamic(outputShape[0])
                         ? inputShape.getDimSize(0)
                         : outputShape[0];
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);
  }

  // Weight shape describes the filter width/height and the output channels.
  ShapeAdaptor weightShape(adaptor.getFilter().getType());
  if (weightShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? weightShape.getDimSize(0)
                         : outputShape[3];
    weightHeight = weightShape.getDimSize(1);
    weightWidth = weightShape.getDimSize(2);
  }

  // Bias shape can describe the output channels.
  ShapeAdaptor biasShape(adaptor.getBias().getType());
  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)
                         : outputShape[3];
  }

  llvm::ArrayRef<int64_t> padding = adaptor.getOutPad();
  llvm::ArrayRef<int64_t> stride = adaptor.getStride();

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t calculateSize =
        (inputHeight - 1) * stride[0] + padding[0] + padding[1] + weightHeight;
    outputShape[1] =
        ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
  }

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t calculateSize =
        (inputWidth - 1) * stride[1] + padding[2] + padding[3] + weightWidth;
    outputShape[2] =
        ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
  }
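  // Worked example (illustrative, not from the original source): a transposed
  // convolution grows the spatial dims; with inputHeight = 8, stride = 2,
  // out_pad = {0, 0} and weightHeight = 3: (8 - 1) * 2 + 0 + 0 + 3 = 17.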
LogicalResult IfOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    IfOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<tosa::YieldOp> yieldOps;
  for (Region *region : adaptor.getRegions()) {
    for (auto &block : *region)
      if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
        yieldOps.push_back(returnOp);
  }

  if (yieldOps.empty())
    return failure();

  // Get the initial type information for the yield op.
  llvm::SmallVector<ValueKnowledge> resultKnowledge;
  resultKnowledge.reserve(yieldOps.front().getNumOperands());
  for (auto operand : yieldOps.front().getOperands()) {
    resultKnowledge.push_back(
        ValueKnowledge::getKnowledgeFromType(operand.getType()));
  }

  for (auto yieldOp : yieldOps) {
    if (resultKnowledge.size() != yieldOp.getNumOperands())
      return failure();

    for (const auto &it : llvm::enumerate(yieldOp.getOperands())) {
      int32_t index = it.index();
      auto meet = ValueKnowledge::meet(
          resultKnowledge[index],
          ValueKnowledge::getKnowledgeFromType(it.value().getType()));
      if (!meet)
        continue;
      resultKnowledge[index] = meet;
    }
  }

  for (const ValueKnowledge &result : resultKnowledge) {
    inferredReturnShapes.push_back(result.getShapedTypeComponents());
  }
LogicalResult WhileOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    WhileOp::Adaptor adaptor,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  llvm::SmallVector<tosa::YieldOp> yieldOps;
  for (auto &block : adaptor.getBody())
    if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
      yieldOps.push_back(returnOp);

  if (yieldOps.empty())
    return failure();

  // Get the initial type information from the yield operands.
  llvm::SmallVector<ValueKnowledge> resultKnowledge;
  resultKnowledge.reserve(yieldOps.front().getNumOperands());
  for (auto operand : yieldOps.front().getOperands()) {
    resultKnowledge.push_back(
        ValueKnowledge::getKnowledgeFromType(operand.getType()));
  }

  for (auto yieldOp : yieldOps) {
    if (resultKnowledge.size() != yieldOp.getNumOperands())
      return failure();

    for (const auto &it : llvm::enumerate(yieldOp.getOperands())) {
      int32_t index = it.index();
      if (auto meet = ValueKnowledge::meet(
              resultKnowledge[index],
              ValueKnowledge::getKnowledgeFromType(it.value().getType()))) {
        resultKnowledge[index] = meet;
      }
    }
  }

  for (const ValueKnowledge &result : resultKnowledge) {
    inferredReturnShapes.push_back(result.getShapedTypeComponents());
  }
std::optional<SmallVector<int64_t, 4>> ApplyScaleOp::getShapeForUnroll() {
  if (auto vt = llvm::dyn_cast<VectorType>(getType()))
    return llvm::to_vector<4>(vt.getShape());
  return std::nullopt;
}
void IfOp::print(OpAsmPrinter &p) {
  bool printBlockTerminators = false;

  p << " " << getCond();
  if (!getResults().empty()) {
    p << " -> (" << getResultTypes() << ")";
    // Print yield explicitly if the op defines values.
    printBlockTerminators = true;
  }
  p.printRegion(getThenBranch(),
                /*printEntryBlockArgs=*/false,
                /*printBlockTerminators=*/printBlockTerminators);

  // Print the "else" region if it exists and has a block.
  auto &elseRegion = getElseBranch();
  if (!elseRegion.empty()) {
    p << " else ";
    p.printRegion(elseRegion,
                  /*printEntryBlockArgs=*/false,
                  /*printBlockTerminators=*/printBlockTerminators);
  }
}
LogicalResult ReverseOp::verify() {
  TensorType inputType = getInput().getType();
  TensorType outputType = getOutput().getType();
  int32_t reverseAxis = getAxis();

  if (reverseAxis < 0)
    return emitOpError("expected non-negative reverse axis");
  if (inputType.hasRank()) {
    int64_t inputRank = inputType.getRank();
    // We allow for a special case where the input/output shape has rank 0 and
    // axis is also 0.
    if (reverseAxis >= inputRank && !(reverseAxis == 0 && inputRank == 0))
      return emitOpError("expect input tensor rank (")
             << inputRank << ") to be larger than reverse axis ("
             << reverseAxis << ")";
  }
  if (outputType.hasRank()) {
    int64_t outputRank = outputType.getRank();
    if (inputType.hasRank() && outputRank != inputType.getRank())
      return emitOpError(
          "expect output tensor rank to be equal to input tensor rank");
    if (reverseAxis >= outputRank && !(reverseAxis == 0 && outputRank == 0))
      return emitOpError("expect output tensor rank (")
             << outputRank << ") to be larger than reverse axis ("
             << reverseAxis << ")";
  }
  return success();
}
  FunctionType functionType;
  SMLoc typeLoc = parser.getCurrentLocation();
  if (failed(parser.parseColonType(functionType)))
    return failure();

  result.addTypes(functionType.getResults());

  if (functionType.getNumInputs() != operands.size()) {
    return parser.emitError(typeLoc)
           << "expected as many input types as operands "
           << "(expected " << operands.size() << " got "
           << functionType.getNumInputs() << ")";
  }

  // Propagate the function input types onto the region arguments.
  for (size_t i = 0, e = regionArgs.size(); i != e; ++i)
    regionArgs[i].type = functionType.getInput(i);

  return failure(parser.parseRegion(*cond, regionArgs) ||
                 parser.parseKeyword("do") || parser.parseRegion(*body) ||
                 parser.parseOptionalAttrDictWithKeyword(result.attributes));
static void printInitializationList(OpAsmPrinter &parser,
                                    Block::BlockArgListType blocksArgs,
                                    ValueRange initializers,
                                    StringRef prefix = "") {
  assert(blocksArgs.size() == initializers.size() &&
         "expected same length of arguments and initializers");
  if (initializers.empty())
    return;

  parser << prefix << '(';
  llvm::interleaveComma(
      llvm::zip(blocksArgs, initializers), parser,
      [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); });
  parser << ")";
}
#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"

#define GET_OP_CLASSES
#include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"