29 #include "llvm/ADT/APFloat.h"
30 #include "llvm/ADT/DenseMap.h"
31 #include "llvm/ADT/TypeSwitch.h"
38 #include "mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc"
45 #include "mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc"
48 #include "mlir/Dialect/Tosa/IR/TosaDialectBytecode.cpp.inc"
69 return (isa<tosa::IfOp>(dest->getParentOp()) ||
70 isa<tosa::WhileOp>(dest->getParentOp()));
76 TosaDialectBytecodeInterface(
Dialect *dialect)
86 LogicalResult writeAttribute(
Attribute attr,
88 return ::writeAttribute(attr, writer);
98 LogicalResult writeType(
Type type,
100 return ::writeType(type, writer);
107 std::unique_ptr<DialectVersion>
110 reader.
emitError(
"Dialect does not support versioning");
114 LogicalResult upgradeFromVersion(
Operation *topLevelOp,
133 void TosaDialect::initialize() {
135 #define GET_TYPEDEF_LIST
136 #include "mlir/Dialect/Tosa/IR/TosaOpsTypesBase.cpp.inc"
140 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
143 #define GET_ATTRDEF_LIST
144 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
146 addInterfaces<TosaDialectBytecodeInterface, TosaInlinerInterface>();
147 declarePromisedInterfaces<
148 mesh::ShardingInterface, ClampOp, SigmoidOp, TanhOp, AddOp,
149 ArithmeticRightShiftOp, BitwiseAndOp, BitwiseOrOp, BitwiseXorOp, IntDivOp,
150 LogicalAndOp, LogicalLeftShiftOp, LogicalRightShiftOp, LogicalOrOp,
151 LogicalXorOp, MaximumOp, MinimumOp, MulOp, PowOp, SubOp, AbsOp,
152 BitwiseNotOp, CeilOp, ClzOp, ExpOp, FloorOp, LogOp, LogicalNotOp,
153 NegateOp, ReciprocalOp, RsqrtOp, SelectOp, EqualOp, GreaterOp,
154 GreaterEqualOp, MatMulOp>();
161 if (llvm::isa<shapeType>(type) && llvm::isa<DenseIntElementsAttr>(value)) {
162 return builder.
create<tosa::ConstShapeOp>(
163 loc, type, llvm::cast<DenseIntElementsAttr>(value));
165 if (llvm::isa<ElementsAttr>(value))
166 return builder.
create<tosa::ConstOp>(loc, type,
167 llvm::cast<ElementsAttr>(value));
180 <<
"expected attribute";
182 if (
auto typedAttr = dyn_cast<TypedAttr>(attr)) {
199 bool needsSpace =
false;
200 auto typedAttr = dyn_cast_or_null<TypedAttr>(attr);
201 if (!typedAttr || typedAttr.getType() != type.getValue()) {
template <typename T>
  auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
    op.emitOpError("expect a ranked tensor for input, got ") << op.getInput();
  auto weightType = llvm::dyn_cast<RankedTensorType>(op.getWeight().getType());
    op.emitOpError("expect a ranked tensor for weight, got ") << op.getWeight();

  auto inputEType = inputType.getElementType();
  auto weightEType = weightType.getElementType();
      llvm::cast<ShapedType>(op.getBias().getType()).getElementType();
      llvm::cast<ShapedType>(op.getResult().getType()).getElementType();
  bool biasIsFloat = llvm::isa<FloatType>(biasEType);
  bool resultIsFloat = llvm::isa<FloatType>(resultEType);

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(inputEType))
    inputEType = quantType.getStorageType();

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(biasEType))
    biasEType = quantType.getStorageType();

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(resultEType))
    resultEType = quantType.getStorageType();

  if (biasIsFloat && resultIsFloat && (biasEType != resultEType)) {
        "expect both bias and result to have same element type, got ")
        << biasEType << " and " << resultEType;

  if (isa<Float8E5M2Type>(inputEType) || isa<Float8E4M3FNType>(inputEType) ||
      isa<Float8E5M2Type>(weightEType) || isa<Float8E4M3FNType>(weightEType)) {
    if (inputEType != weightEType) {
          "expect both input and weight to have same element type, got ")
          << inputEType << " and " << weightEType;

  bool inputIsFloat = llvm::isa<FloatType>(inputEType);
  bool weightIsFloat = llvm::isa<FloatType>(weightEType);

  if (inputIsFloat != weightIsFloat) {
        "expect both input and weight to be float or not together, got ")
        << inputEType << " and " << weightEType;

  if (!op.getInputZp() && !op.getWeightZp())
    return inputEType.isInteger(8) ? failure() : success();

  ElementsAttr inputZpAttr;
  ElementsAttr weightZpAttr;
        "bail out if the actual value of zero points cannot be determined");
    op.emitOpError("input zero point must be zero for non-int8 integer types");
    op.emitOpError("weight zero point must be zero for non-int8 integer types");
  auto attrType = llvm::dyn_cast<TensorType>(getValueAttr().getType());
  auto outputType = llvm::dyn_cast<TensorType>(getOutput().getType());

  if (!attrType || !outputType) {
    emitOpError("expected tensors for attr/result type");

  if (auto result = llvm::dyn_cast<mlir::quant::QuantizedType>(
          outputType.getElementType())) {
    if (result.getStorageType() == attrType.getElementType())

  if (attrType.getElementType() != outputType.getElementType()) {
    emitOpError("expected same attr/result element types");
template <typename T>
      llvm::cast<ShapedType>(op.getInput().getType()).getElementType();

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(inputEType))
    inputEType = quantType.getStorageType();

  auto accType = op.getAccType();
  if (inputEType.isInteger(8) && !accType.isInteger(32))
    return op.emitOpError("accumulator type for i8 tensor is not i32");

  if (inputEType.isInteger(16) && !accType.isInteger(48))
    return op.emitOpError("accumulator type for i16 tensor is not i48");

  if (isa<Float8E5M2Type, Float8E4M3Type>(inputEType) && !accType.isF16())
    return op.emitOpError("accumulator type for f8 tensor is not f16");

  if (inputEType.isF16() && !(accType.isF16() || accType.isF32()))
    return op.emitOpError("accumulator type for f16 tensor is not f16/f32");

  if (inputEType.isBF16() && !accType.isF32())
    return op.emitOpError("accumulator type for bf16 tensor is not f32");

  if (inputEType.isF32() && !accType.isF32())
    return op.emitOpError("accumulator type for f32 tensor is not f32");

      llvm::cast<ShapedType>(op.getResult().getType()).getElementType();

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(resultEType))
    resultEType = quantType.getStorageType();

  if ((inputEType.isInteger(8) && resultEType.isInteger(32)) ||
      (inputEType.isInteger(16) && resultEType.isInteger(48)) ||
      (isa<Float8E5M2Type>(inputEType) && resultEType.isF16()) ||
      (isa<Float8E4M3FNType>(inputEType) && resultEType.isF16()) ||
      (inputEType.isF16() && resultEType.isF16()) ||
      (inputEType.isBF16() && resultEType.isBF16()) ||
      (inputEType.isF32() && resultEType.isF32()))

  return op.emitOpError("input/output element types are incompatible.");
template <typename T>
  auto inputType = llvm::dyn_cast<TensorType>(inType);
  auto outputType = llvm::dyn_cast<TensorType>(outType);
    op.emitOpError("expect shaped tensor for input, got ") << inType;
    op.emitOpError("expect shaped tensor for output, got ") << outType;

  auto inputElementType = inputType.getElementType();
  auto outputElementType = outputType.getElementType();
  auto inputQuantType =
      llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputElementType);
  auto outputQuantType =
      llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputElementType);
  if ((inputElementType.isIntOrIndexOrFloat() || inputQuantType) &&
      (outputElementType.isIntOrIndexOrFloat() || outputQuantType) &&
      inputElementType != outputElementType) {
    op.emitOpError("expect input and output to have same element type, got ")
        << inputElementType << " and " << outputElementType;
  const auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
  if (!resultETy.isIntOrIndex())
    return emitOpError("result tensor is not of integer type");

  const auto inputType = llvm::cast<ShapedType>(getInput().getType());
  const int64_t axis = getAxisAttr().getInt();
  if (inputType.hasRank() && ((axis < 0) || axis >= inputType.getRank()))
    return emitOpError("specified axis is outside the rank of the tensor");
  auto inputType = llvm::cast<ShapedType>(getInput().getType());

  auto inputETy = inputType.getElementType();
  auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();

          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
    inputETy = quantType.getStorageType();

          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(resultETy))
    resultETy = quantType.getStorageType();

  auto accType = getAccType();
  if (llvm::isa<IntegerType>(inputETy) && !accType.isInteger(32))
    return emitOpError("accumulator type for integer tensor is not i32");

  if (inputETy.isF16() && !(accType.isF16() || accType.isF32()))
    return emitOpError("accumulator type for f16 tensor is not f16/f32");

  if (inputETy.isBF16() && !accType.isF32())
    return emitOpError("accumulator type for bf16 tensor is not f32");

  if (inputETy.isF32() && !accType.isF32())
    return emitOpError("accumulator type for f32 tensor is not f32");

  if ((inputETy.isF32() && resultETy.isF32()) ||
      (inputETy.isF16() && resultETy.isF16()) ||
      (inputETy.isBF16() && resultETy.isBF16()) ||
      (inputETy.isInteger(8) && resultETy.isInteger(8)) ||
      (inputETy.isInteger(16) && resultETy.isInteger(16)))

  return emitOpError("input/output element types are incompatible.");
      llvm::cast<ShapedType>(getInput().getType()).getElementType();
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy)) {
    inputETy = quantType.getStorageType();
      llvm::cast<ShapedType>(getOutput().getType()).getElementType();
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputETy)) {
    outputETy = quantType.getStorageType();
  if (inputETy != outputETy)
    return emitOpError("input/output element types are incompatible.");

  auto maxValAttr = getMaxValAttr();
  auto minValAttr = getMinValAttr();

  if (inputETy.isInteger(dataTypeBitWidth)) {
    auto intMaxValAttr = mlir::dyn_cast<mlir::IntegerAttr>(maxValAttr);
    auto intMinValAttr = mlir::dyn_cast<mlir::IntegerAttr>(minValAttr);
    if (!intMaxValAttr || !intMinValAttr ||
        (intMaxValAttr.getType() != intMinValAttr.getType()) ||
        (intMaxValAttr.getType() != inputETy))
      return emitOpError("min/max attributes types are incompatible with "
                         "input/output element types.");

    auto floatMaxValAttr = mlir::dyn_cast<mlir::FloatAttr>(maxValAttr);
    auto floatMinValAttr = mlir::dyn_cast<mlir::FloatAttr>(minValAttr);
    if (!floatMaxValAttr || !floatMinValAttr ||
        (floatMaxValAttr.getType() != floatMinValAttr.getType()) ||
        (floatMaxValAttr.getType() != inputETy))
      return emitOpError("min/max attributes types are incompatible with "
                         "input/output element types.");
  result.addOperands({input, weight, bias, zps.first, zps.second});

  Type finalOutputType = outputType;

  result.addOperands({input, weight, bias, zps.first, zps.second});

  Type finalOutputType = outputType;

        static_cast<int32_t>(quantAttr.getAZp())));
        static_cast<int32_t>(quantAttr.getBZp())));

    auto inputType = llvm::dyn_cast<ShapedType>(a.getType());
    assert(inputType && "Input must be a shaped tensor type!");

    auto inputQType = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(
        inputType.getElementType());
    assert(inputQType && "Tensor must have quantized datatype!");

    unsigned inputBits = inputQType.getStorageTypeIntegralWidth();

    auto outputShapedType = llvm::dyn_cast<ShapedType>(outputType);
    assert(outputShapedType && "Output must be a shaped type");

    IntegerType accElementType;
    auto accType = outputShapedType.clone(accElementType);

                                       DenseArrayAttr kernel, DenseArrayAttr stride,
                                       DenseArrayAttr pad, TypeAttr accType) {
        static_cast<int32_t>(quantAttr.getInputZp())));
        static_cast<int32_t>(quantAttr.getOutputZp())));
  result.types.push_back(outputType);

        static_cast<int32_t>(quantAttr.getInputZp())));
        static_cast<int32_t>(quantAttr.getOutputZp())));
  result.types.push_back(outputType);

        static_cast<int32_t>(quantAttr.getInputZp())));
  result.types.push_back(outputType);

        static_cast<int32_t>(quantAttr.getInputZp())));
  result.types.push_back(outputType);
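// Broadcast shape resolution (the rule implemented below, in rough terms):
// operand shapes are right-aligned against the output rank and a dimension of
// size 1 stretches to match the other operand, e.g. shapes [2, 1, 3] and
// [4, 3] resolve to an output shape of [2, 4, 3]; any remaining size mismatch
// is treated as a broadcasting failure.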
  for (int i = 0, e = operands.size(); i != e; ++i) {
    if (!shape.hasRank()) {
    outRank = std::max<int64_t>(outRank, shape.getRank());

  outShape.resize(outRank, 1);

  for (int i = 0, e = operands.size(); i != e; ++i) {
    auto rankDiff = outShape.size() - shape.getRank();

    for (size_t i = 0, e = shape.getRank(); i < e; ++i) {
      auto dim1 = outShape[i + rankDiff];
      auto dim2 = shape.getDimSize(i);
      auto resolvedDim = dim1;

      } else if (dim2 == 1) {
      } else if (dim1 != dim2) {
      outShape[i + rankDiff] = resolvedDim;
LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ArgMaxOp::Adaptor adaptor,
  IntegerAttr axis = adaptor.getProperties().axis;
  int32_t axisVal = axis.getValue().getSExtValue();

  if (!inputShape.hasRank()) {

  outShape.reserve(inputShape.getRank() - 1);
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
      outShape.push_back(inputShape.getDimSize(i));

LogicalResult tosa::RFFT2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    RFFT2dOp::Adaptor adaptor,
  if (!inputShape.hasRank())

  outputShape.resize(3, ShapedType::kDynamic);
  outputShape[0] = inputShape.getDimSize(0);
  outputShape[1] = inputShape.getDimSize(1);
  int64_t inWidth = inputShape.getDimSize(2);

  if (inWidth != ShapedType::kDynamic)
    outputShape[2] = inWidth / 2 + 1;
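// E.g. a static input of shape [N, H, 8] produces an output of shape
// [N, H, 8 / 2 + 1] = [N, H, 5], the width of the one-sided spectrum.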
LogicalResult tosa::FFT2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    FFT2dOp::Adaptor adaptor,
  inferredReturnShapes.push_back(
  inferredReturnShapes.push_back(

LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ConcatOp::Adaptor adaptor,
  const Properties &prop = adaptor.getProperties();
  int32_t axis = prop.axis.getValue().getSExtValue();

  bool hasRankedInput = false;
  for (auto operand : adaptor.getOperands()) {
    if (!operandShape.hasRank())

      outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);

    for (int i = 0, s = operandShape.getRank(); i < s; i++) {
      if (i == axis || operandShape.isDynamicDim(i))
      if (outputShape[i] == ShapedType::kDynamic)
        outputShape[i] = operandShape.getDimSize(i);
      if (outputShape[i] != operandShape.getDimSize(i))
                                   "Cannot concat tensors with different sizes"
                                   " on the non-axis dimension ",

    hasRankedInput = true;

      llvm::cast<TensorType>(adaptor.getInput1().getType()[0]).getElementType();
  if (!hasRankedInput) {

  int64_t concatDimSize = 0;
  for (auto operand : adaptor.getOperands()) {

    if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
      concatDimSize = ShapedType::kDynamic;

    concatDimSize += operandShape.getDimSize(axis);

  outputShape[axis] = concatDimSize;
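// E.g. concatenating operands of shapes [2, 3] and [2, 5] along axis 1 yields
// [2, 8]; if any operand's axis dimension is dynamic, the whole concatenated
// dimension stays dynamic.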
LogicalResult tosa::EqualOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,

  if (l.size() != r.size() || l.size() != 1)

LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    FullyConnectedOp::Adaptor adaptor,
  ShapeAdaptor weightShape(adaptor.getWeight().getType());

  outShape.resize(2, ShapedType::kDynamic);

  if (inputShape.hasRank()) {
    outShape[0] = inputShape.getDimSize(0);

  if (weightShape.hasRank()) {
    outShape[1] = weightShape.getDimSize(0);

  if (biasShape.hasRank()) {
    outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)

  auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());

  RankedTensorType weightType =
      llvm::dyn_cast<RankedTensorType>(getWeight().getType());

    emitOpError("expect a ranked tensor for input, got ") << getInput();

    emitOpError("expect a ranked tensor for weight, got ") << getWeight();

  auto inputEType = inputType.getElementType();
  auto weightEType = weightType.getElementType();

  bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
  bool weightIsQuant = !llvm::isa<FloatType>(weightEType);

  if (inputIsQuant != weightIsQuant) {
        "expect both input and weight to be float or not together, got ")
        << inputEType << " and " << weightEType;

  if ((inputIsQuant && !getInputZp()) || (!inputIsQuant && getInputZp())) {
    emitOpError("input zero point is required for quantized type, and not "
                "allowed for float type");
LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    MatMulOp::Adaptor adaptor,

  outShape.resize(3, ShapedType::kDynamic);

  if (lhsShape.hasRank()) {
    outShape[0] = lhsShape.getDimSize(0);
    outShape[1] = lhsShape.getDimSize(1);

  if (rhsShape.hasRank()) {
    outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
    outShape[2] = rhsShape.getDimSize(2);

LogicalResult tosa::PadOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    PadOp::Adaptor adaptor,
      cast<tosa::shapeType>(adaptor.getPadding().getType()).getRank();

  if (!inputShape.hasRank()) {
    outputShape.resize(paddingRank / 2, ShapedType::kDynamic);

    outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);

  outputShape.reserve(inputShape.getRank());
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
    if (inputShape.isDynamicDim(i)) {
      outputShape.push_back(ShapedType::kDynamic);

    auto padFront = paddingValues[i * 2];
    auto padBack = paddingValues[i * 2 + 1];
    if (padFront < 0 || padBack < 0) {
      outputShape.push_back(ShapedType::kDynamic);

    outputShape.push_back(inputShape.getDimSize(i) + padFront + padBack);
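// E.g. an input dimension of size 4 padded with (padFront, padBack) = (1, 2)
// produces an output dimension of 4 + 1 + 2 = 7; negative padding values
// leave that output dimension dynamic here.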
  RankedTensorType inputType = getInput1().getType();
  RankedTensorType outputType = getOutput().getType();
  auto paddingRank = cast<tosa::shapeType>(getPadding().getType()).getRank();

  if (inputType.getRank() != outputType.getRank())
    return emitOpError() << "expect same input and output tensor rank.";

  if (paddingRank != inputType.getRank() * 2)
    return emitOpError() << "expected padding tensor dim 0 to have size "
                         << inputType.getRank() * 2
                         << " (2*rank(shape1)) but got size " << paddingRank;

  return to_vector(llvm::map_range(shape, [](int64_t dim) {
    return dim == -1 ? ShapedType::kDynamic : dim;

LogicalResult tosa::SliceOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    SliceOp::Adaptor adaptor,

  auto rank = cast<tosa::shapeType>(adaptor.getSize().getType()).getRank();

  ShapeAdaptor inputShape(adaptor.getInput1().getType());

  if (inputShape.hasRank()) {
    for (size_t i = 0; i < size.size(); i++) {
      if (size[i] != 0 && size[i] >= -1 && start[i] >= 0 &&
          (ShapedType::isDynamic(inputShape.getDimSize(i)) ||
           start[i] < inputShape.getDimSize(i))) {
        if (ShapedType::isDynamic(inputShape.getDimSize(i))) {
          outputShape[i] = size[i];

        if (size[i] == -1) {
          outputShape[i] = inputShape.getDimSize(i) - start[i];
        } else if (start[i] + size[i] <= inputShape.getDimSize(i)) {
          outputShape[i] = size[i];
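// E.g. slicing a dimension of size 10 with start = 2 and size = -1 yields an
// output dimension of 10 - 2 = 8, while start = 2 and size = 3 yields 3 as
// long as 2 + 3 <= 10.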
  auto inputType = llvm::dyn_cast<RankedTensorType>(getInput1().getType());

  auto startShapeRank =
      llvm::cast<tosa::shapeType>(getStart().getType()).getRank();
  if (inputType.getRank() != startShapeRank)
        "length of start attribute is not equal rank of input shape");

  auto sizeShapeRank =
      llvm::cast<tosa::shapeType>(getSize().getType()).getRank();
  if (inputType.getRank() != sizeShapeRank)
        "length of size attribute is not equal rank of input shape");

LogicalResult tosa::MulOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,

  if (auto resIntType = dyn_cast<IntegerType>(resElemType)) {
    IntegerType lhsIntType =
    IntegerType rhsIntType =
    if (lhsIntType != rhsIntType)
      return emitOpError("requires the same element type for all operands");

    if (lhsIntType.getWidth() > resIntType.getWidth())
      return emitOpError("invalid data type size for operands or result");

  for (int i = 0; i < 2; ++i) {
          "requires the same element type for all operands and results");

  ElementsAttr shift_elem;
    int32_t shift = shift_elem.getValues<IntegerAttr>()[0].getInt();
      return emitOpError() << "require shift to be 0 for float type";

  auto hasRank = [](const Type type) {
    if (auto shaped_type = dyn_cast<ShapedType>(type))
      return shaped_type.hasRank();

  auto rankedOperandTypes =
      llvm::to_vector(llvm::make_filter_range(getOperandTypes(), hasRank));

  auto rankedResultTypes =
      llvm::make_filter_range(getOperation()->getResultTypes(), hasRank);

  if (rankedOperandTypes.empty() && rankedResultTypes.empty())

  auto getRank = [](const Type type) {
    return cast<ShapedType>(type).getRank();

  auto rank = !rankedOperandTypes.empty() ? getRank(*rankedOperandTypes.begin())
                                          : getRank(*rankedResultTypes.begin());

  for (size_t i = 0; i < 2; ++i) {
    if (rank != getRank(rankedOperandTypes[i])) {
      return emitOpError("operands don't have matching ranks");

  for (const auto type : rankedResultTypes) {
    if (rank != getRank(type)) {
      return emitOpError("result type has different rank than operands");

    return mlir::cast<ShapedType>(type).getShape();

    return emitOpError("operands don't have broadcast-compatible shapes");
LogicalResult tosa::TableOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TableOp::Adaptor adaptor,
  ShapeAdaptor inputShape(adaptor.getInput1().getType());

  if (!inputShape.hasRank()) {

  inferredReturnShapes.resize(1);
  inputShape.getDims(inferredReturnShapes[0]);

  TensorType inputType = getInput1().getType();
  TensorType outputType = getOutput().getType();

      inputType.getRank() != outputType.getRank())
    return emitOpError()
           << "expected input tensor rank to equal result tensor rank";

  auto inputDims = inputType.getShape();
  auto outputDims = outputType.getShape();
    int64_t dim = it.index();
    auto [inputDim, outputDim] = it.value();
    if (!ShapedType::isDynamic(outputDim) && outputDim != inputDim) {
      return emitOpError() << "dim(result, " << dim << ") = " << outputDim
                           << " doesn't match dim(input, " << dim
                           << ") = " << inputDim;

  multiples = llvm::to_vector(
      llvm::map_range(multiplesAttr.getValues<APInt>(),
                      [](const APInt &val) { return val.getSExtValue(); }));
LogicalResult tosa::TileOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TileOp::Adaptor adaptor,
      llvm::map_range(multiplesAttr.getValues<APInt>(),
                      [](const APInt &val) { return val.getSExtValue(); }));

  ShapeAdaptor inputShape(adaptor.getInput1().getType());

  if (!inputShape.hasRank()) {
    outputShape.resize(multiples.size(), ShapedType::kDynamic);
  } else if (static_cast<size_t>(inputShape.getRank()) != multiples.size())

  outputShape.reserve(multiples.size());
  for (int i = 0, s = inputShape.getRank(); i < s; i++) {
    int64_t dim = inputShape.getDimSize(i);
    if (dim != ShapedType::kDynamic)
      dim *= multiples[i];
    outputShape.push_back(dim);
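// E.g. tiling an input of shape [2, 3] with multiples [3, 1] produces an
// output of shape [6, 3]; dynamic input dimensions stay dynamic.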
  ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
  ShapedType outputType = llvm::cast<ShapedType>(getType());
  shapeType multiplesType =
      llvm::cast<tosa::shapeType>(getMultiples().getType());

  auto multiplesRank = multiplesType.getRank();

  if (inputType.hasRank()) {
    if (inputType.getRank() != multiplesRank)
      return emitOpError("expect 'multiples' to have rank ")
             << inputType.getRank() << " but got " << multiplesRank << ".";
    if (outputType.hasRank() && inputType.getRank() != outputType.getRank())
      return emitOpError("expect same input and output tensor rank.");
  } else if (outputType.hasRank() && outputType.getRank() != multiplesRank)
    return emitOpError("expect 'multiples' array to have length ")
           << outputType.getRank() << " but got " << multiplesRank << ".";

  if (getConstantMultiples(multiples).succeeded() &&
      llvm::any_of(multiples, [](int64_t v) { return v <= 0 && v != -1; }))
        "expect element of 'multiples' to be positive integer or -1.");
  if (l.size() != r.size() || l.size() != 1)

LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ReshapeOp::Adaptor adaptor,
  ShapeAdaptor inputShape(adaptor.getInput1().getType());

  auto rank = cast<tosa::shapeType>(adaptor.getShape().getType()).getRank();

  if (!inputShape.hasRank() || !inputShape.hasStaticShape()) {
    inferredReturnShapes.push_back(

  int64_t numElements = inputShape.getNumElements();
  int64_t staticMul = 1;
  for (auto val : newShapeValue) {
    if (!ShapedType::isDynamic(val)) {

  for (auto &val : newShapeValue) {
    if (ShapedType::isDynamic(val))
      val = numElements / staticMul;
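// E.g. reshaping a static [2, 3, 4] input (24 elements) with a new shape of
// [6, -1] resolves the dynamic dimension to 24 / 6 = 4.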
  inferredReturnShapes.push_back(

  TensorType inputType = getInput1().getType();
  RankedTensorType outputType = getType();

    return mlir::success();

  if ((int64_t)shapeValues.size() != outputType.getRank())
    return emitOpError() << "new shape does not match result rank";

  for (auto [newShapeDim, outputShapeDim] :
       zip(shapeValues, outputType.getShape())) {
    if (newShapeDim != -1 && newShapeDim != ShapedType::kDynamic &&
        outputShapeDim != ShapedType::kDynamic && newShapeDim != outputShapeDim)
      return emitOpError() << "new shape is inconsistent with result shape";

    if (newShapeDim != ShapedType::kDynamic && newShapeDim < -1)
      return emitOpError() << "new shape has invalid tensor dimension size "

  if (inputType.hasStaticShape()) {
    int64_t inputElementsNum = inputType.getNumElements();
    if (outputType.hasStaticShape()) {
      int64_t outputElementsNum = outputType.getNumElements();
      if (inputElementsNum != outputElementsNum) {
        return emitOpError() << "cannot reshape " << inputElementsNum
                             << " elements into " << outputElementsNum;

    int64_t newShapeElementsNum = std::accumulate(
        shapeValues.begin(), shapeValues.end(), 1LL,
        [](int64_t acc, int64_t dim) { return (dim > 0) ? acc * dim : acc; });
    bool isStaticNewShape =
        llvm::all_of(shapeValues, [](int64_t s) { return s > 0; });
    if ((isStaticNewShape && inputElementsNum != newShapeElementsNum) ||
        (!isStaticNewShape && newShapeElementsNum > inputElementsNum)) {
      return emitOpError() << "cannot reshape " << inputElementsNum
                           << " elements into " << newShapeElementsNum;

  int missingDims = llvm::count(shapeValues, -1);
  if (missingDims > 1)
    return emitOpError() << "expected at most one target dimension to be -1";

  return mlir::success();
  for (auto v : permsAttr.getValues<APInt>())
    perms.push_back(v.getSExtValue());

LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TransposeOp::Adaptor adaptor,
  ShapeAdaptor inputShape(adaptor.getInput1().getType());

  if (permsShape.hasRank() && permsShape.getRank() == 0)

  if (!inputShape.hasRank() || !permsShape.hasRank() ||
      permsShape.isDynamicDim(0)) {

  if (permsShape.getDimSize(0) != inputShape.getRank()) {

  if (inputShape.getRank() == 0) {

    bool allTheSame = true;
    for (int i = 1, s = inputShape.getRank(); i < s; i++) {
      if (inputShape.getDimSize(0) != inputShape.getDimSize(i)) {

      outputShape.resize(inputShape.getRank(), inputShape.getDimSize(0));

  outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);

      attr.getType().getRank() == 1) {
    if (inputShape.getRank() != permShape.getRank())
          "constant permutation must be the same length"
          " as the input rank");

    for (int i = 0, e = inputShape.getRank(); i < e; i++) {
      if (inputShape.getRank() <= permShape.getDimSize(i))

    outputShape.reserve(inputShape.getRank());
    for (int i = 0, s = inputShape.getRank(); i < s; i++) {
      outputShape[i] = inputShape.getDimSize(permShape.getDimSize(i));
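// E.g. with a constant permutation [2, 0, 1], an input of shape [A, B, C]
// produces an output of shape [C, A, B].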
  TensorType inputType = getInput1().getType();
  TensorType outputType = getOutput().getType();

  if (permType.hasRank() && permType.getRank() != 1)
    return emitOpError()
           << "expected permutation tensor to be rank 1 but got rank "
           << permType.getRank();

  if (!permType.isDynamicDim(0) &&
      permType.getDimSize(0) != inputType.getRank())
    return emitOpError() << "expected permutation tensor dim 0 to have size "
                         << inputType.getRank()
                         << " (input rank) but got size "
                         << permType.getDimSize(0);

      inputType.getRank() != outputType.getRank())
    return emitOpError()
           << "expected input tensor rank to equal result tensor rank";

  if (!permType.isDynamicDim(0) &&
      permType.getDimSize(0) != outputType.getRank())
    return emitOpError() << "expected permutation tensor dim 0 to have size "
                         << outputType.getRank()
                         << " (output rank) but got size "
                         << permType.getDimSize(0);

  if (succeeded(getConstantPerms(constantPerms))) {
           "Unexpectedly found permutation tensor without rank");
    if (!llvm::all_of(constantPerms,
                      [&constantPerms](int32_t s) {
                        static_cast<size_t>(s) < constantPerms.size();
            constantPerms, [](int32_t v) -> int64_t { return v; }))))
      return emitOpError() << "expected valid permutation tensor";

    assert(constantPerms.size() == static_cast<size_t>(inputType.getRank()) &&
           inputType.getRank() == outputType.getRank());

    for (auto i = 0; i < outputType.getRank(); i++) {
      if (inputType.isDynamicDim(constantPerms[i]) ||
          outputType.isDynamicDim(i))

      if (inputType.getDimSize(constantPerms[i]) != outputType.getDimSize(i))
        return emitOpError()
               << "expected output tensor dim " << i << " to match "
               << "input dim " << constantPerms[i] << " with value of "
               << inputType.getDimSize(constantPerms[i]);

  if (getConstantPerms(transposePerms).failed())

  Value input = getInput1();
  auto inputType = cast<TensorType>(input.getType());

  for (auto dim : transposePerms) {
    int32_t dimInInput = transposePerms[dim];
    if (inputType.isDynamicDim(dimInInput))
          builder.create<tensor::DimOp>(getLoc(), input, dimInInput)
          builder.getIndexAttr(inputType.getDimSize(dimInInput));

  reifiedReturnShapes.emplace_back(std::move(returnedDims));
LogicalResult tosa::GatherOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    GatherOp::Adaptor adaptor,
  outputShape.resize(3, ShapedType::kDynamic);

  ShapeAdaptor valuesShape(adaptor.getValues().getType());
  if (valuesShape.hasRank()) {
    outputShape[0] = valuesShape.getDimSize(0);
    outputShape[2] = valuesShape.getDimSize(2);

  ShapeAdaptor indicesShape(adaptor.getIndices().getType());
  if (indicesShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = indicesShape.getDimSize(0);
    if (outputShape[1] == ShapedType::kDynamic)
      outputShape[1] = indicesShape.getDimSize(1);

LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ResizeOp::Adaptor adaptor,
  outputShape.resize(4, ShapedType::kDynamic);

  if (!inputShape.hasRank())

  outputShape[0] = inputShape.getDimSize(0);
  outputShape[3] = inputShape.getDimSize(3);
  int64_t inputHeight = inputShape.getDimSize(1);
  int64_t inputWidth = inputShape.getDimSize(2);

  if ((inputHeight == ShapedType::kDynamic) ||
      (inputWidth == ShapedType::kDynamic))

      (((inputHeight - 1) * scaleInt[0] - offsetInt[0] + borderInt[0]) /
      (((inputWidth - 1) * scaleInt[2] - offsetInt[1] + borderInt[1]) /

LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ScatterOp::Adaptor adaptor,
  outputShape.resize(3, ShapedType::kDynamic);

  ShapeAdaptor valuesInShape(adaptor.getValuesIn().getType());
  if (valuesInShape.hasRank()) {
    outputShape[0] = valuesInShape.getDimSize(0);
    outputShape[1] = valuesInShape.getDimSize(1);
    outputShape[2] = valuesInShape.getDimSize(2);

  ShapeAdaptor indicesShape(adaptor.getIndices().getType());
  if (indicesShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = indicesShape.getDimSize(0);

  if (inputShape.hasRank()) {
    if (outputShape[0] == ShapedType::kDynamic)
      outputShape[0] = inputShape.getDimSize(0);
    if (outputShape[2] == ShapedType::kDynamic)
      outputShape[2] = inputShape.getDimSize(2);

  int64_t axisVal = axis.getValue().getSExtValue();
  if (!operandShape.hasRank() || operandShape.getRank() <= axisVal) {

  operandShape.getDims(outputShape);
  outputShape[axisVal] = 1;
#define COMPATIBLE_RETURN_TYPES(OP)                                            \
  bool OP::isCompatibleReturnTypes(TypeRange l, TypeRange r) {                 \
    if (l.size() != r.size() || l.size() != 1)                                 \
    if (getElementTypeOrSelf(l[0]) != getElementTypeOrSelf(r[0]))              \
    return succeeded(verifyCompatibleShape(l[0], r[0]));                       \

#define REDUCE_SHAPE_INFER(OP)                                                 \
  LogicalResult OP::inferReturnTypeComponents(                                 \
      MLIRContext *context, ::std::optional<Location> location,                \
      OP::Adaptor adaptor,                                                     \
      SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
        llvm::cast<TensorType>(adaptor.getInput().getType()).getElementType(); \
    ShapeAdaptor inputShape(adaptor.getInput().getType());                     \
    const Properties &prop = adaptor.getProperties();                          \
    return ReduceInferReturnTypes(inputShape, inputType, prop.axis,            \
                                  inferredReturnShapes);                       \
  COMPATIBLE_RETURN_TYPES(OP)

#undef REDUCE_SHAPE_INFER
#undef COMPATIBLE_RETURN_TYPES
template <typename T>
  TensorType inputType = op.getInput().getType();
  TensorType outputType = op.getOutput().getType();
  int32_t reduceAxis = op.getAxis();

  if (reduceAxis < 0) {
    op.emitOpError("reduce axis must not be negative");

  int64_t inputRank = inputType.getRank();

  if (reduceAxis >= inputRank && !(reduceAxis == 0 && inputRank == 0)) {
    op.emitOpError("expect input tensor rank (")
        << inputRank << ") to be larger than reduce axis (" << reduceAxis

  int64_t outputRank = outputType.getRank();
  if (inputType.hasRank() && outputRank != inputType.getRank()) {
        "expect output tensor rank to be equal to input tensor rank");

  if (reduceAxis >= outputRank && !(reduceAxis == 0 && outputRank == 0)) {
    op.emitOpError("expect output tensor rank (")
        << outputRank << ") to be larger than reduce axis (" << reduceAxis

  if (outputRank != 0) {
    auto outputShape = outputType.getShape();
    if (!outputType.isDynamicDim(reduceAxis) &&
        outputShape[reduceAxis] != 1) {
      op.emitOpError("expect reduced dimension size to be 1, got ")
          << outputShape[reduceAxis];
#define NARY_SHAPE_INFER(OP)                                                   \
  LogicalResult OP::inferReturnTypeComponents(                                 \
      MLIRContext *context, ::std::optional<Location> location,                \
      ValueShapeRange operands, DictionaryAttr attributes,                     \
      OpaqueProperties properties, RegionRange regions,                        \
      SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {           \
    return NAryInferReturnTypes(operands, inferredReturnShapes);               \

#undef PRED_SHAPE_INFER

  outputShape.resize(4, ShapedType::kDynamic);

  if (!ShapedType::isDynamic(height)) {
    int64_t padded = height + pad[0] + pad[1] - kernel[0];
    outputShape[1] = padded / stride[0] + 1;

  if (!ShapedType::isDynamic(width)) {
    int64_t padded = width + pad[2] + pad[3] - kernel[1];
    outputShape[2] = padded / stride[1] + 1;
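// E.g. height = 8, pad = [0, 0, ...], kernel = [2, ...], stride = [2, ...]
// gives padded = 8 + 0 + 0 - 2 = 6 and outputShape[1] = 6 / 2 + 1 = 4.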
LogicalResult Conv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    Conv2DOp::Adaptor adaptor,

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;

  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);

  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    outputShape[3] = weightShape.getDimSize(0);
    weightHeight = weightShape.getDimSize(1);
    weightWidth = weightShape.getDimSize(2);

  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t inputSize = inputHeight + padding[0] + padding[1];
    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t inputSize = inputWidth + padding[2] + padding[3];
    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
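// E.g. inputHeight = 8, padding = [1, 1, ...], weightHeight = 3,
// dilation = [1, ...], stride = [1, ...]: inputSize = 8 + 1 + 1 = 10,
// filterSize = (3 - 1) * 1 + 1 = 3, unstridedResult = 10 - 3 + 1 = 8, and
// outputShape[1] = (8 - 1) / 1 + 1 = 8.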
LogicalResult Conv3DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    Conv3DOp::Adaptor adaptor,

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t inputDepth = ShapedType::kDynamic;

  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;
  int64_t weightDepth = ShapedType::kDynamic;

  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputDepth = inputShape.getDimSize(1);
    inputHeight = inputShape.getDimSize(2);
    inputWidth = inputShape.getDimSize(3);

  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    outputShape[4] = weightShape.getDimSize(0);
    weightDepth = weightShape.getDimSize(1);
    weightHeight = weightShape.getDimSize(2);
    weightWidth = weightShape.getDimSize(3);

  if (biasShape.hasRank() && ShapedType::isDynamic(outputShape[4])) {
    outputShape[4] = biasShape.getDimSize(0);

  if (!ShapedType::isDynamic(inputDepth) &&
      !ShapedType::isDynamic(weightDepth)) {
    int32_t inputSize = inputDepth + pad[0] + pad[1];
    int32_t filterSize = (weightDepth - 1) * dilation[0] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int32_t inputSize = inputHeight + pad[2] + pad[3];
    int32_t filterSize = (weightHeight - 1) * dilation[1] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int32_t inputSize = inputWidth + pad[4] + pad[5];
    int32_t filterSize = (weightWidth - 1) * dilation[2] + 1;
    int32_t unstridedResult = inputSize - filterSize + 1;
    outputShape[3] = (unstridedResult - 1) / stride[2] + 1;

LogicalResult AvgPool2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    AvgPool2dOp::Adaptor adaptor,
  const Properties &prop = adaptor.getProperties();
                                 inferredReturnShapes);

LogicalResult MaxPool2dOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    MaxPool2dOp::Adaptor adaptor,
  const Properties &prop = adaptor.getProperties();
                                 inferredReturnShapes);
LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    DepthwiseConv2DOp::Adaptor adaptor,

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t inputChannels = ShapedType::kDynamic;

  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;
  int64_t depthChannels = ShapedType::kDynamic;

  if (inputShape.hasRank()) {
    outputShape[0] = inputShape.getDimSize(0);
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);
    inputChannels = inputShape.getDimSize(3);

  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    weightHeight = weightShape.getDimSize(0);
    weightWidth = weightShape.getDimSize(1);
    inputChannels = ShapedType::isDynamic(inputChannels)
                        ? weightShape.getDimSize(2)
    depthChannels = weightShape.getDimSize(3);

  if (!ShapedType::isDynamic(inputChannels) &&
      !ShapedType::isDynamic(depthChannels)) {
    outputShape[3] = inputChannels * depthChannels;
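    // E.g. inputChannels = 3 and depthChannels (channel multiplier) = 2 give
    // 3 * 2 = 6 output channels.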
  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t inputSize = inputHeight + padding[0] + padding[1];
    int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[1] = (unstridedResult - 1) / stride[0] + 1;

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t inputSize = inputWidth + padding[2] + padding[3];
    int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
    int64_t unstridedResult = inputSize - filterSize + 1;
    outputShape[2] = (unstridedResult - 1) / stride[1] + 1;

LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    TransposeConv2DOp::Adaptor adaptor,

  int64_t inputWidth = ShapedType::kDynamic;
  int64_t inputHeight = ShapedType::kDynamic;
  int64_t weightWidth = ShapedType::kDynamic;
  int64_t weightHeight = ShapedType::kDynamic;

  if (inputShape.hasRank()) {
    outputShape[0] = ShapedType::isDynamic(outputShape[0])
                         ? inputShape.getDimSize(0)
    inputHeight = inputShape.getDimSize(1);
    inputWidth = inputShape.getDimSize(2);

  ShapeAdaptor weightShape(adaptor.getWeight().getType());
  if (weightShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? weightShape.getDimSize(0)
    weightHeight = weightShape.getDimSize(1);
    weightWidth = weightShape.getDimSize(2);

  if (biasShape.hasRank()) {
    outputShape[3] = ShapedType::isDynamic(outputShape[3])
                         ? biasShape.getDimSize(0)

  if (!ShapedType::isDynamic(inputHeight) &&
      !ShapedType::isDynamic(weightHeight)) {
    int64_t calculateSize =
        (inputHeight - 1) * stride[0] + padding[0] + padding[1] + weightHeight;
        ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];

  if (!ShapedType::isDynamic(inputWidth) &&
      !ShapedType::isDynamic(weightWidth)) {
    int64_t calculateSize =
        (inputWidth - 1) * stride[1] + padding[2] + padding[3] + weightWidth;
        ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
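// E.g. inputWidth = 4, stride[1] = 2, padding[2] = padding[3] = 1, and
// weightWidth = 3 give calculateSize = (4 - 1) * 2 + 1 + 1 + 3 = 11, used
// only when the output width is not already fixed.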
LogicalResult IfOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    IfOp::Adaptor adaptor,
  for (Region *region : adaptor.getRegions()) {
    for (auto &block : *region)
      if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
        yieldOps.push_back(returnOp);

  if (yieldOps.empty())

  resultKnowledge.reserve(yieldOps.front().getNumOperands());
  for (auto operand : yieldOps.front().getOperands()) {
    resultKnowledge.push_back(

  for (auto yieldOp : yieldOps) {
    if (resultKnowledge.size() != yieldOp.getNumOperands())

      int32_t index = it.index();
              resultKnowledge[index],
        resultKnowledge[index] = meet;

    inferredReturnShapes.push_back(result.getShapedTypeComponents());

LogicalResult WhileOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    WhileOp::Adaptor adaptor,
  for (auto &block : adaptor.getBody())
    if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
      yieldOps.push_back(returnOp);

  if (yieldOps.empty())

  resultKnowledge.reserve(yieldOps.front().getNumOperands());
  for (auto operand : yieldOps.front().getOperands()) {
    resultKnowledge.push_back(

  for (auto yieldOp : yieldOps) {
    if (resultKnowledge.size() != yieldOp.getNumOperands())

      int32_t index = it.index();
              resultKnowledge[index],
        resultKnowledge[index] = meet;

    inferredReturnShapes.push_back(result.getShapedTypeComponents());

std::optional<SmallVector<int64_t, 4>> ApplyScaleOp::getShapeForUnroll() {
  if (auto vt = llvm::dyn_cast<VectorType>(getType()))
    return llvm::to_vector<4>(vt.getShape());
  return std::nullopt;
  bool printBlockTerminators = false;

  p << " " << getCond();
  if (!getResults().empty()) {
    p << " -> (" << getResultTypes() << ")";
    printBlockTerminators = true;

                printBlockTerminators);

  auto &elseRegion = getElseBranch();
  if (!elseRegion.empty()) {
                  printBlockTerminators);

  TensorType inputType = getInput1().getType();
  TensorType outputType = getOutput().getType();
  int32_t reverseAxis = getAxis();

  if (reverseAxis < 0)
    return emitOpError("expected non-negative reverse axis");

  int64_t inputRank = inputType.getRank();

  if (reverseAxis >= inputRank && !(reverseAxis == 0 && inputRank == 0))
    return emitOpError("expect input tensor rank (")
           << inputRank << ") to be larger than reverse axis (" << reverseAxis

  int64_t outputRank = outputType.getRank();
  if (inputType.hasRank() && outputRank != inputType.getRank())
        "expect output tensor rank to be equal to input tensor rank");
  if (reverseAxis >= outputRank && !(reverseAxis == 0 && outputRank == 0))
    return emitOpError("expect output tensor rank (")
           << outputRank << ") to be larger than reverse axis ("
           << reverseAxis << ")";
  FunctionType functionType;

  result.addTypes(functionType.getResults());

  if (functionType.getNumInputs() != operands.size()) {
           << "expected as many input types as operands "
           << "(expected " << operands.size() << " got "
           << functionType.getNumInputs() << ")";

  for (size_t i = 0, e = regionArgs.size(); i != e; ++i)
    regionArgs[i].type = functionType.getInput(i);

  return failure(parser.parseRegion(*cond, regionArgs) ||

                                     StringRef prefix = "") {
  assert(blocksArgs.size() == initializers.size() &&
         "expected same length of arguments and initializers");
  if (initializers.empty())

  parser << prefix << '(';
  llvm::interleaveComma(
      llvm::zip(blocksArgs, initializers), parser,
      [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); });

  Type zpElemType = zpAttr.getElementType();
  if (auto quantType =
          llvm::dyn_cast<mlir::quant::UniformQuantizedType>(zpElemType)) {
    zp = quantType.getZeroPoint();

  if (llvm::isa<FloatType>(zpElemType)) {
    if (!zpAttr.getValues<APFloat>()[0].isZero())

  if (llvm::isa<IntegerType>(zpElemType)) {
    zp = zpAttr.getValues<APInt>()[0].getSExtValue();

  if (auto quantType = llvm::dyn_cast<mlir::quant::QuantizedType>(srcElemType))
    srcElemType = quantType.getStorageType();

  if (llvm::isa<FloatType>(srcElemType)) {
        zpType, builder.getFloatAttr(srcElemType, static_cast<double>(zp)));
    return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);

  if (llvm::isa<IntegerType>(srcElemType)) {
    return builder.create<tosa::ConstOp>(loc, zpType, zpAttr);

  llvm::errs() << "zero point is not allowed for unsupported data types\n";
  return std::nullopt;
  return mlir::isa<tosa::shapeType>(t);

    return emitError() << "invalid rank (must be >= 0): " << rank;

    if (mlir::isa<::mlir::tosa::shapeType>(v.getType())) {
      Operation *definingOp = v.getDefiningOp();
        return op->emitOpError("shape operand is not compile time resolvable");

    if (!mlir::isa<mlir::tosa::shapeType>(type)) {
      return op->emitOpError("must have operands with tosa shape type");

    if (!mlir::isa<mlir::tosa::shapeType>(type)) {
      return op->emitOpError("must have result with tosa shape type");

  auto getRank = [](const Type type) {
    return mlir::cast<mlir::tosa::shapeType>(type).getRank();

  for (auto type : operandTypes) {
    if (getRank(type) != rank) {
      return op->emitOpError("operands don't have matching ranks");

  for (auto type : resultTypes) {
    if (getRank(type) != rank) {
      return op->emitOpError("result shape has different rank than operands");

  auto valuesRank = getValue().getType().getRank();
  if (valuesRank != 1)
    return emitOpError("expect elements in attribute value with rank 1");

  auto count = getValue().getNumElements();
  auto rank = (cast<tosa::shapeType>(getResult().getType())).getRank();
  if (!(count == rank || (count == 1 && rank == 0))) {
    return emitOpError("expect number of elements in attribute value (")
           << count << ") to be equal to the rank (" << rank
           << ") for the result shape type";

#define GET_ATTRDEF_CLASSES
#include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"

#define GET_TYPEDEF_CLASSES
#include "mlir/Dialect/Tosa/IR/TosaOpsTypesBase.cpp.inc"

#define GET_OP_CLASSES
#include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
static Operation * materializeConstant(Dialect *dialect, OpBuilder &builder, Attribute value, Type type, Location loc)
A utility function used to materialize a constant for a given attribute and type.
static MLIRContext * getContext(OpFoldResult val)
static bool isLegalToInline(InlinerInterface &interface, Region *src, Region *insertRegion, bool shouldCloneInlinedRegion, IRMapping &valueMapping)
Utility to check that all of the operations within 'src' can be inlined.
static void print(spirv::VerCapExtAttr triple, DialectAsmPrinter &printer)
static void buildMatMulOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value a, Value b)
The tosa.matmul op is also intended to be generated where a fully_connected op must be constructed wh...
static LogicalResult verifySameElementTypes(T op, Type inType, Type outType)
static LogicalResult ReduceInferReturnTypes(ShapeAdaptor operandShape, Type inputType, IntegerAttr axis, SmallVectorImpl< ShapedTypeComponents > &inferredReturnShapes)
#define REDUCE_SHAPE_INFER(OP)
static LogicalResult verifyConvOp(T op)
static void buildUnaryOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input)
This builder is called on single-parameter unary operators that have scale relationship between their...
static LogicalResult poolingInferReturnTypes(ShapeAdaptor inputShape, ArrayRef< int64_t > kernel, ArrayRef< int64_t > stride, ArrayRef< int64_t > pad, SmallVectorImpl< ShapedTypeComponents > &inferredReturnShapes)
static void buildTransConvOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, Value weight, Value bias, DenseI64ArrayAttr outpad, DenseI64ArrayAttr stride, DenseI64ArrayAttr outputShape, TypeAttr accType)
Handles tosa.transpose_conv2d which has outpad and output shape attributes.
static void buildPadOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, Value paddings)
This builder is called on TOSA pad operator that needs to create its own OptionalAttr quantization_at...
static void buildFCOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, Value weight, Value bias)
The tosa.fully_connected op has its own builder as it does not have strides/dilation/padding.
static LogicalResult verifyReduceOp(T op)
#define NARY_SHAPE_INFER(OP)
static void buildExplicitValuePadOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, Value paddings, Value padConst)
This builder is called on TOSA pad operator when an explicit pad_const value is passed in.
static LogicalResult verifyConvOpModes(T op)
static LogicalResult NAryInferReturnTypes(const ValueShapeRange &operands, SmallVectorImpl< ShapedTypeComponents > &inferredReturnShapes)
#define COMPATIBLE_RETURN_TYPES(OP)
static LogicalResult resolveBroadcastShape(const ValueShapeRange &operands, SmallVector< int64_t > &outShape)
static void buildConvOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, Value weight, Value bias, DenseI64ArrayAttr pad, DenseI64ArrayAttr stride, DenseI64ArrayAttr dilation, TypeAttr accType)
This builder is called on all convolution operators except TransposeConv, which has specialized outpu...
static void buildAvgPool2dOpWithQuantInfo(OpBuilder &builder, OperationState &result, Type outputType, Value input, DenseArrayAttr kernel, DenseArrayAttr stride, DenseArrayAttr pad, TypeAttr accType)
Both the tosa.avg_pool2d and unary ops use the same UnaruOpQuantizationAttr but avg_pool operator has...
static SmallVector< int64_t > convertToMlirShape(ArrayRef< int64_t > shape)
static void printInitializationList(OpAsmPrinter &parser, Block::BlockArgListType blocksArgs, ValueRange initializers, StringRef prefix="")
static ArrayRef< int64_t > getShape(Type type)
Returns the shape of the given type.
virtual Builder & getBuilder() const =0
Return a builder which provides useful access to MLIRContext, global objects like types and attribute...
virtual ParseResult parseOptionalAttrDict(NamedAttrList &result)=0
Parse a named dictionary into 'result' if it is present.
virtual ParseResult parseOptionalEqual()=0
Parse a = token if present.
virtual ParseResult parseOptionalKeyword(StringRef keyword)=0
Parse the given keyword if present.
virtual InFlightDiagnostic emitError(SMLoc loc, const Twine &message={})=0
Emit a diagnostic at the specified location and return failure.
virtual ParseResult parseOptionalAttrDictWithKeyword(NamedAttrList &result)=0
Parse a named dictionary into 'result' if the attributes keyword is present.
virtual ParseResult parseColonType(Type &result)=0
Parse a colon followed by a type.
virtual SMLoc getCurrentLocation()=0
Get the location of the next token and store it into the argument.
virtual ParseResult parseOptionalArrowTypeList(SmallVectorImpl< Type > &result)=0
Parse an optional arrow followed by a type list.
ParseResult parseKeyword(StringRef keyword)
Parse a given keyword.
virtual ParseResult parseAttribute(Attribute &result, Type type={})=0
Parse an arbitrary attribute of a given type and return it in result.
virtual void printAttribute(Attribute attr)
Attributes are known-constant values of operations.
IntegerAttr getIndexAttr(int64_t value)
IntegerAttr getI32IntegerAttr(int32_t value)
IntegerAttr getIntegerAttr(Type type, int64_t value)
FloatAttr getFloatAttr(Type type, double value)
IntegerType getIntegerType(unsigned width)
static DenseElementsAttr get(ShapedType type, ArrayRef< Attribute > values)
Constructs a dense elements attribute from an array of element values.
An attribute that represents a reference to a dense integer vector or tensor object.
This class defines a virtual interface for reading a bytecode stream, providing hooks into the byteco...
virtual InFlightDiagnostic emitError(const Twine &msg={}) const =0
Emit an error to the reader.
This class defines a virtual interface for writing to a bytecode stream, providing hooks into the byt...
This is the interface that must be implemented by the dialects of operations to be inlined.
DialectInlinerInterface(Dialect *dialect)
This class is used to represent the version of a dialect, for the purpose of polymorphic destruction.
Dialects are groups of MLIR operations, types and attributes, as well as behavior associated with the...
This is a utility class for mapping one set of IR entities to another.
This class represents a diagnostic that is inflight and set to be reported.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
MLIRContext is the top-level object for a collection of MLIR operations.
The OpAsmParser has methods for interacting with the asm parser: parsing things from it,...
virtual OptionalParseResult parseOptionalAssignmentList(SmallVectorImpl< Argument > &lhs, SmallVectorImpl< UnresolvedOperand > &rhs)=0
virtual ParseResult parseRegion(Region &region, ArrayRef< Argument > arguments={}, bool enableNameShadowing=false)=0
Parses a region.
virtual ParseResult resolveOperand(const UnresolvedOperand &operand, Type type, SmallVectorImpl< Value > &result)=0
Resolve an operand to an SSA value, emitting an error on failure.
ParseResult resolveOperands(Operands &&operands, Type type, SmallVectorImpl< Value > &result)
Resolve a list of operands to SSA values, emitting an error on failure, or appending the results to t...
virtual ParseResult parseOperand(UnresolvedOperand &result, bool allowResultNumber=true)=0
Parse a single SSA value operand name along with a result number if allowResultNumber is true.
This is a pure-virtual base class that exposes the asmprinter hooks necessary to implement a custom p...
virtual void printOptionalAttrDictWithKeyword(ArrayRef< NamedAttribute > attrs, ArrayRef< StringRef > elidedAttrs={})=0
If the specified operation has attributes, print out an attribute dictionary prefixed with 'attribute...
virtual void printOptionalAttrDict(ArrayRef< NamedAttribute > attrs, ArrayRef< StringRef > elidedAttrs={})=0
If the specified operation has attributes, print out an attribute dictionary with their values.
void printFunctionalType(Operation *op)
Print the complete type of an operation in functional form.
virtual void printRegion(Region &blocks, bool printEntryBlockArgs=true, bool printBlockTerminators=true, bool printEmptyBlock=false)=0
Prints a region.
This class helps build Operations.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
This class indicates that op operates on tosa shape types.
Simple wrapper around a void* in order to express generically how to pass in op properties through AP...
Operation is the basic unit of execution within MLIR.
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
operand_type_range getOperandTypes()
result_type_range getResultTypes()
operand_range getOperands()
Returns an iterator on the underlying Value's.
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
This class implements Optional functionality for ParseResult.
ParseResult value() const
Access the internal ParseResult value.
bool has_value() const
Returns true if we contain a valid ParseResult value.
This class provides an abstraction over the different types of ranges over Regions.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Adaptor class to abstract the differences between whether value is from a ShapedType or ShapedTypeCom...
int64_t getDimSize(int index) const
Returns the size of the index'th dimension.
int64_t getRank() const
Returns the rank of the shape.
void getDims(SmallVectorImpl< int64_t > &res) const
Populates the dimensions from shape referenced.
bool hasRank() const
Returns whether the shape has a rank.
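A minimal sketch, not code from this file, of how a ShapeAdaptor is typically queried inside a shape-inference helper; the helper name collectDims is a hypothetical example.

#include "mlir/Interfaces/InferTypeOpInterface.h"
#include "llvm/ADT/SmallVector.h"

static bool collectDims(mlir::ShapeAdaptor shape,
                        llvm::SmallVectorImpl<int64_t> &dims) {
  if (!shape.hasRank())
    return false;              // Unranked: neither rank nor sizes are known.
  shape.getDims(dims);         // One entry per dimension, dynamic dims included.
  if (shape.getRank() > 0)
    (void)shape.getDimSize(0); // Individual dimensions can also be queried.
  return true;
}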
ShapedTypeComponents that represents the components of a ShapedType.
Tensor types represent multi-dimensional arrays, and have two variants: RankedTensorType and Unranked...
ArrayRef< int64_t > getShape() const
Returns the shape of this tensor type.
bool hasRank() const
Returns if this type is ranked, i.e. it has a known number of dimensions.
This class provides an abstraction over the various different ranges of value types.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
bool isInteger() const
Return true if this is an integer type (with the specified width).
unsigned getIntOrFloatBitWidth() const
Return the bit width of an integer or a float type, assert failure on other types.
This class provides an abstraction over the different types of ranges over Values.
Range of values and shapes (corresponding effectively to Shapes dialect's ValueShape type concept).
ShapeAdaptor getShape(int index) const
Returns the shape of index'th operand.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
Base class for DenseArrayAttr that is instantiated and specialized for each supported element type be...
LogicalResult verifyAtLeastNOperands(Operation *op, unsigned numOperands)
LogicalResult verifyTosaShapeOperator(Operation *op)
LogicalResult verifyTosaShapeOperatorWithSameRanks(Operation *op)
LogicalResult verifyTosaResolvableShapeOperands(Operation *op)
bool getBroadcastedShape(ArrayRef< int64_t > shape1, ArrayRef< int64_t > shape2, SmallVectorImpl< int64_t > &resultShape)
Returns true and sets resultShape to the broadcasted shape from the two given shapes if they are broa...
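A minimal sketch, assuming the OpTrait::util::getBroadcastedShape helper declared in mlir/Dialect/Traits.h; the concrete shapes are illustrative only.

#include "mlir/Dialect/Traits.h"
#include "llvm/ADT/SmallVector.h"

void broadcastExample() {
  llvm::SmallVector<int64_t> result;
  int64_t lhs[] = {1, 3, 4};
  int64_t rhs[] = {2, 1, 4};
  // Size-1 dimensions stretch to match the other operand, so the
  // broadcasted shape here is {2, 3, 4} and the call returns true.
  bool ok = mlir::OpTrait::util::getBroadcastedShape(lhs, rhs, result);
  (void)ok;
}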
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
QueryRef parse(llvm::StringRef line, const QuerySession &qs)
ConvOpQuantizationAttr buildConvOpQuantizationAttr(OpBuilder &builder, Value input, Value weight)
Method to build ConvOpQuantizationAttr, called from ConvOpQuantInfoBuilder/TransConvOpQuantInfoBuilde...
Type buildConvOpResultTypeInfo(OpBuilder &builder, Type outputType, Value input, Value weight)
Construct ConvOp output type with correct bitwidth based on input/weight width.
LogicalResult getZeroPoint(ElementsAttr zpAttr, int64_t &zp)
PadOpQuantizationAttr buildPadOpQuantizationAttr(OpBuilder &builder, Value input)
Builds PadOpQuantizationAttr, called from PadOpQuantInfoBuilder: inputZp: input zeropoint.
std::pair< Value, Value > createZPsAsConst(OpBuilder &builder, Value input, Value weight)
ParseResult parseTypeOrAttr(OpAsmParser &parser, TypeAttr &typeAttr, Attribute &attr)
MatMulOpQuantizationAttr buildMatMulOpQuantizationAttr(OpBuilder &builder, Value a, Value b)
Builds MatMulOpQuantizationAttr, called from MatMulOpQuantInfoBuilder: aZp: input a zeropoint bZp: in...
std::optional< Value > createZeroPointTensor(OpBuilder &builder, Location loc, Type srcElemType, int64_t zp=0)
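A minimal usage sketch based only on the signature above; the header location (assumed to be mlir/Dialect/Tosa/Utils/QuantUtils.h) and the surrounding builder, location, and element type are assumptions for illustration.

#include "mlir/Dialect/Tosa/Utils/QuantUtils.h"
#include <optional>

mlir::Value makeZeroPoint(mlir::OpBuilder &builder, mlir::Location loc,
                          mlir::Type srcElemType) {
  // Returns std::nullopt when a zero-point tensor cannot be materialized
  // for the given element type.
  if (std::optional<mlir::Value> zp =
          mlir::tosa::createZeroPointTensor(builder, loc, srcElemType, /*zp=*/0))
    return *zp;
  return mlir::Value();
}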
bool isa_tosa_shape_type(mlir::Type t)
void printTypeOrAttr(OpAsmPrinter &p, Operation *op, TypeAttr type, Attribute attr)
UnaryOpQuantizationAttr buildUnaryOpQuantizationAttr(OpBuilder &builder, Value input, Type outputRawType)
Builds UnaryOpQuantizationAttr UnaryOpQuantInfoBuilder: inputZp: input zeropoint outputZp: output zer...
bool getConstShapeValue(Operation *op, llvm::SmallVector< int64_t > &result_shape)
Include the generated interface declarations.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
LogicalResult reifyResultShapes(OpBuilder &b, Operation *op, ReifiedRankedShapedTypeDims &reifiedReturnShapes)
Reify the shape of the result of an operation (typically in terms of the shape of its operands).
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
LogicalResult emitOptionalError(std::optional< Location > loc, Args &&...args)
Overloads of the above emission functions that take an optionally null location.
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
Type getElementTypeOrSelf(Type type)
Return the element type or return the type itself.
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
LogicalResult verifyCompatibleShape(ArrayRef< int64_t > shape1, ArrayRef< int64_t > shape2)
Returns success if the given two shapes are compatible.
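A minimal sketch, assuming mlir::verifyCompatibleShape from mlir/IR/TypeUtilities.h; the shapes are illustrative only.

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"

bool shapesAgree() {
  int64_t a[] = {2, mlir::ShapedType::kDynamic, 4};
  int64_t b[] = {2, 3, 4};
  // A dynamic dimension is compatible with any static size, so this succeeds.
  return mlir::succeeded(mlir::verifyCompatibleShape(a, b));
}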
detail::constant_op_matcher m_Constant()
Matches a constant foldable operation.
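A minimal sketch of the matchPattern / m_Constant idiom from mlir/IR/Matchers.h; the function name and the choice of DenseElementsAttr as the bound attribute are illustrative.

#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Value.h"

bool isFoldableConstant(mlir::Value value) {
  mlir::DenseElementsAttr attr;
  // Succeeds only if `value` is produced by a constant-foldable op whose
  // folded value can be bound to a DenseElementsAttr.
  return mlir::matchPattern(value, mlir::m_Constant(&attr));
}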
LogicalResult verify(Operation *op, bool verifyRecursively=true)
Perform (potentially expensive) checks of invariants, used to detect compiler bugs,...
bool isPermutationVector(ArrayRef< int64_t > interchange)
Method to check if an interchange vector is a permutation.
This is the representation of an operand reference.
This represents an operation in an abstracted form, suitable for use with the builder APIs.
SmallVector< Value, 4 > operands
void addOperands(ValueRange newOperands)
void addAttribute(StringRef name, Attribute attr)
Add an attribute with the specified name.
void addTypes(ArrayRef< Type > newTypes)
SmallVector< std::unique_ptr< Region >, 1 > regions
Regions that the op will hold.
SmallVector< Type, 4 > types
Types of the results of this operation.
Region * addRegion()
Create a region that should be attached to the operation.
Statically known information for a particular Value.
static ValueKnowledge meet(const ValueKnowledge &lhs, const ValueKnowledge &rhs)
static ValueKnowledge getKnowledgeFromType(Type type)
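A minimal sketch based on the two static members listed above (assumed to be declared in mlir/Dialect/Tosa/Utils/ShapeUtils.h); the two Type arguments are placeholders for whatever the caller is propagating.

#include "mlir/Dialect/Tosa/Utils/ShapeUtils.h"

mlir::tosa::ValueKnowledge combine(mlir::Type a, mlir::Type b) {
  auto lhs = mlir::tosa::ValueKnowledge::getKnowledgeFromType(a);
  auto rhs = mlir::tosa::ValueKnowledge::getKnowledgeFromType(b);
  // `meet` keeps only the facts (rank, sizes, element type) that both
  // inputs agree on.
  return mlir::tosa::ValueKnowledge::meet(lhs, rhs);
}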