27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/TypeSwitch.h"
33 #include "mlir/Dialect/Tosa/IR/TosaOpsDialect.cpp.inc"
39 #include "mlir/Dialect/Tosa/IR/TosaInterfaces.cpp.inc"
42 #include "mlir/Dialect/Tosa/IR/TosaDialectBytecode.cpp.inc"
63 return (isa<tosa::IfOp>(dest->getParentOp()) ||
64 isa<tosa::WhileOp>(dest->getParentOp()));
70 TosaDialectBytecodeInterface(Dialect *dialect)
82 return ::writeAttribute(attr, writer);
94 return ::writeType(type, writer);
101 std::unique_ptr<DialectVersion>
104 reader.emitError("Dialect does not support versioning");
127 void TosaDialect::initialize() {
130 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"
133 #define GET_ATTRDEF_LIST
134 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
136 addInterfaces<TosaDialectBytecodeInterface, TosaInlinerInterface>();
143 if (llvm::isa<ElementsAttr>(value))
144 return builder.create<tosa::ConstOp>(loc, type,
145 llvm::cast<ElementsAttr>(value));
158 << "expected attribute";
160 if (auto typedAttr = attr.dyn_cast<TypedAttr>()) {
177 bool needsSpace = false;
179 if (!typedAttr || typedAttr.getType() != type.getValue()) {
197 if (!shapedType.hasRank())
200 auto rank = shapedType.getRank();
202 for (int i = 0; i < rank; i++) {
203 if (shapedType.isDynamicDim(i))
205 if (shapedType.getDimSize(i) == 0)
214 auto inputType = llvm::dyn_cast<RankedTensorType>(op.getInput().getType());
215 auto weightType = llvm::dyn_cast<RankedTensorType>(op.getWeight().getType());
219 op.emitOpError("expect a ranked tensor for input, got ") << op.getInput();
223 op.emitOpError("expect a ranked tensor for weight, got ") << op.getWeight();
228 return op.emitOpError() << "tensor has a dimension with size zero. Each "
229 "dimension of a tensor must have size >= 1";
231 auto inputEType = inputType.getElementType();
232 auto weightEType = weightType.getElementType();
234 bool inputIsQuant = !llvm::isa<FloatType>(inputEType);
235 bool weightIsQuant = !llvm::isa<FloatType>(weightEType);
238 if (inputIsQuant != weightIsQuant) {
240 "expect both input and weight to be float or not together, got ")
241 << inputEType << " and " << weightEType;
247 if ((inputIsQuant && !op.getQuantizationInfo()) ||
248 (!inputIsQuant && op.getQuantizationInfo())) {
249 op.emitOpError("quantizationattr is required for quantized type, and not "
250 "allowed for float type");
259 const auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
260 if (!resultETy.isIntOrIndex())
261 return emitOpError("result tensor is not of integer type");
264 const auto inputType = llvm::cast<ShapedType>(getInput().getType());
265 const int64_t axis = getAxisAttr().getInt();
266 if (inputType.hasRank() && ((axis < 0) || axis >= inputType.getRank()))
267 return emitOpError("specified axis is outside the rank of the tensor");
273 auto inputType = llvm::cast<ShapedType>(getInput().getType());
275 return emitOpError() << "tensor has a dimension with size zero. Each "
276 "dimension of a tensor must have size >= 1";
278 auto inputETy = inputType.getElementType();
279 auto resultETy = llvm::cast<ShapedType>(getType()).getElementType();
282 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
283 inputETy = quantType.getStorageType();
286 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(resultETy))
287 resultETy = quantType.getStorageType();
289 auto accType = getAccType();
290 if (llvm::isa<IntegerType>(inputETy) && !accType.isInteger(32))
291 return emitOpError("accumulator type for integer tensor is not i32");
293 if (inputETy.isF16() && !(accType.isF16() || accType.isF32()))
294 return emitOpError("accumulator type for f16 tensor is not f16/f32");
296 if (inputETy.isBF16() && !accType.isF32())
297 return emitOpError("accumulator type for bf16 tensor is not f32");
299 if (inputETy.isF32() && !accType.isF32())
300 return emitOpError("accumulator type for f32 tensor is not f32");
302 if ((inputETy.isF32() && resultETy.isF32()) ||
303 (inputETy.isF16() && resultETy.isF16()) ||
304 (inputETy.isBF16() && resultETy.isBF16()) ||
305 (inputETy.isInteger(8) && resultETy.isInteger(8)) ||
306 (inputETy.isInteger(16) && resultETy.isInteger(16)))
309 return emitOpError("input/output element types are incompatible.");
314 llvm::cast<ShapedType>(getInput().getType()).getElementType();
316 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy)) {
317 inputETy = quantType.getStorageType();
319 mlir::Type maxFpType = getMaxFpAttr().getType();
320 mlir::Type minFpType = getMinFpAttr().getType();
322 llvm::cast<ShapedType>(getOutput().getType()).getElementType();
324 llvm::dyn_cast<mlir::quant::UniformQuantizedType>(outputETy)) {
325 outputETy = quantType.getStorageType();
329 if (inputETy != outputETy)
330 return emitOpError("input/output element types are incompatible.");
335 if (!inputETy.isInteger(dataTypeBitWidth)) {
336 if (((maxFpType != minFpType) ||
339 return emitOpError("min/max attributes types are incompatible with "
340 "input/output element types.");
424 auto inputType = llvm::dyn_cast<ShapedType>(a.getType());
425 assert(inputType && "Input must be a shaped tensor type!");
427 auto inputQType = llvm::dyn_cast<mlir::quant::UniformQuantizedType>(
428 inputType.getElementType());
429 assert(inputQType && "Tensor must have quantized datatype!");
431 unsigned inputBits = inputQType.getStorageTypeIntegralWidth();
433 auto outputShapedType = llvm::dyn_cast<ShapedType>(outputType);
434 assert(outputShapedType && "Output must be a shaped type");
436 IntegerType accElementType;
441 auto accType = outputShapedType.clone(accElementType);
454 DenseArrayAttr kernel, DenseArrayAttr stride,
455 DenseArrayAttr pad, TypeAttr acc_type) {
464 result.types.push_back(outputType);
477 result.types.push_back(outputType);
490 result.types.push_back(outputType);
504 result.types.push_back(outputType);
514 for (int i = 0, e = operands.size(); i != e; ++i) {
516 if (!shape.hasRank()) {
521 outRank = std::max<int64_t>(outRank, shape.getRank());
524 outShape.resize(outRank, 1);
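// Right-align each operand's shape against the running output shape: a static
// size of 1 broadcasts to the other operand's size; two differing non-1 static
// sizes cannot be reconciled and make the broadcast fail.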
526 for (int i = 0, e = operands.size(); i != e; ++i) {
528 auto rankDiff = outShape.size() - shape.getRank();
530 for (size_t i = 0, e = shape.getRank(); i < e; ++i) {
531 auto dim1 = outShape[i + rankDiff];
532 auto dim2 = shape.getDimSize(i);
533 auto resolvedDim = dim1;
537 } else if (dim2 == 1) {
539 } else if (dim1 != dim2) {
542 outShape[i + rankDiff] = resolvedDim;
550 MLIRContext *context, ::std::optional<Location> location,
551 ArgMaxOp::Adaptor adaptor,
554 IntegerAttr axis = adaptor.getProperties().axis;
555 int32_t axisVal = axis.getValue().getSExtValue();
557 if (!inputShape.hasRank()) {
563 outShape.reserve(inputShape.getRank() - 1);
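// Build the output shape from the input shape with the reduction axis removed.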
564 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
567 outShape.push_back(inputShape.getDimSize(i));
575 MLIRContext *context, ::std::optional<Location> location,
576 RFFT2dOp::Adaptor adaptor,
580 if (!inputShape.hasRank())
584 outputShape.resize(3, ShapedType::kDynamic);
585 outputShape[0] = inputShape.getDimSize(0);
586 outputShape[1] = inputShape.getDimSize(1);
587 int64_t inWidth = inputShape.getDimSize(2);
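// A real-input FFT keeps only the non-redundant half of the spectrum, so the
// innermost output dimension is inWidth / 2 + 1.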
591 if (inWidth != ShapedType::kDynamic)
592 outputShape[2] = inWidth / 2 + 1;
601 MLIRContext *context, ::std::optional<Location> location,
602 FFT2dOp::Adaptor adaptor,
604 inferredReturnShapes.push_back(
606 inferredReturnShapes.push_back(
612 MLIRContext *context, ::std::optional<Location> location,
613 ConcatOp::Adaptor adaptor,
616 const Properties &prop = adaptor.getProperties();
617 int32_t axis = prop.axis.getValue().getSExtValue();
619 bool hasRankedInput = false;
620 for (auto operand : adaptor.getOperands()) {
622 if (!operandShape.hasRank())
627 outputShape.resize(operandShape.getRank(), ShapedType::kDynamic);
630 for (int i = 0, s = operandShape.getRank(); i < s; i++) {
631 if (i == axis || operandShape.isDynamicDim(i))
633 if (outputShape[i] == ShapedType::kDynamic)
634 outputShape[i] = operandShape.getDimSize(i);
635 if (outputShape[i] != operandShape.getDimSize(i))
637 "Cannot concat tensors with different sizes"
638 " on the non-axis dimension ",
642 hasRankedInput = true;
645 llvm::cast<TensorType>(adaptor.getInput1().getType()[0]).getElementType();
646 if (!hasRankedInput) {
652 int64_t concatDimSize = 0;
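// Accumulate the axis dimension over all operands; an unranked operand or a
// dynamic axis dimension makes the whole concatenation dimension dynamic.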
653 for (auto operand : adaptor.getOperands()) {
658 if (!operandShape.hasRank() || operandShape.isDynamicDim(axis)) {
659 concatDimSize = ShapedType::kDynamic;
663 concatDimSize += operandShape.getDimSize(axis);
666 outputShape[axis] = concatDimSize;
673 MLIRContext *context, ::std::optional<Location> location,
690 if (l.size() != r.size() || l.size() != 1)
695 LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
696 MLIRContext *context, ::std::optional<Location> location,
697 FullyConnectedOp::Adaptor adaptor,
700 ShapeAdaptor weightShape(adaptor.getWeight().getType());
705 outShape.resize(2, ShapedType::kDynamic);
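// Result is [batch, out_channels]: batch comes from the input, out_channels
// from the weight shape (falling back to the bias when the weight is unranked).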
707 if (inputShape.hasRank()) {
708 outShape[0] = inputShape.getDimSize(0);
711 if (weightShape.hasRank()) {
712 outShape[1] = weightShape.getDimSize(0);
715 if (biasShape.hasRank()) {
716 outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
727 MLIRContext *context, ::std::optional<Location> location,
728 MatMulOp::Adaptor adaptor,
735 outShape.resize(3, ShapedType::kDynamic);
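// Batched matmul of [N, H, C] by [N, C, W] yields [N, H, W]; each output
// dimension is taken from whichever operand knows it statically.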
737 if (lhsShape.hasRank()) {
738 outShape[0] = lhsShape.getDimSize(0);
739 outShape[1] = lhsShape.getDimSize(1);
742 if (rhsShape.hasRank()) {
743 outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
745 outShape[2] = rhsShape.getDimSize(2);
753 MLIRContext *context, ::std::optional<Location> location,
754 PadOp::Adaptor adaptor,
757 ShapeAdaptor paddingShape(adaptor.getPadding().getType());
762 if (!inputShape.hasRank() && !paddingShape.hasRank()) {
769 if (!inputShape.hasRank()) {
770 if (paddingShape.isDynamicDim(0)) {
775 outputShape.resize(paddingShape.getDimSize(0), ShapedType::kDynamic);
783 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
789 for (auto val : paddings) {
790 paddingValues.push_back(val.getSExtValue());
793 outputShape.reserve(inputShape.getRank());
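// Each output dimension is the input dimension plus its low/high padding pair
// read from the constant padding tensor above.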
794 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
795 if (inputShape.isDynamicDim(i)) {
796 outputShape.push_back(ShapedType::kDynamic);
800 outputShape.push_back(inputShape.getDimSize(i) + paddingValues[i * 2] +
801 paddingValues[i * 2 + 1]);
809 return to_vector(llvm::map_range(shape, [](int64_t dim) {
810 return dim == -1 ? ShapedType::kDynamic : dim;
815 MLIRContext *context, ::std::optional<Location> location,
816 SliceOp::Adaptor adaptor,
818 inferredReturnShapes.push_back(
824 auto inputType = llvm::dyn_cast<RankedTensorType>(getInput().getType());
828 if (static_cast<size_t>(inputType.getRank()) != getStart().size())
830 "length of start attribute is not equal rank of input shape");
832 if (static_cast<size_t>(inputType.getRank()) != getSize().size())
834 "length of size attribute is not equal rank of input shape");
840 MLIRContext *context, ::std::optional<Location> location,
841 TableOp::Adaptor adaptor,
845 if (!inputShape.hasRank()) {
850 inferredReturnShapes.resize(1);
851 inputShape.getDims(inferredReturnShapes[0]);
856 MLIRContext *context, ::std::optional<Location> location,
857 TileOp::Adaptor adaptor,
862 if (!inputShape.hasRank()) {
863 outputShape.resize(multiples.size(), ShapedType::kDynamic);
866 } else if (static_cast<size_t>(inputShape.getRank()) != multiples.size())
870 outputShape.reserve(multiples.size());
871 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
872 int64_t dim = inputShape.getDimSize(i);
873 if (dim != ShapedType::kDynamic)
875 outputShape.push_back(dim);
883 ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
884 ShapedType outputType = llvm::cast<ShapedType>(getType());
885 auto multiples = getMultiples();
887 if (inputType.hasRank()) {
888 if (static_cast<size_t>(inputType.getRank()) != multiples.size())
889 return emitOpError("expect 'multiples' array to have length ")
890 << inputType.getRank() << " but got " << multiples.size() << ".";
891 if (outputType.hasRank() && inputType.getRank() != outputType.getRank())
892 return emitOpError("expect same input and output tensor rank.");
893 } else if (outputType.hasRank() &&
894 static_cast<size_t>(outputType.getRank()) != multiples.size())
895 return emitOpError("expect 'multiples' array to have length ")
896 << outputType.getRank() << " but got " << multiples.size() << ".";
902 if (l.size() != r.size() || l.size() != 1)
908 MLIRContext *context, ::std::optional<Location> location,
909 ReshapeOp::Adaptor adaptor,
918 if (!inputShape.hasRank() || !inputShape.hasStaticShape()) {
919 inferredReturnShapes.push_back(
927 int64_t numElements = inputShape.getNumElements();
928 int64_t staticMul = 1;
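// staticMul accumulates the product of the static new-shape entries; any
// dynamic (-1) entry is then inferred as numElements / staticMul.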
929 for (auto val : newShapeValue) {
930 if (!ShapedType::isDynamic(val)) {
936 for (auto &val : newShapeValue) {
937 if (ShapedType::isDynamic(val))
938 val = numElements / staticMul;
941 inferredReturnShapes.push_back(
947 ShapedType inputType = llvm::cast<ShapedType>(getInput1().getType());
948 ShapedType outputType = llvm::cast<ShapedType>(getType());
951 return emitOpError() << "tensor has a dimension with size zero. Each "
952 "dimension of a tensor must have size >= 1";
954 if (inputType.hasStaticShape() && outputType.hasStaticShape()) {
955 int64_t inputElementsNum = inputType.getNumElements();
956 int64_t outputElementsNum = outputType.getNumElements();
957 if (inputElementsNum != outputElementsNum) {
958 return emitOpError() << "Cannot reshape " << inputElementsNum
959 << " elements into " << outputElementsNum;
972 perms = llvm::to_vector(
973 llvm::map_range(permsAttr.getValues<APInt>(),
974 [](const APInt &val) { return val.getSExtValue(); }));
980 MLIRContext *context, ::std::optional<Location> location,
981 TransposeOp::Adaptor adaptor,
988 if (!inputShape.hasRank() || !permsShape.hasRank() ||
989 permsShape.isDynamicDim(0)) {
996 if (permsShape.getDimSize(0) != inputShape.getRank()) {
1003 if (!inputShape.hasRank()) {
1004 outputShape.resize(permsShape.getDimSize(0), ShapedType::kDynamic);
1010 if (inputShape.getRank() == 0) {
1016 bool allTheSame = true;
1017 for (int i = 1, s = inputShape.getRank(); i < s; i++) {
1018 if (inputShape.getDimSize(0) != inputShape.getDimSize(i)) {
1027 outputShape.resize(inputShape.getRank(), inputShape.getDimSize(0));
1032 outputShape.resize(inputShape.getRank(), ShapedType::kDynamic);
1037 attr.getType().getRank() == 1) {
1040 if (inputShape.getRank() != permShape.getRank())
1042 "constant permutation must be the same length"
1043 " as the input rank");
1046 for (int i = 0, e = inputShape.getRank(); i < e; i++) {
1047 if (inputShape.getRank() <= permShape.getDimSize(i))
1051 outputShape.reserve(inputShape.getRank());
1052 for (int i = 0, s = inputShape.getRank(); i < s; i++) {
1053 outputShape[i] = inputShape.getDimSize(permShape.getDimSize(i));
1062 MLIRContext *context, ::std::optional<Location> location,
1063 GatherOp::Adaptor adaptor,
1066 outputShape.resize(3, ShapedType::kDynamic);
1068 ShapeAdaptor valuesShape(adaptor.getValues().getType());
1069 if (valuesShape.hasRank()) {
1070 outputShape[0] = valuesShape.getDimSize(0);
1071 outputShape[2] = valuesShape.getDimSize(2);
1074 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1075 if (indicesShape.hasRank()) {
1076 if (outputShape[0] == ShapedType::kDynamic)
1077 outputShape[0] = indicesShape.getDimSize(0);
1078 if (outputShape[1] == ShapedType::kDynamic)
1079 outputShape[1] = indicesShape.getDimSize(1);
1087 MLIRContext *context, ::std::optional<Location> location,
1088 ResizeOp::Adaptor adaptor,
1091 outputShape.resize(4, ShapedType::kDynamic);
1094 if (!inputShape.hasRank())
1097 outputShape[0] = inputShape.getDimSize(0);
1098 outputShape[3] = inputShape.getDimSize(3);
1099 int64_t inputHeight = inputShape.getDimSize(1);
1100 int64_t inputWidth = inputShape.getDimSize(2);
1102 if ((inputHeight == ShapedType::kDynamic) ||
1103 (inputWidth == ShapedType::kDynamic))
1112 (((inputHeight - 1) * scaleInt[0] - offsetInt[0] + borderInt[0]) /
1117 (((inputWidth - 1) * scaleInt[2] - offsetInt[1] + borderInt[1]) /
1126 MLIRContext *context, ::std::optional<Location> location,
1127 ScatterOp::Adaptor adaptor,
1130 outputShape.resize(3, ShapedType::kDynamic);
1132 ShapeAdaptor valuesInShape(adaptor.getValuesIn().getType());
1133 if (valuesInShape.hasRank()) {
1134 outputShape[0] = valuesInShape.getDimSize(0);
1135 outputShape[1] = valuesInShape.getDimSize(1);
1136 outputShape[2] = valuesInShape.getDimSize(2);
1139 ShapeAdaptor indicesShape(adaptor.getIndices().getType());
1140 if (indicesShape.hasRank()) {
1141 if (outputShape[0] == ShapedType::kDynamic)
1142 outputShape[0] = indicesShape.getDimSize(0);
1146 if (inputShape.hasRank()) {
1147 if (outputShape[0] == ShapedType::kDynamic)
1148 outputShape[0] = inputShape.getDimSize(0);
1149 if (outputShape[2] == ShapedType::kDynamic)
1150 outputShape[2] = inputShape.getDimSize(2);
1160 int64_t axisVal = axis.getValue().getSExtValue();
1161 if (!operandShape.hasRank() || operandShape.getRank() <= axisVal) {
1167 operandShape.getDims(outputShape);
1168 outputShape[axisVal] = 1;
1173 #define COMPATIBLE_RETURN_TYPES(OP) \
1174 bool OP::isCompatibleReturnTypes(TypeRange l, TypeRange r) { \
1175 if (l.size() != r.size() || l.size() != 1) \
1177 if (getElementTypeOrSelf(l[0]) != getElementTypeOrSelf(r[0])) \
1179 return succeeded(verifyCompatibleShape(l[0], r[0])); \
1182 #define REDUCE_SHAPE_INFER(OP) \
1183 LogicalResult OP::inferReturnTypeComponents( \
1184 MLIRContext *context, ::std::optional<Location> location, \
1185 OP::Adaptor adaptor, \
1186 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1188 llvm::cast<TensorType>(adaptor.getInput().getType()).getElementType(); \
1189 ShapeAdaptor inputShape(adaptor.getInput().getType()); \
1190 const Properties &prop = adaptor.getProperties(); \
1191 return ReduceInferReturnTypes(inputShape, inputType, prop.axis, \
1192 inferredReturnShapes); \
1194 COMPATIBLE_RETURN_TYPES(OP)
1202 #undef REDUCE_SHAPE_INFER
1204 #undef COMPATIBLE_RETURN_TYPES
1206 template <typename T>
1209 TensorType inputType = op.getInput().getType();
1210 TensorType outputType = op.getOutput().getType();
1211 int32_t reduceAxis = op.getAxis();
1213 if (reduceAxis < 0) {
1214 op.emitOpError("reduce axis must not be negative");
1218 int64_t inputRank = inputType.getRank();
1221 if (reduceAxis >= inputRank && !(reduceAxis == 0 && inputRank == 0)) {
1223 << inputRank << ") to be larger than reduce axis (" << reduceAxis
1229 int64_t outputRank = outputType.getRank();
1230 if (inputType.hasRank() && outputRank != inputType.getRank()) {
1232 "expect output tensor rank to be equal to input tensor rank");
1235 if (reduceAxis >= outputRank && !(reduceAxis == 0 && outputRank == 0)) {
1237 << outputRank << ") to be larger than reduce axis (" << reduceAxis
1243 if (outputRank != 0) {
1244 auto outputShape = outputType.getShape();
1245 if (!outputType.isDynamicDim(reduceAxis) &&
1246 outputShape[reduceAxis] != 1) {
1247 op.emitOpError("expect reduced dimension size to be 1, got ")
1248 << outputShape[reduceAxis];
1275 #define NARY_SHAPE_INFER(OP) \
1276 LogicalResult OP::inferReturnTypeComponents( \
1277 MLIRContext *context, ::std::optional<Location> location, \
1278 ValueShapeRange operands, DictionaryAttr attributes, \
1279 OpaqueProperties properties, RegionRange regions, \
1280 SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
1281 return NAryInferReturnTypes(operands, inferredReturnShapes); \
1322 #undef PRED_SHAPE_INFER
1329 outputShape.resize(4, ShapedType::kDynamic);
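// NHWC pooling: batch and channels pass through unchanged; each spatial output
// size is (in + pad_before + pad_after - kernel) / stride + 1.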
1344 if (!ShapedType::isDynamic(height)) {
1345 int64_t padded = height + pad[0] + pad[1] - kernel[0];
1346 outputShape[1] = padded / stride[0] + 1;
1349 if (!ShapedType::isDynamic(width)) {
1350 int64_t padded = width + pad[2] + pad[3] - kernel[1];
1351 outputShape[2] = padded / stride[1] + 1;
1359 MLIRContext *context, ::std::optional<Location> location,
1360 Conv2DOp::Adaptor adaptor,
1364 int64_t inputWidth = ShapedType::kDynamic;
1365 int64_t inputHeight = ShapedType::kDynamic;
1366 int64_t weightWidth = ShapedType::kDynamic;
1367 int64_t weightHeight = ShapedType::kDynamic;
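// Weight dim 0 supplies the output channels; dims 1 and 2 are the kernel
// height/width fed into the dilated-convolution size formula below.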
1372 if (inputShape.hasRank()) {
1373 outputShape[0] = inputShape.getDimSize(0);
1374 inputHeight = inputShape.getDimSize(1);
1375 inputWidth = inputShape.getDimSize(2);
1379 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1380 if (weightShape.hasRank()) {
1381 outputShape[3] = weightShape.getDimSize(0);
1382 weightHeight = weightShape.getDimSize(1);
1383 weightWidth = weightShape.getDimSize(2);
1388 if (biasShape.hasRank()) {
1389 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1390 ? biasShape.getDimSize(0)
1398 if (!ShapedType::isDynamic(inputHeight) &&
1399 !ShapedType::isDynamic(weightHeight)) {
1400 int64_t inputSize = inputHeight + padding[0] + padding[1];
1401 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1402 int64_t unstridedResult = inputSize - filterSize + 1;
1403 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1406 if (!ShapedType::isDynamic(inputWidth) &&
1407 !ShapedType::isDynamic(weightWidth)) {
1408 int64_t inputSize = inputWidth + padding[2] + padding[3];
1409 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1410 int64_t unstridedResult = inputSize - filterSize + 1;
1411 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1421 MLIRContext *context, ::std::optional<Location> location,
1422 Conv3DOp::Adaptor adaptor,
1426 int64_t inputWidth = ShapedType::kDynamic;
1427 int64_t inputHeight = ShapedType::kDynamic;
1428 int64_t inputDepth = ShapedType::kDynamic;
1430 int64_t weightWidth = ShapedType::kDynamic;
1431 int64_t weightHeight = ShapedType::kDynamic;
1432 int64_t weightDepth = ShapedType::kDynamic;
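// Same scheme as Conv2D with an extra depth dimension: each spatial output
// size uses the dilated-kernel formula with its own pad pair and stride.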
1436 if (inputShape.hasRank()) {
1437 outputShape[0] = inputShape.getDimSize(0);
1438 inputDepth = inputShape.getDimSize(1);
1439 inputHeight = inputShape.getDimSize(2);
1440 inputWidth = inputShape.getDimSize(3);
1444 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1445 if (weightShape.hasRank()) {
1446 outputShape[4] = weightShape.getDimSize(0);
1447 weightDepth = weightShape.getDimSize(1);
1448 weightHeight = weightShape.getDimSize(2);
1449 weightWidth = weightShape.getDimSize(3);
1454 if (biasShape.hasRank() && ShapedType::isDynamic(outputShape[4])) {
1455 outputShape[4] = biasShape.getDimSize(0);
1462 if (!ShapedType::isDynamic(inputDepth) &&
1463 !ShapedType::isDynamic(weightDepth)) {
1464 int32_t inputSize = inputDepth + pad[0] + pad[1];
1465 int32_t filterSize = (weightDepth - 1) * dilation[0] + 1;
1466 int32_t unstridedResult = inputSize - filterSize + 1;
1467 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1470 if (!ShapedType::isDynamic(inputHeight) &&
1471 !ShapedType::isDynamic(weightHeight)) {
1472 int32_t inputSize = inputHeight + pad[2] + pad[3];
1473 int32_t filterSize = (weightHeight - 1) * dilation[1] + 1;
1474 int32_t unstridedResult = inputSize - filterSize + 1;
1475 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1478 if (!ShapedType::isDynamic(inputWidth) &&
1479 !ShapedType::isDynamic(weightWidth)) {
1480 int32_t inputSize = inputWidth + pad[4] + pad[5];
1481 int32_t filterSize = (weightWidth - 1) * dilation[2] + 1;
1482 int32_t unstridedResult = inputSize - filterSize + 1;
1483 outputShape[3] = (unstridedResult - 1) / stride[2] + 1;
1493 MLIRContext *context, ::std::optional<Location> location,
1494 AvgPool2dOp::Adaptor adaptor,
1497 const Properties &prop = adaptor.getProperties();
1499 inferredReturnShapes);
1503 MLIRContext *context, ::std::optional<Location> location,
1504 MaxPool2dOp::Adaptor adaptor,
1507 const Properties &prop = adaptor.getProperties();
1509 inferredReturnShapes);
1513 MLIRContext *context, ::std::optional<Location> location,
1514 DepthwiseConv2DOp::Adaptor adaptor,
1518 int64_t inputWidth = ShapedType::kDynamic;
1519 int64_t inputHeight = ShapedType::kDynamic;
1520 int64_t inputChannels = ShapedType::kDynamic;
1522 int64_t weightWidth = ShapedType::kDynamic;
1523 int64_t weightHeight = ShapedType::kDynamic;
1524 int64_t depthChannels = ShapedType::kDynamic;
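// Depthwise weights are laid out as [kernel_h, kernel_w, in_channels,
// channel_multiplier]; output channels are in_channels * channel_multiplier.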
1528 if (inputShape.hasRank()) {
1529 outputShape[0] = inputShape.getDimSize(0);
1530 inputHeight = inputShape.getDimSize(1);
1531 inputWidth = inputShape.getDimSize(2);
1532 inputChannels = inputShape.getDimSize(3);
1536 ShapeAdaptor weightShape(adaptor.getWeight().getType());
1537 if (weightShape.hasRank()) {
1538 weightHeight = weightShape.getDimSize(0);
1539 weightWidth = weightShape.getDimSize(1);
1540 inputChannels = ShapedType::isDynamic(inputChannels)
1541 ? weightShape.getDimSize(2)
1543 depthChannels = weightShape.getDimSize(3);
1548 if (!ShapedType::isDynamic(inputChannels) &&
1549 !ShapedType::isDynamic(depthChannels)) {
1550 outputShape[3] = inputChannels * depthChannels;
1555 if (biasShape.hasRank()) {
1556 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1557 ? biasShape.getDimSize(0)
1565 if (!ShapedType::isDynamic(inputHeight) &&
1566 !ShapedType::isDynamic(weightHeight)) {
1567 int64_t inputSize = inputHeight + padding[0] + padding[1];
1568 int64_t filterSize = (weightHeight - 1) * dilation[0] + 1;
1569 int64_t unstridedResult = inputSize - filterSize + 1;
1570 outputShape[1] = (unstridedResult - 1) / stride[0] + 1;
1573 if (!ShapedType::isDynamic(inputWidth) &&
1574 !ShapedType::isDynamic(weightWidth)) {
1575 int64_t inputSize = inputWidth + padding[2] + padding[3];
1576 int64_t filterSize = (weightWidth - 1) * dilation[1] + 1;
1577 int64_t unstridedResult = inputSize - filterSize + 1;
1578 outputShape[2] = (unstridedResult - 1) / stride[1] + 1;
1588 MLIRContext *context, ::std::optional<Location> location,
1589 TransposeConv2DOp::Adaptor adaptor,
1595 int64_t inputWidth = ShapedType::kDynamic;
1596 int64_t inputHeight = ShapedType::kDynamic;
1597 int64_t weightWidth = ShapedType::kDynamic;
1598 int64_t weightHeight = ShapedType::kDynamic;
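// Transposed convolution grows the spatial dims:
// out = (in - 1) * stride + padding[low] + padding[high] + kernel size.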
1602 if (inputShape.hasRank()) {
1603 outputShape[0] = ShapedType::isDynamic(outputShape[0])
1604 ? inputShape.getDimSize(0)
1606 inputHeight = inputShape.getDimSize(1);
1607 inputWidth = inputShape.getDimSize(2);
1611 ShapeAdaptor weightShape(adaptor.getFilter().getType());
1612 if (weightShape.hasRank()) {
1613 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1614 ? weightShape.getDimSize(0)
1616 weightHeight = weightShape.getDimSize(1);
1617 weightWidth = weightShape.getDimSize(2);
1622 if (biasShape.hasRank()) {
1623 outputShape[3] = ShapedType::isDynamic(outputShape[3])
1624 ? biasShape.getDimSize(0)
1631 if (!ShapedType::isDynamic(inputHeight) &&
1632 !ShapedType::isDynamic(weightHeight)) {
1633 int64_t calculateSize =
1634 (inputHeight - 1) * stride[0] + padding[0] + padding[1] + weightHeight;
1636 ShapedType::isDynamic(outputShape[1]) ? calculateSize : outputShape[1];
1639 if (!ShapedType::isDynamic(inputWidth) &&
1640 !ShapedType::isDynamic(weightWidth)) {
1641 int64_t calculateSize =
1642 (inputWidth - 1) * stride[1] + padding[2] + padding[3] + weightWidth;
1644 ShapedType::isDynamic(outputShape[2]) ? calculateSize : outputShape[2];
1652 MLIRContext *context, ::std::optional<Location> location,
1653 IfOp::Adaptor adaptor,
1656 for (Region *region : adaptor.getRegions()) {
1657 for (auto &block : *region)
1658 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1659 yieldOps.push_back(returnOp);
1662 if (yieldOps.empty())
1667 resultKnowledge.reserve(yieldOps.front().getNumOperands());
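// Seed per-result knowledge from the first yield, then meet it with every
// other yield so the inferred type is the most specific one all branches share.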
1668 for (auto operand : yieldOps.front().getOperands()) {
1669 resultKnowledge.push_back(
1673 for (auto yieldOp : yieldOps) {
1674 if (resultKnowledge.size() != yieldOp.getNumOperands())
1678 int32_t index = it.index();
1680 resultKnowledge[index],
1684 resultKnowledge[index] = meet;
1689 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1696 MLIRContext *context, ::std::optional<Location> location,
1697 WhileOp::Adaptor adaptor,
1700 for (auto &block : adaptor.getBody())
1701 if (auto returnOp = dyn_cast<tosa::YieldOp>(block.getTerminator()))
1702 yieldOps.push_back(returnOp);
1706 if (yieldOps.empty())
1711 resultKnowledge.reserve(yieldOps.front().getNumOperands());
1712 for (auto operand : yieldOps.front().getOperands()) {
1713 resultKnowledge.push_back(
1717 for (auto yieldOp : yieldOps) {
1718 if (resultKnowledge.size() != yieldOp.getNumOperands())
1722 int32_t index = it.index();
1724 resultKnowledge[index],
1726 resultKnowledge[index] = meet;
1732 inferredReturnShapes.push_back(result.getShapedTypeComponents());
1738 std::optional<SmallVector<int64_t, 4>> ApplyScaleOp::getShapeForUnroll() {
1739 if (auto vt = llvm::dyn_cast<VectorType>(getType()))
1740 return llvm::to_vector<4>(vt.getShape());
1741 return std::nullopt;
1778 bool printBlockTerminators = false;
1780 p << " " << getCond();
1781 if (!getResults().empty()) {
1782 p << " -> (" << getResultTypes() << ")";
1784 printBlockTerminators = true;
1789 printBlockTerminators);
1792 auto &elseRegion = getElseBranch();
1793 if (!elseRegion.empty()) {
1797 printBlockTerminators);
1805 TensorType outputType = getOutput().getType();
1806 int32_t reverseAxis = getAxis();
1808 if (reverseAxis < 0)
1809 return emitOpError("expected non-negative reverse axis");
1811 int64_t inputRank = inputType.getRank();
1814 if (reverseAxis >= inputRank && !(reverseAxis == 0 && inputRank == 0))
1815 return emitOpError("expect input tensor rank (")
1816 << inputRank << ") to be larger than reverse axis (" << reverseAxis
1820 int64_t outputRank = outputType.getRank();
1821 if (inputType.hasRank() && outputRank != inputType.getRank())
1823 "expect output tensor rank to be equal to input tensor rank");
1824 if (reverseAxis >= outputRank && !(reverseAxis == 0 && outputRank == 0))
1825 return emitOpError("expect output tensor rank (")
1826 << outputRank << ") to be larger than reverse axis ("
1827 << reverseAxis << ")";
1844 FunctionType functionType;
1849 result.addTypes(functionType.getResults());
1851 if (functionType.getNumInputs() != operands.size()) {
1853 << "expected as many input types as operands "
1854 << "(expected " << operands.size() << " got "
1855 << functionType.getNumInputs() << ")";
1865 for (size_t i = 0, e = regionArgs.size(); i != e; ++i)
1866 regionArgs[i].type = functionType.getInput(i);
1876 StringRef prefix = "") {
1877 assert(blocksArgs.size() == initializers.size() &&
1878 "expected same length of arguments and initializers");
1879 if (initializers.empty())
1882 parser << prefix << '(';
1883 llvm::interleaveComma(
1884 llvm::zip(blocksArgs, initializers), parser,
1885 [&](auto it) { parser << std::get<0>(it) << " = " << std::get<1>(it); });
1905 #define GET_ATTRDEF_CLASSES
1906 #include "mlir/Dialect/Tosa/IR/TosaAttributes.cpp.inc"
1912 #define GET_OP_CLASSES
1913 #include "mlir/Dialect/Tosa/IR/TosaOps.cpp.inc"