#include <type_traits>
static mlir::Value applyPad(Location loc, Value input, ArrayRef<int64_t> pad,
                            TypedAttr padAttr, OpBuilder &rewriter) {
  // Input should be padded only if necessary.
  if (llvm::all_of(pad, [](int64_t p) { return p == 0; }))
    return input;

  ShapedType inputTy = cast<ShapedType>(input.getType());
  Type inputETy = inputTy.getElementType();
  auto inputShape = inputTy.getShape();

  assert((inputShape.size() * 2) == pad.size());
  SmallVector<int64_t, 4> paddedShape;
  SmallVector<OpFoldResult, 8> lowIndices;
  SmallVector<OpFoldResult, 8> highIndices;
  for (size_t i : llvm::seq(inputShape.size())) {
    auto lowPad = pad[i * 2];
    auto highPad = pad[i * 2 + 1];
    if (ShapedType::isDynamic(inputShape[i]))
      paddedShape.push_back(inputShape[i]);
    else
      paddedShape.push_back(inputShape[i] + highPad + lowPad);
    lowIndices.push_back(rewriter.getIndexAttr(lowPad));
    highIndices.push_back(rewriter.getIndexAttr(highPad));
  }
  Value padValue = rewriter.create<arith::ConstantOp>(loc, padAttr);

  return rewriter.create<tensor::PadOp>(
      loc, RankedTensorType::get(paddedShape, inputETy), input, lowIndices,
      highIndices, padValue);
}
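// Illustrative sketch (editorial, not part of the upstream file): with
// pad = [0, 0, 1, 1, 2, 2, 0, 0] and a tensor<1x7x7x3xf32> input, the helper
// would emit roughly:
//
//   %cst = arith.constant 0.0 : f32
//   %padded = tensor.pad %input low[0, 1, 2, 0] high[0, 1, 2, 0] {
//   ^bb0(%i0: index, %i1: index, %i2: index, %i3: index):
//     tensor.yield %cst : f32
//   } : tensor<1x7x7x3xf32> to tensor<1x9x11x3xf32>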
static mlir::Value linalgIntBroadcastExtSIAdd(PatternRewriter &rewriter,
                                              Location loc, Value bias,
                                              Value conv, Value result,
                                              ArrayRef<AffineMap> indexingMaps) {
  ShapedType resultTy = cast<ShapedType>(conv.getType());
  return rewriter
      .create<linalg::GenericOp>(
          loc, resultTy, ValueRange({bias, conv}), result, indexingMaps,
          getNParallelLoopsAttrs(resultTy.getRank()),
          [](OpBuilder &builder, Location loc, ValueRange args) {
            // Sign-extend the bias if its element type is narrower than the
            // accumulator type, then add it to the convolution result.
            Value biasVal = args[0];
            Type resType = args[1].getType();
            if (resType != biasVal.getType()) {
              biasVal = builder.create<arith::ExtSIOp>(loc, resType, biasVal);
            }
            Value added = builder.create<arith::AddIOp>(loc, biasVal, args[1]);
            builder.create<linalg::YieldOp>(loc, added);
          })
      .getResult(0);
}
static AffineMap getBroadcastingMap(PatternRewriter &rewriter, Value source,
                                    Value result) {
  ShapedType resultTy = cast<ShapedType>(result.getType());
  ShapedType sourceTy = cast<ShapedType>(source.getType());
  const int64_t resultRank = resultTy.getRank();
  const int64_t sourceRank = sourceTy.getRank();

  // The source is broadcast across the outer dimensions of the result. A
  // rank-1, size-1 source gets a constant zero expression so that its single
  // element is replicated everywhere.
  SmallVector<AffineExpr> sourceDims;
  assert(sourceTy.hasStaticShape() &&
         "Dynamic broadcasting shapes not supported!");
  if (sourceRank == 1 && sourceTy.getDimSize(0) == 1) {
    sourceDims.push_back(rewriter.getAffineConstantExpr(0));
  } else {
    for (auto dim : llvm::seq<int64_t>(0, sourceRank)) {
      auto expr = rewriter.getAffineDimExpr(dim + resultRank - sourceRank);
      sourceDims.push_back(expr);
    }
  }

  return AffineMap::get(resultRank, /*symbolCount=*/0, sourceDims,
                        rewriter.getContext());
}
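// Worked example (editorial, not upstream code): broadcasting a rank-1 bias
// tensor<8xf32> into a rank-4 NHWC result yields the indexing map
//   (d0, d1, d2, d3) -> (d3)
// while a rank-1, size-1 bias tensor<1xf32> yields the constant map
//   (d0, d1, d2, d3) -> (0)
// so its single element is read for every output position.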
static mlir::Value linalgBroadcastAndMaybeExtSI(PatternRewriter &rewriter,
                                                Location loc, Value source,
                                                Value result) {
  ShapedType resultTy = cast<ShapedType>(result.getType());
  const int64_t resultRank = resultTy.getRank();

  // Broadcast the source into the result tensor with a linalg.generic,
  // sign-extending it first if the element types differ.
  SmallVector<AffineMap, 2> indexingMaps;
  indexingMaps.push_back(getBroadcastingMap(rewriter, source, result));
  indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));

  return rewriter
      .create<linalg::GenericOp>(
          loc, resultTy, ValueRange({source}), result, indexingMaps,
          getNParallelLoopsAttrs(resultTy.getRank()),
          [](OpBuilder &builder, Location loc, ValueRange args) {
            Value biasVal = args[0];
            Type resType = args[1].getType();
            if (resType != biasVal.getType()) {
              biasVal = builder.create<arith::ExtSIOp>(loc, resType, biasVal);
            }
            builder.create<linalg::YieldOp>(loc, biasVal);
          })
      .getResult(0);
}
static mlir::Value reifyConstantDim(int64_t attr,
                                    ImplicitLocOpBuilder &builder) {
  return builder.create<arith::ConstantIndexOp>(attr);
}
// Calculating the output size uses the formula:
//   OH = ((IH + pad_before + pad_after - (dilation * (KH - 1) + 1)) / stride) + 1
static mlir::Value getConvOrPoolOutputDim(Location loc, Value inputDim,
                                          int64_t padBeforeAttr,
                                          int64_t padAfterAttr, Value kernelDim,
                                          int64_t strideAttr,
                                          int64_t dilationAttr,
                                          OpBuilder &rewriter) {
  ImplicitLocOpBuilder builder(loc, rewriter);
  auto one = rewriter.create<arith::ConstantOp>(
      loc, IntegerAttr::get(inputDim.getType(), 1));
  Value padBefore = reifyConstantDim(padBeforeAttr, builder);
  Value paddedBefore = builder.create<arith::AddIOp>(inputDim, padBefore);
  Value padAfter = reifyConstantDim(padAfterAttr, builder);
  Value paddedAfter = builder.create<arith::AddIOp>(paddedBefore, padAfter);
  Value subOne = builder.create<arith::SubIOp>(kernelDim, one);
  Value dilation = reifyConstantDim(dilationAttr, builder);
  Value dilated = builder.create<arith::MulIOp>(dilation, subOne);
  Value addOne = builder.create<arith::AddIOp>(dilated, one);
  Value subtract = builder.create<arith::SubIOp>(paddedAfter, addOne);
  Value stride = reifyConstantDim(strideAttr, builder);
  Value divide = builder.create<arith::DivUIOp>(subtract, stride);
  return builder.create<arith::AddIOp>(divide, one);
}
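// Worked example (editorial): IH = 14, pad = 1/1, KH = 3, dilation = 1,
// stride = 2 gives OH = ((14 + 1 + 1 - (1 * (3 - 1) + 1)) / 2) + 1
//                     = (13 / 2) + 1 = 7 (integer division).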
static SmallVector<Value> inferDynamicDimsForConv(
    Location loc, Value input, Value weight, ShapedType resultTy,
    ArrayRef<int64_t> padAttr, ArrayRef<int64_t> strideAttr,
    ArrayRef<int64_t> dilationAttr, ArrayRef<int64_t> inputSizeDims,
    ArrayRef<int64_t> kernelSizeDims, OpBuilder &rewriter) {
  ShapedType inputTy = cast<ShapedType>(input.getType());
  int64_t inputRank = inputTy.getRank();

  SmallVector<Value> dynDims;
  dynDims.resize(resultTy.getRank());

  for (uint32_t i = 0, s = inputSizeDims.size(); i < s; ++i) {
    int64_t inputDim = inputSizeDims[i];
    int64_t kernelDim = kernelSizeDims[i];
    if (resultTy.isDynamicDim(inputDim)) {
      auto padTop = padAttr[i * 2];
      auto padBottom = padAttr[i * 2 + 1];
      auto stride = strideAttr[i];
      auto dilation = dilationAttr[i];
      Value initDynDim = rewriter.create<tensor::DimOp>(loc, input, inputDim);
      Value kernelDynDim =
          rewriter.create<tensor::DimOp>(loc, weight, kernelDim);
      dynDims[inputDim] =
          getConvOrPoolOutputDim(loc, initDynDim, padTop, padBottom,
                                 kernelDynDim, stride, dilation, rewriter);
    }
  }

  // Remaining dynamic dimensions (batch/channels) are read directly off the
  // input tensor.
  for (int i = 0; i < inputRank; i++) {
    if (resultTy.isDynamicDim(i) && !dynDims[i])
      dynDims[i] = rewriter.create<tensor::DimOp>(loc, input, i);
  }

  return condenseValues(dynDims);
}
static void createDepthwiseConvCollapseMap(
    int64_t outputRank, SmallVector<ReassociationExprs, 4> &reassociationMap,
    OpBuilder &rewriter) {
  reassociationMap.resize(outputRank);
  for (int i = 0; i < outputRank; i++) {
    reassociationMap[i].push_back(rewriter.getAffineDimExpr(i));
  }
  reassociationMap[outputRank - 1].push_back(
      rewriter.getAffineDimExpr(outputRank));
}
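// Illustrative sketch (editorial): for outputRank = 4 this produces the
// reassociation [[d0], [d1], [d2], [d3, d4]], i.e. a rank-5 depthwise result
// tensor<NxHxWxCxM> collapses into the rank-4 tensor<NxHxWx(C*M)>.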
template <typename TosaConvOp, typename LinalgConvOp, typename LinalgConvQOp>
class ConvConverter : public OpConversionPattern<TosaConvOp> {
public:
  using OpConversionPattern<TosaConvOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(TosaConvOp op, typename TosaConvOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op->getLoc();
    Value input = op->getOperand(0);
    Value weight = op->getOperand(1);
    Value bias = op->getOperand(2);

    ShapedType inputTy = cast<ShapedType>(input.getType());
    ShapedType weightTy = cast<ShapedType>(weight.getType());
    ShapedType biasTy = cast<ShapedType>(bias.getType());
    ShapedType resultTy = cast<ShapedType>(op->getResult(0).getType());

    Type inputETy = inputTy.getElementType();
    Type resultETy = resultTy.getElementType();

    DenseI64ArrayAttr padAttr = op.getPadAttr();
    DenseI64ArrayAttr strideTosaAttr = op.getStrideAttr();
    DenseI64ArrayAttr dilationTosaAttr = op.getDilationAttr();
    bool isQuantized = op.getQuantizationInfo().has_value();
    if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "tosa.conv ops require static shapes for weight and bias");

    if (inputETy.isUnsignedInteger())
      return rewriter.notifyMatchFailure(
          op, "tosa.conv ops does not support unsigned integer input");
    SmallVector<int64_t> inputSizeDims;
    SmallVector<int64_t> kernelSizeDims;
    for (int i = 1; i < resultTy.getRank() - 1; i++) {
      inputSizeDims.push_back(i);
      kernelSizeDims.push_back(i);
    }

    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
        loc, input, weight, resultTy, padAttr.asArrayRef(),
        strideTosaAttr.asArrayRef(), dilationTosaAttr.asArrayRef(),
        inputSizeDims, kernelSizeDims, rewriter);
    auto weightShape = weightTy.getShape();

    // Apply padding as necessary.
    TypedAttr zeroAttr = rewriter.getZeroAttr(inputETy);
    if (isQuantized) {
      auto quantizationInfo = *op.getQuantizationInfo();
      int64_t iZp = quantizationInfo.getInputZp();

      int64_t intMin =
          APInt::getSignedMinValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();
      int64_t intMax =
          APInt::getSignedMaxValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();

      if (iZp < intMin || iZp > intMax)
        return rewriter.notifyMatchFailure(
            op, "tosa.conv op quantization has zp outside of input range");

      zeroAttr = rewriter.getIntegerAttr(inputETy, iZp);
    }
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    llvm::append_range(pad, padAttr.asArrayRef());
    pad.resize(pad.size() + 2, 0);
    input = applyPad(loc, input, pad, zeroAttr, rewriter);
    if (4 == inputTy.getRank()) {
      // For 2D convolutions, check whether the target linalg op expects an
      // HWCF kernel layout; if so, transpose the TOSA FHWC kernel to match.
      bool wantHwcf =
          isQuantized ? std::is_same_v<LinalgConvQOp, linalg::Conv2DNhwcHwcfQOp>
                      : std::is_same_v<LinalgConvOp, linalg::Conv2DNhwcHwcfOp>;
      if (wantHwcf) {
        SmallVector<int32_t> weightPerm;
        for (int i = 1; i < resultTy.getRank(); i++)
          weightPerm.push_back(i);
        weightPerm.push_back(0);

        SmallVector<int64_t> newWeightShape;
        for (auto dim : weightPerm)
          newWeightShape.push_back(weightShape[dim]);
        auto weightPermAttr = rewriter.getI32TensorAttr(weightPerm);
        Value weightPermValue =
            rewriter.create<arith::ConstantOp>(loc, weightPermAttr);
        Type newWeightTy =
            RankedTensorType::get(newWeightShape, weightTy.getElementType());
        weight = rewriter.create<tosa::TransposeOp>(loc, newWeightTy, weight,
                                                    weightPermValue);
      }
    }
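    // Worked example (editorial): for a 2D convolution targeting
    // linalg.conv_2d_nhwc_hwcf, weightPerm is [1, 2, 3, 0], so a TOSA FHWC
    // kernel tensor<8x3x3x4xf32> is transposed to the HWCF kernel
    // tensor<3x3x4x8xf32> expected by the linalg op.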
    // For 3D convolutions, the TOSA ODHWI kernel is likewise transposed to
    // the DHWCF layout expected by the linalg op.
    if (5 == inputTy.getRank()) {
      SmallVector<int32_t> weightPerm;
      for (int i = 1; i < resultTy.getRank(); i++)
        weightPerm.push_back(i);
      weightPerm.push_back(0);

      SmallVector<int64_t> newWeightShape;
      for (auto dim : weightPerm)
        newWeightShape.push_back(weightShape[dim]);
      auto weightPermAttr = rewriter.getI32TensorAttr(weightPerm);
      Value weightPermValue =
          rewriter.create<arith::ConstantOp>(loc, weightPermAttr);
      Type newWeightTy =
          RankedTensorType::get(newWeightShape, weightTy.getElementType());
      weight = rewriter.create<tosa::TransposeOp>(loc, newWeightTy, weight,
                                                  weightPermValue);
    }
    // Extract the attributes for convolution.
    ArrayRef<int64_t> stride = strideTosaAttr;
    ArrayRef<int64_t> dilation = dilationTosaAttr;

    // Create the convolution op.
    auto strideAttr = rewriter.getI64TensorAttr(stride);
    auto dilationAttr = rewriter.getI64TensorAttr(dilation);

    Value biasEmptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, resultTy.getShape(), resultETy, filteredDims);

    Value broadcastBias =
        linalgBroadcastAndMaybeExtSI(rewriter, loc, bias, biasEmptyTensor);
    if (isQuantized) {
      auto quantizationInfo = *op.getQuantizationInfo();
      auto iZp = rewriter.getI32IntegerAttr(quantizationInfo.getInputZp());
      auto kZp = rewriter.getI32IntegerAttr(quantizationInfo.getWeightZp());

      auto iZpVal = rewriter.create<arith::ConstantOp>(loc, iZp);
      auto kZpVal = rewriter.create<arith::ConstantOp>(loc, kZp);

      Value conv =
          rewriter
              .create<LinalgConvQOp>(
                  loc, resultTy, ValueRange{input, weight, iZpVal, kZpVal},
                  ValueRange{broadcastBias}, strideAttr, dilationAttr)
              ->getResult(0);

      rewriter.replaceOp(op, conv);
      return success();
    }
    Value conv = rewriter
                     .create<LinalgConvOp>(
                         loc, resultTy, ValueRange{input, weight},
                         ValueRange{broadcastBias}, strideAttr, dilationAttr)
                     ->getResult(0);

    rewriter.replaceOp(op, conv);
    return success();
  }
};
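// Illustrative lowering sketch (editorial; element types and shapes are
// assumed): a float tosa.conv2d with stride 1 and dilation 1 becomes roughly
//
//   %empty = tensor.empty() : tensor<1x12x12x8xf32>
//   %bias_bc = linalg.generic ... ins(%bias) outs(%empty)   // broadcast bias
//   %conv = linalg.conv_2d_nhwc_fhwc
//             {dilations = dense<1> : tensor<2xi64>,
//              strides = dense<1> : tensor<2xi64>}
//             ins(%input, %weight : tensor<1x14x14x4xf32>, tensor<8x3x3x4xf32>)
//             outs(%bias_bc : tensor<1x12x12x8xf32>) -> tensor<1x12x12x8xf32>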
class DepthwiseConvConverter
    : public OpConversionPattern<tosa::DepthwiseConv2DOp> {
public:
  using OpConversionPattern<tosa::DepthwiseConv2DOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::DepthwiseConv2DOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op->getLoc();
    Value input = op->getOperand(0);
    Value weight = op->getOperand(1);
    Value bias = op->getOperand(2);

    ShapedType inputTy = cast<ShapedType>(input.getType());
    ShapedType weightTy = cast<ShapedType>(weight.getType());
    ShapedType biasTy = cast<ShapedType>(bias.getType());
    ShapedType resultTy = cast<ShapedType>(op->getResult(0).getType());
    int64_t resultRank = resultTy.getRank();

    Type inputETy = inputTy.getElementType();
    Type resultETy = resultTy.getElementType();
    auto padAttr = cast<DenseI64ArrayAttr>(op->getAttr("pad"));
    auto strideTosaAttr = cast<DenseI64ArrayAttr>(op->getAttr("stride"));
    auto dilationTosaAttr = cast<DenseI64ArrayAttr>(op->getAttr("dilation"));
    if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "tosa.depthwise_conv ops require static shapes");

    // Compute output dynamic dims.
    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
        loc, input, weight, resultTy, padAttr.asArrayRef(),
        strideTosaAttr.asArrayRef(), dilationTosaAttr.asArrayRef(),
        /*inputSizeDims=*/{1, 2}, /*kernelSizeDims=*/{0, 1}, rewriter);
    bool isQuantized = op->hasAttr("quantization_info");
    IntegerAttr iZp = rewriter.getI32IntegerAttr(0);
    IntegerAttr kZp = rewriter.getI32IntegerAttr(0);
    if (isQuantized) {
      auto quantizationInfo =
          cast<tosa::ConvOpQuantizationAttr>(op->getAttr("quantization_info"));
      iZp = rewriter.getI32IntegerAttr(quantizationInfo.getInputZp());
      kZp = rewriter.getI32IntegerAttr(quantizationInfo.getWeightZp());
    }
    auto weightShape = weightTy.getShape();
    auto resultShape = resultTy.getShape();

    // Apply padding as necessary.
    TypedAttr zeroAttr = rewriter.getZeroAttr(inputETy);
    if (isQuantized) {
      auto quantizationInfo =
          cast<tosa::ConvOpQuantizationAttr>(op->getAttr("quantization_info"));
      int64_t iZp = quantizationInfo.getInputZp();

      int64_t intMin =
          APInt::getSignedMinValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();
      int64_t intMax =
          APInt::getSignedMaxValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();

      if (iZp < intMin || iZp > intMax)
        return rewriter.notifyMatchFailure(
            op, "tosa.depthwise_conv op quantization has zp outside of input "
                "range");

      zeroAttr = rewriter.getIntegerAttr(inputETy, iZp);
    }
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    llvm::append_range(pad, padAttr.asArrayRef());
    pad.resize(pad.size() + 2, 0);

    input = applyPad(loc, input, pad, zeroAttr, rewriter);
    // Extract the attributes for convolution.
    ArrayRef<int64_t> stride = strideTosaAttr;
    ArrayRef<int64_t> dilation = dilationTosaAttr;

    // Create the convolution op.
    auto strideAttr = rewriter.getI64TensorAttr(stride);
    auto dilationAttr = rewriter.getI64TensorAttr(dilation);
    ShapedType linalgConvTy =
        RankedTensorType::get({resultShape[0], resultShape[1], resultShape[2],
                               weightShape[2], weightShape[3]},
                              resultETy);
    auto resultZeroAttr = rewriter.getZeroAttr(resultETy);
    Value emptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, linalgConvTy.getShape(), resultETy, filteredDims);
    Value zero = rewriter.create<arith::ConstantOp>(loc, resultZeroAttr);
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{emptyTensor})
                           .result();

    Value biasEmptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, resultTy.getShape(), resultETy, filteredDims);

    // Broadcast the initial value to the output tensor before convolving.
    SmallVector<AffineMap, 4> indexingMaps;
    indexingMaps.push_back(getBroadcastingMap(rewriter, bias, biasEmptyTensor));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
    if (!isQuantized) {
      Value conv = rewriter
                       .create<linalg::DepthwiseConv2DNhwcHwcmOp>(
                           loc, linalgConvTy, ValueRange{input, weight},
                           ValueRange{zeroTensor}, strideAttr, dilationAttr)
                       .getResult(0);

      // Collapse the rank-5 convolution result back to the rank-4 result
      // type before adding the bias.
      SmallVector<ReassociationExprs, 4> reassociationMap;
      createDepthwiseConvCollapseMap(resultRank, reassociationMap, rewriter);
      Value convReshape = rewriter.create<tensor::CollapseShapeOp>(
          loc, resultTy, conv, reassociationMap);

      Value result =
          rewriter
              .create<linalg::GenericOp>(
                  loc, resultTy, ValueRange({bias, convReshape}),
                  biasEmptyTensor, indexingMaps,
                  getNParallelLoopsAttrs(resultRank),
                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
                      ValueRange args) {
                    Value added = nestedBuilder.create<arith::AddFOp>(
                        loc, args[0], args[1]);
                    nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                  })
              .getResult(0);
      rewriter.replaceOp(op, result);
    } else {
      auto iZpVal = rewriter.create<arith::ConstantOp>(loc, iZp);
      auto kZpVal = rewriter.create<arith::ConstantOp>(loc, kZp);
      Value conv =
          rewriter
              .create<linalg::DepthwiseConv2DNhwcHwcmQOp>(
                  loc, linalgConvTy, ValueRange{input, weight, iZpVal, kZpVal},
                  ValueRange{zeroTensor}, strideAttr, dilationAttr)
              .getResult(0);
      SmallVector<ReassociationExprs, 4> reassociationMap;
      createDepthwiseConvCollapseMap(resultRank, reassociationMap, rewriter);
      Value convReshape = rewriter.create<tensor::CollapseShapeOp>(
          loc, resultTy, conv, reassociationMap);
      Value result = linalgIntBroadcastExtSIAdd(
          rewriter, loc, bias, convReshape, biasEmptyTensor, indexingMaps);
      rewriter.replaceOp(op, result);
    }
    return success();
  }
};
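// Illustrative lowering sketch (editorial): the depthwise path first produces
// a rank-5 multiplier-expanded result, then collapses it and adds the bias:
//
//   %dw = linalg.depthwise_conv_2d_nhwc_hwcm ... -> tensor<1x12x12x4x2xf32>
//   %collapsed = tensor.collapse_shape %dw [[0], [1], [2], [3, 4]]
//       : tensor<1x12x12x4x2xf32> into tensor<1x12x12x8xf32>
//   %result = linalg.generic ... ins(%bias, %collapsed) ...  // bias add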
class MatMulConverter : public OpConversionPattern<tosa::MatMulOp> {
public:
  using OpConversionPattern<tosa::MatMulOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::MatMulOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op.getLoc();

    auto outputTy = cast<ShapedType>(op.getType());
    auto outputElementTy = outputTy.getElementType();

    SmallVector<Value> dynDims;
    dynDims.resize(cast<ShapedType>(op->getResult(0).getType()).getRank());

    if (!outputTy.hasRank() || outputTy.isDynamicDim(0)) {
      dynDims[0] = rewriter.create<tensor::DimOp>(loc, op->getOperand(0), 0);
    }

    if (!outputTy.hasRank() || outputTy.isDynamicDim(1)) {
      dynDims[1] = rewriter.create<tensor::DimOp>(loc, op->getOperand(0), 1);
    }

    if (!outputTy.hasRank() || outputTy.isDynamicDim(2)) {
      dynDims[2] = rewriter.create<tensor::DimOp>(loc, op->getOperand(1), 2);
    }
    SmallVector<Value> filteredDims = condenseValues(dynDims);

    auto zeroAttr = rewriter.getZeroAttr(outputElementTy);
    Value zero = rewriter.create<arith::ConstantOp>(loc, zeroAttr);
    auto emptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, outputTy.getShape(), outputTy.getElementType(), filteredDims);
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{emptyTensor})
                           .result();
    if (!op.getQuantizationInfo()) {
      rewriter.replaceOpWithNewOp<linalg::BatchMatmulOp>(
          op, TypeRange{op.getType()},
          ValueRange{adaptor.getA(), adaptor.getB()}, ValueRange{zeroTensor});
      return success();
    }

    auto quantizationInfo = *op.getQuantizationInfo();
    auto aZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(quantizationInfo.getAZp()));
    auto bZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(quantizationInfo.getBZp()));
    rewriter.replaceOpWithNewOp<linalg::QuantizedBatchMatmulOp>(
        op, TypeRange{op.getType()},
        ValueRange{adaptor.getA(), adaptor.getB(), aZp, bZp}, zeroTensor);

    return success();
  }
};
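// Illustrative lowering sketch (editorial): a float tosa.matmul becomes a
// zero-filled init plus a batch matmul:
//
//   %zero = arith.constant 0.0 : f32
//   %empty = tensor.empty() : tensor<2x3x5xf32>
//   %fill = linalg.fill ins(%zero : f32) outs(%empty : tensor<2x3x5xf32>)
//   %out = linalg.batch_matmul ins(%a, %b : tensor<2x3x4xf32>, tensor<2x4x5xf32>)
//                              outs(%fill : tensor<2x3x5xf32>)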
class FullyConnectedConverter
    : public OpConversionPattern<tosa::FullyConnectedOp> {
public:
  using OpConversionPattern<tosa::FullyConnectedOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::FullyConnectedOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    auto outputTy = cast<ShapedType>(op.getType());
    auto input = op.getInput();
    auto inputTy = cast<ShapedType>(input.getType());

    auto bias = op.getBias();

    auto weight = op.getWeight();
    auto weightTy = cast<ShapedType>(weight.getType());
    auto weightShape = weightTy.getShape();

    auto outputETy = outputTy.getElementType();

    SmallVector<Value> dynDims;
    dynDims.resize(cast<ShapedType>(op->getResult(0).getType()).getRank());
    if (!inputTy.hasRank() || inputTy.isDynamicDim(0)) {
      dynDims[0] = rewriter.create<tensor::DimOp>(loc, input, 0);
    }

    if (!weightTy.hasRank() || weightTy.isDynamicDim(0)) {
      dynDims[1] = rewriter.create<tensor::DimOp>(loc, weight, 0);
    }
    SmallVector<Value> filteredDims = condenseValues(dynDims);

    SmallVector<int64_t> permutation = {1, 0};
    auto permutationAttr = rewriter.getI64TensorAttr(permutation);
    Value permutationValue =
        rewriter.create<arith::ConstantOp>(loc, permutationAttr);

    SmallVector<int64_t> newWeightShape = {weightShape[1], weightShape[0]};
    Type newWeightTy =
        RankedTensorType::get(newWeightShape, weightTy.getElementType());

    Value transposedWeight = rewriter.create<tosa::TransposeOp>(
        loc, newWeightTy, weight, permutationValue);

    Value biasEmptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, outputTy.getShape(), outputETy, filteredDims);

    Value broadcastBias =
        linalgBroadcastAndMaybeExtSI(rewriter, loc, bias, biasEmptyTensor);
    if (!op.getQuantizationInfo()) {
      Value matmul = rewriter
                         .create<linalg::MatmulOp>(
                             loc, TypeRange{op.getType()},
                             ValueRange{input, transposedWeight}, broadcastBias)
                         ->getResult(0);

      rewriter.replaceOp(op, matmul);
      return success();
    }
    auto quantizationInfo = *op.getQuantizationInfo();
    auto inputZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(quantizationInfo.getInputZp()));
    auto outputZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(quantizationInfo.getWeightZp()));
    Value matmul =
        rewriter
            .create<linalg::QuantizedMatmulOp>(
                loc, TypeRange{op.getType()},
                ValueRange{input, transposedWeight, inputZp, outputZp},
                broadcastBias)
            ->getResult(0);
    rewriter.replaceOp(op, matmul);
    return success();
  }
};
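// Illustrative lowering sketch (editorial): tosa.fully_connected transposes
// the OxI weight to IxO, broadcasts the bias into the init, and emits a
// plain matmul:
//
//   %wt = tosa.transpose %weight, %perms : ... -> tensor<4x8xf32>
//   %bias_bc = linalg.generic ... ins(%bias) ...
//   %out = linalg.matmul ins(%input, %wt : tensor<2x4xf32>, tensor<4x8xf32>)
//                        outs(%bias_bc : tensor<2x8xf32>)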
class MaxPool2dConverter : public OpRewritePattern<tosa::MaxPool2dOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  // Compute the dynamic output sizes of the maxpool operation.
  static SmallVector<Value>
  computeDynamicOutputSizes(tosa::MaxPool2dOp op, PatternRewriter &rewriter) {
    TensorType resultTy = op.getType();
    Location loc = op.getLoc();
    TypedValue<TensorType> input = op.getInput();
    ArrayRef<int64_t> kernel = op.getKernel();
    ArrayRef<int64_t> pad = op.getPad();
    ArrayRef<int64_t> stride = op.getStride();
    SmallVector<Value> dynamicDims;

    // Batch dimension.
    if (resultTy.isDynamicDim(0))
      dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 0));

    // Height/width dimensions.
    for (int64_t dim : {1, 2}) {
      if (!resultTy.isDynamicDim(dim))
        continue;
      // Index into the pad/kernel/stride attribute arrays.
      int64_t index = dim - 1;
      // Input and kernel height/width.
      Value ihw = rewriter.create<tensor::DimOp>(loc, input, dim);
      Value khw = rewriter.create<arith::ConstantIndexOp>(loc, kernel[index]);
      // Output height/width.
      Value ohw = getConvOrPoolOutputDim(loc, ihw, pad[index * 2],
                                         pad[index * 2 + 1], khw, stride[index],
                                         /*dilationAttr=*/1, rewriter);
      dynamicDims.push_back(ohw);
    }

    // Channel dimension.
    if (resultTy.isDynamicDim(3))
      dynamicDims.push_back(rewriter.create<tensor::DimOp>(loc, input, 3));

    return dynamicDims;
  }
  LogicalResult matchAndRewrite(tosa::MaxPool2dOp op,
                                PatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    TypedValue<TensorType> input = op.getInput();
    ShapedType inputTy = input.getType();
    ShapedType resultTy = op.getType();
    Type resultETy = inputTy.getElementType();

    SmallVector<Value> dynamicDims = computeDynamicOutputSizes(op, rewriter);

    // Determine what the initial value needs to be for the max pool op.
    TypedAttr initialAttr;
    if (isa<FloatType>(resultETy))
      initialAttr = rewriter.getFloatAttr(
          resultETy, APFloat::getLargest(
                         cast<FloatType>(resultETy).getFloatSemantics(), true));
    if (isa<IntegerType>(resultETy))
      initialAttr = rewriter.getIntegerAttr(
          resultETy,
          APInt::getSignedMinValue(resultETy.getIntOrFloatBitWidth()));
    if (!initialAttr)
      return rewriter.notifyMatchFailure(
          op, "Unsupported initial value for tosa.maxpool_2d op");

    // Apply padding as necessary; the pad value is the identity of max.
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    llvm::append_range(pad, op.getPad());
    pad.resize(pad.size() + 2, 0);
    Value paddedInput = applyPad(loc, input, pad, initialAttr, rewriter);

    Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);

    ArrayRef<int64_t> kernel = op.getKernel();
    ArrayRef<int64_t> stride = op.getStride();
    Attribute strideAttr = rewriter.getI64VectorAttr(stride);
    Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});

    // Create the linalg op that performs pooling.
    Value emptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, resultTy.getShape(), resultTy.getElementType(), dynamicDims);

    Value filledEmptyTensor =
        rewriter.create<linalg::FillOp>(loc, initialValue, emptyTensor)
            .result();

    Value fakeWindowDims =
        rewriter.create<tensor::EmptyOp>(loc, kernel, resultETy);

    rewriter.replaceOpWithNewOp<linalg::PoolingNhwcMaxOp>(
        op, ArrayRef<Type>{resultTy}, ValueRange{paddedInput, fakeWindowDims},
        filledEmptyTensor, strideAttr, dilationAttr);
    return success();
  }
};
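// Illustrative lowering sketch (editorial): the input is padded with the
// minimum representable value, and the window shape is carried by an unused
// "fake" tensor so the named pooling op can infer its iteration domain:
//
//   %init = linalg.fill ins(%min : f32) outs(%empty : tensor<1x7x7x3xf32>)
//   %window = tensor.empty() : tensor<2x2xf32>
//   %out = linalg.pooling_nhwc_max
//            {dilations = dense<1> : vector<2xi64>,
//             strides = dense<2> : vector<2xi64>}
//            ins(%padded, %window : ...) outs(%init : tensor<1x7x7x3xf32>)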
class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(tosa::AvgPool2dOp op,
                                PatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    Value input = op.getInput();
    ShapedType inputTy = cast<ShapedType>(input.getType());
    Type inElementTy = inputTy.getElementType();

    ShapedType resultTy = cast<ShapedType>(op.getType());
    Type resultETy = cast<ShapedType>(op.getType()).getElementType();

    Type accETy = op.getAccType();
    ShapedType accTy = resultTy.clone(accETy);
    auto dynamicDimsOr =
        checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
    if (!dynamicDimsOr.has_value())
      return failure();
    SmallVector<Value> dynamicDims = *dynamicDimsOr;

    // Apply padding as necessary. Zero padding contributes nothing to the
    // window sum, and the divisor below only counts in-bounds elements.
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    llvm::append_range(pad, op.getPad());
    pad.resize(pad.size() + 2, 0);
    TypedAttr padAttr = rewriter.getZeroAttr(inElementTy);

    Value paddedInput = applyPad(loc, input, pad, padAttr, rewriter);

    TypedAttr initialAttr = rewriter.getZeroAttr(accETy);
    Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);
    ArrayRef<int64_t> kernel = op.getKernel();
    ArrayRef<int64_t> stride = op.getStride();
    Attribute strideAttr = rewriter.getI64VectorAttr(stride);
    Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});

    // Create the linalg op that performs the summing pooling.
    Value poolEmptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, accTy.getShape(), accETy, dynamicDims);

    Value filledEmptyTensor =
        rewriter
            .create<linalg::FillOp>(loc, ValueRange{initialValue},
                                    ValueRange{poolEmptyTensor})
            .result();

    Value fakeWindowDims =
        rewriter.create<tensor::EmptyOp>(loc, kernel, accETy);

    // Sum within the window.
    Value poolingOp = rewriter
                          .create<linalg::PoolingNhwcSumOp>(
                              loc, ArrayRef<Type>{accTy},
                              ValueRange{paddedInput, fakeWindowDims},
                              filledEmptyTensor, strideAttr, dilationAttr)
                          .getResult(0);
    // Normalize the summed value by the number of elements grouped in each
    // pool.
    Value iH = rewriter.create<tensor::DimOp>(loc, poolingOp, 1);
    Value iW = rewriter.create<tensor::DimOp>(loc, poolingOp, 2);

    auto one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
    iH = rewriter.create<arith::SubIOp>(loc, iH, one);
    iW = rewriter.create<arith::SubIOp>(loc, iW, one);
    Value genericEmptyTensor = rewriter.create<tensor::EmptyOp>(
        loc, resultTy.getShape(), resultETy, dynamicDims);

    auto affineMap = rewriter.getMultiDimIdentityMap(resultTy.getRank());
    auto genericOp = rewriter.create<linalg::GenericOp>(
        loc, ArrayRef<Type>({resultTy}), ValueRange{poolingOp},
        ValueRange{genericEmptyTensor},
        ArrayRef<AffineMap>({affineMap, affineMap}),
        getNParallelLoopsAttrs(resultTy.getRank()),
        [&](OpBuilder &b, Location loc, ValueRange args) {
          auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);

          // Determines what portion of valid input is covered by the kernel.
          auto padFn = [&](Value valid, Value pos, int64_t pad) -> Value {
            if (pad == 0)
              return valid;

            auto padVal = rewriter.create<arith::ConstantIndexOp>(loc, pad);
            Value dpos = rewriter.create<arith::SubIOp>(loc, pos, padVal);

            Value offset = rewriter.create<arith::MinSIOp>(loc, dpos, zero);
            return rewriter.create<arith::AddIOp>(loc, valid, offset)
                ->getResult(0);
          };
          // Compute the in-bounds kernel coverage from either end of the
          // dimension.
          auto coverageFn = [&](int64_t i, Value isize) -> Value {
            Value strideVal =
                rewriter.create<arith::ConstantIndexOp>(loc, stride[i - 1]);
            Value val =
                rewriter.create<arith::ConstantIndexOp>(loc, kernel[i - 1]);

            // Find the position relative to each end of the input dimension.
            Value left = rewriter.create<linalg::IndexOp>(loc, i);
            Value right = rewriter.create<arith::SubIOp>(loc, isize, left);
            left = rewriter.create<arith::MulIOp>(loc, left, strideVal);
            right = rewriter.create<arith::MulIOp>(loc, right, strideVal);

            // Determine how much padding was included.
            val = padFn(val, left, pad[i * 2]);
            val = padFn(val, right, pad[i * 2 + 1]);
            return rewriter.create<arith::MaxSIOp>(loc, one, val);
          };

          // Compute the coverage for each spatial dimension.
          Value kH3 = coverageFn(1, iH);
          Value kW3 = coverageFn(2, iW);
          // Divide the summed value by the number of in-bounds elements in
          // the window.
          auto count = rewriter.create<arith::IndexCastOp>(
              loc, rewriter.getI32Type(),
              rewriter.create<arith::MulIOp>(loc, kH3, kW3));
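          // Worked example (editorial): with kernel = [3, 3], stride = [1, 1]
          // and pad = 1 on every side, a corner output element covers only
          // 2 * 2 = 4 in-bounds inputs, an edge element 2 * 3 = 6, and an
          // interior element the full 3 * 3 = 9; `count` is that per-element
          // divisor.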
          Value poolVal = args[0];
          if (isa<FloatType>(accETy)) {
            auto countF = rewriter.create<arith::SIToFPOp>(loc, accETy, count);
            poolVal = rewriter.create<arith::DivFOp>(loc, poolVal, countF)
                          ->getResult(0);
            if (accETy.getIntOrFloatBitWidth() >
                resultETy.getIntOrFloatBitWidth())
              poolVal =
                  rewriter.create<arith::TruncFOp>(loc, resultETy, poolVal);
          } else {
            // If we have quantization information we need to apply an offset
            // for the input zp value.
            if (op.getQuantizationInfo()) {
              auto quantizationInfo = *op.getQuantizationInfo();
              auto inputZp = rewriter.create<arith::ConstantOp>(
                  loc, b.getIntegerAttr(accETy, quantizationInfo.getInputZp()));
              Value offset =
                  rewriter.create<arith::MulIOp>(loc, accETy, count, inputZp);
              poolVal =
                  rewriter.create<arith::SubIOp>(loc, accETy, poolVal, offset);
            }
            // Compute: k = 32 - count leading zeros of (count - 1).
            Value one32 = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI32IntegerAttr(1));
            Value thirtyTwo32 = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI32IntegerAttr(32));

            Value countSubOne =
                rewriter.create<arith::SubIOp>(loc, count, one32);
            Value leadingZeros =
                rewriter.create<math::CountLeadingZerosOp>(loc, countSubOne);
            Value k =
                rewriter.create<arith::SubIOp>(loc, thirtyTwo32, leadingZeros);
            // Compute: numerator = ((1 << 30) + 1) << k.
            Value k64 =
                rewriter.create<arith::ExtUIOp>(loc, rewriter.getI64Type(), k);
            Value thirtyShiftPlusOne = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI64IntegerAttr((1 << 30) + 1));
            Value numerator =
                rewriter.create<arith::ShLIOp>(loc, thirtyShiftPlusOne, k64);

            // Compute: multiplier = numerator / count.
            Value count64 = rewriter.create<arith::ExtUIOp>(
                loc, rewriter.getI64Type(), count);
            Value multiplier =
                rewriter.create<arith::DivUIOp>(loc, numerator, count64);
            multiplier = rewriter.create<arith::TruncIOp>(
                loc, rewriter.getI32Type(), multiplier);
            // Compute: shift = 30 + k.
            Value k8 =
                rewriter.create<arith::TruncIOp>(loc, rewriter.getI8Type(), k);
            Value thirty8 = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI8IntegerAttr(30));
            Value shift = rewriter.create<arith::AddIOp>(loc, k8, thirty8);

            auto scaled =
                rewriter
                    .create<tosa::ApplyScaleOp>(loc, rewriter.getI32Type(),
                                                poolVal, multiplier, shift,
                                                rewriter.getBoolAttr(false))
                    .getResult();
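            // Worked example (editorial): for count = 9, k = 32 - clz(8) = 4,
            // numerator = ((1 << 30) + 1) << 4, multiplier = numerator / 9 =
            // 1908874355, shift = 34; tosa.apply_scale then computes
            // (poolVal * 1908874355) >> 34, which is approximately poolVal / 9.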
            // If we have quantization information we need to apply the
            // output zeropoint.
            if (op.getQuantizationInfo()) {
              auto quantizationInfo = *op.getQuantizationInfo();
              auto outputZp = rewriter.create<arith::ConstantOp>(
                  loc, b.getIntegerAttr(scaled.getType(),
                                        quantizationInfo.getOutputZp()));
              scaled = rewriter.create<arith::AddIOp>(loc, scaled, outputZp)
                           .getResult();
            }
            // Apply clip.
            int64_t outBitwidth = resultETy.getIntOrFloatBitWidth();

            auto min = rewriter.create<arith::ConstantIntOp>(
                loc, APInt::getSignedMinValue(outBitwidth).getSExtValue(),
                accETy);
            auto max = rewriter.create<arith::ConstantIntOp>(
                loc, APInt::getSignedMaxValue(outBitwidth).getSExtValue(),
                accETy);
            auto clamp = clampIntHelper(loc, scaled, min, max, rewriter,
                                        /*isUnsigned=*/false);

            poolVal = clamp;
            // Convert type.
            if (resultETy != clamp.getType()) {
              poolVal =
                  rewriter.create<arith::TruncIOp>(loc, resultETy, poolVal);
            }
          }

          rewriter.create<linalg::YieldOp>(loc, poolVal);
        });

    rewriter.replaceOp(op, genericOp.getResult(0));
    return success();
  }
};
class TransposeConverter : public OpRewritePattern<tosa::TransposeOp> {
public:
  using OpRewritePattern<tosa::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tosa::TransposeOp op,
                                PatternRewriter &rewriter) const final {
    SmallVector<int32_t> constantPerms;
    if (failed(op.getConstantPerms(constantPerms)))
      return failure();

    Location loc = op.getLoc();
    // The verifier should have made sure we have a valid TOSA permutation
    // tensor, so the permuted operand sizes can be computed directly.
    SmallVector<OpFoldResult> inputSizes =
        tensor::getMixedSizes(rewriter, loc, op.getInput1());
    auto permutedSizes =
        applyTOSAPermutation<OpFoldResult>(inputSizes, constantPerms);

    auto permutedInit = rewriter.create<tensor::EmptyOp>(
        loc, permutedSizes, op.getInput1().getType().getElementType());
    rewriter.replaceOpWithNewOp<linalg::TransposeOp>(
        op, op.getInput1(), permutedInit,
        llvm::to_vector(llvm::map_range(
            constantPerms, [](int32_t v) -> int64_t { return v; })));
    return success();
  }
};
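// Illustrative lowering sketch (editorial): a tosa.transpose with constant
// perms [0, 2, 3, 1] becomes
//
//   %init = tensor.empty() : tensor<1x3x4x2xf32>
//   %out = linalg.transpose ins(%arg0 : tensor<1x2x3x4xf32>)
//                           outs(%init : tensor<1x3x4x2xf32>)
//                           permutation = [0, 2, 3, 1]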
void mlir::tosa::populateTosaToLinalgNamedConversionPatterns(
    RewritePatternSet *patterns, const TosaToLinalgNamedOptions &options) {
  if (options.preferConv2DKernelLayoutHWCF) {
    patterns->add<ConvConverter<tosa::Conv2DOp, linalg::Conv2DNhwcHwcfOp,
                                linalg::Conv2DNhwcHwcfQOp>>(
        patterns->getContext());
  } else {
    patterns->add<ConvConverter<tosa::Conv2DOp, linalg::Conv2DNhwcFhwcOp,
                                linalg::Conv2DNhwcFhwcQOp>>(
        patterns->getContext());
  }
  patterns->add<
      ConvConverter<tosa::Conv3DOp, linalg::Conv3DNdhwcDhwcfOp, linalg::Conv3DNdhwcDhwcfQOp>,
      DepthwiseConvConverter,
      MatMulConverter,
      MaxPool2dConverter,
      AvgPool2dConverter,
      FullyConnectedConverter,
      TransposeConverter>(patterns->getContext());
}
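// Minimal usage sketch (editorial; the pass boilerplate below is an
// assumption for illustration, only populateTosaToLinalgNamedConversionPatterns
// and TosaToLinalgNamedOptions come from this file):
//
//   RewritePatternSet patterns(ctx);
//   TosaToLinalgNamedOptions options;
//   options.preferConv2DKernelLayoutHWCF = false;
//   mlir::tosa::populateTosaToLinalgNamedConversionPatterns(&patterns, options);
//
//   ConversionTarget target(*ctx);
//   target.addLegalDialect<linalg::LinalgDialect, tensor::TensorDialect,
//                          arith::ArithDialect>();
//   target.addIllegalDialect<tosa::TosaDialect>();
//   if (failed(applyPartialConversion(moduleOp, target, std::move(patterns))))
//     signalPassFailure();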