static mlir::Value applyPad(Location loc, Value input, ArrayRef<int64_t> pad,
                            Attribute padAttr, OpBuilder &rewriter) {
  // Nothing to do if no padding is requested.
  if (llvm::all_of(pad, [](int64_t p) { return p == 0; }))
    return input;

  ShapedType inputTy = input.getType().cast<ShapedType>();
  Type inputETy = inputTy.getElementType();
  auto inputShape = inputTy.getShape();

  // `pad` holds a (low, high) pair for every dimension.
  assert((inputShape.size() * 2) == pad.size());

  SmallVector<int64_t, 4> paddedShape;
  SmallVector<OpFoldResult, 8> lowIndices;
  SmallVector<OpFoldResult, 8> highIndices;
  for (int i = 0, s = inputShape.size(); i < s; i++) {
    auto lowPad = pad[i * 2];
    auto highPad = pad[i * 2 + 1];
    if (ShapedType::isDynamic(inputShape[i]))
      paddedShape.push_back(inputShape[i]);
    else
      paddedShape.push_back(inputShape[i] + highPad + lowPad);
    lowIndices.push_back(rewriter.getIndexAttr(lowPad));
    highIndices.push_back(rewriter.getIndexAttr(highPad));
  }

  Value padValue = rewriter.create<arith::ConstantOp>(loc, padAttr);

  return tensor::createPadScalarOp(
             RankedTensorType::get(paddedShape, inputETy), input, padValue,
             lowIndices, highIndices, /*nofold=*/false, loc, rewriter)
      .result();
}
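// Illustration (standalone C++ sketch, not from this file): how the
// (low, high) pad pairs grow a static shape; dynamic dims pass through
// unchanged in the code above.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<int64_t> paddedShapeExample(const std::vector<int64_t> &shape,
                                        const std::vector<int64_t> &pad) {
  assert(shape.size() * 2 == pad.size());
  std::vector<int64_t> out;
  for (size_t i = 0; i < shape.size(); ++i)
    out.push_back(shape[i] + pad[2 * i] + pad[2 * i + 1]);
  return out;
}
// e.g. shape {1, 7, 7, 3} with pad {0,0, 1,1, 1,1, 0,0} -> {1, 9, 9, 3}.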
// Computes one spatial output dimension of a convolution:
//   out = (in + padBefore + padAfter - (dilation * (kernel - 1) + 1)) / stride + 1
static mlir::Value getConvOutputDim(Location loc, Value initDim,
                                    Attribute padBeforeAttr,
                                    Attribute padAfterAttr, Value kernelDim,
                                    Attribute strideAttr,
                                    Attribute dilationAttr, Type inputETy,
                                    OpBuilder &rewriter) {
  ImplicitLocOpBuilder builder(loc, rewriter);
  auto one = rewriter.create<arith::ConstantOp>(
      loc, IntegerAttr::get(initDim.getType(), 1));
  Value padBefore = reifyConstantDim(padBeforeAttr, builder);
  Value paddedBefore = builder.create<arith::AddIOp>(initDim, padBefore);
  Value padAfter = reifyConstantDim(padAfterAttr, builder);
  Value paddedAfter = builder.create<arith::AddIOp>(paddedBefore, padAfter);

  Value subOne = builder.create<arith::SubIOp>(kernelDim, one);
  Value dilation = reifyConstantDim(dilationAttr, builder);
  Value dilated = builder.create<arith::MulIOp>(dilation, subOne);
  Value addOne = builder.create<arith::AddIOp>(dilated, one);

  Value subtract = builder.create<arith::SubIOp>(paddedAfter, addOne);
  Value stride = reifyConstantDim(strideAttr, builder);
  Value divide = builder.create<arith::DivUIOp>(subtract, stride);
  return builder.create<arith::AddIOp>(divide, one);
}
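// Illustration (standalone C++ sketch, not from this file): the same output
// size formula, constant-folded.
#include <cstdint>

int64_t convOutputDimExample(int64_t in, int64_t padBefore, int64_t padAfter,
                             int64_t kernel, int64_t stride, int64_t dilation) {
  return (in + padBefore + padAfter - (dilation * (kernel - 1) + 1)) / stride +
         1;
}
// e.g. in=7, pads=1/1, kernel=3, stride=2, dilation=1 -> (9 - 3) / 2 + 1 = 4.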
static SmallVector<Value> inferDynamicDimsForConv(
    Location loc, Value input, Value weight, ShapedType resultTy,
    ArrayAttr padAttr, ArrayAttr strideAttr, ArrayAttr dilationAttr,
    int64_t weightHDim, int64_t weightWDim, OpBuilder &rewriter) {
  ShapedType inputTy = input.getType().cast<ShapedType>();
  Type inputETy = inputTy.getElementType();
  int64_t inputRank = inputTy.getRank();
  int64_t heightDim = 1;
  int64_t weightDim = 2; // width dim of the NHWC input

  SmallVector<Value> dynDims;
  dynDims.resize(resultTy.getRank());
  // Dynamic dims other than height/width carry straight over from the input.
  for (int i = 0; i < inputRank; i++) {
    if (inputTy.isDynamicDim(i) && i != heightDim && i != weightDim)
      dynDims[i] = rewriter.create<tensor::DimOp>(loc, input, i);
  }

  // Dynamic input height: compute the output height with the conv formula.
  if (inputTy.isDynamicDim(heightDim)) {
    Value initHDim =
        rewriter.create<tensor::DimOp>(loc, input, heightDim).getResult();
    Value kernelHDim =
        rewriter.create<tensor::DimOp>(loc, weight, weightHDim).getResult();
    dynDims[heightDim] = getConvOutputDim(
        loc, initHDim, padAttr.getValue()[0], padAttr.getValue()[1],
        kernelHDim, strideAttr.getValue()[0], dilationAttr.getValue()[0],
        inputETy, rewriter);
  }

  // Dynamic input width: the same computation with the width attributes.
  if (inputTy.isDynamicDim(weightDim)) {
    Value initWDim =
        rewriter.create<tensor::DimOp>(loc, input, weightDim).getResult();
    Value kernelWDim =
        rewriter.create<tensor::DimOp>(loc, weight, weightWDim).getResult();
    dynDims[weightDim] = getConvOutputDim(
        loc, initWDim, padAttr.getValue()[2], padAttr.getValue()[3],
        kernelWDim, strideAttr.getValue()[1], dilationAttr.getValue()[1],
        inputETy, rewriter);
  }

  return condenseValues(dynDims);
}
// Builds the reassociation map that folds the trailing multiplier dimension
// of a depthwise conv result back into the channel dimension.
static void createDepthwiseConvCollapseMap(
    int64_t outputRank, SmallVector<ReassociationExprs, 4> &reassociationMap,
    OpBuilder &rewriter) {
  reassociationMap.resize(outputRank);
  for (int i = 0; i < outputRank; i++)
    reassociationMap[i].push_back(rewriter.getAffineDimExpr(i));
  reassociationMap[outputRank - 1].push_back(
      rewriter.getAffineDimExpr(outputRank));
}
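// Illustration (standalone C++ sketch, not from this file): for
// outputRank = 4 the map built above is {{d0}, {d1}, {d2}, {d3, d4}}, i.e.
// tensor<NxHxWxCxM> collapses to tensor<NxHxWx(C*M)>.
#include <cstdint>
#include <vector>

std::vector<std::vector<int64_t>> collapseMapExample(int64_t outputRank) {
  std::vector<std::vector<int64_t>> map(outputRank);
  for (int64_t i = 0; i < outputRank; ++i)
    map[i] = {i};
  map[outputRank - 1].push_back(outputRank); // fold the trailing dim in
  return map;
}
// collapseMapExample(4) == {{0}, {1}, {2}, {3, 4}}.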
class ConvConverter : public OpConversionPattern<tosa::Conv2DOp> {
public:
  using OpConversionPattern<tosa::Conv2DOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::Conv2DOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op->getLoc();
    Value input = op->getOperand(0);
    Value weight = op->getOperand(1);
    Value bias = op->getOperand(2);

    ShapedType inputTy = input.getType().cast<ShapedType>();
    ShapedType weightTy = weight.getType().cast<ShapedType>();
    ShapedType biasTy = bias.getType().cast<ShapedType>();
    ShapedType resultTy = op->getResult(0).getType().cast<ShapedType>();

    Type inputETy = inputTy.getElementType();
    Type resultETy = resultTy.getElementType();

    auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
    auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
    auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();
    bool isQuantized = op->hasAttr("quantization_info");

    if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "tosa.conv ops require static shapes for weight and bias");

    if (inputETy.isUnsignedInteger())
      return rewriter.notifyMatchFailure(
          op, "tosa.conv ops do not support unsigned integer input");

    // Infer any dynamic output dims; conv weight height/width are dims 1/2.
    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
        loc, input, weight, resultTy, padAttr, strideTosaAttr,
        dilationTosaAttr, /*weightHDim=*/1, /*weightWDim=*/2, rewriter);

    auto weightShape = weightTy.getShape();

    // When quantized, pad with the input zero point instead of zero, and
    // verify the zero point is representable in the input type.
    Attribute zeroAttr = rewriter.getZeroAttr(inputETy);
    if (isQuantized) {
      auto quantizationInfo =
          op->getAttr("quantization_info").cast<tosa::ConvOpQuantizationAttr>();
      auto iZp = quantizationInfo.input_zp().getValue().getSExtValue();

      int64_t intMin =
          APInt::getSignedMinValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();
      int64_t intMax =
          APInt::getSignedMaxValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();

      if (iZp < intMin || iZp > intMax)
        return rewriter.notifyMatchFailure(
            op, "tosa.conv op quantization has zp outside of input range");

      zeroAttr = rewriter.getIntegerAttr(inputETy, iZp);
    }

    // Apply padding as necessary; TOSA pads only the two spatial dims.
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    getValuesFromIntArrayAttribute(padAttr, pad);
    pad.resize(pad.size() + 2, 0);
    input = applyPad(loc, input, pad, zeroAttr, rewriter);

    // Transpose the TOSA [OC, KH, KW, IC] kernel into the [KH, KW, IC, OC]
    // layout expected by the linalg convolution.
    SmallVector<int64_t> weightPerm{1, 2, 3, 0};
    SmallVector<int64_t> newWeightShape{weightShape[1], weightShape[2],
                                        weightShape[3], weightShape[0]};
    auto weightPermAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({4}, rewriter.getI64Type()), weightPerm);
    Value weightPermValue =
        rewriter.create<arith::ConstantOp>(loc, weightPermAttr);
    Type newWeightTy =
        RankedTensorType::get(newWeightShape, weightTy.getElementType());
    weight = rewriter.create<tosa::TransposeOp>(loc, newWeightTy, weight,
                                                weightPermValue);

    // Zero-fill the accumulator tensor.
    Attribute resultZeroAttr = rewriter.getZeroAttr(resultETy);
    Value initTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, resultTy.getShape(), resultETy);
    Value zero = rewriter.create<arith::ConstantOp>(loc, resultZeroAttr);
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{initTensor})
                           .result();

    // Extract the attributes for convolution.
    llvm::SmallVector<int64_t> stride, dilation;
    getValuesFromIntArrayAttribute(strideTosaAttr, stride);
    getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);

    auto strideAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({2}, rewriter.getI64Type()), stride);
    auto dilationAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({2}, rewriter.getI64Type()), dilation);

    // Maps for broadcasting the 1-D bias across the output channels (dim 3).
    SmallVector<AffineMap, 4> indexingMaps;
    indexingMaps.push_back(AffineMap::get(
        /*dimCount=*/resultTy.getRank(), /*symbolCount=*/0,
        {rewriter.getAffineDimExpr(3)}, rewriter.getContext()));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultTy.getRank()));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultTy.getRank()));

    Value biasInitTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, resultTy.getShape(), resultETy);

    if (isQuantized) {
      auto quantizationInfo =
          op->getAttr("quantization_info").cast<tosa::ConvOpQuantizationAttr>();
      auto iZp = rewriter.getI32IntegerAttr(
          quantizationInfo.input_zp().getValue().getSExtValue());
      auto kZp = rewriter.getI32IntegerAttr(
          quantizationInfo.weight_zp().getValue().getSExtValue());

      auto iZpVal = rewriter.create<arith::ConstantOp>(loc, iZp);
      auto kZpVal = rewriter.create<arith::ConstantOp>(loc, kZp);
      Value conv =
          rewriter
              .create<linalg::Conv2DNhwcHwcfQOp>(
                  loc, resultTy, ValueRange{input, weight, iZpVal, kZpVal},
                  ValueRange{zeroTensor}, strideAttr, dilationAttr)
              ->getResult(0);

      Value result =
          rewriter
              .create<linalg::GenericOp>(
                  loc, resultTy, ValueRange({bias, conv}), biasInitTensor,
                  indexingMaps, getNParallelLoopsAttrs(resultTy.getRank()),
                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
                      ValueRange args) {
                    Value added = nestedBuilder.create<arith::AddIOp>(
                        loc, args[0], args[1]);
                    nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                  })
              .getResult(0);
      rewriter.replaceOp(op, result);
      return success();
    }

    Value conv = rewriter
                     .create<linalg::Conv2DNhwcHwcfOp>(
                         loc, resultTy, ValueRange{input, weight},
                         ValueRange{zeroTensor}, strideAttr, dilationAttr)
                     ->getResult(0);

    Value result =
        rewriter
            .create<linalg::GenericOp>(
                loc, resultTy, ValueRange({bias, conv}), biasInitTensor,
                indexingMaps, getNParallelLoopsAttrs(resultTy.getRank()),
                [&](OpBuilder &nestedBuilder, Location nestedLoc,
                    ValueRange args) {
                  Value added = nestedBuilder.create<arith::AddFOp>(
                      loc, args[0], args[1]);
                  nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                })
            .getResult(0);

    rewriter.replaceOp(op, result);
    return success();
  }
};
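// Illustration (standalone C++ sketch, not from this file): the {1, 2, 3, 0}
// permutation above relayouts tosa.conv2d weights from [OC, KH, KW, IC] to
// the [KH, KW, IC, OC] order that linalg.conv_2d_nhwc_hwcf expects.
#include <array>
#include <cstdint>

std::array<int64_t, 4> permuteShapeExample(const std::array<int64_t, 4> &shape,
                                           const std::array<int64_t, 4> &perm) {
  std::array<int64_t, 4> out;
  for (int i = 0; i < 4; ++i)
    out[i] = shape[perm[i]];
  return out;
}
// permuteShapeExample({8, 3, 3, 4}, {1, 2, 3, 0}) == {3, 3, 4, 8}.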
class DepthwiseConvConverter
    : public OpConversionPattern<tosa::DepthwiseConv2DOp> {
public:
  using OpConversionPattern<tosa::DepthwiseConv2DOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::DepthwiseConv2DOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op->getLoc();
    Value input = op->getOperand(0);
    Value weight = op->getOperand(1);
    Value bias = op->getOperand(2);

    ShapedType inputTy = input.getType().cast<ShapedType>();
    ShapedType weightTy = weight.getType().cast<ShapedType>();
    ShapedType biasTy = bias.getType().cast<ShapedType>();
    ShapedType resultTy = op->getResult(0).getType().cast<ShapedType>();
    int64_t resultRank = resultTy.getRank();

    Type inputETy = inputTy.getElementType();
    Type resultETy = resultTy.getElementType();

    auto padAttr = op->getAttr("pad").cast<ArrayAttr>();
    auto strideTosaAttr = op->getAttr("stride").cast<ArrayAttr>();
    auto dilationTosaAttr = op->getAttr("dilation").cast<ArrayAttr>();

    if (!weightTy.hasStaticShape() || !biasTy.hasStaticShape())
      return rewriter.notifyMatchFailure(
          op, "tosa.depthwise_conv ops require static shapes");

    // Infer any dynamic output dims; depthwise weights are [KH, KW, C, M],
    // so height/width are dims 0/1.
    SmallVector<Value> filteredDims = inferDynamicDimsForConv(
        loc, input, weight, resultTy, padAttr, strideTosaAttr,
        dilationTosaAttr, /*weightHDim=*/0, /*weightWDim=*/1, rewriter);

    bool isQuantized = op->hasAttr("quantization_info");
    IntegerAttr iZp;
    IntegerAttr kZp;
    if (isQuantized) {
      auto quantizationInfo =
          op->getAttr("quantization_info").cast<tosa::ConvOpQuantizationAttr>();
      iZp = rewriter.getI32IntegerAttr(
          quantizationInfo.input_zp().getValue().getSExtValue());
      kZp = rewriter.getI32IntegerAttr(
          quantizationInfo.weight_zp().getValue().getSExtValue());
    }

    auto weightShape = weightTy.getShape();
    auto resultShape = resultTy.getShape();

    // When quantized, pad with the input zero point and check its range.
    Attribute zeroAttr = rewriter.getZeroAttr(inputETy);
    if (isQuantized) {
      auto quantizationInfo =
          op->getAttr("quantization_info").cast<tosa::ConvOpQuantizationAttr>();
      auto iZp = quantizationInfo.input_zp().getValue().getSExtValue();

      int64_t intMin =
          APInt::getSignedMinValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();
      int64_t intMax =
          APInt::getSignedMaxValue(inputETy.getIntOrFloatBitWidth())
              .getSExtValue();

      if (iZp < intMin || iZp > intMax)
        return rewriter.notifyMatchFailure(
            op, "tosa.depthwise_conv op quantization has zp outside of input "
                "range");

      zeroAttr = rewriter.getIntegerAttr(inputETy, iZp);
    }

    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    getValuesFromIntArrayAttribute(padAttr, pad);
    pad.resize(pad.size() + 2, 0);

    input = applyPad(loc, input, pad, zeroAttr, rewriter);

    // Extract the attributes for convolution.
    llvm::SmallVector<int64_t> stride, dilation;
    getValuesFromIntArrayAttribute(strideTosaAttr, stride);
    getValuesFromIntArrayAttribute(dilationTosaAttr, dilation);

    auto strideAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({2}, rewriter.getI64Type()), stride);
    auto dilationAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({2}, rewriter.getI64Type()), dilation);

    // The linalg op computes [N, H, W, C, M]; it is collapsed to the TOSA
    // result shape [N, H, W, C * M] afterwards.
    ShapedType linalgConvTy =
        RankedTensorType::get({resultShape[0], resultShape[1], resultShape[2],
                               weightShape[2], weightShape[3]},
                              resultETy);

    // Maps for broadcasting the 1-D bias across the output channels (dim 3).
    SmallVector<AffineMap, 4> indexingMaps;
    indexingMaps.push_back(AffineMap::get(
        /*dimCount=*/resultRank, /*symbolCount=*/0,
        {rewriter.getAffineDimExpr(3)}, rewriter.getContext()));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(resultRank));

    Attribute resultZeroAttr = rewriter.getZeroAttr(resultETy);
    Value initTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, linalgConvTy.getShape(), resultETy);
    Value zero = rewriter.create<arith::ConstantOp>(loc, resultZeroAttr);
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{initTensor})
                           .result();

    Value biasInitTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, resultTy.getShape(), resultETy);
    if (!isQuantized) {
      Value conv = rewriter
                       .create<linalg::DepthwiseConv2DNhwcHwcmOp>(
                           loc, linalgConvTy, ValueRange{input, weight},
                           ValueRange{zeroTensor}, strideAttr, dilationAttr)
                       .getResult(0);

      SmallVector<ReassociationExprs, 4> reassociationMap;
      createDepthwiseConvCollapseMap(resultRank, reassociationMap, rewriter);
      Value convReshape = rewriter.create<tensor::CollapseShapeOp>(
          loc, resultTy, conv, reassociationMap);

      Value result =
          rewriter
              .create<linalg::GenericOp>(
                  loc, resultTy, ValueRange({bias, convReshape}),
                  biasInitTensor, indexingMaps,
                  getNParallelLoopsAttrs(resultRank),
                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
                      ValueRange args) {
                    Value added = nestedBuilder.create<arith::AddFOp>(
                        loc, args[0], args[1]);
                    nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                  })
              .getResult(0);
      rewriter.replaceOp(op, result);
    } else {
      auto iZpVal = rewriter.create<arith::ConstantOp>(loc, iZp);
      auto kZpVal = rewriter.create<arith::ConstantOp>(loc, kZp);
      Value conv =
          rewriter
              .create<linalg::DepthwiseConv2DNhwcHwcmQOp>(
                  loc, linalgConvTy,
                  ValueRange{input, weight, iZpVal, kZpVal},
                  ValueRange{zeroTensor}, strideAttr, dilationAttr)
              .getResult(0);

      SmallVector<ReassociationExprs, 4> reassociationMap;
      createDepthwiseConvCollapseMap(resultRank, reassociationMap, rewriter);
      Value convReshape = rewriter.create<tensor::CollapseShapeOp>(
          loc, resultTy, conv, reassociationMap);

      Value result =
          rewriter
              .create<linalg::GenericOp>(
                  loc, resultTy, ValueRange({bias, convReshape}),
                  biasInitTensor, indexingMaps,
                  getNParallelLoopsAttrs(resultRank),
                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
                      ValueRange args) {
                    Value added = nestedBuilder.create<arith::AddIOp>(
                        loc, args[0], args[1]);
                    nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                  })
              .getResult(0);
      rewriter.replaceOp(op, result);
    }
    return success();
  }
};
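// Illustration (standalone C++ sketch, not from this file): the quantized
// (*Q) linalg ops accumulate with both zero points subtracted, i.e. each
// partial product is (x - iZp) * (w - kZp) in 32-bit arithmetic.
#include <cstdint>

int32_t quantizedMacExample(int32_t acc, int8_t x, int8_t w, int32_t iZp,
                            int32_t kZp) {
  return acc +
         (static_cast<int32_t>(x) - iZp) * (static_cast<int32_t>(w) - kZp);
}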
class MatMulConverter : public OpConversionPattern<tosa::MatMulOp> {
public:
  using OpConversionPattern<tosa::MatMulOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::MatMulOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op.getLoc();

    auto outputTy = op.getType().cast<ShapedType>();
    auto outputElementTy = outputTy.getElementType();

    auto firstOperandTy = op->getOperand(0).getType().cast<ShapedType>();
    auto secondOperandTy = op->getOperand(1).getType().cast<ShapedType>();

    SmallVector<Value> dynDims;
    dynDims.resize(op->getResult(0).getType().cast<ShapedType>().getRank());

    if (!firstOperandTy.hasRank() || firstOperandTy.isDynamicDim(0)) {
      dynDims[0] = rewriter.create<tensor::DimOp>(loc, op->getOperand(0), 0);
    }

    if (!firstOperandTy.hasRank() || firstOperandTy.isDynamicDim(1)) {
      dynDims[1] = rewriter.create<tensor::DimOp>(loc, op->getOperand(0), 1);
    }

    if (!secondOperandTy.hasRank() || secondOperandTy.isDynamicDim(2)) {
      dynDims[2] = rewriter.create<tensor::DimOp>(loc, op->getOperand(1), 2);
    }

    SmallVector<Value> filteredDims = condenseValues(dynDims);

    auto zeroAttr = rewriter.getZeroAttr(outputElementTy);
    Value zero = rewriter.create<arith::ConstantOp>(loc, zeroAttr);
    auto initTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, outputTy.getShape(), outputTy.getElementType());
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{initTensor})
                           .result();
    if (!op.quantization_info()) {
      rewriter.replaceOpWithNewOp<linalg::BatchMatmulOp>(
          op, TypeRange{op.getType()}, ValueRange{adaptor.a(), adaptor.b()},
          ValueRange{zeroTensor});
      return success();
    }

    auto quantizationInfo = op.quantization_info().getValue();
    auto aZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(
                 quantizationInfo.a_zp().getValue().getSExtValue()));
    auto bZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(
                 quantizationInfo.b_zp().getValue().getSExtValue()));
    rewriter.replaceOpWithNewOp<linalg::QuantizedBatchMatmulOp>(
        op, TypeRange{op.getType()},
        ValueRange{adaptor.a(), adaptor.b(), aZp, bZp}, zeroTensor);

    return success();
  }
};
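// Illustration (standalone C++ sketch, not from this file): tosa.matmul is
// batched (3-D operands), which is why the lowering targets
// linalg.batch_matmul: C[n][i][j] = sum_k A[n][i][k] * B[n][k][j].
#include <cstddef>
#include <vector>

using Tensor3D = std::vector<std::vector<std::vector<float>>>;

Tensor3D batchMatmulExample(const Tensor3D &a, const Tensor3D &b) {
  size_t n = a.size(), m = a[0].size(), k = a[0][0].size(), p = b[0][0].size();
  Tensor3D c(n, std::vector<std::vector<float>>(m,
                                                std::vector<float>(p, 0.0f)));
  for (size_t batch = 0; batch < n; ++batch)
    for (size_t i = 0; i < m; ++i)
      for (size_t j = 0; j < p; ++j)
        for (size_t x = 0; x < k; ++x)
          c[batch][i][j] += a[batch][i][x] * b[batch][x][j];
  return c;
}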
class FullyConnectedConverter
    : public OpConversionPattern<tosa::FullyConnectedOp> {
public:
  using OpConversionPattern<tosa::FullyConnectedOp>::OpConversionPattern;
  LogicalResult
  matchAndRewrite(tosa::FullyConnectedOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    auto outputTy = op.getType().cast<ShapedType>();
    auto input = op.input();
    auto inputTy = input.getType().cast<ShapedType>();

    auto bias = op.bias();

    auto weight = op.weight();
    auto weightTy = weight.getType().cast<ShapedType>();
    auto weightShape = weightTy.getShape();

    auto outputETy = outputTy.getElementType();

    SmallVector<Value> dynDims;
    dynDims.resize(op->getResult(0).getType().cast<ShapedType>().getRank());

    if (!inputTy.hasRank() || inputTy.isDynamicDim(0)) {
      dynDims[0] = rewriter.create<tensor::DimOp>(loc, input, 0);
    }

    if (!weightTy.hasRank() || weightTy.isDynamicDim(0)) {
      dynDims[1] = rewriter.create<tensor::DimOp>(loc, weight, 0);
    }

    SmallVector<Value> filteredDims = condenseValues(dynDims);

    // Maps for broadcasting the 1-D bias across the output columns (dim 1).
    SmallVector<AffineMap, 4> indexingMaps;
    indexingMaps.push_back(AffineMap::get(/*dimCount=*/2, /*symbolCount=*/0,
                                          {rewriter.getAffineDimExpr(1)},
                                          rewriter.getContext()));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(outputTy.getRank()));
    indexingMaps.push_back(rewriter.getMultiDimIdentityMap(outputTy.getRank()));

    auto initTensor = rewriter.create<linalg::InitTensorOp>(
        loc, filteredDims, outputTy.getShape(), outputTy.getElementType());

    Attribute resultZeroAttr = rewriter.getZeroAttr(outputETy);
    Value zero = rewriter.create<arith::ConstantOp>(loc, resultZeroAttr);
    Value zeroTensor = rewriter
                           .create<linalg::FillOp>(loc, ValueRange{zero},
                                                   ValueRange{initTensor})
                           .result();

    // tosa.fully_connected weights are [OC, IC]; transpose to [IC, OC] for
    // linalg.matmul.
    SmallVector<int64_t> permutation{1, 0};
    auto permutationAttr = DenseIntElementsAttr::get(
        RankedTensorType::get({2}, rewriter.getI64Type()), permutation);
    Value permutationValue =
        rewriter.create<arith::ConstantOp>(loc, permutationAttr);

    SmallVector<int64_t> newWeightShape{weightShape[1], weightShape[0]};
    Type newWeightTy =
        RankedTensorType::get(newWeightShape, weightTy.getElementType());

    Value transposedWeight = rewriter.create<tosa::TransposeOp>(
        loc, newWeightTy, weight, permutationValue);

    auto biasInitTensor =
        rewriter
            .create<linalg::InitTensorOp>(loc, filteredDims,
                                          outputTy.getShape(), outputETy)
            ->getResult(0);

    if (!op.quantization_info()) {
      Value matmul = rewriter
                         .create<linalg::MatmulOp>(
                             loc, TypeRange{op.getType()},
                             ValueRange{input, transposedWeight}, zeroTensor)
                         ->getResult(0);

      Value result =
          rewriter
              .create<linalg::GenericOp>(
                  loc, outputTy, ValueRange({bias, matmul}), biasInitTensor,
                  indexingMaps, getNParallelLoopsAttrs(outputTy.getRank()),
                  [&](OpBuilder &nestedBuilder, Location nestedLoc,
                      ValueRange args) {
                    Value added = nestedBuilder.create<arith::AddFOp>(
                        loc, args[0], args[1]);
                    nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                  })
              .getResult(0);
      rewriter.replaceOp(op, result);
      return success();
    }

    auto quantizationInfo = op.quantization_info().getValue();
    auto inputZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(
                 quantizationInfo.input_zp().getValue().getSExtValue()));
    auto outputZp = rewriter.create<arith::ConstantOp>(
        loc, rewriter.getI32IntegerAttr(
                 quantizationInfo.weight_zp().getValue().getSExtValue()));
    Value matmul =
        rewriter
            .create<linalg::QuantizedMatmulOp>(
                loc, TypeRange{op.getType()},
                ValueRange{input, transposedWeight, inputZp, outputZp},
                zeroTensor)
            ->getResult(0);
    Value result =
        rewriter
            .create<linalg::GenericOp>(
                loc, outputTy, ValueRange({bias, matmul}), biasInitTensor,
                indexingMaps, getNParallelLoopsAttrs(outputTy.getRank()),
                [&](OpBuilder &nestedBuilder, Location nestedLoc,
                    ValueRange args) {
                  Value added = nestedBuilder.create<arith::AddIOp>(
                      loc, args[0], args[1]);
                  nestedBuilder.create<linalg::YieldOp>(nestedLoc, added);
                })
            .getResult(0);
    rewriter.replaceOp(op, result);
    return success();
  }
};
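// Illustration (standalone C++ sketch, not from this file): the semantics of
// the lowering above. Weights are stored [OC, IC], hence the {1, 0} transpose
// before the matmul; the bias is broadcast along the output columns.
#include <cstddef>
#include <vector>

std::vector<std::vector<float>>
fullyConnectedExample(const std::vector<std::vector<float>> &in, // [N][IC]
                      const std::vector<std::vector<float>> &w,  // [OC][IC]
                      const std::vector<float> &bias) {          // [OC]
  std::vector<std::vector<float>> out(in.size(),
                                      std::vector<float>(w.size(), 0.0f));
  for (size_t i = 0; i < in.size(); ++i)
    for (size_t j = 0; j < w.size(); ++j) {
      float acc = bias[j];
      for (size_t k = 0; k < w[j].size(); ++k)
        acc += in[i][k] * w[j][k]; // in * transpose(w)
      out[i][j] = acc;
    }
  return out;
}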
class MaxPool2dConverter : public OpRewritePattern<tosa::MaxPool2dOp> {
public:
  using OpRewritePattern<tosa::MaxPool2dOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tosa::MaxPool2dOp op,
                                PatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    Value input = op.input();
    ShapedType inputTy = input.getType().cast<ShapedType>();

    ShapedType resultTy = op.getType().template cast<ShapedType>();
    Type resultETy = inputTy.getElementType();

    auto dynamicDimsOr =
        checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
    if (!dynamicDimsOr.hasValue())
      return failure();
    SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();

    // The initial value is the identity of max: the lowest representable
    // value, so padded elements can never win.
    Attribute initialAttr;
    if (resultETy.isF32())
      initialAttr = rewriter.getFloatAttr(
          resultETy,
          APFloat::getLargest(resultETy.cast<FloatType>().getFloatSemantics(),
                              /*Negative=*/true));

    if (resultETy.isa<IntegerType>())
      initialAttr = rewriter.getIntegerAttr(
          resultETy,
          APInt::getSignedMinValue(resultETy.getIntOrFloatBitWidth()));

    if (!initialAttr)
      return rewriter.notifyMatchFailure(
          op, "Unsupported initial value for tosa.maxpool_2d op");

    // Apply padding as necessary.
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    getValuesFromIntArrayAttribute(op.pad(), pad);
    pad.resize(pad.size() + 2, 0);
    Value paddedInput = applyPad(loc, input, pad, initialAttr, rewriter);

    Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);

    SmallVector<int64_t> kernel, stride;
    getValuesFromIntArrayAttribute(op.kernel(), kernel);
    getValuesFromIntArrayAttribute(op.stride(), stride);

    Attribute strideAttr = rewriter.getI64VectorAttr(stride);
    Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});

    Value initTensor = rewriter.create<linalg::InitTensorOp>(
        loc, dynamicDims, resultTy.getShape(), resultTy.getElementType());

    Value filledInitTensor =
        rewriter
            .create<linalg::FillOp>(loc, ValueRange{initialValue},
                                    ValueRange{initTensor})
            .result();

    Value fakeWindowDims =
        rewriter.create<linalg::InitTensorOp>(loc, kernel, resultETy);

    rewriter.replaceOpWithNewOp<linalg::PoolingNhwcMaxOp>(
        op, ArrayRef<Type>{resultTy}, ValueRange{paddedInput, fakeWindowDims},
        filledInitTensor, strideAttr, dilationAttr);
    return success();
  }
};
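// Illustration (standalone C++ sketch, not from this file): padding with the
// lowest representable value makes the pad region the identity of max, so a
// plain windowed max over the padded input is correct.
#include <algorithm>
#include <cstddef>
#include <limits>
#include <vector>

std::vector<float> maxPool1DExample(const std::vector<float> &in, size_t k,
                                    size_t stride) {
  std::vector<float> out;
  for (size_t start = 0; start + k <= in.size(); start += stride) {
    float best = std::numeric_limits<float>::lowest(); // the "initialValue"
    for (size_t i = 0; i < k; ++i)
      best = std::max(best, in[start + i]);
    out.push_back(best);
  }
  return out;
}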
class AvgPool2dConverter : public OpRewritePattern<tosa::AvgPool2dOp> {
public:
  using OpRewritePattern<tosa::AvgPool2dOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(tosa::AvgPool2dOp op,
                                PatternRewriter &rewriter) const final {
    Location loc = op.getLoc();
    Value input = op.input();
    ShapedType inputTy = input.getType().cast<ShapedType>();
    Type inElementTy = inputTy.getElementType();

    ShapedType resultTy = op.getType().template cast<ShapedType>();
    Type resultETy = resultTy.getElementType();

    // Accumulate in i32 for integer inputs to avoid overflow.
    Type accETy =
        inElementTy.isa<IntegerType>() ? rewriter.getI32Type() : inElementTy;
    ShapedType accTy = resultTy.clone(accETy);

    auto dynamicDimsOr =
        checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
    if (!dynamicDimsOr.hasValue())
      return failure();
    SmallVector<Value> dynamicDims = dynamicDimsOr.getValue();

    // Apply padding as necessary.
    llvm::SmallVector<int64_t> pad;
    pad.resize(2, 0);
    getValuesFromIntArrayAttribute(op.pad(), pad);
    pad.resize(pad.size() + 2, 0);
    Attribute padAttr = rewriter.getZeroAttr(inElementTy);
    Value paddedInput = applyPad(loc, input, pad, padAttr, rewriter);

    Attribute initialAttr = rewriter.getZeroAttr(accETy);
    Value initialValue = rewriter.create<arith::ConstantOp>(loc, initialAttr);

    SmallVector<int64_t> kernel, stride;
    getValuesFromIntArrayAttribute(op.kernel(), kernel);
    getValuesFromIntArrayAttribute(op.stride(), stride);

    Attribute strideAttr = rewriter.getI64VectorAttr(stride);
    Attribute dilationAttr = rewriter.getI64VectorAttr({1, 1});

    // Sum each window into the accumulator tensor.
    Value poolInitTensor = rewriter.create<linalg::InitTensorOp>(
        loc, dynamicDims, accTy.getShape(), accETy);

    Value filledInitTensor =
        rewriter
            .create<linalg::FillOp>(loc, ValueRange{initialValue},
                                    ValueRange{poolInitTensor})
            .result();

    Value fakeWindowDims =
        rewriter.create<linalg::InitTensorOp>(loc, kernel, accETy);

    Value poolingOp = rewriter
                          .create<linalg::PoolingNhwcSumOp>(
                              loc, ArrayRef<Type>{accTy},
                              ValueRange{paddedInput, fakeWindowDims},
                              filledInitTensor, strideAttr, dilationAttr)
                          .getResult(0);

    // Normalize each window sum by the number of valid elements it covers.
    auto poolingOpTy = poolingOp.getType().cast<ShapedType>();
    auto affineMap = rewriter.getMultiDimIdentityMap(resultTy.getRank());

    Value genericInitTensor = rewriter.create<linalg::InitTensorOp>(
        loc, dynamicDims, resultTy.getShape(), resultETy);

    auto genericOp = rewriter.create<linalg::GenericOp>(
        loc, ArrayRef<Type>({resultTy}), ValueRange{poolingOp},
        ValueRange{genericInitTensor},
        ArrayRef<AffineMap>({affineMap, affineMap}),
        getNParallelLoopsAttrs(resultTy.getRank()),
        [&](OpBuilder &b, Location loc, ValueRange args) {
          auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
          auto one = rewriter.create<arith::ConstantIndexOp>(loc, 1);
          auto iH = rewriter.create<arith::ConstantIndexOp>(
              loc, poolingOpTy.getDimSize(1) - 1);
          auto iW = rewriter.create<arith::ConstantIndexOp>(
              loc, poolingOpTy.getDimSize(2) - 1);

          // Distances of this output element from either spatial edge.
          auto y0 = rewriter.create<linalg::IndexOp>(loc, 1);
          auto x0 = rewriter.create<linalg::IndexOp>(loc, 2);
          auto y1 = rewriter.create<arith::SubIOp>(loc, iH, y0);
          auto x1 = rewriter.create<arith::SubIOp>(loc, iW, x0);

          // Shrinks the kernel extent by however far the window hangs into
          // the padding on one side.
          auto padFn = [&](Value v, Value x, int64_t pad) -> Value {
            if (pad == 0)
              return v;

            auto padVal = rewriter.create<arith::ConstantIndexOp>(loc, pad);
            Value dx = rewriter.create<arith::SubIOp>(loc, x, padVal);
            Value cmp = rewriter.create<arith::CmpIOp>(
                loc, arith::CmpIPredicate::slt, dx, zero);
            Value offset = rewriter.create<arith::SelectOp>(loc, cmp, dx, zero);
            return rewriter.create<arith::AddIOp>(loc, v, offset)->getResult(0);
          };

          // Vertical component of the valid-coverage count.
          auto kH0 = rewriter.create<arith::ConstantIndexOp>(loc, kernel[0]);
          auto kH1 = padFn(kH0, y0, pad[2]);
          auto kH2 = padFn(kH1, y1, pad[3]);
          auto kHCmp = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, kH2, one);
          auto kH3 = rewriter.create<arith::SelectOp>(loc, kHCmp, one, kH2);

          // Horizontal component of the valid-coverage count.
          auto kW0 = rewriter.create<arith::ConstantIndexOp>(loc, kernel[1]);
          auto kW1 = padFn(kW0, x0, pad[4]);
          auto kW2 = padFn(kW1, x1, pad[5]);
          auto kWCmp = rewriter.create<arith::CmpIOp>(
              loc, arith::CmpIPredicate::slt, kW2, one);
          auto kW3 = rewriter.create<arith::SelectOp>(loc, kWCmp, one, kW2);

          // Total number of summed elements in this window.
          Value count = rewriter.create<arith::MulIOp>(loc, kH3, kW3);
          auto countI = rewriter.create<arith::IndexCastOp>(
              loc, rewriter.getI32Type(), count);

          Value poolVal = args[0];
          if (accETy.isa<FloatType>()) {
            auto countF = rewriter.create<arith::SIToFPOp>(loc, accETy, countI);
            poolVal = rewriter.create<arith::DivFOp>(loc, poolVal, countF)
                          ->getResult(0);
          } else {
            // Remove the contribution of the input zero point from the sum.
            if (op.quantization_info()) {
              auto quantizationInfo = op.quantization_info().getValue();
              auto inputZp = rewriter.create<arith::ConstantOp>(
                  loc, quantizationInfo.input_zp());
              Value offset =
                  rewriter.create<arith::MulIOp>(loc, accETy, countI, inputZp);
              poolVal =
                  rewriter.create<arith::SubIOp>(loc, accETy, poolVal, offset);
            }

            // Divide by count via a fixed-point multiply: the multiplier is
            // roughly (1 << 30) / count, applied with a shift of 30.
            int64_t numerator = ((1 << 30) + 1);
            int64_t shift = 30;

            Value numeratorVal = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI32IntegerAttr(numerator));
            Value multiplierVal =
                rewriter
                    .create<arith::DivUIOp>(loc, rewriter.getI32Type(),
                                            numeratorVal, countI)
                    .getResult();
            Value shiftVal = rewriter.create<arith::ConstantOp>(
                loc, rewriter.getI8IntegerAttr(shift));

            auto scaled =
                rewriter
                    .create<tosa::ApplyScaleOp>(
                        loc, rewriter.getI32Type(), poolVal, multiplierVal,
                        shiftVal, rewriter.getBoolAttr(false))
                    .getResult();

            // Add the output zero point back in.
            if (op.quantization_info()) {
              auto quantizationInfo = op.quantization_info().getValue();
              auto outputZp = rewriter.create<arith::ConstantOp>(
                  loc, quantizationInfo.output_zp());
              scaled = rewriter.create<arith::AddIOp>(loc, scaled, outputZp)
                           .getResult();
            }

            // Clamp to the output type's range, then truncate if needed.
            int64_t outBitwidth = resultETy.getIntOrFloatBitWidth();
            auto min = rewriter.create<arith::ConstantIntOp>(
                loc, APInt::getSignedMinValue(outBitwidth).getSExtValue(),
                accETy);
            auto max = rewriter.create<arith::ConstantIntOp>(
                loc, APInt::getSignedMaxValue(outBitwidth).getSExtValue(),
                accETy);
            auto clamp = clampHelper<arith::CmpIOp>(
                loc, scaled, min, max, arith::CmpIPredicate::slt, rewriter);

            poolVal = clamp;
            if (resultETy != clamp.getType())
              poolVal =
                  rewriter.create<arith::TruncIOp>(loc, resultETy, poolVal);
          }

          rewriter.create<linalg::YieldOp>(loc, poolVal);
        });

    rewriter.replaceOp(op, genericOp.getResult(0));
    return success();
  }
};
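// Illustration (standalone C++ sketch, not from this file): the divisor the
// generic op computes per output element. It counts only the window elements
// that land on real input (not padding), clamped to at least 1; the MLIR code
// above arrives at the same count via distances from either edge.
#include <algorithm>
#include <cstdint>

int64_t validExtentExample(int64_t kernel, int64_t outPos, int64_t stride,
                           int64_t padBefore, int64_t inSize) {
  int64_t start = outPos * stride - padBefore; // window start in the input
  int64_t end = start + kernel;                // one past the window end
  start = std::max<int64_t>(start, 0);         // clip the leading pad
  end = std::min(end, inSize);                 // clip the trailing pad
  return std::max<int64_t>(end - start, 1);    // never divide by zero
}
// 2-D count = validExtentExample(kH, y, ...) * validExtentExample(kW, x, ...).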
void mlir::tosa::populateTosaToLinalgNamedConversionPatterns(
    RewritePatternSet *patterns) {
  patterns->add<ConvConverter, DepthwiseConvConverter, MatMulConverter,
                MaxPool2dConverter, AvgPool2dConverter,
                FullyConnectedConverter>(patterns->getContext());
}