19 #include "llvm/ADT/SmallBitVector.h" 25 #include "mlir/Dialect/Linalg/IR/LinalgInterfaces.cpp.inc" 33 for (
auto *opOperand : linalgOp.getInputAndOutputOperands()) {
34 if (llvm::is_contained(droppedOperands, opOperand))
36 indexingMaps.push_back(linalgOp.getTiedIndexingMap(opOperand));
62 template <
typename OpType>
65 block.
walk([&](OpType op) {
79 template <
typename AddOpType,
typename MulOpType>
87 AddOpType addOp = getSingleOpOfType<AddOpType>(block);
88 MulOpType mulOp = getSingleOpOfType<MulOpType>(block);
93 Value a = mulOp->getOperand(0), b = mulOp->getOperand(1);
94 Value mul = mulOp->getResult(0);
96 Value c1 = addOp->getOperand(0), c2 = addOp->getOperand(1);
97 Value add = addOp->getResult(0);
103 success |= (un(c1, argC) && un(c2, mul)) || ((un(c1, mul)) && un(c2, argC));
105 success |= (un(a, argA) && un(b, argB)) || ((un(a, argB)) && un(b, argA));
118 auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
121 if (linalgOp.getNumInputs() != 2 || linalgOp.getNumOutputs() != 1)
123 auto mapRange = linalgOp.indexing_maps().getAsValueRange<AffineMapAttr>();
124 if (linalgOp.getNumReductionLoops() == 0)
126 if (llvm::any_of(mapRange,
130 if (!isAddMul<arith::AddFOp, arith::MulFOp>(linalgOp->getRegion(0).front()) &&
131 !isAddMul<arith::AddIOp, arith::MulIOp>(linalgOp->getRegion(0).front()) &&
132 !isAddMul<complex::AddOp, complex::MulOp>(linalgOp->getRegion(0).front()))
141 return isa<ContractionOpInterface>(op) ||
161 return op->
emitError(
"expected a LinalgOp");
163 return op->
emitError(
"expected op with 2 inputs and 1 outputs");
165 return op->
emitError(
"expected at least a reduction loop");
167 return op->
emitError(
"expected all indexings to be projected permutations");
169 return op->
emitError(
"(add, mul) operations not found");
179 template <
typename T>
181 return lhs.
isa<T>() ? lhs.
cast<T>()
182 : (rhs.
isa<T>() ? rhs.
cast<T>() :
nullptr);
194 struct ConvAccessExprWalker
196 llvm::SmallDenseSet<unsigned> convolvedDims;
197 llvm::SmallDenseSet<unsigned> unConvolvedDims;
201 if (unConvolvedDims.count(position) || convolvedDims.count(position)) {
204 unConvolvedDims.insert(position);
223 if (convolvedDims.count(dim) || unConvolvedDims.count(dim))
225 convolvedDims.insert(dim);
231 auto lhsExpr = symbolMulExpr.getLHS();
232 auto rhsExpr = symbolMulExpr.getRHS();
235 getAffineExprOfType<AffineSymbolExpr>(lhsExpr, rhsExpr);
238 mulExpr = getAffineExprOfType<AffineConstantExpr>(lhsExpr, rhsExpr);
240 auto dimExpr = getAffineExprOfType<AffineDimExpr>(lhsExpr, rhsExpr);
241 if (!mulExpr || !dimExpr)
243 unsigned dim = dimExpr.getPosition();
244 if (convolvedDims.count(dim) || unConvolvedDims.count(dim))
246 convolvedDims.insert(dim);
256 "expected map to have projected permutations");
257 llvm::SmallDenseSet<unsigned> preservedDims;
260 return preservedDims;
275 auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
278 if (linalgOp.getNumInputs() < 2 || linalgOp.getNumOutputs() != 1)
281 auto indexingMaps = linalgOp.getIndexingMaps();
284 ConvAccessExprWalker inputExprWalker;
285 if (llvm::any_of(indexingMaps[0].getResults(),
287 return failed(inputExprWalker.visit(expr));
293 if (!indexingMaps[1].isProjectedPermutation() ||
294 !indexingMaps.back().isProjectedPermutation())
297 auto iteratorTypesRange =
298 linalgOp.iterator_types().getAsValueRange<StringAttr>();
300 llvm::SmallDenseSet<unsigned> outputDims =
302 llvm::SmallDenseSet<unsigned> filterDims =
getPreservedDims(indexingMaps[1]);
316 llvm::SmallDenseSet<unsigned> allLoopDims;
317 for (
auto outputExpr : indexingMaps.back().getResults()) {
318 unsigned outputDim = outputExpr.cast<
AffineDimExpr>().getPosition();
319 if (inputExprWalker.unConvolvedDims.count(outputDim) &&
320 !filterDims.count(outputDim)) {
322 if (*std::next(iteratorTypesRange.begin(), outputDim) !=
325 allLoopDims.insert(outputDim);
328 if (inputExprWalker.convolvedDims.count(outputDim) &&
329 !filterDims.count(outputDim)) {
331 if (*std::next(iteratorTypesRange.begin(), outputDim) !=
334 allLoopDims.insert(outputDim);
337 if (!inputExprWalker.convolvedDims.count(outputDim) &&
338 !inputExprWalker.unConvolvedDims.count(outputDim) &&
339 filterDims.count(outputDim)) {
341 if (*std::next(iteratorTypesRange.begin(), outputDim) !=
344 allLoopDims.insert(outputDim);
347 if (inputExprWalker.unConvolvedDims.count(outputDim) &&
348 filterDims.count(outputDim)) {
350 if (*std::next(iteratorTypesRange.begin(), outputDim) !=
353 allLoopDims.insert(outputDim);
358 for (
auto filterExpr : indexingMaps[1].getResults()) {
359 unsigned filterDim = filterExpr.cast<
AffineDimExpr>().getPosition();
360 if (outputDims.count(filterDim) &&
361 !inputExprWalker.unConvolvedDims.count(filterDim) &&
362 !inputExprWalker.convolvedDims.count(filterDim)) {
366 if (inputExprWalker.convolvedDims.count(filterDim) &&
367 !outputDims.count(filterDim)) {
369 if (*std::next(iteratorTypesRange.begin(), filterDim) !=
372 if (allLoopDims.count(filterDim))
374 allLoopDims.insert(filterDim);
377 if (inputExprWalker.unConvolvedDims.count(filterDim) &&
378 !outputDims.count(filterDim)) {
380 if (*std::next(iteratorTypesRange.begin(), filterDim) !=
383 if (allLoopDims.count(filterDim))
385 allLoopDims.insert(filterDim);
388 if (inputExprWalker.unConvolvedDims.count(filterDim) &&
389 outputDims.count(filterDim)) {
396 if (allLoopDims.size() != linalgOp.getNumLoops())
405 return op->
emitError(
"expected a LinalgOp");
407 return op->
emitError(
"expected op with 2 inputs and 1 output");
409 return op->
emitError(
"unexpected input index map for convolutions");
412 "expected output/filter indexing maps to be projected permutations");
415 return op->
emitError(
"unexpected loop dimension for convolution op");
419 "expected all iterators used to access outputs to be parallel");
423 "expected all iterators not used to access outputs to be reduction");
440 auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
443 if (linalgOp.getNumInputs() != 1 || linalgOp.getNumOutputs() != 1)
447 if (!linalgOp.isScalar(value))
456 return op->
emitError(
"expected a LinalgOp");
458 return op->
emitError(
"expected op with 1 input and 1 output");
460 return op->
emitError(
"expected op with scalar input");
471 result.reserve(this->size());
472 llvm::transform(*
this, std::back_inserter(result),
483 if (source.getType().isa<UnrankedTensorType, RankedTensorType>())
485 llvm_unreachable(
"Expected MemRefType or TensorType");
491 for (
OpOperand *opOperand : getInputAndOutputOperands()) {
492 for (int64_t i = 0, e = getRank(opOperand); i < e; ++i)
500 assert(!hasDynamicShape() &&
"expected operands to have static shapes");
501 for (
OpOperand *opOperand : getInputAndOutputOperands())
502 llvm::append_range(res,
getShape(opOperand));
509 auto viewSizes = createFlatListOfOperandDims(b, loc);
513 for (
unsigned idx = 0; idx < numRes; ++idx) {
516 if (res[d.getPosition()].offset)
518 res[d.getPosition()] =
Range{zeroVal, viewSizes[idx], oneVal};
529 for (
unsigned idx = 0; idx < numRes; ++idx) {
532 res[d.getPosition()] = allShapeSizes[idx];
542 : positions(std::move(positions)) {}
557 llvm::SmallBitVector positions;
561 LinalgOp::reifyResultShapes(
OpBuilder &b,
572 AffineMap loopsToShapesMap = getLoopsToShapesMap();
576 auto resultShapesSubMapPos = getResultsPositionInLoopsToShapeMap();
581 resultShapesSubMapPos.first,
582 resultShapesSubMapPos.second - resultShapesSubMapPos.first);
583 AffineMap resultShapesFromInputShapesMap =
584 loopToResultsShapeMap.
compose(getShapesToLoopsMap());
588 llvm::SmallBitVector outputDims(resultShapesFromInputShapesMap.
getNumDims());
589 outputDims.set(resultShapesSubMapPos.first, resultShapesSubMapPos.second);
591 Location loc = getOperation()->getLoc();
592 auto allResultDimValues =
594 createFlatListOfOperandDims(b, loc));
597 for (
OpOperand *opOperand : getOutputOperands()) {
599 for (int64_t dim : llvm::seq<int64_t>(0, getRank(opOperand))) {
600 if (checkDimExpr.
visit(shapeExprs[pos]))
603 shapes.push_back(allResultDimValues[pos]);
606 reifiedReturnShapes.emplace_back(std::move(shapes));
612 LinalgOp linalgOp = cast<LinalgOp>(op);
617 int64_t numInputs = linalgOp.getNumInputs();
618 int64_t numOutputs = linalgOp.getNumOutputs();
620 return op->
emitOpError(
"expected at least one output operand");
624 if (op->
getNumResults() != linalgOp.getOutputTensorOperands().size())
625 return op->
emitOpError(
"expected the number of results (")
627 <<
") to be equal to the number of output tensors (" 628 << linalgOp.getOutputTensorOperands().size() <<
")";
631 auto iteratorTypesRange =
632 linalgOp.iterator_types().getAsValueRange<StringAttr>();
633 for (StringRef iteratorType : iteratorTypesRange) {
635 return op->
emitOpError(
"unexpected iterator_type (")
636 << iteratorType <<
")";
641 if (linalgOp.hasDynamicIndexingMaps())
642 if (
failed(linalgOp.verifyIndexingMapRequiredAttributes()))
646 if (static_cast<int64_t>(linalgOp.indexing_maps().size()) !=
647 linalgOp.getNumInputsAndOutputs())
648 return op->
emitOpError(
"expected the number of indexing_map (")
649 << linalgOp.indexing_maps().size()
650 <<
") to be equal to the number of input/output operands (" 651 << linalgOp.getNumInputsAndOutputs() <<
")";
653 for (
OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
654 AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
658 return op->
emitOpError(
"unexpected symbols in indexing_map #")
659 << opOperand->getOperandNumber();
662 unsigned numLoops = linalgOp.getNumLoops();
665 << opOperand->getOperandNumber() <<
" to have " << numLoops
666 <<
" dim(s) to match the number of loops";
668 int64_t rank = linalgOp.getRank(opOperand);
671 << rank <<
") to match the result rank of indexing_map #" 672 << opOperand->getOperandNumber() <<
" (" 677 linalgOp.getReductionDims(redDims);
683 if (!linalgOp.getOutputBufferOperands().empty() &&
684 !linalgOp.getOutputTensorOperands().empty())
686 "expected output operands to all have tensor type or " 687 "all have buffer type");
689 for (
OpOperand *opOperand : linalgOp.getOutputTensorOperands()) {
690 OpResult result = linalgOp.getTiedOpResult(opOperand);
691 if (result.
getType() != opOperand->get().getType())
692 return op->
emitOpError(
"expected type of operand #")
693 << opOperand->getOperandNumber() <<
" (" 694 << opOperand->get().getType() <<
")" 695 <<
" to match type of corresponding result (" << result.
getType()
700 for (
OpOperand *opOperand : linalgOp.getOutputOperands()) {
701 AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
703 for (
unsigned pos : redDims) {
704 if (expr.isFunctionOfDim(pos)) {
707 llvm::raw_string_ostream os(exprStr);
711 "unexpected output tensor expression in indexing map #")
712 << (opOperand->getOperandNumber() - linalgOp.getNumInputs())
713 <<
" a.k.a '" << exprStr
714 <<
"' is function of reduction iterator 'd" << pos <<
"'";
721 if (linalgOp->getNumRegions() != 1 ||
722 !llvm::hasSingleElement(linalgOp->getRegion(0)))
723 return op->
emitOpError(
"expects to have 1 region with 1 block");
725 if (!linalgOp.getShapesToLoopsMap())
726 return op->
emitOpError(
"expected the shape-to-loops map to be non-null");
734 Block &block = linalgOp->getRegion(0).
front();
737 return op->
emitOpError(
"expected as many non-induction variable region " 738 "arguments as the number of input/output operands");
740 for (
OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
742 Type argType = block.
getArgument(opOperand->getOperandNumber()).getType();
743 if (elementType != argType)
744 return op->
emitOpError(
"expected type of bb argument #")
745 << opOperand->getOperandNumber() <<
" (" << argType <<
")" 746 <<
" to match element or self type of the corresponding operand (" 747 << elementType <<
")";
756 if (llvm::none_of(endLoopRangeValues, ShapedType::isDynamic)) {
757 for (int64_t &range : endLoopRangeValues)
759 for (
OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
760 AffineMap indexingMap = linalgOp.getTiedIndexingMap(opOperand);
762 indexingMap.
compose(startLoopRangeValues);
764 indexingMap.
compose(endLoopRangeValues);
766 for (
auto dim : llvm::seq<int64_t>(0, shape.size())) {
768 if (ShapedType::isDynamic(shape[dim]) || shape[dim] == 0)
781 int64_t inferredDimSize =
782 std::max(startIndices[dim], endIndices[dim]) + 1;
783 if (
std::min(startIndices[dim], endIndices[dim]) < 0) {
786 llvm::raw_string_ostream os(mapStr);
790 "unexpected result less than 0 at expression #")
791 << dim <<
" in " << mapStr;
794 if (inferredDimSize != shape[dim]) {
795 return op->
emitOpError(
"inferred input/output operand #")
796 << opOperand->getOperandNumber()
797 <<
" has shape's dimension #" << dim <<
" to be " 798 << inferredDimSize <<
", but found " << shape[dim];
801 if (inferredDimSize > shape[dim]) {
802 return op->
emitOpError(
"inferred input/output operand #")
803 << opOperand->getOperandNumber()
804 <<
" has shape's dimension #" << dim
805 <<
" to be greater than or equal to " << inferredDimSize
806 <<
", but found " << shape[dim];
Affine binary operation expression.
TODO: Remove this file when SCCP and integer range analysis have been ported to the new framework...
ArrayRef< StringRef > getAllIteratorTypeNames()
Use to encode that a particular iterator type has window semantics.
AffineMap inversePermutation(AffineMap map)
Returns a map of codomain to domain dimensions such that the first codomain dimension for a particula...
constexpr StringRef getParallelIteratorTypeName()
Use to encode that a particular iterator type has parallel semantics.
void createOrFold(SmallVectorImpl< Value > &results, Location location, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
AffineMap compose(AffineMap map) const
Returns the AffineMap resulting from composing this with map.
Operation is a basic unit of execution within MLIR.
bool isaContractionOpInterface(LinalgOp linalgOp)
Checks whether linalgOp conforms to ContractionOpInterface.
unsigned getNumSymbols() const
unsigned getNumDims() const
This is a value defined by a result of an operation.
Block represents an ordered list of Operations.
bool visitDimExpr(AffineDimExpr dimExpr)
Value getOperand(unsigned idx)
LogicalResult verifyNOperands(Operation *op, unsigned numOperands)
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value...
unsigned getNumOperands()
static T getAffineExprOfType(AffineExpr lhs, AffineExpr rhs)
Of the given two expressions returns one that is of type T (lhs gets preference over rhs) ...
static llvm::SmallDenseSet< unsigned > getPreservedDims(AffineMap map)
bool succeeded(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a success value...
bool visitConstantExpr(AffineConstantExpr constExpr)
LogicalResult verifyFillInterface(Operation *op)
Verify that op conforms to the FillOpInterface.
unsigned getPosition() const
static ArrayRef< int64_t > getShape(Type type)
Returns the shape of the given type.
BlockArgument getArgument(unsigned i)
An integer constant appearing in affine expression.
static constexpr const bool value
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Auxiliary range data structure to unpack the offset, size and stride operands into a list of triples...
AffineExpr getRHS() const
AffineExpr getResult(unsigned idx) const
Base class for AffineExpr visitors/walkers.
bool visitAffineBinaryOpExpr(AffineBinaryOpExpr binaryOpExpr)
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
This class represents an efficient way to signal success or failure.
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
AffineMap getSliceMap(unsigned start, unsigned length) const
Returns the map consisting of length expressions starting from start.
Type getElementTypeOrSelf(Type type)
Return the element type or return the type itself.
AffineExpr getLHS() const
bool isProjectedPermutation(bool allowZeroInResults=false) const
Returns true if the AffineMap represents a subset (i.e.
unsigned getNumArguments()
AffineMap concatAffineMaps(ArrayRef< AffineMap > maps)
Concatenates a list of maps into a single AffineMap, stepping over potentially empty maps...
Base type for affine expression.
RHS of mul is always a constant or a symbolic expression.
SmallVector< Value, 4 > applyMapToValues(OpBuilder &b, Location loc, AffineMap map, ValueRange values)
Returns the values obtained by applying map to the list of values.
static WalkResult advance()
unsigned getNumResults() const
IRValueT get() const
Return the current value being used by this operand.
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued...
static WalkResult interrupt()
LogicalResult verifyStructuredOpInterface(Operation *op)
Verify that op conforms to the invariants of StructuredOpInterface.
RetT walk(FnT &&callback)
Walk the operations in this block.
static bool isAddMul(Block &block)
Detect whether res is any permutation of u5(u1(c) + u2(u3(a) * u4(b))) on the field (AddOpType...
ArrayRef< AffineExpr > getResults() const
static MatchContractionResult isContractionInterfaceImpl(Operation *op)
static MatchConvolutionResult isConvolutionInterfaceImpl(Operation *op)
Visitor to check if any of the given set of positions from AffineDimExprs are used within an AffineEx...
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
bool visitSymbolExpr(AffineSymbolExpr symbolExpr)
static Value min(ImplicitLocOpBuilder &builder, Value value, Value bound)
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
RetTy visit(AffineExpr expr)
Operation * getTerminator()
Get the terminator operation of this block.
AffineExprKind getKind() const
Return the classification for this type.
static MatchFillResult isFillInterfaceImpl(Operation *op)
Type getType() const
Return the type of this value.
A dimensional identifier appearing in an affine expression.
Specialization of arith.constant op that returns an integer of index type.
LogicalResult verifyConvolutionInterface(Operation *op)
Verify that op conforms to the ConvolutionOpInterface.
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
This class represents an operand of an operation.
static bool isChainOfUnaryOpsFrom(Value v, Value from)
Return true if the use-def chain from v to from consists of 0 or more unary single-operand operations...
unsigned getNumResults()
Return the number of results held by this operation.
LogicalResult verifyContractionInterface(Operation *op)
Verify that op conforms to ContractionOpInterface.
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers...
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim)
Helper function that creates a memref::DimOp or tensor::DimOp depending on the type of source...
constexpr StringRef getReductionIteratorTypeName()
Use to encode that a particular iterator type has reduction semantics.
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
static void visit(Operation *op, DenseSet< Operation *> &visited)
Visits all the pdl.operand(s), pdl.result(s), and pdl.operation(s) connected to the given operation...
This class helps build Operations.
HasAffineDimExprVisitor(llvm::SmallBitVector positions)
bool canOpOperandsBeDroppedImpl(linalg::LinalgOp linalgOp, ArrayRef< OpOperand *> droppedOperands)
Implementation of the method that checks if the given operands can be dropped, i.e.
static Value max(ImplicitLocOpBuilder &builder, Value value, Value bound)
A symbolic identifier appearing in an affine expression.
static OpType getSingleOpOfType(Block &block)
Return the unique instance of OpType in block if it is indeed unique.