MLIR 23.0.0git
IndexingMapOpInterface.cpp
Go to the documentation of this file.
1//===- IndexingMapOpInterface.cpp -- IndexingMapOpInterface impl ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include "mlir/Interfaces/IndexingMapOpInterface.h"
11using namespace mlir;
12
13namespace mlir {
14#include "mlir/Interfaces/IndexingMapOpInterface.cpp.inc"
15} // namespace mlir
16
17static LogicalResult verifyIndexingMapOperandType(Operation *op, Type t,
18 unsigned operandNumber) {
19 // Non-shaped types are treated as scalars (rank-0). This includes builtin
20 // types (integer, float, complex) as well as custom dialect types.
21 if (!isa<ShapedType>(t))
22 return success();
23
24 // Vectors are allowed.
25 if (isa<VectorType>(t))
26 return success();
27
28 // MemRefs: must be ranked.
29 if (isa<UnrankedMemRefType>(t)) {
30 return op->emitOpError("operand #")
31 << operandNumber << " must be a ranked memref, but got " << t;
32 }
33 if (isa<MemRefType>(t))
34 return success();
35
36 // Tensors: must be ranked.
37 if (isa<UnrankedTensorType>(t)) {
38 return op->emitOpError("operand #")
39 << operandNumber << " must be a ranked tensor, but got " << t;
40 }
41 if (isa<RankedTensorType>(t))
42 return success();
43
44 // Any other shaped type is not supported by this interface.
45 return op->emitOpError("operand #")
46 << operandNumber
47 << " must be ranked tensor/memref, vector, or scalar, but got " << t;
48}
49
50LogicalResult mlir::IndexingMapOpInterface::verifyImpl() {
51 // All input/output operands must be indexed.
52 if (static_cast<int64_t>(getIndexingMapsArray().size()) !=
53 getOperation()->getNumOperands())
54 return this->emitOpError("expected the number of indexing_map (")
55 << getIndexingMapsArray().size()
56 << ") to be equal to the number of input/output operands ("
57 << getOperation()->getNumOperands() << ")";
58
59 // Inline size chosen empirically based on compilation profiling.
60 // Profiled: 7.5M calls, avg=5.9+-3.1. N=8 covers 67% of cases inline.
61 SmallVector<int64_t, 8> allShapesSizes;
62
63 for (OpOperand &opOperand : getOperation()->getOpOperands()) {
64 Type ty = opOperand.get().getType();
65 if (failed(verifyIndexingMapOperandType(getOperation(), ty,
66 opOperand.getOperandNumber())))
67 return failure();
68 AffineMap indexingMap = getMatchingIndexingMap(&opOperand);
69 // Symbols disallowed.
70 if (indexingMap.getNumSymbols() != 0)
71 return this->emitOpError("unexpected symbols in indexing_map #")
72 << opOperand.getOperandNumber();
73 // Handle scalars (non-shaped types: integer, float, complex, custom types,
74 // etc.).
75 if (!isa<ShapedType>(ty)) {
76 int64_t rank = 0;
77 if (indexingMap.getNumResults() != rank)
78 return this->emitOpError("expected operand #")
79 << opOperand.getOperandNumber() << " rank (" << rank
80 << ") to match the result rank of indexing_map ("
81 << indexingMap.getNumResults() << ")";
82 continue;
83 }
84 SmallVector<int64_t> shape = getStaticOperandShape(&opOperand);
85 int64_t rank = shape.size();
86
87 // Result rank must match operand rank.
88 if (indexingMap.getNumResults() != rank)
89 return this->emitOpError("expected operand #")
90 << opOperand.getOperandNumber() << " rank (" << rank
91 << ") to match the result rank of indexing_map ("
92 << indexingMap.getNumResults() << ")";
93
94 llvm::append_range(allShapesSizes, shape);
95 }
96
97 AffineMap invertedMap = getShapesToLoopsMap();
98 if (!invertedMap) {
99 std::string str;
100 llvm::raw_string_ostream os(str);
101 getLoopsToShapesMap().print(os);
102 return this->emitOpError("invalid indexing maps are non-invertible: ")
103 << "(" << str << ")";
104 }
105
106 SmallVector<int64_t> endLoopRangeValues = invertedMap.compose(allShapesSizes);
107
108 // Check if given shapes match to inferred shapes.
109 SmallVector<int64_t> startLoopRangeValues(endLoopRangeValues.size(), 0);
110 // Verify only static cases since we can't get exact dimension sizes and
111 // loop ranges for dynamic cases in this stage.
112 if (llvm::none_of(endLoopRangeValues, ShapedType::isDynamic)) {
113 // Exclusive end range.
114 for (int64_t &range : endLoopRangeValues)
115 range -= 1;
116 for (OpOperand &opOperand : getOperation()->getOpOperands()) {
117 AffineMap indexingMap = getMatchingIndexingMap(&opOperand);
118 SmallVector<int64_t> startIndices =
119 indexingMap.compose(startLoopRangeValues);
120 SmallVector<int64_t> endIndices = indexingMap.compose(endLoopRangeValues);
121 SmallVector<int64_t> shape = getStaticOperandShape(&opOperand);
122 for (auto dim : llvm::seq<int64_t>(0, shape.size())) {
123 // Ignore dynamic dimension or the case that the dimension size is 0
124 if (ShapedType::isDynamic(shape[dim]) || shape[dim] == 0)
125 continue;
126
127 // The first index or last index should be the maximum or the minimum in
128 // the inferred index ranges since the range is increasing or
129 // decreasing. The size of dimensions of input/output operands and the
130 // maximum value + 1 in the inferred range should be the same. But, for
131 // now we check if the inferred ranges are in boundary of input/output
132 // operands' size or not in case that Affine Expressions are complicated
133 // such as d0 * 3
134 // + d1 since it is not easy to handle the issues.
135 // Found the case that this solution can't check, for example, (d0, d1)
136 // -> (d1 - d0)
137 int64_t inferredDimSize =
138 std::max(startIndices[dim], endIndices[dim]) + 1;
139 if (std::min(startIndices[dim], endIndices[dim]) < 0) {
140 std::string mapStr;
141 {
142 llvm::raw_string_ostream os(mapStr);
143 os << indexingMap;
144 }
145 return this->emitOpError(
146 "unexpected result less than 0 at expression #")
147 << dim << " in " << mapStr;
148 }
149 if (isa<AffineDimExpr>(indexingMap.getResult(dim))) {
150 if (inferredDimSize != shape[dim]) {
151 return this->emitOpError("inferred input/output operand #")
152 << opOperand.getOperandNumber() << " has shape's dimension #"
153 << dim << " to be " << inferredDimSize << ", but found "
154 << shape[dim];
155 }
156 } else {
157 if (inferredDimSize > shape[dim]) {
158 return this->emitOpError("inferred input/output operand #")
159 << opOperand.getOperandNumber() << " has shape's dimension #"
160 << dim << " to be greater than or equal to "
161 << inferredDimSize << ", but found " << shape[dim];
162 }
163 }
164 }
165 }
166 }
167
168 return success();
169}
return success()
p<< " : "<< getMemRefType()<< ", "<< getType();}static LogicalResult verifyVectorMemoryOp(Operation *op, MemRefType memrefType, VectorType vectorType) { if(memrefType.getElementType() !=vectorType.getElementType()) return op-> emitOpError("requires memref and vector types of the same elemental type")
Given a list of lists of parsed operands, populates uniqueOperands with unique operands.
static LogicalResult verifyIndexingMapOperandType(Operation *op, Type t, unsigned operandNumber)
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
Definition AffineMap.h:46
unsigned getNumSymbols() const
unsigned getNumResults() const
AffineExpr getResult(unsigned idx) const
AffineMap compose(AffineMap map) const
Returns the AffineMap resulting from composing this with map.
This class represents an operand of an operation.
Definition Value.h:254
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
Definition Types.h:74
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition Remarks.h:717
Include the generated interface declarations.