29 #include "llvm/ADT/DenseSet.h"
30 #include "llvm/ADT/SetVector.h"
32 #define DEBUG_TYPE "vector-utils"
34 #define DBGS() (llvm::dbgs() << '[' << DEBUG_TYPE << "] ")
35 #define LDBG(X) LLVM_DEBUG(DBGS() << X << "\n")
43 if (isa<UnrankedMemRefType, MemRefType>(source.
getType()))
45 if (isa<UnrankedTensorType, RankedTensorType>(source.
getType()))
47 llvm_unreachable(
"Expected MemRefType or TensorType");
73 for (int64_t permDim : transp) {
80 llvm_unreachable(
"Ill-formed transpose pattern");
83 FailureOr<std::pair<int, int>>
85 VectorType srcType = op.getSourceVectorType();
89 srcGtOneDims.push_back(index);
91 if (srcGtOneDims.size() != 2)
101 return std::pair<int, int>(srcGtOneDims[0], srcGtOneDims[1]);
129 if (enclosingLoopToVectorDim.empty())
132 enclosingLoopToVectorDim.begin()->getFirst()->getContext();
136 for (
auto kvp : enclosingLoopToVectorDim) {
137 assert(kvp.second < perm.size());
139 cast<affine::AffineForOp>(kvp.first).getInductionVar(), indices);
140 unsigned numIndices = indices.size();
141 unsigned countInvariantIndices = 0;
142 for (
unsigned dim = 0; dim < numIndices; ++dim) {
143 if (!invariants.count(indices[dim])) {
145 "permutationMap already has an entry along dim");
148 ++countInvariantIndices;
151 assert((countInvariantIndices == numIndices ||
152 countInvariantIndices == numIndices - 1) &&
153 "Vectorization prerequisite violated: at most 1 index may be "
154 "invariant wrt a vectorized loop");
155 (void)countInvariantIndices;
164 template <
typename T>
169 if ([[maybe_unused]]
auto typedParent = dyn_cast<T>(current)) {
170 assert(res.count(current) == 0 &&
"Already inserted");
173 current = current->getParentOp();
180 return getParentsOfType<affine::AffineForOp>(block);
188 for (
auto *forInst : enclosingLoops) {
189 auto it = loopToVectorDim.find(forInst);
190 if (it != loopToVectorDim.end()) {
191 enclosingLoopToVectorDim.insert(*it);
203 bool matcher::operatesOnSuperVectorsOf(
Operation &op,
204 VectorType subVectorType) {
214 bool mustDivide =
false;
216 VectorType superVectorType;
217 if (
auto transfer = dyn_cast<VectorTransferOpInterface>(op)) {
218 superVectorType = transfer.getVectorType();
221 if (!isa<func::ReturnOp>(op)) {
222 op.
emitError(
"NYI: assuming only return operations can have 0 "
223 " results at this point");
236 op.
emitError(
"NYI: operation has more than 1 result");
245 assert((ratio || !mustDivide) &&
246 "vector.transfer operation in which super-vector size is not an"
247 " integer multiple of sub-vector size");
254 return ratio.has_value();
258 if (vectorType.isScalable())
262 auto vecRank = vectorType.getRank();
268 auto memrefShape = memrefType.getShape().take_back(vecRank);
272 auto firstNonMatchingDim =
274 memrefShape.rbegin(), memrefShape.rend());
275 if (firstNonMatchingDim.first ==
vectorShape.rend())
283 return llvm::all_of(leadingDims, [](
auto x) {
return x == 1; });
286 std::optional<StaticTileOffsetRange>
288 if (vType.getRank() <= targetRank)
292 auto shapeToUnroll = vType.getShape().drop_back(targetRank);
293 auto scalableDimsToUnroll = vType.getScalableDims().drop_back(targetRank);
295 std::find(scalableDimsToUnroll.begin(), scalableDimsToUnroll.end(),
true);
296 auto firstScalableDim = it - scalableDimsToUnroll.begin();
297 if (firstScalableDim == 0)
300 scalableDimsToUnroll = scalableDimsToUnroll.slice(0, firstScalableDim);
301 assert(!llvm::is_contained(scalableDimsToUnroll,
true) &&
302 "unexpected leading scalable dimension");
304 shapeToUnroll = shapeToUnroll.slice(0, firstScalableDim);
311 auto loc = xfer->
getLoc();
314 .Case<vector::TransferReadOp>(
315 [&](
auto readOp) {
return readOp.getSource(); })
316 .Case<vector::TransferWriteOp>(
317 [&](
auto writeOp) {
return writeOp.getOperand(1); });
322 return mixedSourceDims;
326 return (type.getRank() > 1) && (type.getNumScalableDims() <= 1);
332 bool useInBoundsInsteadOfMasking) {
333 assert(llvm::none_of(readShape,
334 [](int64_t s) {
return s == ShapedType::kDynamic; }) &&
335 "expected static shape");
336 auto sourceShapedType = cast<ShapedType>(source.
getType());
337 auto sourceShape = sourceShapedType.getShape();
338 assert(sourceShape.size() == readShape.size() &&
"expected same ranks.");
341 assert(padValue.
getType() == sourceShapedType.getElementType() &&
342 "expected same pad element type to match source element type");
343 int64_t readRank = readShape.size();
346 if (useInBoundsInsteadOfMasking) {
348 for (
unsigned i = 0; i < readRank; i++)
349 inBoundsVal[i] = (sourceShape[i] == readShape[i]) &&
350 !ShapedType::isDynamic(sourceShape[i]);
352 auto transferReadOp = builder.
create<vector::TransferReadOp>(
360 if (llvm::equal(readShape, sourceShape) || useInBoundsInsteadOfMasking)
361 return transferReadOp;
365 builder.
create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
373 LDBG(
"Iteration space static sizes:");
374 LLVM_DEBUG(llvm::interleaveComma(shape, llvm::dbgs()));
375 LLVM_DEBUG(llvm::dbgs() <<
"\n");
377 if (inputVectorSizes.size() != shape.size()) {
378 LDBG(
"Input vector sizes don't match the number of loops");
381 if (ShapedType::isDynamicShape(inputVectorSizes)) {
382 LDBG(
"Input vector sizes can't have dynamic dimensions");
385 if (!llvm::all_of(llvm::zip(shape, inputVectorSizes),
386 [](std::tuple<int64_t, int64_t> sizePair) {
387 int64_t staticSize = std::get<0>(sizePair);
388 int64_t inputSize = std::get<1>(sizePair);
389 return ShapedType::isDynamic(staticSize) ||
390 staticSize <= inputSize;
392 LDBG(
"Input vector sizes must be greater than or equal to iteration space "
static std::optional< VectorShape > vectorShape(Type type)
static bool areDimsTransposedIn2DSlice(int64_t dim0, int64_t dim1, ArrayRef< int64_t > transp)
Given the n-D transpose pattern 'transp', return true if 'dim0' and 'dim1' should be transposed with ...
static SetVector< Operation * > getEnclosingforOps(Block *block)
Returns the enclosing AffineForOp, from closest to farthest.
static AffineMap makePermutationMap(ArrayRef< Value > indices, const DenseMap< Operation *, unsigned > &enclosingLoopToVectorDim)
Constructs a permutation map from memref indices to vector dimension.
static SetVector< Operation * > getParentsOfType(Block *block)
Implementation detail that walks up the parents and records the ones with the specified type.
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
Block represents an ordered list of Operations.
Operation * getParentOp()
Returns the closest surrounding operation that contains this block.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
MLIRContext is the top-level object for a collection of MLIR operations.
This class helps build Operations.
void createOrFold(SmallVectorImpl< Value > &results, Location location, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
Operation * create(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Operation is the basic unit of execution within MLIR.
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Location getLoc()
The source location the operation was defined or derived from.
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
Block * getBlock()
Returns the operation block that contains this operation.
unsigned getNumResults()
Return the number of results held by this operation.
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
A range-style iterator that allows for iterating over the offsets of all potential tiles of size tile...
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
Specialization of arith.constant op that returns an integer of index type.
DenseSet< Value, DenseMapInfo< Value > > getInvariantAccesses(Value iv, ArrayRef< Value > indices)
Given an induction variable iv of type AffineForOp and indices of type IndexType, returns the set of ...
bool hasTensorSemantics(Operation *op)
Return "true" if the given op has tensor semantics and should be bufferized.
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given memref value.
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given tensor value.
bool isContiguousSlice(MemRefType memrefType, VectorType vectorType)
Return true if vectorType is a contiguous slice of memrefType.
LogicalResult isValidMaskedInputVector(ArrayRef< int64_t > shape, ArrayRef< int64_t > inputVectorSizes)
Returns success if inputVectorSizes is a valid masking configuration for given shape,...
Operation * maskOperation(OpBuilder &builder, Operation *maskableOp, Value mask, Value passthru=Value())
Creates a vector.mask operation around a maskable operation.
FailureOr< std::pair< int, int > > isTranspose2DSlice(vector::TransposeOp op)
Returns two dims that are greater than one if the transposition is applied on a 2D slice.
std::optional< StaticTileOffsetRange > createUnrollIterator(VectorType vType, int64_t targetRank=1)
Returns an iterator for all positions in the leading dimensions of vType up to the targetRank.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim)
Helper function that creates a memref::DimOp or tensor::DimOp depending on the type of source.
bool isLinearizableVector(VectorType type)
Returns true if the input Vector type can be linearized.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, ArrayRef< int64_t > readShape, Value padValue, bool useInBoundsInsteadOfMasking)
Create a TransferReadOp from source with static shape readShape.
SmallVector< OpFoldResult > getMixedSizesXfer(bool hasTensorSemantics, Operation *xfer, RewriterBase &rewriter)
A wrapper for getMixedSizes for vector.transfer_read and vector.transfer_write Ops (for source and de...
Include the generated interface declarations.
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
std::optional< SmallVector< int64_t > > computeShapeRatio(ArrayRef< int64_t > shape, ArrayRef< int64_t > subShape)
Return the multi-dimensional integral ratio of subShape to the trailing dimensions of shape.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.
bool trailingNDimsContiguous(MemRefType type, int64_t n)
Return "true" if the last N dimensions of the given type are contiguous.