/// Transpose a vector transfer op's in_bounds attribute by applying reverse
/// permutation based on the given indices.
static ArrayAttr
inverseTransposeInBoundsAttr(OpBuilder &builder, ArrayAttr attr,
                             const SmallVector<unsigned> &permutation) {
  SmallVector<bool> newInBoundsValues(permutation.size(), false);
  size_t index = 0;
  for (unsigned pos : permutation)
    newInBoundsValues[pos] =
        cast<BoolAttr>(attr.getValue()[index++]).getValue();
  return builder.getBoolArrayAttr(newInBoundsValues);
}
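// Illustrative example of the inversion above (values are hypothetical): with
// permutation = [2, 0, 1] and in_bounds = [true, false, true], the result is
// [false, true, true] -- entry i of the input lands at position
// permutation[i] of the output.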
/// Extend the rank of a vector Value by `addedRank` by adding outer unit
/// dimensions.
static Value extendVectorRank(OpBuilder &builder, Location loc, Value vec,
                              int64_t addedRank) {
  auto originalVecType = cast<VectorType>(vec.getType());
  SmallVector<int64_t> newShape(addedRank, 1);
  newShape.append(originalVecType.getShape().begin(),
                  originalVecType.getShape().end());

  SmallVector<bool> newScalableDims(addedRank, false);
  newScalableDims.append(originalVecType.getScalableDims().begin(),
                         originalVecType.getScalableDims().end());
  VectorType newVecType = VectorType::get(
      newShape, originalVecType.getElementType(), newScalableDims);
  return builder.create<vector::BroadcastOp>(loc, newVecType, vec);
}
/// Extend the rank of a vector Value by `addedRank` by adding inner unit
/// dimensions.
static Value extendMaskRank(OpBuilder &builder, Location loc, Value vec,
                            int64_t addedRank) {
  Value broadcasted = extendVectorRank(builder, loc, vec, addedRank);
  SmallVector<int64_t> permutation;
  for (int64_t i = addedRank,
               e = cast<VectorType>(broadcasted.getType()).getRank();
       i < e; ++i)
    permutation.push_back(i);
  for (int64_t i = 0; i < addedRank; ++i)
    permutation.push_back(i);
  return builder.create<vector::TransposeOp>(loc, broadcasted, permutation);
}
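// Illustrative effect of the two helpers above (shapes are examples only):
//   extendVectorRank(b, loc, %m : vector<4x2xi1>, /*addedRank=*/2)
//     -> vector.broadcast to vector<1x1x4x2xi1>            (outer unit dims)
//   extendMaskRank(b, loc, %m : vector<4x2xi1>, /*addedRank=*/2)
//     -> broadcast to vector<1x1x4x2xi1>, then
//        vector.transpose [2, 3, 0, 1] -> vector<4x2x1x1xi1> (inner unit dims)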
struct TransferReadPermutationLowering
    : public MaskableOpRewritePattern<vector::TransferReadOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferReadOp op,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    // ...
    if (op.getTransferRank() == 0)
      return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
    // ...
      return rewriter.notifyMatchFailure(
          op,
          "map is not permutable to minor identity, apply another pattern");

    // Build the new vector shape: dimension pos.index() of the original
    // vector moves to position pos.value() in the new read.
    ArrayRef<int64_t> originalShape = op.getVectorType().getShape();
    ArrayRef<bool> originalScalableDims = op.getVectorType().getScalableDims();
    // ...
      newVectorShape[pos.value()] = originalShape[pos.index()];
      newScalableDims[pos.value()] = originalScalableDims[pos.index()];
    // ...
    // Transpose the in_bounds attribute to match the new dimension order.
    ArrayAttr newInBoundsAttr =
        inverseTransposeInBoundsAttr(rewriter, op.getInBoundsAttr(), permutation);

    // Generate a new transfer_read with a minor-identity map, then transpose
    // the result back into the original dimension order.
    VectorType newReadType = VectorType::get(
        newVectorShape, op.getVectorType().getElementType(), newScalableDims);
    Value newRead = rewriter.create<vector::TransferReadOp>(
        op.getLoc(), newReadType, op.getSource(), op.getIndices(), /*...*/);
    // ...
    return rewriter
        .create<vector::TransposeOp>(op.getLoc(), newRead, transposePerm)
        .getResult();
  }
};
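// Sketch of the rewrite performed by TransferReadPermutationLowering
// (types and maps are illustrative; masking elided):
//
//   %v = vector.transfer_read %src[%i, %j], %pad
//          {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
//          : memref<?x?xf32>, vector<4x8xf32>
//
// becomes, roughly:
//
//   %r = vector.transfer_read %src[%i, %j], %pad
//          {permutation_map = affine_map<(d0, d1) -> (d0, d1)>}
//          : memref<?x?xf32>, vector<8x4xf32>
//   %v = vector.transpose %r, [1, 0] : vector<8x4xf32> to vector<4x8xf32>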
struct TransferWritePermutationLowering
    : public MaskableOpRewritePattern<vector::TransferWriteOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferWriteOp op,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    // ...
    if (op.getTransferRank() == 0)
      return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
    // ...
      return rewriter.notifyMatchFailure(
          op,
          "map is not permutable to minor identity, apply another pattern");

    // Extract the transpose order from the inverse of the (compressed)
    // permutation map.
    SmallVector<int64_t> indices;
    llvm::transform(permutationMap.getResults(), std::back_inserter(indices),
                    [](AffineExpr expr) {
                      return dyn_cast<AffineDimExpr>(expr).getPosition();
                    });
    // ...
    // Transpose the in_bounds attribute to match the new dimension order.
    ArrayAttr newInBoundsAttr =
        inverseTransposeInBoundsAttr(rewriter, op.getInBoundsAttr(), permutation);

    // Transpose the vector, then rewrite the transfer with a minor-identity
    // map.
    Value newVec = rewriter.create<vector::TransposeOp>(
        op.getLoc(), op.getVector(), indices);
    // ...
    auto newWrite = rewriter.create<vector::TransferWriteOp>(
        op.getLoc(), newVec, op.getSource(), op.getIndices(), /*...*/);
    if (newWrite.hasPureTensorSemantics())
      return newWrite.getResult();
    // With buffer semantics the transfer_write has no result.
    return Value();
  }
};
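// Sketch of the rewrite performed by TransferWritePermutationLowering
// (types and maps are illustrative; masking elided):
//
//   vector.transfer_write %v, %dst[%i, %j]
//       {permutation_map = affine_map<(d0, d1) -> (d1, d0)>}
//       : vector<4x8xf32>, memref<?x?xf32>
//
// becomes, roughly:
//
//   %t = vector.transpose %v, [1, 0] : vector<4x8xf32> to vector<8x4xf32>
//   vector.transfer_write %t, %dst[%i, %j]
//       : vector<8x4xf32>, memref<?x?xf32>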
struct TransferWriteNonPermutationLowering
    : public MaskableOpRewritePattern<vector::TransferWriteOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferWriteOp op,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    // ...
    if (op.getTransferRank() == 0)
      return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
    // ...
      return rewriter.notifyMatchFailure(
          op,
          "map is already permutable to minor identity, apply another pattern");

    // Mark the source dimensions that already appear in the permutation map.
    // ...
      foundDim[cast<AffineDimExpr>(exp).getPosition()] = true;
    // Collect the missing inner dimensions: once the first mapped dimension
    // has been seen, every later unmapped dimension must be added to the map.
    bool foundFirstDim = false;
    // ...
    for (size_t i = 0; i < foundDim.size(); i++) {
      if (foundDim[i]) {
        foundFirstDim = true;
        continue;
      }
      if (!foundFirstDim)
        continue;
      missingInnerDim.push_back(i);
      // ...
    }
    // Extend the vector (and the mask, if present) with unit dimensions so
    // that the extended map becomes a permutation of a minor identity.
    Value newVec = extendVectorRank(rewriter, op.getLoc(), op.getVector(),
                                    missingInnerDim.size());
    Value newMask;
    if (op.getMask())
      newMask = extendMaskRank(rewriter, op.getLoc(), op.getMask(),
                               missingInnerDim.size());
    // The added unit dimensions are always in-bounds; the original dimensions
    // keep their in_bounds values.
    SmallVector<bool> newInBoundsValues(missingInnerDim.size(), true);
    for (int64_t i = 0, e = op.getVectorType().getRank(); i < e; ++i) {
      newInBoundsValues.push_back(op.isDimInBounds(i));
    }
    // ...
    auto newWrite = rewriter.create<vector::TransferWriteOp>(
        op.getLoc(), newVec, op.getSource(), op.getIndices(), /*...*/);
    if (newWrite.hasPureTensorSemantics())
      return newWrite.getResult();
    return Value();
  }
};
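// Sketch of the rewrite performed by TransferWriteNonPermutationLowering
// (types and maps are illustrative): the source dimensions missing from the
// map are added as extra results, and matching unit dimensions are added to
// the vector, so a later pattern can handle the result as a permutation of a
// minor identity.
//
//   vector.transfer_write %v, %A[%c0, %c0, %c0, %c0]
//       {permutation_map = affine_map<(d0, d1, d2, d3) -> (d1, d2)>}
//       : vector<2x3xf32>, memref<2x2x3x4xf32>
//
// becomes, roughly:
//
//   %v1 = vector.broadcast %v : vector<2x3xf32> to vector<1x2x3xf32>
//   vector.transfer_write %v1, %A[%c0, %c0, %c0, %c0]
//       {permutation_map = affine_map<(d0, d1, d2, d3) -> (d3, d1, d2)>}
//       : vector<1x2x3xf32>, memref<2x2x3x4xf32>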
struct TransferOpReduceRank
    : public MaskableOpRewritePattern<vector::TransferReadOp> {
  using MaskableOpRewritePattern::MaskableOpRewritePattern;

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferReadOp op,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    // ...
    if (op.getTransferRank() == 0)
      return rewriter.notifyMatchFailure(op, "0-d corner case not supported");

    // Count the leading broadcast dimensions, i.e. leading results of the
    // permutation map that are the constant 0.
    AffineMap map = op.getPermutationMap();
    unsigned numLeadingBroadcast = 0;
    for (auto expr : map.getResults()) {
      auto dimExpr = dyn_cast<AffineConstantExpr>(expr);
      if (!dimExpr || dimExpr.getValue() != 0)
        break;
      numLeadingBroadcast++;
    }
    // If there are no leading zeros in the map there is nothing to do.
    if (numLeadingBroadcast == 0)
      return rewriter.notifyMatchFailure(op, /*...*/);

    VectorType originalVecType = op.getVectorType();
    unsigned reducedShapeRank = originalVecType.getRank() - numLeadingBroadcast;
    // ...
      return rewriter.notifyMatchFailure(
          op, "map is not a minor identity with broadcasting");

    // Build the reduced-rank vector type from the trailing dimensions.
    SmallVector<int64_t> newShape(
        originalVecType.getShape().take_back(reducedShapeRank));
    SmallVector<bool> newScalableDims(
        originalVecType.getScalableDims().take_back(reducedShapeRank));
    VectorType newReadType = VectorType::get(
        newShape, originalVecType.getElementType(), newScalableDims);
    ArrayAttr newInBoundsAttr =
        op.getInBounds()
            ? rewriter.getArrayAttr(
                  op.getInBoundsAttr().getValue().take_back(reducedShapeRank))
            : ArrayAttr();
    Value newRead = rewriter.create<vector::TransferReadOp>(
        op.getLoc(), newReadType, op.getSource(), op.getIndices(), /*...*/);
    // Broadcast the reduced-rank read back to the original vector type.
    return rewriter
        .create<vector::BroadcastOp>(op.getLoc(), originalVecType, newRead)
        .getResult();
  }
};
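// Sketch of the rewrite performed by TransferOpReduceRank (types are
// illustrative): the leading broadcast dimensions of the result vector are
// peeled off the transfer and reintroduced with a plain vector.broadcast.
//
//   %v = vector.transfer_read %src[%i, %j], %pad
//       {permutation_map = affine_map<(d0, d1) -> (0, d0, d1)>}
//       : memref<?x?xf32>, vector<3x4x8xf32>
//
// becomes, roughly:
//
//   %r = vector.transfer_read %src[%i, %j], %pad
//       {permutation_map = affine_map<(d0, d1) -> (d0, d1)>}
//       : memref<?x?xf32>, vector<4x8xf32>
//   %v = vector.broadcast %r : vector<4x8xf32> to vector<3x4x8xf32>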
void mlir::vector::populateVectorTransferPermutationMapLoweringPatterns(
    RewritePatternSet &patterns, PatternBenefit benefit) {
  patterns
      .add<TransferReadPermutationLowering, TransferWritePermutationLowering,
           TransferOpReduceRank, TransferWriteNonPermutationLowering>(
          patterns.getContext(), benefit);
}
struct TransferReadToVectorLoadLowering
    : public MaskableOpRewritePattern<vector::TransferReadOp> {
  TransferReadToVectorLoadLowering(MLIRContext *context,
                                   std::optional<unsigned> maxRank,
                                   PatternBenefit benefit = 1)
      : MaskableOpRewritePattern<vector::TransferReadOp>(context, benefit),
        maxTransferRank(maxRank) {}

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferReadOp read,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    if (maxTransferRank && read.getVectorType().getRank() > *maxTransferRank) {
      return rewriter.notifyMatchFailure(
          read, "vector type is greater than max transfer rank");
    }
    // ...
    // The permutation map must be a minor identity, possibly with broadcast
    // dimensions.
    SmallVector<unsigned> broadcastedDims;
    if (!read.getPermutationMap().isMinorIdentityWithBroadcasting(
            &broadcastedDims))
      return rewriter.notifyMatchFailure(read, /*...*/);

    // Only memref sources with a unit stride in the most minor dimension can
    // be lowered to vector.load.
    auto memRefType = dyn_cast<MemRefType>(read.getShapedType());
    if (!memRefType)
      return rewriter.notifyMatchFailure(read, /*...*/);
    if (!memRefType.isLastDimUnitStride())
      return rewriter.notifyMatchFailure(read, /*...*/);

    // Broadcast dimensions read a single element: drop them to size 1 in the
    // type that is actually loaded.
    // ...
    for (unsigned i : broadcastedDims)
      unbroadcastedVectorShape[i] = 1;
    VectorType unbroadcastedVectorType = read.getVectorType().cloneWith(
        unbroadcastedVectorShape, read.getVectorType().getElementType());

    // vector.load supports vector element types only when the loaded vector
    // type matches them exactly; otherwise the scalar element types must
    // match.
    auto memrefElTy = memRefType.getElementType();
    if (isa<VectorType>(memrefElTy) && memrefElTy != unbroadcastedVectorType)
      return rewriter.notifyMatchFailure(read, /*...*/);
    if (!isa<VectorType>(memrefElTy) &&
        memrefElTy != read.getVectorType().getElementType())
      return rewriter.notifyMatchFailure(read, /*...*/);

    // Out-of-bounds dimensions need a mask and are handled elsewhere.
    if (read.hasOutOfBoundsDim())
      return rewriter.notifyMatchFailure(read, /*...*/);

    // Masked transfers only lower for rank-1 vectors: the padding value
    // becomes the pass-through of a vector.maskedload.
    Operation *res = nullptr;
    if (read.getMask()) {
      if (read.getVectorType().getRank() != 1)
        return rewriter.notifyMatchFailure(
            read, "vector type is not rank 1, can't create masked load, needs "
                  "VectorToSCF");
      Value fill = rewriter.create<vector::SplatOp>(
          read.getLoc(), unbroadcastedVectorType, read.getPadding());
      res = rewriter.create<vector::MaskedLoadOp>(
          read.getLoc(), unbroadcastedVectorType, read.getSource(),
          read.getIndices(), read.getMask(), fill);
    } else {
      res = rewriter.create<vector::LoadOp>(
          read.getLoc(), unbroadcastedVectorType, read.getSource(),
          read.getIndices());
    }

    // If there were broadcast dimensions, broadcast the loaded value back to
    // the original vector type.
    if (!broadcastedDims.empty())
      res = rewriter.create<vector::BroadcastOp>(
          read.getLoc(), read.getVectorType(), res->getResult(0));
    return res->getResult(0);
  }

  std::optional<unsigned> maxTransferRank;
};
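// Sketch of the rewrite performed by TransferReadToVectorLoadLowering
// (illustrative types; 1-D, in-bounds, minor-identity case):
//
//   %v = vector.transfer_read %m[%i], %pad {in_bounds = [true]}
//       : memref<?xf32>, vector<8xf32>
//   ==> %v = vector.load %m[%i] : memref<?xf32>, vector<8xf32>
//
// and, roughly, when the transfer carries a mask (rank-1 only), the padding
// becomes the pass-through value of a masked load:
//
//   %pass = vector.splat %pad : vector<8xf32>
//   %v = vector.maskedload %m[%i], %mask, %pass
//       : memref<?xf32>, vector<8xi1>, vector<8xf32> into vector<8xf32>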
struct TransferWriteToVectorStoreLowering
    : public MaskableOpRewritePattern<vector::TransferWriteOp> {
  TransferWriteToVectorStoreLowering(MLIRContext *context,
                                     std::optional<unsigned> maxRank,
                                     PatternBenefit benefit = 1)
      : MaskableOpRewritePattern<vector::TransferWriteOp>(context, benefit),
        maxTransferRank(maxRank) {}

  FailureOr<mlir::Value>
  matchAndRewriteMaskableOp(vector::TransferWriteOp write,
                            MaskingOpInterface maskOp,
                            PatternRewriter &rewriter) const override {
    if (maxTransferRank && write.getVectorType().getRank() > *maxTransferRank) {
      return rewriter.notifyMatchFailure(
          write, "vector type is greater than max transfer rank");
    }
    // Permutation maps are expected to have been lowered away by the
    // permutation-map patterns above.
    if (// ...
        !write.getPermutationMap().isMinorIdentity())
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "permutation map is not minor identity: " << write;
      });

    auto memRefType = dyn_cast<MemRefType>(write.getShapedType());
    if (!memRefType)
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "not a memref type: " << write;
      });

    // Only memrefs with a unit stride in the most minor dimension lower to
    // vector.store.
    if (!memRefType.isLastDimUnitStride())
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "most minor stride is not 1: " << write;
      });

    // vector.store supports vector element types only when the stored vector
    // type matches them exactly; otherwise the scalar element types must
    // match.
    auto memrefElTy = memRefType.getElementType();
    if (isa<VectorType>(memrefElTy) && memrefElTy != write.getVectorType())
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "elemental type mismatch: " << write;
      });
    if (!isa<VectorType>(memrefElTy) &&
        memrefElTy != write.getVectorType().getElementType())
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "elemental type mismatch: " << write;
      });

    // Out-of-bounds dimensions are not supported.
    if (write.hasOutOfBoundsDim())
      return rewriter.notifyMatchFailure(write.getLoc(), [=](Diagnostic &diag) {
        diag << "out of bounds dim: " << write;
      });

    if (write.getMask()) {
      // Masked transfers only lower for rank-1 vectors.
      if (write.getVectorType().getRank() != 1)
        return rewriter.notifyMatchFailure(
            write.getLoc(), [=](Diagnostic &diag) {
              diag << "vector type is not rank 1, can't create masked store, "
                      "needs VectorToSCF: "
                   << write;
            });
      rewriter.create<vector::MaskedStoreOp>(
          write.getLoc(), write.getSource(), write.getIndices(),
          write.getMask(), write.getVector());
    } else {
      rewriter.create<vector::StoreOp>(write.getLoc(), write.getVector(),
                                       write.getSource(), write.getIndices());
    }
    // A transfer_write on a memref has no result to return.
    return Value();
  }

  std::optional<unsigned> maxTransferRank;
};
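// Sketch of the rewrite performed by TransferWriteToVectorStoreLowering
// (illustrative types; 1-D, in-bounds, minor-identity case):
//
//   vector.transfer_write %v, %m[%i] {in_bounds = [true]}
//       : vector<8xf32>, memref<?xf32>
//   ==> vector.store %v, %m[%i] : memref<?xf32>, vector<8xf32>
//
// and, roughly, with a mask (rank-1 only):
//
//   vector.maskedstore %m[%i], %mask, %v
//       : memref<?xf32>, vector<8xi1>, vector<8xf32>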
void mlir::vector::populateVectorTransferLoweringPatterns(
    RewritePatternSet &patterns, std::optional<unsigned> maxTransferRank,
    PatternBenefit benefit) {
  patterns.add<TransferReadToVectorLoadLowering,
               TransferWriteToVectorStoreLowering>(patterns.getContext(),
                                                   maxTransferRank, benefit);
}
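A minimal usage sketch (not part of this file; the header paths, the helper name, and the exact greedy-driver entry point are assumptions that vary across MLIR revisions): both populate functions are typically combined into one RewritePatternSet and applied greedily, so transfers are first normalized by the permutation-map patterns and then turned into vector.load / vector.store.

#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

// Hypothetical helper: lower vector transfers nested under `op` to
// vector.load / vector.store where possible (rank-1 transfers only here).
static LogicalResult lowerVectorTransfers(Operation *op) {
  RewritePatternSet patterns(op->getContext());
  vector::populateVectorTransferPermutationMapLoweringPatterns(patterns);
  vector::populateVectorTransferLoweringPatterns(patterns,
                                                 /*maxTransferRank=*/1);
  return applyPatternsAndFoldGreedily(op, std::move(patterns));
}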