24 #define GEN_PASS_DEF_LINALGSPECIALIZEGENERICOPSPASS
25 #include "mlir/Dialect/Linalg/Passes.h.inc"
28 #define DEBUG_TYPE "linalg-specialization"
// Replaces `genericOp` with the named binary op NEWOP, optionally swapping the
// two DPS input operands (to normalize commuted forms such as `b op a`).
// Expects `rewriter` (RewriterBase &) and `genericOp` (GenericOp) to be in
// scope at the expansion site.
// NOTE(review): restored the `genericOp,` first argument to
// replaceOpWithNewOp — it was missing in the extracted text (compare
// REPLACE_UNARY_OP below, which passes `genericOp` first).
#define REPLACE_BINARY_OP(NEWOP, OPERANDS_SWAP)                                \
  (rewriter.replaceOpWithNewOp<NEWOP>(                                         \
      genericOp,                                                               \
      ValueRange{genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 1 : 0],            \
                 genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 0 : 1]},           \
      ValueRange{genericOp.getDpsInits()[0]}))
// Replaces `genericOp` with the named unary op NEWOP, forwarding the single
// DPS input and the single DPS init. Expects `rewriter` (RewriterBase &) and
// `genericOp` (GenericOp) to be in scope at the expansion site.
#define REPLACE_UNARY_OP(NEWOP)                                                \
  (rewriter.replaceOpWithNewOp<NEWOP>(genericOp,                               \
                                      ValueRange{genericOp.getDpsInputs()[0]}, \
                                      ValueRange{genericOp.getDpsInits()[0]}))
// NOTE(review): extraction fragment of `areBinOpsSwapped(GenericOp)` — the
// function signature and most of its body are not visible here.
Block *body = genericOp.getBody();
// Tail of an assertion message string; the assert condition itself was lost
// in extraction.
"binary op uses just one block arg");
// Classifies how an operand's indexing map positions its (row, col) dims
// relative to the expected contraction dims. The enumerators (Match,
// Transposed, Mismatch — per the uses in matchOperandMap below) are not
// visible in this fragment.
enum class IndexMatchResult {
109 static IndexMatchResult matchOperandMap(
AffineMap map,
unsigned rowDimIdx,
110 unsigned expectedPosOfRowDim,
111 unsigned expectedPosOfColDim) {
113 auto exprOfRowDim = map.
getResults()[rowDimIdx];
114 auto exprOfColDim = map.
getResults()[rowDimIdx + 1];
119 return IndexMatchResult::Mismatch;
121 auto posRowDim = cast<AffineDimExpr>(exprOfRowDim).getPosition();
122 auto posColDim = cast<AffineDimExpr>(exprOfColDim).getPosition();
124 if (expectedPosOfRowDim == posRowDim && expectedPosOfColDim == posColDim)
125 return IndexMatchResult::Match;
127 if (expectedPosOfRowDim == posColDim && expectedPosOfColDim == posRowDim)
128 return IndexMatchResult::Transposed;
130 return IndexMatchResult::Mismatch;
137 template <
typename NamedOpTy>
138 static LinalgOp replaceWithMatmulVariant(
RewriterBase &rewriter, GenericOp op) {
140 op,
ValueRange{op.getDpsInputs()[0], op.getDpsInputs()[1]},
// Rewrites a linalg.generic that implements a (batch-)matmul contraction into
// the corresponding named op (MatmulOp / BatchMatmulOp).
// NOTE(review): extraction fragment — several statements (the early
// `return failure()` bodies, lambda bodies, the `dims` inference —
// presumably via inferContractionDims, TODO confirm — and the assignments
// producing `a`, `b`, `c`, `first`, `second`) were lost; comments below flag
// the gaps.
static FailureOr<LinalgOp> specializeLinalgContractions(RewriterBase &rewriter,
                                                        GenericOp genericOp) {
  // A contraction candidate has exactly two DPS inputs and one DPS init.
  if (genericOp.getNumDpsInputs() != 2 || genericOp.getNumDpsInits() != 1)

  // All indexing maps must satisfy some predicate (lambda body not visible;
  // presumably a projected-permutation check — TODO confirm upstream).
  auto mapRange = genericOp.getIndexingMapsArray();
  if (llvm::any_of(mapRange,

  // Only single m/n/k contraction dims map onto the plain (batch-)matmul
  // named ops. (`dims` assignment is not visible in this fragment.)
  if (dims.m.size() != 1 || dims.n.size() != 1 || dims.k.size() != 1)

  // The contraction body must be a mul feeding an add, uniformly in float,
  // integer, or complex arithmetic.
  if ((isa<arith::MulFOp>(first) && isa<arith::AddFOp>(second)) ||
      (isa<arith::MulIOp>(first) && isa<arith::AddIOp>(second)) ||
      (isa<complex::MulOp>(first) && isa<complex::AddOp>(second)))

  auto indexingMaps = genericOp.getIndexingMapsArray();
  // Per-map check against the expected result count of batch dims plus the
  // two matmul dims (lambda body truncated in extraction).
  if (llvm::any_of(indexingMaps, [&dims](AffineMap m) {
        dims.batch.size() + 2 ;

  // The loop dims are the batch dims plus m, n and k.
  auto numOfBatchDims = dims.batch.size();
  if (indexingMaps[0].getNumDims() != numOfBatchDims + 3)

  if (numOfBatchDims) {
    // Batch dims must appear as the leading dims, in order, in every map
    // (loop body truncated in extraction).
    if (llvm::any_of(indexingMaps, [numOfBatchDims](AffineMap m) {
          for (unsigned i = 0; i < numOfBatchDims; ++i) {
            cast<AffineDimExpr>(expr).getPosition() != i)

  // Match each operand map's (row, col) positions against the expected
  // contraction dims; the results were assigned to `a`, `b`, `c` on lines
  // not visible here.
  matchOperandMap(indexingMaps[0], numOfBatchDims, dims.m[0], dims.k[0]);
  matchOperandMap(indexingMaps[1], numOfBatchDims, dims.k[0], dims.n[0]);
  matchOperandMap(indexingMaps[2], numOfBatchDims, dims.m[0], dims.n[0]);

  // Any mismatched operand map disqualifies the op.
  if (llvm::is_contained({a, b, c}, IndexMatchResult::Mismatch))

  // The output map must be in canonical (m, n) order, and at most one of the
  // two inputs may be transposed.
  if (c != IndexMatchResult::Match ||
      (a == IndexMatchResult::Transposed && b == IndexMatchResult::Transposed))

  // Batch dims present => batched matmul variant; otherwise plain matmul.
  if (numOfBatchDims) {
    return replaceWithMatmulVariant<BatchMatmulOp>(rewriter, genericOp);
  return replaceWithMatmulVariant<MatmulOp>(rewriter, genericOp);
// Tail of `specializeGenericOp` (the head of the signature is not visible in
// this fragment): tries each named-op equivalence in turn — copy, fill,
// broadcast, transpose, elementwise unary, elementwise binary — and finally
// contraction, replacing the generic with the matching named op.
// NOTE(review): the condition lines (isaCopyOpInterface, isaFillOpInterface,
// etc., per the helper declarations elsewhere) and the replacement-op names
// were lost in extraction.
                                                 GenericOp genericOp) {
  // Copy: the single input forwarded unchanged to the init.
      genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0]);
  // Fill: a constant `fillValue` written into the init.
      genericOp, *fillValue, genericOp.getDpsInits()[0]);

  // Broadcast: `dims` lists the broadcast dimensions.
  std::optional<SmallVector<int64_t>> equivalentToBroadcast =
  if (equivalentToBroadcast) {
    auto dims = *equivalentToBroadcast;
      genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0],

  // Transpose: `permutation` is the dimension permutation to apply.
  std::optional<SmallVector<int64_t>> equivalentToTranspose =
  if (equivalentToTranspose) {
    auto permutation = *equivalentToTranspose;
      genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0],

  // Elementwise single unary op: dispatch on the sole body operation.
  Operation *op = &genericOp.getBody()->front();
  if (isa<math::ExpOp>(op)) {

  // Elementwise single binary op: dispatch on the sole body operation.
  Operation *op = &genericOp.getBody()->front();
  if (isa<arith::AddFOp>(op)) {
  if (isa<arith::SubFOp>(op)) {
  if (isa<arith::MulFOp>(op)) {
  if (isa<arith::DivFOp>(op)) {

  // Last resort: try contraction specialization (matmul variants).
  return specializeLinalgContractions(rewriter, genericOp);
325 struct LinalgSpecializeGenericOpsPass
326 :
public impl::LinalgSpecializeGenericOpsPassBase<
327 LinalgSpecializeGenericOpsPass> {
329 using impl::LinalgSpecializeGenericOpsPassBase<
330 LinalgSpecializeGenericOpsPass>::LinalgSpecializeGenericOpsPassBase;
331 void runOnOperation()
override;
335 void LinalgSpecializeGenericOpsPass::runOnOperation() {
static MLIRContext * getContext(OpFoldResult val)
#define REPLACE_BINARY_OP(NEWOP, OPERANDS_SWAP)
static bool areBinOpsSwapped(GenericOp genericOp)
#define REPLACE_UNARY_OP(NEWOP)
A multi-dimensional affine map. Affine maps are immutable like Types, and they are uniqued.
bool isProjectedPermutation(bool allowZeroInResults=false) const
Returns true if the AffineMap represents a subset (i.e.
ArrayRef< AffineExpr > getResults() const
Block represents an ordered list of Operations.
BlockArgument getArgument(unsigned i)
IRValueT get() const
Return the current value being used by this operand.
Operation is the basic unit of execution within MLIR.
OpOperand & getOpOperand(unsigned idx)
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
OpTy replaceOpWithNewOp(Operation *op, Args &&...args)
Replace the results of the given (original) op with a new op that is created without verification (re...
This class provides an abstraction over the different types of ranges over Values.
bool isContractionBody(Block &block, function_ref< bool(Operation *, Operation *)> isaPair, llvm::raw_ostream &errs=mlir::thread_safe_nulls())
Returns true if the block contains a contraction of the following form:
std::optional< SmallVector< int64_t > > isaTransposeOpInterface(GenericOp genericOp)
Checks whether genericOp is semantically equivalent to a linalg.transpose.
bool isaElemwiseSingleUnaryOpInterface(GenericOp genericOp)
Checks whether a given genericOp is semantically equivalent to a single linalg elementwise unary op.
bool isaCopyOpInterface(LinalgOp linalgOp)
Checks whether linalgOp is semantically equivalent to a linalg.copyOp.
void populateDecomposeProjectedPermutationPatterns(RewritePatternSet &patterns)
Add patterns to make explicit broadcasts and transforms in the input operands of a genericOp.
FailureOr< LinalgOp > specializeGenericOp(RewriterBase &rewriter, GenericOp genericOp)
Create a namedOp from the given GenericOp and replace the GenericOp.
std::optional< SmallVector< int64_t > > isaBroadcastOpInterface(GenericOp genericOp)
Checks whether genericOp is semantically equivalent to a linalg.broadcast.
FailureOr< ContractionDimensions > inferContractionDims(LinalgOp linalgOp)
Find at least 2 parallel (m and n) and 1 reduction (k) dimension candidates that form a matmul subcom...
bool isaContractionOpInterface(LinalgOp linalgOp)
Checks whether linalgOp conforms to ContractionOpInterface.
void populateLinalgGenericOpsSpecializationPatterns(RewritePatternSet &patterns)
Populates patterns with patterns to convert linalg.generic ops to named ops where possible.
std::optional< Value > isaFillOpInterface(GenericOp genericOp)
Checks whether genericOp is semantically equivalent to a linalg.fill.
bool isaElemwiseSingleBinaryOpInterface(GenericOp genericOp)
Checks whether genericOp is semantically equivalent to a single linalg elementwise binary op e....
Include the generated interface declarations.
LogicalResult applyPatternsGreedily(Region ®ion, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig(), bool *changed=nullptr)
Rewrite ops in the given region, which must be isolated from above, by repeatedly applying the highes...
@ DimId
Dimensional identifier.
const FrozenRewritePatternSet & patterns