Transforms.cpp
1//===- Transforms.cpp - Linalg transformations as patterns ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements logic and helpers to expose Linalg transforms as rewrite
10// patterns.
11//
12//===----------------------------------------------------------------------===//
13
28#include "mlir/IR/AffineExpr.h"
31#include "mlir/Support/LLVM.h"
32#include "llvm/ADT/SmallVectorExtras.h"
33#include "llvm/ADT/TypeSwitch.h"
34#include "llvm/Support/Debug.h"
35#include "llvm/Support/DebugLog.h"
36#include "llvm/Support/InterleavedRange.h"
37#include "llvm/Support/raw_ostream.h"
38#include <type_traits>
39#include <utility>
40
41#define DEBUG_TYPE "linalg-transforms"
42
43using namespace mlir;
44using namespace mlir::linalg;
45
46//===----------------------------------------------------------------------===//
47// Transformations exposed as functional-style API calls.
48//===----------------------------------------------------------------------===//
49
50//===----------------------------------------------------------------------===//
51// peelLoop transformation.
52//===----------------------------------------------------------------------===//
53
54/// Try to peel and canonicalize loop `op` and return the new result.
55/// Also applies affine_min/max bounds simplification on the fly where relevant.
56// TODO: Add support for scf.parallel and affine.for loops.
57SmallVector<Value> mlir::linalg::peelLoop(RewriterBase &rewriter,
58 Operation *op) {
59 return llvm::TypeSwitch<Operation *, SmallVector<Value, 4>>(op)
60 .Case([&](scf::ForOp forOp) {
61 scf::ForOp partialIteration;
62 if (succeeded(scf::peelForLoopAndSimplifyBounds(rewriter, forOp,
63 partialIteration)))
64 return partialIteration->getResults();
65 assert(!partialIteration && "expected that loop was not peeled");
66 return forOp->getResults();
67 })
68 .Default([&](Operation *op) { return op->getResults(); });
69}
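
// A rough sketch of the effect (schematic IR; bounds, step and map are
// hypothetical): peeling an scf.for with dynamic upper bound %ub and step %c4
// splits it into a main loop over the largest multiple of the step and a
// single partial iteration, so affine_min/max expressions in the main loop
// body can simplify to constants:
//
//   %split = affine.apply affine_map<()[s0] -> (s0 - (s0 mod 4))>()[%ub]
//   scf.for %i = %c0 to %split step %c4 { ... }  // full tiles only
//   scf.for %i = %split to %ub step %c4 { ... }  // at most one partial tile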
70
71/// Peel `loops` and apply affine_min/max bounds simplification on the fly
72/// where relevant.
73void mlir::linalg::peelLoops(RewriterBase &rewriter,
74 ArrayRef<scf::ForOp> loops) {
75 for (auto loopOp : loops)
76 peelLoop(rewriter, loopOp);
77}
78
79//===----------------------------------------------------------------------===//
80// pack transformation.
81//===----------------------------------------------------------------------===//
82
83#ifndef NDEBUG
84/// Return true if `map` has 0 or 1 result function of AffineDimExpr(dim).
85static bool hasAtMostOneResultFunctionOfDim(AffineMap map, int64_t dim) {
86 bool found = false;
87 for (AffineExpr e : map.getResults()) {
88 if (!e.isFunctionOfDim(dim))
89 continue;
90 if (found)
91 return false;
92 found = true;
93 }
94 return true;
95}
96#endif // NDEBUG
97
98static std::string stringifyReassocIndices(ReassociationIndicesRef ri) {
99 return llvm::interleaved(ri, ", ", /*Prefix=*/"|", /*Suffix=*/"");
100}
101
102/// Return the index of the first result of `map` that is a function of
103/// AffineDimExpr(dim), std::nullopt otherwise.
104static std::optional<int64_t> getFirstResultIndexFunctionOf(AffineMap map,
105 int64_t dim) {
106 for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
107 AffineExpr expr = map.getResult(i);
108 if (!expr.isFunctionOfDim(dim))
109 continue;
110 return i;
111 }
112 return std::nullopt;
113}
114
115/// Perform one step of packing of a LinalgOp's metadata along `dim` into the
116/// `newDim` at `iteratorTypes.size()` by:
117/// 1. Appending `iteratorTypes[newDim]`, equal to `iteratorTypes[dim]`.
118/// 2. Appending a `newDim` to the domain of every indexing map.
119/// 3. For each operand (i.e. for each map in `indexingMaps`), perform packing
120/// by potentially adding a `newDim` result to `map`.
121/// The preserved invariant is that `iteratorTypes.size()` is always equal to
122/// `map.getNumDims()` for every map in `indexingMaps`.
123///
124/// Update `indexingMaps` and `iteratorTypes` inplace as one step of the update.
125/// Return a vector that records the optional packing for each operand.
126/// Return failure if the packed indexing cannot be represented with a LinalgOp.
127///
128/// Further details:
129/// ================
130/// The current implementation of packing (i.e. data tiling) consists of
131/// rewriting a linearized strip-mined form into a higher-dimensional access.
132/// e.g. consider an access `A[I][f(j, k, l)]` and packing by 4; we rewrite
133/// `I` into `4 * i + ii`, where `0 <= ii < 4`.
134/// The access is further rewritten as `A[i][f(j, k, l)][ii]`.
135///
136/// This rewrite into higher dimensional access is not possible for general
137/// AffineExpr in Linalg atm; it is restricted to an AffineDimExpr:
138/// e.g. consider an access `A[I + J][f(j, k, l)]` and packing by 4; we
139/// rewrite `I + J` into `4 * i + ii + J`, where `0 <= ii < 4`.
140/// The rewrite of the access would be a form not representable in Linalg:
141/// `A[i + (ii + J) / 4][f(j, k, l)][(ii + J) % 4]`.
142/// Note however that as `J` and `ii` iterate, the accesses do not have a
143/// particular alignment, so packing does not achieve alignment in this case.
144///
145/// In the future, we may want to consider a mixed-form that allows some
146/// alignment in the presence of multiple accesses:
147/// `A[I][f(j, k, l)]` and `B[I + J][f(j, k, l)]`
148/// And would rewrite accesses as:
149/// `A[i][f(j, k, l)][ii]` and `B[4 * i + ii + J][f(j, k, l)]`
150static FailureOr<SmallVector<std::optional<int64_t>>>
151packLinalgMetadataOnce(SmallVectorImpl<AffineMap> &indexingMaps,
152 SmallVectorImpl<utils::IteratorType> &iteratorTypes,
153 int64_t dim) {
154 int64_t newDim = iteratorTypes.size();
155 iteratorTypes.push_back(iteratorTypes[dim]);
156
157 SmallVector<std::optional<int64_t>> packedDimPerIndexingMap(
158 indexingMaps.size(), std::nullopt);
159 SmallVector<AffineMap> newMaps;
160 for (int64_t operandIdx = 0, e = indexingMaps.size(); operandIdx < e;
161 ++operandIdx) {
162 AffineMap map = indexingMaps[operandIdx];
163
164 // Add the `newDim` to map whatever the case.
165 assert(map.getNumDims() == newDim && "num dims invariant violation");
166 map = map.shiftDims(1, newDim);
167
168 // Get the at-most-1 index of the result that is a function of `dim`.
169 // If we can find one, we insert `AffineDimExpr(newDim)` to the map, which
170 // logically chunks dimension `dim` into `K * dim + newDim`, where the
171 // packing factor `K` is specified separately.
172 assert(hasAtMostOneResultFunctionOfDim(map, dim) &&
173 "num results invariant violation");
174 auto maybeOperandDimensionToPack = getFirstResultIndexFunctionOf(map, dim);
175 if (!maybeOperandDimensionToPack.has_value()) {
176 newMaps.push_back(map);
177 continue;
178 }
179
180 // We can only pack AffineDimExpr atm.
181 if (!isa<AffineDimExpr>(map.getResult(maybeOperandDimensionToPack.value())))
182 return failure();
183
184 // Add `newDim` to the results of the map.
185 map = map.insertResult(Builder(map.getContext()).getAffineDimExpr(newDim),
186 map.getNumResults());
187 newMaps.push_back(map);
188
189 // Record that `operandIdx` is packed.
190 packedDimPerIndexingMap[operandIdx] = maybeOperandDimensionToPack;
191 }
192 indexingMaps = newMaps;
193
194 return packedDimPerIndexingMap;
195}
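
// Worked example for the helper above (illustrative only): take a matmul-like
// op with iterators [par, par, red] and indexing maps
//   A: (d0, d1, d2) -> (d0, d2)
//   B: (d0, d1, d2) -> (d2, d1)
//   C: (d0, d1, d2) -> (d0, d1)
// Packing dim = 0 (the "m" iterator) appends a new parallel iterator d3 to the
// domain of every map and a trailing d3 result to every map that has a result
// depending on d0:
//   A: (d0, d1, d2, d3) -> (d0, d2, d3)
//   B: (d0, d1, d2, d3) -> (d2, d1)        (no result depends on d0)
//   C: (d0, d1, d2, d3) -> (d0, d1, d3)
// and returns {0, std::nullopt, 0}: operands A and C are packed along their
// result 0, operand B is left untouched.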
196
197namespace {
198
199/// Helper struct to encode packing along one dimension of a LinalgOp.
200struct PackedOperandsDim {
201 OpFoldResult packedSize;
202 SmallVector<std::optional<int64_t>> packedDimForEachOperand;
203};
204
205/// Helper struct to encode packing along all dimensions of a LinalgOp.
206struct PackedOperandsDimList {
207 void pushBack(PackedOperandsDim &&packedOperandsDims) {
208 spec.emplace_back(packedOperandsDims);
209 }
210 /// Return all the dims that have been packed for operand @ `operandPos`.
211 SmallVector<int64_t> extractPackedDimsForOperand(int64_t operandPos);
212 /// Return all the pack sizes by which an operand @ `operandPos` is packed.
213 SmallVector<OpFoldResult> extractPackSizesForOperand(int64_t operandPos);
214
215private:
216 SmallVector<PackedOperandsDim> spec;
217};
218
219} // namespace
220
221FailureOr<LowerPackResult> linalg::lowerPack(RewriterBase &rewriter,
222 linalg::PackOp packOp,
223 bool lowerPadLikeWithInsertSlice) {
224 // TODO: Support Memref PackOp. Temporarily return failure.
225 if (!packOp.hasPureTensorSemantics())
226 return failure();
227
228 // 1. Filter out NYI cases.
229 auto packedTensorType =
230 cast<RankedTensorType>(packOp->getResultTypes().front());
231 if (llvm::any_of(packOp.getStaticInnerTiles(), ShapedType::isDynamic)) {
232 return rewriter.notifyMatchFailure(
233 packOp,
234 "non-static shape NYI, needs a more powerful tensor.expand_shape op");
235 }
236
237 Location loc = packOp->getLoc();
238 OpBuilder::InsertionGuard g(rewriter);
239 rewriter.setInsertionPoint(packOp);
240
241 // 2. Compute the permutation vector to shuffle packed shape into the shape
242 // before any outer or inner permutations have been applied.
243 PackingMetadata packingMetadata;
244 SmallVector<int64_t> packedToStripMinedShapePerm =
245 getPackInverseDestPerm(packOp, packingMetadata);
246
247 // 3. Compute the stripMinedShape: this is the packed shape before any outer
248 // or inner permutations have been applied.
249 SmallVector<int64_t> stripMinedShape(packedTensorType.getShape());
250 applyPermutationToVector(stripMinedShape, packedToStripMinedShapePerm);
251
252 // 4. Pad the source of packOp to a shape we can expand into stripMinedShape.
253 SmallVector<OpFoldResult> lows(packOp.getSourceRank(),
254 rewriter.getIndexAttr(0));
255 SmallVector<OpFoldResult> highs(packOp.getSourceRank(),
256 rewriter.getIndexAttr(0));
257 for (auto [pos, innerSize] :
258 llvm::zip_equal(packOp.getInnerDimsPos(), packOp.getMixedTiles())) {
259 int outerPos =
260 packedToStripMinedShapePerm[packingMetadata.outerPositions[pos]];
261 OpFoldResult origSize =
262 tensor::getMixedSize(rewriter, loc, packOp.getSource(), pos);
263 OpFoldResult outerSize =
264 tensor::getMixedSize(rewriter, loc, packOp.getDest(), outerPos);
265 AffineExpr s0, d0, d1;
266 bindDims(rewriter.getContext(), d0, d1);
267 bindSymbols(rewriter.getContext(), s0);
268 auto map = AffineMap::get(/*dimCount=*/2, /*symbolCount=*/1, d0 * s0 - d1);
269 highs[pos] = affine::makeComposedFoldedAffineApply(
270 rewriter, loc, map, {outerSize, origSize, innerSize});
271 }
272 RankedTensorType collapsed = tensor::CollapseShapeOp::inferCollapsedType(
273 RankedTensorType::Builder(packedTensorType).setShape(stripMinedShape),
274 packingMetadata.reassociations);
275 Value paddingValue = packOp.getPaddingValue();
276 if (!paddingValue) {
277 paddingValue = arith::ConstantOp::create(
278 rewriter, loc, rewriter.getZeroAttr(getElementTypeOrSelf(collapsed)));
279 }
280 auto padOp =
281 tensor::PadOp::create(rewriter, loc, collapsed, packOp.getSource(), lows,
282 highs, paddingValue, /*nofold=*/false);
283
284 LDBG() << "insertPositions: "
285 << llvm::interleaved(packingMetadata.insertPositions);
286 LDBG() << "outerPositions: "
287 << llvm::interleaved(packingMetadata.outerPositions);
288 LDBG() << "packedShape: " << llvm::interleaved(packedTensorType.getShape());
289 LDBG() << "packedToStripMinedShapePerm: "
290 << llvm::interleaved(packedToStripMinedShapePerm);
291 LDBG() << "reassociations: "
292 << llvm::interleaved(llvm::map_range(packingMetadata.reassociations,
293 stringifyReassocIndices));
294 LDBG() << "stripMinedShape: " << llvm::interleaved(stripMinedShape);
295 LDBG() << "collapsed type: " << collapsed;
296
297 if (lowerPadLikeWithInsertSlice && packOp.isLikePad()) {
298 // Pack ops which operate as simple pads may not produce legal
299 // tensor.insert_slice operations when the packed type does not rank reduce
300 // to the padded type.
301 SliceVerificationResult rankReduces =
302 isRankReducedType(packedTensorType, padOp.getResultType());
303
304 if (rankReduces == SliceVerificationResult::Success) {
305 // This pack is just a plain pad.
306 // Just insert the pad in the higher ranked tensor.
307 // Offsets.
308 SmallVector<OpFoldResult> zeros(packOp.getDestRank(),
309 rewriter.getIndexAttr(0));
310 // Strides.
311 SmallVector<OpFoldResult> ones(packOp.getDestRank(),
312 rewriter.getIndexAttr(1));
313 SmallVector<OpFoldResult> sizes =
314 tensor::getMixedSizes(rewriter, loc, packOp.getDest());
315
316 auto insertSliceOp = tensor::InsertSliceOp::create(
317 rewriter, loc, /*source=*/padOp, /*dest=*/packOp.getDest(),
318 /*offsets=*/zeros, sizes, /*strides=*/ones);
319
320 LDBG() << "insert_slice op: " << insertSliceOp;
321
322 rewriter.replaceOp(packOp, insertSliceOp->getResults());
323
324 return LowerPackResult{padOp, /*reshapeOp=*/nullptr,
325 /*transposeOp=*/nullptr};
326 }
327 }
328
329 // 5. Expand from the padded result to the stripMinedShape.
330 auto expandShapeResultType =
331 RankedTensorType::Builder(packedTensorType).setShape(stripMinedShape);
332 auto reshapeOp = tensor::ExpandShapeOp::create(
333 rewriter, loc, expandShapeResultType, padOp.getResult(),
334 packingMetadata.reassociations);
335
336 // 6. Transpose stripMinedShape to packedShape.
337 SmallVector<int64_t> transpPerm =
338 invertPermutationVector(packedToStripMinedShapePerm);
339 auto transposeOp = linalg::TransposeOp::create(
340 rewriter, loc, reshapeOp.getResult(), packOp.getDest(), transpPerm);
341
342 LDBG() << "reshape op: " << reshapeOp;
343 LDBG() << "transpPerm: " << llvm::interleaved(transpPerm);
344 LDBG() << "transpose op: " << transposeOp;
345
346 // 7. Replace packOp by transposeOp.
347 rewriter.replaceOp(packOp, transposeOp->getResults());
348
349 return LowerPackResult{padOp, reshapeOp, transposeOp};
350}
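
// A rough sketch of the rewrite implemented above (schematic IR; shapes,
// permutations and padding amounts hypothetical). A static pack such as
//   %p = linalg.pack %src inner_dims_pos = [1] inner_tiles = [8]
//        into %dest : tensor<5x40xf32> -> tensor<5x5x8xf32>
// is lowered to approximately
//   %padded = tensor.pad %src low[0, 0] high[0, 0] {...} : ... to tensor<5x40xf32>
//   %e = tensor.expand_shape %padded [[0], [1, 2]] : ... into tensor<5x5x8xf32>
//   %t = linalg.transpose ins(%e : ...) outs(%dest : ...) permutation = [0, 1, 2]
// with non-zero high padding when the packed dim is not a multiple of the tile
// size, and a non-identity permutation when outer_dims_perm is present.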
351
352FailureOr<LowerUnPackOpResult>
353linalg::lowerUnPack(RewriterBase &rewriter, linalg::UnPackOp unPackOp,
354 bool lowerUnpadLikeWithExtractSlice) {
355 // TODO: Support Memref UnPackOp. Temporarily return failure.
356 if (!unPackOp.hasPureTensorSemantics())
357 return failure();
358
359 Location loc = unPackOp->getLoc();
360 OpBuilder::InsertionGuard g(rewriter);
361 rewriter.setInsertionPoint(unPackOp);
362
363 auto packedTensorType = cast<RankedTensorType>(unPackOp.getSourceType());
364 int64_t packedRank = packedTensorType.getRank();
365
366 OpFoldResult zero = rewriter.getIndexAttr(0), one = rewriter.getIndexAttr(1);
367 auto destTensorType = cast<RankedTensorType>(unPackOp.getDest().getType());
368 if (lowerUnpadLikeWithExtractSlice && unPackOp.isLikeUnPad()) {
369 // This unpack is just a plain unpad.
370 // Just extract the slice from the higher ranked tensor.
371 ArrayRef<int64_t> destShape = destTensorType.getShape();
372 // The inner dimensions stay the same as the destination tensor, but the
373 // outer ones are additional 1s.
374 SmallVector<OpFoldResult> sizes(packedRank - destShape.size(), one);
375 sizes.append(tensor::getMixedSizes(rewriter, loc, unPackOp.getDest()));
376
377 auto extractSliceOp = tensor::ExtractSliceOp::create(
378 rewriter, loc, destTensorType, unPackOp.getSource(),
379 SmallVector<OpFoldResult>(packedRank, zero), sizes,
380 SmallVector<OpFoldResult>(packedRank, one));
381
382 rewriter.replaceOp(unPackOp, extractSliceOp->getResults());
383
384 return LowerUnPackOpResult{/*emptyOp=*/nullptr, /*transposeOp=*/nullptr,
385 /*reshapeOp=*/nullptr, extractSliceOp};
386 }
387
388 // 1. Compute the permutation vector to shuffle packed shape into the shape
389 // before any outer or inner permutations have been applied.
390 PackingMetadata packingMetadata;
391 SmallVector<int64_t> packedToStripMinedShapePerm =
392 getUnPackInverseSrcPerm(unPackOp, packingMetadata);
393
394 // 2. Compute the stripMinedShape: this is the packed shape without outer and
395 // inner permutations.
396 SmallVector<int64_t> stripMinedShape(packedTensorType.getShape());
397 applyPermutationToVector(stripMinedShape, packedToStripMinedShapePerm);
398
399 // 3. Transpose packedShape to stripMinedShape.
400 RankedTensorType stripMinedTensorType =
401 RankedTensorType::Builder(packedTensorType).setShape(stripMinedShape);
402 RankedTensorType collapsedType = tensor::CollapseShapeOp::inferCollapsedType(
403 stripMinedTensorType, packingMetadata.reassociations);
404
405 // Get dynamic dims from input tensor based on packedToStripMinedShapePerm
406 // permutation.
407 SmallVector<OpFoldResult> dims =
408 tensor::getMixedSizes(rewriter, loc, unPackOp.getSource());
409 applyPermutationToVector(dims, packedToStripMinedShapePerm);
410 auto emptyOp = tensor::EmptyOp::create(rewriter, loc, dims,
411 stripMinedTensorType.getElementType());
412 auto transposeOp =
413 linalg::TransposeOp::create(rewriter, loc, unPackOp.getSource(), emptyOp,
414 packedToStripMinedShapePerm);
415
416 LDBG() << "insertPositions: "
417 << llvm::interleaved(packingMetadata.insertPositions);
418 LDBG() << "packedShape: " << llvm::interleaved(packedTensorType.getShape());
419 LDBG() << "packedToStripMinedShapePerm: "
420 << llvm::interleaved(packedToStripMinedShapePerm);
421 LDBG() << "reassociations: "
422 << llvm::interleaved(llvm::map_range(packingMetadata.reassociations,
423 stringifyReassocIndices));
424 LDBG() << "stripMinedShape: " << llvm::interleaved(stripMinedShape);
425 LDBG() << "collapsed type: " << collapsedType;
426
427 // 4. Collapse from the stripMinedShape to the padded result.
428 auto reshapeOp = tensor::CollapseShapeOp::create(
429 rewriter, loc, collapsedType, transposeOp->getResult(0),
430 packingMetadata.reassociations);
431
432 // 5. ExtractSlice.
433 int64_t destRank = destTensorType.getRank();
434 auto extractSliceOp = tensor::ExtractSliceOp::create(
435 rewriter, loc, destTensorType, reshapeOp->getResult(0),
436 SmallVector<OpFoldResult>(destRank, zero),
437 tensor::getMixedSizes(rewriter, loc, unPackOp.getDest()),
438 SmallVector<OpFoldResult>(destRank, one));
439
440 // 6. Inject a copy to preserve DPS.
441 auto copyOp = linalg::CopyOp::create(
442 rewriter, loc, extractSliceOp->getResult(0), unPackOp.getDest());
443
444 // 7. Replace unPackOp by copyOp.
445 rewriter.replaceOp(unPackOp, copyOp->getResults());
446
447 return LowerUnPackOpResult{emptyOp, transposeOp, reshapeOp, extractSliceOp};
448}
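
// A rough sketch of the rewrite implemented above (schematic IR; shapes
// hypothetical). A non-unpad-like unpack such as
//   %u = linalg.unpack %src inner_dims_pos = [1] inner_tiles = [8]
//        into %dest : tensor<5x5x8xf32> -> tensor<5x37xf32>
// is lowered to approximately
//   %empty = tensor.empty() : tensor<5x5x8xf32>
//   %t = linalg.transpose ins(%src : ...) outs(%empty : ...) permutation = [0, 1, 2]
//   %c = tensor.collapse_shape %t [[0], [1, 2]] : ... into tensor<5x40xf32>
//   %s = tensor.extract_slice %c[0, 0] [5, 37] [1, 1] : ... to tensor<5x37xf32>
//   linalg.copy ins(%s : ...) outs(%dest : ...)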
449
450SmallVector<int64_t>
451PackedOperandsDimList::extractPackedDimsForOperand(int64_t operandPos) {
452 SmallVector<int64_t> res;
453 for (auto &i : spec) {
454 if (!i.packedDimForEachOperand[operandPos].has_value())
455 continue;
456 res.push_back(i.packedDimForEachOperand[operandPos].value());
457 }
458 return res;
459}
460
461SmallVector<OpFoldResult>
462PackedOperandsDimList::extractPackSizesForOperand(int64_t operandPos) {
463 SmallVector<OpFoldResult> res;
464 for (auto &i : spec) {
465 if (!i.packedDimForEachOperand[operandPos].has_value())
466 continue;
467 res.push_back(i.packedSize);
468 }
469 return res;
470}
471
472/// Implement packing of a single LinalgOp by `packedSizes`.
473/// There must be one packedSizes entry per `linalgOp` iterator.
474/// Return the packed Linalg op on success, failure otherwise.
475FailureOr<PackResult> linalg::pack(RewriterBase &rewriter,
476 linalg::LinalgOp linalgOp,
477 ArrayRef<OpFoldResult> packedSizes) {
478 if (packedSizes.size() != linalgOp.getNumLoops()) {
479 return rewriter.notifyMatchFailure(linalgOp,
480 "incorrect number of pack sizes");
481 }
482
483 Location loc = linalgOp->getLoc();
484 SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
485 SmallVector<utils::IteratorType> iteratorTypes =
486 linalgOp.getIteratorTypesArray();
487 LDBG() << "Start packing: " << linalgOp;
488 LDBG() << "maps: " << llvm::interleaved(indexingMaps);
489 LDBG() << "iterators: " << llvm::interleaved(iteratorTypes);
490
491 SmallVector<linalg::PackOp> packOps;
492 SmallVector<linalg::UnPackOp> unPackOps;
493 // Step 1. Pack each dim of the LinalgOp metadata by packedSizes[i].
494 PackedOperandsDimList listOfPackedOperandsDim;
495 for (int64_t i = 0, e = packedSizes.size(); i < e; ++i) {
496 std::optional<int64_t> maybeConstant = getConstantIntValue(packedSizes[i]);
497 // Skip tile sizes explicitly set to 0.
498 if (maybeConstant.has_value() && maybeConstant.value() == 0)
499 continue;
500
501 PackedOperandsDim packedOperandsDims;
502 packedOperandsDims.packedSize = packedSizes[i];
503 FailureOr<SmallVector<std::optional<int64_t>>>
504 maybePackedDimForEachOperand =
505 packLinalgMetadataOnce(indexingMaps, iteratorTypes, i);
506 if (failed(maybePackedDimForEachOperand))
507 return failure();
508 packedOperandsDims.packedDimForEachOperand = *maybePackedDimForEachOperand;
509
510 LDBG() << "++++ After pack size #" << i << ": " << packedSizes[i];
511 LDBG() << "maps: " << llvm::interleaved(indexingMaps);
512 LDBG() << "iterators: " << llvm::interleaved(iteratorTypes);
513 LDBG() << "packedDimForEachOperand: "
514 << llvm::interleaved(packedOperandsDims.packedDimForEachOperand);
515
516 listOfPackedOperandsDim.pushBack(std::move(packedOperandsDims));
517 }
518
519 // Step 2. Propagate packing to all LinalgOp operands.
520 SmallVector<Value> inputsAndInits, results;
521 SmallVector<OpOperand *> initOperands =
522 llvm::to_vector(llvm::make_pointer_range(linalgOp.getDpsInitsMutable()));
523 SmallVector<OpOperand *> inputOperands = linalgOp.getDpsInputOperands();
524 for (const auto &operandsList : {inputOperands, initOperands}) {
525 for (OpOperand *opOperand : operandsList) {
526 int64_t pos = opOperand->getOperandNumber();
527 Value operand = opOperand->get();
528 SmallVector<int64_t> innerPos =
529 listOfPackedOperandsDim.extractPackedDimsForOperand(pos);
530 SmallVector<OpFoldResult> innerPackSizes =
531 listOfPackedOperandsDim.extractPackSizesForOperand(pos);
532 LDBG() << "operand: " << operand;
533 LDBG() << "innerPos: " << llvm::interleaved(innerPos);
534 LDBG() << "innerPackSizes: " << llvm::interleaved(innerPackSizes);
535 if (innerPackSizes.empty()) {
536 inputsAndInits.push_back(operand);
537 continue;
538 }
539 Value dest = linalg::PackOp::createDestinationTensor(
540 rewriter, loc, operand, innerPackSizes, innerPos,
541 /*outerDimsPerm=*/{});
542 ShapedType operandType = cast<ShapedType>(operand.getType());
543 bool areConstantTiles =
544 llvm::all_of(innerPackSizes, [](OpFoldResult tile) {
545 return getConstantIntValue(tile).has_value();
546 });
547 if (areConstantTiles && operandType.hasStaticShape() &&
548 !linalg::PackOp::requirePaddingValue(
549 operandType.getShape(), innerPos,
550 cast<ShapedType>(dest.getType()).getShape(), {},
551 innerPackSizes)) {
552 packOps.push_back(linalg::PackOp::create(rewriter, loc, operand, dest,
553 innerPos, innerPackSizes));
554 } else {
555 // TODO: value of the padding attribute should be determined by
556 // consumers.
557 auto zeroAttr =
558 rewriter.getZeroAttr(getElementTypeOrSelf(dest.getType()));
559 Value zero = arith::ConstantOp::create(rewriter, loc, zeroAttr);
560 packOps.push_back(linalg::PackOp::create(
561 rewriter, loc, operand, dest, innerPos, innerPackSizes, zero));
562 }
563 inputsAndInits.push_back(packOps.back().getResult());
564 }
565 }
566
567 // Step 3. Build the packed op, use the type of `inits` as result types.
568 ValueRange inputs =
569 ValueRange{inputsAndInits}.take_front(linalgOp.getNumDpsInputs());
570 ValueRange inits =
571 ValueRange{inputsAndInits}.take_back(linalgOp.getNumDpsInits());
572 auto packedLinalgOp =
573 linalg::GenericOp::create(rewriter, linalgOp.getLoc(), inits.getTypes(),
574 inputs, inits, indexingMaps, iteratorTypes);
575 packedLinalgOp.getRegion().takeBody(linalgOp->getRegion(0));
576
577 // Step 4. Propagate packing to all the op results.
578 for (OpResult result : packedLinalgOp->getResults()) {
579 int64_t resultNum = result.getResultNumber();
580 linalg::PackOp maybePackedInit =
581 inits[resultNum].getDefiningOp<linalg::PackOp>();
582 if (!maybePackedInit) {
583 results.push_back(result);
584 continue;
585 }
586 // Build the symmetrical UnPackOp to the existing PackOp.
587 unPackOps.push_back(linalg::UnPackOp::create(
588 rewriter, packedLinalgOp->getLoc(), result, maybePackedInit.getSource(),
589 maybePackedInit.getInnerDimsPos(), maybePackedInit.getMixedTiles()));
590 results.push_back(unPackOps.back().getResult());
591 }
592
593 // Step 5. Replace `linalgOp`.
594 rewriter.replaceOp(linalgOp, results);
595
596 // Return packedLinalgOp.
597 return PackResult{packOps,
598 cast<linalg::LinalgOp>(packedLinalgOp.getOperation()),
599 unPackOps};
600}
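
// Illustrative usage sketch (hypothetical driver code; assumes a rewriter and
// a matmul-like `linalgOp` with loop order (m, n, k) are in scope):
//
//   SmallVector<OpFoldResult> packedSizes = {rewriter.getIndexAttr(8),
//                                            rewriter.getIndexAttr(16),
//                                            rewriter.getIndexAttr(32)};
//   FailureOr<PackResult> res = linalg::pack(rewriter, linalgOp, packedSizes);
//
// On success, `res->packOps` holds one linalg.pack per packed operand,
// `res->packedLinalgOp` is the higher-rank generic operating on the packed
// operands, and `res->unPackOps` holds the linalg.unpack ops restoring the
// original layout of the results.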
601
602//===----------------------------------------------------------------------===//
603// packTranspose transformation.
604//===----------------------------------------------------------------------===//
605
606/// Return a copy of `tensorType` after permutation by `permutationVector`.
607// Note: Should be a new method of MemRef/RankedTensor/VectorType::Builder
608// but this would introduce a dependence on Dialect in IR.
609// TODO: Restructure.
610static RankedTensorType permuteShape(RankedTensorType tensorType,
611 ArrayRef<int64_t> permutationVector) {
612 SmallVector<int64_t> shape(tensorType.getShape());
613 applyPermutationToVector(shape, permutationVector);
614 return RankedTensorType::Builder(tensorType).setShape(shape);
615}
616
617/// Return a new GenericOp obtained by transposing opOperand by the permutation
618/// vector:
619/// - the corresponding indexing map is transposed by `permutation`
620/// - the corresponding operand value is replaced by `transposedValue`
621/// `linalgOp` is replaced by the return op in the process.
622/// Asserts that `transposedValue` is of the proper transposed ShapedType.
623static linalg::LinalgOp transposeOneLinalgOperandAndReplace(
624 RewriterBase &rewriter, LinalgOp linalgOp, OpOperand &opOperand,
625 ArrayRef<int64_t> permutation, Value transposedValue) {
626 // Sanity check the operand.
627 assert(linalgOp == opOperand.getOwner() && "linalg op must own the operand");
628
629 // Sanity check of the expected transposed tensor type.
630 auto tensorType = permuteShape(
631 cast<RankedTensorType>(opOperand.get().getType()), permutation);
632 (void)tensorType;
633 assert(tensorType == transposedValue.getType() &&
634 "unexpected tensor type mismatch");
635
636 // Compute the transposed indexing map.
637 // Sigh unsigned pollution.
638 SmallVector<unsigned> tmpTransposition =
639 llvm::map_to_vector(permutation, [](int64_t i) -> unsigned { return i; });
640 AffineMap permutationMap =
641 AffineMap::getPermutationMap(tmpTransposition, rewriter.getContext());
642 AffineMap transposedMap =
643 permutationMap.compose(linalgOp.getMatchingIndexingMap(&opOperand));
644
645 // Set the transposed indexing map in the proper position.
646 SmallVector<AffineMap> indexingMaps = linalgOp.getIndexingMapsArray();
647 indexingMaps[linalgOp.getIndexingMapIndex(&opOperand)] = transposedMap;
648 // Set the transposedValue in the proper operand position.
649 SmallVector<Value> operands = linalgOp->getOperands();
650 operands[opOperand.getOperandNumber()] = transposedValue;
651
652 ValueRange operandsRef(operands);
653 auto transposedGenericOp = linalg::GenericOp::create(
654 rewriter,
655 /*location=*/linalgOp->getLoc(),
656 /*resultTensorTypes=*/
657 operandsRef.drop_front(linalgOp.getNumDpsInputs()).getTypes(),
658 /*inputs=*/operandsRef.take_front(linalgOp.getNumDpsInputs()),
659 /*outputs=*/operandsRef.drop_front(linalgOp.getNumDpsInputs()),
660 /*indexingMaps=*/indexingMaps,
661 /*iteratorTypes=*/linalgOp.getIteratorTypesArray());
662 transposedGenericOp.getRegion().takeBody(linalgOp->getRegion(0));
663 rewriter.replaceOp(linalgOp, transposedGenericOp->getResults());
664
665 return cast<linalg::LinalgOp>(transposedGenericOp.getOperation());
666}
667
668FailureOr<PackTransposeResult>
669linalg::packTranspose(RewriterBase &rewriter, linalg::PackOp packOp,
670 linalg::LinalgOp linalgOp, linalg::UnPackOp maybeUnPackOp,
671 ArrayRef<int64_t> outerPerm,
672 ArrayRef<int64_t> innerPerm) {
673 Location loc = linalgOp.getLoc();
674
675 // Step 1. Transpose packOp.
676 rewriter.setInsertionPoint(packOp);
677 linalg::PackOp transposedPackOp =
678 packOp.createTransposedClone(rewriter, loc, innerPerm, outerPerm);
679
680 if (packOp.hasPureBufferSemantics() || !packOp.getResult().hasOneUse())
681 return rewriter.notifyMatchFailure(linalgOp, "expect single pack use");
682
683 OpOperand &packUse = *packOp->getUses().begin();
684 if (packUse.getOwner() != linalgOp) {
685 return rewriter.notifyMatchFailure(
686 linalgOp, "not a single use by the LinalgOp target");
687 }
688 if (maybeUnPackOp &&
689 (!linalgOp.isDpsInit(&packUse) ||
690 maybeUnPackOp.getSource() != linalgOp.getTiedOpResult(&packUse))) {
691 return rewriter.notifyMatchFailure(linalgOp,
692 "not produced by the LinalgOp target");
693 }
694
695 // Step 2. Transpose linalgOp.
696 // transposedPackOp.getOuterDimsPerm() may be empty, in which case it is the
697 // identity. Don't rely on it.
698 int64_t numLeadingDims = packOp.getSourceRank();
699 int64_t numTrailingDims = packOp.getInnerDimsPos().size();
700 // Step 2.a. Compute the permutation on the whole operand.
701 // The leading part just reuses the outerPerm.
702 SmallVector<int64_t> permutation(outerPerm);
703 if (permutation.empty())
704 llvm::append_range(permutation, llvm::seq<int64_t>(0, numLeadingDims));
705 // Trailing part needs to reindex positions by `numLeadingDims`.
706 if (innerPerm.empty()) {
707 llvm::append_range(
708 permutation,
709 llvm::seq<int64_t>(numLeadingDims, numLeadingDims + numTrailingDims));
710 } else {
711 llvm::append_range(permutation,
712 llvm::map_range(innerPerm, [&](int64_t pos) {
713 return numLeadingDims + pos;
714 }));
715 }
716 if (!isPermutationVector(permutation))
717 return rewriter.notifyMatchFailure(linalgOp, "invalid permutation");
718
719 // Step 2.b. Save the transposedPackUse operand number in case we need to
720 // get the tied OpResult after `linalgOp` has been replaced.
721 int64_t packUseOperandNumber = packUse.getOperandNumber();
722 // Step 2.c. Actually perform the transposition.
723 rewriter.setInsertionPoint(linalgOp);
724 linalg::LinalgOp transposedLinalgOp = transposeOneLinalgOperandAndReplace(
725 rewriter, linalgOp, packUse, permutation, transposedPackOp.getResult());
726
727 // Step 3. Maybe transpose unPackOp.
728 linalg::UnPackOp transposedUnPackOp;
729 if (maybeUnPackOp) {
730 OpOperand &opOperand =
731 transposedLinalgOp->getOpOperand(packUseOperandNumber);
732 OpResult transposedResult = transposedLinalgOp.getTiedOpResult(&opOperand);
733 rewriter.setInsertionPoint(maybeUnPackOp);
734 transposedUnPackOp = maybeUnPackOp.createTransposedClone(
735 rewriter, loc, transposedResult, innerPerm, outerPerm);
736
737 rewriter.replaceOp(maybeUnPackOp, transposedUnPackOp->getResults());
738 }
739
740 // Step 4. Finally, replace packOp now that we don't need it anymore.
741 if (packOp.hasPureTensorSemantics())
742 rewriter.replaceOp(packOp, transposedPackOp->getResults());
743 else
744 rewriter.eraseOp(packOp);
745
746 return PackTransposeResult{transposedPackOp, transposedLinalgOp,
747 transposedUnPackOp};
748}
749
750//===----------------------------------------------------------------------===//
751// packMatmulGreedily transformation.
752//===----------------------------------------------------------------------===//
753
754/// Pack a LinalgOp by greedily inferring matmul dimensions (m, n, k) where m
755/// and n are proper parallel dimensions and k is a proper reduction
756/// dimension. Packing occurs by rewriting the op as a linalg.generic and
757/// calling linalg::pack with `mnkPackedSizes`. The order of the packed
758/// dimensions is customizable: `mnkOrder` is a permutation of {0, 1, 2}
759/// that reorders {m, n, k} into one of the 6 possible forms. The outer
760/// dimensions of the operands are not permuted at this time; this is left for
761/// future work.
762FailureOr<PackResult>
763linalg::packMatmulGreedily(RewriterBase &rewriter, LinalgOp linalgOp,
764 ArrayRef<OpFoldResult> mnkPackedSizes,
765 ArrayRef<int64_t> mnkPaddedSizesNextMultipleOf,
766 ArrayRef<int64_t> mnkOrder) {
767 assert(mnkPackedSizes.size() == 3 && "unexpected num of packing sizes");
768 assert((mnkPaddedSizesNextMultipleOf.empty() ||
769 mnkPaddedSizesNextMultipleOf.size() == 3) &&
770 "num of packing sizes next multiple should be empty or of size 3");
771 assert(mnkOrder.size() == 3 && "unexpected mnkOrder size");
772 assert(isPermutationVector(mnkOrder) && "expected a permutation");
773
774 int64_t numLoops = linalgOp.getNumLoops();
775 if (numLoops <= 2) {
776 LDBG() << "need 3+ loops to find a matmul to pack, got " << numLoops
777 << " in: " << linalgOp;
778 return rewriter.notifyMatchFailure(
779 linalgOp, "need 3+ loops to find a matmul to pack");
780 }
781
782 // Locally adjust the desired iterator position of mnk and packing sizes.
783 int64_t numPackedDims = mnkPackedSizes.size();
784 SmallVector<int64_t> mmnnkkPos(numPackedDims);
785 for (int64_t i = 0, e = numPackedDims; i < e; ++i)
786 mmnnkkPos[i] = numLoops - numPackedDims + mnkOrder[i];
787 SmallVector<OpFoldResult> packedSizes(numPackedDims);
788 for (int64_t i = 0, e = numPackedDims; i < e; ++i)
789 packedSizes[mnkOrder[i]] = mnkPackedSizes[i];
790 SmallVector<int64_t> paddedSizesNextMultipleOf(numPackedDims);
791 for (int64_t i = 0, e = numPackedDims; i < e; ++i) {
792 paddedSizesNextMultipleOf[mnkOrder[i]] =
793 mnkPaddedSizesNextMultipleOf.empty() ? 0
794 : mnkPaddedSizesNextMultipleOf[i];
795 }
796
797 // 1. Infer dims that are important for matmul.
798 FailureOr<ContractionDimensions> maybeDimensions =
799 inferContractionDims(linalgOp);
800 if (failed(maybeDimensions)) {
801 LDBG() << "couldn't infer matmul iterators in: " << linalgOp;
802 return rewriter.notifyMatchFailure(linalgOp,
803 "couldn't infer matmul iterators");
804 }
805
806 // 2. Normalize linalgOp to a kmn-matmul-like form with [red, par, par] as
807 // the most minor iterators. In cases with multiple options for m, n, k,
808 // bias towards the most minor embedding.
809 // If we wanted a different normalization order, this is where a heuristic
810 // would have to be plugged in.
811 int64_t mPos = maybeDimensions->m.back(), nPos = maybeDimensions->n.back(),
812 kPos = maybeDimensions->k.back();
813 LDBG() << "Start packing generic op greedily with (m@" << mPos << ", n@"
814 << nPos << ", k@" << kPos << "): " << linalgOp;
815
816 // 2.a. Rewrite as a generic.
817 auto genericOp = dyn_cast<GenericOp>(linalgOp.getOperation());
818 if (!genericOp) {
819 FailureOr<GenericOp> generalizeResult =
820 generalizeNamedOp(rewriter, linalgOp);
821 assert(succeeded(generalizeResult) && "unexpected failure generalizing op");
822 genericOp = *generalizeResult;
823 }
824
825 // 2.b. Interchange to move the dimensions (k, m, n) as most-minor
826 // iterators. Note that this only normalizes the iteration order and does
827 // not change the indexings of any operand.
828 SmallVector<int64_t> permutation =
829 computePermutationVector(numLoops, {mPos, nPos, kPos}, mmnnkkPos);
830 LDBG() << "perm: " << llvm::interleaved(permutation);
831 // Sign .. unsigned pollution.
832 SmallVector<unsigned> unsignedPerm(permutation.begin(), permutation.end());
833 FailureOr<GenericOp> interchangeResult =
834 interchangeGenericOp(rewriter, genericOp, unsignedPerm);
835 assert(succeeded(interchangeResult) && "unexpected failure interchanging op");
836 genericOp = *interchangeResult;
837 LDBG() << "Generalized Op to pack: " << genericOp;
838
839 // At this point, the op iterators are normalized to {leading, k, m, n}.
840 // The layouts induced by packing will always be:
841 // - LHS{leading_lhs, kk, mm}
842 // - RHS{leading_rhs, kk, nn}
843 // - RES{leading_res, mm, nn}
844 // If we wanted to change the packed order, we would reorder (k, m, n) to
845 // something else above.
846 //
847 // Additional permutations of the outer dims of the operands (i.e.
848 // leading_lhs, leading_rhs and leading_res) could follow by computing the
849 // desired outerPerm for each operand.
850 // This is left for future work.
851
852 // TODO: this creates too much IR, go use reifyResultShapes.
853 SmallVector<Range, 4> loopRanges =
854 cast<LinalgOp>(genericOp.getOperation())
855 .createLoopRanges(rewriter, genericOp.getLoc());
856
857 // Add leading zeros to match numLoops; we only pack the last 3 dimensions
858 // post interchange.
859 LDBG() << "paddedSizesNextMultipleOf: "
860 << llvm::interleaved(paddedSizesNextMultipleOf);
861 LDBG() << "loopRanges: "
862 << llvm::interleaved(
863 llvm::map_range(loopRanges, [](Range r) { return r.size; }));
864 SmallVector<OpFoldResult> adjustedPackedSizes(numLoops - packedSizes.size(),
865 rewriter.getIndexAttr(0));
866 for (int64_t i = 0, e = numPackedDims; i < e; ++i) {
867 if (paddedSizesNextMultipleOf[i] == 0) {
868 adjustedPackedSizes.push_back(packedSizes[i]);
869 continue;
870 }
871 AffineExpr d0, s0;
872 bindDims(rewriter.getContext(), d0);
873 bindSymbols(rewriter.getContext(), s0);
874 adjustedPackedSizes.push_back(affine::makeComposedFoldedAffineApply(
875 rewriter, genericOp->getLoc(), d0.ceilDiv(s0) * s0,
876 {loopRanges[adjustedPackedSizes.size()].size,
877 rewriter.getIndexAttr(paddedSizesNextMultipleOf[i])}));
878 }
879 LDBG() << "adjustedPackedSizes: " << llvm::interleaved(adjustedPackedSizes);
880
881 // TODO: If we wanted to give the genericOp a name after packing, after
882 // calling `pack` would be a good time. One would still need to check that
883 // `containsMostMinorMatmul(packingRes->packedLinalgOp)` is true, since we
884 // also allow degenerate matmul cases (i.e. matvec, dot).
885 return pack(rewriter, genericOp, adjustedPackedSizes);
886}
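
// Worked example for the size bookkeeping above (illustrative): with
// mnkPackedSizes = {8, 16, 32} and mnkOrder = {1, 2, 0}, the reordered sizes
// become packedSizes[mnkOrder[i]] = mnkPackedSizes[i], i.e. {32, 8, 16}, and
// the inferred (m, n, k) iterators are interchanged to the trailing
// numLoops - 3 + mnkOrder positions before delegating to linalg::pack. When an
// entry of mnkPaddedSizesNextMultipleOf is non-zero, the corresponding packing
// size is replaced by the loop range rounded up to that multiple
// (ceildiv * multiple) instead of the user-provided size.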
887
888//===----------------------------------------------------------------------===//
889// Transformations exposed as rewrite patterns.
890//===----------------------------------------------------------------------===//
891
892LinalgTilingOptions &
893mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
894 assert(!tileSizeComputationFunction && "tile sizes already set");
895 SmallVector<int64_t, 4> tileSizes(ts);
896 tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
897 OpBuilder::InsertionGuard guard(b);
898 b.setInsertionPointToStart(
899 &op->getParentOfType<func::FuncOp>().getBody().front());
900 return llvm::map_to_vector<4>(tileSizes, [&](int64_t s) {
901 Value v = arith::ConstantIndexOp::create(b, op->getLoc(), s);
902 return v;
903 });
904 };
905 return *this;
906}
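
// Illustrative usage sketch (hypothetical caller code): constant tile sizes
// are captured by value and materialized as arith.constant index ops at the
// start of the enclosing function when the tiling driver invokes the computed
// tileSizeComputationFunction, e.g.
//
//   LinalgTilingOptions options;
//   options.setTileSizes({8, 16, 0});  // a size of 0 leaves that loop untiled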
907
908LogicalResult CopyVectorizationPattern::matchAndRewrite(
909 memref::CopyOp copyOp, PatternRewriter &rewriter) const {
910 return vectorizeCopy(rewriter, copyOp);
911}
912
913/// Fill `dest` using a FillOp with the constant padding value if possible.
914/// Otherwise, generate a tensor::GenerateOp.
915Value DecomposePadOpPattern::createFillOrGenerateOp(
916 RewriterBase &rewriter, tensor::PadOp padOp, Value dest,
917 const SmallVector<Value> &dynSizes) const {
918 auto padValue = padOp.getConstantPaddingValue();
919 if (padValue) {
920 // Move the padding value defined inside the PadOp block to outside.
921 if (padValue.getParentBlock() == &padOp.getRegion().front())
922 rewriter.moveOpBefore(padValue.getDefiningOp(), padOp);
923 return FillOp::create(rewriter, padOp.getLoc(), padValue, dest).result();
924 }
925
926 // Fill could not be optimized: Lower to tensor::GenerateOp with region.
927 auto generateOp = tensor::GenerateOp::create(rewriter, padOp.getLoc(),
928 padOp.getResultType(), dynSizes);
929 // Copy region to new op.
930 IRMapping bvm;
931 padOp.getRegion().cloneInto(&generateOp.getRegion(), bvm);
932 return generateOp;
933}
934
935LogicalResult
936DecomposePadOpPattern::matchAndRewrite(tensor::PadOp padOp,
937 PatternRewriter &rewriter) const {
938 // Given an OpFoldResult, return an index-typed value.
939 auto getIdxValue = [&](OpFoldResult ofr) {
940 if (auto val = llvm::dyn_cast_if_present<Value>(ofr))
941 return val;
942 return arith::ConstantIndexOp::create(
943 rewriter, padOp.getLoc(),
944 cast<IntegerAttr>(cast<Attribute>(ofr)).getInt())
945 .getResult();
946 };
947
948 auto resultType = padOp.getResultType();
949 // Compute size of EmptyOp. Any combination of static/dynamic is supported.
950 SmallVector<Value> dynSizes;
951 SmallVector<int64_t> staticSizes;
952 for (unsigned dim = 0; dim < resultType.getRank(); ++dim) {
953 if (resultType.isDynamicDim(dim)) {
954 auto srcSize = getIdxValue(tensor::getMixedSize(rewriter, padOp.getLoc(),
955 padOp.getSource(), dim));
956 // Add low and high padding value.
957 auto plusLow = rewriter.createOrFold<arith::AddIOp>(
958 padOp.getLoc(), srcSize, getIdxValue(padOp.getMixedLowPad()[dim]));
959 auto plusHigh = rewriter.createOrFold<arith::AddIOp>(
960 padOp.getLoc(), plusLow, getIdxValue(padOp.getMixedHighPad()[dim]));
961 dynSizes.push_back(plusHigh);
962 }
963 staticSizes.push_back(resultType.getDimSize(dim));
964 }
965
966 // Init tensor and fill it with padding.
967 Value emptyTensor =
968 tensor::EmptyOp::create(rewriter, padOp.getLoc(), staticSizes,
969 resultType.getElementType(), dynSizes);
970 Value fill = createFillOrGenerateOp(rewriter, padOp, emptyTensor, dynSizes);
971
972 // Generate an InsertSliceOp for copying the PadOp source.
973 auto sourceType = padOp.getSourceType();
974 // Compute size of source of tensor::PadOp.
975 SmallVector<OpFoldResult> srcSizes =
976 tensor::getMixedSizes(rewriter, padOp.getLoc(), padOp.getSource());
977 // Strides of InsertSliceOp are all 1.
978 SmallVector<OpFoldResult> strides(sourceType.getRank(),
979 rewriter.getIndexAttr(1));
980 rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
981 padOp, padOp.getSource(), fill, padOp.getMixedLowPad(), srcSizes,
982 strides);
983
984 return success();
985}
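
// A rough sketch of the rewrite implemented above (schematic IR; shapes and
// padding amounts hypothetical):
//   %p = tensor.pad %src low[0, 1] high[0, 3] { tensor.yield %cst }
//        : tensor<4x4xf32> to tensor<4x8xf32>
// becomes approximately
//   %empty = tensor.empty() : tensor<4x8xf32>
//   %fill  = linalg.fill ins(%cst : f32) outs(%empty : tensor<4x8xf32>)
//   %ins   = tensor.insert_slice %src into %fill[0, 1] [4, 4] [1, 1]
// Non-constant padding values fall back to a tensor.generate that clones the
// original pad region instead of the fill.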
986
987LogicalResult ExtractSliceOfPadTensorSwapPattern::matchAndRewrite(
988 tensor::ExtractSliceOp sliceOp, PatternRewriter &rewriter) const {
989 if (!sliceOp.hasUnitStride())
990 return failure();
991
992 auto padOp = sliceOp.getSource().getDefiningOp<tensor::PadOp>();
993 if (!padOp)
994 return failure();
995
996 bool zeroSliceGuard = true;
997 if (controlFn) {
998 if (std::optional<bool> control = controlFn(sliceOp))
999 zeroSliceGuard = *control;
1000 else
1001 return failure();
1002 }
1003
1004 FailureOr<TilingResult> tilingResult =
1005 tensor::bubbleUpPadSlice(rewriter, padOp, sliceOp.getMixedOffsets(),
1006 sliceOp.getMixedSizes(), zeroSliceGuard);
1007 if (failed(tilingResult))
1008 return failure();
1009
1010 RankedTensorType sourceType = sliceOp.getSourceType();
1011 RankedTensorType resultType = sliceOp.getResultType();
1012
1013 // If the extract_slice is not rank-reduced, all shapes are static and the
1014 // data source is actually used. Rewrite into pad(extract_slice(x)).
1015 if (sourceType.getRank() == resultType.getRank()) {
1016 rewriter.replaceOp(sliceOp, tilingResult->tiledValues);
1017 return success();
1018 }
1019
1020 // Handle rank-reduced slice by creating another extract_slice op.
1021 Value rankReduced = tensor::createCanonicalRankReducingExtractSliceOp(
1022 rewriter, sliceOp.getLoc(), tilingResult->tiledValues[0], resultType);
1023
1024 rewriter.replaceOp(sliceOp, rankReduced);
1025 return success();
1026}
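
// A rough sketch of the swap implemented above (schematic; offsets and sizes
// hypothetical): an extract_slice of a padded tensor,
//   %s = tensor.extract_slice (tensor.pad %x ...) [offs][sizes][1, 1]
// is rewritten so that only the needed part of %x is padded,
//   %s = tensor.pad (tensor.extract_slice %x [offs'][sizes'][1, 1]) low[...] high[...]
// where the new offsets, sizes and padding amounts are computed by
// tensor::bubbleUpPadSlice; a slice that reads only padding may be
// materialized directly from the padding value when zeroSliceGuard permits it.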
1027
1028/// If padding value is set, returns a tensor.pad Op for the source tensor,
1029/// with the output shape matching the output of `packOp`. Otherwise, returns
1030/// the source directly.
1031///
1032/// This method assumes that all outer dims for this pack Op are 1.
1033static Value getPackOpSourceOrPaddedSource(OpBuilder &builder,
1034 linalg::PackOp packOp) {
1035 Value input = packOp.getSource();
1036 // TODO: Support Memref PackOp. Temporarily return just Op Source.
1037 if (!packOp.hasPureTensorSemantics())
1038 return input;
1039
1040 if (!packOp.getPaddingValue()) {
1041 return input;
1042 }
1043
1044 assert(llvm::all_of(packOp.getAllOuterDims(),
1045 [](int64_t val) { return val == 1; }) &&
1046 "some outer dims are != 1");
1047
1048 Location loc = packOp.getLoc();
1049 ShapedType inputType = packOp.getSourceType();
1050 int64_t inputRank = inputType.getRank();
1051
1052 DenseMap<int64_t, OpFoldResult> tileAndPosMapping =
1053 packOp.getDimAndTileMapping();
1054
1055 // The sizes of dynamic tiles
1056 SmallVector<Value> dynamicTileSizes;
1057
1058 // Collect dims for the padded shape.
1059 SmallVector<int64_t> paddedShape;
1060 for (int64_t dimIdx = 0; dimIdx < inputRank; ++dimIdx) {
1061 // 1. Non-tiled outer dims.
1062 // These dims should be 1 and we simply preserve them.
1063 if (!tileAndPosMapping.count(dimIdx)) {
1064 int64_t inputDimSize = inputType.getDimSize(dimIdx);
1065 assert(inputDimSize == 1 &&
1066 "with all outer dims == 1, this non-tiled input dim should be 1!");
1067 paddedShape.push_back(inputDimSize);
1068 continue;
1069 }
1070
1071 // 2. Tiled outer dims
1072 // As all outer dims == 1, it is safe to use the tile size for the padded
1073 // shape.
1074 OpFoldResult tileSizeForDim = tileAndPosMapping.lookup(dimIdx);
1075
1076 // 2.1 Static tile sizes
1077 std::optional<int64_t> cstTileSize = getConstantIntValue(tileSizeForDim);
1078 if (cstTileSize.has_value()) {
1079 paddedShape.push_back(cstTileSize.value());
1080 continue;
1081 }
1082
1083 // 2.2 Dynamic tile sizes
1084 paddedShape.push_back(ShapedType::kDynamic);
1085
1086 // Get the value that holds the dynamic size.
1087 dynamicTileSizes.push_back(llvm::dyn_cast<Value>(tileSizeForDim));
1088 }
1089 auto resultType =
1090 RankedTensorType::get(paddedShape, inputType.getElementType());
1091 return tensor::createPadHighOp(resultType, input, packOp.getPaddingValue(),
1092 /*nofold=*/false, loc, builder,
1093 dynamicTileSizes);
1094}
1095
1096// Normalizes a permutation on a higher rank space to its actual size, e.g.
1097// perm = [1, 4, 2]
1098// becomes
1099// norm = [0, 2, 1]
1100static SmallVector<int64_t>
1101getPackUnpackNormalizedPerm(int64_t rank, ArrayRef<int64_t> perm) {
1102 constexpr int64_t kNonTiledMarker = -1;
1103 SmallVector<int64_t> vec(rank, kNonTiledMarker);
1104 for (auto [index, value] : llvm::enumerate(perm))
1105 vec[value] = index;
1106 SmallVector<int64_t> normalizedPerm = llvm::filter_to_vector(
1107 vec, [&](int64_t v) { return v != kNonTiledMarker; });
1108 // This inverts the permutation in addition to normalizing, so invert back.
1109 return invertPermutationVector(normalizedPerm);
1110}
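
// Worked trace of the example above (assuming rank = 5, perm = [1, 4, 2]):
//   scatter: vec = [-1, 0, 2, -1, 1]
//   filter:  [0, 2, 1]   (drop the kNonTiledMarker entries)
//   invert:  [0, 2, 1]   (this particular permutation is its own inverse)
// which yields the normalized permutation norm = [0, 2, 1].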
1111
1112// Gets the normalized permutation implied by innerDimsPos and outerDimsPerm
1113// assuming rank reduction of unit outer dims.
1114static SmallVector<int64_t>
1115getPackUnpackRankReducedPerm(ArrayRef<int64_t> shape,
1116 ArrayRef<int64_t> innerDimsPos,
1117 ArrayRef<int64_t> outerDimsPerm) {
1118 SmallVector<int64_t> rankReducedOuterDimsPerm;
1119 SmallVector<int64_t> outerDims;
1120 SmallVector<int64_t> innerDims;
1121 int64_t dim = 0;
1122 int64_t unpackedRank = shape.size();
1123 for (auto i : llvm::seq<unsigned>(0, unpackedRank)) {
1124 if (llvm::is_contained(innerDimsPos, i)) {
1125 innerDims.push_back(dim++);
1126 continue;
1127 }
1128 if (shape[i] == 1)
1129 continue;
1130 outerDims.push_back(dim++);
1131 if (!outerDimsPerm.empty())
1132 rankReducedOuterDimsPerm.push_back(outerDimsPerm[i]);
1133 }
1134
1135 // Get the position of the inner dims after permutation.
1136 SmallVector<int64_t> innerPerm =
1137 getPackUnpackNormalizedPerm(unpackedRank, innerDimsPos);
1138 applyPermutationToVector<int64_t>(innerDims, innerPerm);
1139
1140 // Ditto for the outer dims.
1141 SmallVector<int64_t> perm = outerDims;
1142
1143 rankReducedOuterDimsPerm =
1144 getPackUnpackNormalizedPerm(unpackedRank, rankReducedOuterDimsPerm);
1145 if (!rankReducedOuterDimsPerm.empty())
1146 applyPermutationToVector<int64_t>(perm, rankReducedOuterDimsPerm);
1147
1148 // The tile always ends up as the inner most dims after packing.
1149 perm.append(innerDims);
1150
1151 return perm;
1152}
1153
1154LogicalResult DecomposeOuterUnitDimsPackOpPattern::matchAndRewrite(
1155 linalg::PackOp packOp, PatternRewriter &rewriter) const {
1156 // TODO: Support Memref PackOp. Temporarily return failure.
1157 if (!packOp.hasPureTensorSemantics())
1158 return failure();
1159
1160 if (llvm::any_of(packOp.getTiledOuterDims(),
1161 [](int64_t dim) { return dim != 1; })) {
1162 return rewriter.notifyMatchFailure(
1163 packOp, "not all outer dimensions of the result are 1s");
1164 }
1165
1166 ArrayRef<int64_t> innerDimsPos = packOp.getInnerDimsPos();
1167 auto outerDimsPerm = packOp.getOuterDimsPerm();
1168
1169 // Verify that there are no:
1170 // * non-unit + un-tiled-outer-dims,
1171 // that are permuted. Supporting such cases would require refining the logic
1172 // that generates the Transpose Op.
1173 if (!llvm::all_of(outerDimsPerm, [&innerDimsPos, &packOp,
1174 prev = int64_t(0)](int64_t dim) mutable {
1175 // Skip tiled dims - these can be permuted.
1176 if (llvm::is_contained(innerDimsPos, dim))
1177 return true;
1178
1179 // Check whether this dim has been permuted. Permuting unit dims is fine
1180 // as that's effectively a no-op.
1181 if (dim < prev && (packOp.getResult().getType().getShape()[prev] != 1 ||
1182 packOp.getResult().getType().getShape()[dim] != 1))
1183 return false;
1184
1185 prev = dim;
1186 return true;
1187 })) {
1188 return rewriter.notifyMatchFailure(
1189 packOp, "At least one non-unit and un-tiled outer dim is permuted, "
1190 "this is not supported ATM!");
1191 }
1192
1193 Location loc = packOp.getLoc();
1194
1195 int64_t srcRank = packOp.getSourceRank();
1196
1197 // 1. Get the input that is going to be packed. If the input requires padding,
1198 // add a padding operation and return that as the input.
1199 Value input = getPackOpSourceOrPaddedSource(rewriter, packOp);
1200
1201 // 2. Transpose the input to match the inner tile order:
1202 // %init = tensor.empty()
1203 // %transposed_tile = linalg.transpose ins(%source_or_padded_source),
1204 // outs(%init)
1205 // Assumptions made:
1206 // - All tiled outer dims are 1 - the corresponding transposition order
1207 // doesn't matter, but requires all dim indices to be present.
1208 // - Un-tiled outer dims remain un-permuted.
1209
1210 // 2.1 Get the permutation for linalg.transpose:
1211 // [ untiled-dims, inner-dims-pos ]
1212 // Note, this logic assumes that the untiled dims are not permuted.
1213 SmallVector<int64_t> srcPermForTranspose;
1214 for (int64_t i = 0; i < srcRank; i++) {
1215 // We assume the `k` dimensions of the inner dim position, where `k` is the
1216 // rank of the inner tiling, correspond to the last `k` indices of the
1217 // transpose permutation. This is done by adding the indices not contained
1218 // in the inner dimension position in order from 0 to `n`. Where n is the
1219 // rank of the source tensor. For example if we have a source tensor with
1220 // indices [0, 1, 2, 3] and inner dim position of [3, 0], the remaining
1221 // indices are [1, 2], and the transpose will be [1, 2, 3, 0].
1222 if (llvm::is_contained(innerDimsPos, i))
1223 continue;
1224 srcPermForTranspose.push_back(i);
1225 }
1226 srcPermForTranspose.append(innerDimsPos.begin(), innerDimsPos.end());
1227
1228 // 2.2 Create the init tensor for linalg.transpose with the correct shape:
1229 // [ untiled-dims, tiled-dims ]
1230 ShapedType inputTy = cast<ShapedType>(input.getType());
1231 SmallVector<OpFoldResult> shapeForEmptyOp;
1232 for (int64_t i = 0; i < srcRank; i++) {
1233 if (llvm::is_contained(innerDimsPos, i)) {
1234 // The tiled dims are appended after this loop.
1235 continue;
1236 }
1237 if (inputTy.isStaticDim(i))
1238 shapeForEmptyOp.push_back(rewriter.getIndexAttr(inputTy.getShape()[i]));
1239 else
1240 shapeForEmptyOp.emplace_back(
1241 tensor::DimOp::create(rewriter, loc, input, i).getResult());
1242 }
1243 shapeForEmptyOp.append(packOp.getMixedTiles());
1244
1245 // getMixedTiles() may contain Values pointing to constant ops (as opposed to
1246 // constant attributes with the corresponding value). Replace those with
1247 // attributes. This is to match the behaviour in
1248 // `getPackOpSourceOrPaddedSource`, which replaces constant SSA values with
1249 // attributes.
1250 llvm::transform(shapeForEmptyOp, shapeForEmptyOp.begin(),
1251 [&](OpFoldResult ofr) {
1252 if (auto val = llvm::dyn_cast<Value>(ofr))
1253 return getAsOpFoldResult(val);
1254 return ofr;
1255 });
1256
1257 LDBG() << "Pack permutation: " << packOp;
1258 LDBG() << "perm: " << llvm::interleaved(srcPermForTranspose);
1259 LDBG() << "Shape of empty tensor: " << llvm::interleaved(shapeForEmptyOp);
1260
1261 Value empty = tensor::EmptyOp::create(
1262 rewriter, loc, shapeForEmptyOp, packOp.getSourceType().getElementType());
1263
1264 // 2.3 Create linalg.transpose
1265 auto transposedOp = linalg::TransposeOp::create(rewriter, loc, input, empty,
1266 srcPermForTranspose);
1267
1268 // 3. Insert the inner tile into the destination tensor:
1269 // %inserted_tile = tensor.insert_slice(%transposed_tile)
1270
1271 // Compute the sizes attribute:
1272 // [ outer-dims, tile-sizes ]
1273 // Note that the output from the transpose Op excludes the tiled outer dims.
1274 // However, given the assumption that:
1275 // * all tiled outer dims == 1,
1276 // we can just use a rank-expanding tensor.insert_slice.
1277 SmallVector<OpFoldResult> writeSizes;
1278 for (auto size : packOp.getAllOuterDims()) {
1279 writeSizes.push_back(rewriter.getIndexAttr(size));
1280 }
1281
1282 for (auto tileSize : packOp.getMixedTiles()) {
1283 auto [_, tileSizeOfr] =
1284 getSimplifiedOfrAndStaticSizePair(tileSize, rewriter);
1285 writeSizes.push_back(tileSizeOfr);
1286 }
1287
1288 auto insert = tensor::InsertSliceOp::create(
1289 rewriter, loc, transposedOp.getResult()[0], packOp.getDest(), writeSizes);
1290
1291 // 4. Replace tensor.packOp with tensor.insert_slice created above
1292 rewriter.replaceOp(packOp, insert.getResult());
1293
1294 return success();
1295}
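
// A rough sketch of the decomposition above (schematic IR; shapes and tile
// sizes hypothetical). With all outer dims equal to 1,
//   %p = linalg.pack %src padding_value(%cst : f32) inner_dims_pos = [0, 1]
//        inner_tiles = [8, 2] into %dest : tensor<5x1xf32> -> tensor<1x1x8x2xf32>
// becomes approximately
//   %padded = tensor.pad %src low[0, 0] high[3, 1] {...} : ... to tensor<8x2xf32>
//   %empty  = tensor.empty() : tensor<8x2xf32>
//   %t      = linalg.transpose ins(%padded : ...) outs(%empty : ...)
//             permutation = [0, 1]
//   tensor.insert_slice %t into %dest[0, 0, 0, 0] [1, 1, 8, 2] [1, 1, 1, 1]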
1296
1297LogicalResult DecomposeOuterUnitDimsUnPackOpPattern::matchAndRewrite(
1298 linalg::UnPackOp unpackOp, PatternRewriter &rewriter) const {
1299 if (!unpackOp.hasPureTensorSemantics())
1300 return failure();
1301
1302 int64_t destRank = unpackOp.getDestRank();
1303 ArrayRef<int64_t> srcShape = unpackOp.getSourceType().getShape();
1304 ArrayRef<int64_t> innerDimsPos = unpackOp.getInnerDimsPos();
1305 if (llvm::any_of(unpackOp.getTiledOuterDims(),
1306 [](int64_t dim) { return dim != 1; })) {
1307 return rewriter.notifyMatchFailure(
1308 unpackOp,
1309 "require the tiled outer dimensions of the result are all 1s");
1310 }
1311
1312 // 1. Use rank-reduced tensor.extract_slice op to extract the tile:
1313 // %extracted_tile = tensor.extract_slice(%unpack_op_input)
1314 Location loc = unpackOp.getLoc();
1315 Value source = unpackOp.getSource();
1316 DenseMap<int64_t, OpFoldResult> dimAndTileMapping =
1317 unpackOp.getDimAndTileMapping();
1318 Attribute oneIdxAttr = rewriter.getIndexAttr(1);
1319
1320 // The shape for ExtractSliceOp. Note that this will consist of 3 blocks of
1321 // dims:
1322 // [ outer-untiled-dims, outer-tiled-dims, tile-sizes ]
1323 SmallVector<int64_t> readShapeForExtractSlice;
1324 // The sizes attribute for ExtractSliceOp. Due to rank-reducing (and
1325 // outer-tiled-dims being all 1), this will be
1326 // [ outer-untiled-dims, tile-sizes ]
1327 SmallVector<OpFoldResult> extractSliceSizes;
1328
1329 // Shape for EmptyOp that's used as the init value for TransposeOp below.
1330 // This should be:
1331 // [ outer-untiled-dims, tile-sizes ]
1332 // However, skip unit dims - TransposeOp (below) applies rank-reduced
1333 // permutation.
1334 SmallVector<OpFoldResult> shapeForEmptyOp;
1335
1336 for (auto i : llvm::seq<unsigned>(0, destRank)) {
1337 // Compute sizes attribute for ExtractSliceOp - outer-tiled-dims.
1338 //
1339 // As all outer tiled dims are 1, the corresponding
1340 // slice size to read will also be 1. As this will be rank-reducing "extract
1341 // slice" (i.e. the unit dims will be "collapsed"), there's no need to
1342 // update:
1343 // * the output shape for ExtractSliceOp, nor
1344 // * the shape for EmptyOp.
1345 if (dimAndTileMapping.count(i)) {
1346 extractSliceSizes.push_back(oneIdxAttr);
1347 continue;
1348 }
1349
1350 // Compute sizes attribute for ExtractSliceOp + EmptyOp -
1351 // outer-untiled-dims
1352 if (ShapedType::isDynamic(srcShape[i])) {
1353 OpFoldResult dynamicDim =
1354 tensor::DimOp::create(rewriter, loc, source, i).getResult();
1355 extractSliceSizes.push_back(dynamicDim);
1356 shapeForEmptyOp.push_back(dynamicDim);
1357 } else {
1358 extractSliceSizes.push_back(rewriter.getIndexAttr(srcShape[i]));
1359 if (srcShape[i] != 1)
1360 shapeForEmptyOp.push_back(rewriter.getIndexAttr(srcShape[i]));
1361 }
1362 // Compute the output shape for ExtractSliceOp - outer-untiled-dims (take
1363 // into account rank-reducing)
1364 if (srcShape[i] != 1) {
1365 readShapeForExtractSlice.push_back(srcShape[i]);
1366 }
1367 }
1368 // Append the tile sizes to "sizes attribute" for ExtractSliceOp and the
1369 // shape for EmptyOp.
1370 auto mixedTiles = unpackOp.getMixedTiles();
1371 extractSliceSizes.append(mixedTiles.begin(), mixedTiles.end());
1372 shapeForEmptyOp.append(mixedTiles.begin(), mixedTiles.end());
1373
1374 // Explicitly create the type for extract_slice op because the inner tile
1375 // size could be 1. We want to represent the whole inner tile in this case.
1376 auto tileShape = srcShape.drop_front(destRank);
1377 // Append the inner tile shape to the permuted and rank-reduced outer shape.
1378 readShapeForExtractSlice.append(tileShape.begin(), tileShape.end());
1379 Type elemType = unpackOp.getSourceType().getElementType();
1380 auto readType = RankedTensorType::get(readShapeForExtractSlice, elemType);
1381 Value innerTile = tensor::ExtractSliceOp::create(
1382 rewriter, loc, readType, unpackOp.getSource(), extractSliceSizes);
1383
1384 // 2. Transpose the tile to match the outer corresponding tile order.
1385 SmallVector<int64_t> perm = getPackUnpackRankReducedPerm(
1386 srcShape.take_front(destRank), innerDimsPos, unpackOp.getOuterDimsPerm());
1387 // Unpack is a transition out of packed space so we invert the permutation.
1388 perm = invertPermutationVector(perm);
1389 applyPermutationToVector<OpFoldResult>(shapeForEmptyOp, perm);
1390
1391 Value empty =
1392 tensor::EmptyOp::create(rewriter, loc, shapeForEmptyOp, elemType);
1393 auto transposedOp =
1394 linalg::TransposeOp::create(rewriter, loc, innerTile, empty, perm);
1395
1396 // 3. Handle incomplete tiles if needed, truncating trailing data from the
1397 // transposed tile.
1398 SmallVector<OpFoldResult> tileSizes;
1399 ArrayRef<int64_t> destShape = unpackOp.getDestType().getShape();
1400 for (auto i : llvm::seq<unsigned>(0, destRank)) {
1401 if (dimAndTileMapping.count(i) || destShape[i] != 1)
1402 tileSizes.push_back(
1403 tensor::getMixedSize(rewriter, loc, unpackOp.getDest(), i));
1404 }
1405
1406 auto partialTile =
1407 tensor::ExtractSliceOp::create(rewriter, loc, RankedTensorType(),
1408 transposedOp.getResult()[0], tileSizes);
1409
1410 // 4. Insert the result to the destination tensor.
1411 SmallVector<OpFoldResult> writeSizes;
1412 for (int i = 0, idx = 0; i < destRank; ++i) {
1413 if (dimAndTileMapping.count(i) || destShape[i] != 1)
1414 writeSizes.push_back(tileSizes[idx++]);
1415 else
1416 writeSizes.push_back(oneIdxAttr);
1417 }
1418 auto insert = tensor::InsertSliceOp::create(rewriter, loc, partialTile,
1419 unpackOp.getDest(), writeSizes);
1420 rewriter.replaceOp(unpackOp, insert.getResult());
1421
1422 return success();
1423}
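// Illustrative sketch (not part of the original file): for an unpack whose
// outer dims are all unit, e.g.
//   %r = linalg.unpack %src inner_dims_pos = [1, 0] inner_tiles = [32, 8]
//          into %dest : tensor<1x1x32x8xf32> -> tensor<8x32xf32>
// the rewrite above is expected to produce, schematically (operand and
// attribute details elided):
//   %tile = tensor.extract_slice %src ...  // rank-reducing, tensor<32x8xf32>
//   %init = tensor.empty() : tensor<8x32xf32>
//   %t    = linalg.transpose ins(%tile) outs(%init) permutation = [1, 0]
//   %part = tensor.extract_slice %t ...    // tensor<8x32xf32>
//   %r    = tensor.insert_slice %part into %dest ...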
1424
1425// The following are patterns for downscaling convolution ops with size-1
1426// window dimensions.
1427//
1428// Note that we'd eventually want to write such transformations in a generic
1429// way, e.g., converting to linalg.generic, removing the size-1 dimensions,
1430// and then turning back to named ops. But for now it's fine to have a few
1431// patterns matching special ops to get started.
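// Illustrative usage sketch (not part of the original file): these
// downscaling patterns are typically registered through
// populateDecomposeConvolutionPatterns and run under the greedy rewrite
// driver, roughly as follows (the enclosing pass and `funcOp` are assumed for
// illustration only):
//
//   RewritePatternSet patterns(funcOp.getContext());
//   linalg::populateDecomposeConvolutionPatterns(patterns);
//   if (failed(applyPatternsGreedily(funcOp, std::move(patterns))))
//     return signalPassFailure();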
1432
1433 template <typename Conv2DOp, typename Conv1DOp>
1434 FailureOr<Conv1DOp> DownscaleSizeOneWindowed2DConvolution<Conv2DOp, Conv1DOp>::
1435 returningMatchAndRewrite(LinalgOp convOp, PatternRewriter &rewriter) const {
1436 // Check if this LinalgOp is of the expected Conv2DOp type (named or generic).
1437 std::optional<DilationsAndStrides> convParams =
1438 matchConvolutionOpOfType<Conv2DOp>(convOp);
1439 if (!convParams)
1440 return failure();
1441 SmallVector<int64_t> dilations = std::move(convParams->dilations);
1442 SmallVector<int64_t> strides = std::move(convParams->strides);
1443
1444 if (convOp.hasPureBufferSemantics())
1445 return failure(); // To be implemented.
1446
1447 Value input = convOp.getDpsInputs().front();
1448 Value kernel = convOp.getDpsInputs().back();
1449 Value output = convOp.getDpsInits().front();
1450
1451 auto inputType = dyn_cast<RankedTensorType>(input.getType());
1452 auto kernelType = dyn_cast<RankedTensorType>(kernel.getType());
1453 auto outputType = dyn_cast<RankedTensorType>(output.getType());
1454
1455 auto kernelShape = kernelType.getShape();
1456 auto outputShape = outputType.getShape();
1457
1458 // Get domain indices based on Conv2DOp type. These are known at compile time.
1459 int64_t khIndex, kwIndex, ohIndex, owIndex;
1460 if constexpr (std::is_same_v<Conv2DOp, linalg::Conv2DNhwcHwcfOp> ||
1461 std::is_same_v<Conv2DOp, linalg::PoolingNhwcSumOp> ||
1462 std::is_same_v<Conv2DOp, linalg::PoolingNhwcMaxOp> ||
1463 std::is_same_v<Conv2DOp, linalg::PoolingNhwcMaxUnsignedOp> ||
1464 std::is_same_v<Conv2DOp, linalg::PoolingNhwcMinOp> ||
1465 std::is_same_v<Conv2DOp, linalg::PoolingNhwcMinUnsignedOp>) {
1466 // NHWC layout: kernel [H, W, ...], output [N, H, W, C]
1467 khIndex = 0;
1468 kwIndex = 1;
1469 ohIndex = 1;
1470 owIndex = 2;
1471 } else if constexpr (std::is_same_v<Conv2DOp, linalg::Conv2DNchwFchwOp>) {
1472 // NCHW_FCHW layout: kernel [..., H, W], output [N, C, H, W]
1473 khIndex = 2;
1474 kwIndex = 3;
1475 ohIndex = 2;
1476 owIndex = 3;
1477 } else if constexpr (std::is_same_v<Conv2DOp, linalg::PoolingNchwSumOp> ||
1478 std::is_same_v<Conv2DOp, linalg::PoolingNchwMaxOp>) {
1479 // NCHW pooling layout: kernel [H, W], output [N, C, H, W]
1480 khIndex = 0;
1481 kwIndex = 1;
1482 ohIndex = 2;
1483 owIndex = 3;
1484 }
1485
1486 // Only handle the case where at least one of the window dimensions is
1487 // of size 1. Other cases can rely on tiling to reduce to such cases.
1488 int64_t khSize = kernelShape[khIndex], kwSize = kernelShape[kwIndex];
1489 int64_t ohSize = outputShape[ohIndex], owSize = outputShape[owIndex];
1490 bool removeH = (khSize == 1 && ohSize == 1);
1491 bool removeW = (kwSize == 1 && owSize == 1);
1492 if (!removeH && !removeW)
1493 return failure();
1494
1495 // Get new shapes and types for all operands by removing the size-1
1496 // dimension.
1497 using RTTBuilder = RankedTensorType::Builder;
1498 RankedTensorType newInputType =
1499 RTTBuilder(inputType).dropDim((removeH ? ohIndex : owIndex));
1500 RankedTensorType newKernelType =
1501 RTTBuilder(kernelType).dropDim((removeH ? khIndex : kwIndex));
1502 RankedTensorType newOutputType =
1503 RTTBuilder(outputType).dropDim((removeH ? ohIndex : owIndex));
1504
1505 // Rank-reduce operands.
1506 Location loc = convOp.getLoc();
1507 Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
1508 rewriter, loc, input, newInputType);
1509 Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
1510 rewriter, loc, kernel, newKernelType);
1511 Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
1512 rewriter, loc, output, newOutputType);
1513
1514 // Rank-reduce strides and dilations too.
1515 // TODO: dropDim 1-liner helper.
1516 strides.erase(strides.begin() + (removeH ? 0 : 1));
1517 auto stridesAttr = rewriter.getI64VectorAttr(strides);
1518
1519 dilations.erase(dilations.begin() + (removeH ? 0 : 1));
1520 auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
1521
1522 auto conv1DOp = Conv1DOp::create(
1523 rewriter, loc, newOutputType, ValueRange{newInput, newKernel},
1524 ValueRange{newOutput}, stridesAttr, dilationsAttr);
1525
1526 // Insert back.
1527 Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
1528 rewriter, loc, conv1DOp.getResult(0), output);
1529 rewriter.replaceOp(convOp, inserted);
1530
1531 return conv1DOp;
1532}
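// Illustrative sketch (not part of the original file): for a
// linalg.conv_2d_nhwc_hwcf with a size-1 H window, i.e.
//   input  tensor<Nx1xIWxCxf32>, kernel tensor<1xKWxCxFxf32>,
//   output tensor<Nx1xOWxFxf32>,
// the rewrite above drops the unit H dimension with rank-reducing
// extract_slice ops, emits a linalg.conv_1d_nwc_wcf on
//   input  tensor<NxIWxCxf32>, kernel tensor<KWxCxFxf32>,
//   output tensor<NxOWxFxf32>,
// and then insert_slice's the 1-D result back into the original output tensor.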
1533
1534template struct linalg::DownscaleSizeOneWindowed2DConvolution<Conv2DNhwcHwcfOp,
1535 Conv1DNwcWcfOp>;
1536template struct linalg::DownscaleSizeOneWindowed2DConvolution<Conv2DNchwFchwOp,
1537 Conv1DNcwFcwOp>;
1538template struct linalg::DownscaleSizeOneWindowed2DConvolution<PoolingNhwcSumOp,
1539 PoolingNwcSumOp>;
1540template struct linalg::DownscaleSizeOneWindowed2DConvolution<PoolingNchwSumOp,
1541 PoolingNcwSumOp>;
1542template struct linalg::DownscaleSizeOneWindowed2DConvolution<PoolingNhwcMaxOp,
1543 PoolingNwcMaxOp>;
1544 template struct linalg::DownscaleSizeOneWindowed2DConvolution<
1545 PoolingNhwcMaxUnsignedOp, PoolingNwcMaxUnsignedOp>;
1546template struct linalg::DownscaleSizeOneWindowed2DConvolution<PoolingNhwcMinOp,
1547 PoolingNwcMinOp>;
1548 template struct linalg::DownscaleSizeOneWindowed2DConvolution<
1549 PoolingNhwcMinUnsignedOp, PoolingNwcMinUnsignedOp>;
1550template struct linalg::DownscaleSizeOneWindowed2DConvolution<PoolingNchwMaxOp,
1551 PoolingNcwMaxOp>;
1552
1553 FailureOr<DepthwiseConv1DNwcWcOp>
1554 DownscaleDepthwiseConv2DNhwcHwcOp::returningMatchAndRewrite(
1555 LinalgOp convOp, PatternRewriter &rewriter) const {
1556 // Check if this LinalgOp is a DepthwiseConv2DNhwcHwcOp (named or generic).
1557 std::optional<DilationsAndStrides> convParams =
1558 matchConvolutionOpOfType<DepthwiseConv2DNhwcHwcOp>(convOp);
1559 if (!convParams)
1560 return failure();
1561 SmallVector<int64_t> dilations = std::move(convParams->dilations);
1562 SmallVector<int64_t> strides = std::move(convParams->strides);
1563
1564 if (convOp.hasPureBufferSemantics())
1565 return failure(); // To be implemented.
1566
1567 Value input = convOp.getDpsInputs().front();
1568 Value kernel = convOp.getDpsInputs().back();
1569 Value output = convOp.getDpsInits().front();
1570
1571 auto inputType = dyn_cast<RankedTensorType>(input.getType());
1572 auto kernelType = dyn_cast<RankedTensorType>(kernel.getType());
1573 auto outputType = dyn_cast<RankedTensorType>(output.getType());
1574
1575 auto kernelShape = kernelType.getShape();
1576 auto outputShape = outputType.getShape();
1577
1578 // Only handle the case where at least one of the window dimensions is
1579 // of size 1. Other cases can rely on tiling to reduce to such cases.
1580 int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
1581 int64_t ohSize = outputShape[1], owSize = outputShape[2];
1582 bool removeH = (khSize == 1 && ohSize == 1);
1583 bool removeW = (kwSize == 1 && owSize == 1);
1584 if (!removeH && !removeW)
1585 return failure();
1586
1587 // Get new shapes and types for all operands by removing the size-1
1588 // dimension.
1589 using RTTBuilder = RankedTensorType::Builder;
1590 RankedTensorType newInputType =
1591 RTTBuilder(inputType).dropDim((removeH ? 1 : 2));
1592 RankedTensorType newKernelType =
1593 RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
1594 RankedTensorType newOutputType =
1595 RTTBuilder(outputType).dropDim(removeH ? 1 : 2);
1596
1597 // Rank-reduce operands.
1598 Location loc = convOp.getLoc();
1599 Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
1600 rewriter, loc, input, newInputType);
1601 Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
1602 rewriter, loc, kernel, newKernelType);
1603 Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
1604 rewriter, loc, output, newOutputType);
1605
1606 // Rank-reduce strides and dilations too.
1607 // TODO: dropDim 1-liner helper.
1608 strides.erase(strides.begin() + (removeH ? 0 : 1));
1609 auto stridesAttr = rewriter.getI64VectorAttr(strides);
1610
1611 dilations.erase(dilations.begin() + (removeH ? 0 : 1));
1612 auto dilationsAttr = rewriter.getI64VectorAttr(dilations);
1613
1614 auto conv1DOp = DepthwiseConv1DNwcWcOp::create(
1615 rewriter, loc, newOutputType, ValueRange{newInput, newKernel},
1616 ValueRange{newOutput}, stridesAttr, dilationsAttr);
1617
1618 // Insert back.
1619 Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
1620 rewriter, loc, conv1DOp.getResult(0), output);
1621 rewriter.replaceOp(convOp, inserted);
1622
1623 return conv1DOp;
1624}
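// Illustrative sketch (not part of the original file): a
// linalg.depthwise_conv_2d_nhwc_hwc with input tensor<Nx1xIWxCxf32>, kernel
// tensor<1xKWxCxf32> and output tensor<Nx1xOWxCxf32> is rewritten, in the same
// fashion as above, to a linalg.depthwise_conv_1d_nwc_wc on the rank-reduced
// tensors tensor<NxIWxCxf32>, tensor<KWxCxf32> and tensor<NxOWxCxf32>.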
1625
1626 FailureOr<Conv1DOp>
1627 DownscaleConv2DOp::returningMatchAndRewrite(LinalgOp convOp,
1628 PatternRewriter &rewriter) const {
1629 // Check if this LinalgOp is a Conv2DOp (named or generic).
1630 std::optional<DilationsAndStrides> convParams =
1631 matchConvolutionOpOfType<Conv2DOp>(convOp);
1632 if (!convParams)
1633 return failure();
1634
1635 if (convOp.hasPureBufferSemantics())
1636 return failure(); // To be implemented.
1637
1638 Value input = convOp.getDpsInputs().front();
1639 Value kernel = convOp.getDpsInputs().back();
1640 Value output = convOp.getDpsInits().front();
1641
1642 auto inputType = dyn_cast<RankedTensorType>(input.getType());
1643 auto kernelType = dyn_cast<RankedTensorType>(kernel.getType());
1644 auto outputType = dyn_cast<RankedTensorType>(output.getType());
1645
1646 auto kernelShape = kernelType.getShape();
1647 auto outputShape = outputType.getShape();
1648
1649 // Only handle the case where at least one of the window dimensions is
1650 // of size 1. Other cases can rely on tiling to reduce to such cases.
1651 int64_t khSize = kernelShape[0], kwSize = kernelShape[1];
1652 int64_t ohSize = outputShape[0], owSize = outputShape[1];
1653 bool removeH = (khSize == 1 && ohSize == 1);
1654 bool removeW = (kwSize == 1 && owSize == 1);
1655 if (!removeH && !removeW)
1656 return failure();
1657
1658 // Get new shapes and types for all operands by removing the size-1
1659 // dimension.
1660 using RTTBuilder = RankedTensorType::Builder;
1661 RankedTensorType newInputType =
1662 RTTBuilder(inputType).dropDim((removeH ? 0 : 1));
1663 RankedTensorType newKernelType =
1664 RTTBuilder(kernelType).dropDim((removeH ? 0 : 1));
1665 RankedTensorType newOutputType =
1666 RTTBuilder(outputType).dropDim(removeH ? 0 : 1);
1667
1668 // Rank-reduce operands.
1669 Location loc = convOp.getLoc();
1670 Value newInput = tensor::createCanonicalRankReducingExtractSliceOp(
1671 rewriter, loc, input, newInputType);
1672 Value newKernel = tensor::createCanonicalRankReducingExtractSliceOp(
1673 rewriter, loc, kernel, newKernelType);
1674 Value newOutput = tensor::createCanonicalRankReducingExtractSliceOp(
1675 rewriter, loc, output, newOutputType);
1676
1677 auto conv1DOp =
1678 Conv1DOp::create(rewriter, loc, newOutputType,
1679 ValueRange{newInput, newKernel}, ValueRange{newOutput});
1680
1681 // Insert back.
1682 Value inserted = tensor::createCanonicalRankReducingInsertSliceOp(
1683 rewriter, loc, conv1DOp.getResult(0), output);
1684 rewriter.replaceOp(convOp, inserted);
1685
1686 return conv1DOp;
1687}
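// Illustrative sketch (not part of the original file): a plain linalg.conv_2d
// (no batch or channel dims) with input tensor<1xIWxf32>, kernel
// tensor<1xKWxf32> and output tensor<1xOWxf32> is rewritten to a
// linalg.conv_1d on tensor<IWxf32>, tensor<KWxf32> and tensor<OWxf32>; unlike
// the patterns above, there are no strides or dilations to rank-reduce here.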
1688