//===- HoistPadding.cpp - Hoisting for tensor::PadOp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements functions concerned with hoisting padding operations.
//
//===----------------------------------------------------------------------===//
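//
// For illustration, hoisting a tensor.pad by one enclosing loop rewrites IR of
// the following shape (schematic, all names illustrative):
//
//   scf.for %i = %c0 to %n step %c4 {
//     %s = tensor.extract_slice %t[%i] [%sz] [1]
//     %p = tensor.pad %s ...  : tensor<?xf32> to tensor<4xf32>
//     ... use %p ...
//   }
//
// into a packing loop that materializes all padded tiles once, before the
// original loop, plus a cheap extract_slice inside it:
//
//   %empty = tensor.empty(%numTiles) : tensor<?x4xf32>
//   %packed = scf.for %i = %c0 to %n step %c4 iter_args(%arg = %empty) {
//     %s = tensor.extract_slice %t[%i] [%sz] [1]
//     %p = tensor.pad %s ...  : tensor<?xf32> to tensor<4xf32>
//     %it = affine.apply affine_map<(d0) -> (d0 ceildiv 4)>(%i)
//     %ins = tensor.insert_slice %p into %arg[%it, 0] [1, 4] [1, 1]
//     scf.yield %ins
//   }
//   scf.for %i = %c0 to %n step %c4 {
//     %it = affine.apply affine_map<(d0) -> (d0 ceildiv 4)>(%i)
//     %p = tensor.extract_slice %packed[%it, 0] [1, 4] [1, 1]
//     ... use %p ...
//   }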

#include "mlir/Dialect/Linalg/Transforms/HoistPadding.h"

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Transforms/Transforms.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/Utils/Utils.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Dominance.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Interfaces/DestinationStyleOpInterface.h"
#include "mlir/Transforms/LoopInvariantCodeMotion.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/SmallVectorExtras.h"
#include "llvm/Support/Debug.h"

using llvm::dbgs;

#define DEBUG_TYPE "hoist-padding"

#define DBGS() (dbgs() << '[' << DEBUG_TYPE << "] ")

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::linalg::detail;

#ifndef NDEBUG
static bool debugPrintLoopInShortForm(Operation *op) {
  AsmState state(op->getParentOfType<func::FuncOp>());
  (void)state;
  if (auto forOp = dyn_cast<scf::ForOp>(op)) {
    forOp.getInductionVar().printAsOperand(dbgs(), state);
    dbgs() << " @ " << forOp.getOperation();
    return true;
  }
  return false;
}
#endif

static void debugPrintBackwardSlice(SetVector<Operation *> &backwardSlice) {
  LLVM_DEBUG(llvm::interleaveComma(backwardSlice, DBGS() << "--backwardSlice:",
                                   [](Operation *op) {
                                     dbgs() << "\n";
                                     DBGS() << "----";
                                     if (debugPrintLoopInShortForm(op)) {
                                       dbgs() << "\n";
                                       return;
                                     }
                                     dbgs() << *op << "\n";
                                   });
             DBGS() << "\n";);
}

/// Return at most nLevels of immediately enclosing scf::ForOp loops.
/// Stops at the first parent that is not an scf::ForOp.
/// Multi-loops such as scf.parallel or linalg.tiled_loop are not modeled atm.
/// Control-flow and other containing ops with regions are not modeled atm.
static void
getAtMostNEnclosingLoops(tensor::PadOp padOp, int nLevels,
                         SmallVector<scf::ForOp> &reverseEnclosingLoops) {
  scf::ForOp outermostEnclosingForOp = nullptr;
  Operation *nextEnclosingOp = padOp->getParentOp();
  while (nLevels-- > 0 &&
         (outermostEnclosingForOp = dyn_cast<scf::ForOp>(nextEnclosingOp))) {
    LLVM_DEBUG(DBGS() << "loops: ";
               debugPrintLoopInShortForm(outermostEnclosingForOp);
               dbgs() << "\n");
    reverseEnclosingLoops.push_back(outermostEnclosingForOp);
    nextEnclosingOp = outermostEnclosingForOp->getParentOp();
  }
}

/// Return the immediately enclosing scf::ForOp loops from `padOp` outward, up
/// to and including `untilLoop`.
/// Stops at the first parent that is not an scf::ForOp.
/// Multi-loops such as scf.parallel or linalg.tiled_loop are not modeled atm.
/// Control-flow and other containing ops with regions are not modeled atm.
static void
getEnclosingLoopsUntil(tensor::PadOp padOp, scf::ForOp untilLoop,
                       SmallVector<scf::ForOp> &reverseEnclosingLoops) {
  scf::ForOp outermostEnclosingForOp = nullptr;
  Operation *nextEnclosingOp = padOp->getParentOp();
  while (outermostEnclosingForOp != untilLoop &&
         (outermostEnclosingForOp = dyn_cast<scf::ForOp>(nextEnclosingOp))) {
    LLVM_DEBUG(DBGS() << "loops: ";
               debugPrintLoopInShortForm(outermostEnclosingForOp);
               dbgs() << "\n");
    reverseEnclosingLoops.push_back(outermostEnclosingForOp);
    nextEnclosingOp = outermostEnclosingForOp->getParentOp();
  }
}

// Get all the ops in the backwards slice starting from `padOp` and that
// are dominated by the outermost enclosing loop.
// This also requires tracking ops defining values used in the region but
// defined above.
static void computeBackwardSlice(tensor::PadOp padOp,
                                 scf::ForOp outermostEnclosingForOp,
                                 SetVector<Operation *> &backwardSlice) {
  DominanceInfo domInfo(outermostEnclosingForOp);
  BackwardSliceOptions sliceOptions;
  sliceOptions.filter = [&](Operation *op) {
    return domInfo.dominates(outermostEnclosingForOp, op) &&
           !padOp->isProperAncestor(op);
  };
  sliceOptions.inclusive = true;

  // First, add the ops required to compute the region to the backwardSlice.
  SetVector<Value> valuesDefinedAbove;
  getUsedValuesDefinedAbove(padOp.getRegion(), padOp.getRegion(),
                            valuesDefinedAbove);
  for (Value v : valuesDefinedAbove) {
    LogicalResult result = getBackwardSlice(v, &backwardSlice, sliceOptions);
    assert(result.succeeded() && "expected a backward slice");
    (void)result;
  }
  // Then, add the backward slice from padOp itself.
  LogicalResult result =
      getBackwardSlice(padOp.getOperation(), &backwardSlice, sliceOptions);
  assert(result.succeeded() && "expected a backward slice");
  (void)result;
}

//===----------------------------------------------------------------------===//
// HoistPaddingAnalysis Implementation.
//===----------------------------------------------------------------------===//

namespace {
/// Analysis class to support tensor::PadOp hoisting across multiple enclosing
/// loops. The failure conditions are:
///   1. Pad op has a use that is not an input of a LinalgOp.
///   2. Pad op does not have a constant padding value.
///   3. There is no immediately enclosing scf::ForOp.
///   4. The backward slice from the pad op to the scf::ForOp to hoist above
///      contains an unknown op with non index type operands, a region, or a
///      memory effect.
///   5. The backward slice from the pad op to the scf::ForOp to hoist above is
///      empty.
///   6. The source tensor of pad op is not defined by an extract slice op.
///   7. The source tensor of the extract slice op is not defined outside of
///      the outermost enclosing scf::ForOp.
///   8. There is no enclosing scf::ForOp that indexes the padded data.
/// Other cases succeed and will trigger hoisting of the pad op.
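///
/// For illustration, a schematic example of condition 2 (all names
/// illustrative): the padding value is produced inside the loop rather than
/// being an arith.constant, so the analysis rejects it.
/// ```
/// scf.for %i
///   %v = tensor.extract %init[%i]
///   %slice = tensor.extract_slice %source [%i] [%ub] [1]
///   %padded = tensor.pad %slice low[0] high[%h] {
///     ^bb0(%arg0: index):
///       tensor.yield %v : f32
///   }
/// ```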
struct HoistPaddingAnalysis {
  HoistPaddingAnalysis(tensor::PadOp padOp, int numLoops);
  HoistPaddingAnalysis(tensor::PadOp padOp, scf::ForOp outermostEnclosingForOp);

  bool isValid() { return valid.has_value() && valid.value(); }
  bool isInvalid() { return valid.has_value() && !valid.value(); }

  /// Footprint of the hoistedPackedTensor, computed from the packingLoops.
  SmallVector<Value> getHoistedPackedTensorSizes(RewriterBase &rewriter,
                                                 Location loc) const;

  /// Performs optional hoisting to enable hoist padding to occur. This may be
  /// necessary when `sliceOp` is not defined outside of the outermost enclosing
  /// loop we want to hoist above.
  ///
  /// Example:
  /// ```
  /// %source = linalg.fill(%cst, %arg0)
  /// // %source is available for packing here!
  /// scf.for %i
  ///   scf.for %j
  ///     scf.for %k
  ///       %slice = tensor.extract_slice %source [%i, %j]
  ///       %padded_slice = tensor.pad %slice
  /// ```
  void enableHoistPadding(RewriterBase &rewriter);

  /// Common analysis builder to finalize the construction of the analysis once
  /// optional `enableHoistPadding` has run.
  /// `reverseEnclosingLoops.back()` is the loop to hoist above.
  void finalizeHoistPaddingAnalysis();

private:
  /// Encodes whether the analysis is valid and hoisting can proceed.
  std::optional<bool> valid;

  /// The padOp to hoist.
  tensor::PadOp opToHoist;

  /// Immediately enclosing loops considered for hoisting padding.
  SmallVector<scf::ForOp> reverseEnclosingLoops;

  /// Drop any non-index dependencies of `padOp` and `sliceOp` from
  /// `backwardSlice`. The method follows the use-def chains of the index
  /// operands consumed by `padOp` and `sliceOp` and drops the operations
  /// not part of this index computation. Afterwards, the filtered
  /// `backwardSlice` contains only the loops whose induction variable is
  /// used, directly or indirectly, to index the padded tensor. The method
  /// returns failure if the filtered backward slice contains an unexpected
  /// operation.
  ///
  /// Example:
  /// ```
  /// %source = linalg.fill(%cst, %arg0)
  /// scf.for %i
  ///   %unrelated = linalg.fill(%cst, %arg1)   // not used to index %source!
  ///   scf.for %j (%arg2 = %unrelated)
  ///     scf.for %k                            // not used to index %source!
  ///       %ubi = affine.min #map(%i)
  ///       %ubj = affine.min #map(%j)
  ///       %slice = tensor.extract_slice %source [%i, %j] [%ubi, %ubj]
  ///       %padded_slice = tensor.pad %slice
  /// ```
  /// dropNonIndexDependencies(%padded_slice, %slice)
  /// removes [scf.for %k, linalg.fill(%cst, %arg1)] from backwardSlice.
  LogicalResult dropNonIndexDependencies();

public:
  /// The outermost loop, determined by `nLevels`, above which `padOp` will
  /// be hoisted.
  scf::ForOp outermostEnclosingForOp;

  /// Backward slice rooted at `padOp` and nested under
  /// `outermostEnclosingForOp`.
  SetVector<Operation *> backwardSlice;

  /// The scf::ForOp loops immediately enclosing `padOp` such that:
  ///   1. they are nested under `outermostEnclosingForOp` (inclusive), and
  ///   2. their induction variables are used, directly or indirectly, in the
  ///      computation of `padOp`.
  /// The span of these loops determines the footprint of the packed tensor.
  SmallVector<scf::ForOp> packingLoops;

  /// The ExtractSliceOp that feeds the PadOp we want to hoist.
  tensor::ExtractSliceOp sliceOp;

  /// If non-empty, this is the unique scf::ForOp that consumes the `sliceOp`.
  scf::ForOp padConsumingForOp;
};

} // namespace

HoistPaddingAnalysis::HoistPaddingAnalysis(tensor::PadOp padOp, int numLoops)
    : valid(std::nullopt), opToHoist(padOp) {
  // Get at most `numLoops` of immediately enclosing loops.
  getAtMostNEnclosingLoops(opToHoist, numLoops, reverseEnclosingLoops);
  if (reverseEnclosingLoops.empty()) {
    LLVM_DEBUG(DBGS() << "--No immediately enclosing loop -> Skip\n");
    valid = false;
    return;
  }
  outermostEnclosingForOp = reverseEnclosingLoops.back();
  sliceOp = opToHoist.getSource().getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(DBGS() << "--Cannot find the extract slice op -> Skip\n");
    valid = false;
    return;
  }
}

HoistPaddingAnalysis::HoistPaddingAnalysis(tensor::PadOp padOp,
                                           scf::ForOp outermostEnclosingForOp)
    : valid(std::nullopt), opToHoist(padOp) {
  // Get enclosing loops until outermostEnclosingForOp.
  getEnclosingLoopsUntil(opToHoist, outermostEnclosingForOp,
                         reverseEnclosingLoops);
  if (reverseEnclosingLoops.empty()) {
    LLVM_DEBUG(DBGS() << "--No immediately enclosing loop -> Skip\n");
    valid = false;
    return;
  }
  this->outermostEnclosingForOp = reverseEnclosingLoops.back();
  if (this->outermostEnclosingForOp != outermostEnclosingForOp) {
    LLVM_DEBUG(DBGS() << "--Unexpected outermost enclosing loop -> Skip\n");
    valid = false;
    return;
  }
  sliceOp = opToHoist.getSource().getDefiningOp<tensor::ExtractSliceOp>();
  if (!sliceOp) {
    LLVM_DEBUG(DBGS() << "--Cannot find the extract slice op -> Skip\n");
    valid = false;
    return;
  }
}

void HoistPaddingAnalysis::enableHoistPadding(RewriterBase &rewriter) {
  if (isInvalid())
    return;
  // If the padded data is not yet available before entering the outermost
  // enclosing loop, try to apply hoisting on this outermost loop.
  // TODO: we may want finer-grained hoisting of only that particular `sliceOp`.
  if (!outermostEnclosingForOp.isDefinedOutsideOfLoop(sliceOp.getSource())) {
    outermostEnclosingForOp = cast<scf::ForOp>(
        hoistLoopInvariantSubsets(rewriter, outermostEnclosingForOp));
  }
}

void HoistPaddingAnalysis::finalizeHoistPaddingAnalysis() {
  if (isInvalid())
    return;

  if (!outermostEnclosingForOp.isDefinedOutsideOfLoop(sliceOp.getSource())) {
    LLVM_DEBUG(DBGS() << "--outermostEnclosingForOp:\n"
                      << outermostEnclosingForOp << "\n"
                      << "--sliceOp: " << sliceOp << "\n"
                      << "--sliceOp.getSource(): " << sliceOp.getSource()
                      << "\n");
    LLVM_DEBUG(DBGS() << "----Source not defined outside of loops -> Skip\n");
    valid = false;
    return;
  }
  if (sliceOp->hasOneUse()) {
    padConsumingForOp = dyn_cast<scf::ForOp>(*(sliceOp->getUsers().begin()));
  }

  // Check the region of `padOp` depends on a constant only. Adding hoisting
  // support for arbitrary padding regions would require cloning all
  // dependencies captured by the padding region.
  Value paddingValue = opToHoist.getConstantPaddingValue();
  if (!paddingValue ||
      !isa_and_nonnull<arith::ConstantOp>(paddingValue.getDefiningOp())) {
    LLVM_DEBUG(DBGS() << "Cannot find constant padding value -> Skip\n");
    valid = false;
    return;
  }

  computeBackwardSlice(opToHoist, outermostEnclosingForOp, backwardSlice);
  if (backwardSlice.size() <= 1) {
    valid = false;
    return;
  }

  debugPrintBackwardSlice(backwardSlice);
  // Remove all ops in the backward slice that are not used to index
  // the padded tensor. In particular, keep `padOp`, `sliceOp`, and
  // the loop and affine operations used for the index computation.
  if (failed(dropNonIndexDependencies())) {
    LLVM_DEBUG(DBGS() << "--Cannot dropNonIndexDependencies -> Skip\n");
    valid = false;
    return;
  }
  debugPrintBackwardSlice(backwardSlice);

  // Add only the loops part of the filtered `backwardSlice` to the
  // packing loops. All other loops are not used to index the padded
  // data and consequently access the same data in every loop
  // iteration. Adding them to the packing loops would increase the
  // cache footprint of the packed data by storing the same data
  // multiple times.
  for (scf::ForOp forOp : llvm::reverse(reverseEnclosingLoops))
    if (backwardSlice.contains(forOp))
      packingLoops.push_back(forOp);

  // TODO: for multiple loops we need to track the use to the innermost loop.
  if (packingLoops.size() > 1 && padConsumingForOp) {
    LLVM_DEBUG(DBGS() << "--Cannot hoist multiple loops through iter_args -> "
                         "Downgrade to 1 loop\n");
    packingLoops.resize(1);
  }

  // Note: at this point, packing loops may be empty but we would still like
  // to hoist the padding if so specified.

  // The analysis is valid and hoisting can occur.
  valid = true;
}

LogicalResult HoistPaddingAnalysis::dropNonIndexDependencies() {
  // Set of all values used for index computation.
  SetVector<Value> indexEdges;

  // Add all index operands of `operation` to `indexEdges`. An index operand
  // is an operand of type index.
  auto addIndexOperandsToIndexEdges = [&](Operation *operation) {
    for (Value operand : operation->getOperands())
      if (operand.getType().isIndex())
        indexEdges.insert(operand);
  };

  // Check if any operation result is contained in `indexEdges`.
  auto hasIndexResult = [&](Operation *operation) {
    return llvm::any_of(operation->getResults(), [&](Value result) {
      return indexEdges.contains(result);
    });
  };

  // Starting from `opToHoist` and `sliceOp` walk the use-def edges of index
  // type in `backwardSlice`. Add the index operands of an operation to
  // `indexEdges` and remove all operations from `backwardSlice` that are not
  // part of the index computation.
  //
  // Example:
  // ```
  // %source = linalg.fill(%cst, %arg0)
  // scf.for %i
  //   %unrelated = linalg.fill(%cst, %arg1)   // not used to index %source!
  //   scf.for %j (%arg2 = %unrelated)
  //     scf.for %k                            // not used to index %source!
  //       %ubi = affine.min #map(%i)
  //       %ubj = affine.min #map(%j)
  //       %slice = tensor.extract_slice %source [%i, %j] [%ubi, %ubj]
  //       %padded_slice = tensor.pad %slice
  // ```
  // After iterating `backwardSlice` we obtain:
  // indexEdges = [%i, %j, %ubi, %ubj]
  // backwardSlice = backwardSlice / [linalg.fill(%cst, %arg1), scf.for %k]
  SetVector<Operation *> operationsToRemove;
  for (Operation *op : llvm::reverse(backwardSlice)) {
    // Add the index operands of `opToHoist` and `sliceOp` to start the
    // exploration of the index computation.
    if (op == opToHoist || op == sliceOp) {
      addIndexOperandsToIndexEdges(op);
      continue;
    }
    // Add the index operands of the loop if its induction variable is
    // used for index computation.
    if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      if (!hasIndexResult(op) && indexEdges.contains(forOp.getInductionVar())) {
        addIndexOperandsToIndexEdges(op);
        continue;
      }
    }
    // Add the index operands of all other operations if at least one result
    // is used for index computation.
    if (hasIndexResult(op)) {
      addIndexOperandsToIndexEdges(op);
      // Check the operands of the remaining operations all have index type.
      if (llvm::any_of(op->getOperandTypes(),
                       [](Type type) { return !type.isIndex(); })) {
        LLVM_DEBUG(DBGS() << "Unsupported op with non index type operands: "
                          << op << " -> Skip\n");
        return failure();
      }
      // Check the remaining operations do not have regions or memory effects.
      auto effectInterface = dyn_cast<MemoryEffectOpInterface>(op);
      bool hasMemoryEffect = effectInterface && !effectInterface.hasNoEffect();
      if (hasMemoryEffect || op->getNumRegions() != 0) {
        LLVM_DEBUG(DBGS() << "Unsupported op with region or memory effect: "
                          << op << " -> Skip\n");
        return failure();
      }
      continue;
    }
    // Remove all other operations not used by the index computation. An
    // exception are constant operations that may be used by `opToHoist`.
    if (!isa<arith::ConstantOp>(op))
      operationsToRemove.insert(op);
  }
  backwardSlice.set_subtract(operationsToRemove);
  return success();
}

SmallVector<Value>
HoistPaddingAnalysis::getHoistedPackedTensorSizes(RewriterBase &rewriter,
                                                  Location loc) const {
  SmallVector<Value> dynamicTensorSizes;

  // Upper bound the packing loop lengths to size the packed tensor. Taking
  // upper bounds can make the sizes of the packed tensor independent of the
  // enclosing loops. This independence is a prerequisite for reusing the same
  // buffer for all enclosing loop iterations and hoisting its allocation out
  // of the enclosing loops.
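  //
  // For illustration (a hypothetical instance, not taken from the code below):
  // for a packing loop `scf.for %i = %c0 to %ub step %c32` where %ub is an
  // affine.min that can be bounded above by 128, the packed dimension becomes
  // (128 - 0).ceilDiv(32) = 4, a size that no longer depends on the enclosing
  // iteration.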
  for (auto forOp : packingLoops) {
    // Compute an upper bound `ubVal` for the upper bound of `forOp`.
    FailureOr<OpFoldResult> loopUb = affine::reifyIndexValueBound(
        rewriter, loc, presburger::BoundType::UB, forOp.getUpperBound(),
        /*stopCondition=*/
        [&](Value v, std::optional<int64_t> d, ValueBoundsConstraintSet &cstr) {
          if (v == forOp.getUpperBound())
            return false;
          // Compute a bound that is independent of any affine op results.
          Operation *op = v.getDefiningOp();
          if (!op)
            return true;
          return !isa<affine::AffineMinOp, affine::AffineMaxOp,
                      affine::AffineApplyOp>(op);
        },
        /*closedUB=*/true);
    assert(succeeded(loopUb) && "could not get upper bound");
    Value ubVal = getValueOrCreateConstantIndexOp(rewriter, loc, *loopUb);

    // Compute the maximal packing loop length as (ub - lb).ceilDiv(step) and
    // store the result to `dynamicTensorSizes`.
    // TODO: instead of using the lower bound of `forOp` directly, implement a
    // lower bound computation similar to the upper bound computation.
    AffineExpr lb, ub, step;
    bindDims(rewriter.getContext(), lb, ub);
    bindSymbols(rewriter.getContext(), step);
    Value res = rewriter.createOrFold<affine::AffineApplyOp>(
        loc, (ub - lb).ceilDiv(step),
        ValueRange{forOp.getLowerBound(), ubVal,
                   cast<scf::ForOp>(forOp).getStep()});
    dynamicTensorSizes.push_back(res);
  }

  return dynamicTensorSizes;
}

static bool isDefinedOutsideOrConstant(scf::ForOp outer, Value v) {
  return outer.isDefinedOutsideOfLoop(v) || matchPattern(v, m_Constant());
}

//===----------------------------------------------------------------------===//
// buildPackingLoopNest Implementation.
//===----------------------------------------------------------------------===//

/// Return the current iteration number in the loop (iv - lb).ceilDiv(step).
/// The returned Value is guaranteed not to depend on any loop comprised in
/// [`outer`, `forOp`].
/// Return null if such a loop-independent quantity cannot be computed.
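/// For illustration (a hypothetical instance): with lb = 4, step = 3 and
/// iv = 10, the iteration number is (10 - 4).ceilDiv(3) = 2, i.e. the loop is
/// in its third iteration (counting from 0).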
static Value buildLoopIterationCount(RewriterBase &rewriter, scf::ForOp outer,
                                     scf::ForOp forOp) {
  MLIRContext *ctx = forOp->getContext();
  AffineExpr iv, lb, step;
  bindDims(ctx, iv, lb);
  bindSymbols(ctx, step);
  if (!isDefinedOutsideOrConstant(outer, forOp.getLowerBound()) ||
      !isDefinedOutsideOrConstant(outer, forOp.getStep()))
    return Value();
  Value ivVal = forOp.getInductionVar(), lbVal = forOp.getLowerBound(),
        stepVal = forOp.getStep();
  auto loc = forOp->getLoc();
  return rewriter.createOrFold<affine::AffineApplyOp>(
      loc, (iv - lb).ceilDiv(step), ValueRange{ivVal, lbVal, stepVal});
}

// Build a packing loop nest by iteratively traversing the backward slice and
// cloning the operations, iteratively stepping into the loops that we
// encounter. The implementation proceeds in a stack-like fashion:
//   1. Iteratively clone and step into the loops, pushing the
//      `hoistedPackedTensor` deeper in the stack.
//   2. At the innermost loop level, create a TransposeOp if `transposeVector`
//      is non-empty.
//   3. At the innermost loop level, create an InsertSliceOp.
//   4. Iteratively pop and yield the result of the InsertSliceOp across the
//      cloned loops.
static FailureOr<PackingResult> buildPackingLoopNestImpl(
    RewriterBase &rewriter, IRMapping &bvm, tensor::PadOp opToHoist,
    ArrayRef<int64_t> transposeVector, RankedTensorType transposedTensorType,
    tensor::EmptyOp emptyOp, const HoistPaddingAnalysis &analysis) {
  SmallVector<OpFoldResult> offsets, sizes, strides;
  SmallVector<Value> clonedLoopIvs, leadingHoistedPackedTensorIndexings;

  scf::ForOp outerLoop = analysis.outermostEnclosingForOp;

  Location loc = opToHoist->getLoc();
  RankedTensorType paddedTensorType = opToHoist.getResultType();
  int paddedRank = paddedTensorType.getRank();

  // Step 0. Populate bvm with opToHoist.getSource if relevant.
  BlockArgument bbArg = dyn_cast<BlockArgument>(opToHoist.getSource());
  while (bbArg) {
    auto forOp = dyn_cast<scf::ForOp>(bbArg.getOwner()->getParentOp());
    if (!forOp)
      break;
    if (forOp != outerLoop && !outerLoop->isAncestor(forOp))
      break;
    OpOperand &operand = *forOp.getTiedLoopInit(bbArg);
    bvm.map(bbArg, operand.get());
    bbArg = dyn_cast<BlockArgument>(operand.get());
  }
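
  // For illustration (schematic, names illustrative): if the pad source is
  // %arg0 of `scf.for ... iter_args(%arg0 = %init)`, the walk above maps %arg0
  // to %init so that the packing nest built before the loop reads the
  // loop-invariant %init instead of the iter arg.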

  // Step 1. iteratively clone loops and push `hoistedPackedTensor`.
  Value hoistedPackedTensor = emptyOp.getResult();
  OpBuilder::InsertionGuard g(rewriter);
  for (Operation *op : analysis.backwardSlice) {
    // Specifically sit out in the extract_slice(hoistedPackedTensor) case: this
    // is the piece we seek to replace.
    if (auto sliceOp = dyn_cast<tensor::ExtractSliceOp>(op)) {
      if (bvm.lookupOrDefault(sliceOp.getSource()) == hoistedPackedTensor) {
        LLVM_DEBUG(DBGS() << "--Skip: " << sliceOp << "\n");
        continue;
      }
    }

    // Clone all operations except loops which require special handling.
    auto forOp = dyn_cast<scf::ForOp>(op);
    if (!forOp) {
      // We are at the right insertion point within the loop nest.
      rewriter.clone(*op, bvm);
      continue;
    }

    // Create a packing loop that takes `hoistedPackedTensor` as iteration
    // argument.
    auto clonedForOp = scf::ForOp::create(
        rewriter, loc, bvm.lookupOrDefault(forOp.getLowerBound()),
        bvm.lookupOrDefault(forOp.getUpperBound()),
        bvm.lookupOrDefault(forOp.getStep()), hoistedPackedTensor,
        /*bodyBuilder=*/nullptr, forOp.getUnsignedCmp());

    // Map the induction var, region args and results to the `clonedForOp`.
    bvm.map(forOp.getInductionVar(), clonedForOp.getInductionVar());
    bvm.map(forOp.getRegionIterArgs(), clonedForOp.getRegionIterArgs());
    bvm.map(forOp.getResults(), clonedForOp.getResults());
    assert(clonedForOp->getNumRegions() == 1);
    clonedLoopIvs.push_back(clonedForOp.getInductionVar());

    // Do not insert guard here, we get deeper into the loop nest.
    rewriter.setInsertionPointToStart(&clonedForOp->getRegion(0).front());
    Value loopIndependentIterationCount =
        buildLoopIterationCount(rewriter, outerLoop, clonedForOp);

    // Assert the loop-independent iteration count can be computed.
    if (!loopIndependentIterationCount)
      llvm_unreachable("loop independence prerequisite not met");
    leadingHoistedPackedTensorIndexings.push_back(
        loopIndependentIterationCount);
    hoistedPackedTensor = clonedForOp.getRegionIterArgs().front();
  }

  // Step 2. Construct offsets, sizes and strides for the innermost level of the
  // packing loop.
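  //
  // For illustration (a hypothetical instance): with 2 packing loops and a
  // transposed shape of 5x16 this produces
  //   offsets = [%it0, %it1, 0, 0], sizes = [1, 1, 5, 16],
  //   strides = [1, 1, 1, 1],
  // where %it0 and %it1 are the loop-independent iteration counts computed
  // above.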
  int64_t nPackedLoops = clonedLoopIvs.size();
  // offsets = [clonedLoopIvs, 0 .. 0].
  offsets =
      SmallVector<OpFoldResult>{leadingHoistedPackedTensorIndexings.begin(),
                                leadingHoistedPackedTensorIndexings.end()};
  offsets.append(paddedRank, rewriter.getIndexAttr(0));
  // sizes = [1 .. 1, transposedShape].
  sizes = SmallVector<OpFoldResult>(nPackedLoops, rewriter.getIndexAttr(1));
  for (int64_t sz : transposedTensorType.getShape()) {
    // TODO: go grab dims when needed, atm tensor::PadOp yields a static tensor.
    if (ShapedType::isDynamic(sz))
      return failure();
    sizes.push_back(rewriter.getIndexAttr(sz));
  }
  // strides = [1 .. 1].
  strides = SmallVector<OpFoldResult>(nPackedLoops + paddedRank,
                                      rewriter.getIndexAttr(1));

  // Step 3. Optionally transpose the padded tensor.
  TransposeOp maybeTransposeOp;
  Value paddedTensor = bvm.lookup(opToHoist.getResult());
  if (!transposeVector.empty()) {
    Value outputTensor = tensor::ExtractSliceOp::create(
        rewriter, loc, transposedTensorType, hoistedPackedTensor, offsets,
        sizes, strides);
    maybeTransposeOp = linalg::TransposeOp::create(
        rewriter, loc, paddedTensor, outputTensor, transposeVector);
    paddedTensor = maybeTransposeOp.getResult()[0];
  }

  // Innermost tensor.insert_slice and yields are optional / need loops.
  if (nPackedLoops > 0) {
    // Step 4. Create InsertSliceOp at the innermost loop level, inserting an
    // optionally transposed padded slice into the packed tensor.
    Value inserted = tensor::InsertSliceOp::create(rewriter, loc, paddedTensor,
                                                   hoistedPackedTensor, offsets,
                                                   sizes, strides);

    // Step 5. Iteratively pop the stack and propagate the yield.
    Value valueToYield = inserted;
    for (Value iv : llvm::reverse(clonedLoopIvs)) {
      auto forOp = scf::getForInductionVarOwner(iv);
      rewriter.setInsertionPointToEnd(&forOp.getRegion().front());
      scf::YieldOp::create(rewriter, loc, valueToYield);
      valueToYield = forOp.getResult(0);
    }
  }

  return PackingResult{
      offsets,
      sizes,
      strides,
      clonedLoopIvs,
      leadingHoistedPackedTensorIndexings,
      maybeTransposeOp,
      cast<tensor::PadOp>(bvm.lookup(opToHoist.getResult()).getDefiningOp())};
}

/// Build the packing loop nest required to hoist `opToHoist` above
/// `outermostEnclosingForOp`.
/// The loop nest is built just before `outermostEnclosingForOp`.
static FailureOr<PackingResult> buildPackingLoopNestImpl(
    RewriterBase &rewriter, IRMapping &bvm, tensor::PadOp opToHoist,
    ArrayRef<int64_t> transposeVector, const HoistPaddingAnalysis &analysis) {
  // Update actual number of loops, which may be smaller.
  int nPackedLoops = analysis.packingLoops.size();
  LLVM_DEBUG(DBGS() << "\n";
             DBGS() << "Func:\n"
                    << *opToHoist->getParentOfType<func::FuncOp>() << "\n";
             DBGS() << "Start hoisting above " << nPackedLoops << " loops\n");

  Location loc = opToHoist->getLoc();
  RankedTensorType paddedTensorType = opToHoist.getResultType();

  // Compute the type of the transposed padded tensor.
  FailureOr<RankedTensorType> transposedTensorType =
      tensor::computeTransposedType(paddedTensorType, transposeVector);
  if (failed(transposedTensorType)) {
    LLVM_DEBUG(DBGS() << "--Could not compute transposed type -> Skip\n");
    return failure();
  }

  // Create the packed tensor<?x?x..? x transposedShape>.
  SmallVector<int64_t> packedShape(nPackedLoops, ShapedType::kDynamic);
  // TODO: go grab dims when needed, atm tensor::PadOp yields a static tensor.
  llvm::append_range(packedShape, transposedTensorType->getShape());
  auto hoistedPackedTensorType = RankedTensorType::get(
      packedShape, transposedTensorType->getElementType());

  // Set the insertion point right before the outer loop and start packing.
  scf::ForOp outerLoop = analysis.outermostEnclosingForOp;
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(outerLoop);
  SmallVector<Value> dynamicTensorSizes =
      analysis.getHoistedPackedTensorSizes(rewriter, loc);
  auto emptyOp = tensor::EmptyOp::create(
      rewriter, loc, hoistedPackedTensorType.getShape(),
      hoistedPackedTensorType.getElementType(), dynamicTensorSizes);

  return buildPackingLoopNestImpl(rewriter, bvm, opToHoist, transposeVector,
                                  *transposedTensorType, emptyOp, analysis);
}

/// Build the packing loop nest required to hoist `opToHoist` above
/// `outermostEnclosingForOp`.
/// The loop nest is built just before `outermostEnclosingForOp`.
FailureOr<PackingResult> mlir::linalg::detail::buildPackingLoopNest(
    RewriterBase &rewriter, tensor::PadOp opToHoist,
    scf::ForOp outermostEnclosingForOp, ArrayRef<int64_t> transposeVector) {
  HoistPaddingAnalysis analysis(opToHoist, outermostEnclosingForOp);
  analysis.enableHoistPadding(rewriter);
  analysis.finalizeHoistPaddingAnalysis();
  if (!analysis.isValid()) {
    LLVM_DEBUG(DBGS() << "--Analysis failed -> Skip\n");
    return failure();
  }
  IRMapping bvm;
  return buildPackingLoopNestImpl(rewriter, bvm, opToHoist, transposeVector,
                                  analysis);
}

//===----------------------------------------------------------------------===//
// hoistPaddingOnTensors Implementation.
//===----------------------------------------------------------------------===//

/// Return true if we can walk back the use-def chain from `extractSliceOp` to
/// expectedSource going through DestinationStyleOpInterface inits only.
/// This is a poor man's analysis that is sufficient to check that the
/// extractSliceOp matches the tensor.pad we want to hoist.
/// In the future, it will be easier to ensure this with a matching symmetric
/// tensor.unpad op.
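///
/// For illustration (schematic, names illustrative): with
/// ```
/// %0 = linalg.matmul ins(...) outs(%expectedSource)
/// %1 = tensor.extract_slice %0 ...
/// ```
/// the walk steps from %1 through the matmul's init operand and reaches
/// %expectedSource, so the function returns true.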
static bool tracesBackToExpectedValue(tensor::ExtractSliceOp extractSliceOp,
                                      Value expectedSource) {
  LLVM_DEBUG(DBGS() << "Start tracesBackToExpectedValue on: " << extractSliceOp
                    << "\n");
  LLVM_DEBUG(DBGS() << "--with extractSlice: " << extractSliceOp << "\n");
  Value source = extractSliceOp.getSource();
  LLVM_DEBUG(DBGS() << "--with starting source: " << source << "\n");
  while (source && source != expectedSource) {
    auto destOp = source.getDefiningOp<DestinationStyleOpInterface>();
    if (!destOp)
      break;
    LLVM_DEBUG(DBGS() << "--step dest op: " << destOp << "\n");
    source = destOp.getDpsInitOperand(cast<OpResult>(source).getResultNumber())
                 ->get();
  }
  LLVM_DEBUG(DBGS() << "--final source: " << source << "\n");
  LLVM_DEBUG(DBGS() << "--expected source: " << expectedSource << "\n");
  return source == expectedSource;
}

/// If the original consumer of `outerSliceOp` was a `forOp` (i.e. through an
/// iter arg), propagate the `hoistedPackedTensor` value through the same iter
/// arg.
/// TODO: for multiple loops we need to track the use to the innermost loop.
///
/// Match:
/// ```
/// %outerSliceOp = tensor.extract_slice ..
/// %f = scf.for ... iter_args(%arg0 = %outerSliceOp) {
///   %hoistedPackedTensor = tensor.pad %arg0
///   %1 = compute %hoistedPackedTensor
///   %2 = tensor.extract_slice %1
///   scf.yield %2
/// }
/// ```
///
/// and rewrite as:
/// ```
/// %outerSliceOp = tensor.extract_slice ..
/// %hoistedPackedTensor = tensor.pad %outerSliceOp
/// %f = scf.for ... iter_args(%arg0 = %hoistedPackedTensor) {
///   %1 = compute %arg0
///   scf.yield %1
/// }
/// %2 = tensor.extract_slice %forOp
/// ```
///
/// Return null when no rewrite happened.
static tensor::ExtractSliceOp
padThroughLoopIterArg(RewriterBase &rewriter, Value paddedValueBeforeHoisting,
                      Value hoistedPackedTensor,
                      tensor::ExtractSliceOp outerSliceOp, scf::ForOp forOp) {
  LLVM_DEBUG(DBGS() << "Start padThroughLoopIterArg on: " << forOp << "\n");
  LLVM_DEBUG(DBGS() << "--paddedValueBeforeHoisting: "
                    << paddedValueBeforeHoisting << "\n");
  OpOperand *pUse = nullptr;
  for (OpOperand &use : outerSliceOp->getUses()) {
    if (use.getOwner() == forOp) {
      assert(!pUse && "Multiple slice uses in the for loop");
      pUse = &use;
    }
  }
  assert(pUse && "No slice use in the for loop");
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPointAfter(hoistedPackedTensor.getDefiningOp());

  unsigned iterArgNumber = forOp.getTiedLoopResult(pUse).getResultNumber();
  auto yieldingExtractSliceOp = forOp.getYieldedValues()[iterArgNumber]
                                    .getDefiningOp<tensor::ExtractSliceOp>();
  if (!yieldingExtractSliceOp)
    return tensor::ExtractSliceOp();

  // Poor man's analysis sufficient to ensure extractSlice matches tensor.pad.
  // In the future, it will be easier to ensure this with a matching symmetric
  // tensor.unpad op.
  if (!tracesBackToExpectedValue(yieldingExtractSliceOp,
                                 paddedValueBeforeHoisting))
    return tensor::ExtractSliceOp();

  SmallVector<Value> initArgs = forOp.getInitArgs();
  initArgs[iterArgNumber] = hoistedPackedTensor;
  SmallVector<Value> yieldOperands = llvm::to_vector(forOp.getYieldedValues());
  yieldOperands[iterArgNumber] = yieldingExtractSliceOp.getSource();

  int64_t numOriginalForOpResults = initArgs.size();
  LLVM_DEBUG(DBGS() << "numOriginalForOpResults: " << numOriginalForOpResults
                    << "\n");
  tensor::ExtractSliceOp extracted;
  {
    OpBuilder::InsertionGuard g(rewriter);
    rewriter.setInsertionPointAfter(forOp);
    extracted = tensor::ExtractSliceOp::create(
        rewriter, hoistedPackedTensor.getLoc(), hoistedPackedTensor,
        outerSliceOp.getMixedOffsets(), outerSliceOp.getMixedSizes(),
        outerSliceOp.getMixedStrides());
    rewriter.replaceAllUsesWith(forOp.getResult(iterArgNumber), extracted);
  }
  scf::ForOp newForOp = cast<scf::ForOp>(*forOp.replaceWithAdditionalYields(
      rewriter, initArgs, /*replaceInitOperandUsesInLoop=*/true,
      [&](OpBuilder &b, Location loc, ArrayRef<BlockArgument> newBBArgs) {
        return yieldOperands;
      }));

  LLVM_DEBUG(DBGS() << "newForOp results: " << newForOp.getNumResults()
                    << "\n");
  LLVM_DEBUG(DBGS() << "replace source of: " << extracted << "\n");
  LLVM_DEBUG(DBGS() << "with result #"
                    << numOriginalForOpResults + iterArgNumber
                    << " of forOp, giving us: " << extracted << "\n");
  rewriter.startOpModification(extracted);
  extracted.getSourceMutable().assign(
      newForOp.getResult(numOriginalForOpResults + iterArgNumber));
  rewriter.finalizeOpModification(extracted);

  LLVM_DEBUG(DBGS() << "replace uses of: " << paddedValueBeforeHoisting
                    << "\n");
  LLVM_DEBUG(DBGS() << "with region iter arg #"
                    << numOriginalForOpResults + iterArgNumber << "\n");
  rewriter.replaceAllUsesWith(
      paddedValueBeforeHoisting,
      newForOp.getRegionIterArg(numOriginalForOpResults + iterArgNumber));

  return extracted;
}

/// Produce a tensor extracted from the packingResult. This can be used as a
/// replacement for `opToHoist` in callers.
static Value replaceByPackingResult(RewriterBase &rewriter,
                                    const IRMapping &bvm,
                                    tensor::PadOp opToHoist,
                                    RankedTensorType transposedTensorType,
                                    const HoistPaddingAnalysis &analysis,
                                    const PackingResult &packingResult) {
  // The replacement occurs under a single insertion point within the original
  // loop, just before opToHoist.
  OpBuilder::InsertionGuard g(rewriter);
  rewriter.setInsertionPoint(opToHoist);

  Location loc = opToHoist->getLoc();
  RankedTensorType paddedTensorType = opToHoist.getResultType();
  int paddedRank = paddedTensorType.getRank();

  int64_t nPackedLoops = packingResult.clonedLoopIvs.size();
  LLVM_DEBUG(DBGS() << "nPackedLoops: " << nPackedLoops << " loops\n");

  scf::ForOp outerLoop = analysis.outermostEnclosingForOp;
  ArrayRef<scf::ForOp> packingLoops = analysis.packingLoops;

  Value hoistedPackedTensor;
  SmallVector<Value> loopIterationCounts;
  SmallVector<OpFoldResult> offsets(nPackedLoops + paddedRank,
                                    rewriter.getIndexAttr(0));
  if (nPackedLoops > 0) {
    loopIterationCounts =
        llvm::map_to_vector<4>(packingLoops, [&](Operation *loop) {
          return buildLoopIterationCount(rewriter, outerLoop,
                                         cast<scf::ForOp>(loop));
        });
    // Assert all loop iteration counts can be computed.
    if (llvm::any_of(loopIterationCounts, [](Value v) { return !v; }))
      llvm_unreachable("loop independence prerequisite not met");

    // offsets = [maybe_leading_ivs = originalLoopIvs, 0 .. 0].
    llvm::copy(loopIterationCounts, offsets.begin());
    hoistedPackedTensor =
        scf::getForInductionVarOwner(packingResult.clonedLoopIvs.front())
            ->getResult(0);
  } else {
    // If no loops were created, this is just hoisting without packing.
    hoistedPackedTensor = bvm.lookup(opToHoist.getResult());
  }

  LLVM_DEBUG(DBGS() << "hoistedPackedTensor: " << hoistedPackedTensor << "\n");

  // If the consumer of `padOp` was a `forOp`, propagate through iter args.
  scf::ForOp forOp = analysis.padConsumingForOp;
  if (forOp) {
    return padThroughLoopIterArg(rewriter, opToHoist, hoistedPackedTensor,
                                 analysis.sliceOp, forOp);
  }

  // offsets = [maybe_leading_ivs, 0 .. 0].
  // sizes = [1 .. 1, transposedShape] (defined above).
  // strides = [1 .. 1] (defined above)
  return tensor::ExtractSliceOp::create(
      rewriter, loc, transposedTensorType, hoistedPackedTensor, offsets,
      packingResult.sizes, packingResult.strides);
}

FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
    RewriterBase &rewriter, tensor::PadOp opToHoist, int64_t numLoops,
    ArrayRef<int64_t> transposeVector, tensor::PadOp &hoistedOp,
    SmallVectorImpl<TransposeOp> &transposeOps) {
  LLVM_DEBUG(DBGS() << "\n"; DBGS() << " Try to hoist " << *(opToHoist) << "\n";
             DBGS() << " by " << numLoops << " loops\n");

  HoistPaddingAnalysis analysis(opToHoist, numLoops);
  analysis.enableHoistPadding(rewriter);
  analysis.finalizeHoistPaddingAnalysis();
  if (!analysis.isValid()) {
    LLVM_DEBUG(DBGS() << "--Analysis failed -> Skip\n");
    return failure();
  }

  /// Construct the packing loop nest.
  IRMapping bvm;
  FailureOr<PackingResult> packingResult = buildPackingLoopNestImpl(
      rewriter, bvm, opToHoist, transposeVector, analysis);
  if (failed(packingResult)) {
    LLVM_DEBUG(DBGS() << "--buildPackingLoopNestImpl failed -> Skip\n");
    return failure();
  }

  if (!transposeVector.empty())
    transposeOps.push_back(packingResult->maybeTransposeOp);

  FailureOr<RankedTensorType> transposedTensorType =
      tensor::computeTransposedType(opToHoist.getResultType(), transposeVector);
  assert(succeeded(transposedTensorType) && "unexpected failure in type");

  // Now the packed tensor is ready, replace the original padding op by a
  // 1x..x1 slice [originalLoopIvs, 0 .. 0][1 .. 1, paddedShape][1 .. 1].
  Value newResult =
      replaceByPackingResult(rewriter, bvm, opToHoist, *transposedTensorType,
                             analysis, *packingResult);

  Location loc = opToHoist->getLoc();
  RankedTensorType paddedTensorType = opToHoist.getResultType();
  if (!transposeVector.empty()) {
    OpBuilder::InsertionGuard g(rewriter);
    rewriter.setInsertionPointAfter(newResult.getDefiningOp());
    // Transpose the packed tensor back to the original storage order.
    Value emptyTensor =
        tensor::EmptyOp::create(rewriter, loc, paddedTensorType.getShape(),
                                paddedTensorType.getElementType());
    TransposeOp unTransposeOp = linalg::TransposeOp::create(
        rewriter, loc, newResult, emptyTensor, transposeVector);
    newResult = unTransposeOp.getResult()[0];
    transposeOps.push_back(unTransposeOp);
  }

  LLVM_DEBUG(DBGS() << "newResult: " << newResult << "\n");
  LLVM_DEBUG(
      DBGS() << "After hoisting: "
             << newResult.getDefiningOp()->getParentOfType<func::FuncOp>()
             << "\n");

  // Make the newly cloned `opToHoist` available to the caller.
  hoistedOp = packingResult->hoistedPadOp;

  LLVM_DEBUG(DBGS() << "--SUCCESS\n");
  return newResult;
}

FailureOr<Value> mlir::linalg::hoistPaddingOnTensors(
    tensor::PadOp opToHoist, int64_t numLoops,
    ArrayRef<int64_t> transposeVector, tensor::PadOp &hoistedOp,
    SmallVectorImpl<TransposeOp> &transposeOps) {
  IRRewriter rewriter(opToHoist.getContext());
  return hoistPaddingOnTensors(rewriter, opToHoist, numLoops, transposeVector,
                               hoistedOp, transposeOps);
}
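
// For illustration, a minimal sketch of how a caller (e.g. a pattern or a
// transform) might drive this entry point; `padOp` and `rewriter` are assumed
// to be provided by the surrounding code, and the chosen parameters are
// illustrative only:
//
//   tensor::PadOp hoistedOp;
//   SmallVector<linalg::TransposeOp> transposeOps;
//   FailureOr<Value> replacement = linalg::hoistPaddingOnTensors(
//       rewriter, padOp, /*numLoops=*/2, /*transposeVector=*/{}, hoistedOp,
//       transposeOps);
//   if (succeeded(replacement))
//     rewriter.replaceOp(padOp, *replacement);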