MLIR 22.0.0git
LoopUtils.h File Reference
#include "mlir/IR/Block.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include <optional>

Go to the source code of this file.

Classes

struct  mlir::affine::AffineCopyOptions
 Explicit copy / DMA generation options for mlir::affine::affineDataCopyGenerate. More...

Namespaces

namespace  mlir
 Include the generated interface declarations.
namespace  mlir::func
namespace  mlir::scf
namespace  mlir::affine

Functions

LogicalResult mlir::affine::loopUnrollFull (AffineForOp forOp)
 Unrolls this for operation completely if the trip count is known to be constant.
LogicalResult mlir::affine::loopUnrollByFactor (AffineForOp forOp, uint64_t unrollFactor, function_ref< void(unsigned, Operation *, OpBuilder)> annotateFn=nullptr, bool cleanUpUnroll=false)
 Unrolls this for operation by the specified unroll factor.
LogicalResult mlir::affine::loopUnrollUpToFactor (AffineForOp forOp, uint64_t unrollFactor)
 Unrolls this loop by the specified unroll factor or its trip count, whichever is lower.
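
A minimal usage sketch for the unroll entry points above, assuming a func::FuncOp named f whose body contains affine.for loops; the factor 4 is an arbitrary illustration value:

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

// Unroll every affine.for in `f` by (at most) a factor of 4.
static void unrollAllLoops(mlir::func::FuncOp f) {
  f.walk([](mlir::affine::AffineForOp forOp) {
    // loopUnrollUpToFactor clamps the factor to the constant trip count
    // when that trip count is smaller than the requested factor.
    (void)mlir::affine::loopUnrollUpToFactor(forOp, /*unrollFactor=*/4);
  });
}

Using loopUnrollFull instead fully unrolls a loop whose trip count is a known constant, and loopUnrollByFactor unrolls by exactly the given factor.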
bool mlir::affine::isPerfectlyNested (ArrayRef< AffineForOp > loops)
 Returns true if loops is a perfectly nested loop nest, where loops appear in it from outermost to innermost.
void mlir::affine::getPerfectlyNestedLoops (SmallVectorImpl< AffineForOp > &nestedLoops, AffineForOp root)
 Get the perfectly nested sequence of loops starting at root: nesting continues as long as a loop body's first op is another AffineForOp and its second op is the loop's terminator.
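
A sketch of collecting a loop band rooted at an outermost affine.for (here named root, an assumed input) and re-checking the perfect-nesting invariant:

#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Collect the loops perfectly nested under `root`, outermost first.
// The collected band includes `root` itself.
static bool collectBand(mlir::affine::AffineForOp root,
                        llvm::SmallVectorImpl<mlir::affine::AffineForOp> &band) {
  mlir::affine::getPerfectlyNestedLoops(band, root);
  return mlir::affine::isPerfectlyNested(band);
}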
LogicalResult mlir::affine::loopUnrollJamByFactor (AffineForOp forOp, uint64_t unrollJamFactor)
 Unrolls and jams this loop by the specified factor.
LogicalResult mlir::affine::loopUnrollJamUpToFactor (AffineForOp forOp, uint64_t unrollJamFactor)
 Unrolls and jams this loop by the specified factor or by the trip count (if constant), whichever is lower.
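
A sketch for unroll-and-jam on an outer loop; the factor 2 is illustrative, and the up-to variant is used so a small constant trip count does not cause failure:

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Unroll-and-jam `forOp` by 2: its body is duplicated and the copies of any
// inner loops are fused ("jammed") together.
static mlir::LogicalResult unrollJamOuter(mlir::affine::AffineForOp forOp) {
  return mlir::affine::loopUnrollJamUpToFactor(forOp, /*unrollJamFactor=*/2);
}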
LogicalResult mlir::affine::promoteIfSingleIteration (AffineForOp forOp)
 Promotes the loop body of an AffineForOp to its containing block if the loop was known to have a single iteration.
void mlir::affine::promoteSingleIterationLoops (func::FuncOp f)
 Promotes all single iteration AffineForOp's in the Function, i.e., moves their body into the containing Block.
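
A sketch of cleaning up trivial loops after other transformations have run; walking with promoteIfSingleIteration is roughly equivalent to the function-wide promoteSingleIterationLoops convenience above:

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"

// Replace every single-iteration affine.for in `f` with its body.
static void cleanupTrivialLoops(mlir::func::FuncOp f) {
  f.walk([](mlir::affine::AffineForOp forOp) {
    // Succeeds (and removes the loop) only when the trip count is known to be 1.
    (void)mlir::affine::promoteIfSingleIteration(forOp);
  });
}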
LogicalResult mlir::affine::affineForOpBodySkew (AffineForOp forOp, ArrayRef< uint64_t > shifts, bool unrollPrologueEpilogue=false)
 Skew the operations in an affine.for's body with the specified operation-wise shifts.
LogicalResult mlir::affine::tilePerfectlyNested (MutableArrayRef< AffineForOp > input, ArrayRef< unsigned > tileSizes, SmallVectorImpl< AffineForOp > *tiledNest=nullptr)
 Tiles the specified band of perfectly nested loops creating tile-space loops and intra-tile loops.
LogicalResult mlir::affine::tilePerfectlyNestedParametric (MutableArrayRef< AffineForOp > input, ArrayRef< Value > tileSizes, SmallVectorImpl< AffineForOp > *tiledNest=nullptr)
 Tiles the specified band of perfectly nested loops creating tile-space loops and intra-tile loops, using SSA values as tiling parameters.
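
A tiling sketch, assuming root is the outermost loop of a band that is already perfectly nested; the uniform tile size of 32 is arbitrary, and one size is required per loop in the band:

#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Tile a perfectly nested band rooted at `root` with fixed tile sizes.
static mlir::LogicalResult tileBand(mlir::affine::AffineForOp root) {
  llvm::SmallVector<mlir::affine::AffineForOp, 6> band;
  mlir::affine::getPerfectlyNestedLoops(band, root);

  // One tile size per loop, outermost to innermost.
  llvm::SmallVector<unsigned, 6> tileSizes(band.size(), 32);

  llvm::SmallVector<mlir::affine::AffineForOp, 6> tiledNest;
  return mlir::affine::tilePerfectlyNested(band, tileSizes, &tiledNest);
}

tilePerfectlyNestedParametric has the same shape but takes the tile sizes as SSA Values rather than compile-time constants.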
void mlir::affine::interchangeLoops (AffineForOp forOpA, AffineForOp forOpB)
 Performs loop interchange on 'forOpA' and 'forOpB'.
bool mlir::affine::isValidLoopInterchangePermutation (ArrayRef< AffineForOp > loops, ArrayRef< unsigned > loopPermMap)
 Checks if the loop interchange permutation 'loopPermMap', of the perfectly nested sequence of loops in 'loops', would violate dependences (loop 'i' in 'loops' is mapped to location j = 'loopPermMap[i]' in the interchange).
unsigned mlir::affine::permuteLoops (ArrayRef< AffineForOp > inputNest, ArrayRef< unsigned > permMap)
 Performs a loop permutation on a perfectly nested loop nest inputNest (where the contained loops appear from outer to inner) as specified by the permutation permMap: loop 'i' in inputNest is mapped to location 'permMap[i]', where positions 0, 1, ... are from the outermost position to inner.
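
A sketch that validates a permutation before applying it, assuming a perfectly nested band of depth 3 gathered as above; the permutation {1, 0, 2} (swap the two outermost loops) is illustrative:

#include <cassert>
#include "llvm/ADT/ArrayRef.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Swap the two outermost loops of a depth-3 band if dependences allow it.
static mlir::LogicalResult interchangeOuterPair(
    llvm::ArrayRef<mlir::affine::AffineForOp> band) {
  assert(band.size() == 3 && "expected a depth-3 band");
  unsigned permMap[] = {1, 0, 2}; // loop i moves to position permMap[i]
  if (!mlir::affine::isValidLoopInterchangePermutation(band, permMap))
    return mlir::failure(); // the permutation would violate a dependence
  mlir::affine::permuteLoops(band, permMap);
  return mlir::success();
}

For the two-loop case, interchangeLoops(forOpA, forOpB) performs the swap directly.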
AffineForOp mlir::affine::sinkSequentialLoops (AffineForOp forOp)
SmallVector< SmallVector< AffineForOp, 8 >, 8 > mlir::affine::tile (ArrayRef< AffineForOp > forOps, ArrayRef< uint64_t > sizes, ArrayRef< AffineForOp > targets)
 Performs tiling of imperfectly nested loops (with interchange) by strip-mining the forOps by sizes and sinking them, in their order of occurrence in forOps, under each of the targets.
SmallVector< AffineForOp, 8 > mlir::affine::tile (ArrayRef< AffineForOp > forOps, ArrayRef< uint64_t > sizes, AffineForOp target)
 Performs tiling (with interchange) by strip-mining the forOps by sizes and sinking them, in their order of occurrence in forOps, under target.
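
A sketch of the strip-mine-and-sink form of tiling, assuming i and j are loops enclosing target (the loop under which the intra-tile loops should end up); the sizes are illustrative:

#include <cstdint>
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Strip-mine `i` and `j` by 32 and sink the resulting intra-tile loops under
// `target`, preserving their relative order.
static llvm::SmallVector<mlir::affine::AffineForOp, 8>
tileUnderTarget(mlir::affine::AffineForOp i, mlir::affine::AffineForOp j,
                mlir::affine::AffineForOp target) {
  mlir::affine::AffineForOp forOps[] = {i, j};
  uint64_t sizes[] = {32, 32};
  return mlir::affine::tile(forOps, sizes, target);
}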
int64_t mlir::affine::numEnclosingInvariantLoops (OpOperand &operand)
LogicalResult mlir::affine::affineDataCopyGenerate (Block::iterator begin, Block::iterator end, const AffineCopyOptions &copyOptions, std::optional< Value > filterMemRef, DenseSet< Operation * > &copyNests)
 Performs explicit copying for the contiguous sequence of operations in the block iterator range ['begin', 'end'), where 'end' can't be past the terminator of the block (since additional operations are potentially inserted right before 'end').
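
A sketch of driving copy generation through AffineCopyOptions. The memory-space numbers and capacity are placeholders, the field names are taken from AffineCopyOptions as declared in this header (treat them as an assumption if they have changed), and the call assumes the overload of affineDataCopyGenerate that takes a whole AffineForOp rather than a block iterator range:

#include <optional>
#include "llvm/ADT/DenseSet.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"

// Generate explicit fast-memory copies (not DMAs) for the data referenced in
// `forOp`. The memory spaces and capacity below are illustrative placeholders.
static mlir::LogicalResult generateCopies(mlir::affine::AffineForOp forOp) {
  mlir::affine::AffineCopyOptions options;
  options.generateDma = false;        // emit plain copy loops instead of DMAs
  options.slowMemorySpace = 0;        // source (slow) memory space
  options.fastMemorySpace = 1;        // destination (fast) memory space
  options.tagMemorySpace = 0;         // only relevant when generating DMAs
  options.fastMemCapacityBytes = 32 * 1024;

  llvm::DenseSet<mlir::Operation *> copyNests;
  return mlir::affine::affineDataCopyGenerate(
      forOp, options, /*filterMemRef=*/std::nullopt, copyNests);
}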