MLIR 22.0.0git
Utils.cpp File Reference

Go to the source code of this file.

Namespaces

namespace  mlir
 Include the generated interface declarations.
namespace  mlir::linalg

Macros

#define DEBUG_TYPE   "linalg-utils"

Functions

static bool isTiled (AffineExpr expr, ArrayRef< OpFoldResult > tileSizes)
static bool isTiled (AffineMap map, ArrayRef< OpFoldResult > tileSizes)
static void unpackRanges (OpBuilder &builder, Location loc, ArrayRef< Range > ranges, SmallVectorImpl< Value > &lbs, SmallVectorImpl< Value > &ubs, SmallVectorImpl< Value > &steps)
 Given a list of subview ranges, extract individual values for lower, upper bounds and steps and put them into the corresponding vectors.
static SmallVector< int64_t > computePackUnPackPerm (int64_t rank, ArrayRef< int64_t > &innerDimsPos, ArrayRef< int64_t > &outerPerm, PackingMetadata &packingMetadata)
 The permutation can be obtained from two permutations: a) Compute the permutation vector to move the last numPackedDims into the innerPosDims of a shape of rank rank.
SmallVector< int64_t > mlir::linalg::getPackInverseDestPerm (PackOp packOp, PackingMetadata &metadata)
SmallVector< int64_t > mlir::linalg::getUnPackInverseSrcPerm (UnPackOp unpackOp, PackingMetadata &metadata)
bool mlir::linalg::allIndexingsAreProjectedPermutation (LinalgOp op)
 Check if all indexing maps are projected permutations.
bool mlir::linalg::hasOnlyScalarElementwiseOp (Region &r)
 Detect whether r has only ConstantOp, ElementwiseMappable and YieldOp.
bool mlir::linalg::isElementwise (LinalgOp op)
 Check if a LinalgOp is an element-wise operation.
bool mlir::linalg::isParallelIterator (utils::IteratorType iteratorType)
 Check if iterator type has "parallel" semantics.
bool mlir::linalg::isReductionIterator (utils::IteratorType iteratorType)
 Check if iterator type has "reduction" semantics.
static BlockArgument mlir::linalg::getBlockArgumentWithOptionalExtOps (Value val)
 Returns the BlockArgument that leads to val, if any.
static bool mlir::linalg::bodyMatcherForConvolutionOps (Value yieldVal, Block *body)
 Utility to match block body for convolution ops.
template<typename... OpTypes>
static bool mlir::linalg::bodyMatcherForPoolOps (Value yieldVal, Block *body)
 Utility to match block body for linalg.pool* ops.
static bool mlir::linalg::bodyMatcherForMaxSignedPoolOps (Value yieldVal, Block *body)
static bool mlir::linalg::bodyMatcherForMaxUnsignedPoolOps (Value yieldVal, Block *body)
static bool mlir::linalg::bodyMatcherForMinSignedPoolOps (Value yieldVal, Block *body)
static bool mlir::linalg::bodyMatcherForMinUnsignedPoolOps (Value yieldVal, Block *body)
static bool mlir::linalg::bodyMatcherForSumPoolOps (Value yieldVal, Block *body)
static AffineExpr mlir::linalg::getAffineMapDim (ArrayAttr indexingMaps, uint32_t mapIndex, uint32_t dimIndex)
static int64_t mlir::linalg::isDimTimesConstantOrDimOnly (AffineExpr expr, AffineExpr &dim)
 Check if expr is either a bare dimension expression or a dimension expression multiplied by a constant.
static bool mlir::linalg::matchConvDimAddExprPattern (ArrayAttr indexingMaps, unsigned iDim, unsigned fDim, unsigned oDim, int64_t &dilation, int64_t &stride)
 Given an array of AffineMaps indexingMaps, verify the following commutatively: indexingMaps[0].getResult(iDim) == indexingMaps[1].getResult(fDim) * <c0> + indexingMaps[n-1].getResult(oDim) * <c1>, where <c0> and <c1> are constant multipliers (bound to dilation and stride).
static bool mlir::linalg::convLayoutMatches (ArrayRef< ArrayRef< AffineExpr > > mapListExpected, ArrayAttr indexingMaps, MLIRContext *context)
 Returns true if the given indexing maps match the expected indexing maps.
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::Conv1DOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::Conv1DNwcWcfOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::Conv1DNcwFcwOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::Conv2DOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::Conv3DOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::DepthwiseConv1DNcwCwOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::DepthwiseConv1DNwcWcOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::DepthwiseConv1DNwcWcmOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::DepthwiseConv2DNchwChwOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::DepthwiseConv3DNdhwcDhwcmOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::PoolingNhwcMaxOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::PoolingNhwcMinOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::PoolingNhwcSumOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::PoolingNhwcMaxUnsignedOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
template<>
bool mlir::linalg::isaConvolutionOpOfType< linalg::PoolingNhwcMinUnsignedOp > (LinalgOp op, SmallVector< int64_t > *dilations, SmallVector< int64_t > *strides)
Value mlir::linalg::makeComposedPadHighOp (OpBuilder &b, Location loc, RankedTensorType type, Value source, Value padding, bool nofold, ValueRange typeDynDims={})
 Create a tensor::PadOp that pads source to the shape of type whose sizes are assumed to be greater than the dynamic source size.
GenericOp mlir::linalg::makeMemRefCopyOp (OpBuilder &b, Location loc, Value from, Value to)
 Returns GenericOp that copies an n-D memref.
void mlir::linalg::updateBoundsForCyclicDistribution (OpBuilder &builder, Location loc, Value procId, Value nprocs, Value &lb, Value &ub, Value &step)
 Update the lb, ub and step to get per processor lb, ub and step.
static void mlir::linalg::generateParallelLoopNest (OpBuilder &b, Location loc, ValueRange lbs, ValueRange ubs, ValueRange steps, ArrayRef< utils::IteratorType > iteratorTypes, ArrayRef< linalg::ProcInfo > procInfo, function_ref< void(OpBuilder &, Location, ValueRange)> bodyBuilderFn, SmallVectorImpl< Value > &ivStorage)
 Generates a loop nest consisting of scf.parallel and scf.for, depending on the iteratorTypes.
static Operation * mlir::linalg::materializeTiledShape (OpBuilder &builder, Location loc, Value valueToTile, const SliceParameters &sliceParams)
Operation * mlir::linalg::makeTiledShape (OpBuilder &builder, Location loc, Value valueToTile, ArrayRef< OpFoldResult > tileSizes, AffineMap map, ArrayRef< OpFoldResult > lbs, ArrayRef< OpFoldResult > ubs, ArrayRef< OpFoldResult > subShapeSizes, bool omitPartialTileCheck)
 Creates an extract_slice/subview op for a single valueToTile with builder.
SliceParameters mlir::linalg::computeSliceParameters (OpBuilder &builder, Location loc, Value valueToTile, ArrayRef< OpFoldResult > tileSizes, AffineMap map, ArrayRef< OpFoldResult > lbs, ArrayRef< OpFoldResult > ubs, ArrayRef< OpFoldResult > subShapeSizes, bool omitPartialTileCheck)
 Computes SliceParameters for a single valueToTile assuming that its user is being tiled with the given loop bounds lbs and ubs and the tile sizes tileSizes.
SmallVector< OpFoldResult > mlir::linalg::computeTileOffsets (OpBuilder &b, Location loc, ArrayRef< OpFoldResult > ivs, ArrayRef< OpFoldResult > tileSizes)
 Computes tile offsets, given a list of loop ivs and tileSizes.
SmallVector< OpFoldResult > mlir::linalg::computeTileSizes (OpBuilder &b, Location loc, ArrayRef< OpFoldResult > tileSizes, ArrayRef< OpFoldResult > sizeBounds)
 Computes tile sizes, given a list of tileSizes and dimension sizes (sizeBounds).
SmallVector< Type > mlir::linalg::getTensorOutputTypes (LinalgOp op, ValueRange operands)
 Returns the list of tensor output types produced when the given structured operation op is applied to the given operands.
SmallVector< Value > mlir::linalg::insertSlicesBack (OpBuilder &builder, Location loc, LinalgOp op, ValueRange operands, ValueRange results)
 Creates insert_slice ops that insert results back into larger tensors they were originally extracted from with extract_slice before being passed as operands to the given structured operation op or its clone.
SmallVector< std::optional< SliceParameters > > mlir::linalg::computeAllSliceParameters (OpBuilder &builder, Location loc, LinalgOp linalgOp, ValueRange valuesToTile, ArrayRef< OpFoldResult > ivs, ArrayRef< OpFoldResult > tileSizes, ArrayRef< OpFoldResult > sizeBounds, bool omitPartialTileCheck)
 Computes SliceParameters for all valuesToTile of the given linalgOp, assuming linalgOp is being fused into a loop nest.
SmallVector< Value > mlir::linalg::makeTiledShapes (OpBuilder &builder, Location loc, LinalgOp linalgOp, ValueRange valuesToTile, ArrayRef< OpFoldResult > ivs, ArrayRef< OpFoldResult > tileSizes, ArrayRef< OpFoldResult > sizeBounds, bool omitPartialTileCheck)
 Creates extract_slice/subview ops for all valuesToTile of the given linalgOp with builder, assuming linalgOp is being fused into a loop nest for tiling with the given induction variables ivs and tile sizes tileSizes (a usage sketch combining these tiling helpers appears after this list).
void mlir::linalg::offsetIndices (OpBuilder &b, LinalgOp linalgOp, ArrayRef< OpFoldResult > offests)
 Add the specified offsets to any linalg.index ops contained in the given linalgOp.
void mlir::linalg::offsetIndices (RewriterBase &b, LinalgOp linalgOp, ArrayRef< OpFoldResult > offests)
std::optional< SmallVector< ReassociationIndices > > mlir::linalg::getReassociationMapForFoldingUnitDims (ArrayRef< OpFoldResult > mixedSizes)
 Get the reassociation maps to fold the result of an extract_slice (or the source of an insert_slice) operation with the given offsets and sizes, to its rank-reduced version.
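
A short, hedged usage sketch of how the tiling helpers above might compose inside the body of a tile loop. This is illustrative code, not code from Utils.cpp; the tileLoopBodySketch name, the surrounding loop construction, and the use of the mlir::clone helper are assumptions.

#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"

using namespace mlir;

// Illustrative only: builds the tiled body for `linalgOp` given the loop
// induction variables `ivs`, the `tileSizes`, and the loop `sizeBounds`.
static SmallVector<Value>
tileLoopBodySketch(OpBuilder &b, Location loc, linalg::LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> ivs, ArrayRef<OpFoldResult> tileSizes,
                   ArrayRef<OpFoldResult> sizeBounds) {
  // Per-dimension offsets in the iteration space, derived from ivs/tileSizes.
  SmallVector<OpFoldResult> offsets =
      linalg::computeTileOffsets(b, loc, ivs, tileSizes);

  // extract_slice/subview ops for every operand of the op being tiled.
  SmallVector<Value> tiledOperands = linalg::makeTiledShapes(
      b, loc, linalgOp, linalgOp->getOperands(), ivs, tileSizes, sizeBounds,
      /*omitPartialTileCheck=*/false);

  // Result types the tiled clone should produce when operating on tensors.
  SmallVector<Type> resultTypes =
      linalg::getTensorOutputTypes(linalgOp, tiledOperands);

  // Clone the op onto the tiled operands; `clone` is assumed to be the MLIR
  // helper that rebuilds an op with new result types and operands.
  Operation *tiledOp = clone(b, linalgOp, resultTypes, tiledOperands);

  // Shift any linalg.index results by the tile offsets, then insert the
  // tiled results back into the enclosing tensors.
  linalg::offsetIndices(b, cast<linalg::LinalgOp>(tiledOp), offsets);
  return linalg::insertSlicesBack(b, loc, linalgOp, tiledOperands,
                                  tiledOp->getResults());
}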

Macro Definition Documentation

◆ DEBUG_TYPE

#define DEBUG_TYPE   "linalg-utils"

Definition at line 37 of file Utils.cpp.

Function Documentation

◆ computePackUnPackPerm()

SmallVector< int64_t > computePackUnPackPerm ( int64_t rank,
ArrayRef< int64_t > & innerDimsPos,
ArrayRef< int64_t > & outerPerm,
PackingMetadata & packingMetadata )
static

The permutation can be obtained from two permutations: a) Compute the permutation vector to move the last numPackedDims into the innerPosDims of a shape of rank rank.

b) Compute the permutation vector to move the outer dims if the outerPerm parameter is not empty. Apply permutation (b) to permutation (a) to get the final permutation.

Definition at line 150 of file Utils.cpp.

References mlir::applyPermutationToVector(), and mlir::computePermutationVector().

Referenced by mlir::linalg::getPackInverseDestPerm(), and mlir::linalg::getUnPackInverseSrcPerm().
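
A hedged usage sketch (not code from Utils.cpp) of the permutation produced by this helper: getPackInverseDestPerm() exposes the composed permutation, and applyPermutationToVector() applies it to a vector of per-dimension values. The permuteDestSizes name and the choice of input are assumptions for illustration.

#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"

using namespace mlir;

// Illustrative only: apply the inverse destination permutation of a
// linalg.pack op to a vector of per-dimension values (e.g. sizes).
static SmallVector<int64_t> permuteDestSizes(linalg::PackOp packOp,
                                             ArrayRef<int64_t> destSizes) {
  PackingMetadata metadata;
  // Composition of the inner-dims permutation (a) and the outer-dims
  // permutation (b) described above.
  SmallVector<int64_t> perm = linalg::getPackInverseDestPerm(packOp, metadata);
  SmallVector<int64_t> permuted = llvm::to_vector(destSizes);
  applyPermutationToVector(permuted, perm);
  return permuted;
}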

◆ isTiled() [1/2]

bool isTiled ( AffineExpr expr,
ArrayRef< OpFoldResult > tileSizes )
static

◆ isTiled() [2/2]

bool isTiled ( AffineMap map,
ArrayRef< OpFoldResult > tileSizes )
static

◆ unpackRanges()

void unpackRanges ( OpBuilder & builder,
Location loc,
ArrayRef< Range > ranges,
SmallVectorImpl< Value > & lbs,
SmallVectorImpl< Value > & ubs,
SmallVectorImpl< Value > & steps )
static

Given a list of subview ranges, extract individual values for lower, upper bounds and steps and put them into the corresponding vectors.

Definition at line 126 of file Utils.cpp.

References mlir::getValueOrCreateConstantIndexOp().

Referenced by mlir::linalg::GenerateLoopNest< LoopTy >::doit().
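
Because unpackRanges() is static (file-local to Utils.cpp), callers elsewhere need their own equivalent. Below is a minimal sketch, assuming mlir::Range carries offset/size/stride as OpFoldResults and that the range size plays the role of the loop upper bound, as in the description above; the unpackRangesSketch name is an assumption.

#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/IR/Builders.h"
// (plus whichever header defines mlir::Range in your tree)

using namespace mlir;

// Illustrative only: turn each Range into loop bound/step Values.
static void unpackRangesSketch(OpBuilder &builder, Location loc,
                               ArrayRef<Range> ranges,
                               SmallVectorImpl<Value> &lbs,
                               SmallVectorImpl<Value> &ubs,
                               SmallVectorImpl<Value> &steps) {
  for (const Range &range : ranges) {
    // Materialize each OpFoldResult as a Value, creating constant index ops
    // where needed (getValueOrCreateConstantIndexOp is referenced above).
    lbs.push_back(getValueOrCreateConstantIndexOp(builder, loc, range.offset));
    ubs.push_back(getValueOrCreateConstantIndexOp(builder, loc, range.size));
    steps.push_back(
        getValueOrCreateConstantIndexOp(builder, loc, range.stride));
  }
}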