Transforms.h File Reference (MLIR 19.0.0git)
#include <utility>
#include "mlir/Conversion/VectorToSCF/VectorToSCF.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/Dialect/X86Vector/Transforms.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/TilingInterface.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"


Classes

struct  mlir::linalg::BufferizeToAllocationOptions
 
struct  mlir::linalg::LinalgTilingOptions
 
struct  mlir::linalg::LinalgTilingAndFusionOptions
 
struct  mlir::linalg::LinalgPaddingOptions
 
struct  mlir::linalg::LinalgPromotionOptions
 
struct  mlir::linalg::SplitReductionOptions
 Split Reduction options. More...
 
struct  mlir::linalg::ControlDropUnitDims
 Transformation to drop unit-extent dimensions from linalg.generic operations. More...
 
struct  mlir::linalg::ElementwiseOpFusionResult
 Fuse two linalg.generic operations that have a producer-consumer relationship captured through fusedOperand. More...
 
struct  mlir::linalg::detail::PackingResult
 Helper struct to hold the results of building a packing loop nest. More...
 
struct  mlir::linalg::TiledLinalgOp
 Result of performing standalone tiling of a single LinalgOp by tileSizes. More...
 
struct  mlir::linalg::PromotionInfo
 Information about a new buffer created using the provided allocationFn. More...
 
struct  mlir::linalg::detail::MultiSizeSpecificationBase< T >
 
struct  mlir::linalg::MultiSizeSpecification
 A description of a multi-size tiling comprising tile sizes and numbers of tiles, expressed as Values which may or may not be constant. More...
 
struct  mlir::linalg::StaticMultiSizeSpecification
 
struct  mlir::linalg::ForallTilingResult
 Rewrite a TilingInterface op to a tiled scf.forall, applying tiling by numThreads. More...
 
struct  mlir::linalg::ForallReductionTilingResult
 Transformation information returned after reduction tiling. More...
 
struct  mlir::linalg::SplitReductionResult
 Result of applying the transformation that splits a single linalg op reduction into a parallel and a reduction dimension. More...
 
struct  mlir::linalg::CollapseResult
 
struct  mlir::linalg::LowerPackResult
 
struct  mlir::linalg::LowerUnPackOpResult
 
struct  mlir::linalg::PackResult
 Struct to hold the result of a pack call. More...
 
struct  mlir::linalg::PackTransposeResult
 Struct to hold the result of a packTranspose call. More...
 
struct  mlir::linalg::DownscaleSizeOneWindowed2DConvolution< Conv2DOp, Conv1DOp >
 Rewrites 2-D convolution ops with size-1 window dimensions into 1-D convolution ops. More...
 
struct  mlir::linalg::DownscaleDepthwiseConv2DNhwcHwcOp
 Rewrites 2-D depthwise convolution ops with size-1 (w, kw) or (h, kh) dimensions into 1-D depthwise convolution ops. More...
 
struct  mlir::linalg::DownscaleConv2DOp
 
struct  mlir::linalg::LinalgGeneralizationPattern
 Linalg generalization pattern. More...
 
struct  mlir::linalg::CopyVectorizationPattern
 Vectorization pattern for memref::CopyOp. More...
 
struct  mlir::linalg::GeneralizePadOpPattern
 Rewrite a tensor::PadOp into a sequence of EmptyOp, FillOp and InsertSliceOp. More...
 
struct  mlir::linalg::GeneralizeOuterUnitDimsPackOpPattern
 Rewrites a tensor::PackOp into a sequence of tensor.pad + linalg.transpose + tensor.insert_slice ops, where the tensor::PackOp has all outer dimensions equal to 1. More...
 
struct  mlir::linalg::GeneralizeOuterUnitDimsUnPackOpPattern
 Rewrites a tensor::UnPackOp into a sequence of rank-reduced extract_slice ops. More...
 
struct  mlir::linalg::LinalgCopyVTRForwardingPattern
 Match and rewrite for the pattern: More...
 
struct  mlir::linalg::LinalgCopyVTWForwardingPattern
 Match and rewrite for the pattern: More...
 
struct  mlir::linalg::ExtractSliceOfPadTensorSwapPattern
 Rewrite extract_slice(tensor.pad(x)) into tensor.pad(extract_slice(x)). More...
 

Namespaces

 mlir
 
 mlir::bufferization
 
 mlir::linalg
 
 mlir::linalg::detail
 

Typedefs

using mlir::linalg::TileSizeComputationFunction = std::function< SmallVector< Value, 4 >(OpBuilder &, Operation *)>
 
using mlir::linalg::AllocBufferCallbackFn = std::function< std::optional< Value >(OpBuilder &b, memref::SubViewOp subView, ArrayRef< Value > boundingSubViewSize, DataLayout &layout)>
 Callback function type used to perform the allocation for the promoted subView. More...
 
using mlir::linalg::DeallocBufferCallbackFn = std::function< LogicalResult(OpBuilder &b, Value buffer)>
 Callback function type used to deallocate the buffers used to hold the promoted subview. More...
 
using mlir::linalg::CopyCallbackFn = std::function< LogicalResult(OpBuilder &b, Value src, Value dst)>
 Callback function type used to insert a copy: from the original subview to the subview of the promoted region for read operands, and from the subview of the promoted region back to the original subview for results. More...
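
These three callbacks plug into LinalgPromotionOptions (see promoteSubViews below). A minimal sketch, assuming plain memref.alloc / memref.dealloc / memref.copy are acceptable for the target; the all-dynamic bounding-box allocation shown here is an illustrative choice, not the library default:

using namespace mlir;

// Allocate a buffer whose rank matches the bounding-box sizes of the promoted
// subview, with every dimension dynamic (sketch only).
linalg::AllocBufferCallbackFn allocFn =
    [](OpBuilder &b, memref::SubViewOp subView,
       ArrayRef<Value> boundingSubViewSize,
       DataLayout &) -> std::optional<Value> {
  SmallVector<int64_t> shape(boundingSubViewSize.size(), ShapedType::kDynamic);
  auto type = MemRefType::get(shape, subView.getType().getElementType());
  return b.create<memref::AllocOp>(subView.getLoc(), type, boundingSubViewSize)
      .getResult();
};

// Free the promoted buffer once the promoted computation is done.
linalg::DeallocBufferCallbackFn deallocFn =
    [](OpBuilder &b, Value buffer) -> LogicalResult {
  b.create<memref::DeallocOp>(buffer.getLoc(), buffer);
  return success();
};

// Use a plain memref.copy for both the copy-in and the copy-out.
linalg::CopyCallbackFn copyFn = [](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
};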
 
using mlir::linalg::ControlSplitReductionFn = std::function< SplitReductionOptions(LinalgOp op)>
 Function signature to control reduction splitting. More...
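
For example, a control function that always requests a split of the outermost reduction by a fixed factor might look like the following minimal sketch (assuming SplitReductionOptions exposes ratio/index/innerParallel fields in that order; the factor 4 is purely illustrative):

// Returning a ratio <= 1 conventionally leaves the op untouched.
mlir::linalg::ControlSplitReductionFn control =
    [](mlir::linalg::LinalgOp) -> mlir::linalg::SplitReductionOptions {
  return mlir::linalg::SplitReductionOptions{/*ratio=*/4, /*index=*/0,
                                             /*innerParallel=*/false};
};
// Used with splitReduction, splitReductionByScaling, or
// populateSplitReductionPattern further down this page.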
 
using mlir::linalg::LinalgLoops = SmallVector< Operation *, 4 >
 
using mlir::linalg::LoopIndexToRangeIndexMap = DenseMap< int, int >
 Creates a number of ranges equal to the number of non-zero entries in tileSizes. More...
 
using mlir::linalg::OptimizeCopyFn = std::function< LogicalResult(RewriterBase &, tensor::PadOp, Value)>
 
using mlir::linalg::ControlFusionFn = std::function< bool(OpOperand *fusedOperand)>
 Function type which is used to control when to stop fusion. More...
 
using mlir::linalg::ControlPropagationFn = std::function< bool(Operation *op)>
 Function type which is used to control propagation of tensor.pack/unpack ops. More...
 
using mlir::linalg::GetCollapsableDimensionsFn = std::function< SmallVector< ReassociationIndices >(linalg::LinalgOp)>
 Function type to control generic op dimension collapsing. More...
 

Functions

std::optional< vector::CombiningKind > mlir::linalg::getCombinerOpKind (Operation *combinerOp)
 Return vector::CombiningKind for the given op. More...
 
Value mlir::linalg::bufferizeToAllocation (RewriterBase &rewriter, const BufferizeToAllocationOptions &options, tensor::PadOp padOp, Attribute memorySpace={}, Operation *insertionPoint=nullptr)
 Materialize a buffer allocation for the given tensor.pad op and lower the op to linalg.fill/linalg.generic + bufferization.materialize_in_destination. More...
 
Value mlir::linalg::bufferizeToAllocation (RewriterBase &rewriter, const BufferizeToAllocationOptions &options, vector::MaskOp maskOp, Attribute memorySpace={}, Operation *insertionPoint=nullptr)
 Materialize a buffer allocation for the given vector.mask op and bufferize the op, including its region. More...
 
Value mlir::linalg::bufferizeToAllocation (RewriterBase &rewriter, const BufferizeToAllocationOptions &options, bufferization::AllocTensorOp allocTensorOp, Attribute memorySpace={}, Operation *insertionPoint=nullptr)
 Materialize a buffer allocation for the given bufferization.alloc_tensor op and lower the op to memref.alloc + memref.tensor_store. More...
 
Value mlir::linalg::bufferizeToAllocation (RewriterBase &rewriter, const BufferizeToAllocationOptions &options, Operation *op, Attribute memorySpace={}, Operation *insertionPoint=nullptr)
 Bufferize the given op with tensor semantics and materialize the result in a newly allocated buffer. More...
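
As a minimal sketch of the tensor.pad overload (the IRRewriter setup and the memory space attribute value 3 are illustrative assumptions):

// Given a tensor::PadOp `padOp` found elsewhere.
static mlir::Value bufferizePadToAllocation(mlir::tensor::PadOp padOp) {
  mlir::IRRewriter rewriter(padOp->getContext());
  mlir::linalg::BufferizeToAllocationOptions options;
  // Place the allocation in a target-specific memory space (placeholder value).
  mlir::Attribute memorySpace = rewriter.getI64IntegerAttr(3);
  return mlir::linalg::bufferizeToAllocation(rewriter, options, padOp,
                                             memorySpace);
}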
 
LogicalResult mlir::linalg::linalgOpAnchoredEmptyTensorEliminationStep (RewriterBase &rewriter, Operation *op, bufferization::OneShotAnalysisState &state)
 Try to eliminate tensor::EmptyOps inside op that are anchored on a LinalgOp. More...
 
bool mlir::linalg::areElementwiseOpsFusable (OpOperand *fusedOperand)
 Return true if two linalg.generic operations with producer/consumer relationship through fusedOperand can be fused using elementwise op fusion. More...
 
LogicalResult mlir::linalg::promoteSubviewsPrecondition (Operation *op, LinalgPromotionOptions options)
 Check the preconditions for promoting memref.subviews feeding linalg-on-buffers operations. More...
 
LogicalResult mlir::linalg::vectorizeOpPrecondition (Operation *op, ArrayRef< int64_t > inputVectorSizes={}, ArrayRef< bool > inputScalableVecDims={}, bool vectorizeNDExtract=false, bool flatten1DDepthwiseConv=false)
 Return success if the operation can be vectorized. More...
 
LogicalResult mlir::linalg::dropUnitDims (RewriterBase &rewriter, GenericOp genericOp, const ControlDropUnitDims &options)
 
FailureOr< ElementwiseOpFusionResultmlir::linalg::fuseElementwiseOps (RewriterBase &rewriter, OpOperand *fusedOperand)
 
SmallVector< Valuemlir::linalg::peelLoop (RewriterBase &rewriter, Operation *op)
 Try to peel and canonicalize loop op and return the new result. More...
 
void mlir::linalg::peelLoops (RewriterBase &rewriter, ArrayRef< scf::ForOp > loops)
 Peel loops and apply affine_min/max bounds simplification on the fly where relevant. More...
 
LogicalResult mlir::linalg::rewriteAsPaddedOp (RewriterBase &rewriter, LinalgOp opToPad, const LinalgPaddingOptions &options, LinalgOp &paddedOp, SmallVector< Value > &replacements, SmallVector< tensor::PadOp > &padOps)
 Pad the iterator dimensions paddingDimensions of all opToPad operands to a static bounding box. More...
 
FailureOr< PackingResultmlir::linalg::detail::buildPackingLoopNest (RewriterBase &rewriter, tensor::PadOp opToHoist, scf::ForOp outermostEnclosingForOp, ArrayRef< int64_t > transposeVector)
 Build the packing loop nest required to hoist opToHoist above outermostEnclosingForOp. More...
 
FailureOr< Valuemlir::linalg::hoistPaddingOnTensors (RewriterBase &rewriter, tensor::PadOp opToHoist, int64_t numLoops, ArrayRef< int64_t > transposeVector, tensor::PadOp &hoistedOp, SmallVectorImpl< GenericOp > &transposeOps)
 Mechanically hoist padding operations on tensors by numLoops into a new, generally larger tensor. More...
 
FailureOr< Valuemlir::linalg::hoistPaddingOnTensors (tensor::PadOp opToHoist, int64_t numLoops, ArrayRef< int64_t > transposeVector, tensor::PadOp &hoistedOp, SmallVectorImpl< GenericOp > &transposeOps)
 Calls into hoistPaddingOnTensors with a local IRRewriter. More...
 
FailureOr< LinalgOp > mlir::linalg::padAndHoistLinalgOp (RewriterBase &rewriter, LinalgOp linalgOp, const LinalgPaddingOptions &options)
 Apply padding and hoisting to linalgOp according to the configuration specified in options. More...
 
std::pair< TilingInterface, TilingInterface > mlir::linalg::splitOp (RewriterBase &rewriter, TilingInterface op, unsigned dimension, OpFoldResult splitPoint)
 Split the given op into two parts along the given iteration space dimension at the specified splitPoint, and return the two parts. More...
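
A minimal usage sketch (the dimension and split point are illustrative; rewriter and the TilingInterface op are assumed to come from the surrounding pattern or pass):

static void splitAlongDim0(mlir::RewriterBase &rewriter,
                           mlir::TilingInterface tileableOp) {
  // The two returned ops cover [0, 128) and [128, end) of dimension 0.
  auto [firstPart, secondPart] = mlir::linalg::splitOp(
      rewriter, tileableOp, /*dimension=*/0,
      /*splitPoint=*/rewriter.getIndexAttr(128));
  (void)firstPart;
  (void)secondPart;
}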
 
FailureOr< TiledLinalgOpmlir::linalg::tileLinalgOp (RewriterBase &b, LinalgOp op, const LinalgTilingOptions &options)
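
A minimal sketch using LinalgTilingOptions (the setTileSizes/setLoopType setters are assumed from that options struct; the 8x16 tile sizes are illustrative):

static mlir::FailureOr<mlir::linalg::TiledLinalgOp>
tileWithFixedSizes(mlir::RewriterBase &rewriter, mlir::linalg::LinalgOp op) {
  mlir::linalg::LinalgTilingOptions tilingOptions;
  tilingOptions.setTileSizes({8, 16})
      .setLoopType(mlir::linalg::LinalgTilingLoopType::Loops);
  // On success the result holds the tiled op, the generated loops, and any
  // tensor results that should replace the original op's results.
  return mlir::linalg::tileLinalgOp(rewriter, op, tilingOptions);
}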
 
FailureOr< GenericOp > mlir::linalg::interchangeGenericOp (RewriterBase &rewriter, GenericOp genericOp, ArrayRef< unsigned > interchangeVector)
 Interchange the iterator_types and indexing_maps dimensions and adapt the index accesses of op. More...
 
FailureOr< GenericOp > mlir::linalg::generalizeNamedOp (RewriterBase &rewriter, LinalgOp namedOp)
 Create a GenericOp from the given named operation namedOp and replace namedOp. More...
 
FailureOr< LinalgOp > mlir::linalg::specializeGenericOp (RewriterBase &rewriter, GenericOp genericOp)
 Create a namedOp from the given GenericOp and replace the GenericOp. More...
 
FailureOr< PromotionInfomlir::linalg::promoteSubviewAsNewBuffer (OpBuilder &b, Location loc, memref::SubViewOp subView, const AllocBufferCallbackFn &allocationFn, DataLayout &layout)
 
FailureOr< LinalgOp > mlir::linalg::promoteSubViews (OpBuilder &b, LinalgOp op, const LinalgPromotionOptions &options)
 Promote the subViews into a new buffer allocated at the insertion point b. More...
 
std::optional< Valuemlir::linalg::allocateWorkgroupMemory (OpBuilder &builder, memref::SubViewOp subview, ArrayRef< Value > sizeBounds, DataLayout &)
 Allocate the subview in the GPU workgroup memory. More...
 
LogicalResult mlir::linalg::deallocateWorkgroupMemory (OpBuilder &, Value)
 In case of GPU group memory there is no need to deallocate. More...
 
LogicalResult mlir::linalg::copyToWorkgroupMemory (OpBuilder &b, Value src, Value dst)
 Create Memref copy operations and add gpu barrier guards before and after the copy operation to ensure data integrity. More...
 
std::optional< Valuemlir::linalg::allocateGPUPrivateMemory (OpBuilder &builder, memref::SubViewOp subview, ArrayRef< Value > sizeBounds, DataLayout &)
 Allocate the subview in the GPU private memory. More...
 
LogicalResult mlir::linalg::copyToGPUPrivateMemory (OpBuilder &b, Value src, Value dst)
 Normal copy between src and dst. More...
 
LogicalResult mlir::linalg::deallocateGPUPrivateMemory (OpBuilder &, Value)
 In case of GPU private memory there is no need to deallocate since the memory is freed when going outside of the scope. More...
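
These GPU helpers slot directly into LinalgPromotionOptions. A minimal sketch for workgroup-memory promotion (the setAllocationDeallocationFns/setCopyInOutFns setter names are assumed from LinalgPromotionOptions):

static mlir::LogicalResult
promoteToWorkgroupMemory(mlir::linalg::LinalgOp linalgOp) {
  mlir::linalg::LinalgPromotionOptions options;
  options
      .setAllocationDeallocationFns(mlir::linalg::allocateWorkgroupMemory,
                                    mlir::linalg::deallocateWorkgroupMemory)
      .setCopyInOutFns(mlir::linalg::copyToWorkgroupMemory,
                       mlir::linalg::copyToWorkgroupMemory);
  if (mlir::failed(
          mlir::linalg::promoteSubviewsPrecondition(linalgOp, options)))
    return mlir::failure();
  mlir::OpBuilder builder(linalgOp);
  mlir::FailureOr<mlir::linalg::LinalgOp> promoted =
      mlir::linalg::promoteSubViews(builder, linalgOp, options);
  return mlir::success(mlir::succeeded(promoted));
}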
 
LogicalResult mlir::linalg::vectorize (RewriterBase &rewriter, Operation *op, ArrayRef< int64_t > inputVectorSizes={}, ArrayRef< bool > inputScalableVecDims={}, bool vectorizeNDExtract=false, bool flatten1DDepthwiseConv=false)
 Emit a suitable vector form for an operation. More...
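
A minimal sketch that combines the precondition check above with vectorization (the 8x32 vector sizes are illustrative; fully static ops can be vectorized without explicit sizes):

static mlir::LogicalResult vectorizeWithSizes(mlir::RewriterBase &rewriter,
                                              mlir::Operation *op) {
  llvm::SmallVector<int64_t> vectorSizes = {8, 32};
  llvm::SmallVector<bool> scalableVecDims(vectorSizes.size(), false);
  if (mlir::failed(mlir::linalg::vectorizeOpPrecondition(op, vectorSizes,
                                                         scalableVecDims)))
    return mlir::failure();
  // Rewrites `op` in place into vector dialect operations.
  return mlir::linalg::vectorize(rewriter, op, vectorSizes, scalableVecDims);
}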
 
LogicalResult mlir::linalg::vectorizeCopy (RewriterBase &builder, memref::CopyOp copyOp)
 Emit a suitable vector form for a Copy op with fully static shape. More...
 
FailureOr< LinalgLoopsmlir::linalg::linalgOpToLoops (RewriterBase &rewriter, LinalgOp linalgOp)
 Emit a loop nest of scf.for with the proper body for linalgOp. More...
 
FailureOr< LinalgLoopsmlir::linalg::linalgOpToParallelLoops (RewriterBase &rewriter, LinalgOp linalgOp)
 Emit a loop nest of scf.parallel with the proper body for linalgOp. More...
 
FailureOr< LinalgLoopsmlir::linalg::linalgOpToAffineLoops (RewriterBase &rewriter, LinalgOp linalgOp)
 Emit a loop nest of affine.for with the proper body for linalgOp. More...
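
A minimal sketch of the scf.for variant; linalgOpToParallelLoops and linalgOpToAffineLoops are used the same way (the op is assumed to already have buffer/memref semantics):

static mlir::LogicalResult lowerToLoops(mlir::RewriterBase &rewriter,
                                        mlir::linalg::LinalgOp linalgOp) {
  mlir::FailureOr<mlir::linalg::LinalgLoops> loops =
      mlir::linalg::linalgOpToLoops(rewriter, linalgOp);
  if (mlir::failed(loops))
    return mlir::failure();
  // `*loops` holds the generated loop operations, outermost first.
  return mlir::success();
}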
 
std::tuple< SmallVector< Range, 4 >, LoopIndexToRangeIndexMapmlir::linalg::makeTiledLoopRanges (RewriterBase &b, Location loc, AffineMap map, ArrayRef< OpFoldResult > allShapeSizes, ArrayRef< OpFoldResult > allTileSizes)
 
FailureOr< MultiSizeSpecificationmlir::linalg::computeMultiTileSizes (OpBuilder &builder, LinalgOp op, unsigned dimension, OpFoldResult targetSize, OpFoldResult divisor, bool emitAssertions=true)
 Emits the IR computing the multi-sized tiling specification with two tile sizes not exceeding targetSize, each divisible by divisor, such that there exist numbers of tiles with these sizes that fully cover the given iteration space dimension of the structured op. More...
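
A minimal sketch (dimension 0, target size 63, and divisor 1 are illustrative):

static mlir::FailureOr<mlir::linalg::MultiSizeSpecification>
computeSizesForDim0(mlir::linalg::LinalgOp linalgOp) {
  mlir::OpBuilder builder(linalgOp);
  // Requests two tile sizes of at most 63 for dimension 0, with no
  // divisibility requirement, plus the matching numbers of tiles.
  return mlir::linalg::computeMultiTileSizes(
      builder, linalgOp, /*dimension=*/0,
      /*targetSize=*/builder.getIndexAttr(63),
      /*divisor=*/builder.getIndexAttr(1));
}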
 
FailureOr< StaticMultiSizeSpecificationmlir::linalg::computeStaticMultiTileSizes (LinalgOp op, unsigned dimension, int64_t targetSize, int64_t divisor)
 
FailureOr< ForallTilingResultmlir::linalg::tileToForallOp (RewriterBase &builder, TilingInterface op, ArrayRef< OpFoldResult > numThreads, std::optional< ArrayAttr > mapping)
 
FailureOr< ForallTilingResultmlir::linalg::tileToForallOpUsingTileSizes (RewriterBase &builder, TilingInterface op, ArrayRef< OpFoldResult > tileSizes, std::optional< ArrayAttr > mapping)
 Same as tileToForallOp, but calculates the number of threads required using the given tileSizes. More...
 
FailureOr< ForallReductionTilingResultmlir::linalg::tileReductionUsingForall (RewriterBase &b, PartialReductionOpInterface op, ArrayRef< OpFoldResult > numThreads, ArrayRef< OpFoldResult > tileSizes={}, std::optional< ArrayAttr > mapping=std::nullopt)
 Method to tile a reduction to parallel iterations computing partial reductions. More...
 
void mlir::linalg::transformIndexOps (RewriterBase &b, LinalgOp op, SmallVectorImpl< Value > &ivs, const LoopIndexToRangeIndexMap &loopIndexToRangeIndex)
 All indices returned by IndexOp should be invariant with respect to tiling. More...
 
FailureOr< SplitReductionResultmlir::linalg::splitReduction (RewriterBase &b, LinalgOp op, const ControlSplitReductionFn &controlSplitReductionFn, bool useAlloc=false)
 
FailureOr< SplitReductionResultmlir::linalg::splitReductionByScaling (RewriterBase &b, LinalgOp op, const ControlSplitReductionFn &controlSplitReductionFn, bool useAlloc=false)
 Scaling-based implementation of the split reduction transformation. More...
 
bool mlir::linalg::isDimSequencePreserved (AffineMap map, ReassociationIndicesRef dimSequence)
 Return true if a given sequence of dimensions are contiguous in the range of the specified indexing map. More...
 
bool mlir::linalg::areDimSequencesPreserved (ArrayRef< AffineMap > maps, ArrayRef< ReassociationIndices > dimSequences)
 Return true if all sequences of dimensions specified in dimSequences are contiguous in all the ranges of the maps. More...
 
FailureOr< CollapseResultmlir::linalg::collapseOpIterationDims (LinalgOp op, ArrayRef< ReassociationIndices > foldedIterationDims, RewriterBase &rewriter)
 Collapses dimensions of linalg.generic/linalg.copy operations. More...
 
FailureOr< LowerPackResultmlir::linalg::lowerPack (RewriterBase &rewriter, tensor::PackOp packOp)
 Rewrite pack as pad + reshape + transpose. More...
 
FailureOr< LowerUnPackOpResultmlir::linalg::lowerUnPack (RewriterBase &rewriter, tensor::UnPackOp unPackOp)
 Rewrite unpack as empty + transpose + reshape + extract_slice. More...
 
FailureOr< PackResultmlir::linalg::pack (RewriterBase &rewriter, linalg::LinalgOp linalgOp, ArrayRef< OpFoldResult > packedSizes)
 Implement packing of a single LinalgOp by packedSizes. More...
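
A minimal sketch for a matmul-like op with three iteration dimensions (the 8x16x32 packed sizes are illustrative):

static mlir::FailureOr<mlir::linalg::PackResult>
packWithFixedTiles(mlir::RewriterBase &rewriter, mlir::linalg::LinalgOp op) {
  // One packed size per iteration dimension of the op.
  llvm::SmallVector<mlir::OpFoldResult> packedSizes = {
      rewriter.getIndexAttr(8), rewriter.getIndexAttr(16),
      rewriter.getIndexAttr(32)};
  return mlir::linalg::pack(rewriter, op, packedSizes);
}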
 
FailureOr< PackTransposeResultmlir::linalg::packTranspose (RewriterBase &rewriter, tensor::PackOp packOp, linalg::LinalgOp linalgOp, tensor::UnPackOp maybeUnPackOp, ArrayRef< int64_t > outerPerm, ArrayRef< int64_t > innerPerm)
 Transpose a single PackOp -> LinalgOp -> UnPackOp chain and return the transposed PackOp -> LinalgOp -> UnPackOp chain after replacements. More...
 
FailureOr< PackResultmlir::linalg::packMatmulGreedily (RewriterBase &rewriter, LinalgOp linalgOp, ArrayRef< OpFoldResult > mnkPackedSizes, ArrayRef< int64_t > mnkPaddedSizesNextMultipleOf, ArrayRef< int64_t > mnkOrder)
 Pack a LinalgOp by greedily inferring matmul dimensions (m, n, k) where m and n are proper parallel dimensions and k is a proper reduction dimension. More...
 
FailureOr< Operation * > mlir::linalg::rewriteInDestinationPassingStyle (RewriterBase &rewriter, tensor::FromElementsOp fromElementsOp)
 Rewrite tensor.from_elements to linalg.generic. More...
 
FailureOr< Operation * > mlir::linalg::rewriteInDestinationPassingStyle (RewriterBase &rewriter, tensor::GenerateOp generateOp)
 Rewrite tensor.generate to linalg.generic. More...
 
FailureOr< Operation * > mlir::linalg::rewriteInDestinationPassingStyle (RewriterBase &rewriter, tensor::PadOp padOp)
 Rewrite tensor.pad to linalg.generic + tensor.insert_slice. More...
 
FailureOr< std::pair< Operation *, Operation * > > mlir::linalg::rewriteInIm2Col (RewriterBase &rewriter, linalg::Conv2DNhwcHwcfOp convOp)
 Convert linalg.conv_2d_nhwc_hwcf into linalg.generic (for img2col packing) and linalg.matmul. More...
 
FailureOr< std::pair< Operation *, Operation * > > mlir::linalg::rewriteInIm2Col (RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp convOp)
 Same as the above but for Fhwc channel orderings in the filter. More...
 
FailureOr< std::pair< Operation *, Operation * > > mlir::linalg::rewriteInIm2Col (RewriterBase &rewriter, linalg::DepthwiseConv2DNhwcHwcOp convOp)
 Similar to rewriteInIm2Col with linalg::Conv2DNhwcHwcfOp, except that there is no reduction among the input channels, so each convolution can be expressed as a matrix-vector product; by transposing both the input and the filter so that channels are outermost, the computation becomes a batched matrix-vector product. More...
 
FailureOr< std::pair< Operation *, Operation * > > mlir::linalg::rewriteInIm2Col (RewriterBase &rewriter, linalg::Conv2DNchwFchwOp convOp)
 Similar to rewriteInIm2Col with linalg::Conv2DNhwcHwcfOp except because the channels are to the left of the image shape dimensions, the position of the contraction dimension in the resulting matmul is reversed. More...
 
FailureOr< Operation * > mlir::linalg::transposeConv2D (RewriterBase &rewriter, linalg::Conv2DNhwcFhwcOp op)
 Convert linalg.conv_2d_nhwc_fhwc(_q) to linalg.conv_2d_nhwc_hwcf(_q) by materializing a transpose. More...
 
FailureOr< Operation * > mlir::linalg::transposeConv2D (RewriterBase &rewriter, linalg::Conv2DNhwcFhwcQOp op)
 
RewritePatternSet mlir::linalg::getLinalgTilingCanonicalizationPatterns (MLIRContext *ctx)
 Canonicalization patterns relevant to apply after tiling patterns. More...
 
void mlir::linalg::populateLinalgTilingCanonicalizationPatterns (RewritePatternSet &patterns)
 
void mlir::linalg::populateLinalgNamedOpsGeneralizationPatterns (RewritePatternSet &patterns)
 Linalg generalization patterns. More...
 
void mlir::linalg::populateDecomposeConvolutionPatterns (RewritePatternSet &patterns, PatternBenefit benefit=1)
 Linalg decompose convolutions patterns. More...
 
void mlir::linalg::populateConvertConv2DToImg2ColPatterns (RewritePatternSet &patterns)
 Populates patterns to transform linalg.conv_2d_xxx operations into linalg.generic (for img2col packing) and linalg.matmul. More...
 
void mlir::linalg::populatePadOpVectorizationPatterns (RewritePatternSet &patterns, PatternBenefit baseBenefit=1)
 Populates patterns with patterns that vectorize tensor.pad. More...
 
void mlir::linalg::populateDecomposeLinalgOpsPattern (RewritePatternSet &patterns, bool removeDeadArgsAndResults=true)
 Populate patterns for splitting a LinalgOp with multiple statements within its payload into multiple GenericOp that have a single statement. More...
 
void mlir::linalg::populateConvertToDestinationStylePatterns (RewritePatternSet &patterns)
 Populate patterns that convert non-destination-style ops to destination style ops. More...
 
void mlir::linalg::populateConvolutionVectorizationPatterns (RewritePatternSet &patterns, PatternBenefit benefit=1)
 Populate patterns for vectorizing low-D convolution ops. More...
 
void mlir::linalg::populateElementwiseToLinalgConversionPatterns (RewritePatternSet &patterns)
 Populate patterns that convert ElementwiseMappable ops to linalg parallel loops. More...
 
void mlir::linalg::populateSparseTensorRewriting (RewritePatternSet &patterns)
 Populate patterns that are only useful in the context of sparse tensors. More...
 
void mlir::linalg::populateElementwiseOpsFusionPatterns (RewritePatternSet &patterns, const ControlFusionFn &controlElementwiseOpFusion)
 Patterns for fusing linalg operation on tensors. More...
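
A minimal sketch of driving these patterns from a pass, with a control function that skips producers carrying a hypothetical "no_fuse" attribute (the attribute name and the use of the greedy driver are illustrative assumptions):

// Requires mlir/Transforms/GreedyPatternRewriteDriver.h for the driver.
static void runElementwiseFusion(mlir::Operation *root) {
  mlir::linalg::ControlFusionFn controlFn =
      [](mlir::OpOperand *fusedOperand) -> bool {
    mlir::Operation *producer = fusedOperand->get().getDefiningOp();
    return producer && !producer->hasAttr("no_fuse");
  };
  mlir::RewritePatternSet patterns(root->getContext());
  mlir::linalg::populateElementwiseOpsFusionPatterns(patterns, controlFn);
  (void)mlir::applyPatternsAndFoldGreedily(root, std::move(patterns));
}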
 
void mlir::linalg::populateDataLayoutPropagationPatterns (RewritePatternSet &patterns, const ControlPropagationFn &controlPackUnPackPropagation)
 Patterns to bubble up or down data layout ops across other operations. More...
 
void mlir::linalg::populateEraseUnusedOperandsAndResultsPatterns (RewritePatternSet &patterns)
 Pattern to remove dead operands and results of linalg.generic operations. More...
 
void mlir::linalg::populateEraseUnnecessaryInputsPatterns (RewritePatternSet &patterns)
 Patterns to promote inputs to outputs and remove unused inputs of linalg.generic ops. More...
 
void mlir::linalg::populateCollapseDimensions (RewritePatternSet &patterns, const GetCollapsableDimensionsFn &controlCollapseDimensions)
 Pattern to collapse dimensions in a linalg.generic op. More...
 
void mlir::linalg::populateFoldReshapeOpsByExpansionPatterns (RewritePatternSet &patterns, const ControlFusionFn &controlFoldingReshapes)
 Patterns to fold an expanding (collapsing) tensor_reshape operation with its producer (consumer) generic operation by expanding the dimensionality of the loop in the generic op. More...
 
void mlir::linalg::populateFoldReshapeOpsByCollapsingPatterns (RewritePatternSet &patterns, const ControlFusionFn &controlFoldingReshapes)
 Patterns to fold an expanding tensor.expand_shape operation with its producer generic operation by collapsing the dimensions of the generic op. More...
 
void mlir::linalg::populateConstantFoldLinalgOperations (RewritePatternSet &patterns, const ControlFusionFn &controlFn)
 Patterns to constant fold Linalg operations. More...
 
void mlir::linalg::populateFuseTensorPadWithProducerLinalgOpPatterns (RewritePatternSet &patterns)
 Pattern to fuse a tensor.pad operation with the producer of its source, if the producer is a linalg operation with all parallel iterator types. More...
 
void mlir::linalg::populateLinalgNamedOpConversionPatterns (RewritePatternSet &patterns)
 Patterns to convert from one named op to another. More...
 
void mlir::linalg::populateFoldUnitExtentDimsPatterns (RewritePatternSet &patterns, ControlDropUnitDims &options)
 Patterns to fold unit-extent dimensions in operands/results of linalg ops on tensors via reassociative reshape ops. More...
 
void mlir::linalg::populateMoveInitOperandsToInputPattern (RewritePatternSet &patterns)
 A pattern that converts init operands to input operands. More...
 
void mlir::linalg::populateInlineConstantOperandsPatterns (RewritePatternSet &patterns)
 Patterns that are used to inline constant operands into linalg generic ops. More...
 
void mlir::linalg::populateBubbleUpExtractSliceOpPatterns (RewritePatternSet &patterns)
 Patterns that are used to bubble up extract slice op above linalg op. More...
 
void mlir::linalg::populateSwapExtractSliceWithFillPatterns (RewritePatternSet &patterns)
 Adds patterns that swap tensor.extract_slice(linalg.fill(cst, init)) into linalg.fill(cst, tensor.extract_slice(init)). More...
 
void mlir::linalg::populateSplitReductionPattern (RewritePatternSet &patterns, const ControlSplitReductionFn &controlSplitReductionFn, bool useAlloc=false)
 Patterns to apply the splitReduction transformation listed above. More...