MLIR
17.0.0git

#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include <optional>
#include "mlir/Dialect/Affine/Passes.h.inc"
Go to the source code of this file.
Namespaces  
mlir  
This header declares functions that assist transformations in the MemRef dialect.  
Macros  
#define  GEN_PASS_DEF_AFFINEVECTORIZE 
#define  DEBUG_TYPE "earlyvect" 
Implements a high-level vectorization strategy on a Function. More...  
Functions  
static FilterFunctionType  isVectorizableLoopPtrFactory (const DenseSet< Operation * > &parallelLoops, int fastestVaryingMemRefDimension) 
Forward declaration. More...  
static std::optional< NestedPattern >  makePattern (const DenseSet< Operation * > &parallelLoops, int vectorRank, ArrayRef< int64_t > fastestVaryingPattern) 
Creates a vectorization pattern from the command line arguments. More...  
static NestedPattern &  vectorTransferPattern () 
static void  vectorizeLoopIfProfitable (Operation *loop, unsigned depthInPattern, unsigned patternDepth, VectorizationStrategy *strategy) 
static LogicalResult  analyzeProfitability (ArrayRef< NestedMatch > matches, unsigned depthInPattern, unsigned patternDepth, VectorizationStrategy *strategy) 
Implements a simple strawman strategy for vectorization. More...  
static void  eraseLoopNest (AffineForOp forOp) 
Erases a loop nest, including all its nested operations. More...  
static void  computeMemoryOpIndices (Operation *op, AffineMap map, ValueRange mapOperands, VectorizationState &state, SmallVectorImpl< Value > &results) 
static VectorType  getVectorType (Type scalarTy, const VectorizationStrategy *strategy) 
Returns the vector type resulting from applying the provided vectorization strategy on the scalar type. More...  
static arith::ConstantOp  vectorizeConstant (arith::ConstantOp constOp, VectorizationState &state) 
Tries to transform a scalar constant into a vector constant. More...  
static arith::ConstantOp  createInitialVector (arith::AtomicRMWKind reductionKind, Value oldOperand, VectorizationState &state) 
Creates a constant vector filled with the neutral elements of the given reduction. More...  
static Value  createMask (AffineForOp vecForOp, VectorizationState &state) 
Creates a mask used to filter out garbage elements in the last iteration of unaligned loops. More...  
static bool  isUniformDefinition (Value value, const VectorizationStrategy *strategy) 
Returns true if the provided value is vector uniform given the vectorization strategy. More...  
static Operation *  vectorizeUniform (Value uniformVal, VectorizationState &state) 
Generates a broadcast op for the provided uniform value using the vectorization strategy in 'state'. More...  
static Value  vectorizeOperand (Value operand, VectorizationState &state) 
Tries to vectorize a given operand by applying the following logic: More...  
static Operation *  vectorizeAffineLoad (AffineLoadOp loadOp, VectorizationState &state) 
Vectorizes an affine load with the vectorization strategy in 'state' by generating a 'vector.transfer_read' op with the proper permutation map inferred from the indices of the load. More...  
static Operation *  vectorizeAffineStore (AffineStoreOp storeOp, VectorizationState &state) 
Vectorizes an affine store with the vectorization strategy in 'state' by generating a 'vector.transfer_write' op with the proper permutation map inferred from the indices of the store. More...  
static bool  isNeutralElementConst (arith::AtomicRMWKind reductionKind, Value value, VectorizationState &state) 
Returns true if value is a constant equal to the neutral element of the given vectorizable reduction. More...  
static Operation *  vectorizeAffineForOp (AffineForOp forOp, VectorizationState &state) 
Vectorizes a loop with the vectorization strategy in 'state'. More...  
static Operation *  widenOp (Operation *op, VectorizationState &state) 
Vectorizes arbitrary operation by plain widening. More...  
static Operation *  vectorizeAffineYieldOp (AffineYieldOp yieldOp, VectorizationState &state) 
Vectorizes a yield operation by widening its types. More...  
static Operation *  vectorizeOneOperation (Operation *op, VectorizationState &state) 
Encodes Operation-specific behavior for vectorization. More...  
static void  getMatchedAffineLoopsRec (NestedMatch match, unsigned currentLevel, std::vector< SmallVector< AffineForOp, 2 >> &loops) 
Recursive implementation to convert all the nested loops in 'match' to a 2D vector container that preserves the relative nesting level of each loop with respect to the others in 'match'. More...  
static void  getMatchedAffineLoops (NestedMatch match, std::vector< SmallVector< AffineForOp, 2 >> &loops) 
Converts all the nested loops in 'match' to a 2D vector container that preserves the relative nesting level of each loop with respect to the others in 'match'. More...  
static LogicalResult  vectorizeLoopNest (std::vector< SmallVector< AffineForOp, 2 >> &loops, const VectorizationStrategy &strategy) 
Internal implementation to vectorize affine loops from a single loop nest using an nD vectorization strategy. More...  
static LogicalResult  vectorizeRootMatch (NestedMatch m, const VectorizationStrategy &strategy) 
Extracts the matched loops and vectorizes them following a topological order. More...  
static void  computeIntersectionBuckets (ArrayRef< NestedMatch > matches, std::vector< SmallVector< NestedMatch, 8 >> &intersectionBuckets) 
Traverses all the loop matches and classifies them into intersection buckets. More...  
static void  vectorizeLoops (Operation *parentOp, DenseSet< Operation * > &loops, ArrayRef< int64_t > vectorSizes, ArrayRef< int64_t > fastestVaryingPattern, const ReductionLoopMap &reductionLoops) 
Internal implementation to vectorize affine loops in 'loops' using the nD vectorization factors in 'vectorSizes'. More...  
static LogicalResult  verifyLoopNesting (const std::vector< SmallVector< AffineForOp, 2 >> &loops) 
Verify that affine loops in 'loops' meet the nesting criteria expected by SuperVectorizer: More...  
void  mlir::vectorizeAffineLoops (Operation *parentOp, DenseSet< Operation * > &loops, ArrayRef< int64_t > vectorSizes, ArrayRef< int64_t > fastestVaryingPattern, const ReductionLoopMap &reductionLoops) 
External utility to vectorize affine loops in 'loops' using the nD vectorization factors in 'vectorSizes'. More...  
LogicalResult  mlir::vectorizeAffineLoopNest (std::vector< SmallVector< AffineForOp, 2 >> &loops, const VectorizationStrategy &strategy) 
External utility to vectorize affine loops from a single loop nest using an nD vectorization strategy (see doc in VectorizationStrategy definition). More...  
#define DEBUG_TYPE "earlyvect" 
Implements a high-level vectorization strategy on a Function.
The abstraction used is that of supervectors, which provide a single, compact, representation in the vector types, information that is expected to reduce the impact of the phase ordering problem
This pass is designed to perform vectorization at a supervector granularity. A supervector is loosely defined as a vector type that is a multiple of a "good" vector size so the HW can efficiently implement a set of highlevel primitives. Multiple is understood along any dimension; e.g. both vector<16xf32> and vector<2x8xf32> are valid supervectors for a vector<8xf32> HW vector. Note that a "good vector size so the HW can efficiently implement a set of highlevel primitives" is not necessarily an integer multiple of actual hardware registers. We leave details of this distinction unspecified for now.
Some may prefer the terminology a "tile of HW vectors". In this case, one should note that supervectors implement an "always full tile" abstraction. They guarantee no partial-tile separation is necessary by relying on a high-level copy-reshape abstraction that we call vector.transfer. This copy-reshape operation is also responsible for performing layout transposition if necessary. In the general case this will require a scoped allocation in some notional local memory.
Whatever the mental model one prefers to use for this abstraction, the key point is that we burn into a single, compact, representation in the vector types, information that is expected to reduce the impact of the phase ordering problem. Indeed, a vector type conveys information that:
For a particular target, a notion of minimal nd vector size will be specified and vectorization targets a multiple of those. In the following paragraph, let "k ." represent "a multiple of", to be understood as a multiple in the same dimension (e.g. vector<16 x k . 128> summarizes vector<16 x 128>, vector<16 x 256>, vector<16 x 1024>, etc).
Some nonexhaustive notable supervector sizes of interest include:
Loops and operations are emitted that operate on those supervector shapes. Subsequent lowering passes will materialize to actual HW vector sizes. These passes are expected to be (gradually) more target-specific.
At a high level, a vectorized load in a loop will resemble:
It is the responsibility of the implementation of vector.transfer_read to materialize vector registers from the original scalar memrefs. A later (more target-dependent) lowering pass will materialize to actual HW vector sizes. This lowering may occur at different times:
This is an active area of investigation. We start with 2 remarks to position supervectorization in the context of existing ongoing work: LLVM VPLAN and LLVM SLP Vectorizer.
The astute reader may have noticed that in the limit, supervectorization can be applied at a similar time and with similar objectives than VPLAN. For instance, in the case of a traditional, polyhedral compilationflow (for instance, the PPCG project uses ISL to provide dependence analysis, multilevel(scheduling + tiling), lifting footprint to fast memory, communication synthesis, mapping, register optimizations) and before unrolling. When vectorization is applied at this late level in a typical polyhedral flow, and is instantiated with actual hardware vector sizes, supervectorization is expected to match (or subsume) the type of patterns that LLVM's VPLAN aims at targeting. The main difference here is that MLIR is higher level and our implementation should be significantly simpler. Also note that in this mode, recursive patterns are probably a bit of an overkill although it is reasonable to expect that mixing a bit of outer loop and inner loop vectorization + unrolling will provide interesting choices to MLIR.
Supervectorization however is not meant to be usable in a similar fashion to the SLP vectorizer. The main difference lies in the information that both vectorizers use: supervectorization examines contiguity of memory references along fastest varying dimensions and loops with recursive nested patterns capturing imperfectlynested loop nests; the SLP vectorizer, on the other hand, performs flat pattern matching inside a single unrolled loop body and stitches together pieces of load and store operations into full 1D vectors. We envision that the SLP vectorizer is a good way to capture innermost loop, controlflow dependent patterns that supervectorization may not be able to capture easily. In other words, supervectorization does not aim at replacing the SLP vectorizer and the two solutions are complementary.
We discuss the following early places where supervectorization is applicable and touch on the expected benefits and risks . We list the opportunities in the context of the traditional polyhedral compiler flow described in PPCG. There are essentially 6 places in the MLIR pass pipeline we expect to experiment with supervectorization:
Let's pause here and remark that applying supervectorization as described in 1. and 2. presents clear opportunities and risks:
Back to our listing, the last places where early supervectorization makes sense are:
At these levels the riskreward looks different: on one hand we probably lost a good deal of language/user/librarylevel annotation; on the other hand we gained parallelism and locality through scheduling and tiling. However we probably want to ensure tiling is compatible with the fulltileonly abstraction used in supervectorization or suffer the consequences. It is too early to place bets on what will win but we expect supervectorization to be the right abstraction to allow exploring at all these levels. And again, search is our friend.
Lastly, we mention it again here:
TODO: point to the proper places.
The algorithm proceeds in a few steps:
The choice of loop transformation to apply for coarsening vectorized loops is still subject to exploratory tradeoffs. In particular, say we want to vectorize by a factor 128, we want to transform the following input:
Traditionally, one would vectorize late (after scheduling, tiling, memory promotion etc) say after strip-mining (and potentially unrolling in the case of LLVM's SLP vectorizer):
Instead, we seek to vectorize early and freeze vector types before scheduling, so we want to generate a pattern that resembles:
i. simply dividing the lower / upper bounds by 128 creates issues when representing expressions such as ii + 1 because now we only have access to original values that have been divided. Additional information is needed to specify accesses at below128 granularity; ii. another alternative is to coarsen the loop step but this may have consequences on dependence analysis and fusability of loops: fusable loops probably need to have the same step (because we don't want to stripmine/unroll to enable fusion). As a consequence, we choose to represent the coarsening using the loop step for now and reevaluate in the future. Note that we can renormalize loop steps later if/when we have evidence that they are problematic.
For the simple strawman example above, vectorizing for a 1D vector abstraction of size 128 returns code similar to:
Consider the following Function:
The affine-super-vectorize pass with the following arguments:
produces this standard innermostloop vectorized code:
The affine-super-vectorize pass with the following arguments:
produces this more interesting mixed outerinnermostloop vectorized code:
Of course, much more intricate nD imperfectly-nested patterns can be vectorized too and specified in a fully declarative fashion.
Vectorizing reduction loops along the reduction dimension is supported if:
Comparing to the nonvectordimension case, two additional things are done during vectorization of such loops:
vector.reduce
Reduction vectorization is switched off by default; it can be enabled by passing a map from loops to reductions to utility functions, or by passing vectorize-reductions=true to the vectorization pass.
Consider the following example:
The affine-super-vectorize pass with the following arguments:
produces the following output:
Note that because of loop misalignment we needed to apply a mask to prevent the last 12 elements from affecting the final result. The mask is full of ones in every iteration except for the last one, in which it has the form 11...100...0 with 116 ones and 12 zeros.
Definition at line 572 of file SuperVectorize.cpp.
#define GEN_PASS_DEF_AFFINEVECTORIZE 
Definition at line 34 of file SuperVectorize.cpp.

static 
Implements a simple strawman strategy for vectorization.
Given a matched pattern matches
of depth patternDepth
, this strategy greedily assigns the fastest varying dimension ** of the vector ** to the innermost loop in the pattern. When coupled with a pattern that looks for the fastest varying dimension in load/store MemRefs, this creates a generic vectorization strategy that works for any loop in a hierarchy (outermost, innermost or intermediate).
TODO: In the future we should additionally increase the power of the profitability analysis along 3 directions:
Definition at line 655 of file SuperVectorize.cpp.
References mlir::failed(), mlir::failure(), mlir::success(), and vectorizeLoopIfProfitable().
Referenced by vectorizeLoops().

static 
Traverses all the loop matches and classifies them into intersection buckets.
Two matches intersect if any of them encloses the other one. A match intersects with a bucket if the match intersects with the root (outermost) loop in that bucket.
Definition at line 1617 of file SuperVectorize.cpp.
References intersects().
Referenced by vectorizeLoops().

static 
Definition at line 908 of file SuperVectorize.cpp.
References mlir::AffineMap::get(), mlir::Operation::getLoc(), mlir::AffineMap::getNumDims(), mlir::AffineMap::getNumSymbols(), and mlir::AffineMap::getResults().
Referenced by vectorizeAffineLoad(), and vectorizeAffineStore().

static 
Creates a constant vector filled with the neutral elements of the given reduction.
The scalar type of vector elements will be taken from oldOperand
.
Definition at line 982 of file SuperVectorize.cpp.
References mlir::DenseElementsAttr::get(), mlir::arith::getIdentityValueAttr(), mlir::Value::getLoc(), mlir::Value::getType(), and getVectorType().
Referenced by vectorizeAffineForOp().

static 
Creates a mask used to filter out garbage elements in the last iteration of unaligned loops.
If a mask is not required then nullptr
is returned. The mask will be a vector of booleans representing meaningful vector elements in the current iteration. It is filled with ones for each iteration except for the last one, where it has the form 11...100...0
with the number of ones equal to the number of meaningful elements (i.e. the number of iterations that would be left in the original loop).
Definition at line 1006 of file SuperVectorize.cpp.
References mlir::Operation::erase(), mlir::AffineMap::get(), mlir::Value::getDefiningOp(), mlir::AffineMap::getNumResults(), mlir::makeComposedAffineApply(), and mlir::Value::use_empty().
Referenced by vectorizeAffineForOp().

static 
Erases a loop nest, including all its nested operations.
Definition at line 896 of file SuperVectorize.cpp.
Referenced by vectorizeLoopNest().

static 
Converts all the nested loops in 'match' to a 2D vector container that preserves the relative nesting level of each loop with respect to the others in 'match'.
This means that every loop in 'loops[i]' will have a parent loop in 'loops[i-1]'. A loop in 'loops[i]' may or may not have a child loop in 'loops[i+1]'.
Definition at line 1527 of file SuperVectorize.cpp.
References getMatchedAffineLoopsRec().
Referenced by vectorizeRootMatch().

static 
Recursive implementation to convert all the nested loops in 'match' to a 2D vector container that preserves the relative nesting level of each loop with respect to the others in 'match'.
'currentLevel' is the nesting level that will be assigned to the loop in the current 'match'.
Definition at line 1507 of file SuperVectorize.cpp.
References mlir::NestedMatch::getMatchedChildren(), and mlir::NestedMatch::getMatchedOperation().
Referenced by getMatchedAffineLoops().

static 
Returns the vector type resulting from applying the provided vectorization strategy on the scalar type.
Definition at line 944 of file SuperVectorize.cpp.
References mlir::Type::isa(), and mlir::VectorizationStrategy::vectorSizes.
Referenced by createInitialVector(), vectorizeConstant(), and vectorizeUniform().

static 
Returns true if value
is a constant equal to the neutral element of the given vectorizable reduction.
Definition at line 1256 of file SuperVectorize.cpp.
References mlir::Value::getDefiningOp(), mlir::arith::getIdentityValueAttr(), mlir::Value::getLoc(), and mlir::Value::getType().
Referenced by vectorizeAffineForOp().

static 
Returns true if the provided value is vector uniform given the vectorization strategy.
Definition at line 1079 of file SuperVectorize.cpp.
References mlir::getForInductionVarOwner(), and mlir::VectorizationStrategy::loopToVectorDim.
Referenced by vectorizeOperand().

static 
Forward declaration.
Returns a FilterFunctionType that can be used in NestedPattern to match a loop whose underlying load/store accesses are either invariant or all vary along the fastest varying memref dimension.
Definition at line 925 of file SuperVectorize.cpp.
References mlir::isVectorizableLoopBody(), and vectorTransferPattern().
Referenced by makePattern().

static 
Creates a vectorization pattern from the command line arguments.
Up to 3D patterns are supported. If the command line argument requests a pattern of higher order, returns an empty pattern list which will conservatively result in no vectorization.
Definition at line 586 of file SuperVectorize.cpp.
References mlir::matcher::For(), and isVectorizableLoopPtrFactory().
Referenced by vectorizeLoops().

static 
Vectorizes a loop with the vectorization strategy in 'state'.
A new loop is created and registered as replacement for the scalar loop. The builder's insertion point is set to the new loop's body so that subsequent vectorized operations are inserted into the new loop. If the loop is a vector dimension, the step of the newly created loop will reflect the vectorization factor used to vectorized that dimension.
Definition at line 1274 of file SuperVectorize.cpp.
References createInitialVector(), createMask(), mlir::Value::getLoc(), mlir::arith::getReductionOp(), mlir::vector::getVectorReductionOp(), isNeutralElementConst(), mlir::VectorizationStrategy::loopToVectorDim, mlir::VectorizationStrategy::reductionLoops, vectorizeOperand(), and mlir::VectorizationStrategy::vectorSizes.
Referenced by vectorizeOneOperation().

static 
Vectorizes an affine load with the vectorization strategy in 'state' by generating a 'vector.transfer_read' op with the proper permutation map inferred from the indices of the load.
The new 'vector.transfer_read' is registered as replacement of the scalar load. Returns the newly created 'vector.transfer_read' if vectorization was successful. Returns nullptr, otherwise.
Definition at line 1171 of file SuperVectorize.cpp.
References computeMemoryOpIndices(), and makePermutationMap().
Referenced by vectorizeOneOperation().

static 
Vectorizes an affine store with the vectorization strategy in 'state' by generating a 'vector.transfer_write' op with the proper permutation map inferred from the indices of the store.
The new 'vector.transfer_write' is registered as replacement of the scalar store. Returns the newly created 'vector.transfer_write' if vectorization was successful. Returns nullptr, otherwise.
Definition at line 1215 of file SuperVectorize.cpp.
References computeMemoryOpIndices(), makePermutationMap(), and vectorizeOperand().
Referenced by vectorizeOneOperation().

static 
Vectorizes a yield operation by widening its types.
The builder's insertion point is set after the vectorized parent op to continue vectorizing the operations after the parent op. When vectorizing a reduction loop a mask may be used to prevent adding garbage values to the accumulator.
Definition at line 1428 of file SuperVectorize.cpp.
References mlir::Value::getLoc(), mlir::Operation::getNumOperands(), mlir::matchReduction(), and widenOp().
Referenced by vectorizeOneOperation().

static 
Tries to transform a scalar constant into a vector constant.
Returns the vector constant if the scalar type is valid vector element type. Returns nullptr, otherwise.
Definition at line 953 of file SuperVectorize.cpp.
References mlir::DenseElementsAttr::get(), mlir::Operation::getParentOp(), and getVectorType().
Referenced by vectorizeOneOperation(), and vectorizeOperand().

static 
Definition at line 627 of file SuperVectorize.cpp.
References mlir::VectorizationStrategy::loopToVectorDim, and mlir::VectorizationStrategy::vectorSizes.
Referenced by analyzeProfitability(), and vectorizeLoops().

static 
Internal implementation to vectorize affine loops from a single loop nest using an nD vectorization strategy.
Definition at line 1535 of file SuperVectorize.cpp.
References mlir::WalkResult::advance(), eraseLoopNest(), mlir::failure(), mlir::WalkResult::interrupt(), mlir::isVectorizableLoopBody(), mlir::PreOrder, mlir::success(), vectorizeOneOperation(), and vectorTransferPattern().
Referenced by mlir::vectorizeAffineLoopNest(), and vectorizeRootMatch().

static 
Internal implementation to vectorize affine loops in 'loops' using the nD vectorization factors in 'vectorSizes'.
By default, each vectorization factor is applied innertoouter to the loops of each loop nest. 'fastestVaryingPattern' can be optionally used to provide a different loop vectorization order. reductionLoops
can be provided to specify loops which can be vectorized along the reduction dimension.
Definition at line 1661 of file SuperVectorize.cpp.
References analyzeProfitability(), computeIntersectionBuckets(), mlir::failed(), makePattern(), mlir::VectorizationStrategy::reductionLoops, mlir::succeeded(), vectorizeLoopIfProfitable(), vectorizeRootMatch(), and mlir::VectorizationStrategy::vectorSizes.
Referenced by mlir::vectorizeAffineLoops().

static 
Encodes Operationspecific behavior for vectorization.
In general we assume that all operands of an op must be vectorized but this is not always true. In the future, it would be nice to have a trait that describes how a particular operation vectorizes. For now we implement the case distinction here. Returns a vectorized form of an operation or nullptr if vectorization fails.
Definition at line 1476 of file SuperVectorize.cpp.
References mlir::Operation::getNumRegions(), vectorizeAffineForOp(), vectorizeAffineLoad(), vectorizeAffineStore(), vectorizeAffineYieldOp(), vectorizeConstant(), and widenOp().
Referenced by vectorizeLoopNest().

static 
Tries to vectorize a given operand
by applying the following logic:
operand
is already in the proper vector form;operand
is a constant, returns the vectorized form of the constant;operand
is uniform, returns a vector broadcast of the op
;operand
is not supported. Newly created vector operations are registered in state
as replacement for their scalar counterparts. In particular this logic captures some of the use cases where definitions that are not scoped under the current pattern are needed to vectorize. One such example is top level function constants that need to be splatted.Returns an operand that has been vectorized to match state
's strategy if vectorization is possible with the above logic. Returns nullptr otherwise.
TODO: handle more complex cases.
Definition at line 1126 of file SuperVectorize.cpp.
References mlir::Value::getDefiningOp(), mlir::Operation::getResult(), mlir::Value::getType(), mlir::Type::isa(), isUniformDefinition(), vectorizeConstant(), and vectorizeUniform().
Referenced by vectorizeAffineForOp(), vectorizeAffineStore(), and widenOp().

static 
Extracts the matched loops and vectorizes them following a topological order.
A new vector loop nest will be created if vectorization succeeds. The original loop nest won't be modified in any case.
Definition at line 1606 of file SuperVectorize.cpp.
References getMatchedAffineLoops(), and vectorizeLoopNest().
Referenced by vectorizeLoops().

static 
Generates a broadcast op for the provided uniform value using the vectorization strategy in 'state'.
Definition at line 1095 of file SuperVectorize.cpp.
References mlir::Value::getLoc(), mlir::Value::getType(), and getVectorType().
Referenced by vectorizeOperand().

static 
Definition at line 608 of file SuperVectorize.cpp.
References mlir::matcher::Op().
Referenced by isVectorizableLoopPtrFactory(), and vectorizeLoopNest().

static 
Verify that affine loops in 'loops' meet the nesting criteria expected by SuperVectorizer:
Definition at line 1768 of file SuperVectorize.cpp.
References mlir::failure(), and mlir::success().
Referenced by mlir::vectorizeAffineLoopNest().

static 
Vectorizes arbitrary operation by plain widening.
We apply generic type widening of all its results and retrieve the vector counterparts for all its operands.
Definition at line 1396 of file SuperVectorize.cpp.
References mlir::Operation::getAttrs(), mlir::OperationName::getIdentifier(), mlir::Operation::getLoc(), mlir::Operation::getName(), mlir::Operation::getOperands(), mlir::Operation::getResults(), and vectorizeOperand().
Referenced by vectorizeAffineYieldOp(), and vectorizeOneOperation().