#include "mlir/Support/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include <optional>
Namespaces
    mlir
        This header declares functions that assist transformations in the MemRef dialect.
    mlir::affine

Functions
void mlir::affine::getTripCountMapAndOperands(AffineForOp forOp, AffineMap *map, SmallVectorImpl<Value> *operands)
    Returns the trip count of the loop as an affine map, together with its corresponding operands, if the trip count is expressible as an affine expression; otherwise sets *map to a null AffineMap.
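As a rough illustration (not taken from this header's documentation), the map/operands pair can be queried and checked for validity before use. The helper function below and the header paths are assumptions based on the declarations listed on this page.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/IR/AffineMap.h"

using namespace mlir;

// Hypothetical helper: query the symbolic trip count of `forOp`.
static void inspectTripCount(affine::AffineForOp forOp) {
  AffineMap tripCountMap;
  SmallVector<Value, 4> tripCountOperands;
  affine::getTripCountMapAndOperands(forOp, &tripCountMap, &tripCountOperands);
  if (!tripCountMap)
    return; // Trip count is not expressible as an affine expression.
  // Otherwise, `tripCountMap` evaluated on `tripCountOperands` yields the trip count.
}
```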
std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp)
    Returns the trip count of the loop if it is a constant, std::nullopt otherwise.
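A minimal sketch of how a transformation might use the constant trip count, e.g. to detect single-iteration loops; the surrounding helper is hypothetical.

```cpp
#include <optional>

#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// Hypothetical helper: returns true if `forOp` is known to run exactly once.
static bool isSingleIteration(affine::AffineForOp forOp) {
  std::optional<uint64_t> tripCount = affine::getConstantTripCount(forOp);
  return tripCount && *tripCount == 1;
}
```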
uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp)
    Returns the greatest known integral divisor of the trip count.
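The largest known divisor is typically useful for picking an unroll factor that needs no cleanup loop. The sketch below assumes a caller-supplied candidate factor; the helper itself is not part of this header.

```cpp
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// Hypothetical helper: returns true if unrolling `forOp` by `factor` is known
// to divide the trip count evenly (so no epilogue/cleanup loop is required).
static bool unrollsEvenly(affine::AffineForOp forOp, uint64_t factor) {
  uint64_t divisor = affine::getLargestDivisorOfTripCount(forOp);
  return factor != 0 && divisor % factor == 0;
}
```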
DenseSet<Value, DenseMapInfo<Value>> mlir::affine::getInvariantAccesses(Value iv, ArrayRef<Value> indices)
    Given the induction variable iv of an AffineForOp and a list of indices of IndexType, returns the subset of indices that are independent of iv.
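A sketch of calling the function on the indices of an affine.load nested under the loop; the helper and its use of the load op's index accessors are assumptions for illustration, not part of this header.

```cpp
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// Hypothetical helper: counts how many indices of `loadOp` are independent of
// the induction variable of the enclosing `forOp`.
static unsigned countInvariantIndices(affine::AffineForOp forOp,
                                      affine::AffineLoadOp loadOp) {
  Value iv = forOp.getInductionVar();
  SmallVector<Value, 4> indices(loadOp.getIndices().begin(),
                                loadOp.getIndices().end());
  DenseSet<Value, DenseMapInfo<Value>> invariant =
      affine::getInvariantAccesses(iv, indices);
  return invariant.size();
}
```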
bool mlir::affine::isVectorizableLoopBody(AffineForOp loop, NestedPattern &vectorTransferMatcher)
    Checks whether the loop is structurally vectorizable. A usage sketch covering both overloads follows the next entry.
bool mlir::affine::isVectorizableLoopBody(AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher)
    Checks whether the loop is structurally vectorizable and that all matched LoadOp and StoreOp have access indexing functions that are either invariant along the loop or vary along at most one memref dimension; if such a unique dimension is found, it is written to memRefDim.
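Both overloads take a NestedPattern that identifies operations to treat as already-vectorized transfers. The sketch below builds such a matcher with helpers assumed to come from the affine NestedMatcher header; the exact matcher construction and header paths are assumptions.

```cpp
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"

using namespace mlir;

// Hypothetical helper: structural vectorizability check for `forOp`.
// If `memRefDim` is non-null, the overload that also constrains the access
// indexing functions is used and the unique varying dimension is reported.
static bool isVectorizable(affine::AffineForOp forOp, int *memRefDim) {
  // Treat existing vector transfer ops as opaque leaves of the match.
  affine::NestedPattern transfers = affine::matcher::Op([](Operation &op) {
    return isa<vector::TransferReadOp, vector::TransferWriteOp>(op);
  });
  return memRefDim
             ? affine::isVectorizableLoopBody(forOp, memRefDim, transfers)
             : affine::isVectorizableLoopBody(forOp, transfers);
}
```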
bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts)
    Checks whether SSA dominance would be violated if the for op's body operations were shifted by the specified shifts.
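A sketch of using the check to guard a body-skewing transformation; the helper and the mention of a skewing utility such as affineForOpBodySkew (declared in the affine LoopUtils header) are assumptions in this context.

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"

using namespace mlir;

// Hypothetical guard: `shifts` is assumed to hold one shift value per
// operation in the body of `forOp`, in program order.
static bool canShiftBody(affine::AffineForOp forOp, ArrayRef<uint64_t> shifts) {
  if (!affine::isOpwiseShiftValid(forOp, shifts))
    return false; // Shifting would place a use before its definition.
  // Safe to apply the shifts, e.g. with a body-skewing utility such as
  // affineForOpBodySkew from the affine LoopUtils header.
  return true;
}
```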