MLIR 23.0.0git
VectorUtils.cpp
Go to the documentation of this file.
1//===- VectorUtils.cpp - MLIR Utilities for VectorOps ------------------===//
2//
3// Part of the MLIR Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements utility methods for working with the Vector dialect.
10//
11//===----------------------------------------------------------------------===//
12
14
23#include "mlir/IR/Builders.h"
24#include "mlir/IR/IntegerSet.h"
25#include "mlir/IR/Operation.h"
27#include "mlir/Support/LLVM.h"
28
29#include "llvm/ADT/DenseSet.h"
30#include "llvm/Support/DebugLog.h"
31#include "llvm/Support/InterleavedRange.h"
32
33#define DEBUG_TYPE "vector-utils"
34
35using namespace mlir;
36
/// Helper function that creates a memref::DimOp or tensor::DimOp depending on
/// the type of `source`.
// NOTE(review): the opening line of this signature (return type, builder,
// location, and `source` parameters) is not visible in this chunk; see the
// index entry: createOrFoldDimOp(OpBuilder &b, Location loc, Value source,
// int64_t dim).
                                    int64_t dim) {
  // Memrefs (ranked or unranked) get a memref.dim; tensors get a tensor.dim.
  if (isa<UnrankedMemRefType, MemRefType>(source.getType()))
    return b.createOrFold<memref::DimOp>(loc, source, dim);
  if (isa<UnrankedTensorType, RankedTensorType>(source.getType()))
    return b.createOrFold<tensor::DimOp>(loc, source, dim);
  // Any other shaped type is a caller error.
  llvm_unreachable("Expected MemRefType or TensorType");
}
47
/// Given the n-D transpose pattern 'transp', return true if 'dim0' and 'dim1'
/// should be transposed with each other within the context of their 2D
/// transposition slice.
///
/// Example 1: dim0 = 0, dim1 = 2, transp = [2, 1, 0]
/// Return true: dim0 and dim1 are transposed within the context of their 2D
/// transposition slice ([1, 0]).
///
/// Example 2: dim0 = 0, dim1 = 1, transp = [2, 1, 0]
/// Return true: dim0 and dim1 are transposed within the context of their 2D
/// transposition slice ([1, 0]). Paradoxically, note how dim1 (1) is *not*
/// transposed within the full context of the transposition.
///
/// Example 3: dim0 = 0, dim1 = 1, transp = [2, 0, 1]
/// Return false: dim0 and dim1 are *not* transposed within the context of
/// their 2D transposition slice ([0, 1]). Paradoxically, note how dim0 (0)
/// and dim1 (1) are transposed within the full context of the
/// transposition.
// NOTE(review): the opening signature line is not visible in this chunk; see
// the index entry: static bool areDimsTransposedIn2DSlice(int64_t dim0,
// int64_t dim1, ArrayRef<int64_t> transp).
                                       ArrayRef<int64_t> transp) {
  // Perform a linear scan along the dimensions of the transposed pattern. If
  // dim0 is found first, dim0 and dim1 are not transposed within the context of
  // their 2D slice. Otherwise, 'dim1' is found first and they are transposed.
  for (int64_t permDim : transp) {
    if (permDim == dim0)
      return false;
    if (permDim == dim1)
      return true;
  }

  // Neither dim appeared in the permutation: the pattern is malformed.
  llvm_unreachable("Ill-formed transpose pattern");
}
80
81FailureOr<std::pair<int, int>>
82mlir::vector::isTranspose2DSlice(vector::TransposeOp op) {
83 VectorType srcType = op.getSourceVectorType();
84 SmallVector<int64_t> srcGtOneDims;
85 for (auto [index, size] : llvm::enumerate(srcType.getShape()))
86 if (size > 1)
87 srcGtOneDims.push_back(index);
88
89 if (srcGtOneDims.size() != 2)
90 return failure();
91
92 // Check whether the two source vector dimensions that are greater than one
93 // must be transposed with each other so that we can apply one of the 2-D
94 // transpose patterns. Otherwise, these patterns are not applicable.
95 if (!areDimsTransposedIn2DSlice(srcGtOneDims[0], srcGtOneDims[1],
96 op.getPermutation()))
97 return failure();
98
99 return std::pair<int, int>(srcGtOneDims[0], srcGtOneDims[1]);
100}
101
/// Constructs a permutation map from memref indices to vector dimension.
///
/// The implementation uses the knowledge of the mapping of enclosing loop to
/// vector dimension. `enclosingLoopToVectorDim` carries this information as a
/// map with:
/// - keys representing "vectorized enclosing loops";
/// - values representing the corresponding vector dimension.
/// The algorithm traverses "vectorized enclosing loops" and extracts the
/// at-most-one MemRef index that is invariant along said loop. This index is
/// guaranteed to be at most one by construction: otherwise the MemRef is not
/// vectorizable.
/// If this invariant index is found, it is added to the permutation_map at the
/// proper vector dimension.
/// If no index is found to be invariant, 0 is added to the permutation_map and
/// corresponds to a vector broadcast along that dimension.
///
/// Returns an empty AffineMap if `enclosingLoopToVectorDim` is empty,
/// signalling that no permutation map can be constructed given
/// `enclosingLoopToVectorDim`.
///
/// Examples can be found in the documentation of `makePermutationMap`, in the
/// header file.
// NOTE(review): the opening signature line is not visible in this chunk; see
// the index entry: static AffineMap makePermutationMap(ArrayRef<Value>
// indices, const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim).
    const DenseMap<Operation *, unsigned> &enclosingLoopToVectorDim) {
  // No vectorized enclosing loops: no permutation map can be built.
  if (enclosingLoopToVectorDim.empty())
    return AffineMap();
  MLIRContext *context =
      enclosingLoopToVectorDim.begin()->getFirst()->getContext();
  // Start with a broadcast (constant 0) along every vector dimension; the
  // loop below overwrites entries for which a non-invariant index exists.
  SmallVector<AffineExpr> perm(enclosingLoopToVectorDim.size(),
                               getAffineConstantExpr(0, context));

  for (auto kvp : enclosingLoopToVectorDim) {
    assert(kvp.second < perm.size());
    // Indices invariant w.r.t. this vectorized loop's induction variable.
    auto invariants = affine::getInvariantAccesses(
        cast<affine::AffineForOp>(kvp.first).getInductionVar(), indices);
    unsigned numIndices = indices.size();
    unsigned countInvariantIndices = 0;
    for (unsigned dim = 0; dim < numIndices; ++dim) {
      if (!invariants.count(indices[dim])) {
        // The (at most one) non-invariant index maps to this vector dim.
        assert(perm[kvp.second] == getAffineConstantExpr(0, context) &&
               "permutationMap already has an entry along dim");
        perm[kvp.second] = getAffineDimExpr(dim, context);
      } else {
        ++countInvariantIndices;
      }
    }
    assert((countInvariantIndices == numIndices ||
            countInvariantIndices == numIndices - 1) &&
           "Vectorization prerequisite violated: at most 1 index may be "
           "invariant wrt a vectorized loop");
    (void)countInvariantIndices;
  }
  return AffineMap::get(indices.size(), 0, perm, context);
}
157
/// Implementation detail that walks up the parents and records the ones with
/// the specified type.
/// TODO: could also be implemented as a collect parents followed by a
/// filter and made available outside this file.
template <typename T>
// NOTE(review): the function header and the declaration of the result
// SetVector `res` are missing from this chunk; see the index entry:
// static SetVector<Operation *> getParentsOfType(Block *block).
  auto *current = block->getParentOp();
  while (current) {
    // Record only ancestors of the requested type; each op can appear at
    // most once on the ancestor chain, hence the assert.
    if ([[maybe_unused]] auto typedParent = dyn_cast<T>(current)) {
      assert(res.count(current) == 0 && "Already inserted");
      res.insert(current);
    }
    current = current->getParentOp();
  }
  return res;
}
175
176/// Returns the enclosing AffineForOp, from closest to farthest.
180
181AffineMap mlir::makePermutationMap(
182 Block *insertPoint, ArrayRef<Value> indices,
183 const DenseMap<Operation *, unsigned> &loopToVectorDim) {
184 DenseMap<Operation *, unsigned> enclosingLoopToVectorDim;
185 auto enclosingLoops = getEnclosingforOps(insertPoint);
186 for (auto *forInst : enclosingLoops) {
187 auto it = loopToVectorDim.find(forInst);
188 if (it != loopToVectorDim.end()) {
189 enclosingLoopToVectorDim.insert(*it);
190 }
191 }
192 return ::makePermutationMap(indices, enclosingLoopToVectorDim);
193}
194
/// Overload anchored at an operation: delegates to the Block-based overload
/// using the block that contains `op`.
AffineMap mlir::makePermutationMap(
// NOTE(review): the parameter line introducing `op` and `indices` is not
// visible in this chunk.
    const DenseMap<Operation *, unsigned> &loopToVectorDim) {
  return makePermutationMap(op->getBlock(), indices, loopToVectorDim);
}
200
201bool matcher::operatesOnSuperVectorsOf(Operation &op,
202 VectorType subVectorType) {
203 // First, extract the vector type and distinguish between:
204 // a. ops that *must* lower a super-vector (i.e. vector.transfer_read,
205 // vector.transfer_write); and
206 // b. ops that *may* lower a super-vector (all other ops).
207 // The ops that *may* lower a super-vector only do so if the super-vector to
208 // sub-vector ratio exists. The ops that *must* lower a super-vector are
209 // explicitly checked for this property.
210 /// TODO: there should be a single function for all ops to do this so we
211 /// do not have to special case. Maybe a trait, or just a method, unclear atm.
212 VectorType superVectorType;
213 if (auto transfer = dyn_cast<VectorTransferOpInterface>(op)) {
214 superVectorType = transfer.getVectorType();
215 } else if (op.getNumResults() == 0) {
216 if (!isa<func::ReturnOp>(op)) {
217 op.emitError("NYI: assuming only return operations can have 0 "
218 " results at this point");
219 }
220 return false;
221 } else if (op.getNumResults() == 1) {
222 if (auto v = dyn_cast<VectorType>(op.getResult(0).getType())) {
223 superVectorType = v;
224 } else {
225 // Not a vector type.
226 return false;
227 }
228 } else {
229 // Not a vector.transfer and has more than 1 result, fail hard for now to
230 // wake us up when something changes.
231 op.emitError("NYI: operation has more than 1 result");
232 return false;
233 }
234
235 // Get the ratio. If the shapes are incompatible (e.g., different ranks or
236 // non-integer divisibility), the operation does not operate on a super-vector
237 // of the given sub-vector type.
238 auto ratio =
239 computeShapeRatio(superVectorType.getShape(), subVectorType.getShape());
240 return ratio.has_value();
241}
242
/// Returns true if the fixed-width `vectorType` describes a contiguous slice
/// of `memrefType`: the memref's trailing dims are contiguous and all vector
/// dims except the outermost match the corresponding trailing memref dims.
bool vector::isContiguousSlice(MemRefType memrefType, VectorType vectorType) {
  // Contiguity cannot be decided statically for scalable vectors.
  if (vectorType.isScalable())
    return false;

  // Ignore a leading sequence of adjacent unit dimensions in the vector.
  // NOTE(review): the left-hand side of this statement (the declaration of
  // `vectorShape`) was dropped by the extraction and is not visible here.
      vectorType.getShape().drop_while([](auto v) { return v == 1; });
  auto vecRank = vectorShape.size();

  // A single element is always contiguous.
  if (vecRank == 0)
    return true;

  // The memref itself must be contiguous in its `vecRank` trailing dims.
  if (!memrefType.areTrailingDimsContiguous(vecRank))
    return false;

  // Extract the trailing dims of the input memref
  auto memrefShape = memrefType.getShape().take_back(vecRank);

  // Compare the dims of `vectorType` against `memrefType`.
  // All of the dimensions, except the first must match.
  return llvm::equal(vectorShape.drop_front(), memrefShape.drop_front());
}
266
267std::optional<StaticTileOffsetRange>
268vector::createUnrollIterator(VectorType vType, int64_t targetRank) {
269 if (vType.getRank() <= targetRank)
270 return {};
271 // Attempt to unroll until targetRank or the first scalable dimension (which
272 // cannot be unrolled).
273 auto shapeToUnroll = vType.getShape().drop_back(targetRank);
274 auto inputScalableVecDimsToUnroll =
275 vType.getScalableDims().drop_back(targetRank);
276 const auto *it = llvm::find(inputScalableVecDimsToUnroll, true);
277 auto firstScalableDim = it - inputScalableVecDimsToUnroll.begin();
278 if (firstScalableDim == 0)
279 return {};
280 // All scalable dimensions should be removed now.
281 inputScalableVecDimsToUnroll =
282 inputScalableVecDimsToUnroll.slice(0, firstScalableDim);
283 assert(!llvm::is_contained(inputScalableVecDimsToUnroll, true) &&
284 "unexpected leading scalable dimension");
285 // Create an unroll iterator for leading dimensions.
286 shapeToUnroll = shapeToUnroll.slice(0, firstScalableDim);
287 return StaticTileOffsetRange(shapeToUnroll, /*unrollStep=*/1);
288}
289
/// Wrapper around getMixedSizes for vector.transfer_read /
/// vector.transfer_write: returns the mixed (static/dynamic) sizes of the
/// transfer op's shaped operand.
// NOTE(review): the first signature line (return type and the
// `hasTensorSemantics` parameter) is not visible in this chunk; see the index
// entry: getMixedSizesXfer(bool hasTensorSemantics, Operation *xfer,
// RewriterBase &rewriter).
                                       Operation *xfer,
                                       RewriterBase &rewriter) {
  auto loc = xfer->getLoc();

  // Pick the shaped operand: the base of a transfer_read, or operand 1 of a
  // transfer_write.
  // NOTE(review): the TypeSwitch header line is not visible in this chunk.
  Value base =
          .Case([&](vector::TransferReadOp readOp) { return readOp.getBase(); })
          .Case([&](vector::TransferWriteOp writeOp) {
            return writeOp.getOperand(1);
          });

  // Tensor vs. memref semantics select which dialect's getMixedSizes runs.
  SmallVector<OpFoldResult> mixedSourceDims =
      hasTensorSemantics ? tensor::getMixedSizes(rewriter, loc, base)
                         : memref::getMixedSizes(rewriter, loc, base);
  return mixedSourceDims;
}
307
308bool vector::isLinearizableVector(VectorType type) {
309 return (type.getRank() > 1) && (type.getNumScalableDims() <= 1);
310}
311
/// Convenience overload: builds the vector type to read from
/// `inputVectorSizes`, the source's element type and `inputScalableVecDims`,
/// then delegates to the VectorType-based overload below.
// NOTE(review): the first signature line (return type, builder and location
// parameters) is not visible in this chunk.
                                        Value source,
                                        ArrayRef<int64_t> inputVectorSizes,
                                        std::optional<Value> padValue,
                                        bool useInBoundsInsteadOfMasking,
                                        ArrayRef<bool> inputScalableVecDims) {
  // Requested sizes + source element type + scalability flags.
  VectorType vecToReadTy = VectorType::get(
      inputVectorSizes, cast<ShapedType>(source.getType()).getElementType(),
      inputScalableVecDims);

  return createReadOrMaskedRead(builder, loc, source, vecToReadTy, padValue,
                                useInBoundsInsteadOfMasking);
}
325
/// Creates a vector.transfer_read of `vecToReadTy` from `source` at offset 0
/// along every dimension. When the source shape does not statically match the
/// vector shape and `useInBoundsInsteadOfMasking` is false, the read is
/// wrapped in a vector.mask built from the source's (mixed) sizes.
// NOTE(review): the first signature line (return type, builder and location
// parameters) is not visible in this chunk.
                                        Value source,
                                        const VectorType &vecToReadTy,
                                        std::optional<Value> padValue,
                                        bool useInBoundsInsteadOfMasking) {
  // NOTE(review): this compares the boolean scalable-dim flags against
  // ShapedType::kDynamic, which a bool can never equal — the assert appears
  // to be a no-op; presumably the shape was meant. TODO: confirm upstream.
  assert(!llvm::is_contained(vecToReadTy.getScalableDims(),
                             ShapedType::kDynamic) &&
         "invalid input vector sizes");
  auto sourceShapedType = cast<ShapedType>(source.getType());
  auto sourceShape = sourceShapedType.getShape();

  int64_t vecToReadRank = vecToReadTy.getRank();
  auto vecToReadShape = vecToReadTy.getShape();

  assert(sourceShape.size() == static_cast<size_t>(vecToReadRank) &&
         "expected same ranks.");
  assert((!padValue.has_value() ||
          padValue.value().getType() == sourceShapedType.getElementType()) &&
         "expected same pad element type to match source element type");

  // Every dimension is read starting at index 0.
  auto zero = arith::ConstantIndexOp::create(builder, loc, 0);
  SmallVector<bool> inBoundsVal(vecToReadRank, true);

  if (useInBoundsInsteadOfMasking) {
    // Update the inBounds attribute.
    // FIXME: This computation is too weak - it ignores the read indices.
    // A dim is in-bounds only when it is static and exactly matches the
    // vector dim.
    for (unsigned i = 0; i < vecToReadRank; i++)
      inBoundsVal[i] = (sourceShape[i] == vecToReadShape[i]) &&
                       ShapedType::isStatic(sourceShape[i]);
  }
  auto transferReadOp = vector::TransferReadOp::create(
      builder, loc,
      /*vectorType=*/vecToReadTy,
      /*source=*/source,
      /*indices=*/SmallVector<Value>(vecToReadRank, zero),
      /*padding=*/padValue,
      /*inBounds=*/inBoundsVal);

  // No mask needed when shapes match exactly, or when the caller opted into
  // in-bounds handling instead of masking.
  if (llvm::equal(vecToReadTy.getShape(), sourceShape) ||
      useInBoundsInsteadOfMasking)
    return transferReadOp;
  // Otherwise mask the read with the source's (possibly dynamic) sizes.
  SmallVector<OpFoldResult> mixedSourceDims =
      isa<MemRefType>(source.getType())
          ? memref::getMixedSizes(builder, loc, source)
          : tensor::getMixedSizes(builder, loc, source);

  auto maskType = vecToReadTy.cloneWith(/*shape=*/{}, builder.getI1Type());
  Value mask =
      vector::CreateMaskOp::create(builder, loc, maskType, mixedSourceDims);
  return mlir::vector::maskOperation(builder, transferReadOp, mask)
      ->getResult(0);
}
378
/// Returns success if `inputVectorSizes` is a valid masking configuration for
/// the iteration-space `shape`: same rank, no dynamic vector sizes, and every
/// static dim of `shape` fits in its corresponding vector size.
LogicalResult
// NOTE(review): the line naming the function and the `shape` parameter is
// not visible in this chunk; see the index entry:
// vector::isValidMaskedInputVector(ArrayRef<int64_t> shape,
// ArrayRef<int64_t> inputVectorSizes).
                                    ArrayRef<int64_t> inputVectorSizes) {
  LDBG() << "Iteration space static sizes:" << llvm::interleaved(shape);

  if (inputVectorSizes.size() != shape.size()) {
    LDBG() << "Input vector sizes don't match the number of loops";
    return failure();
  }
  if (ShapedType::isDynamicShape(inputVectorSizes)) {
    LDBG() << "Input vector sizes can't have dynamic dimensions";
    return failure();
  }
  // Dynamic iteration-space dims are accepted (handled by masking); static
  // dims must not exceed their vector size.
  if (!llvm::all_of(llvm::zip(shape, inputVectorSizes),
                    [](std::tuple<int64_t, int64_t> sizePair) {
                      int64_t staticSize = std::get<0>(sizePair);
                      int64_t inputSize = std::get<1>(sizePair);
                      return ShapedType::isDynamic(staticSize) ||
                             staticSize <= inputSize;
                    })) {
    LDBG() << "Input vector sizes must be greater than or equal to iteration "
              "space static sizes";
    return failure();
  }
  return success();
}
405
/// Takes a 2+ dimensional vector as an input
/// returns n vector values produced by n vector.extract operations.
/// I.e. calling unrollVectorValue([[%v]], rewriter) such that
///
/// %v : vector<nxaxb...>
///
/// will produce the following IR changes
///
/// %v0 = vector.extract %v[0] : vector<axbx...> from vector<nxaxb...>
/// %v1 = vector.extract %v[1] : vector<axbx...> from vector<nxaxb...>
/// ...
/// %vnminusone = vector.extract %v[n-1] : vector<axbx...> from ...
///
/// and returns SmallVector<Value> r = {[[%v0]], [[%v1]], ..., [[%vnminusone]]}
FailureOr<SmallVector<Value>>
// NOTE(review): the line naming the function and its `vector` parameter is
// not visible in this chunk; see the index entry:
// vector::unrollVectorValue(TypedValue<VectorType>, RewriterBase &).
                         RewriterBase &rewriter) {
  SmallVector<Value> subvectors;
  VectorType ty = cast<VectorType>(vector.getType());
  Location loc = vector.getLoc();
  // Rank-0/1 vectors have nothing to unroll.
  if (ty.getRank() < 2)
    return rewriter.notifyMatchFailure(loc, "already 1-D");

  // Unrolling doesn't take vscale into account. Pattern is disabled for
  // vectors with leading scalable dim(s).
  if (ty.getScalableDims().front())
    return rewriter.notifyMatchFailure(loc, "cannot unroll scalable dim");

  // One vector.extract per slice along the leading dimension.
  for (int64_t i = 0, e = ty.getShape().front(); i < e; ++i) {
    subvectors.push_back(vector::ExtractOp::create(rewriter, loc, vector, i));
  }

  return subvectors;
}
440
/// Unrolls a single-vector-result n-D op into (n-1)-D pieces: for each index
/// along the leading result dimension, `unrollFn` produces the sub-vector,
/// which is inserted into a poison-initialized result that replaces `op`.
// NOTE(review): the first signature line is not visible in this chunk; see
// the index entry: vector::unrollVectorOp(Operation *op,
// PatternRewriter &rewriter, UnrollVectorOpFn unrollFn).
                                   vector::UnrollVectorOpFn unrollFn) {
  assert(op->getNumResults() == 1 && "expected single result");
  assert(isa<VectorType>(op->getResult(0).getType()) && "expected vector type");
  VectorType resultTy = cast<VectorType>(op->getResult(0).getType());
  // Rank-0/1 results have nothing to unroll.
  if (resultTy.getRank() < 2)
    return rewriter.notifyMatchFailure(op, "already 1-D");

  // Unrolling doesn't take vscale into account. Pattern is disabled for
  // vectors with leading scalable dim(s).
  if (resultTy.getScalableDims().front())
    return rewriter.notifyMatchFailure(op, "cannot unroll scalable dim");

  Location loc = op->getLoc();
  // Start from poison and insert each unrolled slice into it.
  Value result = ub::PoisonOp::create(rewriter, loc, resultTy);
  // Sub-vector type: the result type with the leading dim dropped.
  VectorType subTy = VectorType::Builder(resultTy).dropDim(0);

  for (int64_t i = 0, e = resultTy.getShape().front(); i < e; ++i) {
    Value subVector = unrollFn(rewriter, loc, subTy, i);
    result = vector::InsertOp::create(rewriter, loc, subVector, result, i);
  }

  rewriter.replaceOp(op, result);
  return success();
}
return success()
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
static std::optional< VectorShape > vectorShape(Type type)
static SetVector< Operation * > getParentsOfType(Block *block)
Implementation detail that walks up the parents and records the ones with the specified type.
static bool areDimsTransposedIn2DSlice(int64_t dim0, int64_t dim1, ArrayRef< int64_t > transp)
Given the n-D transpose pattern 'transp', return true if 'dim0' and 'dim1' should be transposed with ...
static SetVector< Operation * > getEnclosingforOps(Block *block)
Returns the enclosing AffineForOp, from closest to farthest.
static AffineMap makePermutationMap(ArrayRef< Value > indices, const DenseMap< Operation *, unsigned > &enclosingLoopToVectorDim)
Constructs a permutation map from memref indices to vector dimension.
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued.
Definition AffineMap.h:46
static AffineMap get(MLIRContext *context)
Returns a zero result affine map with no dimensions or symbols: () -> ().
Block represents an ordered list of Operations.
Definition Block.h:33
Operation * getParentOp()
Returns the closest surrounding operation that contains this block.
Definition Block.cpp:31
IntegerType getI1Type()
Definition Builders.cpp:57
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition Location.h:76
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
This class helps build Operations.
Definition Builders.h:209
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
Block * getBlock()
Returns the operation block that contains this operation.
Definition Operation.h:234
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition Operation.h:436
Location getLoc()
The source location the operation was defined or derived from.
Definition Operation.h:244
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
unsigned getNumResults()
Return the number of results held by this operation.
Definition Operation.h:433
A special type of RewriterBase that coordinates the application of a rewrite pattern on the current I...
This class coordinates the application of a rewrite on a set of IR, providing a way for clients to tr...
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
std::enable_if_t<!std::is_convertible< CallbackT, Twine >::value, LogicalResult > notifyMatchFailure(Location loc, CallbackT &&reasonCallback)
Used to notify the listener that the IR failed to be rewritten because of a match failure,...
A range-style iterator that allows for iterating over the offsets of all potential tiles of size tile...
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition Value.h:96
Type getType() const
Return the type of this value.
Definition Value.h:105
This is a builder type that keeps local references to arguments.
Builder & dropDim(unsigned pos)
Erase a dim from shape @pos.
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
Definition ArithOps.cpp:363
DenseSet< Value, DenseMapInfo< Value > > getInvariantAccesses(Value iv, ArrayRef< Value > indices)
Given an induction variable iv of type AffineForOp and indices of type IndexType, returns the set of ...
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given memref value.
Definition MemRefOps.cpp:79
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given tensor value.
Definition TensorOps.cpp:68
bool isContiguousSlice(MemRefType memrefType, VectorType vectorType)
Return true if vectorType is a contiguous slice of memrefType, in the sense that it can be read/writt...
Operation * maskOperation(OpBuilder &builder, Operation *maskableOp, Value mask, Value passthru=Value())
Creates a vector.mask operation around a maskable operation.
LogicalResult isValidMaskedInputVector(ArrayRef< int64_t > shape, ArrayRef< int64_t > inputVectorSizes)
Returns success if inputVectorSizes is a valid masking configuraion for given shape,...
FailureOr< std::pair< int, int > > isTranspose2DSlice(vector::TransposeOp op)
Returns two dims that are greater than one if the transposition is applied on a 2D slice.
FailureOr< SmallVector< Value > > unrollVectorValue(TypedValue< VectorType >, RewriterBase &)
Generic utility for unrolling values of type vector<NxAxBx...> to N values of type vector<AxBx....
std::optional< StaticTileOffsetRange > createUnrollIterator(VectorType vType, int64_t targetRank=1)
Returns an iterator for all positions in the leading dimensions of vType up to the targetRank.
Value createOrFoldDimOp(OpBuilder &b, Location loc, Value source, int64_t dim)
Helper function that creates a memref::DimOp or tensor::DimOp depending on the type of source.
bool isLinearizableVector(VectorType type)
Returns true if the input Vector type can be linearized.
Value createReadOrMaskedRead(OpBuilder &builder, Location loc, Value source, const VectorType &vecToReadTy, std::optional< Value > padValue=std::nullopt, bool useInBoundsInsteadOfMasking=false)
Creates a TransferReadOp from source.
function_ref< Value(PatternRewriter &, Location, VectorType, int64_t)> UnrollVectorOpFn
Generic utility for unrolling n-D vector operations to (n-1)-D operations.
SmallVector< OpFoldResult > getMixedSizesXfer(bool hasTensorSemantics, Operation *xfer, RewriterBase &rewriter)
A wrapper for getMixedSizes for vector.transfer_read and vector.transfer_write Ops (for source and de...
LogicalResult unrollVectorOp(Operation *op, PatternRewriter &rewriter, UnrollVectorOpFn unrollFn)
Include the generated interface declarations.
llvm::SetVector< T, Vector, Set, N > SetVector
Definition LLVM.h:123
std::conditional_t< std::is_same_v< Ty, mlir::Type >, mlir::Value, detail::TypedValue< Ty > > TypedValue
If Ty is mlir::Type this will select Value instead of having a wrapper around it.
Definition Value.h:497
llvm::TypeSwitch< T, ResultT > TypeSwitch
Definition LLVM.h:136
AffineExpr getAffineConstantExpr(int64_t constant, MLIRContext *context)
llvm::DenseMap< KeyT, ValueT, KeyInfoT, BucketT > DenseMap
Definition LLVM.h:118
std::optional< SmallVector< int64_t > > computeShapeRatio(ArrayRef< int64_t > shape, ArrayRef< int64_t > subShape)
Return the multi-dimensional integral ratio of subShape to the trailing dimensions of shape.
AffineExpr getAffineDimExpr(unsigned position, MLIRContext *context)
These free functions allow clients of the API to not use classes in detail.