//===- Utils.h - Utilities to support the Linalg dialect --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_LINALG_UTILS_UTILS_H
#define MLIR_DIALECT_LINALG_UTILS_UTILS_H

#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "llvm/ADT/StringSet.h"
#include <optional>

namespace mlir {
class AffineExpr;
class AffineMap;
class PatternRewriter;

namespace affine {
class AffineForOp;
} // namespace affine

namespace tensor {
class ExtractSliceOp;
} // namespace tensor

namespace linalg {

//===----------------------------------------------------------------------===//
// Utilities for inferring various semantics properties of Linalg ops.
//===----------------------------------------------------------------------===//
/// Shell function to compute the destination permutation of a PackOp.
/// This function uses the helper function `computePackUnPackPerm` to get
/// the permutation vector. The only major difference between UnPack and Pack
/// is that PackOp uses the destination rank whereas UnPackOp uses the source
/// rank.
SmallVector<int64_t> getPackInverseDestPerm(linalg::PackOp packOp);

/// Shell function to compute the source permutation of an UnPackOp.
/// This function, like `getPackInverseDestPerm`, uses the helper function
/// `computePackUnPackPerm` to get the permutation vector.
/// The only major difference between UnPack and Pack is that PackOp uses the
/// destination rank whereas UnPackOp uses the source rank.
SmallVector<int64_t> getUnPackInverseSrcPerm(linalg::UnPackOp unpackOp);

/// Shell function to compute the source rank permutation of an UnPackOp.
/// UnPack requires some packing metadata, so this overload also returns that
/// metadata through the by-reference `metadata` argument.
SmallVector<int64_t> getUnPackInverseSrcPerm(linalg::UnPackOp unpackOp,
                                             PackingMetadata &metadata);

//===----------------------------------------------------------------------===//
// General utilities
//===----------------------------------------------------------------------===//

/// Check if all indexing maps are projected permutations.
bool allIndexingsAreProjectedPermutation(LinalgOp op);

/// Detect whether `r` has only ConstantOp, ElementwiseMappable and YieldOp.
bool hasOnlyScalarElementwiseOp(Region &r);

/// Check if a LinalgOp is an element-wise operation.
bool isElementwise(LinalgOp op);
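// For illustration, `isElementwise` would match a generic such as the
// following sketch (all-parallel iterators, identity indexing maps abbreviated
// here as #id, scalar body):
//   %r = linalg.generic
//          {indexing_maps = [#id, #id, #id],
//           iterator_types = ["parallel", "parallel"]}
//          ins(%a, %b : tensor<?x?xf32>, tensor<?x?xf32>)
//          outs(%init : tensor<?x?xf32>) {
//        ^bb0(%x: f32, %y: f32, %out: f32):
//          %s = arith.addf %x, %y : f32
//          linalg.yield %s : f32
//        } -> tensor<?x?xf32>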

/// Check if iterator type has "parallel" semantics.
bool isParallelIterator(utils::IteratorType iteratorType);

/// Check if iterator type has "reduction" semantics.
bool isReductionIterator(utils::IteratorType iteratorType);

/// Create a tensor::PadOp that pads `source` to the size of the statically
/// sized `type` whose static sizes are assumed to be greater than the dynamic
/// `source` size. The padding introduces trailing `pad` values until the
/// target size is met. If `source` is defined by one or more LinalgOps that
/// have been padded with the same value and sizes, return their padded result
/// instead of creating a tensor::PadOp.
///
/// Example:
/// ```
/// %0 = tensor.extract_slice %arg0 [%iv0, %iv1] [%sz0, %sz1]
/// %1 = tensor.pad %0 low[0, 0] high[...] { tensor.yield %cst }
/// %2 = linalg.matmul ins(...) outs(%1)
/// %3 = tensor.extract_slice %2 [0, 0] [%sz0, %sz1]
/// ```
/// makeComposedPadHighOp(source=%3, pad=%cst) returns %2
/// makeComposedPadHighOp(source=%3, pad=%other_cst) returns %4
/// ```
/// %4 = tensor.pad %3 low[0, 0] high[...] { tensor.yield %other_cst }
/// ```
Value makeComposedPadHighOp(OpBuilder &b, Location loc, RankedTensorType type,
                            Value source, Value pad, bool nofold);

/// Returns a GenericOp that copies an n-D memref. Unlike the current
/// implementation of memref::CopyOp, this op can further tile, lower to loops
/// or vectorize.
GenericOp makeMemRefCopyOp(OpBuilder &b, Location loc, Value from, Value to);
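// For illustration, `makeMemRefCopyOp(b, loc, %from, %to)` builds, roughly,
// an identity-mapped, all-parallel generic (rank-2 sketch, #id standing for
// the identity affine map):
//   linalg.generic {indexing_maps = [#id, #id],
//                   iterator_types = ["parallel", "parallel"]}
//     ins(%from : memref<?x?xf32>) outs(%to : memref<?x?xf32>) {
//   ^bb0(%in: f32, %out: f32):
//     linalg.yield %in : f32
//   }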

/// Get the reassociation maps to fold the result of an extract_slice (or
/// source of an insert_slice) operation with given offsets, and sizes to its
/// rank-reduced version. This is only done for the cases where the size is 1
/// and offset is 0. Strictly speaking the offset 0 is not required in
/// general, but non-zero offsets are not handled by the SPIR-V backend at
/// this point (and potentially cannot be handled).
std::optional<SmallVector<ReassociationIndices>>
getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes);
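// For illustration (assuming unit dimensions are grouped with the next
// non-unit dimension): mixedSizes = [1, %sz0, 1, %sz1] would yield the
// reassociation [[0, 1], [2, 3]].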

//===----------------------------------------------------------------------===//
// Fusion / Tiling utilities
//===----------------------------------------------------------------------===//

/// The type of loops to be generated during tiling.
enum LinalgTilingLoopType {
  Loops = 0,
  AffineLoops = 1,
  ParallelLoops = 2
};

/// Computes tile offsets, given a list of loop `ivs` and `tileSizes`. In case
/// a tile size is zero (i.e., no tiling), the corresponding offset is also
/// zero.
SmallVector<OpFoldResult> computeTileOffsets(OpBuilder &b, Location loc,
                                             ArrayRef<OpFoldResult> ivs,
                                             ArrayRef<OpFoldResult> tileSizes);
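// For illustration: with tileSizes = [4, 0, 8] and ivs = [%i, %j] (one iv per
// tiled dimension), the returned offsets would be [%i, 0, %j].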

/// Computes tile sizes, given a list of `tileSizes` and dimension
/// sizes (`sizeBounds`). In case a tile size is zero (i.e., no tiling), the
/// corresponding result size is the corresponding value from `sizeBounds`.
/// Note: The returned tile sizes are closed intervals.
SmallVector<OpFoldResult> computeTileSizes(OpBuilder &b, Location loc,
                                           ArrayRef<OpFoldResult> tileSizes,
                                           ArrayRef<OpFoldResult> sizeBounds);

/// Returns the list of tensor output types produced when the given structured
/// operation `op` is applied to the given `operands`. Note that `operands`
/// are not necessarily the actual operands of `op`.
SmallVector<Type> getTensorOutputTypes(LinalgOp op, ValueRange operands);

/// Creates `insert_slice` ops that insert `results` back into larger tensors
/// they were originally extracted from with `extract_slice` before being
/// passed as `operands` to the given structured operation `op` or its clone.
/// Note that `operands` are not necessarily the actual operands of `op`; the
/// operation serves only as a metadata container for operand types and
/// positions.
SmallVector<Value> insertSlicesBack(OpBuilder &builder, Location loc,
                                    LinalgOp op, ValueRange operands,
                                    ValueRange results);

/// A struct containing the offsets-sizes-strides arguments of the tiled
/// shape.
struct SliceParameters {
  SmallVector<OpFoldResult> offsets;
  SmallVector<OpFoldResult> sizes;
  SmallVector<OpFoldResult> strides;
};

/// Computes SliceParameters for a single `valueToTile` assuming that its user
/// is being tiled with the given loop bounds `lbs` and `ubs` and the tile
/// sizes `tileSizes`.
///
/// `omitPartialTileCheck` controls whether to omit the partial/boundary tile
/// condition check in cases where we statically know that it is unnecessary.
SliceParameters
computeSliceParameters(OpBuilder &builder, Location loc, Value valueToTile,
                       ArrayRef<OpFoldResult> tileSizes, AffineMap map,
                       ArrayRef<OpFoldResult> lbs, ArrayRef<OpFoldResult> ubs,
                       ArrayRef<OpFoldResult> subShapeSizes,
                       bool omitPartialTileCheck);

/// Computes SliceParameters for all `valuesToTile` of the given `linalgOp`,
/// assuming `linalgOp` is being fused into a loop nest. Calls
/// `computeSliceParameters` for every individual value.
///
/// Note that a constant zero in `tileSizes` means no tiling at that implicit
/// loop. The number of non-zero values in `tileSizes` should be equal to the
/// number of values in `ivs`.
///
/// Some of the `valuesToTile` won't be affected by tiling. For these values,
/// std::nullopt will be returned.
SmallVector<std::optional<SliceParameters>>
computeAllSliceParameters(OpBuilder &builder, Location loc, LinalgOp linalgOp,
                          ValueRange valuesToTile, ArrayRef<OpFoldResult> ivs,
                          ArrayRef<OpFoldResult> tileSizes,
                          ArrayRef<OpFoldResult> sizeBounds,
                          bool omitPartialTileCheck);

/// Creates an extract_slice/subview op for a single `valueToTile` with
/// `builder`. This new operation extracts a tile of `valueToTile`, starting
/// at offsets `lbs` and with sizes `subShapeSizes`. `omitPartialTileCheck`
/// controls whether to omit the partial/boundary tile condition check in
/// cases where we statically know that it is unnecessary.
Operation *makeTiledShape(OpBuilder &builder, Location loc, Value valueToTile,
                          ArrayRef<OpFoldResult> tileSizes, AffineMap map,
                          ArrayRef<OpFoldResult> lbs,
                          ArrayRef<OpFoldResult> ubs,
                          ArrayRef<OpFoldResult> subShapeSizes,
                          bool omitPartialTileCheck);

/// Creates extract_slice/subview ops for all `valuesToTile` of the given
/// `linalgOp` with `builder`, assuming `linalgOp` is being fused into a loop
/// nest for tiling with the given induction variables `ivs` and tile sizes
/// `tileSizes`. `sizeBounds` are the iteration space bounds for *all* the
/// implicit loops in `linalgOp`. `omitPartialTileCheck` controls whether to
/// omit the partial/boundary tile condition check in cases where we
/// statically know that it is unnecessary.
///
/// Note that a constant zero in `tileSizes` means no tiling at that implicit
/// loop. The number of non-zero values in `tileSizes` should be equal to the
/// number of values in `ivs`.
SmallVector<Value> makeTiledShapes(OpBuilder &builder, Location loc,
                                   LinalgOp linalgOp, ValueRange valuesToTile,
                                   ArrayRef<OpFoldResult> ivs,
                                   ArrayRef<OpFoldResult> tileSizes,
                                   ArrayRef<OpFoldResult> sizeBounds,
                                   bool omitPartialTileCheck);

/// Add the specified offsets to any `linalg.index` ops contained in the given
/// `linalgOp`. The offsets are provided in the same order as iteration space
/// dimensions. Null offsets are assumed to be zero.
void offsetIndices(OpBuilder &b, LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> offsets);
void offsetIndices(RewriterBase &b, LinalgOp linalgOp,
                   ArrayRef<OpFoldResult> offsets);
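// For illustration: with offsets = [%off0, nullptr], every `linalg.index 0`
// result in the body is shifted by %off0 (e.g. via an affine.apply), while
// `linalg.index 1` is left untouched.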

/// A struct containing the Linalg producer before and after fusion.
/// When operating on tensors, `fusedProducer` may feed into a `tensor.cast`
/// op before the consumer Linalg op, until enough canonicalizations have
/// been applied.
struct FusionInfo {
  LinalgOp originalProducer;
  LinalgOp fusedProducer;
};

/// This implements the fusion part of the "tileAndFuse on tensors"
/// transformation and thus requires the `consumerOpOperand` to be an
/// `extract_slice` op (generally obtained by applying the tiling
/// transformation).
FailureOr<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
                                           OpOperand &consumerOpOperand);

/// This implements the fusion part of the "tileAndFuse on tensors"
/// transformation and thus requires the `consumerOpOperand` to be an
/// `extract_slice` op (generally obtained by applying the tiling
/// transformation). Assumes `producerOpResult` is the result of a Linalg op
/// that produces `consumerOpOperand`.
FailureOr<FusionInfo> fuseProducerOfTensor(OpBuilder &b,
                                           OpResult producerOpResult,
                                           OpOperand &consumerOpOperand);

//===----------------------------------------------------------------------===//
// Distribution utilities
//===----------------------------------------------------------------------===//

/// Scheme used to distribute loops to processors.
enum class DistributionMethod {
  /// Cyclic distribution where no assumption is made about the dynamic
  /// relationship between the number of processors and the number of
  /// iterations of the distributed loop. Distributes the following loop
  ///
  /// scf.parallel (%iv) = (%lb) to (%ub) step (%step)
  ///
  /// to
  ///
  /// scf.parallel (%iv) = (%lb + %procId * %step) to (%ub)
  ///   step (%step * %nprocs)
  Cyclic = 0,

  /// Cyclic distribution where the number of processors can be assumed to be
  /// more than or equal to the number of iterations of the distributed loop.
  /// In such cases, a simple in-bounds check is enough (instead of
  /// materializing a loop). Distributes the following loop
  ///
  /// scf.parallel (%iv) = (%lb) to (%ub) step (%step)
  ///
  /// to
  ///
  /// %iv = %lb + %procId * %step
  /// %cond = arith.cmpi "slt", %iv, %ub
  /// scf.if %cond {
  ///   ...
  /// }
  CyclicNumProcsGeNumIters = 1,

  /// Cyclic distribution where the number of processors can be assumed to be
  /// equal to the number of iterations of the distributed loop. In such
  /// cases, no bounds check is needed. Distributes the following loop
  ///
  /// scf.parallel (%iv) = (%lb) to (%ub) step (%step)
  ///
  /// to
  ///
  /// %iv = %lb + %procId * %step
  CyclicNumProcsEqNumIters = 2,

  /// No distribution.
  None = 3
};

/// Callback function type used to get the processor ID and number of
/// processors used for distribution for all parallel loops generated.
struct ProcInfo {
  Value procId;
  Value nprocs;
  DistributionMethod distributionMethod;
};
using ProcInfoCallBackFn = std::function<SmallVector<ProcInfo>(
    OpBuilder &b, Location loc, ArrayRef<Range> parallelLoopRanges)>;

/// Options that allow distribution of loops generated in Linalg transforms to
/// processors while generating the loops.
struct LinalgLoopDistributionOptions {
  /// Callback function that returns the Values for processor ID (`procId`),
  /// and number of processors (`nprocs`) used to execute the parallel loops.
  /// The number of `{procId, nprocs}` pairs returned must be equal to the
  /// number of `parallelLoopRanges` passed into the callback. The
  /// `parallelLoopRanges` are ranges of the outer parallel loops of the
  /// operation that do have non-zero tile sizes specified.
  ProcInfoCallBackFn procInfo;
};
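// For illustration, a sketch of a callback that distributes every generated
// parallel loop cyclically; `myProcId` and `myNumProcs` are hypothetical
// Values supplied by the caller:
//   LinalgLoopDistributionOptions options;
//   options.procInfo = [=](OpBuilder &b, Location loc,
//                          ArrayRef<Range> parallelLoopRanges) {
//     SmallVector<ProcInfo> infos;
//     for (unsigned i = 0, e = parallelLoopRanges.size(); i != e; ++i)
//       infos.push_back({myProcId, myNumProcs, DistributionMethod::Cyclic});
//     return infos;
//   };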

/// Update the `lb`, `ub` and `step` to get per processor `lb`, `ub` and
/// `step`.
void updateBoundsForCyclicDistribution(OpBuilder &builder, Location loc,
                                       Value procId, Value nprocs, Value &lb,
                                       Value &ub, Value &step);
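// For a cyclic distribution this amounts to (see DistributionMethod::Cyclic):
//   lb   <- lb + procId * step
//   step <- step * nprocs
// with `ub` left unchanged.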

//===----------------------------------------------------------------------===//
// Fusion on tensor utilities
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Generic op region utilities
//===----------------------------------------------------------------------===//

/// A struct containing common matchers over a linalg op's region.
struct RegionMatcher {
  enum class BinaryOpKind {
    IAdd,
  };

  /// Matches the given linalg op if its body is performing a binary operation
  /// on int or float scalar values and returns the binary op kind.
  ///
  /// The linalg op's region is expected to be
  /// ```
  /// {
  ///   ^bb(%a: <scalar-type>, %b: <scalar-type>):
  ///     %0 = <binary-op> %a, %b: <scalar-type>
  ///     linalg.yield %0: <scalar-type>
  /// }
  /// ```
  static std::optional<BinaryOpKind> matchAsScalarBinaryOp(GenericOp op);
};

//===----------------------------------------------------------------------===//
// Loop nest utilities
//===----------------------------------------------------------------------===//

/// Utility class used to generate nested loops with ranges described by
/// `loopRanges` and loop type described by the `iteratorTypes`.
/// `bodyBuilderFn` is used to generate the body of the innermost loop. It is
/// passed a range of loop induction variables and a range of operand values
/// to use.
template <typename LoopTy>
struct GenerateLoopNest {
  static void doit(OpBuilder &b, Location loc, ArrayRef<Range> loopRanges,
                   LinalgOp linalgOp,
                   ArrayRef<utils::IteratorType> iteratorTypes,
                   function_ref<scf::ValueVector(OpBuilder &, Location,
                                                 ValueRange, ValueRange)>
                       bodyBuilderFn,
                   ArrayRef<linalg::ProcInfo> procInfo = {});
};
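// For illustration, an scf.for-based nest could be generated along these
// lines (sketch; `ranges`, `op` and `iterTypes` are assumed to be in scope):
//   GenerateLoopNest<scf::ForOp>::doit(
//       b, loc, ranges, op, iterTypes,
//       [&](OpBuilder &nested, Location nestedLoc, ValueRange ivs,
//           ValueRange operandValues) -> scf::ValueVector {
//         // Build the innermost loop body here.
//         return scf::ValueVector{};
//       });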

/// Returns an attribute list that excludes pre-defined attributes.
template <typename OpTy>
SmallVector<NamedAttribute> getPrunedAttributeList(OpTy op) {
  auto elidedAttrs = llvm::to_vector(op.getAttributeNames());
  if (isa<linalg::LinalgOp>(op.getOperation()))
    elidedAttrs.push_back(LinalgDialect::kMemoizedIndexingMapsAttrName);
  return getPrunedAttributeList(op, elidedAttrs);
}

} // namespace linalg
} // namespace mlir

#endif // MLIR_DIALECT_LINALG_UTILS_UTILS_H