//===- PadTilingInterface.cpp - Padding of TilingInterface ops -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Transforms.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/UB/IR/UBOps.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/TilingInterface.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"

#define DEBUG_TYPE "pad-tiling-interface"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::tensor;

#define DBGS() (llvm::dbgs() << "[" DEBUG_TYPE << "]: ")
#define DBGSNL() (llvm::dbgs() << "\n")

/// Form a "full-rank" padding specification so that it is easy to apply.
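/// For example: with three indexing sizes and `options.paddingSizes = {16}`,
/// this returns {16, 1, 1} when padding to a multiple and
/// {16, indexingSizes[1], indexingSizes[2]} otherwise.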
static SmallVector<OpFoldResult>
getFullRankPaddingSizes(Builder &b, ArrayRef<OpFoldResult> indexingSizes,
                        const PadTilingInterfaceOptions &options) {
  SmallVector<OpFoldResult> paddingSizes;
  // Complete the padding specification to specify all dimensions.
  for (size_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
    // Complete to zero if needed.
    paddingSizes.push_back(options.paddingSizes.size() > idx
                               ? options.paddingSizes[idx]
                               : b.getIndexAttr(0));
    // If a dimension is zero (either specified or completed), replace it by:
    //   - 1 if we are padding to the next multiple of some size;
    //   - indexingSizes[idx] otherwise.
    if (isZeroInteger(paddingSizes[idx])) {
      paddingSizes[idx] =
          options.padToMultipleOf ? b.getIndexAttr(1) : indexingSizes[idx];
    }
    LLVM_DEBUG(DBGS() << "----idx: " << idx << " : " << paddingSizes[idx]
                      << "\n");
  }
  return paddingSizes;
}

/// Extracts the constant multiplier from an affine expression of the form
/// `d * c` or `c * d`, where `d` is an AffineDimExpr and `c` is an
/// AffineConstantExpr. Returns 1 if the expression is not a simple
/// multiplication of a dimension and a constant.
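/// For example: `d0 * 3` and `3 * d0` both return 3, while `d0 + d1` and
/// `d0 * s0` return 1.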
static int64_t extractConstantMultiplier(AffineExpr expr) {
  if (auto binOp = dyn_cast<AffineBinaryOpExpr>(expr)) {
    if (binOp.getKind() == AffineExprKind::Mul) {
      auto lhsD = dyn_cast<AffineDimExpr>(binOp.getLHS());
      auto rhsC = dyn_cast<AffineConstantExpr>(binOp.getRHS());
      if (lhsD && rhsC) {
        return rhsC.getValue();
      }
      auto lhsC = dyn_cast<AffineConstantExpr>(binOp.getLHS());
      auto rhsD = dyn_cast<AffineDimExpr>(binOp.getRHS());
      if (lhsC && rhsD) {
        return lhsC.getValue();
      }
    }
  }
  return 1;
}

/// Compute the padded shape of the given value `v` of `RankedTensorType`,
/// given:
///   - `indexingSizes`, a list of OpFoldResult;
///   - an `indexingMap` that encodes how the shape of `v` varies with
///     increases in `indexingSizes`.
/// The `indexingMap` + `indexingSizes` encoding suits StructuredOps.
/// The implementation below iteratively combines increases from contributing
/// dimensions using affine.apply operations.
/// The padded shape is computed by evaluating the maximum accessed index per
/// dimension, which may involve multiplying by constant factors derived from
/// the affine indexing expressions. Currently, only a limited set of projected
/// permutation indexing maps are supported, such as:
///   - affine_map<(d0, d1, d2) -> (d0, d1)>
///   - affine_map<(d0, d1, d2) -> (d0, d1 + d2)>
///   - affine_map<(d0, d1) -> (d0 * 3 + d1)>
/// In the future, more general interfaces can be devised to encode similar
/// shape evolutions and map between an op and its operands.
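///
/// For example: with indexing map affine_map<(d0, d1) -> (d0 * 3 + d1)>,
/// full-rank padding sizes {4, 8}, and no pad-to-multiple-of, the maximum
/// accessed index is 3 * (4 - 1) + (8 - 1) = 16, so the padded size of the
/// single result dimension is 17.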
SmallVector<OpFoldResult> linalg::computePaddedShape(
    RewriterBase &rewriter, TypedValue<RankedTensorType> v,
    AffineMap indexingMap, ArrayRef<OpFoldResult> indexingSizes,
    const PadTilingInterfaceOptions &options) {
  Location loc = v.getLoc();
  SmallVector<OpFoldResult> paddedShape;
  auto tensorType = cast<RankedTensorType>(v.getType());
  paddedShape.resize_for_overwrite(tensorType.getRank());
  assert(tensorType.getRank() == indexingMap.getNumResults() &&
         "expect the number of results of the affine map to match the tensor "
         "rank");

  // "Full-rank" padding specification.
  SmallVector<OpFoldResult> paddingSizes =
      getFullRankPaddingSizes(rewriter, indexingSizes, options);

  // For each dimension in the operand's shape, iterate over indexingSizes and
  // add the various term contributions.
  for (const auto &enResults : enumerate(indexingMap.getResults())) {
    int64_t resultIndex = enResults.index();
    AffineMap partialIndexingMap = indexingMap.getSubMap(
        ArrayRef<unsigned>{static_cast<unsigned>(resultIndex)});

    LLVM_DEBUG(DBGS() << "----resultIndex: " << resultIndex
                      << " with partialIndexingMap: " << partialIndexingMap
                      << "\n");

    // Find all padding dimensions that contribute to this operand dimension
    // and compute the padded term contribution to the final padded shape.
    SmallVector<OpFoldResult> terms;
    for (size_t paddingDim = 0, e = paddingSizes.size(); paddingDim != e;
         ++paddingDim) {
      OpFoldResult paddingSize = paddingSizes[paddingDim];
      LLVM_DEBUG(DBGS() << "------try apply padding of dim: " << paddingDim
                        << " to: " << paddingSize << "\n");
      if (!enResults.value().isFunctionOfDim(paddingDim))
        continue;

      LLVM_DEBUG(DBGS() << "------apply padding of dim: " << paddingDim
                        << " to: " << paddingSize << "\n");

      // Project non-'paddingDim' dimensions and compress the result.
      llvm::SmallBitVector projectedDims(partialIndexingMap.getNumDims(), true);
      projectedDims.flip(paddingDim);
      AffineMap projectedMap =
          mlir::projectDims(partialIndexingMap, projectedDims,
                            /*compressDims=*/true);
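      // For example: projecting d1 out of (d0, d1) -> (d0 * 3 + d1) and
      // compressing the dims yields (d0) -> (d0 * 3).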

      // If we are padding to the next multiple of `paddingSize`, compose with
      // ceilDiv(size, paddingSize) * paddingSize.
      OpFoldResult paddingDimOfr;
      if (options.padToMultipleOf) {
        AffineExpr d0, s0;
        bindDims(rewriter.getContext(), d0);
        bindSymbols(rewriter.getContext(), s0);
        AffineMap ceilMap = AffineMap::get(1, 1, d0.ceilDiv(s0) * s0);
        AffineMap composedMap = projectedMap.compose(ceilMap);
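        // For example: an indexing size of 10 and a padding size of 8 give
        // ceilDiv(10, 8) * 8 = 16, scaled by any constant multiplier in the
        // projected map.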
        paddingDimOfr = affine::makeComposedFoldedAffineApply(
            rewriter, loc, composedMap,
            {indexingSizes[paddingDim], paddingSize},
            /*composeAffineMin=*/true);
      } else {
        // Otherwise just set to paddingSize.
        paddingDimOfr = affine::makeComposedFoldedAffineApply(
            rewriter, loc, projectedMap, paddingSize);
      }

      // Adjust for the maximum accessed index, which is (paddingSize - 1) *
      // multiplier.
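      // For example: if `paddingDimOfr` is 12 and the multiplier is 3, the
      // maximum accessed index is 12 - 3 = 9.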
      AffineExpr d0;
      bindDims(rewriter.getContext(), d0);
      int64_t multiplier = extractConstantMultiplier(projectedMap.getResult(0));
      AffineMap subtractMap = AffineMap::get(1, 0, d0 - multiplier);
      OpFoldResult maxAccessIdx = affine::makeComposedFoldedAffineApply(
          rewriter, loc, subtractMap, {paddingDimOfr});
      terms.push_back(maxAccessIdx);

      LLVM_DEBUG(DBGS() << "------new term: " << terms.back() << "\n");
    }

    // If there are no terms, just return the dim.
    if (terms.empty()) {
      paddedShape[resultIndex] =
          createFoldedDimOp(rewriter, loc, v, resultIndex);
      continue;
    }

    // Sum individual terms' contributions.
    SmallVector<AffineExpr> dims(terms.size());
    bindDimsList(rewriter.getContext(), MutableArrayRef{dims});
    AffineExpr sumExpr = dims.front();
    for (unsigned i = 1; i < dims.size(); ++i)
      sumExpr = sumExpr + dims[i];
    // Add 1 to the maximum accessed index and get the final padded size.
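    // For example: terms {9, 7} give a padded size of 9 + 7 + 1 = 17.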
    OpFoldResult paddedDimOfr = affine::makeComposedFoldedAffineApply(
        rewriter, loc, sumExpr + 1, terms);
    paddedShape[resultIndex] = paddedDimOfr;
  }

  return paddedShape;
}

FailureOr<SmallVector<OpFoldResult>>
linalg::computeIndexingMapOpInterfacePaddedShape(
    RewriterBase &rewriter, OpOperand &operandToPad,
    ArrayRef<Range> iterationDomain, const PadTilingInterfaceOptions &options) {
  auto transferOp =
      llvm::dyn_cast<IndexingMapOpInterface>(operandToPad.getOwner());
  if (!transferOp)
    return failure();

  // clang-format off
  assert(llvm::all_of(iterationDomain, [&rewriter](Range r) {
    return r.offset == OpFoldResult(rewriter.getIndexAttr(0)) &&
           r.stride == OpFoldResult(rewriter.getIndexAttr(1));
  }) && "expected 0-offset 1-stride loop ranges");
  // clang-format on
  SmallVector<OpFoldResult> loopUpperBounds;
  loopUpperBounds.reserve(iterationDomain.size());
  for (const Range &range : iterationDomain)
    loopUpperBounds.push_back(range.size);

  AffineMap indexingMap = transferOp.getMatchingIndexingMap(&operandToPad);
  return computePaddedShape(
      rewriter, cast<TypedValue<RankedTensorType>>(operandToPad.get()),
      indexingMap, loopUpperBounds, options);
}

/// Pad a single operand `v` to `paddedShape` using `paddingValueAttr` as the
/// padding value.
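/// For example: a `tensor<10x12xf32>` operand padded to a `paddedShape` of
/// {16, 16} with a zero padding value yields a `tensor.pad` producing
/// `tensor<16x16xf32>`.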
static Value padOperand(RewriterBase &rewriter, TilingInterface opToPad,
                        TypedValue<RankedTensorType> v,
                        ArrayRef<OpFoldResult> paddedShape,
                        Attribute paddingValueAttr) {
  Value paddingValue;
  if (auto complexTy =
          dyn_cast<ComplexType>(getElementTypeOrSelf(v.getType()))) {
    if (auto complexAttr = dyn_cast<ArrayAttr>(paddingValueAttr)) {
      paddingValue = complex::ConstantOp::create(rewriter, opToPad.getLoc(),
                                                 complexTy, complexAttr);
    }
  } else if (isa<ub::PoisonAttr>(paddingValueAttr)) {
    paddingValue = ub::PoisonOp::create(rewriter, opToPad.getLoc(),
                                        getElementTypeOrSelf(v.getType()));
  } else if (auto typedAttr = dyn_cast<TypedAttr>(paddingValueAttr)) {
    paddingValue =
        arith::ConstantOp::create(rewriter, opToPad.getLoc(), typedAttr);
  }
  assert(paddingValue && "failed to create value from padding attribute");

  // Pad the operand to the bounding box defined by `paddedShape`.
  SmallVector<int64_t> tensorShape;
  SmallVector<Value> dynDims;
  for (OpFoldResult ofr : paddedShape) {
    std::optional<int64_t> cst = getConstantIntValue(ofr);
    tensorShape.push_back(cst.has_value() ? *cst : ShapedType::kDynamic);
    if (!cst.has_value())
      dynDims.push_back(ofr.dyn_cast<Value>());
  }
  // TODO: use dispatchIndexOpFoldResults(paddedShape, dynDims, paddedShape);

  auto paddedTensorType =
      RankedTensorType::get(tensorShape, getElementTypeOrSelf(v));
  LLVM_DEBUG(DBGS() << "--SUCCESS, makeComposedPadHighOp with type: "
                    << paddedTensorType);
  return makeComposedPadHighOp(rewriter, opToPad.getLoc(), paddedTensorType, v,
                               paddingValue, /*nofold=*/false, dynDims);
}

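/// Example of the overall rewrite: padding a `linalg.matmul` with operands
/// `tensor<10x20xf32>` and `tensor<20x30xf32>` to multiples of 16 creates
/// `tensor.pad` ops producing `tensor<16x32xf32>` and `tensor<32x32xf32>`,
/// clones the matmul onto the padded operands, and recovers the original
/// `tensor<10x30xf32>` result with a `tensor.extract_slice`.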
FailureOr<TilingInterface> linalg::rewriteAsPaddedOp(
    RewriterBase &rewriter, TilingInterface opToPad,
    const PadTilingInterfaceOptions &constOptions,
    SmallVector<tensor::PadOp> &padOps,
    const PadSizeComputationFunction &computePaddingSizeFun) {
  LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");

  Location loc = opToPad.getLoc();
  PadTilingInterfaceOptions options(constOptions);
  // Allow inference of pad values if they are not explicitly specified.
  // TODO: be mindful about the value depending on the actual operation.
  if (options.paddingValues.empty()) {
    SmallVector<Type> types(opToPad->getOperandTypes());
    llvm::append_range(types, opToPad->getResultTypes());
    for (Type t : types) {
      options.paddingValues.push_back(
          rewriter.getZeroAttr(getElementTypeOrSelf(t)));
    }
  }

  if (llvm::any_of(opToPad->getOperands(),
                   [](Value v) { return isa<MemRefType>(v.getType()); })) {
    return rewriter.notifyMatchFailure(opToPad,
                                       "expected operation on tensors");
  }

  OpBuilder::InsertionGuard g(rewriter);
  // Set the insertion point after opToPad because we also take the dims of
  // opToPad's output.
  rewriter.setInsertionPointAfter(opToPad);

  // 1. Get the loopUpperBounds from the TilingInterface.
  SmallVector<Range> iterationDomain = opToPad.getIterationDomain(rewriter);

  // 2. For each operand.
  SmallVector<Value> newOperands;
  newOperands.reserve(opToPad->getNumOperands());
  for (OpOperand &opOperand : opToPad->getOpOperands()) {
    Value operand = opOperand.get();
    LLVM_DEBUG(DBGS() << "--start padding operand: " << operand << "\n");

    // 2.a. Skip scalar-like operands.
    Type operandType = operand.getType();
    if (!isa<RankedTensorType>(operandType)) {
      assert((!isa<ShapedType>(operandType) || isa<VectorType>(operandType)) &&
             "Unexpected non-vector ShapedType");
      newOperands.push_back(operand);
      continue;
    }
    // 2.b. Compute the padded shape.
    FailureOr<SmallVector<OpFoldResult>> maybePaddedShape =
        computePaddingSizeFun(rewriter, opOperand, iterationDomain, options);
    if (failed(maybePaddedShape)) {
      return rewriter.notifyMatchFailure(opToPad, "could not pad op");
    }

    // 2.c. Expect proper `paddingValues`.
    // TODO: we may want to allow garbage padding in the future, in which case
    // we would just not assert.
    if (opOperand.getOperandNumber() >= options.paddingValues.size()) {
      return rewriter.notifyMatchFailure(opToPad,
                                         "--no padding value specified");
    }
    Attribute paddingValueAttr =
        options.paddingValues[opOperand.getOperandNumber()];

    // 2.d. Perform the actual padding.
    Value paddedOperand = padOperand(
        rewriter, opToPad, cast<TypedValue<RankedTensorType>>(operand),
        *maybePaddedShape, paddingValueAttr);
    LLVM_DEBUG(DBGS() << "--done padding operand: " << paddedOperand << "\n");

    // 2.e. Record the padded operand and any pad op that was created.
    newOperands.push_back(paddedOperand);
    if (auto padOp = paddedOperand.getDefiningOp<tensor::PadOp>())
      padOps.push_back(padOp);
  }

  // 3. Form the resulting tensor::ExtractSliceOp.
  ReifiedRankedShapedTypeDims reifiedResultShapes;
  if (failed(reifyResultShapes(rewriter, opToPad, reifiedResultShapes))) {
    LLVM_DEBUG(DBGS() << "--failed to reify result shapes -> FAIL\n");
    return rewriter.notifyMatchFailure(opToPad,
                                       "failed to reify result shapes");
  }
  assert(reifiedResultShapes.size() == opToPad->getNumResults() &&
         "expected same number of results");

  // Clone `opToPad` to operate on the statically padded shapes.
  auto resultTensorTypes =
      ValueRange(newOperands).take_back(opToPad->getNumResults()).getTypes();
  // clone **should** properly notify the rewriter.
  TilingInterface paddedOp =
      clone(rewriter, opToPad, resultTensorTypes, newOperands);
  LLVM_DEBUG(DBGS() << "--cloned padded op: " << paddedOp << "\n");

  // Recover the slice out of the new static results. This keeps the original
  // opToPad around because it uses the dims of the original results.
  SmallVector<Value> paddedSubtensorResults;
  paddedSubtensorResults.reserve(opToPad->getNumResults());
  for (const auto &en : llvm::enumerate(paddedOp->getResults())) {
    Value paddedResult = en.value();
    int64_t resultNumber = en.index();
    int64_t rank = cast<RankedTensorType>(paddedResult.getType()).getRank();
    SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
    SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
    paddedSubtensorResults.push_back(tensor::ExtractSliceOp::create(
        rewriter, loc, paddedResult, offsets, reifiedResultShapes[resultNumber],
        strides));
  }

  rewriter.replaceOp(opToPad, paddedSubtensorResults);

  return paddedOp;
}