//===- TilingInterfaceImpl.cpp - Implementation of TilingInterface -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/TilingInterfaceImpl.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Interfaces/TilingInterface.h"

#include <optional>

using namespace mlir;
using namespace mlir::linalg;

//===----------------------------------------------------------------------===//
// Utility methods for implementation of Tiling Interface for Linalg ops
//===----------------------------------------------------------------------===//

/// Return the SSA values that represent the data point accessed using a given
/// `indexingMap` for a given point in the iteration space represented by `ivs`.
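///
/// For example (illustrative), with `indexingMap = (d0, d1) -> (d1, d0)` and
/// `ivs = (%i, %j)`, the returned indices are the results of `affine.apply`
/// ops computing `(%j, %i)`.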
static SmallVector<Value> getIndicesForAccess(OpBuilder &b, Location loc,
                                              AffineMap indexingMap,
                                              ValueRange ivs) {
  SmallVector<Value> indices;
  indices.reserve(indexingMap.getNumResults());
  for (auto result : indexingMap.getResults()) {
    AffineMap m = AffineMap::get(indexingMap.getNumDims(),
                                 indexingMap.getNumSymbols(), result);
    Value v = b.create<affine::AffineApplyOp>(loc, m, ivs);
    indices.push_back(v);
  }
  return indices;
}

/// Method to inline the payload of a `linalgOp` given the iteration space
/// point and values for the arguments of the payload.
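///
/// Illustrative example: for a `linalg.generic` whose body is
///   ^bb0(%in: f32, %out: f32):
///     %0 = arith.addf %in, %out : f32
///     linalg.yield %0 : f32
/// invoked with `argValues = (%a, %b)`, the `arith.addf` is cloned with
/// %in/%out replaced by %a/%b, and the yielded value is stored into the init
/// buffer at the indices computed from its indexing map and `ivs`.
/// `linalg.index` ops in the body are replaced by the corresponding `ivs`.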
static LogicalResult inlinePayload(OpBuilder &b, LinalgOp linalgOp,
                                   ValueRange ivs, ValueRange argValues) {
  Block *body = linalgOp.getBlock();
  IRMapping map;
  map.map(body->getArguments(), argValues);
  for (auto &op : body->without_terminator()) {
    if (auto indexOp = dyn_cast<IndexOp>(&op)) {
      map.map(indexOp.getResult(), ivs[indexOp.getDim()]);
      continue;
    }
    b.clone(op, map);
  }

  Operation *terminator = body->getTerminator();
  Location loc = terminator->getLoc();
  for (const auto &operand : llvm::enumerate(terminator->getOperands())) {
    Value toStore = map.lookupOrDefault(operand.value());
    OpOperand *storeInto = linalgOp.getDpsInitOperand(operand.index());
    auto indices = getIndicesForAccess(
        b, loc, linalgOp.getMatchingIndexingMap(storeInto), ivs);
    b.create<memref::StoreOp>(
        loc, toStore, linalgOp.getDpsInitOperand(operand.index())->get(),
        indices);
  }
  return success();
}

//===----------------------------------------------------------------------===//
// External Model for implementing `TilingInterface` for `LinalgOp`s.
//===----------------------------------------------------------------------===//

namespace {
/// External model implementation of TilingInterface for LinalgOps. An external
/// model implementation is used for now till the use of `TilingInterface` is
/// on-par with the current Linalg tiling + fusion patterns. Once it is, it may
/// be possible to move this into the op definition (though there are
/// advantages to leaving it as an external model).
template <typename LinalgOpTy>
struct LinalgOpTilingInterface
    : public TilingInterface::ExternalModel<LinalgOpTilingInterface<LinalgOpTy>,
                                            LinalgOpTy> {
  /// Return the loop iterator type.
  SmallVector<utils::IteratorType> getLoopIteratorTypes(Operation *op) const {
    LinalgOpTy concreteOp = cast<LinalgOpTy>(op);
    return concreteOp.getIteratorTypesArray();
  }

  /// Return the iteration domain range.
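  ///
  /// For example (illustrative), for a `linalg.matmul` with an MxK LHS, a KxN
  /// RHS and an MxN init, the flat list of operand dims is (M, K, K, N, M, N);
  /// applying the shapes-to-loops map yields the domain
  /// { [0, M), [0, N), [0, K) } with unit strides.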
  SmallVector<Range> getIterationDomain(Operation *op, OpBuilder &b) const {
    OpBuilder::InsertionGuard g(b);
    b.setInsertionPoint(op);
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);
    SmallVector<OpFoldResult> allShapesSizes =
        linalgOp.createFlatListOfOperandDims(b, loc);
    AffineMap map = linalgOp.getShapesToLoopsMap();

    return llvm::to_vector(
        llvm::map_range(map.getResults(), [&](AffineExpr loopExpr) {
          OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
              b, loc, loopExpr, allShapesSizes);
          return Range{b.getIndexAttr(0), ofr, b.getIndexAttr(1)};
        }));
  }

  /// Instantiate the tiled implementation of the operation.
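  ///
  /// Illustrative sketch: tiling a `linalg.matmul` with `offsets = (%i, %j, 0)`
  /// and `sizes = (4, 8, %K)` produces `tensor.extract_slice` (or
  /// `memref.subview`) ops for each operand, e.g. a 4x%K slice of the LHS and
  /// a %Kx8 slice of the RHS, followed by a clone of the matmul operating on
  /// the slices; the created slice ops are reported in
  /// `TilingResult::generatedSlices`.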
  FailureOr<TilingResult>
  getTiledImplementation(Operation *op, OpBuilder &b,
                         ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes) const {
    // Leave the `sizeBounds` value empty. That is only needed when the `sizes`
    // specified could lead to out of bounds accesses.
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);
    SmallVector<Value> valuesToTile = linalgOp->getOperands();
    SmallVector<Value> tiledOperands = makeTiledShapes(
        b, loc, linalgOp, valuesToTile, offsets, sizes, {}, true);
    SmallVector<Operation *> generatedSlices = llvm::map_to_vector(
        llvm::make_filter_range(
            tiledOperands,
            [](Value v) -> bool {
              return isa_and_nonnull<tensor::ExtractSliceOp, memref::SubViewOp>(
                  v.getDefiningOp());
            }),
        [](Value v) -> Operation * { return v.getDefiningOp(); });

    SmallVector<Type> resultTensorTypes =
        getTensorOutputTypes(linalgOp, tiledOperands);

    Operation *tiledOp = clone(b, linalgOp, resultTensorTypes, tiledOperands);
    offsetIndices(b, cast<LinalgOp>(tiledOp), offsets);

    return TilingResult{
        {tiledOp}, SmallVector<Value>(tiledOp->getResults()), generatedSlices};
  }

  /// Utility to fetch the offsets and sizes when applied as per the indexing
  /// map of the linalg op. This helps in fusing the linalg op as a consumer of
  /// a given slice op.
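  ///
  /// Illustrative example: for an operand accessed through the projected
  /// permutation `(d0, d1, d2) -> (d2, d0)` with operand-tile
  /// `offsets = (%o0, %o1)` and `sizes = (%s0, %s1)`, the mapped result is
  /// `mappedOffsets = (%o1, <full>, %o0)` and `mappedSizes = (%s1, <full>,
  /// %s0)`, where the `<full>` entries for `d1` (not accessed by the map) are
  /// taken from the full iteration domain.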
  void
  getMappedOffsetAndSize(LinalgOp linalgOp, OpBuilder &b, AffineMap indexingMap,
                         ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes,
                         SmallVectorImpl<OpFoldResult> &mappedOffsets,
                         SmallVectorImpl<OpFoldResult> &mappedSizes) const {
    unsigned numLoops = linalgOp.getNumLoops();
    auto tilingInterfaceOp = cast<TilingInterface>(linalgOp.getOperation());
    mappedOffsets.resize(numLoops);
    mappedSizes.resize(numLoops);
    if (!indexingMap.isPermutation()) {
      SmallVector<Range> iterationDomain =
          tilingInterfaceOp.getIterationDomain(b);
      for (const auto &&[index, value] : llvm::enumerate(iterationDomain)) {
        mappedOffsets[index] = value.offset;
        mappedSizes[index] = value.size;
      }
    }
    for (const auto &&[index, value] :
         llvm::enumerate(indexingMap.getResults())) {
      unsigned dimPosition = cast<AffineDimExpr>(value).getPosition();
      mappedOffsets[dimPosition] = offsets[index];
      mappedSizes[dimPosition] = sizes[index];
    }
  }

  /// Method to return the tile of the iteration domain that corresponds to a
  /// given tile of an operand.
  LogicalResult getIterationDomainTileFromOperandTile(
      Operation *op, OpBuilder &b, unsigned operandNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
      SmallVectorImpl<OpFoldResult> &iterDomainOffsets,
      SmallVectorImpl<OpFoldResult> &iterDomainSizes) const {
    auto linalgOp = cast<LinalgOp>(op);

    // Check that the indexing map used for the operand is a projected
    // permutation. This could be relaxed with a more general approach that can
    // map the offsets and sizes from the operand to iteration space tiles
    // (filling in full extent for dimensions not used to access the result).
    AffineMap indexingMap =
        linalgOp.getMatchingIndexingMap(&op->getOpOperand(operandNumber));
    if (!indexingMap.isProjectedPermutation()) {
      return op->emitError()
             << "unhandled get iter domain position when operand is not "
                "accessed using a permuted projection";
    }

    getMappedOffsetAndSize(linalgOp, b, indexingMap, offsets, sizes,
                           iterDomainOffsets, iterDomainSizes);
    return success();
  }

  /// Return the details of the output tile generated by the tiled
  /// implementation.
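  ///
  /// Illustrative example: for a result written through the projection
  /// `(d0, d1, d2) -> (d0, d1)` (e.g. the output of a matmul) with
  /// iteration-space `offsets = (%i, %j, %k)` and `sizes = (4, 8, %sk)`, the
  /// result tile is at `resultOffsets = (%i, %j)` with `resultSizes = (4, 8)`;
  /// the reduction dimension `d2` does not appear in the result tile.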
  LogicalResult
  getResultTilePosition(Operation *op, OpBuilder &b, unsigned resultNumber,
                        ArrayRef<OpFoldResult> offsets,
                        ArrayRef<OpFoldResult> sizes,
                        SmallVector<OpFoldResult> &resultOffsets,
                        SmallVector<OpFoldResult> &resultSizes) const {
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);

    AffineExpr d0;
    bindDims(b.getContext(), d0);
    SmallVector<OpFoldResult> subShapeSizes =
        llvm::to_vector(llvm::map_range(sizes, [&](OpFoldResult ofr) {
          return affine::makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
        }));

    OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
    SliceParameters sliceParams = computeSliceParameters(
        b, loc, outOperand->get(), sizes,
        linalgOp.getMatchingIndexingMap(outOperand), offsets,
        /*ubs*/ {}, subShapeSizes, true);
    resultOffsets = sliceParams.offsets;
    resultSizes = sliceParams.sizes;
    return success();
  }

  LogicalResult getIterationDomainTileFromResultTile(
      Operation *op, OpBuilder &b, unsigned resultNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
      SmallVectorImpl<OpFoldResult> &iterDomainOffsets,
      SmallVectorImpl<OpFoldResult> &iterDomainSizes) const {
    auto linalgOp = cast<LinalgOp>(op);

    // Check that the indexing map used for the output is a projected
    // permutation. This could be relaxed with a more general approach that can
    // map the offsets and sizes from the result to iteration space tiles
    // (filling in full extent for dimensions not used to access the result).
    AffineMap indexingMap =
        linalgOp.getIndexingMapMatchingResult(op->getResult(resultNumber));
    if (!indexingMap.isProjectedPermutation()) {
      return op->emitOpError(
          "unhandled tiled implementation generation when result is not "
          "accessed using a permuted projection");
    }

    getMappedOffsetAndSize(linalgOp, b, indexingMap, offsets, sizes,
                           iterDomainOffsets, iterDomainSizes);
    return success();
  }

  FailureOr<TilingResult>
  generateResultTileValue(Operation *op, OpBuilder &b, unsigned resultNumber,
                          ArrayRef<OpFoldResult> offsets,
                          ArrayRef<OpFoldResult> sizes) const {
    SmallVector<OpFoldResult> mappedOffsets, mappedSizes;
    if (failed(getIterationDomainTileFromResultTile(
            op, b, resultNumber, offsets, sizes, mappedOffsets, mappedSizes))) {
      return failure();
    }
    auto tilingInterfaceOp = cast<TilingInterface>(op);
    FailureOr<TilingResult> tilingResult =
        tilingInterfaceOp.getTiledImplementation(b, mappedOffsets, mappedSizes);

    if (failed(tilingResult))
      return failure();

    if (tilingResult->tiledOps.size() != 1)
      return op->emitOpError("failed to generate tiled implementation");

    return TilingResult{
        tilingResult->tiledOps,
        SmallVector<Value>{tilingResult->tiledValues[resultNumber]},
        tilingResult->generatedSlices};
  }

  /// Method to generate the tiled implementation of an operation from the tile
  /// of the operand.
  FailureOr<TilingResult> getTiledImplementationFromOperandTile(
      Operation *op, OpBuilder &b, unsigned operandNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes) const {
    SmallVector<OpFoldResult> mappedOffsets, mappedSizes;
    if (failed(getIterationDomainTileFromOperandTile(
            op, b, operandNumber, offsets, sizes, mappedOffsets,
            mappedSizes))) {
      return failure();
    }
    return getTiledImplementation(op, b, mappedOffsets, mappedSizes);
  }

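  /// Generate the scalar implementation of the op at the iteration-space point
  /// `ivs`: loads from the buffer operands, the inlined payload, and stores
  /// into the init buffers.
  ///
  /// Illustrative sketch: for a buffer-semantics `linalg.generic` adding two
  /// memrefs, the body generated at point (%i, %j) is roughly
  ///   %a = memref.load %lhs[%i, %j]
  ///   %b = memref.load %rhs[%i, %j]
  ///   %c = arith.addf %a, %b
  ///   memref.store %c, %out[%i, %j]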
  LogicalResult generateScalarImplementation(Operation *op, OpBuilder &builder,
                                             Location loc,
                                             ValueRange ivs) const {
    auto linalgOp = cast<LinalgOp>(op);
    if (!linalgOp.hasPureBufferSemantics())
      return op->emitOpError("expected operation to have buffer semantics");

    SmallVector<Value> indexedValues;
    indexedValues.reserve(linalgOp->getNumOperands());
    Location linalgOpLoc = op->getLoc();
    /// Load the data corresponding to the block arguments that
    /// represent input operands.
    for (OpOperand &operand : linalgOp->getOpOperands()) {
      if (!linalgOp.payloadUsesValueFromOperand(&operand)) {
        indexedValues.push_back(nullptr);
        continue;
      }
      if (linalgOp.isScalar(&operand)) {
        indexedValues.push_back(operand.get());
        continue;
      }
      SmallVector<Value> indices = getIndicesForAccess(
          builder, linalgOpLoc, linalgOp.getMatchingIndexingMap(&operand), ivs);
      Value load =
          builder.create<memref::LoadOp>(linalgOpLoc, operand.get(), indices);
      indexedValues.push_back(load);
    }

    /// Inline the op payload and store the result.
    return inlinePayload(builder, linalgOp, ivs, indexedValues);
  }
};

//===----------------------------------------------------------------------===//
// External Model for implementing `PartialReductionInterface` for `LinalgOp`s.
//===----------------------------------------------------------------------===//

/// External model implementation of PartialReductionInterface for LinalgOps.
template <typename LinalgOpTy>
struct LinalgOpPartialReductionInterface
    : public PartialReductionOpInterface::ExternalModel<
          LinalgOpPartialReductionInterface<LinalgOpTy>, LinalgOpTy> {
  FailureOr<SmallVector<Value>> generateInitialTensorForPartialReduction(
      Operation *op, OpBuilder &b, Location loc, ArrayRef<OpFoldResult> sizes,
      ArrayRef<int> reductionDims) const {
    auto linalgOp = cast<LinalgOp>(op);
    OpBuilder::InsertionGuard guard(b);

    if (linalgOp.hasPureBufferSemantics())
      return op->emitOpError("expected operation to have tensor semantics");

    SmallVector<Value> inits;
    for (int initIdx = 0, e = linalgOp.getNumDpsInits(); initIdx < e;
         ++initIdx) {
      // Insert the new parallel dimension based on the index of the reduction
      // loops. This could be controlled by the user for more flexibility.
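      // Illustrative example: for a matmul-like op with loops (i, j, k) where
      // k is the reduction (reductionDims = {2}) tiled with sizes[2] = %tk, an
      // MxN init becomes an MxNx%tk tensor filled with the neutral element of
      // the combiner (e.g. 0.0 for arith.addf).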
      SmallVector<Operation *, 4> combinerOps;
      if (!matchReduction(linalgOp.getRegionOutputArgs(), initIdx,
                          combinerOps) ||
          combinerOps.size() != 1)
        return op->emitOpError("Failed to analyze the reduction operation.");

      Operation *reductionOp = combinerOps[0];
      std::optional<TypedAttr> identity = arith::getNeutralElement(reductionOp);
      if (!identity.has_value())
        return op->emitOpError(
            "Failed to get an identity value for the reduction operation.");

      ArrayRef<int64_t> oldShape =
          linalgOp.getShape(linalgOp.getDpsInitOperand(initIdx));

      // Calculate the new shape; we insert the new dimensions based on the
      // index of the reduction dimensions.
      SmallVector<int64_t> newOutputShape;
      SmallVector<Value> dynamicDims;
      int64_t currReductionDims = 0;
      DenseSet<int> reductionDimsSet(reductionDims.begin(),
                                     reductionDims.end());
      for (int64_t idx :
           llvm::seq<int64_t>(0, oldShape.size() + reductionDims.size())) {
        if (reductionDimsSet.contains(idx)) {
          dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape);
          currReductionDims++;
          continue;
        }
        int64_t oldIdx = idx - currReductionDims;
        int64_t dim = oldShape[oldIdx];
        newOutputShape.push_back(dim);
        if (ShapedType::isDynamic(dim))
          dynamicDims.push_back(b.create<tensor::DimOp>(
              loc, linalgOp.getDpsInitOperand(initIdx)->get(), oldIdx));
      }
      Value emptyTensor = b.create<tensor::EmptyOp>(
          loc, newOutputShape,
          linalgOp.getRegionOutputArgs()[initIdx].getType(), dynamicDims);
      Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
      auto identityTensor =
          b.create<linalg::FillOp>(loc, constantOp, emptyTensor);
      inits.push_back(identityTensor.getResult(0));
    }

    return inits;
  }

  FailureOr<TilingResult>
  tileToPartialReduction(Operation *op, OpBuilder &b, Location loc,
                         ValueRange init, ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes,
                         ArrayRef<int> reductionDims) const {
    OpBuilder::InsertionGuard guard(b);
    auto linalgOp = cast<LinalgOp>(op);

    // Step 1. Extend the init maps with the reduction dimensions, since we are
    // converting them to parallel dimensions.
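    // Illustrative example: with reductionDims = {2}, an init map
    // (d0, d1, d2) -> (d0, d1) is extended to (d0, d1, d2) -> (d0, d1, d2), so
    // partial results along the reduction dimension are kept in a separate
    // (now parallel) dimension of the init tensor.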
    SmallVector<AffineMap> newInitMaps;
    newInitMaps.reserve(linalgOp.getNumDpsInits());
    for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
      // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
      // this with a for range loop when we have it.
      AffineMap newMap =
          linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(idx));
      for (int redPos : reductionDims) {
        newMap = newMap.insertResult(b.getAffineDimExpr(redPos),
                                     newMap.getNumResults());
      }
      newInitMaps.push_back(newMap);
    }

    // Step 2a: Extract a slice of the input operands.
    SmallVector<Value> tiledInputs = makeTiledShapes(
        b, loc, linalgOp, linalgOp.getDpsInputs(), offsets, sizes, {}, true);
    SmallVector<Operation *> generatedSlices = llvm::map_to_vector(
        llvm::make_filter_range(
            tiledInputs, [](Value v) -> bool { return v.getDefiningOp(); }),
        [](Value v) -> Operation * { return v.getDefiningOp(); });

    // Step 2b: Extract a slice of the init operands.
    SmallVector<Value, 1> tiledInits;
    for (auto [valueMap, valueToTile] : llvm::zip_equal(newInitMaps, init)) {
      int64_t initRank = valueMap.getNumResults();
      SmallVector<OpFoldResult> initOffset(initRank, b.getIndexAttr(0));
      SmallVector<OpFoldResult> initStride(initRank, b.getIndexAttr(1));
      SmallVector<OpFoldResult> initSizes;
      for (AffineExpr dimExpr : valueMap.getResults()) {
        auto dim = cast<AffineDimExpr>(dimExpr);
        initSizes.push_back(sizes[dim.getPosition()]);
      }
      // TODO: Use SubsetExtractOpInterface here once available.
      auto extractSlice = b.create<tensor::ExtractSliceOp>(
          loc, valueToTile, initOffset, initSizes, initStride);
      tiledInits.push_back(extractSlice);
      generatedSlices.push_back(extractSlice);
    }

    // Update the indexing maps.
    SmallVector<AffineMap> newMaps = linalgOp.getIndexingMapsArray();
    // Change the init maps.
    for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
      // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
      // this with a for range loop when we have it.
      OpOperand *initOperand = linalgOp.getDpsInitOperand(idx);
      int64_t mapIdx = linalgOp.getIndexingMapIndex(initOperand);
      newMaps[mapIdx] = newInitMaps[idx];
    }

    // Step 3. Change the reduction dim iterator types.
    SmallVector<utils::IteratorType> newIteratorTypes =
        linalgOp.getIteratorTypesArray();
    for (int dim : reductionDims)
      newIteratorTypes[dim] = utils::IteratorType::parallel;

    // Step 4. Create the new generic op.
    auto genericOp =
        b.create<GenericOp>(loc, ValueRange(tiledInits).getTypes(), tiledInputs,
                            tiledInits, newMaps, newIteratorTypes);
    IRMapping mapping;
    op->getRegion(0).cloneInto(&genericOp.getRegion(),
                               genericOp.getRegion().begin(), mapping);
    return TilingResult{
        {genericOp.getOperation()},
        llvm::map_to_vector(genericOp->getResults(),
                            [](OpResult r) -> Value { return r; }),
        generatedSlices};
  }

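  /// Merge the partial reduction results in `partialReduce` back into the
  /// original inits by reducing along `reductionDims` with the op's combiner.
  ///
  /// Illustrative sketch: for an `arith.addf`-combined reduction whose partial
  /// result carries an extra parallel dimension, this emits roughly
  ///   linalg.reduce { arith.addf } ins(%partial) outs(%init)
  ///     dimensions = [...]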
  FailureOr<MergeResult> mergeReductions(Operation *op, OpBuilder &b,
                                         Location loc, ValueRange partialReduce,
                                         ArrayRef<int> reductionDims) const {
    auto linalgOp = cast<LinalgOp>(op);
    SmallVector<int64_t> reductionDimsInt64(reductionDims);
    auto reduction = b.create<linalg::ReduceOp>(
        loc, partialReduce, linalgOp.getDpsInits(), reductionDimsInt64,
        [&linalgOp](OpBuilder &b, Location loc, ValueRange inputs) {
          int64_t numInits = linalgOp.getNumDpsInits();
          SmallVector<Value> yieldedValues;
          for (int idx : llvm::seq<int>(0, numInits)) {
            // Get the combiner op.
            SmallVector<Operation *, 4> combinerOps;
            matchReduction(linalgOp.getRegionOutputArgs(), idx, combinerOps);
            Operation *clonedReductionOp = b.clone(*combinerOps[0]);
            // Combine the input at idx and output at numInits + idx.
            clonedReductionOp->setOperand(0, inputs[idx]);
            clonedReductionOp->setOperand(1, inputs[numInits + idx]);
            // Yield.
            yieldedValues.push_back(clonedReductionOp->getResult(0));
          }
          b.create<linalg::YieldOp>(loc, yieldedValues);
        });
    return MergeResult{
        {reduction.getOperation()},
        llvm::map_to_vector(reduction->getResults(),
                            [](OpResult r) -> Value { return r; })};
  }
};

} // namespace

template <typename OpType>
static void registerOne(MLIRContext *ctx) {
  OpType::template attachInterface<LinalgOpTilingInterface<OpType>>(*ctx);
  OpType::template attachInterface<LinalgOpPartialReductionInterface<OpType>>(
      *ctx);
}

/// Variadic helper function.
template <typename... OpTypes>
static void registerAll(MLIRContext *ctx) {
  (registerOne<OpTypes>(ctx), ...);
}

#define GET_OP_LIST

void mlir::linalg::registerTilingInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
    registerOne<linalg::GenericOp>(ctx);
    registerAll<
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
        >(ctx);
  });
}
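
// Illustrative usage: clients attach these external models by registering the
// extension before creating a context, e.g.
//   DialectRegistry registry;
//   linalg::registerTilingInterfaceExternalModels(registry);
//   MLIRContext context(registry);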