//===- TilingInterfaceImpl.cpp - Implementation of TilingInterface -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/TilingInterfaceImpl.h"

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Interfaces/TilingInterface.h"

#include <optional>

using namespace mlir;
using namespace mlir::linalg;

//===----------------------------------------------------------------------===//
// Utility methods for implementation of Tiling Interface for Linalg ops
//===----------------------------------------------------------------------===//

/// Return the SSA values that represent the data point accessed using a given
/// `indexingMap` for a given point in the iteration space represented by `ivs`.
static SmallVector<Value> getIndicesForAccess(OpBuilder &b, Location loc,
                                              AffineMap indexingMap,
                                              ValueRange ivs) {
  SmallVector<Value> indices;
  indices.reserve(indexingMap.getNumResults());
  for (auto result : indexingMap.getResults()) {
    AffineMap m = AffineMap::get(indexingMap.getNumDims(),
                                 indexingMap.getNumSymbols(), result);
    Value v = b.create<affine::AffineApplyOp>(loc, m, ivs);
    indices.push_back(v);
  }
  return indices;
}
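// Example (illustrative values, not taken from any particular lowering): with
// indexingMap = (d0, d1) -> (d1, d0) and ivs = {%i, %j}, this builds one
// single-result affine map per expression and materializes affine.apply ops
// evaluating to {%j, %i}, which the callers below use as load/store indices.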

/// Method to inline the payload of a `linalgOp` given the iteration space
/// point and values for the arguments of the payload.
static LogicalResult inlinePayload(OpBuilder &b, LinalgOp linalgOp,
                                   ValueRange ivs, ValueRange argValues) {
  Block *body = linalgOp.getBlock();
  IRMapping map;
  map.map(body->getArguments(), argValues);
  for (auto &op : body->without_terminator()) {
    if (auto indexOp = dyn_cast<IndexOp>(&op)) {
      map.map(indexOp.getResult(), ivs[indexOp.getDim()]);
      continue;
    }
    b.clone(op, map);
  }

  Operation *terminator = body->getTerminator();
  Location loc = terminator->getLoc();
  for (const auto &operand : llvm::enumerate(terminator->getOperands())) {
    Value toStore = map.lookupOrDefault(operand.value());
    OpOperand *storeInto = linalgOp.getDpsInitOperand(operand.index());
    auto indices = getIndicesForAccess(
        b, loc, linalgOp.getMatchingIndexingMap(storeInto), ivs);
    b.create<memref::StoreOp>(
        loc, toStore, linalgOp.getDpsInitOperand(operand.index())->get(),
        indices);
  }
  return success();
}

//===----------------------------------------------------------------------===//
// External Model for implementing `TilingInterface` for `LinalgOp`s.
//===----------------------------------------------------------------------===//

namespace {
/// External model implementation of TilingInterface for LinalgOps. An external
/// model implementation is used for now till the use of `TilingInterface` is
/// on-par with the current Linalg tiling + fusion patterns. Once it is, it may
/// be possible to move this into the op definition (though there are
/// advantages to leaving it as an external model).
template <typename LinalgOpTy>
struct LinalgOpTilingInterface
    : public TilingInterface::ExternalModel<LinalgOpTilingInterface<LinalgOpTy>,
                                            LinalgOpTy> {
  /// Return the loop iterator type.
  SmallVector<utils::IteratorType> getLoopIteratorTypes(Operation *op) const {
    LinalgOpTy concreteOp = cast<LinalgOpTy>(op);
    return concreteOp.getIteratorTypesArray();
  }

  /// Return the iteration domain range.
  SmallVector<Range> getIterationDomain(Operation *op, OpBuilder &b) const {
    OpBuilder::InsertionGuard g(b);
    b.setInsertionPoint(op);
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);
    SmallVector<OpFoldResult> allShapesSizes =
        linalgOp.createFlatListOfOperandDims(b, loc);
    AffineMap map = linalgOp.getShapesToLoopsMap();

    return llvm::to_vector(
        llvm::map_range(map.getResults(), [&](AffineExpr loopExpr) {
          OpFoldResult ofr = affine::makeComposedFoldedAffineApply(
              b, loc, loopExpr, allShapesSizes);
          return Range{b.getIndexAttr(0), ofr, b.getIndexAttr(1)};
        }));
  }
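  // Example (sketch): for a linalg.matmul with MxK and KxN inputs, the
  // shapes-to-loops map folds the flat list of operand sizes into the three
  // loop extents, so the returned domain is expected to be
  // {[0, M, 1], [0, N, 1], [0, K, 1]} (offset 0 and stride 1 for every loop).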

  /// Instantiate the tiled implementation of the operation.
  FailureOr<TilingResult>
  getTiledImplementation(Operation *op, OpBuilder &b,
                         ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes) const {
    // Leave the `sizeBounds` value empty. That is only needed when the `sizes`
    // specified could lead to out of bounds accesses.
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);
    SmallVector<Value> valuesToTile = linalgOp->getOperands();
    SmallVector<Value, 4> tiledOperands = makeTiledShapes(
        b, loc, linalgOp, valuesToTile, offsets, sizes, {}, true);

    SmallVector<Type> resultTensorTypes =
        getTensorOutputTypes(linalgOp, tiledOperands);

    Operation *tiledOp = clone(b, linalgOp, resultTensorTypes, tiledOperands);
    offsetIndices(b, cast<LinalgOp>(tiledOp), offsets);

    return TilingResult{{tiledOp}, SmallVector<Value>(tiledOp->getResults())};
  }
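  // Example (sketch): tiling that matmul with offsets {%om, %on, 0} and sizes
  // {32, 64, K} makes makeTiledShapes build extract_slice/subview ops for the
  // 32xK, Kx64 and 32x64 operand tiles, clones the op onto those slices, and
  // offsetIndices shifts any linalg.index results by the tile offsets.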

  /// Utility to fetch the offsets and sizes when applied as per the indexing
  /// map of the linalg op. This helps in fusing the linalg op as a consumer of
  /// a given slice op.
  void
  getMappedOffsetAndSize(LinalgOp linalgOp, OpBuilder &b, AffineMap indexingMap,
                         ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes,
                         SmallVectorImpl<OpFoldResult> &mappedOffsets,
                         SmallVectorImpl<OpFoldResult> &mappedSizes) const {
    unsigned numLoops = linalgOp.getNumLoops();
    auto tilingInterfaceOp = cast<TilingInterface>(linalgOp.getOperation());
    mappedOffsets.resize(numLoops);
    mappedSizes.resize(numLoops);
    if (!indexingMap.isPermutation()) {
      SmallVector<Range> iterationDomain =
          tilingInterfaceOp.getIterationDomain(b);
      for (const auto &&[index, value] : llvm::enumerate(iterationDomain)) {
        mappedOffsets[index] = value.offset;
        mappedSizes[index] = value.size;
      }
    }
    for (const auto &&[index, value] :
         llvm::enumerate(indexingMap.getResults())) {
      unsigned dimPosition = cast<AffineDimExpr>(value).getPosition();
      mappedOffsets[dimPosition] = offsets[index];
      mappedSizes[dimPosition] = sizes[index];
    }
  }
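  // Example (sketch): with indexingMap = (d0, d1) -> (d1, d0), operand offsets
  // {%o0, %o1} and sizes {%s0, %s1} map to iteration-space offsets {%o1, %o0}
  // and sizes {%s1, %s0}. For a projected permutation that drops some loops,
  // the unmapped loops keep the full offset/size taken from the iteration
  // domain computed above.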

  /// Method to return the iteration domain tile corresponding to a given tile
  /// of an operand.
  LogicalResult getIterationDomainTileFromOperandTile(
      Operation *op, OpBuilder &b, unsigned operandNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
      SmallVectorImpl<OpFoldResult> &iterDomainOffsets,
      SmallVectorImpl<OpFoldResult> &iterDomainSizes) const {
    auto linalgOp = cast<LinalgOp>(op);

    // Check that the indexing map used for the operand is a projected
    // permutation. This could be relaxed with a more general approach that can
    // map the offsets and sizes from the operand to iteration space tiles
    // (filling in full extent for dimensions not used to access the result).
    AffineMap indexingMap =
        linalgOp.getMatchingIndexingMap(&op->getOpOperand(operandNumber));
    if (!indexingMap.isProjectedPermutation()) {
      return op->emitError()
             << "unhandled get iter domain position when operand is not "
                "accessed using a permuted projection";
    }

    getMappedOffsetAndSize(linalgOp, b, indexingMap, offsets, sizes,
                           iterDomainOffsets, iterDomainSizes);
    return success();
  }

  /// Return the details of the output tile generated by the tiled
  /// implementation.
  LogicalResult
  getResultTilePosition(Operation *op, OpBuilder &b, unsigned resultNumber,
                        ArrayRef<OpFoldResult> offsets,
                        ArrayRef<OpFoldResult> sizes,
                        SmallVector<OpFoldResult> &resultOffsets,
                        SmallVector<OpFoldResult> &resultSizes) const {
    Location loc = op->getLoc();
    LinalgOp linalgOp = cast<LinalgOp>(op);

    AffineExpr d0;
    bindDims(b.getContext(), d0);
    SmallVector<OpFoldResult> subShapeSizes =
        llvm::to_vector(llvm::map_range(sizes, [&](OpFoldResult ofr) {
          return affine::makeComposedFoldedAffineApply(b, loc, d0 - 1, ofr);
        }));

    OpOperand *outOperand = linalgOp.getDpsInitOperand(resultNumber);
    SliceParameters sliceParams = computeSliceParameters(
        b, loc, outOperand->get(), sizes,
        linalgOp.getMatchingIndexingMap(outOperand), offsets,
        /*ubs*/ {}, subShapeSizes, true);
    resultOffsets = sliceParams.offsets;
    resultSizes = sliceParams.sizes;
    return success();
  }

  LogicalResult getIterationDomainTileFromResultTile(
      Operation *op, OpBuilder &b, unsigned resultNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes,
      SmallVectorImpl<OpFoldResult> &iterDomainOffsets,
      SmallVectorImpl<OpFoldResult> &iterDomainSizes) const {
    auto linalgOp = cast<LinalgOp>(op);

    // Check that the indexing map used for the output is a projected
    // permutation. This could be relaxed with a more general approach that can
    // map the offsets and sizes from the result to iteration space tiles
    // (filling in full extent for dimensions not used to access the result).
    AffineMap indexingMap =
        linalgOp.getIndexingMapMatchingResult(op->getResult(resultNumber));
    if (!indexingMap.isProjectedPermutation()) {
      return op->emitOpError(
          "unhandled tiled implementation generation when result is not "
          "accessed using a permuted projection");
    }

    getMappedOffsetAndSize(linalgOp, b, indexingMap, offsets, sizes,
                           iterDomainOffsets, iterDomainSizes);
    return success();
  }

  FailureOr<TilingResult>
  generateResultTileValue(Operation *op, OpBuilder &b, unsigned resultNumber,
                          ArrayRef<OpFoldResult> offsets,
                          ArrayRef<OpFoldResult> sizes) const {
    SmallVector<OpFoldResult> mappedOffsets, mappedSizes;
    if (failed(getIterationDomainTileFromResultTile(
            op, b, resultNumber, offsets, sizes, mappedOffsets, mappedSizes))) {
      return failure();
    }
    auto tilingInterfaceOp = cast<TilingInterface>(op);
    FailureOr<TilingResult> tilingResult =
        tilingInterfaceOp.getTiledImplementation(b, mappedOffsets, mappedSizes);

    if (failed(tilingResult))
      return failure();

    if (tilingResult->tiledOps.size() != 1)
      return op->emitOpError("failed to generate tiled implementation");

    return TilingResult{
        tilingResult->tiledOps,
        SmallVector<Value>{tilingResult->tiledValues[resultNumber]}};
  }

  /// Method to generate the tiled implementation of an operation from the tile
  /// of the operand.
  FailureOr<TilingResult> getTiledImplementationFromOperandTile(
      Operation *op, OpBuilder &b, unsigned operandNumber,
      ArrayRef<OpFoldResult> offsets, ArrayRef<OpFoldResult> sizes) const {
    SmallVector<OpFoldResult> mappedOffsets, mappedSizes;
    if (failed(getIterationDomainTileFromOperandTile(
            op, b, operandNumber, offsets, sizes, mappedOffsets,
            mappedSizes))) {
      return failure();
    }
    return getTiledImplementation(op, b, mappedOffsets, mappedSizes);
  }

  LogicalResult generateScalarImplementation(Operation *op, OpBuilder &builder,
                                             Location loc,
                                             ValueRange ivs) const {
    auto linalgOp = cast<LinalgOp>(op);
    if (!linalgOp.hasPureBufferSemantics())
      return op->emitOpError("expected operation to have buffer semantics");

    SmallVector<Value> indexedValues;
    indexedValues.reserve(linalgOp->getNumOperands());
    Location linalgOpLoc = op->getLoc();
    /// Load the data corresponding to the block arguments that
    /// represent input operands.
    for (OpOperand &operand : linalgOp->getOpOperands()) {
      if (!linalgOp.payloadUsesValueFromOperand(&operand)) {
        indexedValues.push_back(nullptr);
        continue;
      }
      if (linalgOp.isScalar(&operand)) {
        indexedValues.push_back(operand.get());
        continue;
      }
      SmallVector<Value> indices = getIndicesForAccess(
          builder, linalgOpLoc, linalgOp.getMatchingIndexingMap(&operand), ivs);
      Value load =
          builder.create<memref::LoadOp>(linalgOpLoc, operand.get(), indices);
      indexedValues.push_back(load);
    }

    /// Inline the op payload and store the result.
    return inlinePayload(builder, linalgOp, ivs, indexedValues);
  }
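  // Example (sketch): for each iteration point `ivs` this emits a memref.load
  // of every used input at the indices given by its indexing map, inlines the
  // payload on those values, and memref.store's the terminator operands into
  // the inits; the enclosing loop nest over `ivs` is built by the caller.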
};

//===----------------------------------------------------------------------===//
// External Model for implementing `PartialReductionInterface` for `LinalgOp`s.
//===----------------------------------------------------------------------===//

/// External model implementation of PartialReductionInterface for LinalgOps.
template <typename LinalgOpTy>
struct LinalgOpPartialReductionInterface
    : public PartialReductionOpInterface::ExternalModel<
          LinalgOpPartialReductionInterface<LinalgOpTy>, LinalgOpTy> {
  FailureOr<SmallVector<Value>> generateInitialTensorForPartialReduction(
      Operation *op, OpBuilder &b, Location loc, ArrayRef<OpFoldResult> sizes,
      ArrayRef<int> reductionDims) const {
    auto linalgOp = cast<LinalgOp>(op);
    OpBuilder::InsertionGuard guard(b);

    if (linalgOp.hasPureBufferSemantics())
      return op->emitOpError("expected operation to have tensor semantics");

    SmallVector<Value> inits;
    for (int initIdx = 0, e = linalgOp.getNumDpsInits(); initIdx < e;
         ++initIdx) {
      // Insert the new parallel dimension based on the index of the reduction
      // loops. This could be controlled by the user for more flexibility.
      SmallVector<Operation *, 4> combinerOps;
      if (!matchReduction(linalgOp.getRegionOutputArgs(), initIdx,
                          combinerOps) ||
          combinerOps.size() != 1)
        return op->emitOpError("Failed to analyze the reduction operation.");

      Operation *reductionOp = combinerOps[0];
      std::optional<TypedAttr> identity = arith::getNeutralElement(reductionOp);
      if (!identity.has_value())
        return op->emitOpError(
            "Failed to get an identity value for the reduction operation.");

      ArrayRef<int64_t> oldShape =
          linalgOp.getShape(linalgOp.getDpsInitOperand(initIdx));

      // Calculate the new shape; the new dimensions are inserted based on the
      // index of the reduction dimensions.
      SmallVector<int64_t> newOutputShape;
      SmallVector<Value> dynamicDims;
      int64_t currReductionDims = 0;
      DenseSet<int> reductionDimsSet(reductionDims.begin(),
                                     reductionDims.end());
      for (int64_t idx :
           llvm::seq<int64_t>(0, oldShape.size() + reductionDims.size())) {
        if (reductionDimsSet.contains(idx)) {
          dispatchIndexOpFoldResults(sizes[idx], dynamicDims, newOutputShape);
          currReductionDims++;
          continue;
        }
        int64_t oldIdx = idx - currReductionDims;
        int64_t dim = oldShape[oldIdx];
        newOutputShape.push_back(dim);
        if (ShapedType::isDynamic(dim))
          dynamicDims.push_back(b.create<tensor::DimOp>(
              loc, linalgOp.getDpsInitOperand(initIdx)->get(), oldIdx));
      }
      Value emptyTensor = b.create<tensor::EmptyOp>(
          loc, newOutputShape,
          linalgOp.getRegionOutputArgs()[initIdx].getType(), dynamicDims);
      Value constantOp = b.create<arith::ConstantOp>(loc, *identity);
      auto identityTensor =
          b.create<linalg::FillOp>(loc, constantOp, emptyTensor);
      inits.push_back(identityTensor.getResult(0));
    }

    return inits;
  }
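  // Example (sketch): for a matmul-like reduction over loop k (reductionDims =
  // {2}) with sizes[2] = 8, an MxN init grows to an MxNx8 tensor.empty filled
  // with the combiner's neutral element (0.0 for arith.addf); the extra
  // dimension holds the partial results that mergeReductions combines later.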

  FailureOr<TilingResult>
  tileToPartialReduction(Operation *op, OpBuilder &b, Location loc,
                         ValueRange init, ArrayRef<OpFoldResult> offsets,
                         ArrayRef<OpFoldResult> sizes,
                         ArrayRef<int> reductionDims) const {
    OpBuilder::InsertionGuard guard(b);
    auto linalgOp = cast<LinalgOp>(op);

    // Step 1. Extend init maps to include the reduction dims, since we are
    // converting them to parallel dimensions.
    SmallVector<AffineMap> newInitMaps;
    newInitMaps.reserve(linalgOp.getNumDpsInits());
    for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
      // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
      // this with a for range loop when we have it.
      AffineMap newMap =
          linalgOp.getMatchingIndexingMap(linalgOp.getDpsInitOperand(idx));
      for (int redPos : reductionDims) {
        newMap = newMap.insertResult(b.getAffineDimExpr(redPos),
                                     newMap.getNumResults());
      }
      newInitMaps.push_back(newMap);
    }

    // Step 2a: Extract a slice of the input operands.
    SmallVector<Value> tiledInputs = makeTiledShapes(
        b, loc, linalgOp, linalgOp.getDpsInputs(), offsets, sizes, {}, true);

    // Step 2b: Extract a slice of the init operands.
    SmallVector<Value, 1> tiledInits;
    for (auto [valueMap, valueToTile] : llvm::zip_equal(newInitMaps, init)) {
      int64_t initRank = valueMap.getNumResults();
      SmallVector<OpFoldResult> initOffset(initRank, b.getIndexAttr(0));
      SmallVector<OpFoldResult> initStride(initRank, b.getIndexAttr(1));
      SmallVector<OpFoldResult> initSizes;
      for (AffineExpr dimExpr : valueMap.getResults()) {
        auto dim = cast<AffineDimExpr>(dimExpr);
        initSizes.push_back(sizes[dim.getPosition()]);
      }
      // TODO: Use SubsetExtractOpInterface here once available.
      auto extractSlice = b.create<tensor::ExtractSliceOp>(
          loc, valueToTile, initOffset, initSizes, initStride);
      tiledInits.push_back(extractSlice);
    }

    // Update the indexing maps.
    SmallVector<AffineMap> newMaps = linalgOp.getIndexingMapsArray();
    // Change the init maps.
    for (int idx : llvm::seq<int>(0, linalgOp.getNumDpsInits())) {
      // TODO: linalg::Generic doesn't have getDpsInitOperands. Can replace
      // this with a for range loop when we have it.
      OpOperand *initOperand = linalgOp.getDpsInitOperand(idx);
      int64_t mapIdx = linalgOp.getIndexingMapIndex(initOperand);
      newMaps[mapIdx] = newInitMaps[idx];
    }

    // Step 3. Change the reduction dim iterator types.
    SmallVector<utils::IteratorType> newIteratorTypes =
        linalgOp.getIteratorTypesArray();
    for (int dim : reductionDims)
      newIteratorTypes[dim] = utils::IteratorType::parallel;

    // Step 4. Create the new generic op.
    auto genericOp =
        b.create<GenericOp>(loc, ValueRange(tiledInits).getTypes(), tiledInputs,
                            tiledInits, newMaps, newIteratorTypes);
    IRMapping mapping;
    op->getRegion(0).cloneInto(&genericOp.getRegion(),
                               genericOp.getRegion().begin(), mapping);
    return TilingResult{
        {genericOp.getOperation()},
        llvm::map_to_vector(genericOp->getResults(),
                            [](OpResult r) -> Value { return r; })};
  }
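  // Example (sketch): continuing the matmul case, the init indexing map gains
  // the reduction dim as an extra result ((d0, d1, d2) -> (d0, d1) becomes
  // (d0, d1, d2) -> (d0, d1, d2)), d2 is flipped from reduction to parallel,
  // and the new linalg.generic writes each partial sum into its own slice of
  // the MxNx8 accumulator.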

  FailureOr<MergeResult> mergeReductions(Operation *op, OpBuilder &b,
                                         Location loc, ValueRange partialReduce,
                                         ArrayRef<int> reductionDims) const {
    auto linalgOp = cast<LinalgOp>(op);
    SmallVector<int64_t> reductionDimsInt64(reductionDims.begin(),
                                            reductionDims.end());
    auto reduction = b.create<linalg::ReduceOp>(
        loc, partialReduce, linalgOp.getDpsInits(), reductionDimsInt64,
        [&linalgOp](OpBuilder &b, Location loc, ValueRange inputs) {
          int64_t numInits = linalgOp.getNumDpsInits();
          SmallVector<Value> yieldedValues;
          for (int idx : llvm::seq<int>(0, numInits)) {
            // Get the combiner op.
            SmallVector<Operation *, 4> combinerOps;
            matchReduction(linalgOp.getRegionOutputArgs(), idx, combinerOps);
            Operation *clonedReductionOp = b.clone(*combinerOps[0]);
            // Combine the input at idx and output at numInits + idx.
            clonedReductionOp->setOperand(0, inputs[idx]);
            clonedReductionOp->setOperand(1, inputs[numInits + idx]);
            // Yield.
            yieldedValues.push_back(clonedReductionOp->getResult(0));
          }
          b.create<linalg::YieldOp>(loc, yieldedValues);
        });
    return MergeResult{
        {reduction.getOperation()},
        llvm::map_to_vector(reduction->getResults(),
                            [](OpResult r) -> Value { return r; })};
  }
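  // Example (sketch): the MxNx8 partial accumulator from above is reduced back
  // to MxN by a linalg.reduce over the inserted dimension, reusing the combiner
  // found by matchReduction (e.g. arith.addf) to fold each partial result into
  // the original init.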
};

} // namespace

template <typename OpType>
static void registerOne(MLIRContext *ctx) {
  OpType::template attachInterface<LinalgOpTilingInterface<OpType>>(*ctx);
  OpType::template attachInterface<LinalgOpPartialReductionInterface<OpType>>(
      *ctx);
}

/// Variadic helper function.
template <typename... OpTypes>
static void registerAll(MLIRContext *ctx) {
  (registerOne<OpTypes>(ctx), ...);
}

#define GET_OP_LIST

void mlir::linalg::registerTilingInterfaceExternalModels(
    DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, linalg::LinalgDialect *dialect) {
    registerOne<linalg::GenericOp>(ctx);
    registerAll<
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
        >(ctx);
  });
}
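
// A minimal usage sketch (everything except the registration entry point is
// illustrative): clients typically attach these external models by populating
// a DialectRegistry before creating the MLIRContext, e.g.
//
//   DialectRegistry registry;
//   registry.insert<linalg::LinalgDialect>();
//   linalg::registerTilingInterfaceExternalModels(registry);
//   MLIRContext context(registry);
//
// after which linalg ops can be cast to TilingInterface (and the structured
// ops additionally to PartialReductionOpInterface) by the tiling drivers.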