Promotion.cpp
1 //===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the linalg dialect Promotion pass.
10 //
11 //===----------------------------------------------------------------------===//
12 
21 #include "mlir/IR/AffineMap.h"
23 #include "mlir/Support/LLVM.h"
25 #include "llvm/ADT/MapVector.h"
26 #include "llvm/ADT/SmallBitVector.h"
27 #include "llvm/ADT/SmallSet.h"
28 #include "llvm/ADT/TypeSwitch.h"
29 #include "llvm/Support/Debug.h"
30 
31 using namespace mlir;
32 using namespace mlir::linalg;
33 using namespace mlir::scf;
34 
35 using llvm::MapVector;
36 
37 #define DEBUG_TYPE "linalg-promotion"
38 
39 /// Allocate a new buffer of `size` * `width` i8, where `width` is given by
40 /// the data `layout` for `elementType`.
41 /// Use AllocOp or AllocaOp depending on `options`.
42 /// Take an optional alignment.
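/// For example (illustrative only; the exact IR depends on `options`, the
/// alignment and the data layout): a constant `allocSize` of 128 f32 elements
/// (width 4) produces a static allocation roughly like
///   %buf = memref.alloc() {alignment = 16 : i64} : memref<512xi8>
/// while a dynamic `allocSize` %n produces
///   %w   = arith.constant 4 : index
///   %sz  = arith.muli %w, %n : index
///   %buf = memref.alloc(%sz) : memref<?xi8>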
43 static Value allocBuffer(ImplicitLocOpBuilder &b,
44  const LinalgPromotionOptions &options,
45  Type elementType, Value allocSize, DataLayout &layout,
46  std::optional<unsigned> alignment = std::nullopt) {
47  llvm::TypeSize width = layout.getTypeSize(elementType);
48  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");
49 
50  IntegerAttr alignmentAttr;
51  if (alignment.has_value())
52  alignmentAttr = b.getI64IntegerAttr(alignment.value());
53 
54  Attribute memorySpaceAttr;
55  if (options.memorySpace.has_value())
56  memorySpaceAttr = *options.memorySpace;
57 
58  // Static buffer.
59  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
60  auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
61  b.getIntegerType(8));
62  staticBufferType =
63  MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
64  if (options.useAlloca) {
65  return memref::AllocaOp::create(b, staticBufferType, ValueRange{},
66  alignmentAttr);
67  }
68  return memref::AllocOp::create(b, staticBufferType, ValueRange{},
69  alignmentAttr);
70  }
71 
72  // Fallback dynamic buffer.
73  auto dynamicBufferType =
74  MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
75  dynamicBufferType =
76  MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
77  Value mul = b.createOrFold<arith::MulIOp>(
78  arith::ConstantIndexOp::create(b, width), allocSize);
79  if (options.useAlloca)
80  return memref::AllocaOp::create(b, dynamicBufferType, mul, alignmentAttr);
81  return memref::AllocOp::create(b, dynamicBufferType, mul, alignmentAttr);
82 }
83 
84 /// Default allocation callback function. This allocates a promoted buffer
85 /// when no callback to do so is provided. The default is to allocate a
86 /// memref<..xi8> and return a view to get a memref type of shape
87 /// boundingSubViewSize.
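/// For example (illustrative only; the element type and sizes are
/// assumptions): for an f32 subview with bounding sizes %s0 x %s1 this emits
/// roughly
///   %elems = arith.muli %s0, %s1 : index
///   %bytes = arith.muli %c4, %elems : index
///   %buf   = memref.alloc(%bytes) : memref<?xi8>
///   %view  = memref.view %buf[%c0][%s0, %s1]
///              : memref<?xi8> to memref<?x?xf32>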
88 static std::optional<Value> defaultAllocBufferCallBack(
89  const LinalgPromotionOptions &options, OpBuilder &builder,
90  memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
91  std::optional<unsigned> alignment, DataLayout &layout) {
92  ShapedType viewType = subView.getType();
93  ImplicitLocOpBuilder b(subView.getLoc(), builder);
94  auto zero = arith::ConstantIndexOp::create(b, 0);
95  auto one = arith::ConstantIndexOp::create(b, 1);
96 
97  Attribute memorySpaceAttr;
98  if (options.memorySpace.has_value())
99  memorySpaceAttr = *options.memorySpace;
100 
101  Value allocSize = one;
102  for (const auto &size : llvm::enumerate(boundingSubViewSize))
103  allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
104  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
105  layout, alignment);
106  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
107  ShapedType::kDynamic);
108 
109  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
110  viewMemRefType =
111  MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
112  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
113  boundingSubViewSize);
114  return view;
115 }
116 
117 /// Default implementation of deallocation of the buffer used for promotion.
118 /// It expects to get the same value that the default allocation method
119 /// returned, i.e., the result of a ViewOp.
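/// For example (illustrative only): for a promoted buffer created as
///   %view = memref.view %buf[%c0][%s0, %s1] : memref<?xi8> to memref<?x?xf32>
/// this emits
///   memref.dealloc %buf : memref<?xi8>
/// and emits nothing when `options.useAlloca` is set.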
120 static LogicalResult
121 defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
122  OpBuilder &b, Value fullLocalView) {
123  if (!options.useAlloca) {
124  auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
125  memref::DeallocOp::create(b, viewOp.getSource().getLoc(),
126  viewOp.getSource());
127  }
128  return success();
129 }
130 
131 namespace {
132 
133 /// Helper struct that captures the information required to apply the
134 /// transformation on each op. This bridges the abstraction gap with the
135 /// user-facing API which exposes positional arguments to control which operands
136 /// are promoted.
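///
/// A typical client-side setup looks roughly like the following sketch (the
/// setters live on LinalgPromotionOptions; the operand numbers and alignment
/// are illustrative assumptions):
///   LinalgPromotionOptions promotionOptions;
///   promotionOptions.setOperandsToPromote({0, 1})
///       .setUseFullTileBuffers({false, false})
///       .setAlignment(16);
///   if (succeeded(promoteSubviewsPrecondition(linalgOp, promotionOptions)))
///     (void)promoteSubViews(builder, linalgOp, promotionOptions);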
137 struct LinalgOpInstancePromotionOptions {
138  LinalgOpInstancePromotionOptions(LinalgOp op,
139  const LinalgPromotionOptions &options);
140  /// SubViews to promote.
141  MapVector<int64_t, Value> subViews;
142  /// Operand numbers of the subviews to copy in using `copyInFn`.
143  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
144  /// True if the full view should be used for the promoted buffer.
145  DenseMap<Value, bool> useFullTileBuffers;
146  /// True if the original subview size should be used. This means the full tile
147  /// buffer is the same size as the partial view.
148  bool useOriginalSubviewSize;
149 
150  /// Callback functions for allocation and deallocation of promoted buffers, as
151  /// well as to copy the data into and out of these buffers.
152  AllocBufferCallbackFn allocationFn;
153  DeallocBufferCallbackFn deallocationFn;
154  CopyCallbackFn copyInFn;
155  CopyCallbackFn copyOutFn;
156 
157  /// Alignment of promoted buffer.
158  std::optional<unsigned> alignment;
159 };
160 } // namespace
161 
162 LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
163  LinalgOp linalgOp, const LinalgPromotionOptions &options)
164  : subViews(), alignment(options.alignment) {
165  assert(linalgOp.hasPureBufferSemantics() &&
166  "revisit usage of shaped operand");
167  auto vUseFullTileBuffers =
168  options.useFullTileBuffers.value_or(llvm::SmallBitVector());
169  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
170  options.useFullTileBuffersDefault);
171  useOriginalSubviewSize = options.useOriginalSubviewSize;
172 
173  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
174  int64_t operandNumber = opOperand.getOperandNumber();
175  if (options.operandsToPromote &&
176  !options.operandsToPromote->count(operandNumber))
177  continue;
178  Operation *op = opOperand.get().getDefiningOp();
179  if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
180  subViews[operandNumber] = sv;
181  // In the case of linalg.generic, copy in only if the subview is used in
182  // the linalg payload.
183  if (!isa<linalg::GenericOp>(linalgOp) ||
184  linalgOp.payloadUsesValueFromOperand(&opOperand))
185  operandsNumbersToCopyIn.insert(operandNumber);
186  useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
187  }
188  }
189 
190  if (options.allocationFn) {
191  allocationFn = *options.allocationFn;
192  } else {
193  allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
194  ArrayRef<Value> boundingSubViewSize,
195  DataLayout &layout) -> std::optional<Value> {
196  return defaultAllocBufferCallBack(options, b, subViewOp,
197  boundingSubViewSize, alignment, layout);
198  };
199  }
200 
201  if (options.deallocationFn) {
202  deallocationFn = *options.deallocationFn;
203  } else {
204  deallocationFn = [&](OpBuilder &b, Value buffer) {
205  return defaultDeallocBufferCallBack(options, b, buffer);
206  };
207  }
208 
209  // Save the loc because `linalgOp` goes out of scope.
210  Location loc = linalgOp.getLoc();
211  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
212  Value dst) -> LogicalResult {
213  linalg::CopyOp::create(b, loc, src, dst);
214  return success();
215  };
216  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
217  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
218 }
219 
220 // Performs promotion of a `subView` into a local buffer of the size of the
221 // *ranges* of the `subView`. This produces a buffer whose size may be bigger
222 // than the actual size of the `subView` at the boundaries.
223 // This is related to the full/partial tile problem.
224 // Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
225 // `partialLocalView` such that:
226 // * `buffer` is always the size of the full tile.
227 // * `fullLocalView` is a dense contiguous view into that buffer.
228 // * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
229 // that corresponds to the size of `subView`, accounting for boundary
230 // effects.
231 // The point of the full tile buffer is that constant static tile sizes are
232 // folded and result in a buffer type with statically known size and alignment
233 // properties.
234 // To account for general boundary effects, padding must be performed on the
235 // boundary tiles. For now this is done with an unconditional `fill` op followed
236 // by a partial `copy` op.
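// For example (illustrative only; exact types depend on the allocation
// callback): with f32 elements and tile sizes bounded by 4 x 8, the default
// callback produces roughly
//   %buf     = memref.alloc() : memref<128xi8>      // full tile, static size
//   %full    = memref.view %buf[%c0][%c4, %c8]
//                : memref<128xi8> to memref<?x?xf32>
//   %partial = memref.subview %full[0, 0] [%sz0, %sz1] [1, 1]
//                : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1]>>
// where %sz0 and %sz1 are the (possibly boundary-clamped) sizes of `subView`.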
237 FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
238  OpBuilder &b, Location loc, memref::SubViewOp subView,
239  bool useOriginalSubviewSize, const AllocBufferCallbackFn &allocationFn,
240  DataLayout &layout) {
241  auto viewType = subView.getType();
242  auto rank = viewType.getRank();
243  SmallVector<Value, 4> fullSizes;
244  SmallVector<OpFoldResult> partialSizes;
245  fullSizes.reserve(rank);
246  partialSizes.reserve(rank);
247  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
248  int64_t resultDimIdx = 0;
249  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
250  if (droppedDims[en.index()])
251  continue;
252  auto rangeValue = en.value();
253  // Try to extract a tight constant. If the size is known statically, no need
254  // to look for the bound.
255  LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
256  Value size;
257  if (llvm::isa_and_present<Attribute>(rangeValue.size) ||
258  useOriginalSubviewSize) {
259  size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
260  } else {
261  FailureOr<int64_t> upperBound =
262  ValueBoundsConstraintSet::computeConstantBound(
263  presburger::BoundType::UB, rangeValue.size,
264  /*stopCondition=*/nullptr, /*closedUB=*/true);
265  size = failed(upperBound)
266  ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
267  : arith::ConstantIndexOp::create(b, loc, *upperBound);
268  }
269  LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
270  fullSizes.push_back(size);
271  partialSizes.push_back(
272  b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
273  }
274  // If a callback is not specified, then use the default implementation for
275  // allocating the promoted buffer.
276  std::optional<Value> fullLocalView =
277  allocationFn(b, subView, fullSizes, layout);
278  if (!fullLocalView)
279  return failure();
280  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
281  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
282  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
283  loc, *fullLocalView, zeros, partialSizes, ones);
284  return PromotionInfo{*fullLocalView, partialLocalView};
285 }
286 
287 static FailureOr<MapVector<int64_t, PromotionInfo>>
288 promoteSubViews(ImplicitLocOpBuilder &b,
289  LinalgOpInstancePromotionOptions options, DataLayout &layout) {
290  if (options.subViews.empty())
291  return failure();
292 
293  MapVector<int64_t, PromotionInfo> promotionInfoMap;
294 
295  for (auto v : options.subViews) {
296  memref::SubViewOp subView =
297  cast<memref::SubViewOp>(v.second.getDefiningOp());
298  auto promotionInfo = promoteSubviewAsNewBuffer(
299  b, b.getLoc(), subView, options.useOriginalSubviewSize,
300  options.allocationFn, layout);
301  if (failed(promotionInfo))
302  return failure();
303  promotionInfoMap[v.first] = *promotionInfo;
304 
305  // Only fill the buffer if the full local view is used.
306  if (!options.useFullTileBuffers[v.second])
307  continue;
308  Type subviewEltType = subView.getType().getElementType();
309  Value fillVal =
310  llvm::TypeSwitch<Type, Value>(subviewEltType)
311  .Case([&](FloatType t) {
312  return arith::ConstantOp::create(b, FloatAttr::get(t, 0.0));
313  })
314  .Case([&](IntegerType t) {
315  return arith::ConstantOp::create(b, IntegerAttr::get(t, 0));
316  })
317  .Case([&](ComplexType t) {
318  Value tmp;
319  if (auto et = dyn_cast<FloatType>(t.getElementType()))
320  tmp = arith::ConstantOp::create(b, FloatAttr::get(et, 0.0));
321  else if (auto et = dyn_cast<IntegerType>(t.getElementType()))
322  tmp = arith::ConstantOp::create(b, IntegerAttr::get(et, 0));
323  return complex::CreateOp::create(b, t, tmp, tmp);
324  })
325  .Default([](auto) { return Value(); });
326  if (!fillVal)
327  return failure();
328  linalg::FillOp::create(b, fillVal, promotionInfo->fullLocalView);
329  }
330 
331  // Copy data into the promoted buffers. Use callback if provided.
332  for (auto v : options.subViews) {
333  auto *info = promotionInfoMap.find(v.first);
334  if (info == promotionInfoMap.end())
335  continue;
336  if (options.operandsNumbersToCopyIn.count(v.first) == 0)
337  continue;
338  if (failed(options.copyInFn(
339  b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
340  info->second.partialLocalView)))
341  return failure();
342  }
343  return promotionInfoMap;
344 }
345 
346 static FailureOr<LinalgOp>
347 promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
348  LinalgOpInstancePromotionOptions options, DataLayout &layout) {
349  assert(op.hasPureBufferSemantics() &&
350  "expected linalg op with buffer semantics");
351 
352  // 1. Promote the specified views and use them in the new op.
353  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
354  if (failed(promotedBuffersAndViews) ||
355  promotedBuffersAndViews->size() != options.subViews.size())
356  return failure();
357 
358  // 2. Append all other operands as they appear; this enforces that such
359  // operands are not views. This is to support cases such as FillOp taking
360  // extra scalars, etc. Keep a reference to the output buffers.
361  SmallVector<Value, 8> opViews;
362  opViews.reserve(op->getNumOperands());
363  SmallVector<std::pair<Value, Value>, 8> writebackViews;
364  writebackViews.reserve(promotedBuffersAndViews->size());
365  for (OpOperand &opOperand : op->getOpOperands()) {
366  int64_t operandNumber = opOperand.getOperandNumber();
367  if (options.subViews.count(operandNumber) != 0) {
368  if (options.useFullTileBuffers[opOperand.get()])
369  opViews.push_back(
370  (*promotedBuffersAndViews)[operandNumber].fullLocalView);
371  else
372  opViews.push_back(
373  (*promotedBuffersAndViews)[operandNumber].partialLocalView);
374  if (operandNumber >= op.getNumDpsInputs())
375  writebackViews.emplace_back(std::make_pair(
376  opOperand.get(),
377  (*promotedBuffersAndViews)[operandNumber].partialLocalView));
378  } else {
379  opViews.push_back(opOperand.get());
380  }
381  }
382  op->setOperands(0, opViews.size(), opViews);
383 
384  OpBuilder::InsertionGuard guard(b);
385  b.setInsertionPointAfter(op);
386  // 3. Emit write-back for the promoted output views: copy the partial view.
387  for (auto viewAndPartialLocalView : writebackViews) {
388  if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
389  viewAndPartialLocalView.first)))
390  return failure();
391  }
392 
393  // 4. Dealloc all local buffers.
394  for (const auto &pi : *promotedBuffersAndViews)
395  (void)options.deallocationFn(b, pi.second.fullLocalView);
396  return op;
397 }
398 
399 LogicalResult
400 mlir::linalg::promoteSubviewsPrecondition(Operation *op,
401  LinalgPromotionOptions options) {
402  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
403  // Transformation applies to buffers only.
404  if (!linalgOp || !linalgOp.hasPureBufferSemantics())
405  return failure();
406  // Check that at least one of the requested operands is indeed a subview.
407  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
408  auto sv =
409  isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
410  if (sv) {
411  if (!options.operandsToPromote ||
412  options.operandsToPromote->count(opOperand.getOperandNumber()))
413  return success();
414  }
415  }
416  // TODO: Check all subviews requested are bound by a static constant.
417  // TODO: Check that the total footprint fits within a given size.
418  return failure();
419 }
420 
421 FailureOr<LinalgOp>
422 mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
423  const LinalgPromotionOptions &options) {
424  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
425  auto layout = DataLayout::closest(linalgOp);
426  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
427  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
428  if (failed(res))
429  return failure();
430  return res;
431 }
432 
433 /// Allocate the given subview to a GPU memory address space by creating an
434 /// allocation operation and setting the memref type's address space to the
435 /// desired address space.
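/// For example (illustrative only; the shape is an assumption): promoting a
/// constant-bounded 16x16 f32 subview to workgroup memory materializes, at
/// the start of the enclosing function,
///   %alloc = memref.alloc() : memref<16x16xf32, #gpu.address_space<workgroup>>
/// whereas the private address space uses memref.alloca instead.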
436 static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
437  OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
438  gpu::AddressSpace addressSpace) {
439  OpBuilder::InsertionGuard guard(builder);
440 
441  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
442  if (!funcOp)
443  return std::nullopt;
444 
445  // The subview size bounds are expected to be constant; they specify the shape
446  // of the allocation.
447  SmallVector<int64_t> shape;
448  for (Value bound : sizeBounds) {
449  APInt value;
450  if (!matchPattern(bound, m_ConstantInt(&value)))
451  return std::nullopt;
452  shape.push_back(value.getSExtValue());
453  }
454 
455  builder.setInsertionPointToStart(&funcOp.front());
456  auto type = MemRefType::get(
457  shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
458  gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
459  Value buffer;
460  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
461  buffer = memref::AllocOp::create(builder, funcOp.getLoc(), type);
462  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
463  buffer = memref::AllocaOp::create(builder, funcOp.getLoc(), type);
464  } else {
465  return std::nullopt;
466  }
467  return buffer;
468 }
469 
470 /// Allocate the subview in the GPU workgroup memory.
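/// A typical way to request this allocator (illustrative sketch; the setters
/// live on LinalgPromotionOptions):
///   LinalgPromotionOptions()
///       .setAllocationDeallocationFns(allocateWorkgroupMemory,
///                                     deallocateWorkgroupMemory)
///       .setCopyInOutFns(copyToWorkgroupMemory, copyToWorkgroupMemory)
///       .setUseFullTileBuffers({true, true});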
471 std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
472  OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
473  DataLayout &) {
474  return allocateSubviewGPUMemoryInAddressSpace(
475  builder, subview, sizeBounds,
476  gpu::GPUDialect::getWorkgroupAddressSpace());
477 }
478 
479 /// In case of GPU group memory there is no need to deallocate.
480 LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
481  Value /*buffer*/) {
482  return success();
483 }
484 
485 /// Create a memref copy operation, guarded by gpu barriers before and after
486 /// the copy, to ensure data integrity.
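/// The emitted sequence is essentially (illustrative)
///   gpu.barrier
///   memref.copy %src, %dst : memref<?x?xf32> to memref<?x?xf32>
///   gpu.barrier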
487 LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
488  Value dst) {
489  gpu::BarrierOp::create(b, src.getLoc());
490  Operation *copyOp = memref::CopyOp::create(b, src.getLoc(), src, dst);
491  gpu::BarrierOp::create(b, copyOp->getLoc());
492  return success();
493 }
494 
495 /// Allocate the subview in the GPU private memory.
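/// Analogous to the workgroup flavor (illustrative sketch):
///   LinalgPromotionOptions()
///       .setAllocationDeallocationFns(allocateGPUPrivateMemory,
///                                     deallocateGPUPrivateMemory)
///       .setCopyInOutFns(copyToGPUPrivateMemory, copyToGPUPrivateMemory);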
496 std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
497  OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
498  DataLayout &) {
499  return allocateSubviewGPUMemoryInAddressSpace(
500  builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
501 }
502 
503 /// Normal copy between src and dst.
504 LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
505  Value dst) {
506  memref::CopyOp::create(b, src.getLoc(), src, dst);
507  return success();
508 }
509 
510 /// In case of GPU private memory there is no need to deallocate since the
511 /// memory is freed when going out of scope.
512 LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
513  Value /*buffer*/) {
514  return success();
515 }
static std::optional< Value > defaultAllocBufferCallBack(const LinalgPromotionOptions &options, OpBuilder &builder, memref::SubViewOp subView, ArrayRef< Value > boundingSubViewSize, std::optional< unsigned > alignment, DataLayout &layout)
Default allocation callback function.
Definition: Promotion.cpp:88
static LogicalResult defaultDeallocBufferCallBack(const LinalgPromotionOptions &options, OpBuilder &b, Value fullLocalView)
Default implementation of deallocation of the buffer used for promotion.
Definition: Promotion.cpp:121
static FailureOr< MapVector< int64_t, PromotionInfo > > promoteSubViews(ImplicitLocOpBuilder &b, LinalgOpInstancePromotionOptions options, DataLayout &layout)
Definition: Promotion.cpp:288
static std::optional< Value > allocateSubviewGPUMemoryInAddressSpace(OpBuilder &builder, memref::SubViewOp subview, ArrayRef< Value > sizeBounds, gpu::AddressSpace addressSpace)
Allocate the given subview to a GPU memory address space by creating an allocation operation and se...
Definition: Promotion.cpp:436
static Value allocBuffer(ImplicitLocOpBuilder &b, const LinalgPromotionOptions &options, Type elementType, Value allocSize, DataLayout &layout, std::optional< unsigned > alignment=std::nullopt)
Allocate a new buffer of size * width i8, where width is given by the data layout for elementType.
Definition: Promotion.cpp:43
Attributes are known-constant values of operations.
Definition: Attributes.h:25
IntegerAttr getIndexAttr(int64_t value)
Definition: Builders.cpp:103
IntegerAttr getI64IntegerAttr(int64_t value)
Definition: Builders.cpp:107
IntegerType getIntegerType(unsigned width)
Definition: Builders.cpp:66
MLIRContext * getContext() const
Definition: Builders.h:55
The main mechanism for performing data layout queries.
llvm::TypeSize getTypeSize(Type t) const
Returns the size of the given type in the current scope.
ImplicitLocOpBuilder maintains a 'current location', allowing use of the create<> method without spec...
Definition: Builders.h:621
Location getLoc() const
Accessors for the implied location.
Definition: Builders.h:654
void createOrFold(llvm::SmallVectorImpl< Value > &results, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
Definition: Builders.h:672
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:76
This is a builder type that keeps local references to arguments.
Definition: BuiltinTypes.h:182
Builder & setMemorySpace(Attribute newMemorySpace)
Definition: BuiltinTypes.h:208
RAII guard to reset the insertion point of the builder when destroyed.
Definition: Builders.h:346
This class helps build Operations.
Definition: Builders.h:205
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Definition: Builders.h:429
void createOrFold(SmallVectorImpl< Value > &results, Location location, Args &&...args)
Create an operation of specific op type at the current insertion point, and immediately try to fold i...
Definition: Builders.h:517
void setInsertionPointAfter(Operation *op)
Sets the insertion point to the node after the specified operation, which will cause subsequent inser...
Definition: Builders.h:410
This class represents an operand of an operation.
Definition: Value.h:257
Operation is the basic unit of execution within MLIR.
Definition: Operation.h:88
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:223
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:387
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Location getLoc() const
Return the location of this value.
Definition: Value.cpp:24
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition: Value.cpp:18
static ConstantIndexOp create(OpBuilder &builder, Location location, int64_t value)
Definition: ArithOps.cpp:359
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:344
std::function< std::optional< Value >(OpBuilder &b, memref::SubViewOp subView, ArrayRef< Value > boundingSubViewSize, DataLayout &layout)> AllocBufferCallbackFn
Callback function type used to perform the allocation for the promoted subView.
Definition: Transforms.h:381
std::optional< Value > allocateWorkgroupMemory(OpBuilder &builder, memref::SubViewOp subview, ArrayRef< Value > sizeBounds, DataLayout &)
Allocate the subview in the GPU workgroup memory.
Definition: Promotion.cpp:471
std::function< LogicalResult(OpBuilder &b, Value buffer)> DeallocBufferCallbackFn
Callback function type used to deallocate the buffers used to hold the promoted subview.
Definition: Transforms.h:386
LogicalResult deallocateGPUPrivateMemory(OpBuilder &, Value)
In case of GPU private memory there is no need to deallocate since the memory is freed when going out...
Definition: Promotion.cpp:512
FailureOr< PromotionInfo > promoteSubviewAsNewBuffer(OpBuilder &b, Location loc, memref::SubViewOp subView, bool useOriginalSubviewSize, const AllocBufferCallbackFn &allocationFn, DataLayout &layout)
Definition: Promotion.cpp:237
std::optional< Value > allocateGPUPrivateMemory(OpBuilder &builder, memref::SubViewOp subview, ArrayRef< Value > sizeBounds, DataLayout &)
Allocate the subview in the GPU private memory.
Definition: Promotion.cpp:496
LogicalResult copyToWorkgroupMemory(OpBuilder &b, Value src, Value dst)
Create Memref copy operations and add gpu barrier guards before and after the copy operation to ensur...
Definition: Promotion.cpp:487
LogicalResult promoteSubviewsPrecondition(Operation *op, LinalgPromotionOptions options)
Promote memref.subviews feeding linalg-on-buffers operations.
Definition: Promotion.cpp:400
LogicalResult copyToGPUPrivateMemory(OpBuilder &b, Value src, Value dst)
Normal copy between src and dst.
Definition: Promotion.cpp:504
std::function< LogicalResult(OpBuilder &b, Value src, Value dst)> CopyCallbackFn
Callback function type used to insert copy from original subview to subview of the promoted region fo...
Definition: Transforms.h:393
FailureOr< LinalgOp > promoteSubViews(OpBuilder &b, LinalgOp op, const LinalgPromotionOptions &options)
Promote the subViews into a new buffer allocated at the insertion point b.
Definition: Promotion.cpp:422
LogicalResult deallocateWorkgroupMemory(OpBuilder &, Value)
In case of GPU group memory there is no need to deallocate.
Definition: Promotion.cpp:480
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition: Remarks.h:491
Include the generated interface declarations.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
Definition: Matchers.h:490
detail::constant_int_value_binder m_ConstantInt(IntegerAttr::ValueType *bind_value)
Matches a constant holding a scalar/vector/tensor integer (splat) and writes the integer value to bin...
Definition: Matchers.h:527
std::optional< int64_t > getConstantIntValue(OpFoldResult ofr)
If ofr is a constant integer or an IntegerAttr, return the integer.
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
Definition: Utils.cpp:111
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
Create a new buffer using the allocationFn provided.
Definition: Transforms.h:828