//===- Promotion.cpp - Implementation of linalg Promotion -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Promotion pass.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/Transforms/Transforms.h"

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

using llvm::MapVector;

#define DEBUG_TYPE "linalg-promotion"

/// Allocate a new buffer of `size` * `width` i8, where `width` is given by the
/// data `layout` for `elementType`.
/// Use AllocOp or AllocaOp depending on `options`.
/// Take an optional alignment.
static Value allocBuffer(ImplicitLocOpBuilder &b,
                         const LinalgPromotionOptions &options,
                         Type elementType, Value allocSize, DataLayout &layout,
                         std::optional<unsigned> alignment = std::nullopt) {
  llvm::TypeSize width = layout.getTypeSize(elementType);
  assert(!width.isScalable() && "cannot allocate buffer for a scalable vector");

  IntegerAttr alignmentAttr;
  if (alignment.has_value())
    alignmentAttr = b.getI64IntegerAttr(alignment.value());

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  // Static buffer.
  if (std::optional<int64_t> cst = getConstantIntValue(allocSize)) {
    auto staticBufferType = MemRefType::get(width.getFixedValue() * cst.value(),
                                            b.getIntegerType(8));
    staticBufferType =
        MemRefType::Builder(staticBufferType).setMemorySpace(memorySpaceAttr);
    if (options.useAlloca) {
      return b.create<memref::AllocaOp>(staticBufferType, ValueRange{},
                                        alignmentAttr);
    }
    return b.create<memref::AllocOp>(staticBufferType, ValueRange{},
                                     alignmentAttr);
  }

  // Fallback dynamic buffer.
  auto dynamicBufferType =
      MemRefType::get(ShapedType::kDynamic, b.getIntegerType(8));
  dynamicBufferType =
      MemRefType::Builder(dynamicBufferType).setMemorySpace(memorySpaceAttr);
  Value mul = b.createOrFold<arith::MulIOp>(
      b.create<arith::ConstantIndexOp>(width), allocSize);
  if (options.useAlloca)
    return b.create<memref::AllocaOp>(dynamicBufferType, mul, alignmentAttr);
  return b.create<memref::AllocOp>(dynamicBufferType, mul, alignmentAttr);
}
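// Illustrative sketch, not part of the upstream pass: assuming a constant
// `allocSize` of 32 f32 elements and an alignment of 16, the static path
// above materializes roughly
//   %buf = memref.alloca() {alignment = 16 : i64} : memref<128xi8>
// (memref.alloc when `options.useAlloca` is false), with the memory space
// taken from `options.memorySpace` when one is set.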

/// Default allocation callback function. This allocates a promoted buffer when
/// no callback to do so is provided. The default is to allocate a
/// memref<..xi8> and return a view to get a memref type of shape
/// boundingSubViewSize.
static std::optional<Value> defaultAllocBufferCallBack(
    const LinalgPromotionOptions &options, OpBuilder &builder,
    memref::SubViewOp subView, ArrayRef<Value> boundingSubViewSize,
    std::optional<unsigned> alignment, DataLayout &layout) {
  ShapedType viewType = subView.getType();
  ImplicitLocOpBuilder b(subView.getLoc(), builder);
  auto zero = b.create<arith::ConstantIndexOp>(0);
  auto one = b.create<arith::ConstantIndexOp>(1);

  Attribute memorySpaceAttr;
  if (options.memorySpace.has_value())
    memorySpaceAttr = *options.memorySpace;

  Value allocSize = one;
  for (const auto &size : llvm::enumerate(boundingSubViewSize))
    allocSize = b.createOrFold<arith::MulIOp>(allocSize, size.value());
  Value buffer = allocBuffer(b, options, viewType.getElementType(), allocSize,
                             layout, alignment);
  SmallVector<int64_t, 4> dynSizes(boundingSubViewSize.size(),
                                   ShapedType::kDynamic);

  auto viewMemRefType = MemRefType::get(dynSizes, viewType.getElementType());
  viewMemRefType =
      MemRefType::Builder(viewMemRefType).setMemorySpace(memorySpaceAttr);
  Value view = b.createOrFold<memref::ViewOp>(viewMemRefType, buffer, zero,
                                              boundingSubViewSize);
  return view;
}
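// Illustrative sketch, not part of the upstream pass: assuming dynamic
// bounding sizes %s0 and %s1 and an f32 element type, the default callback
// emits roughly
//   %count = arith.muli %s0, %s1 : index
//   %bytes = arith.muli %c4, %count : index
//   %buf   = memref.alloc(%bytes) : memref<?xi8>
//   %view  = memref.view %buf[%c0][%s0, %s1] : memref<?xi8> to memref<?x?xf32>
// i.e. a flat i8 buffer reinterpreted as a shaped view of the bounding sizes.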

/// Default implementation of deallocation of the buffer used for promotion.
/// It expects to get the same value that the default allocation method
/// returned, i.e. the result of a ViewOp.
static LogicalResult
defaultDeallocBufferCallBack(const LinalgPromotionOptions &options,
                             OpBuilder &b, Value fullLocalView) {
  if (!options.useAlloca) {
    auto viewOp = cast<memref::ViewOp>(fullLocalView.getDefiningOp());
    b.create<memref::DeallocOp>(viewOp.getSource().getLoc(),
                                viewOp.getSource());
  }
  return success();
}

namespace {

/// Helper struct that captures the information required to apply the
/// transformation on each op. This bridges the abstraction gap with the
/// user-facing API, which exposes positional arguments to control which
/// operands are promoted.
struct LinalgOpInstancePromotionOptions {
  LinalgOpInstancePromotionOptions(LinalgOp op,
                                   const LinalgPromotionOptions &options);
  /// SubViews to promote.
  MapVector<int64_t, Value> subViews;
  /// Operand numbers of the subviews to copy in using copyInFn.
  llvm::SmallSet<int64_t, 4> operandsNumbersToCopyIn;
  /// True if the full view should be used for the promoted buffer.
  DenseMap<Value, bool> useFullTileBuffers;

  /// Callback functions for allocation and deallocation of promoted buffers,
  /// as well as to copy the data into and out of these buffers.
  AllocBufferCallbackFn allocationFn;
  DeallocBufferCallbackFn deallocationFn;
  CopyCallbackFn copyInFn;
  CopyCallbackFn copyOutFn;

  /// Alignment of promoted buffer.
  std::optional<unsigned> alignment;
};
} // namespace

LinalgOpInstancePromotionOptions::LinalgOpInstancePromotionOptions(
    LinalgOp linalgOp, const LinalgPromotionOptions &options)
    : subViews(), alignment(options.alignment) {
  assert(linalgOp.hasPureBufferSemantics() &&
         "revisit usage of shaped operand");
  auto vUseFullTileBuffers =
      options.useFullTileBuffers.value_or(llvm::SmallBitVector());
  vUseFullTileBuffers.resize(linalgOp->getNumOperands(),
                             options.useFullTileBuffersDefault);

  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.operandsToPromote &&
        !options.operandsToPromote->count(operandNumber))
      continue;
    Operation *op = opOperand.get().getDefiningOp();
    if (auto sv = dyn_cast_or_null<memref::SubViewOp>(op)) {
      subViews[operandNumber] = sv;
      // In case of linalg generic, copy in only if subview is used in linalg
      // payload.
      if (!isa<linalg::GenericOp>(linalgOp) ||
          linalgOp.payloadUsesValueFromOperand(&opOperand))
        operandsNumbersToCopyIn.insert(operandNumber);
      useFullTileBuffers[sv] = vUseFullTileBuffers[operandNumber];
    }
  }

  if (options.allocationFn) {
    allocationFn = *options.allocationFn;
  } else {
    allocationFn = [&](OpBuilder &b, memref::SubViewOp subViewOp,
                       ArrayRef<Value> boundingSubViewSize,
                       DataLayout &layout) -> std::optional<Value> {
      return defaultAllocBufferCallBack(options, b, subViewOp,
                                        boundingSubViewSize, alignment, layout);
    };
  }

  if (options.deallocationFn) {
    deallocationFn = *options.deallocationFn;
  } else {
    deallocationFn = [&](OpBuilder &b, Value buffer) {
      return defaultDeallocBufferCallBack(options, b, buffer);
    };
  }

  // Save the loc because `linalgOp` goes out of scope.
  Location loc = linalgOp.getLoc();
  auto defaultCopyCallBack = [loc](OpBuilder &b, Value src,
                                   Value dst) -> LogicalResult {
    b.create<linalg::CopyOp>(loc, src, dst);
    return success();
  };
  copyInFn = (options.copyInFn ? *(options.copyInFn) : defaultCopyCallBack);
  copyOutFn = (options.copyOutFn ? *(options.copyOutFn) : defaultCopyCallBack);
}

// Performs promotion of a `subView` into a local buffer of the size of the
// *ranges* of the `subView`. This produces a buffer whose size may be bigger
// than the actual size of the `subView` at the boundaries.
// This is related to the full/partial tile problem.
// Returns a PromotionInfo containing a `buffer`, `fullLocalView` and
// `partialLocalView` such that:
//   * `buffer` is always the size of the full tile.
//   * `fullLocalView` is a dense contiguous view into that buffer.
//   * `partialLocalView` is a dense non-contiguous slice of `fullLocalView`
//     that corresponds to the size of `subView` and accounts for boundary
//     effects.
// The point of the full tile buffer is that constant static tile sizes are
// folded and result in a buffer type with statically known size and alignment
// properties.
// To account for general boundary effects, padding must be performed on the
// boundary tiles. For now this is done with an unconditional `fill` op followed
// by a partial `copy` op.
FailureOr<PromotionInfo> mlir::linalg::promoteSubviewAsNewBuffer(
    OpBuilder &b, Location loc, memref::SubViewOp subView,
    const AllocBufferCallbackFn &allocationFn, DataLayout &layout) {
  auto viewType = subView.getType();
  auto rank = viewType.getRank();
  SmallVector<Value, 4> fullSizes;
  SmallVector<OpFoldResult> partialSizes;
  fullSizes.reserve(rank);
  partialSizes.reserve(rank);
  llvm::SmallBitVector droppedDims = subView.getDroppedDims();
  int64_t resultDimIdx = 0;
  for (const auto &en : llvm::enumerate(subView.getOrCreateRanges(b, loc))) {
    if (droppedDims[en.index()])
      continue;
    auto rangeValue = en.value();
    // Try to extract a tight constant. If the size is known statically, no need
    // to look for the bound.
    LLVM_DEBUG(llvm::dbgs() << "Extract tightest: " << rangeValue.size << "\n");
    Value size;
    if (auto attr = llvm::dyn_cast_if_present<Attribute>(rangeValue.size)) {
      size = getValueOrCreateConstantIndexOp(b, loc, rangeValue.size);
    } else {
      FailureOr<int64_t> upperBound =
          ValueBoundsConstraintSet::computeConstantBound(
              presburger::BoundType::UB, rangeValue.size,
              /*stopCondition=*/nullptr, /*closedUB=*/true);
      size = failed(upperBound)
                 ? getValueOrCreateConstantIndexOp(b, loc, rangeValue.size)
                 : b.create<arith::ConstantIndexOp>(loc, *upperBound);
    }
    LLVM_DEBUG(llvm::dbgs() << "Extracted tightest: " << size << "\n");
    fullSizes.push_back(size);
    partialSizes.push_back(
        b.createOrFold<memref::DimOp>(loc, subView, resultDimIdx++));
  }
  SmallVector<int64_t, 4> dynSizes(fullSizes.size(), ShapedType::kDynamic);
  // If a callback is not specified, then use the default implementation for
  // allocating the promoted buffer.
  std::optional<Value> fullLocalView =
      allocationFn(b, subView, fullSizes, layout);
  if (!fullLocalView)
    return failure();
  SmallVector<OpFoldResult, 4> zeros(fullSizes.size(), b.getIndexAttr(0));
  SmallVector<OpFoldResult, 4> ones(fullSizes.size(), b.getIndexAttr(1));
  auto partialLocalView = b.createOrFold<memref::SubViewOp>(
      loc, *fullLocalView, zeros, partialSizes, ones);
  return PromotionInfo{*fullLocalView, partialLocalView};
}
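// Illustrative sketch, not part of the upstream pass: assuming a 2-D f32
// subview whose range sizes have constant upper bounds 4 and 8, the code
// above yields roughly
//   %buf     = memref.alloc() : memref<128xi8>
//   %full    = memref.view %buf[%c0][%c4, %c8]
//                  : memref<128xi8> to memref<?x?xf32>
//   %partial = memref.subview %full[0, 0] [%d0, %d1] [1, 1]
//                  : memref<?x?xf32> to memref<?x?xf32, strided<[?, 1]>>
// where %d0 and %d1 are the memref.dim sizes of the original subview.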

static FailureOr<MapVector<int64_t, PromotionInfo>>
promoteSubViews(ImplicitLocOpBuilder &b,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  if (options.subViews.empty())
    return failure();

  MapVector<int64_t, PromotionInfo> promotionInfoMap;

  for (auto v : options.subViews) {
    memref::SubViewOp subView =
        cast<memref::SubViewOp>(v.second.getDefiningOp());
    auto promotionInfo = promoteSubviewAsNewBuffer(
        b, b.getLoc(), subView, options.allocationFn, layout);
    if (failed(promotionInfo))
      return failure();
    promotionInfoMap[v.first] = *promotionInfo;

    // Only fill the buffer if the full local view is used.
    if (!options.useFullTileBuffers[v.second])
      continue;
    Type subviewEltType = subView.getType().getElementType();
    Value fillVal =
        llvm::TypeSwitch<Type, Value>(subviewEltType)
            .Case([&](FloatType t) {
              return b.create<arith::ConstantOp>(FloatAttr::get(t, 0.0));
            })
            .Case([&](IntegerType t) {
              return b.create<arith::ConstantOp>(IntegerAttr::get(t, 0));
            })
            .Case([&](ComplexType t) {
              Value tmp;
              if (auto et = dyn_cast<FloatType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(FloatAttr::get(et, 0.0));
              else if (auto et = cast<IntegerType>(t.getElementType()))
                tmp = b.create<arith::ConstantOp>(IntegerAttr::get(et, 0));
              return b.create<complex::CreateOp>(t, tmp, tmp);
            })
            .Default([](auto) { return Value(); });
    if (!fillVal)
      return failure();
    b.create<linalg::FillOp>(fillVal, promotionInfo->fullLocalView);
  }

  // Copy data into the promoted buffers. Use callback if provided.
  for (auto v : options.subViews) {
    auto *info = promotionInfoMap.find(v.first);
    if (info == promotionInfoMap.end())
      continue;
    if (options.operandsNumbersToCopyIn.count(v.first) == 0)
      continue;
    if (failed(options.copyInFn(
            b, cast<memref::SubViewOp>(v.second.getDefiningOp()),
            info->second.partialLocalView)))
      return failure();
  }
  return promotionInfoMap;
}
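// Illustrative sketch, not part of the upstream pass: for an operand with
// useFullTileBuffers set, the loops above emit roughly
//   linalg.fill ins(%cst : f32) outs(%full : memref<?x?xf32>)
//   linalg.copy ins(%subview : memref<?x?xf32, strided<[?, 1], offset: ?>>)
//               outs(%partial : memref<?x?xf32, strided<[?, 1]>>)
// where the fill is skipped when only the partial view is used, and the copy
// is whatever `copyInFn` produces (a linalg.copy by default).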

static FailureOr<LinalgOp>
promoteSubViews(ImplicitLocOpBuilder &b, LinalgOp op,
                LinalgOpInstancePromotionOptions options, DataLayout &layout) {
  assert(op.hasPureBufferSemantics() &&
         "expected linalg op with buffer semantics");

  // 1. Promote the specified views and use them in the new op.
  auto promotedBuffersAndViews = promoteSubViews(b, options, layout);
  if (failed(promotedBuffersAndViews) ||
      promotedBuffersAndViews->size() != options.subViews.size())
    return failure();

  // 2. Append all other operands as they appear; this enforces that such
  // operands are not views. This is to support cases such as FillOp taking
  // extra scalars etc. Keep a reference to the output buffers.
  SmallVector<Value, 8> opViews;
  opViews.reserve(op->getNumOperands());
  SmallVector<std::pair<Value, Value>, 8> writebackViews;
  writebackViews.reserve(promotedBuffersAndViews->size());
  for (OpOperand &opOperand : op->getOpOperands()) {
    int64_t operandNumber = opOperand.getOperandNumber();
    if (options.subViews.count(operandNumber) != 0) {
      if (options.useFullTileBuffers[opOperand.get()])
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].fullLocalView);
      else
        opViews.push_back(
            (*promotedBuffersAndViews)[operandNumber].partialLocalView);
      if (operandNumber >= op.getNumDpsInputs())
        writebackViews.emplace_back(std::make_pair(
            opOperand.get(),
            (*promotedBuffersAndViews)[operandNumber].partialLocalView));
    } else {
      opViews.push_back(opOperand.get());
    }
  }
  op->setOperands(0, opViews.size(), opViews);

  OpBuilder::InsertionGuard guard(b);
  b.setInsertionPointAfter(op);
  // 3. Emit write-back for the promoted output views: copy the partial view.
  for (auto viewAndPartialLocalView : writebackViews) {
    if (failed(options.copyOutFn(b, viewAndPartialLocalView.second,
                                 viewAndPartialLocalView.first)))
      return failure();
  }

  // 4. Dealloc all local buffers.
  for (const auto &pi : *promotedBuffersAndViews)
    (void)options.deallocationFn(b, pi.second.fullLocalView);
  return op;
}

LogicalResult
mlir::linalg::promoteSubviewsPrecondition(Operation *op,
                                          LinalgPromotionOptions options) {
  LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
  // Transformation applies to buffers only.
  if (!linalgOp || !linalgOp.hasPureBufferSemantics())
    return failure();
  // Check that at least one of the requested operands is indeed a subview.
  for (OpOperand &opOperand : linalgOp->getOpOperands()) {
    auto sv =
        isa_and_nonnull<memref::SubViewOp>(opOperand.get().getDefiningOp());
    if (sv) {
      if (!options.operandsToPromote ||
          options.operandsToPromote->count(opOperand.getOperandNumber()))
        return success();
    }
  }
  // TODO: Check all subviews requested are bound by a static constant.
  // TODO: Check that the total footprint fits within a given size.
  return failure();
}

FailureOr<LinalgOp>
mlir::linalg::promoteSubViews(OpBuilder &builder, LinalgOp linalgOp,
                              const LinalgPromotionOptions &options) {
  LinalgOpInstancePromotionOptions linalgOptions(linalgOp, options);
  auto layout = DataLayout::closest(linalgOp);
  ImplicitLocOpBuilder b(linalgOp.getLoc(), builder);
  auto res = ::promoteSubViews(b, linalgOp, linalgOptions, layout);
  if (failed(res))
    return failure();
  return res;
}
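// Illustrative usage sketch, not part of the upstream file; it assumes the
// LinalgPromotionOptions setters declared in Transforms.h (e.g.
// setOperandsToPromote, setUseFullTileBuffersByDefault):
//   LinalgPromotionOptions options;
//   options.setOperandsToPromote({0, 1}).setUseFullTileBuffersByDefault();
//   if (succeeded(promoteSubviewsPrecondition(linalgOp, options))) {
//     OpBuilder b(linalgOp);
//     (void)promoteSubViews(b, linalgOp, options);
//   }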

/// Allocate the given subview to a memory address space in GPU by creating an
/// allocation operation and setting the memref type address space to the
/// desired address space.
static std::optional<Value> allocateSubviewGPUMemoryInAddressSpace(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    gpu::AddressSpace addressSpace) {
  OpBuilder::InsertionGuard guard(builder);

  func::FuncOp funcOp = subview->getParentOfType<func::FuncOp>();
  if (!funcOp)
    return std::nullopt;

  // The subview size bounds are expected to be constant; they specify the
  // shape of the allocation.
  SmallVector<int64_t> shape;
  for (Value bound : sizeBounds) {
    APInt value;
    if (!matchPattern(bound, m_ConstantInt(&value)))
      return std::nullopt;
    shape.push_back(value.getSExtValue());
  }

  builder.setInsertionPointToStart(&funcOp.front());
  auto type = MemRefType::get(
      shape, subview.getType().getElementType(), MemRefLayoutAttrInterface{},
      gpu::AddressSpaceAttr::get(builder.getContext(), addressSpace));
  Value buffer;
  if (addressSpace == gpu::GPUDialect::getWorkgroupAddressSpace()) {
    buffer = builder.create<memref::AllocOp>(funcOp.getLoc(), type);
  } else if (addressSpace == gpu::GPUDialect::getPrivateAddressSpace()) {
    buffer = builder.create<memref::AllocaOp>(funcOp.getLoc(), type);
  } else {
    return std::nullopt;
  }
  return buffer;
}
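// Illustrative sketch, not part of the upstream pass: assuming constant size
// bounds of 32x32 and an f32 element type, the workgroup path above creates
//   %0 = memref.alloc() : memref<32x32xf32, #gpu.address_space<workgroup>>
// at the start of the enclosing func.func, while the private path creates a
// memref.alloca with #gpu.address_space<private> instead.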

/// Allocate the subview in the GPU workgroup memory.
std::optional<Value> mlir::linalg::allocateWorkgroupMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds,
      gpu::GPUDialect::getWorkgroupAddressSpace());
}

/// In case of GPU group memory there is no need to deallocate.
LogicalResult mlir::linalg::deallocateWorkgroupMemory(OpBuilder &,
                                                      Value /*buffer*/) {
  return success();
}

/// Create memref copy operations and add gpu barrier guards before and after
/// the copy operation to ensure data integrity.
LogicalResult mlir::linalg::copyToWorkgroupMemory(OpBuilder &b, Value src,
                                                  Value dst) {
  b.create<gpu::BarrierOp>(src.getLoc());
  Operation *copyOp = b.create<memref::CopyOp>(src.getLoc(), src, dst);
  b.create<gpu::BarrierOp>(copyOp->getLoc());
  return success();
}

/// Allocate the subview in the GPU private memory.
std::optional<Value> mlir::linalg::allocateGPUPrivateMemory(
    OpBuilder &builder, memref::SubViewOp subview, ArrayRef<Value> sizeBounds,
    DataLayout &) {
  return allocateSubviewGPUMemoryInAddressSpace(
      builder, subview, sizeBounds, gpu::GPUDialect::getPrivateAddressSpace());
}

/// Normal copy between src and dst.
LogicalResult mlir::linalg::copyToGPUPrivateMemory(OpBuilder &b, Value src,
                                                   Value dst) {
  b.create<memref::CopyOp>(src.getLoc(), src, dst);
  return success();
}

/// In case of GPU private memory there is no need to deallocate since the
/// memory is freed when going out of scope.
LogicalResult mlir::linalg::deallocateGPUPrivateMemory(OpBuilder &,
                                                       Value /*buffer*/) {
  return success();
}
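// Illustrative usage sketch, not part of the upstream file; it assumes the
// setAllocationDeallocationFns and setCopyInOutFns setters declared in
// Transforms.h. Promoting operand 0 of a linalg op into workgroup memory
// would be wired up roughly as
//   LinalgPromotionOptions gpuOptions;
//   gpuOptions.setOperandsToPromote({0})
//       .setAllocationDeallocationFns(allocateWorkgroupMemory,
//                                     deallocateWorkgroupMemory)
//       .setCopyInOutFns(copyToWorkgroupMemory, copyToWorkgroupMemory);
//   (void)promoteSubViews(builder, linalgOp, gpuOptions);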