MLIR  14.0.0git
KernelOutlining.cpp
Go to the documentation of this file.
1 //===- KernelOutlining.cpp - Implementation of GPU kernel outlining -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the GPU dialect kernel outlining pass.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "PassDetail.h"
15 #include "mlir/Dialect/DLTI/DLTI.h"
18 #include "mlir/Dialect/GPU/Utils.h"
22 #include "mlir/IR/Builders.h"
23 #include "mlir/IR/SymbolTable.h"
24 #include "mlir/Parser.h"
25 #include "mlir/Support/LLVM.h"
27 
28 using namespace mlir;
29 
30 template <typename OpTy>
31 static void createForAllDimensions(OpBuilder &builder, Location loc,
32  SmallVectorImpl<Value> &values) {
33  for (auto dim : {gpu::Dimension::x, gpu::Dimension::y, gpu::Dimension::z})
34  values.push_back(builder.create<OpTy>(loc, builder.getIndexType(), dim));
35 }
36 
37 /// Adds operations generating block/thread ids and grid/block dimensions at the
38 /// beginning of the `launchFuncOpBody` region. Add mapping from argument in
39 /// entry block of `launchOpBody`, to the corresponding result value of the
40 /// added operations.
41 static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
42  Region &launchOpBody,
43  BlockAndValueMapping &map) {
44  OpBuilder builder(loc->getContext());
45  Block &firstBlock = launchOpBody.front();
46  builder.setInsertionPointToStart(&launchFuncOpBody.front());
47  SmallVector<Value, 12> indexOps;
48  createForAllDimensions<gpu::BlockIdOp>(builder, loc, indexOps);
49  createForAllDimensions<gpu::ThreadIdOp>(builder, loc, indexOps);
50  createForAllDimensions<gpu::GridDimOp>(builder, loc, indexOps);
51  createForAllDimensions<gpu::BlockDimOp>(builder, loc, indexOps);
52  // Replace the leading 12 function args with the respective thread/block index
53  // operations. Iterate backwards since args are erased and indices change.
54  for (const auto &indexOp : enumerate(indexOps))
55  map.map(firstBlock.getArgument(indexOp.index()), indexOp.value());
56 }
57 
58 /// Identifies operations that are beneficial to sink into kernels. These
59 /// operations may not have side-effects, as otherwise sinking (and hence
60 /// duplicating them) is not legal.
61 static bool isSinkingBeneficiary(Operation *op) {
62  return isa<arith::ConstantOp, ConstantOp, memref::DimOp, SelectOp,
63  arith::CmpIOp>(op);
64 }
65 
66 /// For a given operation `op`, computes whether it is beneficial to sink the
67 /// operation into the kernel. An operation can be sunk if doing so does not
68 /// introduce new kernel arguments. Whether a value is already available in the
69 /// kernel (and hence does not introduce new arguments) is checked by
70 /// querying `existingDependencies` and `availableValues`.
71 /// If an operand is not yet available, we recursively check whether it can be
72 /// made available by siking its defining op.
73 /// Operations that are indentified for sinking are added to `beneficiaryOps` in
74 /// the order they should appear in the kernel. Furthermore, `availableValues`
75 /// is updated with results that will be available after sinking the identified
76 /// ops.
77 static bool
79  const SetVector<Value> &existingDependencies,
80  SetVector<Operation *> &beneficiaryOps,
81  llvm::SmallPtrSetImpl<Value> &availableValues) {
82  if (beneficiaryOps.count(op))
83  return true;
84 
85  if (!isSinkingBeneficiary(op))
86  return false;
87 
88  for (Value operand : op->getOperands()) {
89  // It is already visible in the kernel, keep going.
90  if (availableValues.count(operand))
91  continue;
92  // Else check whether it can be made available via sinking or already is a
93  // dependency.
94  Operation *definingOp = operand.getDefiningOp();
95  if ((!definingOp ||
96  !extractBeneficiaryOps(definingOp, existingDependencies,
97  beneficiaryOps, availableValues)) &&
98  !existingDependencies.count(operand))
99  return false;
100  }
101  // We will sink the operation, mark its results as now available.
102  beneficiaryOps.insert(op);
103  for (Value result : op->getResults())
104  availableValues.insert(result);
105  return true;
106 }
107 
109  Region &launchOpBody = launchOp.body();
110 
111  // Identify uses from values defined outside of the scope of the launch
112  // operation.
113  SetVector<Value> sinkCandidates;
114  getUsedValuesDefinedAbove(launchOpBody, sinkCandidates);
115 
116  SetVector<Operation *> toBeSunk;
117  llvm::SmallPtrSet<Value, 4> availableValues;
118  for (Value operand : sinkCandidates) {
119  Operation *operandOp = operand.getDefiningOp();
120  if (!operandOp)
121  continue;
122  extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
123  }
124 
125  // Insert operations so that the defs get cloned before uses.
127  OpBuilder builder(launchOpBody);
128  for (Operation *op : toBeSunk) {
129  Operation *clonedOp = builder.clone(*op, map);
130  // Only replace uses within the launch op.
131  for (auto pair : llvm::zip(op->getResults(), clonedOp->getResults()))
132  replaceAllUsesInRegionWith(std::get<0>(pair), std::get<1>(pair),
133  launchOp.body());
134  }
135  return success();
136 }
137 
138 /// Outline the `gpu.launch` operation body into a kernel function. Replace
139 /// `gpu.terminator` operations by `gpu.return` in the generated function.
140 static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp,
141  StringRef kernelFnName,
142  SetVector<Value> &operands) {
143  Location loc = launchOp.getLoc();
144  // Create a builder with no insertion point, insertion will happen separately
145  // due to symbol table manipulation.
146  OpBuilder builder(launchOp.getContext());
147  Region &launchOpBody = launchOp.body();
148 
149  // Identify uses from values defined outside of the scope of the launch
150  // operation.
151  getUsedValuesDefinedAbove(launchOpBody, operands);
152 
153  // Create the gpu.func operation.
154  SmallVector<Type, 4> kernelOperandTypes;
155  kernelOperandTypes.reserve(operands.size());
156  for (Value operand : operands) {
157  kernelOperandTypes.push_back(operand.getType());
158  }
159  FunctionType type =
160  FunctionType::get(launchOp.getContext(), kernelOperandTypes, {});
161  auto outlinedFunc = builder.create<gpu::GPUFuncOp>(loc, kernelFnName, type);
162  outlinedFunc->setAttr(gpu::GPUDialect::getKernelFuncAttrName(),
163  builder.getUnitAttr());
165 
166  // Map the arguments corresponding to the launch parameters like blockIdx,
167  // threadIdx, etc.
168  Region &outlinedFuncBody = outlinedFunc.body();
169  injectGpuIndexOperations(loc, outlinedFuncBody, launchOpBody, map);
170 
171  // Map arguments from gpu.launch region to the arguments of the gpu.func
172  // operation.
173  Block &entryBlock = outlinedFuncBody.front();
174  for (const auto &operand : enumerate(operands))
175  map.map(operand.value(), entryBlock.getArgument(operand.index()));
176 
177  // Clone the region of the gpu.launch operation into the gpu.func operation.
178  // TODO: If cloneInto can be modified such that if a mapping for
179  // a block exists, that block will be used to clone operations into (at the
180  // end of the block), instead of creating a new block, this would be much
181  // cleaner.
182  launchOpBody.cloneInto(&outlinedFuncBody, map);
183 
184  // Branch from entry of the gpu.func operation to the block that is cloned
185  // from the entry block of the gpu.launch operation.
186  Block &launchOpEntry = launchOpBody.front();
187  Block *clonedLaunchOpEntry = map.lookup(&launchOpEntry);
188  builder.setInsertionPointToEnd(&entryBlock);
189  builder.create<BranchOp>(loc, clonedLaunchOpEntry);
190 
191  outlinedFunc.walk([](gpu::TerminatorOp op) {
192  OpBuilder replacer(op);
193  replacer.create<gpu::ReturnOp>(op.getLoc());
194  op.erase();
195  });
196  return outlinedFunc;
197 }
198 
199 gpu::GPUFuncOp mlir::outlineKernelFunc(gpu::LaunchOp launchOp,
200  StringRef kernelFnName,
201  llvm::SmallVectorImpl<Value> &operands) {
202  DenseSet<Value> inputOperandSet;
203  inputOperandSet.insert(operands.begin(), operands.end());
204  SetVector<Value> operandSet(operands.begin(), operands.end());
205  auto funcOp = outlineKernelFuncImpl(launchOp, kernelFnName, operandSet);
206  for (auto operand : operandSet) {
207  if (!inputOperandSet.count(operand))
208  operands.push_back(operand);
209  }
210  return funcOp;
211 }
212 
213 /// Replace `gpu.launch` operations with an `gpu.launch_func` operation
214 /// launching `kernelFunc`. The kernel func contains the body of the
215 /// `gpu.launch` with constant region arguments inlined.
216 static void convertToLaunchFuncOp(gpu::LaunchOp launchOp,
217  gpu::GPUFuncOp kernelFunc,
218  ValueRange operands) {
219  OpBuilder builder(launchOp);
220  // The launch op has an optional dynamic shared memory size. If it doesn't
221  // exist, we use zero.
222  builder.create<gpu::LaunchFuncOp>(
223  launchOp.getLoc(), kernelFunc, launchOp.getGridSizeOperandValues(),
224  launchOp.getBlockSizeOperandValues(), launchOp.dynamicSharedMemorySize(),
225  operands);
226  launchOp.erase();
227 }
228 
229 namespace {
230 /// Pass that moves the kernel of each LaunchOp into its separate nested module.
231 ///
232 /// This pass moves the kernel code of each LaunchOp into a function created
233 /// inside a nested module. It also creates an external function of the same
234 /// name in the parent module.
235 ///
236 /// The gpu.modules are intended to be compiled to a cubin blob independently in
237 /// a separate pass. The external functions can then be annotated with the
238 /// symbol of the cubin accessor function.
239 class GpuKernelOutliningPass
240  : public GpuKernelOutliningBase<GpuKernelOutliningPass> {
241 public:
242  GpuKernelOutliningPass(StringRef dlStr) {
243  if (!dlStr.empty() && !dataLayoutStr.hasValue())
244  dataLayoutStr = dlStr.str();
245  }
246 
247  GpuKernelOutliningPass(const GpuKernelOutliningPass &other)
248  : dataLayoutSpec(other.dataLayoutSpec) {
249  dataLayoutStr = other.dataLayoutStr;
250  }
251 
252  LogicalResult initialize(MLIRContext *context) override {
253  // Initialize the data layout specification from the data layout string.
254  if (!dataLayoutStr.empty()) {
255  Attribute resultAttr = mlir::parseAttribute(dataLayoutStr, context);
256  if (!resultAttr)
257  return failure();
258 
259  dataLayoutSpec = resultAttr.dyn_cast<DataLayoutSpecInterface>();
260  if (!dataLayoutSpec)
261  return failure();
262  }
263 
264  return success();
265  }
266 
267  void runOnOperation() override {
268  SymbolTable symbolTable(getOperation());
269  bool modified = false;
270  for (auto func : getOperation().getOps<FuncOp>()) {
271  // Insert just after the function.
272  Block::iterator insertPt(func->getNextNode());
273  auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
274  SetVector<Value> operands;
275  std::string kernelFnName =
276  Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
277 
278  // Pull in instructions that can be sunk
280  return WalkResult::interrupt();
281  gpu::GPUFuncOp outlinedFunc =
282  outlineKernelFuncImpl(op, kernelFnName, operands);
283 
284  // Create nested module and insert outlinedFunc. The module will
285  // originally get the same name as the function, but may be renamed on
286  // insertion into the parent module.
287  auto kernelModule = createKernelModule(outlinedFunc, symbolTable);
288  symbolTable.insert(kernelModule, insertPt);
289 
290  // Potentially changes signature, pulling in constants.
291  convertToLaunchFuncOp(op, outlinedFunc, operands.getArrayRef());
292  modified = true;
293  return WalkResult::advance();
294  });
295  if (funcWalkResult.wasInterrupted())
296  return signalPassFailure();
297  }
298 
299  // If any new module was inserted in this module, annotate this module as
300  // a container module.
301  if (modified)
302  getOperation()->setAttr(gpu::GPUDialect::getContainerModuleAttrName(),
303  UnitAttr::get(&getContext()));
304  }
305 
306 private:
307  /// Returns a gpu.module containing kernelFunc and all callees (recursive).
308  gpu::GPUModuleOp createKernelModule(gpu::GPUFuncOp kernelFunc,
309  const SymbolTable &parentSymbolTable) {
310  // TODO: This code cannot use an OpBuilder because it must be inserted into
311  // a SymbolTable by the caller. SymbolTable needs to be refactored to
312  // prevent manual building of Ops with symbols in code using SymbolTables
313  // and then this needs to use the OpBuilder.
314  auto *context = getOperation().getContext();
315  OpBuilder builder(context);
316  auto kernelModule = builder.create<gpu::GPUModuleOp>(kernelFunc.getLoc(),
317  kernelFunc.getName());
318 
319  // If a valid data layout spec was provided, attach it to the kernel module.
320  // Otherwise, the default data layout will be used.
321  if (dataLayoutSpec)
322  kernelModule->setAttr(DLTIDialect::kDataLayoutAttrName, dataLayoutSpec);
323 
324  SymbolTable symbolTable(kernelModule);
325  symbolTable.insert(kernelFunc);
326 
327  SmallVector<Operation *, 8> symbolDefWorklist = {kernelFunc};
328  while (!symbolDefWorklist.empty()) {
329  if (Optional<SymbolTable::UseRange> symbolUses =
330  SymbolTable::getSymbolUses(symbolDefWorklist.pop_back_val())) {
331  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
332  StringRef symbolName =
333  symbolUse.getSymbolRef().cast<FlatSymbolRefAttr>().getValue();
334  if (symbolTable.lookup(symbolName))
335  continue;
336 
337  Operation *symbolDefClone =
338  parentSymbolTable.lookup(symbolName)->clone();
339  symbolDefWorklist.push_back(symbolDefClone);
340  symbolTable.insert(symbolDefClone);
341  }
342  }
343  }
344 
345  return kernelModule;
346  }
347 
348  Option<std::string> dataLayoutStr{
349  *this, "data-layout-str",
350  llvm::cl::desc("String containing the data layout specification to be "
351  "attached to the GPU kernel module")};
352 
353  DataLayoutSpecInterface dataLayoutSpec;
354 };
355 
356 } // namespace
357 
358 std::unique_ptr<OperationPass<ModuleOp>>
359 mlir::createGpuKernelOutliningPass(StringRef dataLayoutStr) {
360  return std::make_unique<GpuKernelOutliningPass>(dataLayoutStr);
361 }
Include the generated interface declarations.
Attribute parseAttribute(llvm::StringRef attrStr, MLIRContext *context)
This parses a single MLIR attribute to an MLIR context if it was valid.
OpTy create(Location location, Args &&...args)
Create an operation of specific op type at the current insertion point.
Definition: Builders.h:430
This class contains a list of basic blocks and a link to the parent operation it is attached to...
Definition: Region.h:26
static bool extractBeneficiaryOps(Operation *op, const SetVector< Value > &existingDependencies, SetVector< Operation *> &beneficiaryOps, llvm::SmallPtrSetImpl< Value > &availableValues)
For a given operation op, computes whether it is beneficial to sink the operation into the kernel...
static void createForAllDimensions(OpBuilder &builder, Location loc, SmallVectorImpl< Value > &values)
Operation is a basic unit of execution within MLIR.
Definition: Operation.h:28
operand_range getOperands()
Returns an iterator over the underlying Values.
Definition: Operation.h:247
Block represents an ordered list of Operations.
Definition: Block.h:29
Block & front()
Definition: Region.h:65
A symbol reference with a reference path containing a single element.
static Optional< UseRange > getSymbolUses(Operation *from)
Get an iterator range for all of the uses, for any symbol, that are nested within the given operation...
Operation * clone(Operation &op, BlockAndValueMapping &mapper)
Creates a deep copy of the specified operation, remapping any operands that use values outside of the...
Definition: Builders.cpp:457
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value...
Definition: LogicalResult.h:72
static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody, Region &launchOpBody, BlockAndValueMapping &map)
Adds operations generating block/thread ids and grid/block dimensions at the beginning of the launchF...
Operation & front()
Definition: Block.h:144
T lookup(T from) const
Lookup a mapped value within the map.
BlockArgument getArgument(unsigned i)
Definition: Block.h:120
StringAttr insert(Operation *symbol, Block::iterator insertPt={})
Insert a new symbol into the table, and rename it as necessary to avoid collisions.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition: Location.h:48
void map(Block *from, Block *to)
Inserts a new mapping for &#39;from&#39; to &#39;to&#39;.
static void convertToLaunchFuncOp(gpu::LaunchOp launchOp, gpu::GPUFuncOp kernelFunc, ValueRange operands)
Replace gpu.launch operations with an gpu.launch_func operation launching kernelFunc.
LogicalResult success(bool isSuccess=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:56
This class represents an efficient way to signal success or failure.
Definition: LogicalResult.h:26
LogicalResult failure(bool isFailure=true)
Utility function to generate a LogicalResult.
Definition: LogicalResult.h:62
OpListType::iterator iterator
Definition: Block.h:131
MLIRContext * getContext() const
Return the context this attribute belongs to.
Definition: Attributes.cpp:20
void getUsedValuesDefinedAbove(Region &region, Region &limit, SetVector< Value > &values)
Fill values with a list of values defined at the ancestors of the limit region and used within region...
Definition: RegionUtils.cpp:59
LogicalResult sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp)
Sink operations into the launchOp to reduce the number of values that are used within the region of t...
Attributes are known-constant values of operations.
Definition: Attributes.h:24
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:206
static bool isSinkingBeneficiary(Operation *op)
Identifies operations that are beneficial to sink into kernels.
void replaceAllUsesInRegionWith(Value orig, Value replacement, Region &region)
Replace all uses of orig within the given region with replacement.
Definition: RegionUtils.cpp:24
static WalkResult advance()
Definition: Visitors.h:51
static gpu::GPUFuncOp outlineKernelFuncImpl(gpu::LaunchOp launchOp, StringRef kernelFnName, SetVector< Value > &operands)
Outline the gpu.launch operation body into a kernel function.
static WalkResult interrupt()
Definition: Visitors.h:50
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:84
std::unique_ptr< OperationPass< ModuleOp > > createGpuKernelOutliningPass(StringRef dataLayoutStr=StringRef())
Replaces gpu.launch with gpu.launch_func by moving the region into a separate kernel function...
IndexType getIndexType()
Definition: Builders.cpp:48
Operation * lookup(StringRef name) const
Look up a symbol with the specified name, returning null if no such name exists.
U dyn_cast() const
Definition: Attributes.h:117
MLIRContext is the top-level object for a collection of MLIR operations.
Definition: MLIRContext.h:55
This class allows for representing and managing the symbol table used by operations with the 'SymbolT...
Definition: SymbolTable.h:23
gpu::GPUFuncOp outlineKernelFunc(gpu::LaunchOp launchOp, StringRef kernelFnName, SmallVectorImpl< Value > &operands)
Get a gpu.func created from outlining the region of a gpu.launch op with the given kernelFnName...
Operation * clone(BlockAndValueMapping &mapper)
Create a deep copy of this operation, remapping any operands that use values outside of the operation...
Definition: Operation.cpp:564
This class represents a specific symbol use.
Definition: SymbolTable.h:144
result_range getResults()
Definition: Operation.h:284
This class helps build Operations.
Definition: Builders.h:177
This class provides an abstraction over the different types of ranges over Values.