MLIR 23.0.0git
SelectObjectAttr.cpp
Go to the documentation of this file.
1//===- ObjectHandler.cpp - Implements base ObjectManager attributes -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the `OffloadingLLVMTranslationAttrInterface` for the
10// `SelectObject` attribute.
11//
12//===----------------------------------------------------------------------===//
13
19
20#include "llvm/ADT/ScopeExit.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/IRBuilder.h"
23#include "llvm/IR/LLVMContext.h"
24#include "llvm/IR/Module.h"
25#include "llvm/Support/FormatVariadic.h"
26#include "llvm/Transforms/Utils/ModuleUtils.h"
27
28using namespace mlir;
29
namespace {
// Implementation of the `OffloadingLLVMTranslationAttrInterface` model for the
// `#gpu.select_object` offloading handler attribute. It embeds exactly one of
// the objects carried by a `gpu.binary` into the host LLVM module and lowers
// `gpu.launch_func` to calls into the mgpu* runtime wrappers.
class SelectObjectAttrImpl
    : public gpu::OffloadingLLVMTranslationAttrInterface::FallbackModel<
          SelectObjectAttrImpl> {
  // Returns the selected object for embedding, or null (after emitting an
  // error on `op`) when no object matches the attribute's target.
  gpu::ObjectAttr getSelectedObject(gpu::BinaryOp op) const;

public:
  // Translates a `gpu.binary`, embedding the binary into a host LLVM module as
  // global binary string which gets loaded/unloaded into a global module
  // object through a global ctor/dtor.
  LogicalResult embedBinary(Attribute attribute, Operation *operation,
                            llvm::IRBuilderBase &builder,
                            LLVM::ModuleTranslation &moduleTranslation) const;

  // Translates a `gpu.launch_func` to a sequence of LLVM instructions resulting
  // in a kernel launch call.
  LogicalResult launchKernel(Attribute attribute,
                             Operation *launchFuncOperation,
                             Operation *binaryOperation,
                             llvm::IRBuilderBase &builder,
                             LLVM::ModuleTranslation &moduleTranslation) const;
};
} // namespace
55
56gpu::ObjectAttr
57SelectObjectAttrImpl::getSelectedObject(gpu::BinaryOp op) const {
58 ArrayRef<Attribute> objects = op.getObjectsAttr().getValue();
59
60 // Obtain the index of the object to select.
61 int64_t index = -1;
62 if (Attribute target =
63 cast<gpu::SelectObjectAttr>(op.getOffloadingHandlerAttr())
64 .getTarget()) {
65 // If the target attribute is a number it is the index. Otherwise compare
66 // the attribute to every target inside the object array to find the index.
67 if (auto indexAttr = mlir::dyn_cast<IntegerAttr>(target)) {
68 index = indexAttr.getInt();
69 } else {
70 for (auto [i, attr] : llvm::enumerate(objects)) {
71 auto obj = mlir::dyn_cast<gpu::ObjectAttr>(attr);
72 if (obj.getTarget() == target) {
73 index = i;
74 }
75 }
76 }
77 } else {
78 // If the target attribute is null then it's selecting the first object in
79 // the object array.
80 index = 0;
81 }
82
83 if (index < 0 || index >= static_cast<int64_t>(objects.size())) {
84 op->emitError("the requested target object couldn't be found");
85 return nullptr;
86 }
87 return mlir::dyn_cast<gpu::ObjectAttr>(objects[index]);
88}
89
90static Twine getModuleIdentifier(StringRef moduleName) {
91 return moduleName + "_module";
92}
93
namespace llvm {
// Embeds `object` into `module` as an internal global constant named
// `<moduleName>_binary`, and emits a global ctor/dtor pair that load/unload
// it as a GPU module object (kept in the `<moduleName>_module` global) via
// the mgpu* runtime wrapper functions.
static LogicalResult embedBinaryImpl(StringRef moduleName,
                                     gpu::ObjectAttr object, Module &module) {

  // Embed the object as a global string.
  // Add null for assembly output for JIT paths that expect null-terminated
  // strings.
  bool addNull = (object.getFormat() == gpu::CompilationTarget::Assembly);
  StringRef serializedStr = object.getObject().getValue();
  Constant *serializedCst =
      ConstantDataArray::getString(module.getContext(), serializedStr, addNull);
  GlobalVariable *serializedObj =
      new GlobalVariable(module, serializedCst->getType(), true,
                         GlobalValue::LinkageTypes::InternalLinkage,
                         serializedCst, moduleName + "_binary");
  serializedObj->setAlignment(MaybeAlign(8));
  serializedObj->setUnnamedAddr(GlobalValue::UnnamedAddr::None);

  // Default JIT optimization level.
  auto optLevel = APInt::getZero(32);

  if (DictionaryAttr objectProps = object.getProperties()) {
    // Place the binary in a specific ELF section when requested via the
    // object's properties dictionary.
    if (auto section = dyn_cast_or_null<StringAttr>(
            objectProps.get(gpu::elfSectionName))) {
      serializedObj->setSection(section.getValue());
    }
    // Check if there's an optimization level embedded in the object.
    if (auto optAttr = dyn_cast_or_null<IntegerAttr>(objectProps.get("O")))
      optLevel = optAttr.getValue();
  }

  IRBuilder<> builder(module.getContext());
  auto *i32Ty = builder.getInt32Ty();
  auto *i64Ty = builder.getInt64Ty();
  auto *ptrTy = builder.getPtrTy(0);
  auto *voidTy = builder.getVoidTy();

  // Embed the module as a global object.
  auto *modulePtr = new GlobalVariable(
      module, ptrTy, /*isConstant=*/false, GlobalValue::InternalLinkage,
      /*Initializer=*/ConstantPointerNull::get(ptrTy),
      getModuleIdentifier(moduleName));

  // Ctor: load the serialized binary at startup and store the resulting
  // module handle in `modulePtr`.
  auto *loadFn = Function::Create(FunctionType::get(voidTy, /*IsVarArg=*/false),
                                  GlobalValue::InternalLinkage,
                                  moduleName + "_load", module);
  loadFn->setSection(".text.startup");
  auto *loadBlock = BasicBlock::Create(module.getContext(), "entry", loadFn);
  builder.SetInsertPoint(loadBlock);
  Value *moduleObj = [&] {
    Constant *binarySize =
        ConstantInt::get(i64Ty, serializedStr.size() + (addNull ? 1 : 0));
    if (object.getFormat() == gpu::CompilationTarget::Assembly) {
      // Assembly objects are JIT-compiled at load time with the requested
      // optimization level.
      FunctionCallee moduleLoadFn = module.getOrInsertFunction(
          "mgpuModuleLoadJIT", FunctionType::get(ptrTy,
                                                 {
                                                     ptrTy,
                                                     i32Ty,
                                                     i64Ty,
                                                 },
                                                 false));

      Constant *optValue = ConstantInt::get(i32Ty, optLevel);
      return builder.CreateCall(moduleLoadFn,
                                {serializedObj, optValue, binarySize});
    }
    FunctionCallee moduleLoadFn = module.getOrInsertFunction(
        "mgpuModuleLoad", FunctionType::get(ptrTy, {ptrTy, i64Ty}, false));
    return builder.CreateCall(moduleLoadFn, {serializedObj, binarySize});
  }();
  builder.CreateStore(moduleObj, modulePtr);
  builder.CreateRetVoid();
  appendToGlobalCtors(module, loadFn, /*Priority=*/123);

  // Dtor: unload the module object at shutdown.
  auto *unloadFn = Function::Create(
      FunctionType::get(voidTy, /*IsVarArg=*/false),
      GlobalValue::InternalLinkage, moduleName + "_unload", module);
  unloadFn->setSection(".text.startup");
  auto *unloadBlock =
      BasicBlock::Create(module.getContext(), "entry", unloadFn);
  builder.SetInsertPoint(unloadBlock);
  FunctionCallee moduleUnloadFn = module.getOrInsertFunction(
      "mgpuModuleUnload", FunctionType::get(voidTy, ptrTy, false));
  builder.CreateCall(moduleUnloadFn, builder.CreateLoad(ptrTy, modulePtr));
  builder.CreateRetVoid();
  appendToGlobalDtors(module, unloadFn, /*Priority=*/123);

  return success();
}
} // namespace llvm
184
185LogicalResult SelectObjectAttrImpl::embedBinary(
186 Attribute attribute, Operation *operation, llvm::IRBuilderBase &builder,
187 LLVM::ModuleTranslation &moduleTranslation) const {
188 assert(operation && "The binary operation must be non null.");
189 if (!operation)
190 return failure();
191
192 auto op = mlir::dyn_cast<gpu::BinaryOp>(operation);
193 if (!op) {
194 operation->emitError("operation must be a GPU binary");
195 return failure();
196 }
197
198 gpu::ObjectAttr object = getSelectedObject(op);
199 if (!object)
200 return failure();
201
202 return embedBinaryImpl(op.getName(), object,
203 *moduleTranslation.getLLVMModule());
204}
205
namespace llvm {
namespace {
// Helper that emits the LLVM IR sequence for a single kernel launch into the
// host module, using the mgpu* runtime wrapper functions.
class LaunchKernel {
public:
  LaunchKernel(Module &module, IRBuilderBase &builder,
               mlir::LLVM::ModuleTranslation &moduleTranslation);
  // Get the kernel launch callee.
  FunctionCallee getKernelLaunchFn();

  // Get the cluster kernel launch callee.
  FunctionCallee getClusterKernelLaunchFn();

  // Get the module function callee.
  FunctionCallee getModuleFunctionFn();

  // Get the stream create callee.
  FunctionCallee getStreamCreateFn();

  // Get the stream destroy callee.
  FunctionCallee getStreamDestroyFn();

  // Get the stream sync callee.
  FunctionCallee getStreamSyncFn();

  // Get or create the function name global string.
  Value *getOrCreateFunctionName(StringRef moduleName, StringRef kernelName);

  // Create the void* kernel array for passing the arguments.
  Value *createKernelArgArray(mlir::gpu::LaunchFuncOp op);

  // Create the full kernel launch.
  llvm::LogicalResult createKernelLaunch(mlir::gpu::LaunchFuncOp op,
                                         mlir::gpu::ObjectAttr object);

private:
  Module &module;
  IRBuilderBase &builder;
  mlir::LLVM::ModuleTranslation &moduleTranslation;
  // Frequently used LLVM types, cached by the constructor.
  Type *i32Ty{};
  Type *i64Ty{};
  Type *voidTy{};
  Type *intPtrTy{};
  PointerType *ptrTy{};
};
} // namespace
} // namespace llvm
252
253LogicalResult SelectObjectAttrImpl::launchKernel(
254 Attribute attribute, Operation *launchFuncOperation,
255 Operation *binaryOperation, llvm::IRBuilderBase &builder,
256 LLVM::ModuleTranslation &moduleTranslation) const {
257
258 assert(launchFuncOperation && "The launch func operation must be non null.");
259 if (!launchFuncOperation)
260 return failure();
261
262 auto launchFuncOp = mlir::dyn_cast<gpu::LaunchFuncOp>(launchFuncOperation);
263 if (!launchFuncOp) {
264 launchFuncOperation->emitError("operation must be a GPU launch func Op.");
265 return failure();
266 }
267
268 auto binOp = mlir::dyn_cast<gpu::BinaryOp>(binaryOperation);
269 if (!binOp) {
270 binaryOperation->emitError("operation must be a GPU binary.");
271 return failure();
272 }
273 gpu::ObjectAttr object = getSelectedObject(binOp);
274 if (!object)
275 return failure();
276
277 return llvm::LaunchKernel(*moduleTranslation.getLLVMModule(), builder,
278 moduleTranslation)
279 .createKernelLaunch(launchFuncOp, object);
280}
281
282llvm::LaunchKernel::LaunchKernel(
283 Module &module, IRBuilderBase &builder,
284 mlir::LLVM::ModuleTranslation &moduleTranslation)
285 : module(module), builder(builder), moduleTranslation(moduleTranslation) {
286 i32Ty = builder.getInt32Ty();
287 i64Ty = builder.getInt64Ty();
288 ptrTy = builder.getPtrTy(0);
289 voidTy = builder.getVoidTy();
290 intPtrTy = builder.getIntPtrTy(module.getDataLayout());
291}
292
293llvm::FunctionCallee llvm::LaunchKernel::getKernelLaunchFn() {
294 return module.getOrInsertFunction(
295 "mgpuLaunchKernel",
296 FunctionType::get(voidTy,
297 ArrayRef<Type *>({ptrTy, intPtrTy, intPtrTy, intPtrTy,
298 intPtrTy, intPtrTy, intPtrTy, i32Ty,
299 ptrTy, ptrTy, ptrTy, i64Ty}),
300 false));
301}
302
303llvm::FunctionCallee llvm::LaunchKernel::getClusterKernelLaunchFn() {
304 return module.getOrInsertFunction(
305 "mgpuLaunchClusterKernel",
306 FunctionType::get(
307 voidTy,
308 ArrayRef<Type *>({ptrTy, intPtrTy, intPtrTy, intPtrTy, intPtrTy,
309 intPtrTy, intPtrTy, intPtrTy, intPtrTy, intPtrTy,
310 i32Ty, ptrTy, ptrTy, ptrTy}),
311 false));
312}
313
314llvm::FunctionCallee llvm::LaunchKernel::getModuleFunctionFn() {
315 return module.getOrInsertFunction(
316 "mgpuModuleGetFunction",
317 FunctionType::get(ptrTy, ArrayRef<Type *>({ptrTy, ptrTy}), false));
318}
319
320llvm::FunctionCallee llvm::LaunchKernel::getStreamCreateFn() {
321 return module.getOrInsertFunction("mgpuStreamCreate",
322 FunctionType::get(ptrTy, false));
323}
324
325llvm::FunctionCallee llvm::LaunchKernel::getStreamDestroyFn() {
326 return module.getOrInsertFunction(
327 "mgpuStreamDestroy",
328 FunctionType::get(voidTy, ArrayRef<Type *>({ptrTy}), false));
329}
330
331llvm::FunctionCallee llvm::LaunchKernel::getStreamSyncFn() {
332 return module.getOrInsertFunction(
333 "mgpuStreamSynchronize",
334 FunctionType::get(voidTy, ArrayRef<Type *>({ptrTy}), false));
335}
336
337// Generates an LLVM IR dialect global that contains the name of the given
338// kernel function as a C string, and returns a pointer to its beginning.
339llvm::Value *llvm::LaunchKernel::getOrCreateFunctionName(StringRef moduleName,
340 StringRef kernelName) {
341 std::string globalName =
342 std::string(formatv("{0}_{1}_name", moduleName, kernelName));
343
344 if (GlobalVariable *gv = module.getGlobalVariable(globalName, true))
345 return gv;
346
347 return builder.CreateGlobalString(kernelName, globalName);
348}
349
350// Creates a struct containing all kernel parameters on the stack and returns
351// an array of type-erased pointers to the fields of the struct. The array can
352// then be passed to the CUDA / ROCm (HIP) kernel launch calls.
353// The generated code is essentially as follows:
354//
355// %struct = alloca(sizeof(struct { Parameters... }))
356// %array = alloca(NumParameters * sizeof(void *))
357// for (i : [0, NumParameters))
358// %fieldPtr = llvm.getelementptr %struct[0, i]
359// llvm.store parameters[i], %fieldPtr
360// %elementPtr = llvm.getelementptr %array[i]
361// llvm.store %fieldPtr, %elementPtr
362// return %array
363llvm::Value *
364llvm::LaunchKernel::createKernelArgArray(mlir::gpu::LaunchFuncOp op) {
365 SmallVector<Value *> args =
366 moduleTranslation.lookupValues(op.getKernelOperands());
367 SmallVector<Type *> structTypes(args.size(), nullptr);
368
369 for (auto [i, arg] : llvm::enumerate(args))
370 structTypes[i] = arg->getType();
371
372 Type *structTy = StructType::create(module.getContext(), structTypes);
373 Value *argStruct = builder.CreateAlloca(structTy, 0u);
374 Value *argArray = builder.CreateAlloca(
375 ptrTy, ConstantInt::get(intPtrTy, structTypes.size()));
376
377 for (auto [i, arg] : enumerate(args)) {
378 Value *structMember = builder.CreateStructGEP(structTy, argStruct, i);
379 builder.CreateStore(arg, structMember);
380 Value *arrayMember = builder.CreateConstGEP1_32(ptrTy, argArray, i);
381 builder.CreateStore(structMember, arrayMember);
382 }
383 return argArray;
384}
385
// Emits LLVM IR to launch a kernel function:
// %1 = load %global_module_object
// %2 = call @mgpuModuleGetFunction(%1, %global_kernel_name)
// %3 = call @mgpuStreamCreate()
// %4 = <see createKernelArgArray()>
// call @mgpuLaunchKernel(%2, ..., %3, %4, ...)
// call @mgpuStreamSynchronize(%3)
// call @mgpuStreamDestroy(%3)
llvm::LogicalResult
llvm::LaunchKernel::createKernelLaunch(mlir::gpu::LaunchFuncOp op,
                                       mlir::gpu::ObjectAttr object) {
  // Maps an MLIR value to its already-translated LLVM counterpart; launch
  // operands must have been translated before this runs.
  auto llvmValue = [&](mlir::Value value) -> Value * {
    Value *v = moduleTranslation.lookupValue(value);
    assert(v && "Value has not been translated.");
    return v;
  };

  // Get grid dimensions.
  mlir::gpu::KernelDim3 grid = op.getGridSizeOperandValues();
  Value *gx = llvmValue(grid.x), *gy = llvmValue(grid.y),
        *gz = llvmValue(grid.z);

  // Get block dimensions.
  mlir::gpu::KernelDim3 block = op.getBlockSizeOperandValues();
  Value *bx = llvmValue(block.x), *by = llvmValue(block.y),
        *bz = llvmValue(block.z);

  // Get dynamic shared memory size, defaulting to zero when not specified.
  Value *dynamicMemorySize = nullptr;
  if (mlir::Value dynSz = op.getDynamicSharedMemorySize())
    dynamicMemorySize = llvmValue(dynSz);
  else
    dynamicMemorySize = ConstantInt::get(i32Ty, 0);

  // Create the argument array.
  Value *argArray = createKernelArgArray(op);

  // Load the kernel function from the module object global that the embed
  // step created; fail if the matching binary was never embedded.
  StringRef moduleName = op.getKernelModuleName().getValue();
  Twine moduleIdentifier = getModuleIdentifier(moduleName);
  Value *modulePtr = module.getGlobalVariable(moduleIdentifier.str(), true);
  if (!modulePtr)
    return op.emitError() << "Couldn't find the binary: " << moduleIdentifier;
  Value *moduleObj = builder.CreateLoad(ptrTy, modulePtr);
  Value *functionName = getOrCreateFunctionName(moduleName, op.getKernelName());
  Value *moduleFunction =
      builder.CreateCall(getModuleFunctionFn(), {moduleObj, functionName});

  // Get the stream to use for execution. If there's no async object then create
  // a stream to make a synchronous kernel launch.
  Value *stream = nullptr;
  // Sync & destroy the stream, for synchronous launches.
  llvm::scope_exit destroyStream([&]() {
    builder.CreateCall(getStreamSyncFn(), {stream});
    builder.CreateCall(getStreamDestroyFn(), {stream});
  });
  if (mlir::Value asyncObject = op.getAsyncObject()) {
    // Asynchronous launch: reuse the caller's stream and skip the
    // sync/destroy epilogue.
    stream = llvmValue(asyncObject);
    destroyStream.release();
  } else {
    stream = builder.CreateCall(getStreamCreateFn(), {});
  }

  llvm::Constant *paramsCount =
      llvm::ConstantInt::get(i64Ty, op.getNumKernelOperands());

  // Create the launch call.
  Value *nullPtr = ConstantPointerNull::get(ptrTy);

  // Launch kernel with clusters if cluster size is specified.
  if (op.hasClusterSize()) {
    mlir::gpu::KernelDim3 cluster = op.getClusterSizeOperandValues();
    Value *cx = llvmValue(cluster.x), *cy = llvmValue(cluster.y),
          *cz = llvmValue(cluster.z);
    builder.CreateCall(
        getClusterKernelLaunchFn(),
        ArrayRef<Value *>({moduleFunction, cx, cy, cz, gx, gy, gz, bx, by, bz,
                           dynamicMemorySize, stream, argArray, nullPtr}));
  } else {
    builder.CreateCall(getKernelLaunchFn(),
                       ArrayRef<Value *>({moduleFunction, gx, gy, gz, bx, by,
                                          bz, dynamicMemorySize, stream,
                                          argArray, nullPtr, paramsCount}));
  }

  return success();
}
473
475 DialectRegistry &registry) {
476 registry.addExtension(+[](MLIRContext *ctx, gpu::GPUDialect *dialect) {
477 SelectObjectAttr::attachInterface<SelectObjectAttrImpl>(*ctx);
478 });
479}
return success()
static Twine getModuleIdentifier(StringRef moduleName)
static void launchKernel(sycl::queue *queue, sycl::kernel *kernel, size_t gridX, size_t gridY, size_t gridZ, size_t blockX, size_t blockY, size_t blockZ, size_t sharedMemBytes, void **params, size_t paramsCount)
The DialectRegistry maps a dialect namespace to a constructor for the matching dialect.
bool addExtension(TypeID extensionID, std::unique_ptr< DialectExtensionBase > extension)
Add the given extension to the registry.
SmallVector< llvm::Value * > lookupValues(ValueRange values)
Looks up remapped a list of remapped values.
llvm::Value * lookupValue(Value value) const
Finds an LLVM IR value corresponding to the given MLIR value.
llvm::Module * getLLVMModule()
Returns the LLVM module in which the IR is being constructed.
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition Value.h:96
static LogicalResult embedBinaryImpl(StringRef moduleName, gpu::ObjectAttr object, Module &module)
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition Matchers.h:344
constexpr StringLiteral elfSectionName
void registerOffloadingLLVMTranslationInterfaceExternalModels(mlir::DialectRegistry &registry)
Registers the offloading LLVM translation interfaces for gpu.select_object.
Include the generated interface declarations.
@ Constant
Constant integer.
Definition AffineExpr.h:57