GPUToLLVMIRTranslation.cpp
//===- GPUToLLVMIRTranslation.cpp - Translate GPU dialect to LLVM IR ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a translation between the MLIR GPU dialect and LLVM IR.
//
//===----------------------------------------------------------------------===//
#include "mlir/Target/LLVMIR/Dialect/GPU/GPUToLLVMIRTranslation.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h"
#include "mlir/Target/LLVMIR/ModuleTranslation.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;

namespace {
LogicalResult launchKernel(gpu::LaunchFuncOp launchOp,
                           llvm::IRBuilderBase &builder,
                           LLVM::ModuleTranslation &moduleTranslation) {
  auto kernelBinary = SymbolTable::lookupNearestSymbolFrom<gpu::BinaryOp>(
      launchOp, launchOp.getKernelModuleName());
  if (!kernelBinary) {
    launchOp.emitError("Couldn't find the binary holding the kernel: ")
        << launchOp.getKernelModuleName();
    return failure();
  }
  auto offloadingHandler =
      dyn_cast<gpu::OffloadingLLVMTranslationAttrInterface>(
          kernelBinary.getOffloadingHandlerAttr());
  assert(offloadingHandler && "Invalid offloading handler.");
  return offloadingHandler.launchKernel(launchOp, kernelBinary, builder,
                                        moduleTranslation);
}

class GPUDialectLLVMIRTranslationInterface
    : public LLVMTranslationDialectInterface {
public:
  using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;

  LogicalResult
  convertOperation(Operation *operation, llvm::IRBuilderBase &builder,
                   LLVM::ModuleTranslation &moduleTranslation) const override {
    return llvm::TypeSwitch<Operation *, LogicalResult>(operation)
        .Case([&](gpu::GPUModuleOp) { return success(); })
        .Case([&](gpu::BinaryOp op) {
          auto offloadingHandler =
              dyn_cast<gpu::OffloadingLLVMTranslationAttrInterface>(
                  op.getOffloadingHandlerAttr());
          assert(offloadingHandler && "Invalid offloading handler.");
          return offloadingHandler.embedBinary(op, builder, moduleTranslation);
        })
        .Case([&](gpu::LaunchFuncOp op) {
          return launchKernel(op, builder, moduleTranslation);
        })
        .Default([&](Operation *op) {
          return op->emitError("unsupported GPU operation: ") << op->getName();
        });
  }
};

} // namespace

void mlir::registerGPUDialectTranslation(DialectRegistry &registry) {
  registry.insert<gpu::GPUDialect>();
  registry.addExtension(+[](MLIRContext *ctx, gpu::GPUDialect *dialect) {
    dialect->addInterfaces<GPUDialectLLVMIRTranslationInterface>();
  });
}

void mlir::registerGPUDialectTranslation(MLIRContext &context) {
  DialectRegistry registry;
  registerGPUDialectTranslation(registry);
  context.appendDialectRegistry(registry);
}
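
Usage note (not part of the source file): the sketch below shows one way a client might wire this registration into an export to LLVM IR. It is a minimal sketch, assuming the module has already been lowered so that only LLVM dialect operations plus gpu.binary / gpu.launch_func remain; the helper name exportToLLVMIR is hypothetical, while registerLLVMDialectTranslation and translateModuleToLLVMIR are the standard MLIR entry points.

// Hypothetical helper illustrating how registerGPUDialectTranslation is used
// together with the LLVM dialect translation before exporting to LLVM IR.
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Target/LLVMIR/Dialect/GPU/GPUToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <memory>

std::unique_ptr<llvm::Module> exportToLLVMIR(mlir::ModuleOp mlirModule,
                                             llvm::LLVMContext &llvmCtx) {
  // Make the GPU and LLVM dialect translation interfaces available in the
  // context that owns the module being exported.
  mlir::DialectRegistry registry;
  mlir::registerGPUDialectTranslation(registry);
  mlir::registerLLVMDialectTranslation(registry);
  mlirModule.getContext()->appendDialectRegistry(registry);

  // translateModuleToLLVMIR dispatches gpu.binary and gpu.launch_func to the
  // interface defined in this file; it returns nullptr on failure.
  return mlir::translateModuleToLLVMIR(mlirModule.getOperation(), llvmCtx);
}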