MLIR 23.0.0git
Target.cpp
Go to the documentation of this file.
1//===- Target.cpp - MLIR LLVM NVVM target compilation -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This files defines NVVM target related functions including registration
10// calls for the `#nvvm.target` compilation attribute.
11//
12//===----------------------------------------------------------------------===//
13
15
27#include "llvm/Support/InterleavedRange.h"
28
29#include "llvm/ADT/ScopeExit.h"
30#include "llvm/Config/Targets.h"
31#include "llvm/Support/DebugLog.h"
32#include "llvm/Support/FileSystem.h"
33#include "llvm/Support/FileUtilities.h"
34#include "llvm/Support/FormatVariadic.h"
35#include "llvm/Support/MemoryBuffer.h"
36#include "llvm/Support/Path.h"
37#include "llvm/Support/Process.h"
38#include "llvm/Support/Program.h"
39#include "llvm/Support/TargetSelect.h"
40#include "llvm/Support/Timer.h"
41#include "llvm/Support/raw_ostream.h"
42
43#include <cstdint>
44#include <cstdlib>
45#include <optional>
46
47using namespace mlir;
48using namespace mlir::NVVM;
49
50#ifndef __DEFAULT_CUDATOOLKIT_PATH__
51#define __DEFAULT_CUDATOOLKIT_PATH__ ""
52#endif
53
54extern "C" const unsigned char _mlir_embedded_libdevice[];
55extern "C" const unsigned _mlir_embedded_libdevice_size;
56
namespace {
// Implementation of the `TargetAttrInterface` model.
// Attached to `#nvvm.target` so the GPU dialect's serialization passes can
// compile GPU modules through this target.
class NVVMTargetAttrImpl
    : public gpu::TargetAttrInterface::FallbackModel<NVVMTargetAttrImpl> {
public:
  // Serializes `module` to a binary object (format chosen by `options`).
  // Returns std::nullopt on failure.
  std::optional<mlir::gpu::SerializedObject>
  serializeToObject(Attribute attribute, Operation *module,
                    const gpu::TargetOptions &options) const;

  // Wraps a serialized object and its metadata into a `#gpu.object`
  // attribute.
  Attribute createObject(Attribute attribute, Operation *module,
                         const mlir::gpu::SerializedObject &object,
                         const gpu::TargetOptions &options) const;
};
} // namespace
71
72// Register the NVVM dialect, the NVVM translation & the target interface.
74 DialectRegistry &registry) {
75 registry.addExtension(+[](MLIRContext *ctx, NVVM::NVVMDialect *dialect) {
76 NVVMTargetAttr::attachInterface<NVVMTargetAttrImpl>(*ctx);
77 });
78}
79
86
87// Search for the CUDA toolkit path.
89 if (const char *var = std::getenv("CUDA_ROOT"))
90 return var;
91 if (const char *var = std::getenv("CUDA_HOME"))
92 return var;
93 if (const char *var = std::getenv("CUDA_PATH"))
94 return var;
96}
97
99 Operation &module, NVVMTargetAttr target,
100 const gpu::TargetOptions &targetOptions)
101 : ModuleToObject(module, target.getTriple(), target.getChip(),
102 target.getFeatures(), target.getO(),
103 targetOptions.getInitialLlvmIRCallback(),
104 targetOptions.getLinkedLlvmIRCallback(),
105 targetOptions.getOptimizedLlvmIRCallback(),
106 targetOptions.getISACallback()),
107 target(target), toolkitPath(targetOptions.getToolkitPath()),
108 librariesToLink(targetOptions.getLibrariesToLink()) {
109
110 // If `targetOptions` have an empty toolkitPath use `getCUDAToolkitPath`
111 if (toolkitPath.empty())
113
114 // Append the files in the target attribute.
115 if (target.getLink())
116 librariesToLink.append(target.getLink().begin(), target.getLink().end());
117
118 // Append libdevice to the files to be loaded.
119 (void)appendStandardLibs();
120}
121
123 static llvm::once_flag initializeBackendOnce;
124 llvm::call_once(initializeBackendOnce, []() {
125 // If the `NVPTX` LLVM target was built, initialize it.
126#if LLVM_HAS_NVPTX_TARGET
127 LLVMInitializeNVPTXTarget();
128 LLVMInitializeNVPTXTargetInfo();
129 LLVMInitializeNVPTXTargetMC();
130 LLVMInitializeNVPTXAsmPrinter();
131#endif
132 });
133}
134
135NVVMTargetAttr SerializeGPUModuleBase::getTarget() const { return target; }
136
138
142
143// Try to append `libdevice` from a CUDA toolkit installation.
145#if MLIR_NVVM_EMBED_LIBDEVICE
146 // If libdevice is embedded in the binary, we don't look it up on the
147 // filesystem.
148 MLIRContext *ctx = target.getContext();
149 auto type =
151 IntegerType::get(ctx, 8));
152 auto resourceManager = DenseResourceElementsHandle::getManagerInterface(ctx);
153
154 // Lookup if we already loaded the resource, otherwise create it.
156 resourceManager.getBlobManager().lookup("_mlir_embedded_libdevice");
157 if (blob) {
158 librariesToLink.push_back(DenseResourceElementsAttr::get(
160 blob, ctx->getLoadedDialect<BuiltinDialect>())));
161 return success();
162 }
163
164 // Allocate a resource using one of the UnManagedResourceBlob method to wrap
165 // the embedded data.
169 librariesToLink.push_back(DenseResourceElementsAttr::get(
170 type, resourceManager.insert("_mlir_embedded_libdevice",
171 std::move(unmanagedBlob))));
172#else
173 StringRef pathRef = getToolkitPath();
174 if (!pathRef.empty()) {
176 path.insert(path.begin(), pathRef.begin(), pathRef.end());
177 pathRef = StringRef(path.data(), path.size());
178 if (!llvm::sys::fs::is_directory(pathRef)) {
179 getOperation().emitError() << "CUDA path: " << pathRef
180 << " does not exist or is not a directory.\n";
181 return failure();
182 }
183 llvm::sys::path::append(path, "nvvm", "libdevice", "libdevice.10.bc");
184 pathRef = StringRef(path.data(), path.size());
185 if (!llvm::sys::fs::is_regular_file(pathRef)) {
186 getOperation().emitError() << "LibDevice path: " << pathRef
187 << " does not exist or is not a file.\n";
188 return failure();
189 }
190 librariesToLink.push_back(StringAttr::get(target.getContext(), pathRef));
191 }
192#endif
193 return success();
194}
195
196std::optional<SmallVector<std::unique_ptr<llvm::Module>>>
199 if (failed(loadBitcodeFilesFromList(module.getContext(), librariesToLink,
200 bcFiles, true)))
201 return std::nullopt;
202 return std::move(bcFiles);
203}
204
namespace {
// Serializes NVVM GPU modules: lowers LLVM IR to PTX and then, depending on
// the compilation target in the options, to a cubin/fatbin via the
// `ptxas`/`fatbinary` tools or the `nvptxcompiler` library.
class NVPTXSerializer : public SerializeGPUModuleBase {
public:
  NVPTXSerializer(Operation &module, NVVMTargetAttr target,
                  const gpu::TargetOptions &targetOptions);

  /// Returns the GPU module op being serialized.
  gpu::GPUModuleOp getOperation();

  /// Compiles PTX to cubin using `ptxas`.
  FailureOr<SmallVector<char, 0>> compileToBinary(StringRef ptxCode);

  /// Compiles PTX to cubin using the `nvptxcompiler` library.
  FailureOr<SmallVector<char, 0>> compileToBinaryNVPTX(StringRef ptxCode);

  /// Serializes the LLVM module to an object format, depending on the
  /// compilation target selected in target options.
  FailureOr<SmallVector<char, 0>>
  moduleToObject(llvm::Module &llvmModule) override;

  /// Get LLVMIR->ISA performance result.
  /// Return nullopt if moduleToObject has not been called or the target format
  /// is LLVMIR.
  std::optional<int64_t> getLLVMIRToISATimeInMs();

  /// Get ISA->Binary performance result.
  /// Return nullopt if moduleToObject has not been called or the target format
  /// is LLVMIR or ISA.
  std::optional<int64_t> getISAToBinaryTimeInMs();

  /// Get the compiler log from ISA compiler.
  StringRef getISACompilerLog() const;

private:
  /// A temp file path paired with a remover that deletes it when the pair
  /// goes out of scope.
  using TmpFile = std::pair<llvm::SmallString<128>, llvm::FileRemover>;

  /// Creates a temp file.
  std::optional<TmpFile> createTemp(StringRef name, StringRef suffix);

  /// Finds the `tool` path, where `tool` is the name of the binary to search,
  /// i.e. `ptxas` or `fatbinary`. The search order is:
  /// 1. The toolkit path in `targetOptions`.
  /// 2. In the system PATH.
  /// 3. The path from `getCUDAToolkitPath()`.
  std::optional<std::string> findTool(StringRef tool);

  /// Target options.
  gpu::TargetOptions targetOptions;

  /// LLVMIR->ISA perf result.
  std::optional<int64_t> llvmToISATimeInMs;

  /// ISA->Binary perf result.
  std::optional<int64_t> isaToBinaryTimeInMs;

  /// Compiler log from ptxas or libnvptxcompiler.
  std::string isaCompilerLog;
};
} // namespace
264
// Initializes the base serializer and leaves both perf counters unset; they
// are populated by `moduleToObject`.
NVPTXSerializer::NVPTXSerializer(Operation &module, NVVMTargetAttr target,
                                 const gpu::TargetOptions &targetOptions)
    : SerializeGPUModuleBase(module, target, targetOptions),
      targetOptions(targetOptions), llvmToISATimeInMs(std::nullopt),
      isaToBinaryTimeInMs(std::nullopt) {}
270
271std::optional<NVPTXSerializer::TmpFile>
272NVPTXSerializer::createTemp(StringRef name, StringRef suffix) {
273 llvm::SmallString<128> filename;
274 if (name.size() > 80)
275 name = name.substr(0, 80);
276 std::error_code ec =
277 llvm::sys::fs::createTemporaryFile(name, suffix, filename);
278 if (ec) {
279 getOperation().emitError() << "Couldn't create the temp file: `" << filename
280 << "`, error message: " << ec.message();
281 return std::nullopt;
282 }
283 return TmpFile(filename, llvm::FileRemover(filename.c_str()));
284}
285
// Returns the measured LLVM IR -> ISA time; unset until `moduleToObject`
// runs the PTX translation.
std::optional<int64_t> NVPTXSerializer::getLLVMIRToISATimeInMs() {
  return llvmToISATimeInMs;
}

// Returns the measured ISA -> binary time; unset until `moduleToObject`
// compiles the PTX to a binary.
std::optional<int64_t> NVPTXSerializer::getISAToBinaryTimeInMs() {
  return isaToBinaryTimeInMs;
}

// Returns the compiler log captured from ptxas / libnvptxcompiler (empty
// unless the "collect-compiler-diagnostics" flag was set on the target).
StringRef NVPTXSerializer::getISACompilerLog() const { return isaCompilerLog; }

// Returns the serialized op as a gpu.module; `serializeToObject` checks the
// op kind before constructing this serializer.
gpu::GPUModuleOp NVPTXSerializer::getOperation() {
  return dyn_cast<gpu::GPUModuleOp>(&SerializeGPUModuleBase::getOperation());
}
299
300std::optional<std::string> NVPTXSerializer::findTool(StringRef tool) {
301 // Find the `tool` path.
302 // 1. Check the toolkit path given in the command line.
303 StringRef pathRef = targetOptions.getToolkitPath();
304 SmallVector<char, 256> path;
305 if (!pathRef.empty()) {
306 path.insert(path.begin(), pathRef.begin(), pathRef.end());
307 llvm::sys::path::append(path, "bin", tool);
308 if (llvm::sys::fs::can_execute(path))
309 return StringRef(path.data(), path.size()).str();
310 }
311
312 // 2. Check PATH.
313 if (std::optional<std::string> toolPath =
314 llvm::sys::Process::FindInEnvPath("PATH", tool))
315 return *toolPath;
316
317 // 3. Check `getCUDAToolkitPath()`.
318 pathRef = getCUDAToolkitPath();
319 path.clear();
320 if (!pathRef.empty()) {
321 path.insert(path.begin(), pathRef.begin(), pathRef.end());
322 llvm::sys::path::append(path, "bin", tool);
323 if (llvm::sys::fs::can_execute(path))
324 return StringRef(path.data(), path.size()).str();
325 }
326 getOperation().emitError()
327 << "Couldn't find the `" << tool
328 << "` binary. Please specify the toolkit "
329 "path, add the compiler to $PATH, or set one of the environment "
330 "variables in `NVVM::getCUDAToolkitPath()`.";
331 return std::nullopt;
332}
333
334/// Adds optional command-line arguments to existing arguments.
335template <typename T>
336static void setOptionalCommandlineArguments(NVVMTargetAttr target,
337 SmallVectorImpl<T> &ptxasArgs) {
338 if (!target.hasCmdOptions())
339 return;
340
341 std::optional<mlir::NamedAttribute> cmdOptions = target.getCmdOptions();
342 for (Attribute attr : cast<ArrayAttr>(cmdOptions->getValue())) {
343 if (auto strAttr = dyn_cast<StringAttr>(attr)) {
344 if constexpr (std::is_same_v<T, StringRef>) {
345 ptxasArgs.push_back(strAttr.getValue());
346 } else if constexpr (std::is_same_v<T, const char *>) {
347 ptxasArgs.push_back(strAttr.getValue().data());
348 }
349 }
350 }
351}
352
// TODO: clean this method & have a generic tool driver or never emit binaries
// with this mechanism and let another stage take care of it.
//
// Drives the external CUDA tools: writes `ptxCode` to a temp file, invokes
// `ptxas` to produce a cubin, and (for the Fatbin target) invokes `fatbinary`
// to bundle the cubin and the PTX. Returns the resulting binary bytes.
FailureOr<SmallVector<char, 0>>
NVPTXSerializer::compileToBinary(StringRef ptxCode) {
  // Determine if the serializer should create a fatbinary with the PTX
  // embedded or a simple CUBIN binary.
  const bool createFatbin =
      targetOptions.getCompilationTarget() == gpu::CompilationTarget::Fatbin;

  // Find the `ptxas` & `fatbinary` tools.
  std::optional<std::string> ptxasCompiler = findTool("ptxas");
  if (!ptxasCompiler)
    return failure();
  std::optional<std::string> fatbinaryTool;
  if (createFatbin) {
    fatbinaryTool = findTool("fatbinary");
    if (!fatbinaryTool)
      return failure();
  }
  Location loc = getOperation().getLoc();

  // Base name for all temp files: mlir-<module name>-<target triple>-<chip>.
  std::string basename =
      llvm::formatv("mlir-{0}-{1}-{2}", getOperation().getNameAttr().getValue(),
                    getTarget().getTriple(), getTarget().getChip());

  // Create temp files:
  std::optional<TmpFile> ptxFile = createTemp(basename, "ptx");
  if (!ptxFile)
    return failure();
  std::optional<TmpFile> logFile = createTemp(basename, "log");
  if (!logFile)
    return failure();
  std::optional<TmpFile> binaryFile = createTemp(basename, "bin");
  if (!binaryFile)
    return failure();
  TmpFile cubinFile;
  if (createFatbin) {
    // The cubin is only an intermediate in the fatbin case; give it its own
    // remover so it is cleaned up like the other temps.
    std::string cubinFilename = (ptxFile->first + ".cubin").str();
    cubinFile = TmpFile(cubinFilename, llvm::FileRemover(cubinFilename));
  } else {
    // Otherwise the cubin IS the output: reuse `binaryFile`'s path and leave
    // `cubinFile`'s remover empty so the result isn't deleted twice.
    cubinFile.first = binaryFile->first;
  }

  std::error_code ec;
  // Dump the PTX to a temp file.
  {
    llvm::raw_fd_ostream ptxStream(ptxFile->first, ec);
    if (ec)
      return emitError(loc) << "Couldn't open the file: `" << ptxFile->first
                            << "`, error message: " << ec.message();

    ptxStream << ptxCode;
    if (ptxStream.has_error())
      return emitError(loc) << "An error occurred while writing the PTX to: `"
                            << ptxFile->first << "`.";

    ptxStream.flush();
  }

  // Command redirects: stdout and stderr of the child processes both go to
  // the log file.
  std::optional<StringRef> redirects[] = {
      std::nullopt,
      logFile->first,
      logFile->first,
  };

  // Get any extra args passed in `targetOptions`.
  std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>> cmdOpts =
      targetOptions.tokenizeCmdOptions();

  // Create ptxas args.
  std::string optLevel = std::to_string(this->optLevel);
  SmallVector<StringRef, 12> ptxasArgs(
      {StringRef("ptxas"), StringRef("-arch"), getTarget().getChip(),
       StringRef(ptxFile->first), StringRef("-o"), StringRef(cubinFile.first),
       "--opt-level", optLevel});

  bool useFatbin32 = false;
  for (const auto *cArg : cmdOpts.second) {
    // All `cmdOpts` are for `ptxas` except `-32` which passes `-32` to
    // `fatbinary`, indicating a 32-bit target. By default a 64-bit target is
    // assumed.
    if (StringRef arg(cArg); arg != "-32")
      ptxasArgs.push_back(arg);
    else
      useFatbin32 = true;
  }

  // Set optional command line arguments from the target attribute.
  setOptionalCommandlineArguments(getTarget(), ptxasArgs);

  // Create the `fatbinary` args.
  StringRef chip = getTarget().getChip();
  // Remove the arch prefix to obtain the compute capability.
  chip.consume_front("sm_"), chip.consume_front("compute_");
  // Embed the cubin object.
  std::string cubinArg =
      llvm::formatv("--image3=kind=elf,sm={0},file={1}", chip, cubinFile.first)
          .str();
  // Embed the PTX file so the driver can JIT if needed.
  std::string ptxArg =
      llvm::formatv("--image3=kind=ptx,sm={0},file={1}", chip, ptxFile->first)
          .str();
  SmallVector<StringRef, 6> fatbinArgs({StringRef("fatbinary"),
                                        useFatbin32 ? "-32" : "-64", cubinArg,
                                        ptxArg, "--create", binaryFile->first});

  // Dump tool invocation commands.
#define DEBUG_TYPE "serialize-to-binary"
  LDBG() << "Tool invocation for module: " << getOperation().getNameAttr()
         << "\nptxas executable:" << ptxasCompiler.value()
         << "\nptxas args: " << llvm::interleaved(ptxasArgs, " ");
  if (createFatbin)
    LDBG() << "fatbin args: " << llvm::interleaved(fatbinArgs, " ");
#undef DEBUG_TYPE

  // Helper function for printing tool error logs; falls back to the captured
  // log file when `ExecuteAndWait` produced no error message.
  std::string message;
  auto emitLogError =
      [&](StringRef toolName) -> FailureOr<SmallVector<char, 0>> {
    if (message.empty()) {
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> toolStderr =
          llvm::MemoryBuffer::getFile(logFile->first);
      if (toolStderr)
        return emitError(loc) << toolName << " invocation failed. Log:\n"
                              << toolStderr->get()->getBuffer();
      else
        return emitError(loc) << toolName << " invocation failed.";
    }
    return emitError(loc) << toolName
                          << " invocation failed, error message: " << message;
  };

  // Invoke PTXAS.
  if (llvm::sys::ExecuteAndWait(ptxasCompiler.value(), ptxasArgs,
                                /*Env=*/std::nullopt,
                                /*Redirects=*/redirects,
                                /*SecondsToWait=*/0,
                                /*MemoryLimit=*/0,
                                /*ErrMsg=*/&message))
    return emitLogError("`ptxas`");

  // Optionally capture the ptxas log for the serialization metadata.
  if (target.hasFlag("collect-compiler-diagnostics")) {
    if (auto logBuffer = llvm::MemoryBuffer::getFile(logFile->first))
      isaCompilerLog = (*logBuffer)->getBuffer().str();
  }
#define DEBUG_TYPE "dump-sass"
  LLVM_DEBUG({
    std::optional<std::string> nvdisasm = findTool("nvdisasm");
    SmallVector<StringRef> nvdisasmArgs(
        {StringRef("nvdisasm"), StringRef(cubinFile.first)});
    if (llvm::sys::ExecuteAndWait(nvdisasm.value(), nvdisasmArgs,
                                  /*Env=*/std::nullopt,
                                  /*Redirects=*/redirects,
                                  /*SecondsToWait=*/0,
                                  /*MemoryLimit=*/0,
                                  /*ErrMsg=*/&message))
      return emitLogError("`nvdisasm`");
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> logBuffer =
        llvm::MemoryBuffer::getFile(logFile->first);
    if (logBuffer && !(*logBuffer)->getBuffer().empty()) {
      LDBG() << "Output:\n" << (*logBuffer)->getBuffer();
      llvm::dbgs().flush();
    }
  });
#undef DEBUG_TYPE

  // Invoke `fatbin`.
  message.clear();
  if (createFatbin && llvm::sys::ExecuteAndWait(*fatbinaryTool, fatbinArgs,
                                                /*Env=*/std::nullopt,
                                                /*Redirects=*/redirects,
                                                /*SecondsToWait=*/0,
                                                /*MemoryLimit=*/0,
                                                /*ErrMsg=*/&message))
    return emitLogError("`fatbinary`");

// Dump the output of the tools, helpful if the verbose flag was passed.
#define DEBUG_TYPE "serialize-to-binary"
  LLVM_DEBUG({
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> logBuffer =
        llvm::MemoryBuffer::getFile(logFile->first);
    if (logBuffer && !(*logBuffer)->getBuffer().empty()) {
      LDBG() << "Output:\n" << (*logBuffer)->getBuffer();
      llvm::dbgs().flush();
    }
  });
#undef DEBUG_TYPE

  // Read the fatbin.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> binaryBuffer =
      llvm::MemoryBuffer::getFile(binaryFile->first);
  if (!binaryBuffer)
    return emitError(loc) << "Couldn't open the file: `" << binaryFile->first
                          << "`, error message: "
                          << binaryBuffer.getError().message();

  StringRef fatbin = (*binaryBuffer)->getBuffer();
  return SmallVector<char, 0>(fatbin.begin(), fatbin.end());
}
554
#if MLIR_ENABLE_NVPTXCOMPILER
#include "nvPTXCompiler.h"

// Emits an error at `loc` and returns failure() when the nvPTXCompiler call
// `expr` yields a non-zero status. Requires a `Location loc` in scope.
#define RETURN_ON_NVPTXCOMPILER_ERROR(expr)                                    \
  do {                                                                         \
    if (auto status = (expr)) {                                                \
      emitError(loc) << llvm::Twine(#expr).concat(" failed with error code ")  \
                     << status;                                                \
      return failure();                                                        \
    }                                                                          \
  } while (false)

#include "nvFatbin.h"

// Emits an error at `loc` and returns failure() when the nvFatbin call
// `expr` does not return NVFATBIN_SUCCESS. Requires a `Location loc` in
// scope.
#define RETURN_ON_NVFATBIN_ERROR(expr)                                         \
  do {                                                                         \
    auto result = (expr);                                                      \
    if (result != nvFatbinResult::NVFATBIN_SUCCESS) {                          \
      emitError(loc) << llvm::Twine(#expr).concat(" failed with error: ")      \
                     << nvFatbinGetErrorString(result);                        \
      return failure();                                                        \
    }                                                                          \
  } while (false)

// In-process alternative to `compileToBinary`: compiles PTX with the
// nvptxcompiler library and, for the Fatbin target, bundles the result with
// the nvFatbin library — no external tool invocations.
FailureOr<SmallVector<char, 0>>
NVPTXSerializer::compileToBinaryNVPTX(StringRef ptxCode) {
  Location loc = getOperation().getLoc();
  nvPTXCompilerHandle compiler = nullptr;
  nvPTXCompileResult status;
  size_t logSize;

  // Create the options.
  std::string optLevel = std::to_string(this->optLevel);
  std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>> cmdOpts =
      targetOptions.tokenizeCmdOptions();
  cmdOpts.second.append(
      {"-arch", getTarget().getChip().data(), "--opt-level", optLevel.c_str()});

  // Set optional command line arguments from the target attribute.
  setOptionalCommandlineArguments(getTarget(), cmdOpts.second);
  // Create the compiler handle.
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerCreate(&compiler, ptxCode.size(), ptxCode.str().c_str()));

  // Try to compile the binary.
  status = nvPTXCompilerCompile(compiler, cmdOpts.second.size(),
                                cmdOpts.second.data());

  // Check if compilation failed.
  if (status != NVPTXCOMPILE_SUCCESS) {
    RETURN_ON_NVPTXCOMPILER_ERROR(
        nvPTXCompilerGetErrorLogSize(compiler, &logSize));
    if (logSize != 0) {
      SmallVector<char> log(logSize + 1, 0);
      RETURN_ON_NVPTXCOMPILER_ERROR(
          nvPTXCompilerGetErrorLog(compiler, log.data()));
      return emitError(loc)
             << "NVPTX compiler invocation failed, error log: " << log.data();
    } else {
      return emitError(loc)
             << "NVPTX compiler invocation failed with error code: " << status;
    }
  }

  // Retrieve the binary.
  size_t elfSize;
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerGetCompiledProgramSize(compiler, &elfSize));
  SmallVector<char, 0> binary(elfSize, 0);
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerGetCompiledProgram(compiler, (void *)binary.data()));

  // Lambda to fetch info log; returns empty vector on failure or no log.
  auto fetchInfoLog = [&]() -> SmallVector<char> {
    size_t size = 0;
    if (nvPTXCompilerGetInfoLogSize(compiler, &size) != NVPTXCOMPILE_SUCCESS ||
        size == 0)
      return {};
    SmallVector<char> log(size + 1, 0);
    if (nvPTXCompilerGetInfoLog(compiler, log.data()) != NVPTXCOMPILE_SUCCESS)
      return {};
    return log;
  };

  // Optionally capture the compiler log for the serialization metadata.
  if (target.hasFlag("collect-compiler-diagnostics")) {
    if (auto log = fetchInfoLog(); !log.empty())
      isaCompilerLog = log.data();
  }

// Dump the log of the compiler, helpful if the verbose flag was passed.
#define DEBUG_TYPE "serialize-to-binary"
  LLVM_DEBUG({
    if (auto log = fetchInfoLog(); !log.empty())
      LDBG() << "NVPTX compiler invocation for module: "
             << getOperation().getNameAttr()
             << "\nArguments: " << llvm::interleaved(cmdOpts.second, " ")
             << "\nOutput\n"
             << log.data();
  });
#undef DEBUG_TYPE
  RETURN_ON_NVPTXCOMPILER_ERROR(nvPTXCompilerDestroy(&compiler));

  if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Fatbin) {
    // `-32` among the tokenized options selects a 32-bit fatbin; default is
    // 64-bit.
    bool useFatbin32 = llvm::any_of(cmdOpts.second, [](const char *option) {
      return llvm::StringRef(option) == "-32";
    });

    const char *cubinOpts[1] = {useFatbin32 ? "-32" : "-64"};
    nvFatbinHandle handle;

    // Strip the arch prefix to obtain the compute capability.
    auto chip = getTarget().getChip();
    chip.consume_front("sm_");

    RETURN_ON_NVFATBIN_ERROR(nvFatbinCreate(&handle, cubinOpts, 1));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinAddCubin(
        handle, binary.data(), binary.size(), chip.data(), nullptr));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinAddPTX(
        handle, ptxCode.data(), ptxCode.size(), chip.data(), nullptr, nullptr));

    size_t fatbinSize;
    RETURN_ON_NVFATBIN_ERROR(nvFatbinSize(handle, &fatbinSize));
    SmallVector<char, 0> fatbin(fatbinSize, 0);
    RETURN_ON_NVFATBIN_ERROR(nvFatbinGet(handle, (void *)fatbin.data()));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinDestroy(&handle));
    return fatbin;
  }

  return binary;
}
#endif // MLIR_ENABLE_NVPTXCOMPILER
685
686FailureOr<SmallVector<char, 0>>
687NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
688 llvm::Timer moduleToObjectTimer(
689 "moduleToObjectTimer",
690 "Timer for perf llvm-ir -> isa and isa -> binary.");
691 llvm::scope_exit clear([&]() { moduleToObjectTimer.clear(); });
692 // Return LLVM IR if the compilation target is `offload`.
693#define DEBUG_TYPE "serialize-to-llvm"
694 LLVM_DEBUG({
695 LDBG() << "LLVM IR for module: " << getOperation().getNameAttr();
696 LDBG() << llvmModule;
697 });
698#undef DEBUG_TYPE
699 if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Offload)
701
702#if !LLVM_HAS_NVPTX_TARGET
703 return getOperation()->emitError(
704 "The `NVPTX` target was not built. Please enable it when building LLVM.");
705#endif // LLVM_HAS_NVPTX_TARGET
706
707 // Emit PTX code.
708 FailureOr<llvm::TargetMachine *> targetMachine = getOrCreateTargetMachine();
709 if (failed(targetMachine))
710 return getOperation().emitError()
711 << "Target Machine unavailable for triple " << triple
712 << ", can't optimize with LLVM\n";
713
714 moduleToObjectTimer.startTimer();
715 FailureOr<SmallString<0>> serializedISA =
716 translateModuleToISA(llvmModule, **targetMachine,
717 [&]() { return getOperation().emitError(); });
718 moduleToObjectTimer.stopTimer();
719 llvmToISATimeInMs = moduleToObjectTimer.getTotalTime().getWallTime() * 1000;
720 moduleToObjectTimer.clear();
721 if (failed(serializedISA))
722 return getOperation().emitError()
723 << "Failed translating the module to ISA.";
724
725 if (isaCallback)
726 isaCallback(*serializedISA);
727
728#define DEBUG_TYPE "serialize-to-isa"
729 LDBG() << "PTX for module: " << getOperation().getNameAttr() << "\n"
730 << *serializedISA;
731#undef DEBUG_TYPE
732
733 // Return PTX if the compilation target is `assembly`.
734 if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Assembly)
735 return SmallVector<char, 0>(serializedISA->begin(), serializedISA->end());
736
737 FailureOr<SmallVector<char, 0>> result;
738 moduleToObjectTimer.startTimer();
739 // Compile to binary.
740#if MLIR_ENABLE_NVPTXCOMPILER
741 result = compileToBinaryNVPTX(*serializedISA);
742#else
743 result = compileToBinary(*serializedISA);
744#endif // MLIR_ENABLE_NVPTXCOMPILER
745
746 moduleToObjectTimer.stopTimer();
747 isaToBinaryTimeInMs = moduleToObjectTimer.getTotalTime().getWallTime() * 1000;
748 moduleToObjectTimer.clear();
749 return result;
750}
751
752std::optional<mlir::gpu::SerializedObject>
753NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
754 const gpu::TargetOptions &options) const {
755 Builder builder(attribute.getContext());
756 assert(module && "The module must be non null.");
757 if (!module)
758 return std::nullopt;
759 if (!mlir::isa<gpu::GPUModuleOp>(module)) {
760 module->emitError("Module must be a GPU module.");
761 return std::nullopt;
762 }
763 NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute), options);
764 serializer.init();
765 std::optional<SmallVector<char, 0>> result = serializer.run();
766 if (!result)
767 return std::nullopt;
768
769 SmallVector<NamedAttribute, 4> properties;
770 auto llvmToISATimeInMs = serializer.getLLVMIRToISATimeInMs();
771 if (llvmToISATimeInMs.has_value())
772 properties.push_back(builder.getNamedAttr(
773 "LLVMIRToISATimeInMs", builder.getI64IntegerAttr(*llvmToISATimeInMs)));
774 auto isaToBinaryTimeInMs = serializer.getISAToBinaryTimeInMs();
775 if (isaToBinaryTimeInMs.has_value())
776 properties.push_back(
777 builder.getNamedAttr("ISAToBinaryTimeInMs",
778 builder.getI64IntegerAttr(*isaToBinaryTimeInMs)));
779 StringRef isaCompilerLog = serializer.getISACompilerLog();
780 if (!isaCompilerLog.empty())
781 properties.push_back(builder.getNamedAttr(
782 "ISACompilerLog", builder.getStringAttr(isaCompilerLog)));
783
784 return gpu::SerializedObject{std::move(*result),
785 builder.getDictionaryAttr(properties)};
786}
787
788Attribute
789NVVMTargetAttrImpl::createObject(Attribute attribute, Operation *module,
790 const mlir::gpu::SerializedObject &object,
791 const gpu::TargetOptions &options) const {
792 auto target = cast<NVVMTargetAttr>(attribute);
793 gpu::CompilationTarget format = options.getCompilationTarget();
794 DictionaryAttr objectProps;
795 Builder builder(attribute.getContext());
796 SmallVector<NamedAttribute> properties =
797 llvm::to_vector(object.getMetadata().getValue());
798 if (format == gpu::CompilationTarget::Assembly)
799 properties.push_back(
800 builder.getNamedAttr("O", builder.getI32IntegerAttr(target.getO())));
801
802 if (StringRef section = options.getELFSection(); !section.empty())
803 properties.push_back(builder.getNamedAttr(gpu::elfSectionName,
804 builder.getStringAttr(section)));
805
806 if (!properties.empty())
807 objectProps = builder.getDictionaryAttr(properties);
808
809 return builder.getAttr<gpu::ObjectAttr>(
810 attribute, format,
811 builder.getStringAttr(
812 StringRef(object.getObject().data(), object.getObject().size())),
813 objectProps, /*kernels=*/nullptr);
814}
return success()
static void setOptionalCommandlineArguments(NVVMTargetAttr target, SmallVectorImpl< T > &ptxasArgs)
Adds optional command-line arguments to existing arguments.
Definition Target.cpp:336
const unsigned _mlir_embedded_libdevice_size
Definition Target.cpp:55
#define __DEFAULT_CUDATOOLKIT_PATH__
Definition Target.cpp:51
const unsigned char _mlir_embedded_libdevice[]
Definition Target.cpp:54
values clear()
static llvm::ManagedStatic< PassManagerOptions > options
Attributes are known-constant values of operations.
Definition Attributes.h:25
MLIRContext * getContext() const
Return the context this attribute belongs to.
The DialectRegistry maps a dialect namespace to a constructor for the matching dialect.
bool addExtension(TypeID extensionID, std::unique_ptr< DialectExtensionBase > extension)
Add the given extension to the registry.
The class represents an individual entry of a blob.
LogicalResult loadBitcodeFilesFromList(llvm::LLVMContext &context, ArrayRef< Attribute > librariesToLink, SmallVector< std::unique_ptr< llvm::Module > > &llvmModules, bool failureOnError=true)
Loads multiple bitcode files.
virtual FailureOr< SmallVector< char, 0 > > moduleToObject(llvm::Module &llvmModule)
Serializes the LLVM IR bitcode to an object file, by default it serializes to LLVM bitcode.
Operation & getOperation()
Returns the operation being serialized.
ModuleToObject(Operation &module, StringRef triple, StringRef chip, StringRef features={}, int optLevel=3, function_ref< void(llvm::Module &)> initialLlvmIRCallback={}, function_ref< void(llvm::Module &)> linkedLlvmIRCallback={}, function_ref< void(llvm::Module &)> optimizedLlvmIRCallback={}, function_ref< void(StringRef)> isaCallback={})
Operation & module
Module to transform to a binary object.
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
void appendDialectRegistry(const DialectRegistry &registry)
Append the contents of the given dialect registry to the registry associated with this context.
Dialect * getLoadedDialect(StringRef name)
Get a registered IR dialect with the given namespace.
Base class for all NVVM serializations from GPU modules into binary strings.
Definition Utils.h:32
ArrayRef< Attribute > getLibrariesToLink() const
Returns the bitcode libraries to be linked into the gpu module after translation to LLVM IR.
Definition Target.cpp:139
SerializeGPUModuleBase(Operation &module, NVVMTargetAttr target, const gpu::TargetOptions &targetOptions={})
Initializes the toolkitPath with the path in targetOptions or if empty with the path in getCUDAToolki...
Definition Target.cpp:98
NVVMTargetAttr target
NVVM target attribute.
Definition Utils.h:63
std::string toolkitPath
CUDA toolkit path.
Definition Utils.h:66
SmallVector< Attribute > librariesToLink
List of LLVM bitcode to link into after translation to LLVM IR.
Definition Utils.h:71
std::optional< SmallVector< std::unique_ptr< llvm::Module > > > loadBitcodeFiles(llvm::Module &module) override
Loads the bitcode files in librariesToLink.
Definition Target.cpp:197
LogicalResult appendStandardLibs()
Appends nvvm/libdevice.bc into librariesToLink.
Definition Target.cpp:144
static void init()
Initializes the LLVM NVPTX target by safely calling LLVMInitializeNVPTX* methods if available.
Definition Target.cpp:122
StringRef getToolkitPath() const
Returns the CUDA toolkit path.
Definition Target.cpp:137
NVVMTargetAttr getTarget() const
Returns the target attribute.
Definition Target.cpp:135
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
InFlightDiagnostic emitError(const Twine &message={})
Emit an error about fatal conditions with this operation, reporting up to any diagnostic handlers tha...
static AsmResourceBlob allocateInferAlign(ArrayRef< T > data, AsmResourceBlob::DeleterFn deleter={}, bool dataIsMutable=false)
Definition AsmState.h:235
This class represents a serialized object (GPU binary) with metadata (e.g.
This class serves as an opaque interface for passing options to the TargetAttrInterface methods.
void registerNVVMTargetInterfaceExternalModels(DialectRegistry &registry)
Registers the TargetAttrInterface for the #nvvm.target attribute in the given registry.
Definition Target.cpp:73
StringRef getCUDAToolkitPath()
Searches & returns the path CUDA toolkit path, the search order is:
Definition Target.cpp:88
constexpr StringLiteral elfSectionName
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition Remarks.h:573
Include the generated interface declarations.
DialectResourceBlobHandle< BuiltinDialect > DenseResourceElementsHandle
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
static ManagerInterface & getManagerInterface(MLIRContext *ctx)