#include "llvm/Support/InterleavedRange.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/Config/Targets.h"
#include "llvm/Support/DebugLog.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FileUtilities.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#ifndef __DEFAULT_CUDATOOLKIT_PATH__
#define __DEFAULT_CUDATOOLKIT_PATH__ ""
#endif
class NVVMTargetAttrImpl
    : public gpu::TargetAttrInterface::FallbackModel<NVVMTargetAttrImpl> {
  std::optional<SmallVector<char, 0>>
  serializeToObject(Attribute attribute, Operation *module,
                    const gpu::TargetOptions &options) const;

  registry.addExtension(+[](MLIRContext *ctx, NVVM::NVVMDialect *dialect) {
    NVVMTargetAttr::attachInterface<NVVMTargetAttrImpl>(*ctx);
  });
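// Illustrative usage sketch (not part of this file): a client registers the
// external model so #nvvm.target attributes implement gpu::TargetAttrInterface
// before GPU modules are serialized.
DialectRegistry registry;
mlir::NVVM::registerNVVMTargetInterfaceExternalModels(registry);
MLIRContext context(registry);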
StringRef mlir::NVVM::getCUDAToolkitPath() {
  if (const char *var = std::getenv("CUDA_ROOT"))
    return var;
  if (const char *var = std::getenv("CUDA_HOME"))
    return var;
  if (const char *var = std::getenv("CUDA_PATH"))
    return var;
  return __DEFAULT_CUDATOOLKIT_PATH__;
}
SerializeGPUModuleBase::SerializeGPUModuleBase(
    Operation &module, NVVMTargetAttr target,
    const gpu::TargetOptions &targetOptions)
    : ModuleToObject(module, target.getTriple(), target.getChip(),
                     target.getFeatures(), target.getO(),
                     targetOptions.getInitialLlvmIRCallback(),
                     targetOptions.getLinkedLlvmIRCallback(),
                     targetOptions.getOptimizedLlvmIRCallback(),
                     targetOptions.getISACallback()),
      target(target), toolkitPath(targetOptions.getToolkitPath()),
      librariesToLink(targetOptions.getLibrariesToLink()) {
void SerializeGPUModuleBase::init() {
  static llvm::once_flag initializeBackendOnce;
  llvm::call_once(initializeBackendOnce, []() {
    // If the `NVPTX` LLVM target was built, initialize it.
#if LLVM_HAS_NVPTX_TARGET
    LLVMInitializeNVPTXTarget();
    LLVMInitializeNVPTXTargetInfo();
    LLVMInitializeNVPTXTargetMC();
    LLVMInitializeNVPTXAsmPrinter();
#endif
  });
}
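// Usage sketch (assumption, not in the original file): serializers call init()
// once before creating a TargetMachine; repeated calls are cheap because of
// the llvm::call_once guard above.
mlir::NVVM::SerializeGPUModuleBase::init();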
#if MLIR_NVVM_EMBED_LIBDEVICE
  DialectResourceBlobManager::BlobEntry *blob =
      resourceManager.getBlobManager().lookup("_mlir_embedded_libdevice");

  librariesToLink.push_back(DenseResourceElementsAttr::get(
      type, resourceManager.insert("_mlir_embedded_libdevice",
                                   std::move(unmanagedBlob))));
  StringRef pathRef = getToolkitPath();
  if (!pathRef.empty()) {
    SmallVector<char, 256> path;
    path.insert(path.begin(), pathRef.begin(), pathRef.end());
    pathRef = StringRef(path.data(), path.size());
    if (!llvm::sys::fs::is_directory(pathRef)) {
      getOperation().emitError() << "CUDA path: " << pathRef
                                 << " does not exist or is not a directory.\n";
      return failure();
    }
    llvm::sys::path::append(path, "nvvm", "libdevice", "libdevice.10.bc");
    pathRef = StringRef(path.data(), path.size());
    if (!llvm::sys::fs::is_regular_file(pathRef)) {
      getOperation().emitError() << "LibDevice path: " << pathRef
                                 << " does not exist or is not a file.\n";
      return failure();
    }
std::optional<SmallVector<std::unique_ptr<llvm::Module>>>
SerializeGPUModuleBase::loadBitcodeFiles(llvm::Module &module) {
  SmallVector<std::unique_ptr<llvm::Module>> bcFiles;
  if (failed(loadBitcodeFilesFromList(module.getContext(), librariesToLink,
                                      bcFiles, true)))
    return std::nullopt;
  return std::move(bcFiles);
}
  NVPTXSerializer(Operation &module, NVVMTargetAttr target,
                  const gpu::TargetOptions &targetOptions);

  /// Returns the GPU module op being serialized.
  gpu::GPUModuleOp getOperation();

  /// Compiles PTX to cubin using `ptxas`.
  std::optional<SmallVector<char, 0>>
  compileToBinary(const std::string &ptxCode);

  /// Compiles PTX to cubin using the `nvptxcompiler` library.
  std::optional<SmallVector<char, 0>>
  compileToBinaryNVPTX(const std::string &ptxCode);

  std::optional<SmallVector<char, 0>>
  moduleToObject(llvm::Module &llvmModule) override;

  /// Returns the LLVM IR to ISA (PTX) translation time, in milliseconds.
  std::optional<int64_t> getLLVMIRToISATimeInMs();

  /// Returns the ISA (PTX) to binary translation time, in milliseconds.
  std::optional<int64_t> getISAToBinaryTimeInMs();

  /// Temporary file on disk, removed on destruction.
  using TmpFile = std::pair<llvm::SmallString<128>, llvm::FileRemover>;

  /// Creates a temporary file with the given name and suffix.
  std::optional<TmpFile> createTemp(StringRef name, StringRef suffix);

  /// Finds the `tool` executable in the toolkit path, $PATH, or the CUDA
  /// toolkit path.
  std::optional<std::string> findTool(StringRef tool);

  std::optional<int64_t> llvmToISATimeInMs;
  std::optional<int64_t> isaToBinaryTimeInMs;
NVPTXSerializer::NVPTXSerializer(Operation &module, NVVMTargetAttr target,
                                 const gpu::TargetOptions &targetOptions)
    : SerializeGPUModuleBase(module, target, targetOptions),
      targetOptions(targetOptions), llvmToISATimeInMs(std::nullopt),
      isaToBinaryTimeInMs(std::nullopt) {}
std::optional<NVPTXSerializer::TmpFile>
NVPTXSerializer::createTemp(StringRef name, StringRef suffix) {
  llvm::SmallString<128> filename;
  // Truncate the name to keep the temporary path short.
  if (name.size() > 80)
    name = name.substr(0, 80);
  std::error_code ec =
      llvm::sys::fs::createTemporaryFile(name, suffix, filename);
  if (ec) {
    getOperation().emitError() << "Couldn't create the temp file: `" << filename
                               << "`, error message: " << ec.message();
    return std::nullopt;
  }
  return TmpFile(filename, llvm::FileRemover(filename.c_str()));
}
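// Minimal sketch of the TmpFile idiom above (names are illustrative, not from
// the source): the llvm::FileRemover deletes the temporary file when it goes
// out of scope, so early returns in the compilation path cannot leak files.
llvm::SmallString<128> tmpPath;
if (!llvm::sys::fs::createTemporaryFile("mlir-demo", "ptx", tmpPath)) {
  llvm::FileRemover remover(tmpPath.c_str());
  // ... write to tmpPath; the file is removed when `remover` is destroyed.
}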
std::optional<int64_t> NVPTXSerializer::getLLVMIRToISATimeInMs() {
  return llvmToISATimeInMs;
}

std::optional<int64_t> NVPTXSerializer::getISAToBinaryTimeInMs() {
  return isaToBinaryTimeInMs;
}

gpu::GPUModuleOp NVPTXSerializer::getOperation() {
  return cast<gpu::GPUModuleOp>(&SerializeGPUModuleBase::getOperation());
}
std::optional<std::string> NVPTXSerializer::findTool(StringRef tool) {
  // Find the `tool` path.
  // 1. Check the toolkit path given in the command line.
  StringRef pathRef = targetOptions.getToolkitPath();
  SmallVector<char, 256> path;
  if (!pathRef.empty()) {
    path.insert(path.begin(), pathRef.begin(), pathRef.end());
    llvm::sys::path::append(path, "bin", tool);
    if (llvm::sys::fs::can_execute(path))
      return StringRef(path.data(), path.size()).str();
  }

  // 2. Check PATH.
  if (std::optional<std::string> toolPath =
          llvm::sys::Process::FindInEnvPath("PATH", tool))
    return *toolPath;

  // 3. Check `getCUDAToolkitPath()`.
  pathRef = getCUDAToolkitPath();
  path.clear();
  if (!pathRef.empty()) {
    path.insert(path.begin(), pathRef.begin(), pathRef.end());
    llvm::sys::path::append(path, "bin", tool);
    if (llvm::sys::fs::can_execute(path))
      return StringRef(path.data(), path.size()).str();
  }
  getOperation().emitError()
      << "Couldn't find the `" << tool
      << "` binary. Please specify the toolkit "
         "path, add the compiler to $PATH, or set one of the environment "
         "variables in `NVVM::getCUDAToolkitPath()`.";
  return std::nullopt;
}
// Adds optional command-line arguments to existing arguments.
template <typename T>
static void setOptionalCommandlineArguments(NVVMTargetAttr target,
                                            SmallVectorImpl<T> &ptxasArgs) {
  if (!target.hasCmdOptions())
    return;

  std::optional<mlir::NamedAttribute> cmdOptions = target.getCmdOptions();
  for (Attribute attr : cast<ArrayAttr>(cmdOptions->getValue())) {
    if (auto strAttr = dyn_cast<StringAttr>(attr)) {
      if constexpr (std::is_same_v<T, StringRef>) {
        ptxasArgs.push_back(strAttr.getValue());
      } else if constexpr (std::is_same_v<T, const char *>) {
        ptxasArgs.push_back(strAttr.getValue().data());
      }
    }
  }
}
std::optional<SmallVector<char, 0>>
NVPTXSerializer::compileToBinary(const std::string &ptxCode) {
  // Determine whether to create a fatbinary with the PTX embedded or a plain
  // CUBIN binary.
  const bool createFatbin =
      targetOptions.getCompilationTarget() == gpu::CompilationTarget::Fatbin;

  // Find the `ptxas` & `fatbinary` tools.
  std::optional<std::string> ptxasCompiler = findTool("ptxas");
  if (!ptxasCompiler)
    return std::nullopt;
  std::optional<std::string> fatbinaryTool;
  if (createFatbin) {
    fatbinaryTool = findTool("fatbinary");
    if (!fatbinaryTool)
      return std::nullopt;
  }
  Location loc = getOperation().getLoc();

  // Base name for all temporary files: mlir-<module name>-<triple>-<chip>.
  std::string basename =
      llvm::formatv("mlir-{0}-{1}-{2}", getOperation().getNameAttr().getValue(),
                    getTarget().getTriple(), getTarget().getChip());
  // Create temp files:
  std::optional<TmpFile> ptxFile = createTemp(basename, "ptx");
  if (!ptxFile)
    return std::nullopt;
  std::optional<TmpFile> logFile = createTemp(basename, "log");
  if (!logFile)
    return std::nullopt;
  std::optional<TmpFile> binaryFile = createTemp(basename, "bin");
  if (!binaryFile)
    return std::nullopt;
  TmpFile cubinFile;
  if (createFatbin) {
    std::string cubinFilename = (ptxFile->first + ".cubin").str();
    cubinFile = TmpFile(cubinFilename, llvm::FileRemover(cubinFilename));
  } else {
    // When not creating a fatbin, the output of `ptxas` is the final binary.
    cubinFile.first = binaryFile->first;
  }
  // Dump the PTX to the temp file.
  std::error_code ec;
  llvm::raw_fd_ostream ptxStream(ptxFile->first, ec);
  if (ec) {
    emitError(loc) << "Couldn't open the file: `" << ptxFile->first
                   << "`, error message: " << ec.message();
    return std::nullopt;
  }
  ptxStream << ptxCode;
  if (ptxStream.has_error()) {
    emitError(loc) << "An error occurred while writing the PTX to: `"
                   << ptxFile->first << "`.";
    return std::nullopt;
  }
  ptxStream.flush();
  std::optional<StringRef> redirects[] = {std::nullopt, logFile->first,
                                          logFile->first};

  std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>> cmdOpts =
      targetOptions.tokenizeCmdOptions();
  // Create the `ptxas` args.
  std::string optLevel = std::to_string(this->optLevel);
  SmallVector<StringRef, 12> ptxasArgs(
      {StringRef("ptxas"), StringRef("-arch"), getTarget().getChip(),
       StringRef(ptxFile->first), StringRef("-o"), StringRef(cubinFile.first),
       "--opt-level", optLevel});
  bool useFatbin32 = false;
  for (const auto *cArg : cmdOpts.second) {
    // All `cmdOpts` are forwarded to `ptxas`, except `-32`, which selects a
    // 32-bit fatbinary.
    if (StringRef arg(cArg); arg != "-32")
      ptxasArgs.push_back(arg);
    else
      useFatbin32 = true;
  }
  // Create the `fatbinary` args.
  StringRef chip = getTarget().getChip();
  // Remove the arch prefix to obtain the compute capability.
  chip.consume_front("sm_"), chip.consume_front("compute_");
  // Embed the cubin object.
  std::string cubinArg =
      llvm::formatv("--image3=kind=elf,sm={0},file={1}", chip, cubinFile.first)
          .str();
  // Embed the PTX file so the driver can JIT the kernel if needed.
  std::string ptxArg =
      llvm::formatv("--image3=kind=ptx,sm={0},file={1}", chip, ptxFile->first)
          .str();
  SmallVector<StringRef, 6> fatbinArgs({StringRef("fatbinary"),
                                        useFatbin32 ? "-32" : "-64", cubinArg,
                                        ptxArg, "--create", binaryFile->first});
// Dump tool invocation commands.
#define DEBUG_TYPE "serialize-to-binary"
  LDBG() << "Tool invocation for module: " << getOperation().getNameAttr()
         << "\nptxas executable:" << ptxasCompiler.value()
         << "\nptxas args: " << llvm::interleaved(ptxasArgs, " ");
  if (createFatbin)
    LDBG() << "fatbin args: " << llvm::interleaved(fatbinArgs, " ");
#undef DEBUG_TYPE
  // Helper for emitting the error log of a failed tool invocation.
  std::string message;
  auto emitLogError =
      [&](StringRef toolName) -> std::optional<SmallVector<char, 0>> {
    if (message.empty()) {
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> toolStderr =
          llvm::MemoryBuffer::getFile(logFile->first);
      if (toolStderr)
        emitError(loc) << toolName << " invocation failed. Log:\n"
                       << toolStderr->get()->getBuffer();
      else
        emitError(loc) << toolName << " invocation failed.";
      return std::nullopt;
    }
    emitError(loc) << toolName
                   << " invocation failed, error message: " << message;
    return std::nullopt;
  };
  // Invoke `ptxas`.
  if (llvm::sys::ExecuteAndWait(ptxasCompiler.value(), ptxasArgs,
                                /*Env=*/std::nullopt, /*Redirects=*/redirects,
                                /*SecondsToWait=*/0, /*MemoryLimit=*/0,
                                /*ErrMsg=*/&message))
    return emitLogError("`ptxas`");
#define DEBUG_TYPE "dump-sass"
  // Disassemble the cubin and dump the SASS, only active with
  // `-debug-only=dump-sass` in debug builds.
  LLVM_DEBUG({
    std::optional<std::string> nvdisasm = findTool("nvdisasm");
    SmallVector<StringRef> nvdisasmArgs(
        {StringRef("nvdisasm"), StringRef(cubinFile.first)});
    if (llvm::sys::ExecuteAndWait(nvdisasm.value(), nvdisasmArgs,
                                  /*Env=*/std::nullopt, /*Redirects=*/redirects,
                                  /*SecondsToWait=*/0, /*MemoryLimit=*/0,
                                  /*ErrMsg=*/&message))
      return emitLogError("`nvdisasm`");
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> logBuffer =
        llvm::MemoryBuffer::getFile(logFile->first);
    if (logBuffer && !(*logBuffer)->getBuffer().empty()) {
      LDBG() << "Output:\n" << (*logBuffer)->getBuffer();
      llvm::dbgs().flush();
    }
  });
#undef DEBUG_TYPE
  // Invoke `fatbinary` when a fat binary was requested.
  if (createFatbin &&
      llvm::sys::ExecuteAndWait(*fatbinaryTool, fatbinArgs,
                                /*Env=*/std::nullopt, /*Redirects=*/redirects,
                                /*SecondsToWait=*/0, /*MemoryLimit=*/0,
                                /*ErrMsg=*/&message))
    return emitLogError("`fatbinary`");
// Dump the output of the tool invocations.
#define DEBUG_TYPE "serialize-to-binary"
  LLVM_DEBUG({
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> logBuffer =
        llvm::MemoryBuffer::getFile(logFile->first);
    if (logBuffer && !(*logBuffer)->getBuffer().empty()) {
      LDBG() << "Output:\n" << (*logBuffer)->getBuffer();
      llvm::dbgs().flush();
    }
  });
#undef DEBUG_TYPE
  // Read the fatbin (or cubin) into a buffer.
  llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> binaryBuffer =
      llvm::MemoryBuffer::getFile(binaryFile->first);
  if (!binaryBuffer) {
    emitError(loc) << "Couldn't open the file: `" << binaryFile->first
                   << "`, error message: " << binaryBuffer.getError().message();
    return std::nullopt;
  }
  StringRef fatbin = (*binaryBuffer)->getBuffer();
  return SmallVector<char, 0>(fatbin.begin(), fatbin.end());
}
#if MLIR_ENABLE_NVPTXCOMPILER
#include "nvPTXCompiler.h"

#define RETURN_ON_NVPTXCOMPILER_ERROR(expr)                                    \
  do {                                                                         \
    if (auto status = (expr)) {                                                \
      emitError(loc) << llvm::Twine(#expr).concat(" failed with error code ")  \
                     << status;                                                \
      return std::nullopt;                                                     \
    }                                                                          \
  } while (false)
#include "nvFatbin.h"

#define RETURN_ON_NVFATBIN_ERROR(expr)                                         \
  do {                                                                         \
    auto result = (expr);                                                      \
    if (result != nvFatbinResult::NVFATBIN_SUCCESS) {                          \
      emitError(loc) << llvm::Twine(#expr).concat(" failed with error: ")      \
                     << nvFatbinGetErrorString(result);                        \
      return std::nullopt;                                                     \
    }                                                                          \
  } while (false)
std::optional<SmallVector<char, 0>>
NVPTXSerializer::compileToBinaryNVPTX(const std::string &ptxCode) {
  Location loc = getOperation().getLoc();
  nvPTXCompilerHandle compiler = nullptr;
  nvPTXCompileResult status;
  size_t logSize;

  // Create the arguments to pass to the compiler.
  std::string optLevel = std::to_string(this->optLevel);
  std::pair<llvm::BumpPtrAllocator, SmallVector<const char *>> cmdOpts =
      targetOptions.tokenizeCmdOptions();
  cmdOpts.second.append(
      {"-arch", getTarget().getChip().data(), "--opt-level", optLevel.c_str()});
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerCreate(&compiler, ptxCode.size(), ptxCode.c_str()));

  status = nvPTXCompilerCompile(compiler, cmdOpts.second.size(),
                                cmdOpts.second.data());
  // Report compilation failures, preferring the compiler's error log.
  if (status != NVPTXCOMPILE_SUCCESS) {
    RETURN_ON_NVPTXCOMPILER_ERROR(
        nvPTXCompilerGetErrorLogSize(compiler, &logSize));
    if (logSize != 0) {
      SmallVector<char> log(logSize + 1, 0);
      RETURN_ON_NVPTXCOMPILER_ERROR(
          nvPTXCompilerGetErrorLog(compiler, log.data()));
      emitError(loc) << "NVPTX compiler invocation failed, error log: "
                     << log.data();
    } else {
      emitError(loc) << "NVPTX compiler invocation failed with error code: "
                     << status;
    }
    return std::nullopt;
  }
  // Retrieve the compiled binary.
  size_t elfSize;
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerGetCompiledProgramSize(compiler, &elfSize));
  SmallVector<char, 0> binary(elfSize, 0);
  RETURN_ON_NVPTXCOMPILER_ERROR(
      nvPTXCompilerGetCompiledProgram(compiler, (void *)binary.data()));
// Dump the compiler logs.
#define DEBUG_TYPE "serialize-to-binary"
  LLVM_DEBUG({
    RETURN_ON_NVPTXCOMPILER_ERROR(
        nvPTXCompilerGetInfoLogSize(compiler, &logSize));
    if (logSize != 0) {
      SmallVector<char> log(logSize + 1, 0);
      RETURN_ON_NVPTXCOMPILER_ERROR(
          nvPTXCompilerGetInfoLog(compiler, log.data()));
      LDBG() << "NVPTX compiler invocation for module: "
             << getOperation().getNameAttr()
             << "\nArguments: " << llvm::interleaved(cmdOpts.second, " ")
             << "\nOutput\n"
             << log.data();
    }
  });
#undef DEBUG_TYPE
  RETURN_ON_NVPTXCOMPILER_ERROR(nvPTXCompilerDestroy(&compiler));
  // Create the fatbinary if requested.
  if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Fatbin) {
    bool useFatbin32 = llvm::any_of(cmdOpts.second, [](const char *option) {
      return llvm::StringRef(option) == "-32";
    });
    const char *cubinOpts[1] = {useFatbin32 ? "-32" : "-64"};
    nvFatbinHandle handle;

    auto chip = getTarget().getChip();
    chip.consume_front("sm_");

    RETURN_ON_NVFATBIN_ERROR(nvFatbinCreate(&handle, cubinOpts, 1));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinAddCubin(
        handle, binary.data(), binary.size(), chip.data(), nullptr));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinAddPTX(
        handle, ptxCode.data(), ptxCode.size(), chip.data(), nullptr, nullptr));

    size_t fatbinSize;
    RETURN_ON_NVFATBIN_ERROR(nvFatbinSize(handle, &fatbinSize));
    SmallVector<char, 0> fatbin(fatbinSize, 0);
    RETURN_ON_NVFATBIN_ERROR(nvFatbinGet(handle, (void *)fatbin.data()));
    RETURN_ON_NVFATBIN_ERROR(nvFatbinDestroy(&handle));
    return fatbin;
  }

  return binary;
}
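// Build-configuration note (sketch): this in-process path is only compiled
// when MLIR is configured with -DMLIR_ENABLE_NVPTXCOMPILER=ON; otherwise
// compileToBinary() above shells out to the `ptxas`/`fatbinary` executables.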
std::optional<SmallVector<char, 0>>
NVPTXSerializer::moduleToObject(llvm::Module &llvmModule) {
  llvm::Timer moduleToObjectTimer(
      "moduleToObjectTimer",
      "Timer for perf llvm-ir -> isa and isa -> binary.");
  auto clear = llvm::make_scope_exit([&]() { moduleToObjectTimer.clear(); });
#define DEBUG_TYPE "serialize-to-llvm"
  LDBG() << "LLVM IR for module: " << getOperation().getNameAttr();
  LDBG() << llvmModule;
#undef DEBUG_TYPE
  // Return LLVM IR if the compilation target is `offload`.
  if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Offload)
    return SerializeGPUModuleBase::moduleToObject(llvmModule);

#if !LLVM_HAS_NVPTX_TARGET
  getOperation()->emitError(
      "The `NVPTX` target was not built. Please enable it when building LLVM.");
  return std::nullopt;
#endif // LLVM_HAS_NVPTX_TARGET

  // Emit PTX code.
  std::optional<llvm::TargetMachine *> targetMachine =
      getOrCreateTargetMachine();
  if (!targetMachine) {
    getOperation().emitError() << "Target Machine unavailable for triple "
                               << triple << ", can't optimize with LLVM\n";
    return std::nullopt;
  }
  moduleToObjectTimer.startTimer();
  std::optional<std::string> serializedISA =
      translateToISA(llvmModule, **targetMachine);
  moduleToObjectTimer.stopTimer();
  llvmToISATimeInMs = moduleToObjectTimer.getTotalTime().getWallTime() * 1000;
  moduleToObjectTimer.clear();
  if (!serializedISA) {
    getOperation().emitError() << "Failed translating the module to ISA.";
    return std::nullopt;
  }

  if (isaCallback)
    isaCallback(serializedISA.value());

#define DEBUG_TYPE "serialize-to-isa"
  LDBG() << "PTX for module: " << getOperation().getNameAttr() << "\n"
         << *serializedISA;
#undef DEBUG_TYPE
  // Return PTX if the compilation target is `assembly`.
  if (targetOptions.getCompilationTarget() == gpu::CompilationTarget::Assembly)
    return SmallVector<char, 0>(serializedISA->begin(), serializedISA->end());

  // Compile to binary.
  std::optional<SmallVector<char, 0>> result;
  moduleToObjectTimer.startTimer();
#if MLIR_ENABLE_NVPTXCOMPILER
  result = compileToBinaryNVPTX(*serializedISA);
#else
  result = compileToBinary(*serializedISA);
#endif // MLIR_ENABLE_NVPTXCOMPILER
  moduleToObjectTimer.stopTimer();
  isaToBinaryTimeInMs = moduleToObjectTimer.getTotalTime().getWallTime() * 1000;
  moduleToObjectTimer.clear();
  return result;
}
std::optional<SmallVector<char, 0>>
NVVMTargetAttrImpl::serializeToObject(Attribute attribute, Operation *module,
                                      const gpu::TargetOptions &options) const {
  Builder builder(attribute.getContext());
  assert(module && "The module must be non null.");
  if (!module)
    return std::nullopt;
  if (!mlir::isa<gpu::GPUModuleOp>(module)) {
    module->emitError("Module must be a GPU module.");
    return std::nullopt;
  }
  NVPTXSerializer serializer(*module, cast<NVVMTargetAttr>(attribute),
                             options);
  serializer.init();
  std::optional<SmallVector<char, 0>> result = serializer.run();
  auto llvmToISATimeInMs = serializer.getLLVMIRToISATimeInMs();
  if (llvmToISATimeInMs.has_value())
    module->setAttr("LLVMIRToISATimeInMs",
                    builder.getI64IntegerAttr(*llvmToISATimeInMs));
  auto isaToBinaryTimeInMs = serializer.getISAToBinaryTimeInMs();
  if (isaToBinaryTimeInMs.has_value())
    module->setAttr("ISAToBinaryTimeInMs",
                    builder.getI64IntegerAttr(*isaToBinaryTimeInMs));
  return result;
}
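// Sketch (not from the source): downstream tooling can read the profiling
// attributes set above directly from the GPU module op, e.g.
if (auto time = module->getAttrOfType<IntegerAttr>("LLVMIRToISATimeInMs"))
  llvm::outs() << "LLVM IR -> PTX took " << time.getInt() << " ms\n";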
Attribute
NVVMTargetAttrImpl::createObject(Attribute attribute, Operation *module,
                                 const SmallVector<char, 0> &object,
                                 const gpu::TargetOptions &options) const {
  auto target = cast<NVVMTargetAttr>(attribute);
  gpu::CompilationTarget format = options.getCompilationTarget();
  DictionaryAttr objectProps;
  Builder builder(attribute.getContext());
  SmallVector<NamedAttribute, 4> properties;
  if (format == gpu::CompilationTarget::Assembly)
    properties.push_back(
        builder.getNamedAttr("O", builder.getI32IntegerAttr(target.getO())));

  if (StringRef section = options.getELFSection(); !section.empty())
    properties.push_back(builder.getNamedAttr(gpu::elfSectionName,
                                              builder.getStringAttr(section)));

  for (const auto *perfName : {"LLVMIRToISATimeInMs", "ISAToBinaryTimeInMs"}) {
    if (module->hasAttr(perfName)) {
      IntegerAttr attr = llvm::dyn_cast<IntegerAttr>(module->getAttr(perfName));
      properties.push_back(builder.getNamedAttr(
          perfName, builder.getI64IntegerAttr(attr.getInt())));
    }
  }

  if (!properties.empty())
    objectProps = builder.getDictionaryAttr(properties);

  return builder.getAttr<gpu::ObjectAttr>(
      attribute, format,
      builder.getStringAttr(StringRef(object.data(), object.size())),
      objectProps, /*kernels=*/nullptr);
}