75 m.doc() =
"MLIR Execution Engine";
82 nb::class_<PyExecutionEngine>(m,
"ExecutionEngine")
85 [](PyExecutionEngine &self,
PyModule &module,
int optLevel,
86 const std::vector<std::string> &sharedLibPaths,
87 bool enableObjectDump,
bool enablePIC) {
88 std::vector<MlirStringRef> libPaths;
89 libPaths.reserve(sharedLibPaths.size());
90 for (
const std::string &path : sharedLibPaths)
91 libPaths.push_back({path.c_str(), path.length()});
93 module.
get(), optLevel, libPaths.size(), libPaths.data(),
94 enableObjectDump, enablePIC);
96 throw std::runtime_error(
97 "Failure while creating the ExecutionEngine.");
98 new (&self) PyExecutionEngine(executionEngine);
100 nb::arg(
"module"), nb::arg(
"opt_level") = 2,
101 nb::arg(
"shared_libs") = nb::list(),
102 nb::arg(
"enable_object_dump") =
true, nb::arg(
"enable_pic") =
false,
103 "Create a new ExecutionEngine instance for the given Module. The "
104 "module must contain only dialects that can be translated to LLVM. "
105 "Perform transformations and code generation at the optimization "
106 "level `opt_level` if specified, or otherwise at the default "
107 "level of two (-O2). Load a list of libraries specified in "
110 .def(
"_testing_release", &PyExecutionEngine::release,
111 "Releases (leaks) the backing ExecutionEngine (for testing purpose)")
115 [](PyExecutionEngine &executionEngine,
const std::string &
func) {
117 executionEngine.get(),
119 return reinterpret_cast<uintptr_t
>(res);
121 nb::arg(
"func_name"),
122 "Lookup function `func` in the ExecutionEngine.")
124 "raw_register_runtime",
125 [](PyExecutionEngine &executionEngine,
const std::string &name,
126 const nb::object &callbackObj) {
127 executionEngine.addReferencedObject(callbackObj);
129 nb::cast<uintptr_t>(nb::getattr(callbackObj,
"value"));
131 executionEngine.get(),
133 reinterpret_cast<void *
>(rawSym));
135 nb::arg(
"name"), nb::arg(
"callback"),
136 "Register `callback` as the runtime symbol `name`.")
139 [](PyExecutionEngine &executionEngine) {
142 "Initialize the ExecutionEngine. Global constructors specified by "
143 "`llvm.mlir.global_ctors` will be run. One common scenario is that "
144 "kernel binary compiled from `gpu.module` gets loaded during "
145 "initialization. Make sure all symbols are resolvable before "
146 "initialization by calling `register_runtime` or including "
149 "dump_to_object_file",
150 [](PyExecutionEngine &executionEngine,
const std::string &fileName) {
152 executionEngine.get(),
155 nb::arg(
"file_name"),
"Dump ExecutionEngine to an object file.");
MlirModule get()
Gets the backing MlirModule.
void addReferencedObject(const nb::object &obj)
PyExecutionEngine(MlirExecutionEngine executionEngine)
PyExecutionEngine(PyExecutionEngine &&other) noexcept
static nb::object createFromCapsule(const nb::object &capsule)
MLIR_CAPI_EXPORTED void * mlirExecutionEngineLookupPacked(MlirExecutionEngine jit, MlirStringRef name)
Lookup the wrapper of the native function in the execution engine with the given name; returns nullptr if the function cannot be looked up.
MLIR_CAPI_EXPORTED MlirExecutionEngine mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths, const MlirStringRef *sharedLibPaths, bool enableObjectDump, bool enablePIC)
Creates an ExecutionEngine for the provided ModuleOp.
MLIR_CAPI_EXPORTED void mlirExecutionEngineRegisterSymbol(MlirExecutionEngine jit, MlirStringRef name, void *sym)
Register a symbol with the jit: this symbol will be accessible to the jitted code.