//===- ExecutionEngine.h - MLIR Execution engine and utils -----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides a JIT-backed execution engine for MLIR modules.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_EXECUTIONENGINE_EXECUTIONENGINE_H_
#define MLIR_EXECUTIONENGINE_EXECUTIONENGINE_H_

#include "mlir/Support/LLVM.h"
#include "llvm/ExecutionEngine/ObjectCache.h"
#include "llvm/ExecutionEngine/Orc/LLJIT.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Error.h"

#include <functional>
#include <memory>
#include <optional>

namespace llvm {
template <typename T>
class Expected;
class Module;
class ExecutionEngine;
class JITEventListener;
class MemoryBuffer;
} // namespace llvm

namespace mlir {

class Operation;

/// A simple object cache following Lang's LLJITWithObjectCache example.
class SimpleObjectCache : public llvm::ObjectCache {
public:
  void notifyObjectCompiled(const llvm::Module *m,
                            llvm::MemoryBufferRef objBuffer) override;
  std::unique_ptr<llvm::MemoryBuffer> getObject(const llvm::Module *m) override;

  /// Dump cached object to output file `filename`.
  void dumpToObjectFile(StringRef filename);

  /// Returns `true` if cache hasn't been populated yet.
  bool isEmpty();

private:
  llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects;
};

struct ExecutionEngineOptions {
  /// If `llvmModuleBuilder` is provided, it will be used to create an LLVM
  /// module from the given MLIR IR. Otherwise, a default
  /// `translateModuleToLLVMIR` function will be used to translate to LLVM IR.
  llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
                                                   llvm::LLVMContext &)>
      llvmModuleBuilder = nullptr;

  /// If `transformer` is provided, it will be called on the LLVM module during
  /// JIT-compilation and can be used, e.g., for reporting or optimization.
  llvm::function_ref<llvm::Error(llvm::Module *)> transformer = {};

  /// `jitCodeGenOptLevel`, when provided, is used as the optimization level
  /// for target code generation.
  std::optional<llvm::CodeGenOptLevel> jitCodeGenOptLevel;

  /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
  /// open and link the shared libraries for symbol resolution. Libraries that
  /// are designed to be used with the `ExecutionEngine` may implement a
  /// loading and unloading protocol: if they implement the two functions with
  /// the names defined in `kLibraryInitFnName` and `kLibraryDestroyFnName`,
  /// these functions will be called upon loading the library and upon
  /// destruction of the `ExecutionEngine`. In the init function, the library
  /// may provide a list of symbols that it wants to make available to code
  /// run by the `ExecutionEngine`. If the two functions are not defined, only
  /// symbols with public visibility are available to the executed code.
  ArrayRef<StringRef> sharedLibPaths = {};

  /// Specifies an existing `sectionMemoryMapper` to be associated with the
  /// compiled code. If none is provided, a default memory mapper that directly
  /// calls into the operating system is used.
  llvm::SectionMemoryManager::MemoryMapper *sectionMemoryMapper = nullptr;

  /// If `enableObjectDump` is set, the JIT compiler will create an object
  /// cache to store the object generated for the given module. The contents
  /// of the cache can be dumped to a file via the `dumpToObjectFile` method.
  bool enableObjectDump = false;

  /// If `enableGDBNotificationListener` is set, the JIT compiler will notify
  /// LLVM's global GDB notification listener.
  bool enableGDBNotificationListener = true;

  /// If `enablePerfNotificationListener` is set, the JIT compiler will notify
  /// LLVM's global Perf notification listener.
  bool enablePerfNotificationListener = true;
};

/// JIT-backed execution engine for MLIR. Assumes the IR can be converted to
/// LLVM IR. For each function, creates a wrapper function with the fixed
/// interface
///
///   void _mlir_funcName(void **)
///
/// where the only argument is interpreted as a list of pointers to the actual
/// arguments of the function, followed by a pointer to the result. This allows
/// the engine to provide the caller with a generic function pointer that can
/// be used to invoke the JIT-compiled function.
class ExecutionEngine {
public:
  /// Name of the init function of shared libraries. If a library provides a
  /// function with this name as well as one with the destroy function's name,
  /// this function is called upon loading the library.
  static constexpr const char *const kLibraryInitFnName =
      "__mlir_execution_engine_init";

  /// Name of the destroy function of shared libraries. If a library provides
  /// a function with this name as well as one with the init function's name,
  /// this function is called upon destruction of the `ExecutionEngine`.
  static constexpr const char *const kLibraryDestroyFnName =
      "__mlir_execution_engine_destroy";

  /// Function type for init functions of shared libraries. The library may
  /// provide a list of symbols that it wants to make available to code run by
  /// the `ExecutionEngine`. If the two functions are not defined, only symbols
  /// with public visibility are available to the executed code.
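  ///
  /// For example, a shared library can opt into the protocol by defining both
  /// entry points with the names above (an illustrative sketch, not part of
  /// this header; `myRuntimeHelper` is a hypothetical host function exported
  /// to JIT-compiled code):
  ///
  ///   extern "C" void __mlir_execution_engine_init(
  ///       llvm::StringMap<void *> &exportSymbols) {
  ///     exportSymbols["my_runtime_helper"] =
  ///         reinterpret_cast<void *>(&myRuntimeHelper);
  ///   }
  ///   extern "C" void __mlir_execution_engine_destroy() {}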
  using LibraryInitFn = void (*)(llvm::StringMap<void *> &);

  /// Function type for destroy functions of shared libraries.
  using LibraryDestroyFn = void (*)();

  ExecutionEngine(bool enableObjectDump, bool enableGDBNotificationListener,
                  bool enablePerfNotificationListener);

  ~ExecutionEngine();

  /// Creates an execution engine for the given MLIR IR. If TargetMachine is
  /// not provided, a default TM is created (i.e. ignoring any command line
  /// flags that could affect the set-up).
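  ///
  /// A typical setup looks roughly as follows (an illustrative sketch;
  /// `module` is assumed to be an MLIR module already lowered to the LLVM
  /// dialect, and `makeOptimizingTransformer` comes from
  /// "mlir/ExecutionEngine/OptUtils.h"; the optimization pipeline is kept in
  /// a local variable because `transformer` is a non-owning function_ref):
  ///
  ///   auto optPipeline = mlir::makeOptimizingTransformer(
  ///       /*optLevel=*/2, /*sizeLevel=*/0, /*targetMachine=*/nullptr);
  ///   ExecutionEngineOptions engineOptions;
  ///   engineOptions.transformer = optPipeline;
  ///   engineOptions.jitCodeGenOptLevel = llvm::CodeGenOptLevel::Aggressive;
  ///   auto expectedEngine = ExecutionEngine::create(module, engineOptions);
  ///   if (!expectedEngine)
  ///     return expectedEngine.takeError();
  ///   std::unique_ptr<ExecutionEngine> engine = std::move(*expectedEngine);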
  static llvm::Expected<std::unique_ptr<ExecutionEngine>>
  create(Operation *op, const ExecutionEngineOptions &options = {},
         std::unique_ptr<llvm::TargetMachine> tm = nullptr);

  /// Looks up a packed-argument function wrapping the function with the given
  /// name and returns a pointer to it. Propagates errors in case of failure.
  llvm::Expected<void (*)(void **)> lookupPacked(StringRef name) const;

  /// Looks up the original function with the given name and returns a
  /// pointer to it. This is not necessarily a packed function. Propagates
  /// errors in case of failure.
  llvm::Expected<void *> lookup(StringRef name) const;

  /// Invokes the function with the given name passing it the list of opaque
  /// pointers to the actual arguments.
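  ///
  /// For example, invoking a JIT-compiled `foo(i32) -> i32` through the
  /// packed interface could look as follows (an illustrative sketch; the
  /// packed wrapper expects one pointer per argument followed by a pointer
  /// to storage for the result):
  ///
  ///   int32_t arg = 42, res = 0;
  ///   llvm::SmallVector<void *> args = {&arg, &res};
  ///   if (llvm::Error error = engine->invokePacked("foo", args))
  ///     return error;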
  llvm::Error invokePacked(StringRef name, MutableArrayRef<void *> args = {});

  /// Trait that defines how a given type is passed to the JIT code. This
  /// defaults to passing the address but can be specialized.
  template <typename T>
  struct Argument {
    static void pack(SmallVectorImpl<void *> &args, T &val) {
      args.push_back(&val);
    }
  };

  /// Tag to wrap an output parameter when invoking a jitted function.
  template <typename T>
  struct Result {
    Result(T &result) : value(result) {}
    T &value;
  };

  /// Helper function to wrap an output operand when using
  /// ExecutionEngine::invoke.
  template <typename T>
  static Result<T> result(T &t) {
    return Result<T>(t);
  }

  // Specialization for output parameters: their address is forwarded directly
  // to the native code.
  template <typename T>
  struct Argument<Result<T>> {
    static void pack(SmallVectorImpl<void *> &args, Result<T> &result) {
      args.push_back(&result.value);
    }
  };
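
  // A user-defined type can likewise opt into a different passing convention
  // by specializing `Argument` (an illustrative sketch; `MyHandle` is a
  // hypothetical type whose wrapped pointer, rather than the handle object
  // itself, should be handed to the jitted function):
  //
  //   struct MyHandle { void *ptr; };
  //   template <>
  //   struct mlir::ExecutionEngine::Argument<MyHandle> {
  //     static void pack(SmallVectorImpl<void *> &args, MyHandle &val) {
  //       args.push_back(&val.ptr);
  //     }
  //   };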

  /// Invokes the function with the given name passing it the list of arguments
  /// by value. The function result can be obtained through an output parameter
  /// using the `Result` wrapper defined above. For example:
  ///
  ///   func @foo(%arg0 : i32) -> i32 attributes { llvm.emit_c_interface }
  ///
  /// can be invoked:
  ///
  ///   int32_t result = 0;
  ///   llvm::Error error = jit->invoke("foo", 42, result(result));
  template <typename... Args>
  llvm::Error invoke(StringRef funcName, Args... args) {
    const std::string adapterName =
        std::string("_mlir_ciface_") + funcName.str();
    llvm::SmallVector<void *> argsArray;
    // Pack every argument in an array of pointers. Delegate the packing to a
    // trait so that it can be overridden per argument type.
    (Argument<Args>::pack(argsArray, args), ...);
    return invokePacked(adapterName, argsArray);
  }

  /// Set the target triple and the data layout for the input module based on
  /// the input TargetMachine. This is implicitly done when creating the
  /// engine.
  static void setupTargetTripleAndDataLayout(llvm::Module *llvmModule,
                                             llvm::TargetMachine *tm);

  /// Dump object code to output file `filename`.
  void dumpToObjectFile(StringRef filename);

  /// Register symbols with this ExecutionEngine.
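  ///
  /// For example (an illustrative sketch; `myHelper` is a hypothetical host
  /// function to expose to JIT-compiled code):
  ///
  ///   engine->registerSymbols([&](llvm::orc::MangleAndInterner interner) {
  ///     llvm::orc::SymbolMap symbolMap;
  ///     symbolMap[interner("my_helper")] = {
  ///         llvm::orc::ExecutorAddr::fromPtr(&myHelper),
  ///         llvm::JITSymbolFlags::Exported};
  ///     return symbolMap;
  ///   });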
  void registerSymbols(
      llvm::function_ref<llvm::orc::SymbolMap(llvm::orc::MangleAndInterner)>
          symbolMap);

private:
  /// Ordering of llvmContext and jit is important for destruction purposes:
  /// the jit must be destroyed before the context.
  llvm::LLVMContext llvmContext;

  /// Underlying LLJIT.
  std::unique_ptr<llvm::orc::LLJIT> jit;

  /// Underlying cache.
  std::unique_ptr<SimpleObjectCache> cache;

  /// Names of functions that may be looked up.
  std::vector<std::string> functionNames;

  /// GDB notification listener.
  llvm::JITEventListener *gdbListener;

  /// Perf notification listener.
  llvm::JITEventListener *perfListener;

  /// Destroy functions in the libraries loaded by the ExecutionEngine that are
  /// called when this ExecutionEngine is destructed.
  SmallVector<LibraryDestroyFn> destroyFns;
};

} // namespace mlir

#endif // MLIR_EXECUTIONENGINE_EXECUTIONENGINE_H_