#ifndef MLIR_EXECUTIONENGINE_RUNNERUTILS_H
#define MLIR_EXECUTIONENGINE_RUNNERUTILS_H

#ifdef _WIN32
#ifndef MLIR_RUNNERUTILS_EXPORT
#ifdef mlir_runner_utils_EXPORTS
// We are building this library.
#define MLIR_RUNNERUTILS_EXPORT __declspec(dllexport)
#else
// We are using this library.
#define MLIR_RUNNERUTILS_EXPORT __declspec(dllimport)
#endif // mlir_runner_utils_EXPORTS
#endif // MLIR_RUNNERUTILS_EXPORT
#else
#define MLIR_RUNNERUTILS_EXPORT __attribute__((visibility("default")))
#endif // _WIN32

#include <cmath>
#include <iostream>

#include "mlir/ExecutionEngine/CRunnerUtils.h"

template <typename T, typename StreamType>
void printMemRefMetaData(StreamType &os, const DynamicMemRefType<T> &v) {
  os << "base@ = " << reinterpret_cast<void *>(v.data) << " rank = " << v.rank
     << " offset = " << v.offset;
  auto print = [&](const int64_t *ptr) {
    if (v.rank == 0)
      return;
    os << ptr[0];
    for (int64_t i = 1; i < v.rank; ++i)
      os << ", " << ptr[i];
  };
  os << " sizes = [";
  print(v.sizes);
  os << "] strides = [";
  print(v.strides);
  os << "]";
}
template <typename StreamType, typename T, int N>
void printMemRefMetaData(StreamType &os, StridedMemRefType<T, N> &v) {
  static_assert(N >= 0, "Expected N >= 0");
  printMemRefMetaData(os, DynamicMemRefType<T>(v));
}
template <typename StreamType, typename T>
void printUnrankedMemRefMetaData(StreamType &os, UnrankedMemRefType<T> &v) {
  os << "Unranked MemRef ";
  printMemRefMetaData(os, DynamicMemRefType<T>(v));
}
template <typename T, int M, int... Dims>
std::ostream &operator<<(std::ostream &os, const Vector<T, M, Dims...> &v);
template <int... Dims>
struct StaticSizeMult {
  static constexpr int value = 1;
};
template <int N, int... Dims>
struct StaticSizeMult<N, Dims...> {
  static constexpr int value = N * StaticSizeMult<Dims...>::value;
};
static inline void printSpace(std::ostream &os, int count) {
  for (int i = 0; i < count; ++i) {
    os << ' ';
  }
}
template <typename T, int M, int... Dims>
struct VectorDataPrinter {
  static void print(std::ostream &os, const Vector<T, M, Dims...> &val);
};
template <typename T, int M, int... Dims>
void VectorDataPrinter<T, M, Dims...>::print(std::ostream &os,
                                             const Vector<T, M, Dims...> &val) {
  static_assert(M > 0, "0 dimensioned tensor");
  static_assert(sizeof(val) == M * StaticSizeMult<Dims...>::value * sizeof(T),
                "Incorrect vector size!");
  // First element.
  os << "(" << val[0];
  if (M > 1)
    os << ", ";
  if (sizeof...(Dims) > 1)
    os << "\n";
  // Inner elements.
  for (unsigned i = 1; i + 1 < M; ++i) {
    os << val[i] << ", ";
    if (sizeof...(Dims) > 1)
      os << "\n";
  }
  // Last element.
  if (M > 1)
    os << val[M - 1];
  os << ")";
}
template <typename T, int M, int... Dims>
std::ostream &operator<<(std::ostream &os, const Vector<T, M, Dims...> &v) {
  VectorDataPrinter<T, M, Dims...>::print(os, v);
  return os;
}
template <typename T>
struct MemRefDataPrinter {
  static void print(std::ostream &os, T *base, int64_t dim, int64_t rank,
                    int64_t offset, const int64_t *sizes,
                    const int64_t *strides);
  static void printFirst(std::ostream &os, T *base, int64_t dim, int64_t rank,
                         int64_t offset, const int64_t *sizes,
                         const int64_t *strides);
  static void printLast(std::ostream &os, T *base, int64_t dim, int64_t rank,
                        int64_t offset, const int64_t *sizes,
                        const int64_t *strides);
};
template <typename T>
void MemRefDataPrinter<T>::printFirst(std::ostream &os, T *base, int64_t dim,
                                      int64_t rank, int64_t offset,
                                      const int64_t *sizes,
                                      const int64_t *strides) {
  os << "[";
  print(os, base, dim - 1, rank, offset, sizes + 1, strides + 1);
  // If the dimension holds a single element, close the bracket right away.
  if (sizes[0] <= 1) {
    os << "]";
    return;
  }
  os << ", ";
  if (dim > 1)
    os << "\n";
}
template <typename T>
void MemRefDataPrinter<T>::print(std::ostream &os, T *base, int64_t dim,
                                 int64_t rank, int64_t offset,
                                 const int64_t *sizes, const int64_t *strides) {
  if (dim == 0) {
    os << base[offset];
    return;
  }
  printFirst(os, base, dim, rank, offset, sizes, strides);
  for (unsigned i = 1; i + 1 < sizes[0]; ++i) {
    printSpace(os, rank - dim + 1);
    print(os, base, dim - 1, rank, offset + i * strides[0], sizes + 1,
          strides + 1);
    os << ", ";
    if (dim > 1)
      os << "\n";
  }
  if (sizes[0] <= 1)
    return;
  printLast(os, base, dim, rank, offset, sizes, strides);
}
template <typename T>
void MemRefDataPrinter<T>::printLast(std::ostream &os, T *base, int64_t dim,
                                     int64_t rank, int64_t offset,
                                     const int64_t *sizes,
                                     const int64_t *strides) {
  printSpace(os, rank - dim + 1);
  print(os, base, dim - 1, rank, offset + (sizes[0] - 1) * (*strides),
        sizes + 1, strides + 1);
  os << "]";
}
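// Taken together, print/printFirst/printLast render the memref data as
// nested, comma-separated brackets, with inner rows indented by their
// nesting depth. For a 2x2 memref the data section looks roughly like this
// (exact spacing illustrative):
//   [[1,   2],
//    [3,   4]]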
template <typename T, int N>
void printMemRefShape(StridedMemRefType<T, N> &m) {
  std::cout << "Memref ";
  printMemRefMetaData(std::cout, DynamicMemRefType<T>(m));
  std::cout << std::endl;
}
template <typename T>
void printMemRefShape(UnrankedMemRefType<T> &m) {
  std::cout << "Unranked Memref ";
  printMemRefMetaData(std::cout, DynamicMemRefType<T>(m));
  std::cout << std::endl;
}
template <typename T>
void printMemRef(const DynamicMemRefType<T> &m) {
  printMemRefMetaData(std::cout, m);
  std::cout << " data = " << std::endl;
  if (m.rank == 0)
    std::cout << "[";
  MemRefDataPrinter<T>::print(std::cout, m.data, m.rank, m.rank, m.offset,
                              m.sizes, m.strides);
  if (m.rank == 0)
    std::cout << "]";
  std::cout << std::endl;
}
template <typename T, int N>
void printMemRef(StridedMemRefType<T, N> &m) {
  std::cout << "Memref ";
  printMemRef(DynamicMemRefType<T>(m));
}
template <typename T>
void printMemRef(UnrankedMemRefType<T> &m) {
  std::cout << "Unranked Memref ";
  printMemRef(DynamicMemRefType<T>(m));
}
/// Verify the result of two computations are equivalent up to a small
/// numerical error and return the number of errors.
template <typename T>
struct MemRefDataVerifier {
  /// Maximum number of errors printed by the verifier.
  static constexpr int printLimit = 10;

  /// Verify the relative difference of the values is smaller than epsilon.
  static bool verifyRelErrorSmallerThan(T actual, T expected, T epsilon);

  /// Verify the values are equivalent (integers) or are close
  /// (floating-point).
  static bool verifyElem(T actual, T expected);

  /// Verify the data element-by-element and return the number of errors.
  static int64_t verify(std::ostream &os, T *actualBasePtr, T *expectedBasePtr,
                        int64_t dim, int64_t offset, const int64_t *sizes,
                        const int64_t *strides, int64_t &printCounter);
};
template <typename T>
bool MemRefDataVerifier<T>::verifyRelErrorSmallerThan(T actual, T expected,
                                                      T epsilon) {
  // Reject the comparison if either value is infinite or NaN.
  if (!std::isfinite(actual) || !std::isfinite(expected))
    return false;
  // Accept the value if the relative error is within epsilon.
  T delta = std::abs(actual - expected);
  return (delta <= epsilon * std::abs(expected));
}
template <typename T>
bool MemRefDataVerifier<T>::verifyElem(T actual, T expected) {
  return actual == expected;
}
// Double elements are considered equivalent if their relative error is
// below 1e-12.
template <>
inline bool MemRefDataVerifier<double>::verifyElem(double actual,
                                                   double expected) {
  return verifyRelErrorSmallerThan(actual, expected, 1e-12);
}
// Float elements are considered equivalent if their relative error is
// below 1e-6.
template <>
inline bool MemRefDataVerifier<float>::verifyElem(float actual,
                                                  float expected) {
  return verifyRelErrorSmallerThan(actual, expected, 1e-6f);
}
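// For example, with the float specialization above, actual = 1.0000005f
// passes against expected = 1.0f because |actual - expected| ~= 5e-7 is
// below 1e-6f * |expected|, whereas actual = 1.00001f (delta ~= 1e-5) fails.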
template <typename T>
int64_t MemRefDataVerifier<T>::verify(std::ostream &os, T *actualBasePtr,
                                      T *expectedBasePtr, int64_t dim,
                                      int64_t offset, const int64_t *sizes,
                                      const int64_t *strides,
                                      int64_t &printCounter) {
  int64_t errors = 0;
  // Compare the elements at the current offset.
  if (dim == 0) {
    if (!verifyElem(actualBasePtr[offset], expectedBasePtr[offset])) {
      if (printCounter < printLimit) {
        os << actualBasePtr[offset] << " != " << expectedBasePtr[offset]
           << " offset = " << offset << "\n";
        printCounter++;
      }
      errors++;
    }
  } else {
    // Recursively verify all elements of the current dimension.
    for (int64_t i = 0; i < sizes[0]; ++i) {
      errors +=
          verify(os, actualBasePtr, expectedBasePtr, dim - 1,
                 offset + i * strides[0], sizes + 1, strides + 1, printCounter);
    }
  }
  return errors;
}
/// Verify the equivalence of two dynamic memrefs and return the number of
/// errors or -1 if the shapes of the memrefs do not match.
template <typename T>
int64_t verifyMemRef(const DynamicMemRefType<T> &actual,
                     const DynamicMemRefType<T> &expected) {
  // Check that the memref shapes match.
  for (int64_t i = 0; i < actual.rank; ++i) {
    if (actual.sizes[i] != expected.sizes[i]) {
      printMemRefMetaData(std::cerr, actual);
      printMemRefMetaData(std::cerr, expected);
      return -1;
    }
  }
  // Verify the data and return the number of errors.
  int64_t printCounter = 0;
  return MemRefDataVerifier<T>::verify(std::cerr, actual.data, expected.data,
                                       actual.rank, actual.offset, actual.sizes,
                                       actual.strides, printCounter);
}
template <typename T>
int64_t verifyMemRef(UnrankedMemRefType<T> &actual,
                     UnrankedMemRefType<T> &expected) {
  return verifyMemRef(DynamicMemRefType<T>(actual),
                      DynamicMemRefType<T>(expected));
}
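// Illustrative usage sketch (not part of the original header): comparing two
// descriptors set up like the printMemRef example above. verifyMemRef
// returns the number of mismatching elements, or -1 on a shape mismatch.
//
//   int64_t numErrors = verifyMemRef(DynamicMemRefType<float>(actualMemRef),
//                                    DynamicMemRefType<float>(expectedMemRef));
//   assert(numErrors == 0 && "unexpected numerical differences");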
//===----------------------------------------------------------------------===//
// Currently exported C API.
//===----------------------------------------------------------------------===//

extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefShapeI8(UnrankedMemRefType<int8_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefShapeI32(UnrankedMemRefType<int32_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefShapeI64(UnrankedMemRefType<int64_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefShapeF32(UnrankedMemRefType<float> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefShapeF64(UnrankedMemRefType<double> *m);

extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefI8(UnrankedMemRefType<int8_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefI32(UnrankedMemRefType<int32_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefI64(UnrankedMemRefType<int64_t> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefF32(UnrankedMemRefType<float> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefF64(UnrankedMemRefType<double> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemrefVector4x4xf32(
    StridedMemRefType<Vector2D<4, 4, float>, 2> *m);

extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemref0dF32(StridedMemRefType<float, 0> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemref1dF32(StridedMemRefType<float, 1> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemref2dF32(StridedMemRefType<float, 2> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemref3dF32(StridedMemRefType<float, 3> *m);
extern "C" MLIR_RUNNERUTILS_EXPORT void
_mlir_ciface_printMemref4dF32(StridedMemRefType<float, 4> *m);

extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefI32(int64_t rank, void *ptr);
extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefI64(int64_t rank, void *ptr);
extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefF32(int64_t rank, void *ptr);
extern "C" MLIR_RUNNERUTILS_EXPORT void printMemrefF64(int64_t rank, void *ptr);
extern "C" MLIR_RUNNERUTILS_EXPORT void printCString(char *str);

extern "C" MLIR_RUNNERUTILS_EXPORT int64_t _mlir_ciface_nanoTime();

extern "C" MLIR_RUNNERUTILS_EXPORT int64_t
_mlir_ciface_verifyMemRefI32(UnrankedMemRefType<int32_t> *actual,
                             UnrankedMemRefType<int32_t> *expected);
extern "C" MLIR_RUNNERUTILS_EXPORT int64_t
_mlir_ciface_verifyMemRefF32(UnrankedMemRefType<float> *actual,
                             UnrankedMemRefType<float> *expected);
extern "C" MLIR_RUNNERUTILS_EXPORT int64_t
_mlir_ciface_verifyMemRefF64(UnrankedMemRefType<double> *actual,
                             UnrankedMemRefType<double> *expected);

extern "C" MLIR_RUNNERUTILS_EXPORT int64_t verifyMemRefI32(int64_t rank,
                                                           void *actualPtr,
                                                           void *expectedPtr);
extern "C" MLIR_RUNNERUTILS_EXPORT int64_t verifyMemRefF32(int64_t rank,
                                                           void *actualPtr,
                                                           void *expectedPtr);
extern "C" MLIR_RUNNERUTILS_EXPORT int64_t verifyMemRefF64(int64_t rank,
                                                           void *actualPtr,
                                                           void *expectedPtr);

#endif // MLIR_EXECUTIONENGINE_RUNNERUTILS_H