MLIR 22.0.0git
CRunnerUtils.h
Go to the documentation of this file.
1//===- CRunnerUtils.h - Utils for debugging MLIR execution ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares basic classes and functions to manipulate structured MLIR
10// types at runtime. Entities in this file must be compliant with C++11 and be
11// retargetable, including on targets without a C++ runtime.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
16#define MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
17
18#ifdef _WIN32
19#ifndef MLIR_CRUNNERUTILS_EXPORT
20#ifdef mlir_c_runner_utils_EXPORTS
21// We are building this library
22#define MLIR_CRUNNERUTILS_EXPORT __declspec(dllexport)
23#define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
24#else
25// We are using this library
26#define MLIR_CRUNNERUTILS_EXPORT __declspec(dllimport)
27#endif // mlir_c_runner_utils_EXPORTS
28#endif // MLIR_CRUNNERUTILS_EXPORT
29#else // _WIN32
30// Non-windows: use visibility attributes.
31#define MLIR_CRUNNERUTILS_EXPORT __attribute__((visibility("default")))
32#define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
33#endif // _WIN32
34
35#include <array>
36#include <cassert>
37#include <cstdint>
38#include <initializer_list>
39#include <vector>
40
41//===----------------------------------------------------------------------===//
42// Codegen-compatible structures for Vector type.
43//===----------------------------------------------------------------------===//
namespace mlir {
namespace detail {

/// Returns true iff `n` has at most one bit set. Note this is also true for
/// n == 0; callers pass positive sizeof-derived values, where the distinction
/// does not arise.
constexpr bool isPowerOf2(int n) { return (!(n & (n - 1))); }

/// Returns the smallest power of 2 greater than or equal to `n` (1 for
/// n <= 1). Written as a single return expression so it is a valid C++11
/// constexpr function.
constexpr unsigned nextPowerOf2(int n) {
  return (n <= 1) ? 1 : (isPowerOf2(n) ? n : (2 * nextPowerOf2((n + 1) / 2)));
}

template <typename T, int Dim, bool IsPowerOf2>
struct Vector1D;

/// 1-D vector whose natural byte size is already a power of 2: no padding is
/// required to match LLVM's vector allocation size.
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/true> {
  Vector1D() {
    static_assert(detail::nextPowerOf2(sizeof(T[Dim])) == sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
};

// 1-D vector, padded to the next power of 2 allocation.
// Specialization occurs to avoid zero size arrays (which fail in -Werror).
template <typename T, int Dim>
struct Vector1D<T, Dim, /*IsPowerOf2=*/false> {
  Vector1D() {
    static_assert(nextPowerOf2(sizeof(T[Dim])) > sizeof(T[Dim]), "size error");
    static_assert(nextPowerOf2(sizeof(T[Dim])) < 2 * sizeof(T[Dim]),
                  "size error");
  }
  inline T &operator[](unsigned i) { return vector[i]; }
  inline const T &operator[](unsigned i) const { return vector[i]; }

private:
  T vector[Dim];
  // Explicit tail padding so sizeof(Vector1D) is the next power of 2, matching
  // how LLVM pads 1-D vector allocations.
  char padding[nextPowerOf2(sizeof(T[Dim])) - sizeof(T[Dim])];
};
} // namespace detail
} // namespace mlir
87
// N-D vectors recurse down to 1-D.
/// Recursive N-D vector: the outermost dimension holds `Dim` copies of the
/// (N-1)-D vector for the remaining `Dims...`. The single-dimension base case
/// is provided by the Vector<T, Dim> partial specialization.
template <typename T, int Dim, int... Dims>
struct Vector {
  /// Unchecked access to the sub-vector at position `i`.
  inline Vector<T, Dims...> &operator[](unsigned i) { return vector[i]; }
  inline const Vector<T, Dims...> &operator[](unsigned i) const {
    return vector[i];
  }

private:
  Vector<T, Dims...> vector[Dim];
};
99
100// 1-D vectors in LLVM are automatically padded to the next power of 2.
101// We insert explicit padding in to account for this.
102template <typename T, int Dim>
103struct Vector<T, Dim>
104 : public mlir::detail::Vector1D<T, Dim,
105 mlir::detail::isPowerOf2(sizeof(T[Dim]))> {
106};
107
108template <int D1, typename T>
110template <int D1, int D2, typename T>
112template <int D1, int D2, int D3, typename T>
114template <int D1, int D2, int D3, int D4, typename T>
116
/// Copies `arr[1..N-1]` into `res[0..N-2]`, i.e. strips the leading element.
/// Used to peel the outermost dimension off a memref's sizes/strides arrays
/// when forming a rank-(N-1) sub-memref.
template <int N>
void dropFront(int64_t arr[N], int64_t *res) {
  for (int j = 0; j + 1 < N; ++j)
    res[j] = arr[j + 1];
}
122
123//===----------------------------------------------------------------------===//
124// Codegen-compatible structures for StridedMemRef type.
125//===----------------------------------------------------------------------===//
126template <typename T, int Rank>
128
129/// StridedMemRef descriptor type with static rank.
130template <typename T, int N>
137
138 template <typename Range,
139 typename sfinae = decltype(std::declval<Range>().begin())>
140 T &operator[](Range &&indices) {
141 assert(indices.size() == N &&
142 "indices should match rank in memref subscript");
143 int64_t curOffset = offset;
144 for (int dim = N - 1; dim >= 0; --dim) {
145 int64_t currentIndex = *(indices.begin() + dim);
146 assert(currentIndex < sizes[dim] && "Index overflow");
147 curOffset += currentIndex * strides[dim];
148 }
149 return data[curOffset];
150 }
151
153 StridedMemrefIterator<T, N> end() { return {*this, -1}; }
154
155 // This operator[] is extremely slow and only for sugaring purposes.
157 StridedMemRefType<T, N - 1> res;
158 res.basePtr = basePtr;
159 res.data = data;
160 res.offset = offset + idx * strides[0];
161 dropFront<N>(sizes, res.sizes);
162 dropFront<N>(strides, res.strides);
163 return res;
164 }
165};
166
167/// StridedMemRef descriptor type specialized for rank 1.
168template <typename T>
169struct StridedMemRefType<T, 1> {
175
176 template <typename Range,
177 typename sfinae = decltype(std::declval<Range>().begin())>
178 T &operator[](Range indices) {
179 assert(indices.size() == 1 &&
180 "indices should match rank in memref subscript");
181 return (*this)[*indices.begin()];
182 }
183
185 StridedMemrefIterator<T, 1> end() { return {*this, -1}; }
186
187 T &operator[](int64_t idx) { return *(data + offset + idx * strides[0]); }
188};
189
190/// StridedMemRef descriptor type specialized for rank 0.
191template <typename T>
196
197 template <typename Range,
198 typename sfinae = decltype(std::declval<Range>().begin())>
199 T &operator[](Range indices) {
200 assert((indices.size() == 0) &&
201 "Expect empty indices for 0-rank memref subscript");
202 return data[offset];
203 }
204
206 StridedMemrefIterator<T, 0> end() { return {*this, offset + 1}; }
207};
208
209/// Iterate over all elements in a strided memref.
210template <typename T, int Rank>
212public:
213 using iterator_category = std::forward_iterator_tag;
214 using value_type = T;
215 using difference_type = std::ptrdiff_t;
216 using pointer = T *;
217 using reference = T &;
218
220 int64_t offset = 0)
221 : offset(offset), descriptor(&descriptor) {}
223 int dim = Rank - 1;
224 while (dim >= 0 && indices[dim] == (descriptor->sizes[dim] - 1)) {
225 offset -= indices[dim] * descriptor->strides[dim];
226 indices[dim] = 0;
227 --dim;
228 }
229 if (dim < 0) {
230 offset = -1;
231 return *this;
232 }
233 ++indices[dim];
234 offset += descriptor->strides[dim];
235 return *this;
236 }
237
238 reference operator*() { return descriptor->data[offset]; }
239 pointer operator->() { return &descriptor->data[offset]; }
240
241 const std::array<int64_t, Rank> &getIndices() { return indices; }
242
243 bool operator==(const StridedMemrefIterator &other) const {
244 return other.offset == offset && other.descriptor == descriptor;
245 }
246
247 bool operator!=(const StridedMemrefIterator &other) const {
248 return !(*this == other);
249 }
250
251private:
252 /// Offset in the buffer. This can be derived from the indices and the
253 /// descriptor.
254 int64_t offset = 0;
255
256 /// Array of indices in the multi-dimensional memref.
257 std::array<int64_t, Rank> indices = {};
258
259 /// Descriptor for the strided memref.
260 StridedMemRefType<T, Rank> *descriptor;
261};
262
263/// Iterate over all elements in a 0-ranked strided memref.
264template <typename T>
266public:
267 using iterator_category = std::forward_iterator_tag;
268 using value_type = T;
269 using difference_type = std::ptrdiff_t;
270 using pointer = T *;
271 using reference = T &;
272
274 : elt(descriptor.data + offset) {}
275
277 ++elt;
278 return *this;
279 }
280
281 reference operator*() { return *elt; }
282 pointer operator->() { return elt; }
283
284 // There are no indices for a 0-ranked memref, but this API is provided for
285 // consistency with the general case.
286 const std::array<int64_t, 0> &getIndices() {
287 // Since this is a 0-array of indices we can keep a single global const
288 // copy.
289 static const std::array<int64_t, 0> indices = {};
290 return indices;
291 }
292
293 bool operator==(const StridedMemrefIterator &other) const {
294 return other.elt == elt;
295 }
296
297 bool operator!=(const StridedMemrefIterator &other) const {
298 return !(*this == other);
299 }
300
301private:
302 /// Pointer to the single element in the zero-ranked memref.
303 T *elt;
304};
305
306//===----------------------------------------------------------------------===//
307// Codegen-compatible structure for UnrankedMemRef type.
308//===----------------------------------------------------------------------===//
309// Unranked MemRef
/// Type-erased memref descriptor: the rank plus an opaque pointer to the
/// corresponding ranked StridedMemRefType descriptor (see DynamicMemRefType's
/// constructor below, which casts `descriptor` back to a ranked form).
template <typename T>
struct UnrankedMemRefType {
  int64_t rank;
  void *descriptor;
};
316//===----------------------------------------------------------------------===//
317// DynamicMemRefType type.
318//===----------------------------------------------------------------------===//
319template <typename T>
321
322// A reference to one of the StridedMemRef types.
323template <typename T>
325public:
332
334 : rank(0), basePtr(memRef.basePtr), data(memRef.data),
336 template <int N>
338 : rank(N), basePtr(memRef.basePtr), data(memRef.data),
339 offset(memRef.offset), sizes(memRef.sizes), strides(memRef.strides) {}
340 explicit DynamicMemRefType(const ::UnrankedMemRefType<T> &memRef)
341 : rank(memRef.rank) {
342 auto *desc = static_cast<StridedMemRefType<T, 1> *>(memRef.descriptor);
343 basePtr = desc->basePtr;
344 data = desc->data;
345 offset = desc->offset;
346 sizes = rank == 0 ? nullptr : desc->sizes;
347 strides = sizes + rank;
348 }
349
350 template <typename Range,
351 typename sfinae = decltype(std::declval<Range>().begin())>
352 T &operator[](Range &&indices) {
353 assert(indices.size() == rank &&
354 "indices should match rank in memref subscript");
355 if (rank == 0)
356 return data[offset];
357
358 int64_t curOffset = offset;
359 for (int dim = rank - 1; dim >= 0; --dim) {
360 int64_t currentIndex = *(indices.begin() + dim);
361 assert(currentIndex < sizes[dim] && "Index overflow");
362 curOffset += currentIndex * strides[dim];
363 }
364 return data[curOffset];
365 }
366
367 DynamicMemRefIterator<T> begin() { return {*this, offset}; }
368 DynamicMemRefIterator<T> end() { return {*this, -1}; }
369
370 // This operator[] is extremely slow and only for sugaring purposes.
372 assert(rank > 0 && "can't make a subscript of a zero ranked array");
373
374 DynamicMemRefType<T> res(*this);
375 --res.rank;
376 res.offset += idx * res.strides[0];
377 ++res.sizes;
378 ++res.strides;
379 return res;
380 }
381
382 // This operator* can be used in conjunction with the previous operator[] in
383 // order to access the underlying value in case of zero-ranked memref.
385 assert(rank == 0 && "not a zero-ranked memRef");
386 return data[offset];
387 }
388};
389
390/// Iterate over all elements in a dynamic memref.
391template <typename T>
393public:
394 using iterator_category = std::forward_iterator_tag;
395 using value_type = T;
396 using difference_type = std::ptrdiff_t;
397 using pointer = T *;
398 using reference = T &;
399
401 : offset(offset), descriptor(&descriptor) {
402 indices.resize(descriptor.rank, 0);
403 }
404
406 if (descriptor->rank == 0) {
407 offset = -1;
408 return *this;
409 }
410
411 int dim = descriptor->rank - 1;
412
413 while (dim >= 0 && indices[dim] == (descriptor->sizes[dim] - 1)) {
414 offset -= indices[dim] * descriptor->strides[dim];
415 indices[dim] = 0;
416 --dim;
417 }
418
419 if (dim < 0) {
420 offset = -1;
421 return *this;
422 }
423
424 ++indices[dim];
425 offset += descriptor->strides[dim];
426 return *this;
427 }
428
429 reference operator*() { return descriptor->data[offset]; }
430 pointer operator->() { return &descriptor->data[offset]; }
431
432 const std::vector<int64_t> &getIndices() { return indices; }
433
434 bool operator==(const DynamicMemRefIterator &other) const {
435 return other.offset == offset && other.descriptor == descriptor;
436 }
437
438 bool operator!=(const DynamicMemRefIterator &other) const {
439 return !(*this == other);
440 }
441
442private:
443 /// Offset in the buffer. This can be derived from the indices and the
444 /// descriptor.
445 int64_t offset = 0;
446
447 /// Array of indices in the multi-dimensional memref.
448 std::vector<int64_t> indices = {};
449
450 /// Descriptor for the dynamic memref.
451 DynamicMemRefType<T> *descriptor;
452};
453
454//===----------------------------------------------------------------------===//
455// Small runtime support library for memref.copy lowering during codegen.
456//===----------------------------------------------------------------------===//
457extern "C" MLIR_CRUNNERUTILS_EXPORT void
460
461//===----------------------------------------------------------------------===//
462// Small runtime support library for vector.print lowering during codegen.
463//===----------------------------------------------------------------------===//
465extern "C" MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u);
466extern "C" MLIR_CRUNNERUTILS_EXPORT void printF32(float f);
467extern "C" MLIR_CRUNNERUTILS_EXPORT void printF64(double d);
468extern "C" MLIR_CRUNNERUTILS_EXPORT void printString(char const *s);
473
474//===----------------------------------------------------------------------===//
475// Small runtime support library for timing execution and printing GFLOPS
476//===----------------------------------------------------------------------===//
477extern "C" MLIR_CRUNNERUTILS_EXPORT void printFlops(double flops);
479
480//===----------------------------------------------------------------------===//
481// Runtime support library for random number generation.
482//===----------------------------------------------------------------------===//
483// Uses a seed to initialize a random generator and returns the generator.
484extern "C" MLIR_CRUNNERUTILS_EXPORT void *rtsrand(uint64_t s);
485// Uses a random number generator g and returns a random number
486// in the range of [0, m).
487extern "C" MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *g, uint64_t m);
488// Deletes the random number generator.
489extern "C" MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *g);
490// Uses a random number generator g and std::shuffle to modify mref
491// in place. Memref mref will be a permutation of all numbers
492// in the range of [0, size of mref).
493extern "C" MLIR_CRUNNERUTILS_EXPORT void
495
496//===----------------------------------------------------------------------===//
497// Runtime support library to allow the use of std::sort in MLIR program.
498//===----------------------------------------------------------------------===//
499extern "C" MLIR_CRUNNERUTILS_EXPORT void
501extern "C" MLIR_CRUNNERUTILS_EXPORT void
503extern "C" MLIR_CRUNNERUTILS_EXPORT void
505#endif // MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
MLIR_CRUNNERUTILS_EXPORT void printI64(int64_t i)
MLIR_CRUNNERUTILS_EXPORT void printF32(float f)
MLIR_CRUNNERUTILS_EXPORT void printString(char const *s)
MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u)
MLIR_CRUNNERUTILS_EXPORT void memrefCopy(int64_t elemSize, ::UnrankedMemRefType< char > *src, ::UnrankedMemRefType< char > *dst)
MLIR_CRUNNERUTILS_EXPORT void printNewline()
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF64(uint64_t n, StridedMemRefType< double, 1 > *vref)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortI64(uint64_t n, StridedMemRefType< int64_t, 1 > *vref)
Vector< T, D1 > Vector1D
#define MLIR_CRUNNERUTILS_EXPORT
Vector< T, D1, D2 > Vector2D
MLIR_CRUNNERUTILS_EXPORT void printOpen()
MLIR_CRUNNERUTILS_EXPORT void printFlops(double flops)
MLIR_CRUNNERUTILS_EXPORT void * rtsrand(uint64_t s)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF32(uint64_t n, StridedMemRefType< float, 1 > *vref)
void dropFront(int64_t arr[N], int64_t *res)
Vector< T, D1, D2, D3, D4 > Vector4D
MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *g)
MLIR_CRUNNERUTILS_EXPORT double rtclock()
MLIR_CRUNNERUTILS_EXPORT void printClose()
MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *g, uint64_t m)
MLIR_CRUNNERUTILS_EXPORT void printF64(double d)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_shuffle(StridedMemRefType< uint64_t, 1 > *mref, void *g)
MLIR_CRUNNERUTILS_EXPORT void printComma()
Vector< T, D1, D2, D3 > Vector3D
true
Given two iterators into the same block, return "true" if `a` is before `b`.
false
Parses a map_entries map type from a string format back into its numeric value.
Iterate over all elements in a dynamic memref.
std::ptrdiff_t difference_type
DynamicMemRefIterator< T > & operator++()
bool operator!=(const DynamicMemRefIterator &other) const
bool operator==(const DynamicMemRefIterator &other) const
DynamicMemRefIterator(DynamicMemRefType< T > &descriptor, int64_t offset=0)
std::forward_iterator_tag iterator_category
const std::vector< int64_t > & getIndices()
T & operator[](Range &&indices)
const int64_t * sizes
DynamicMemRefIterator< T > end()
DynamicMemRefType(const ::UnrankedMemRefType< T > &memRef)
DynamicMemRefType(const StridedMemRefType< T, 0 > &memRef)
DynamicMemRefType< T > operator[](int64_t idx)
DynamicMemRefType(const StridedMemRefType< T, N > &memRef)
const int64_t * strides
DynamicMemRefIterator< T > begin()
const std::array< int64_t, 0 > & getIndices()
bool operator!=(const StridedMemrefIterator &other) const
bool operator==(const StridedMemrefIterator &other) const
StridedMemrefIterator< T, 0 > & operator++()
StridedMemrefIterator(StridedMemRefType< T, 0 > &descriptor, int64_t offset=0)
std::forward_iterator_tag iterator_category
Iterate over all elements in a strided memref.
bool operator!=(const StridedMemrefIterator &other) const
StridedMemrefIterator(StridedMemRefType< T, Rank > &descriptor, int64_t offset=0)
bool operator==(const StridedMemrefIterator &other) const
std::ptrdiff_t difference_type
StridedMemrefIterator< T, Rank > & operator++()
const std::array< int64_t, Rank > & getIndices()
std::forward_iterator_tag iterator_category
constexpr bool isPowerOf2(int n)
constexpr unsigned nextPowerOf2(int n)
Include the generated interface declarations.
StridedMemrefIterator< T, 0 > end()
StridedMemrefIterator< T, 0 > begin()
StridedMemrefIterator< T, 1 > begin()
StridedMemrefIterator< T, 1 > end()
T & operator[](int64_t idx)
StridedMemRef descriptor type with static rank.
StridedMemRefType< T, N - 1 > operator[](int64_t idx)
StridedMemrefIterator< T, N > end()
StridedMemrefIterator< T, N > begin()
Vector< T, Dims... > & operator[](unsigned i)
const Vector< T, Dims... > & operator[](unsigned i) const
const T & operator[](unsigned i) const
const T & operator[](unsigned i) const