15 #ifndef MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
16 #define MLIR_EXECUTIONENGINE_CRUNNERUTILS_H
19 #ifndef MLIR_CRUNNERUTILS_EXPORT
20 #ifdef mlir_c_runner_utils_EXPORTS
22 #define MLIR_CRUNNERUTILS_EXPORT __declspec(dllexport)
23 #define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
26 #define MLIR_CRUNNERUTILS_EXPORT __declspec(dllimport)
31 #define MLIR_CRUNNERUTILS_EXPORT __attribute__((visibility("default")))
32 #define MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS
38 #include <initializer_list>
/// Returns true iff `n` has no two bits set, i.e. `n` is a power of two.
/// Note: intentionally reports true for 0 as well (same as the original
/// expression `!(n & (n - 1))`); callers pass sizeof(...) values, which are
/// always positive, so the 0 case never arises in practice.
constexpr bool isPowerOf2(int n) { return (n & (n - 1)) == 0; }
53 template <
typename T,
int Dim,
bool IsPowerOf2>
56 template <
typename T,
int Dim>
63 inline const T &
operator[](
unsigned i)
const {
return vector[i]; }
71 template <
typename T,
int Dim>
74 static_assert(
nextPowerOf2(
sizeof(T[Dim])) >
sizeof(T[Dim]),
"size error");
75 static_assert(
nextPowerOf2(
sizeof(T[Dim])) < 2 *
sizeof(T[Dim]),
79 inline const T &
operator[](
unsigned i)
const {
return vector[i]; }
83 char padding[
nextPowerOf2(
sizeof(T[Dim])) -
sizeof(T[Dim])];
89 template <
typename T,
int Dim,
int... Dims>
97 Vector<T, Dims...> vector[Dim];
102 template <
typename T,
int Dim>
105 mlir::detail::isPowerOf2(sizeof(T[Dim]))> {
108 template <
int D1,
typename T>
110 template <
int D1,
int D2,
typename T>
112 template <
int D1,
int D2,
int D3,
typename T>
114 template <
int D1,
int D2,
int D3,
int D4,
typename T>
119 for (
unsigned i = 1; i < N; ++i)
120 *(res + i - 1) = arr[i];
126 template <
typename T,
int Rank>
130 template <
typename T,
int N>
138 template <
typename Range,
139 typename sfinae = decltype(std::declval<Range>().
begin())>
141 assert(indices.size() == N &&
142 "indices should match rank in memref subscript");
143 int64_t curOffset =
offset;
144 for (
int dim = N - 1; dim >= 0; --dim) {
145 int64_t currentIndex = *(indices.begin() + dim);
146 assert(currentIndex <
sizes[dim] &&
"Index overflow");
147 curOffset += currentIndex *
strides[dim];
149 return data[curOffset];
161 dropFront<N>(
sizes, res.sizes);
162 dropFront<N>(
strides, res.strides);
168 template <
typename T>
176 template <
typename Range,
177 typename sfinae = decltype(std::declval<Range>().
begin())>
179 assert(indices.size() == 1 &&
180 "indices should match rank in memref subscript");
181 return (*
this)[*indices.begin()];
191 template <
typename T>
197 template <
typename Range,
198 typename sfinae = decltype(std::declval<Range>().
begin())>
200 assert((indices.size() == 0) &&
201 "Expect empty indices for 0-rank memref subscript");
210 template <
typename T,
int Rank>
221 : offset(offset), descriptor(&descriptor) {}
224 while (dim >= 0 && indices[dim] == (descriptor->
sizes[dim] - 1)) {
225 offset -= indices[dim] * descriptor->
strides[dim];
234 offset += descriptor->
strides[dim];
241 const std::array<int64_t, Rank> &
getIndices() {
return indices; }
244 return other.offset == offset && other.descriptor == descriptor;
248 return !(*
this == other);
257 std::array<int64_t, Rank> indices = {};
264 template <
typename T>
274 : elt(descriptor.data + offset) {}
289 static const std::array<int64_t, 0> indices = {};
294 return other.elt == elt;
298 return !(*
this == other);
310 template <
typename T>
319 template <
typename T>
323 template <
typename T>
346 sizes =
rank == 0 ? nullptr : desc->sizes;
350 template <
typename Range,
351 typename sfinae = decltype(std::declval<Range>().
begin())>
353 assert(indices.size() ==
rank &&
354 "indices should match rank in memref subscript");
358 int64_t curOffset =
offset;
359 for (
int dim =
rank - 1; dim >= 0; --dim) {
360 int64_t currentIndex = *(indices.begin() + dim);
361 assert(currentIndex <
sizes[dim] &&
"Index overflow");
362 curOffset += currentIndex *
strides[dim];
364 return data[curOffset];
372 assert(
rank > 0 &&
"can't make a subscript of a zero ranked array");
385 assert(
rank == 0 &&
"not a zero-ranked memRef");
391 template <
typename T>
401 : offset(offset), descriptor(&descriptor) {
402 indices.resize(descriptor.
rank, 0);
406 if (descriptor->rank == 0) {
411 int dim = descriptor->rank - 1;
413 while (dim >= 0 && indices[dim] == (descriptor->sizes[dim] - 1)) {
414 offset -= indices[dim] * descriptor->strides[dim];
425 offset += descriptor->strides[dim];
435 return other.offset == offset && other.descriptor == descriptor;
439 return !(*
this == other);
448 std::vector<int64_t> indices = {};
MLIR_CRUNNERUTILS_EXPORT void printI64(int64_t i)
MLIR_CRUNNERUTILS_EXPORT void printF32(float f)
MLIR_CRUNNERUTILS_EXPORT void printString(char const *s)
MLIR_CRUNNERUTILS_EXPORT void printU64(uint64_t u)
MLIR_CRUNNERUTILS_EXPORT void memrefCopy(int64_t elemSize, ::UnrankedMemRefType< char > *src, ::UnrankedMemRefType< char > *dst)
MLIR_CRUNNERUTILS_EXPORT void printNewline()
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF64(uint64_t n, StridedMemRefType< double, 1 > *vref)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortI64(uint64_t n, StridedMemRefType< int64_t, 1 > *vref)
MLIR_CRUNNERUTILS_EXPORT void * rtsrand(uint64_t s)
#define MLIR_CRUNNERUTILS_EXPORT
MLIR_CRUNNERUTILS_EXPORT void printOpen()
MLIR_CRUNNERUTILS_EXPORT void printFlops(double flops)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_stdSortF32(uint64_t n, StridedMemRefType< float, 1 > *vref)
void dropFront(int64_t arr[N], int64_t *res)
MLIR_CRUNNERUTILS_EXPORT void rtdrand(void *g)
MLIR_CRUNNERUTILS_EXPORT double rtclock()
MLIR_CRUNNERUTILS_EXPORT void printClose()
MLIR_CRUNNERUTILS_EXPORT uint64_t rtrand(void *g, uint64_t m)
MLIR_CRUNNERUTILS_EXPORT void printF64(double d)
MLIR_CRUNNERUTILS_EXPORT void _mlir_ciface_shuffle(StridedMemRefType< uint64_t, 1 > *mref, void *g)
MLIR_CRUNNERUTILS_EXPORT void printComma()
Iterate over all elements in a dynamic memref.
std::ptrdiff_t difference_type
DynamicMemRefIterator< T > & operator++()
bool operator!=(const DynamicMemRefIterator &other) const
bool operator==(const DynamicMemRefIterator &other) const
DynamicMemRefIterator(DynamicMemRefType< T > &descriptor, int64_t offset=0)
std::forward_iterator_tag iterator_category
const std::vector< int64_t > & getIndices()
DynamicMemRefIterator< T > begin()
DynamicMemRefType(const ::UnrankedMemRefType< T > &memRef)
DynamicMemRefType(const StridedMemRefType< T, 0 > &memRef)
T & operator[](Range &&indices)
DynamicMemRefIterator< T > end()
DynamicMemRefType< T > operator[](int64_t idx)
DynamicMemRefType(const StridedMemRefType< T, N > &memRef)
Iterate over all elements in a 0-ranked strided memref.
bool operator!=(const StridedMemrefIterator &other) const
bool operator==(const StridedMemrefIterator &other) const
std::ptrdiff_t difference_type
StridedMemrefIterator< T, 0 > & operator++()
StridedMemrefIterator(StridedMemRefType< T, 0 > &descriptor, int64_t offset=0)
const std::array< int64_t, 0 > & getIndices()
std::forward_iterator_tag iterator_category
Iterate over all elements in a strided memref.
const std::array< int64_t, Rank > & getIndices()
bool operator!=(const StridedMemrefIterator &other) const
StridedMemrefIterator< T, Rank > & operator++()
StridedMemrefIterator(StridedMemRefType< T, Rank > &descriptor, int64_t offset=0)
bool operator==(const StridedMemrefIterator &other) const
std::ptrdiff_t difference_type
std::forward_iterator_tag iterator_category
constexpr bool isPowerOf2(int n)
constexpr unsigned nextPowerOf2(int n)
Include the generated interface declarations.
StridedMemRef descriptor type specialized for rank 0.
StridedMemrefIterator< T, 0 > end()
StridedMemrefIterator< T, 0 > begin()
T & operator[](Range indices)
StridedMemRef descriptor type specialized for rank 1.
T & operator[](int64_t idx)
StridedMemrefIterator< T, 1 > begin()
StridedMemrefIterator< T, 1 > end()
T & operator[](Range indices)
StridedMemRef descriptor type with static rank.
StridedMemRefType< T, N - 1 > operator[](int64_t idx)
StridedMemrefIterator< T, N > begin()
T & operator[](Range &&indices)
StridedMemrefIterator< T, N > end()
Vector< T, Dims... > & operator[](unsigned i)
const Vector< T, Dims... > & operator[](unsigned i) const
const T & operator[](unsigned i) const
T & operator[](unsigned i)
const T & operator[](unsigned i) const
T & operator[](unsigned i)