19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/Support/raw_ostream.h"
29 #include <initializer_list>
33 #ifndef MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
34 #define MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
47 assert(shape.size() == N &&
"expect shape specification to match rank");
48 std::array<int64_t, N> res;
50 for (int64_t idx = N - 1; idx >= 0; --idx) {
51 assert(shape[idx] &&
"size must be non-negative for all shape dimensions");
53 running *= shape[idx];
63 template <
int N,
typename T>
67 assert(shape.size() == N);
68 assert(shapeAlloc.size() == N);
70 descriptor.
basePtr =
static_cast<T *
>(ptr);
71 descriptor.
data =
static_cast<T *
>(alignedPtr);
74 auto strides = makeStrides<N>(shapeAlloc);
84 template <
int N,
typename T>
88 assert(shape.size() == N);
89 assert(shapeAlloc.size() == N);
91 descriptor.
basePtr =
static_cast<T *
>(ptr);
92 descriptor.
data =
static_cast<T *
>(alignedPtr);
101 template <
typename T>
104 std::optional<uint64_t> alignment = std::optional<uint64_t>()) {
105 assert(
sizeof(T) <= UINT_MAX &&
"Elemental type overflows");
106 auto size = nElements *
sizeof(T);
107 auto desiredAlignment = alignment.value_or(
nextPowerOf2(
sizeof(T)));
108 assert((desiredAlignment & (desiredAlignment - 1)) == 0);
109 assert(desiredAlignment >=
sizeof(T));
110 T *data =
reinterpret_cast<T *
>(allocFun(size + desiredAlignment));
111 uintptr_t addr =
reinterpret_cast<uintptr_t
>(data);
112 uintptr_t rem = addr % desiredAlignment;
113 T *alignedData = (rem == 0)
115 :
reinterpret_cast<T *
>(addr + (desiredAlignment - rem));
116 assert(
reinterpret_cast<uintptr_t
>(alignedData) % desiredAlignment == 0);
117 return std::make_pair(data, alignedData);
129 template <
typename T>
134 template <
typename T,
unsigned Rank>
149 std::optional<uint64_t> alignment = std::optional<uint64_t>(),
153 ::free(descriptor.
data);
155 : freeFunc(freeFun) {
156 if (shapeAlloc.empty())
158 assert(shape.size() == Rank);
159 assert(shapeAlloc.size() == Rank);
160 for (
unsigned i = 0; i < Rank; ++i)
161 assert(shape[i] <= shapeAlloc[i] &&
162 "shapeAlloc must be greater than or equal to shape");
163 int64_t nElements = 1;
164 for (int64_t s : shapeAlloc)
166 auto [data, alignedData] =
167 detail::allocAligned<T>(nElements, allocFun, alignment);
168 descriptor = detail::makeStridedMemRefDescriptor<Rank>(data, alignedData,
172 end = descriptor.
end();
174 init(*it, it.getIndices());
176 memset(descriptor.
data, 0,
177 nElements *
sizeof(T) +
183 : freeFunc(freeFunc), descriptor(descriptor) {}
186 freeFunc(descriptor);
191 freeFunc = other.freeFunc;
192 descriptor = other.descriptor;
193 other.freeFunc =
nullptr;
194 memset(&other.descriptor, 0,
sizeof(other.descriptor));
201 return descriptor[indices];
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
Iterate over all elements in a strided memref.
Owning MemRef type that abstracts over the runtime type for ranked strided memref.
OwningMemRef(OwningMemRef &&other)
T & operator[](std::initializer_list< int64_t > indices)
DescriptorType * operator->()
OwningMemRef(const OwningMemRef &)=delete
StridedMemRefType< T, Rank > DescriptorType
OwningMemRef(DescriptorType descriptor, FreeFunType freeFunc)
Take ownership of an existing descriptor with a custom deleter.
OwningMemRef & operator=(const OwningMemRef &&other)
DescriptorType & operator*()
OwningMemRef(ArrayRef< int64_t > shape, ArrayRef< int64_t > shapeAlloc={}, ElementWiseVisitor< T > init={}, std::optional< uint64_t > alignment=std::optional< uint64_t >(), AllocFunType allocFun=&::malloc, std::function< void(StridedMemRefType< T, Rank >)> freeFun=[](StridedMemRefType< T, Rank > descriptor) { ::free(descriptor.data);})
Allocate a new dense strided MemRef buffer with a given shape.
std::function< void(DescriptorType)> FreeFunType
OwningMemRef & operator=(const OwningMemRef &)=delete
constexpr unsigned nextPowerOf2(int n)
std::array< int64_t, N > makeStrides(ArrayRef< int64_t > shape)
Given a shape with sizes greater than 0 along all dimensions, returns the distance, in number of elements, between a slice in a dimension and the next slice in the same dimension.
std::enable_if<(N >=1), StridedMemRefType< T, N > >::type makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef< int64_t > shape, ArrayRef< int64_t > shapeAlloc)
Build a StridedMemRefDescriptor<T, N> that matches the MLIR ABI.
std::pair< T *, T * > allocAligned(size_t nElements, AllocFunType allocFun=&::malloc, std::optional< uint64_t > alignment=std::optional< uint64_t >())
Align nElements of type T with an optional alignment.
Include the generated interface declarations.
llvm::function_ref< void *(size_t)> AllocFunType
StridedMemRef descriptor type specialized for rank 0.
StridedMemRef descriptor type with static rank.
StridedMemrefIterator< T, N > begin()
StridedMemrefIterator< T, N > end()