MLIR  21.0.0git
MemRefUtils.h
Go to the documentation of this file.
1 //===- MemRefUtils.h - Memref helpers to invoke MLIR JIT code ---*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Utils for MLIR ABI interfacing with frameworks.
10 //
11 // The templated free functions below make it possible to allocate dense
12 // contiguous buffers with shapes that interoperate properly with the MLIR
13 // codegen ABI.
14 //
15 //===----------------------------------------------------------------------===//
16 
18 #include "mlir/Support/LLVM.h"
19 #include "llvm/ADT/ArrayRef.h"
20 #include "llvm/ADT/STLExtras.h"
21 
22 #include "llvm/Support/raw_ostream.h"
23 
24 #include <algorithm>
25 #include <array>
26 #include <cassert>
27 #include <climits>
28 #include <functional>
29 #include <initializer_list>
30 #include <memory>
31 #include <optional>
32 
33 #ifndef MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
34 #define MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
35 
36 namespace mlir {
/// Signature of the allocation callback used by the helpers below:
/// takes a byte count and returns the raw (possibly unaligned) buffer.
37 using AllocFunType = llvm::function_ref<void *(size_t)>;
38 
39 namespace detail {
40 
/// Given a shape with non-negative sizes along all dimensions, returns the
/// distance, in number of elements, between a slice in a dimension and the
/// next slice in the same dimension.
/// e.g. shape[3, 4, 5] -> strides[20, 5, 1]
45 template <size_t N>
46 inline std::array<int64_t, N> makeStrides(ArrayRef<int64_t> shape) {
47  assert(shape.size() == N && "expect shape specification to match rank");
48  std::array<int64_t, N> res;
49  int64_t running = 1;
50  for (int64_t idx = N - 1; idx >= 0; --idx) {
51  assert(shape[idx] >= 0 &&
52  "size must be non-negative for all shape dimensions");
53  res[idx] = running;
54  running *= shape[idx];
55  }
56  return res;
57 }
58 
59 /// Build a `StridedMemRefDescriptor<T, N>` that matches the MLIR ABI.
60 /// This is an implementation detail that is kept in sync with MLIR codegen
61 /// conventions. Additionally takes a `shapeAlloc` array which
62 /// is used instead of `shape` to allocate "more aligned" data and compute the
63 /// corresponding strides.
64 template <int N, typename T>
65 typename std::enable_if<(N >= 1), StridedMemRefType<T, N>>::type
66 makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape,
67  ArrayRef<int64_t> shapeAlloc) {
68  assert(shape.size() == N);
69  assert(shapeAlloc.size() == N);
70  StridedMemRefType<T, N> descriptor;
71  descriptor.basePtr = static_cast<T *>(ptr);
72  descriptor.data = static_cast<T *>(alignedPtr);
73  descriptor.offset = 0;
74  std::copy(shape.begin(), shape.end(), descriptor.sizes);
75  auto strides = makeStrides<N>(shapeAlloc);
76  std::copy(strides.begin(), strides.end(), descriptor.strides);
77  return descriptor;
78 }
79 
80 /// Build a `StridedMemRefDescriptor<T, 0>` that matches the MLIR ABI.
81 /// This is an implementation detail that is kept in sync with MLIR codegen
82 /// conventions. Additionally takes a `shapeAlloc` array which
83 /// is used instead of `shape` to allocate "more aligned" data and compute the
84 /// corresponding strides.
85 template <int N, typename T>
86 typename std::enable_if<(N == 0), StridedMemRefType<T, 0>>::type
87 makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape = {},
88  ArrayRef<int64_t> shapeAlloc = {}) {
89  assert(shape.size() == N);
90  assert(shapeAlloc.size() == N);
91  StridedMemRefType<T, 0> descriptor;
92  descriptor.basePtr = static_cast<T *>(ptr);
93  descriptor.data = static_cast<T *>(alignedPtr);
94  descriptor.offset = 0;
95  return descriptor;
96 }
97 
98 /// Align `nElements` of type T with an optional `alignment`.
99 /// This replaces a portable `posix_memalign`.
/// `alignment` must be a power of 2 and no smaller than sizeof(T). By default
/// the alignment is the next power of 2 greater than or equal to sizeof(T).
102 template <typename T>
103 std::pair<T *, T *>
104 allocAligned(size_t nElements, AllocFunType allocFun = &::malloc,
105  std::optional<uint64_t> alignment = std::optional<uint64_t>()) {
106  assert(sizeof(T) <= UINT_MAX && "Elemental type overflows");
107  auto size = nElements * sizeof(T);
108  auto desiredAlignment = alignment.value_or(nextPowerOf2(sizeof(T)));
109  assert((desiredAlignment & (desiredAlignment - 1)) == 0);
110  assert(desiredAlignment >= sizeof(T));
111  T *data = reinterpret_cast<T *>(allocFun(size + desiredAlignment));
112  uintptr_t addr = reinterpret_cast<uintptr_t>(data);
113  uintptr_t rem = addr % desiredAlignment;
114  T *alignedData = (rem == 0)
115  ? data
116  : reinterpret_cast<T *>(addr + (desiredAlignment - rem));
117  assert(reinterpret_cast<uintptr_t>(alignedData) % desiredAlignment == 0);
118  return std::make_pair(data, alignedData);
119 }
120 
121 } // namespace detail
122 
123 //===----------------------------------------------------------------------===//
124 // Public API
125 //===----------------------------------------------------------------------===//
126 
127 /// Convenient callback to "visit" a memref element by element.
128 /// This takes a reference to an individual element as well as the coordinates.
/// It can be used in conjunction with a StridedMemrefIterator.
130 template <typename T>
132 
133 /// Owning MemRef type that abstracts over the runtime type for ranked strided
134 /// memref.
135 template <typename T, unsigned Rank>
137 public:
139  using FreeFunType = std::function<void(DescriptorType)>;
140 
141  /// Allocate a new dense StridedMemrefRef with a given `shape`. An optional
142  /// `shapeAlloc` array can be supplied to "pad" every dimension individually.
143  /// If an ElementWiseVisitor is provided, it will be used to initialize the
144  /// data, else the memory will be zero-initialized. The alloc and free method
145  /// used to manage the data allocation can be optionally provided, and default
146  /// to malloc/free.
148  ArrayRef<int64_t> shape, ArrayRef<int64_t> shapeAlloc = {},
149  ElementWiseVisitor<T> init = {},
150  std::optional<uint64_t> alignment = std::optional<uint64_t>(),
151  AllocFunType allocFun = &::malloc,
152  std::function<void(StridedMemRefType<T, Rank>)> freeFun =
153  [](StridedMemRefType<T, Rank> descriptor) {
154  ::free(descriptor.data);
155  })
156  : freeFunc(freeFun) {
157  if (shapeAlloc.empty())
158  shapeAlloc = shape;
159  assert(shape.size() == Rank);
160  assert(shapeAlloc.size() == Rank);
161  for (unsigned i = 0; i < Rank; ++i)
162  assert(shape[i] <= shapeAlloc[i] &&
163  "shapeAlloc must be greater than or equal to shape");
164  int64_t nElements = 1;
165  for (int64_t s : shapeAlloc)
166  nElements *= s;
167  auto [data, alignedData] =
168  detail::allocAligned<T>(nElements, allocFun, alignment);
169  descriptor = detail::makeStridedMemRefDescriptor<Rank>(data, alignedData,
170  shape, shapeAlloc);
171  if (init) {
172  for (StridedMemrefIterator<T, Rank> it = descriptor.begin(),
173  end = descriptor.end();
174  it != end; ++it)
175  init(*it, it.getIndices());
176  } else {
177  memset(descriptor.data, 0,
178  nElements * sizeof(T) +
179  alignment.value_or(detail::nextPowerOf2(sizeof(T))));
180  }
181  }
182  /// Take ownership of an existing descriptor with a custom deleter.
184  : freeFunc(freeFunc), descriptor(descriptor) {}
186  if (freeFunc)
187  freeFunc(descriptor);
188  }
189  OwningMemRef(const OwningMemRef &) = delete;
190  OwningMemRef &operator=(const OwningMemRef &) = delete;
192  freeFunc = other.freeFunc;
193  descriptor = other.descriptor;
194  other.freeFunc = nullptr;
195  memset(&other.descriptor, 0, sizeof(other.descriptor));
196  }
197  OwningMemRef(OwningMemRef &&other) { *this = std::move(other); }
198 
199  DescriptorType &operator*() { return descriptor; }
200  DescriptorType *operator->() { return &descriptor; }
201  T &operator[](std::initializer_list<int64_t> indices) {
202  return descriptor[indices];
203  }
204 
205 private:
206  /// Custom deleter used to release the data buffer manager with the descriptor
207  /// below.
208  FreeFunType freeFunc;
209  /// The descriptor is an instance of StridedMemRefType<T, rank>.
210  DescriptorType descriptor;
211 };
212 
213 } // namespace mlir
214 
215 #endif // MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
static void copy(Location loc, Value dst, Value src, Value size, OpBuilder &builder)
Copies the given number of bytes from src to dst pointers.
Iterate over all elements in a strided memref.
Definition: CRunnerUtils.h:211
Owning MemRef type that abstracts over the runtime type for ranked strided memref.
Definition: MemRefUtils.h:136
OwningMemRef(OwningMemRef &&other)
Definition: MemRefUtils.h:197
T & operator[](std::initializer_list< int64_t > indices)
Definition: MemRefUtils.h:201
DescriptorType * operator->()
Definition: MemRefUtils.h:200
OwningMemRef(const OwningMemRef &)=delete
StridedMemRefType< T, Rank > DescriptorType
Definition: MemRefUtils.h:138
OwningMemRef(DescriptorType descriptor, FreeFunType freeFunc)
Take ownership of an existing descriptor with a custom deleter.
Definition: MemRefUtils.h:183
OwningMemRef & operator=(const OwningMemRef &&other)
Definition: MemRefUtils.h:191
DescriptorType & operator*()
Definition: MemRefUtils.h:199
OwningMemRef(ArrayRef< int64_t > shape, ArrayRef< int64_t > shapeAlloc={}, ElementWiseVisitor< T > init={}, std::optional< uint64_t > alignment=std::optional< uint64_t >(), AllocFunType allocFun=&::malloc, std::function< void(StridedMemRefType< T, Rank >)> freeFun=[](StridedMemRefType< T, Rank > descriptor) { ::free(descriptor.data);})
Allocate a new dense StridedMemrefRef with a given shape.
Definition: MemRefUtils.h:147
std::function< void(DescriptorType)> FreeFunType
Definition: MemRefUtils.h:139
OwningMemRef & operator=(const OwningMemRef &)=delete
constexpr unsigned nextPowerOf2(int n)
Definition: CRunnerUtils.h:49
std::array< int64_t, N > makeStrides(ArrayRef< int64_t > shape)
Given a shape with sizes greater than 0 along all dimensions, returns the distance,...
Definition: MemRefUtils.h:46
std::enable_if<(N >=1), StridedMemRefType< T, N > >::type makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef< int64_t > shape, ArrayRef< int64_t > shapeAlloc)
Build a StridedMemRefDescriptor<T, N> that matches the MLIR ABI.
Definition: MemRefUtils.h:66
std::pair< T *, T * > allocAligned(size_t nElements, AllocFunType allocFun=&::malloc, std::optional< uint64_t > alignment=std::optional< uint64_t >())
Align nElements of type T with an optional alignment.
Definition: MemRefUtils.h:104
Include the generated interface declarations.
llvm::function_ref< void *(size_t)> AllocFunType
Definition: MemRefUtils.h:37
StridedMemRef descriptor type specialized for rank 0.
Definition: CRunnerUtils.h:192
StridedMemRef descriptor type with static rank.
Definition: CRunnerUtils.h:131
StridedMemrefIterator< T, N > begin()
Definition: CRunnerUtils.h:152
int64_t strides[N]
Definition: CRunnerUtils.h:136
StridedMemrefIterator< T, N > end()
Definition: CRunnerUtils.h:153
int64_t sizes[N]
Definition: CRunnerUtils.h:135