//===- MemRefUtils.h - Memref helpers to invoke MLIR JIT code ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utils for MLIR ABI interfacing with frameworks.
//
// The templated free functions below make it possible to allocate dense
// contiguous buffers with shapes that interoperate properly with the MLIR
// codegen ABI.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <climits>
#include <functional>
#include <initializer_list>
#include <memory>
#include <optional>

#ifndef MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
#define MLIR_EXECUTIONENGINE_MEMREFUTILS_H_

namespace mlir {
using AllocFunType = llvm::function_ref<void *(size_t)>;

namespace detail {

/// Given a shape with sizes greater than 0 along all dimensions, returns the
/// distance, in number of elements, between a slice in a dimension and the next
/// slice in the same dimension.
/// e.g. shape[3, 4, 5] -> strides[20, 5, 1]
template <size_t N>
inline std::array<int64_t, N> makeStrides(ArrayRef<int64_t> shape) {
  assert(shape.size() == N && "expect shape specification to match rank");
  std::array<int64_t, N> res;
  int64_t running = 1;
  for (int64_t idx = N - 1; idx >= 0; --idx) {
    assert(shape[idx] >= 0 &&
           "size must be non-negative for all shape dimensions");
    res[idx] = running;
    running *= shape[idx];
  }
  return res;
}
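
// Illustrative sketch (not part of the original header): the strides are the
// row-major element distances used to linearize an index tuple.
//
//   std::array<int64_t, 3> s = makeStrides<3>({3, 4, 5});
//   // s == {20, 5, 1}: the flat offset of (i, j, k) is i*20 + j*5 + k*1.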

/// Build a `StridedMemRefDescriptor<T, N>` that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions. Additionally takes a `shapeAlloc` array which
/// is used instead of `shape` to allocate "more aligned" data and compute the
/// corresponding strides.
template <int N, typename T>
typename std::enable_if<(N >= 1), StridedMemRefType<T, N>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape,
                            ArrayRef<int64_t> shapeAlloc) {
  assert(shape.size() == N);
  assert(shapeAlloc.size() == N);
  StridedMemRefType<T, N> descriptor;
  descriptor.basePtr = ptr;
  descriptor.data = alignedPtr;
  descriptor.offset = 0;
  std::copy(shape.begin(), shape.end(), descriptor.sizes);
  auto strides = makeStrides<N>(shapeAlloc);
  std::copy(strides.begin(), strides.end(), descriptor.strides);
  return descriptor;
}
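
// Illustrative sketch (not part of the original header; `ptr`/`alignedPtr`
// stand for the pointers returned by allocAligned below): sizes come from
// `shape` while strides come from `shapeAlloc`, so a padded allocation yields
// rows that are wider in memory than they are logically.
//
//   auto desc = makeStridedMemRefDescriptor<2, float>(ptr, alignedPtr,
//                                                     {2, 3}, {2, 4});
//   // desc.sizes == {2, 3}, desc.strides == {4, 1}: each row exposes 3
//   // elements but consecutive rows start 4 elements apart.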

/// Build a `StridedMemRefDescriptor<T, 0>` that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions. Additionally takes a `shapeAlloc` array which
/// is used instead of `shape` to allocate "more aligned" data and compute the
/// corresponding strides.
template <int N, typename T>
typename std::enable_if<(N == 0), StridedMemRefType<T, 0>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape = {},
                            ArrayRef<int64_t> shapeAlloc = {}) {
  assert(shape.size() == N);
  assert(shapeAlloc.size() == N);
  StridedMemRefType<T, 0> descriptor;
  descriptor.basePtr = ptr;
  descriptor.data = alignedPtr;
  descriptor.offset = 0;
  return descriptor;
}
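
// Illustrative note (not part of the original header): the rank-0 overload
// describes a scalar memref. StridedMemRefType<T, 0> carries no `sizes` or
// `strides` arrays, so only `basePtr`, `data`, and `offset` are populated.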

/// Align `nElements` of type T with an optional `alignment`.
/// This replaces a portable `posix_memalign`.
/// `alignment` must be a power of 2 that is at least sizeof(T). By default
/// the alignment is the next power of 2 greater than or equal to sizeof(T).
template <typename T>
std::pair<T *, T *>
allocAligned(size_t nElements, AllocFunType allocFun = &::malloc,
             std::optional<uint64_t> alignment = std::optional<uint64_t>()) {
  assert(sizeof(T) <= UINT_MAX && "Elemental type overflows");
  auto size = nElements * sizeof(T);
  auto desiredAlignment = alignment.value_or(nextPowerOf2(sizeof(T)));
  assert((desiredAlignment & (desiredAlignment - 1)) == 0);
  assert(desiredAlignment >= sizeof(T));
  T *data = reinterpret_cast<T *>(allocFun(size + desiredAlignment));
  uintptr_t addr = reinterpret_cast<uintptr_t>(data);
  uintptr_t rem = addr % desiredAlignment;
  T *alignedData = (rem == 0)
                       ? data
                       : reinterpret_cast<T *>(addr + (desiredAlignment - rem));
  assert(reinterpret_cast<uintptr_t>(alignedData) % desiredAlignment == 0);
  return std::make_pair(data, alignedData);
}
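
// Illustrative sketch (not part of the original header; addresses are made
// up): over-allocating by `alignment` bytes guarantees the buffer contains
// an address on the requested boundary.
//
//   auto [base, aligned] = allocAligned<float>(8, &::malloc, 64);
//   // Say malloc returns base == 0x1008. 0x1008 % 64 == 8, so `aligned` is
//   // bumped by 64 - 8 bytes to 0x1040, which is a 64-byte boundary.
//   // `base` is what must eventually be passed to free(); `aligned` is
//   // where the 8 floats actually live.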

} // namespace detail

//===----------------------------------------------------------------------===//
// Public API
//===----------------------------------------------------------------------===//

/// Convenient callback to "visit" a memref element by element.
/// This takes a reference to an individual element as well as the coordinates.
/// It can be used in conjunction with a StridedMemrefIterator.
template <typename T>
using ElementWiseVisitor = llvm::function_ref<void(T &ptr, ArrayRef<int64_t>)>;

/// Owning MemRef type that abstracts over the runtime type for ranked strided
/// memref.
template <typename T, unsigned Rank>
class OwningMemRef {
public:
  using DescriptorType = StridedMemRefType<T, Rank>;
  using FreeFunType = std::function<void(DescriptorType)>;

  /// Allocate a new dense StridedMemRef with a given `shape`. An optional
  /// `shapeAlloc` array can be supplied to "pad" every dimension individually.
  /// If an ElementWiseVisitor is provided, it will be used to initialize the
  /// data, else the memory will be zero-initialized. The alloc and free
  /// methods used to manage the data allocation can optionally be provided,
  /// and default to malloc/free.
  OwningMemRef(
      ArrayRef<int64_t> shape, ArrayRef<int64_t> shapeAlloc = {},
      ElementWiseVisitor<T> init = {},
      std::optional<uint64_t> alignment = std::optional<uint64_t>(),
      AllocFunType allocFun = &::malloc,
      std::function<void(StridedMemRefType<T, Rank>)> freeFun =
          [](StridedMemRefType<T, Rank> descriptor) {
            ::free(descriptor.basePtr);
          })
      : freeFunc(freeFun) {
    if (shapeAlloc.empty())
      shapeAlloc = shape;
    assert(shape.size() == Rank);
    assert(shapeAlloc.size() == Rank);
    for (unsigned i = 0; i < Rank; ++i)
      assert(shape[i] <= shapeAlloc[i] &&
             "shapeAlloc must be greater than or equal to shape");
    int64_t nElements = 1;
    for (int64_t s : shapeAlloc)
      nElements *= s;
    auto [allocatedPtr, alignedData] =
        detail::allocAligned<T>(nElements, allocFun, alignment);
    descriptor = detail::makeStridedMemRefDescriptor<Rank>(
        allocatedPtr, alignedData, shape, shapeAlloc);
    if (init) {
      for (StridedMemrefIterator<T, Rank> it = descriptor.begin(),
                                          end = descriptor.end();
           it != end; ++it)
        init(*it, it.getIndices());
    } else {
      memset(alignedData, 0, nElements * sizeof(T));
    }
  }
  /// Take ownership of an existing descriptor with a custom deleter.
  OwningMemRef(DescriptorType descriptor, FreeFunType freeFunc)
      : freeFunc(freeFunc), descriptor(descriptor) {}
  ~OwningMemRef() {
    if (freeFunc)
      freeFunc(descriptor);
  }
  OwningMemRef(const OwningMemRef &) = delete;
  OwningMemRef &operator=(const OwningMemRef &) = delete;
  OwningMemRef &operator=(OwningMemRef &&other) {
    freeFunc = other.freeFunc;
    descriptor = other.descriptor;
    other.freeFunc = nullptr;
    memset(&other.descriptor, 0, sizeof(other.descriptor));
    return *this;
  }
  OwningMemRef(OwningMemRef &&other) { *this = std::move(other); }

  DescriptorType &operator*() { return descriptor; }
  DescriptorType *operator->() { return &descriptor; }
  T &operator[](std::initializer_list<int64_t> indices) {
    return descriptor[indices];
  }

private:
  /// Custom deleter used to release the data buffer managed with the
  /// descriptor below.
  FreeFunType freeFunc;
  /// The descriptor is an instance of StridedMemRefType<T, rank>.
  DescriptorType descriptor;
};
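
// Illustrative usage sketch (not part of the original header; `jitFn` is a
// hypothetical JIT-compiled entry point): allocate a 2x3 memref, initialize
// it element-wise, index it, and expose the descriptor the MLIR ABI expects.
//
//   OwningMemRef<float, 2> A(
//       {2, 3}, /*shapeAlloc=*/{},
//       [](float &elt, ArrayRef<int64_t> idx) {
//         elt = static_cast<float>(idx[0] * 3 + idx[1]);
//       });
//   A[{1, 2}] = 42.0f;                        // Element access.
//   StridedMemRefType<float, 2> &desc = *A;   // ABI-level descriptor.
//   // jitFn(&desc);                          // Hypothetical invocation.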

} // namespace mlir

#endif // MLIR_EXECUTIONENGINE_MEMREFUTILS_H_