GPUHeuristics.cpp
//===- GPUHeuristics.cpp - Heuristics Implementation for Transforms ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Linalg/TransformOps/GPUHeuristics.h"

#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugLog.h"
#include "llvm/Support/InterleavedRange.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cmath>
#include <numeric>

using namespace mlir;

#define DEBUG_TYPE "linalg-transforms"

static Attribute linearId0(MLIRContext *ctx) {
  return gpu::GPUThreadMappingAttr::get(ctx, gpu::MappingId::LinearDim0);
}
static Attribute linearId1(MLIRContext *ctx) {
  return gpu::GPUThreadMappingAttr::get(ctx, gpu::MappingId::LinearDim1);
}
static Attribute linearId2(MLIRContext *ctx) {
  return gpu::GPUThreadMappingAttr::get(ctx, gpu::MappingId::LinearDim2);
}

transform::gpu::CopyMappingInfo::CopyMappingInfo(MLIRContext *ctx,
                                                 int totalNumThreads,
                                                 int64_t desiredBitAlignment,
                                                 ArrayRef<int64_t> copySizes,
                                                 bool favorPredication,
                                                 int64_t elementalBitwidth) {
  assert(!copySizes.empty() && copySizes.size() <= 3 &&
         "only 1,2,3-D copies are supported for now");

  LDBG() << "START CopyMappingInfo, favorPredication: " << favorPredication;
  LDBG() << "--copy shape: " << llvm::interleaved(copySizes);

  // Greedily find the largest vector size that can be used to copy the most
  // minor dimension: we are in the business of filling kMaxVectorLoadBitWidth
  // contiguous memory transactions with as few threads as possible.
  int64_t desiredVectorSize = CopyMappingInfo::maxContiguousElementsToTransfer(
      desiredBitAlignment, copySizes.back(), elementalBitwidth);

  LDBG() << "--greedily determined vectorSize: " << desiredVectorSize
         << " elements of " << elementalBitwidth << "b each -> "
         << (desiredVectorSize * elementalBitwidth)
         << "b total out of a max of " << kMaxVectorLoadBitWidth << "b";

  status = inferNumThreads(totalNumThreads, copySizes, desiredVectorSize,
                           favorPredication);
  if (status == Status::Invalid)
    return;

  LDBG() << "--copy: " << llvm::interleaved(copySizes) << "\n"
         << "--numThreads: " << llvm::interleaved(this->numThreads) << "\n"
         << "--vectorSize: " << this->vectorSize;
  assert(this->numThreads.size() == copySizes.size() &&
         "compute copy mapping expected same number of threads and copy sizes");

  // Compute the smallest bounding box.
  this->smallestBoundingTileSizes = llvm::to_vector(
      llvm::map_range(llvm::zip(copySizes, this->numThreads), [](auto &&pair) {
        int64_t size, numThreads;
        std::tie(size, numThreads) = pair;
        return llvm::divideCeilSigned(size, numThreads);
      }));
  SmallVector<Attribute> allThreadMappings{linearId2(ctx), linearId1(ctx),
                                           linearId0(ctx)};

  // Set the thread mapping.
  this->threadMapping =
      llvm::to_vector(ArrayRef(allThreadMappings)
                          .take_back(this->smallestBoundingTileSizes.size()));
  LDBG() << *this;
}
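
// Illustrative end-to-end example of the constructor above (hypothetical
// input values, and assuming kMaxVectorLoadBitWidth == 128): constructing
//   CopyMappingInfo(ctx, /*totalNumThreads=*/128, /*desiredBitAlignment=*/128,
//                   /*copySizes=*/{16, 32}, /*favorPredication=*/false,
//                   /*elementalBitwidth=*/32)
// picks desiredVectorSize = gcd(gcd(128/32, 32), 128/32) = 4, infers
// numThreads = {16, 8} (16 * 8 == 128 threads, so Status::Success), and
// derives smallestBoundingTileSizes = {ceil(16/16), ceil(32/8)} = {1, 4} with
// threadMapping = {LinearDim1, LinearDim0}.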

int64_t transform::gpu::CopyMappingInfo::maxContiguousElementsToTransfer(
    int64_t desiredBitAlignment, int64_t numContiguousElements,
    int64_t elementalBitwidth) {
  assert(kMaxVectorLoadBitWidth % elementalBitwidth == 0 &&
         "elemental bitwidth does not divide kMaxVectorLoadBitWidth");
  assert(desiredBitAlignment % elementalBitwidth == 0 &&
         "elemental bitwidth does not divide desired bit alignment");
  return std::gcd(
      std::gcd(desiredBitAlignment / elementalBitwidth, numContiguousElements),
      kMaxVectorLoadBitWidth / elementalBitwidth);
}
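
// Worked example for the gcd above (hypothetical values, and assuming
// kMaxVectorLoadBitWidth == 128): with desiredBitAlignment = 64,
// numContiguousElements = 6 and elementalBitwidth = 32, the result is
// gcd(gcd(64/32, 6), 128/32) = gcd(gcd(2, 6), 4) = gcd(2, 4) = 2, i.e. at most
// 2 contiguous elements (64 bits) are transferred per thread.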

/// Get the list of all factors that divide `val`, not just the prime factors.
static SmallVector<int64_t> getFactors(int64_t val) {
  SmallVector<int64_t> factors;
  factors.reserve(val);
  for (int64_t factor = 1; factor < val; ++factor) {
    if (val % factor != 0)
      continue;
    factors.push_back(factor);
  }
  factors.push_back(val);
  return factors;
}

static int64_t product(ArrayRef<int64_t> vals) {
  int64_t res = 1;
  for (auto val : vals)
    res *= val;
  return res;
}

/// Extract `result` from `sizes` with the following constraints:
///   1. sizes[i] % result[i] == 0 for all i
///   2. product_of_threadsPerDim <= maxNumThreads
///   3. if `currentIndex` is sizes.size() - 1, then threadsPerDim[currentIndex]
///      must be sizes[currentIndex].
/// This is used to greedily extract the maximum number of threads usable for
/// mapping a copy of size `sizes`, while being bounded by `totalNumThreads` and
/// ensuring coalesced access along the most minor dimension.
/// Return the number of threads used in the range:
///   threadsPerDim[currentIndex .. sizes.end()]
// The implementation uses a dynamic programming approach to greedily extract
// the best combination under the constraints.
// TODO: Implementation details can be improved but putting effort there is a
// tradeoff: `sizes` is expected to be of small rank and contain small values.
static SmallVector<int64_t> maximizeNumThreads(ArrayRef<int64_t> sizes,
                                               int64_t currentIndex,
                                               int64_t maxNumThreads) {
  assert(static_cast<size_t>(currentIndex) < sizes.size() &&
         "currentIndex out of bounds");
  std::string indent(2 * currentIndex, '-');
  if (static_cast<size_t>(currentIndex) == sizes.size() - 1) {
    LDBG() << indent << "mandated globalBest: " << sizes[currentIndex];
    return SmallVector<int64_t>{sizes[currentIndex]};
  }

  int64_t best = 0;
  int64_t s = sizes[currentIndex];
  SmallVector<int64_t> factors = getFactors(s);
  SmallVector<int64_t> localThreadsPerDim;
  localThreadsPerDim.reserve(sizes.size());
  LDBG() << indent << "maximizeNumThreads in " << s
         << " with limit: " << maxNumThreads;
  for (auto factor : factors) {
    auto nestedThreadsPerDim =
        maximizeNumThreads(sizes, currentIndex + 1, maxNumThreads / factor);
    int64_t localBest = factor * product(nestedThreadsPerDim);
    if (localBest > best && localBest <= maxNumThreads) {
      LDBG() << indent << "new localBest: " << localBest;
      LDBG() << indent << "nestedThreadsPerDim: "
             << llvm::interleaved(nestedThreadsPerDim);
      localThreadsPerDim.clear();
      localThreadsPerDim.push_back(factor);
      llvm::append_range(localThreadsPerDim, nestedThreadsPerDim);
      best = localBest;
    }
  }

  LDBG() << indent << "found globalBest: " << best;
  LDBG() << indent << "numThreads: " << llvm::interleaved(localThreadsPerDim);
  return localThreadsPerDim;
}
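
// Worked example for the recursion above (hypothetical values):
// maximizeNumThreads({4, 8, 16}, 0, 128) mandates 16 threads on the most minor
// dimension and then searches divisors of the outer sizes for the largest
// total product not exceeding 128; the first maximal combination found is
// {1, 8, 16}, i.e. 1 * 8 * 16 == 128 threads.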

transform::gpu::CopyMappingInfo::Status
transform::gpu::CopyMappingInfo::inferNumThreads(int64_t totalNumThreads,
                                                 ArrayRef<int64_t> sizes,
                                                 int64_t desiredVectorSize,
                                                 bool favorPredication) {

  if (!favorPredication) {
    int64_t localVectorSize = desiredVectorSize;
    for (; localVectorSize >= 1; localVectorSize /= 2) {
      // Attempt to map the copy with predication and current fixed vector size:
      //   1. if the status is Success, we are done.
      //   2. if the status is Invalid, we fail immediately, no amount of vector
      //      size reduction can offset the bad tile size selection from the
      //      higher-level.
      //   3. if the status is RequiresPredication, we try again with a smaller
      //      vector size.
      Status status =
          inferNumThreadsImpl(totalNumThreads, sizes, localVectorSize);
      if (status == Status::Success || status == Status::Invalid)
        return status;

      LDBG() << "requires predication, try reducing vector size to "
             << (localVectorSize / 2);
    }
  }

  // If we have not yet returned, it means that we have tried all vector sizes
  // and we still require predication. Restart from the original vector size
  // and do not attempt to reduce it further; predication will be required.
  return inferNumThreadsImpl(totalNumThreads, sizes, desiredVectorSize);
}
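
// Illustrative trace of the retry loop above (hypothetical values): for
// sizes = {3, 128}, totalNumThreads = 64 and desiredVectorSize = 4, the first
// attempt scales the minor size to 32 and can only use 1 * 32 = 32 threads
// (RequiresPredication). Halving the vector size to 2 scales the minor size to
// 64, which uses exactly 1 * 64 = 64 threads and returns Status::Success with
// vectorSize = 2.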

transform::gpu::CopyMappingInfo::Status
transform::gpu::CopyMappingInfo::inferNumThreadsImpl(
    int64_t totalNumThreads, ArrayRef<int64_t> sizes,
    int64_t desiredVectorSize) {
  assert(sizes.back() % desiredVectorSize == 0 &&
         "most-minor size not divisible by actualVectorSize");

  LDBG() << "inferNumThreadsImpl with totalNumThreads: " << totalNumThreads
         << " and vectorSize: " << desiredVectorSize;

  // Scale the most minor size to account for the chosen vector size and
  // maximize the number of threads without exceeding the total number of
  // threads.
  SmallVector<int64_t> scaledSizes(sizes);
  scaledSizes.back() /= desiredVectorSize;
  if (scaledSizes.back() > totalNumThreads) {
    LDBG() << "--Too few threads given the required vector size -> FAIL";
    return Status::Invalid;
  }
  SmallVector<int64_t> inferredNumThreads =
      maximizeNumThreads(scaledSizes, 0, totalNumThreads);

  LDBG() << "inferred numThreads: " << llvm::interleaved(inferredNumThreads);
  LDBG() << "computed actualVectorSize: " << desiredVectorSize;

  // Corner case: we cannot use more threads than available. If the dimension
  // of the copy is so bad it is because higher-level tiling did not do its
  // job, we do not try to recover from it here.
  int64_t totalNumThreadsUsed = product(inferredNumThreads);
  LDBG() << "--totalNumThreadsUsed: " << totalNumThreadsUsed;
  if (totalNumThreadsUsed == 0 || totalNumThreadsUsed > totalNumThreads) {
    LDBG() << "--Too few threads given the required vector size -> FAIL";
    return Status::Invalid;
  }

  this->vectorSize = desiredVectorSize;
  this->numThreads = inferredNumThreads;
  if (totalNumThreadsUsed == totalNumThreads)
    return Status::Success;

  return Status::RequiresPredication;
}
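
// Illustrative trace of the function above (hypothetical values): for
// totalNumThreads = 256, sizes = {16, 128} and desiredVectorSize = 4, the
// scaled sizes are {16, 32}; maximizeNumThreads({16, 32}, 0, 256) yields
// numThreads = {8, 32}, which uses exactly 8 * 32 = 256 threads, so the result
// is Status::Success with vectorSize = 4.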

void transform::gpu::CopyMappingInfo::print(llvm::raw_ostream &os) const {
  os << "MappingInfo{"
     << "CopyMappingInfo: " << "valid: " << (status != Status::Invalid) << ", "
     << "vectorSize: " << vectorSize << ", numThreads: {"
     << llvm::interleaved(numThreads) << "}, smallestBoundingTileSizes: {"
     << llvm::interleaved(smallestBoundingTileSizes) << "}, threadMapping: {"
     << llvm::interleaved(threadMapping) << "}}";
}