CudaRuntimeWrappers.cpp
//===- CudaRuntimeWrappers.cpp - MLIR CUDA API wrapper library ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implements C wrappers around the CUDA library for easy linking in ORC jit.
// Also adds some debugging helpers that are helpful when writing MLIR code to
// run on GPUs.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/CRunnerUtils.h"

#include <cstdio>

#include "cuda.h"
#include "cuda_bf16.h"
#include "cuda_fp16.h"
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
#include "cusparse.h"
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
#include "cusparseLt.h"
#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE

#ifdef _WIN32
#include <malloc.h>
#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
#else
#define MLIR_CUDA_WRAPPERS_EXPORT __attribute__((visibility("default")))
#endif // _WIN32

#define CUDA_REPORT_IF_ERROR(expr)                                             \
  [](CUresult result) {                                                        \
    if (!result)                                                               \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)
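
// Illustrative note (not additional API): the macro reports failures but does
// not abort, so a failing call such as
//   CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
// prints "'cuMemAlloc(&ptr, sizeBytes)' failed with 'CUDA_ERROR_OUT_OF_MEMORY'"
// to stderr and execution continues with whatever the out-parameter holds.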

/// Helper to check if a CUDA error is due to the context being destroyed
/// during program shutdown. Both CUDA_ERROR_DEINITIALIZED and
/// CUDA_ERROR_CONTEXT_IS_DESTROYED indicate that the CUDA context has been
/// torn down and any associated resources are already freed.
static bool isCudaContextShutdownError(CUresult result) {
  return result == CUDA_ERROR_DEINITIALIZED ||
         result == CUDA_ERROR_CONTEXT_IS_DESTROYED;
}

/// Like CUDA_REPORT_IF_ERROR, but silences errors caused by CUDA context
/// shutdown. These errors are benign when they occur during program exit,
/// as all resources are freed with the context.
#define CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(expr)                             \
  [](CUresult result) {                                                        \
    if (!result || isCudaContextShutdownError(result))                         \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)

#define CUSPARSE_REPORT_IF_ERROR(expr)                                         \
  {                                                                            \
    cusparseStatus_t status = (expr);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
      fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr,               \
              cusparseGetErrorString(status));                                 \
    }                                                                          \
  }

thread_local static int32_t defaultDevice = 0;

/// Helper method that checks an environment variable for debugging.
static bool isDebugEnabled() {
  const char *kDebugEnvironmentVariable = "MLIR_CUDA_DEBUG";
  static bool isEnabled = getenv(kDebugEnvironmentVariable) != nullptr;
  return isEnabled;
}

#define debug_print(fmt, ...)                                                  \
  do {                                                                         \
    if (isDebugEnabled())                                                      \
      fprintf(stderr, "%s:%d:%s(): " fmt, "CudaRuntimeWrappers.cpp", __LINE__, \
              __func__, __VA_ARGS__);                                          \
  } while (0)
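
// Usage sketch (assuming a POSIX shell; the runner invocation is only an
// example): debug output is gated on the variable's presence, not its value,
// so any setting enables it:
//   MLIR_CUDA_DEBUG=1 mlir-runner ... 2>cuda-debug.log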

// Returns default CUdevice
static CUdevice getDefaultCuDevice() {
  CUdevice device;
  CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
  return device;
}

// Make the primary context of the current default device current for the
// duration of the instance and restore the previous context on destruction.
class ScopedContext {
public:
  ScopedContext() {
    // Static reference to CUDA primary context for device ordinal
    // defaultDevice.
    static CUcontext context = [] {
      CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
      CUcontext ctx;
      // Note: this does not affect the current context.
      CUDA_REPORT_IF_ERROR(
          cuDevicePrimaryCtxRetain(&ctx, getDefaultCuDevice()));
      return ctx;
    }();

    CUDA_REPORT_IF_ERROR(cuCtxPushCurrent(context));
  }

  ~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxPopCurrent(nullptr)); }
};
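
// A minimal usage sketch (hypothetical wrapper, for illustration only): each
// exported function below opens a ScopedContext so the driver call runs in
// the primary context, which is popped again on scope exit:
//
//   extern "C" void mgpuExampleSynchronize() {
//     ScopedContext scopedContext;              // push primary context
//     CUDA_REPORT_IF_ERROR(cuCtxSynchronize()); // executes in that context
//   }                                           // ~ScopedContext pops it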

#ifdef MLIR_ENABLE_CUDA_CUSPARSE
// Note that (1) Nvidia confirms it is safe to share a handle across multiple
// instances and streams, and (2) clients are responsible for calling the
// @mgpu environment initialization/destruction in a thread-safe manner, e.g.,
// at the beginning of the program before multiple threads are created.
static cusparseHandle_t cusparse_env = nullptr;

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
// cusparseLtHandle_t is not a pointer type, so we need an additional flag to
// indicate whether it is initialized.
static cusparseLtHandle_t cusparseLt_env;
static bool cusparseLt_initiated = false;

#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
mgpuModuleLoad(void *data, size_t /*gpuBlobSize*/) {
  ScopedContext scopedContext;
  CUmodule module = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
  return module;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
mgpuModuleLoadJIT(void *data, int optLevel, size_t /*assemblySize*/) {
  ScopedContext scopedContext;
  CUmodule module = nullptr;
  char jitErrorBuffer[4096] = {0};
  CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
                               CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                               CU_JIT_OPTIMIZATION_LEVEL};
  void *jitOptionsVals[] = {jitErrorBuffer,
                            reinterpret_cast<void *>(sizeof(jitErrorBuffer)),
                            reinterpret_cast<void *>(optLevel)};

  CUresult result =
      cuModuleLoadDataEx(&module, data, 3, jitOptions, jitOptionsVals);
  if (result) {
    fprintf(stderr, "JIT compilation failed with: '%s'\n", jitErrorBuffer);
    CUDA_REPORT_IF_ERROR(result);
  }
  return module;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuModuleUnload(module));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
mgpuModuleGetFunction(CUmodule module, const char *name) {
  CUfunction function = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
  return function;
}

// The wrapper uses intptr_t instead of CUDA's unsigned int to match
// the type of MLIR's index type. This avoids the need for casts in the
// generated MLIR code.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
                 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
                 intptr_t blockZ, int32_t smem, CUstream stream, void **params,
                 void **extra, size_t /*paramsCount*/) {
  ScopedContext scopedContext;
  if (smem > 0) {
    // Avoid querying the driver when unnecessary; the query is more expensive
    // than this check.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%d bytes) is larger than maximum "
              "allowed shared memory (%d bytes) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  debug_print("Launching kernel, grid=%ld,%ld,%ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %d bytes\n",
              gridX, gridY, gridZ, blockX, blockY, blockZ, smem);
  CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
                                      blockY, blockZ, smem, stream, params,
                                      extra));
}
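
// A hedged sketch of a call site (values are hypothetical): code lowered from
// the MLIR gpu dialect passes kernel arguments as an array of pointers to the
// individual arguments, the same convention cuLaunchKernel expects:
//
//   CUdeviceptr devBuffer = /* from mgpuMemAlloc */;
//   void *params[] = {&devBuffer};
//   mgpuLaunchKernel(function, /*grid=*/1, 1, 1, /*block=*/256, 1, 1,
//                    /*smem=*/0, stream, params, /*extra=*/nullptr,
//                    /*paramsCount=*/1);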

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate() {
  ScopedContext scopedContext;
  CUstream stream = nullptr;
  CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
  return stream;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamDestroy(CUstream stream) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuStreamDestroy(stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuStreamSynchronize(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamWaitEvent(CUstream stream,
                                                              CUevent event) {
  CUDA_REPORT_IF_ERROR(
      cuStreamWaitEvent(stream, event, /*flags=*/0));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate() {
  ScopedContext scopedContext;
  CUevent event = nullptr;
  CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
  return event;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventDestroy(CUevent event) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuEventDestroy(event));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventSynchronize(CUevent event) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuEventSynchronize(event));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventRecord(CUevent event,
                                                          CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuEventRecord(event, stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuMemAlloc(uint64_t sizeBytes, CUstream stream, bool isHostShared) {
  ScopedContext scopedContext;
  CUdeviceptr ptr = 0;
  if (sizeBytes == 0)
    return reinterpret_cast<void *>(ptr);

  if (isHostShared) {
    CUDA_REPORT_IF_ERROR(
        cuMemAllocManaged(&ptr, sizeBytes, CU_MEM_ATTACH_GLOBAL));
    return reinterpret_cast<void *>(ptr);
  }
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
  return reinterpret_cast<void *>(ptr);
}
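
// Illustrative contrast (hypothetical values): with isHostShared=true the
// returned pointer comes from cuMemAllocManaged and may be dereferenced on
// the host as well as the device; otherwise it is plain device memory and
// must be moved with mgpuMemcpy:
//
//   float *shared =
//       (float *)mgpuMemAlloc(4 * sizeof(float), stream, /*isHostShared=*/true);
//   shared[0] = 42.0f; // valid: managed memory is host-accessible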

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemFree(void *ptr,
                                                      CUstream /*stream*/) {
  CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD32Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD16Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}

///
/// Helper functions for writing mlir example code
///

// Registers a byte array with the CUDA runtime. Helpful until we have
// transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegister(void *ptr, uint64_t sizeBytes) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0));
}

/// Registers a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`. Helpful until we have
/// transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
                          int64_t elementSizeBytes) {
  // Only densely packed tensors are currently supported.
#ifdef _WIN32
  int64_t *denseStrides = (int64_t *)_alloca(rank * sizeof(int64_t));
#else
  int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
#endif // _WIN32
  int64_t *sizes = descriptor->sizes;
  for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
    denseStrides[i] = runningStride;
    runningStride *= sizes[i];
  }
  uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
  int64_t *strides = &sizes[rank];
  (void)strides;
  for (unsigned i = 0; i < rank; ++i)
    assert(strides[i] == denseStrides[i] &&
           "Mismatch in computed dense strides");

  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostRegister(ptr, sizeBytes);
}
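
// Worked example (illustrative): for a densely packed rank-2 memref with
// sizes = {2, 3} and elementSizeBytes = 4, the loop above yields
// denseStrides = {3, 1}, so sizeBytes = 2 * 3 * 4 = 24 and the entire buffer
// is registered with a single cuMemHostRegister call.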

// Unregisters a byte array with the CUDA runtime.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostUnregister(void *ptr) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuMemHostUnregister(ptr));
}

/// Unregisters a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostUnregisterMemRef(int64_t rank,
                            StridedMemRefType<char, 1> *descriptor,
                            int64_t elementSizeBytes) {
  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostUnregister(ptr);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSetDefaultDevice(int32_t device) {
  defaultDevice = device;
}

///
/// Runtime methods using CUDA 12.0+ driver
///

#if (CUDA_VERSION >= 12000)

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchKernelCooperative(
    CUfunction function, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
    intptr_t clusterX, intptr_t clusterY, intptr_t clusterZ, intptr_t blockX,
    intptr_t blockY, intptr_t blockZ, int32_t smem, CUstream stream,
    void **params, void **extra) {
  ScopedContext scopedContext;
  if (smem > 0) {
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%d bytes) is larger than maximum "
              "allowed shared memory (%d bytes) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }

  CUlaunchConfig config;
  config.gridDimX = gridX;
  config.gridDimY = gridY;
  config.gridDimZ = gridZ;
  config.blockDimX = blockX;
  config.blockDimY = blockY;
  config.blockDimZ = blockZ;
  config.sharedMemBytes = smem;
  config.hStream = stream;

  CUlaunchAttribute launchAttrs[3];
  int numAttrs = 0;

  bool hasCluster = clusterX > 0 && clusterY > 0 && clusterZ > 0;
  if (hasCluster) {
    launchAttrs[numAttrs].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
    launchAttrs[numAttrs].value.clusterDim.x = clusterX;
    launchAttrs[numAttrs].value.clusterDim.y = clusterY;
    launchAttrs[numAttrs].value.clusterDim.z = clusterZ;
    numAttrs++;

    launchAttrs[numAttrs].id =
        CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
    launchAttrs[numAttrs].value.clusterSchedulingPolicyPreference =
        CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
    numAttrs++;
  }

  launchAttrs[numAttrs].id = CU_LAUNCH_ATTRIBUTE_COOPERATIVE;
  launchAttrs[numAttrs].value.cooperative = 1;
  numAttrs++;

  config.numAttrs = numAttrs;
  config.attrs = launchAttrs;

  debug_print("Launching cooperative kernel (cluster=%d), "
              "grid=%ld,%ld,%ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %d bytes\n",
              hasCluster, gridX, gridY, gridZ, blockX, blockY, blockZ, smem);

  CUDA_REPORT_IF_ERROR(cuLaunchKernelEx(&config, function, params, extra));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchClusterKernel(
    CUfunction function, intptr_t clusterX, intptr_t clusterY,
    intptr_t clusterZ, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
    intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem,
    CUstream stream, void **params, void **extra, size_t /*paramsCount*/) {
  ScopedContext scopedContext;
  if (smem > 0) {
    // Avoid querying the driver when unnecessary; the query is more expensive
    // than this check.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%d bytes) is larger than maximum "
              "allowed shared memory (%d bytes) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  CUlaunchConfig config;
  config.gridDimX = gridX;
  config.gridDimY = gridY;
  config.gridDimZ = gridZ;
  config.blockDimX = blockX;
  config.blockDimY = blockY;
  config.blockDimZ = blockZ;
  config.sharedMemBytes = smem;
  config.hStream = stream;
  CUlaunchAttribute launchAttr[2];
  launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
  launchAttr[0].value.clusterDim.x = clusterX;
  launchAttr[0].value.clusterDim.y = clusterY;
  launchAttr[0].value.clusterDim.z = clusterZ;
  launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
  launchAttr[1].value.clusterSchedulingPolicyPreference =
      CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
  config.numAttrs = 2;
  config.attrs = launchAttr;

  debug_print("Launching kernel, "
              "cluster: %ld, %ld, %ld, "
              "grid=%ld,%ld,%ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %d bytes\n",
              clusterX, clusterY, clusterZ, gridX, gridY, gridZ, blockX, blockY,
              blockZ, smem);

  CUDA_REPORT_IF_ERROR(cuLaunchKernelEx(&config, function, params, extra));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuTensorMapEncodeTiled(
    CUtensorMap *tensorMap,             // Tensor map object
    CUtensorMapDataType tensorDataType, // Tensor data type
    cuuint32_t tensorRank,              // Dimensionality of tensor
    void *globalAddress,                // Starting address
    const cuuint64_t *globalDim,        // Tensor size (number of elements)
    const cuuint64_t *globalStrides,    // Stride size (in bytes)
    const cuuint32_t *boxDim,           // Traversal box (number of elements)
    const cuuint32_t *elementStrides,   // Traversal stride
    CUtensorMapInterleave interleave,   // Type of interleaved layout
    CUtensorMapSwizzle swizzle,         // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion, // L2 promotion size
    CUtensorMapFloatOOBfill oobFill     // Padding zfill or NaN fill
) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuTensorMapEncodeTiled(
      tensorMap, tensorDataType, tensorRank, globalAddress, globalDim,
      globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion,
      oobFill));
  debug_print("Created TMA descriptor\n Addr: %p\n"
              "data type : %d\n"
              "rank : %d\n"
              "globalDim[5]: %zu, %zu, %zu, %zu, %zu\n"
              "globalStrides[5]: %zu, %zu, %zu, %zu, %zu\n"
              "boxDim[5]: %u, %u, %u, %u, %u\n"
              "elementStrides[5]: %u, %u, %u, %u, %u\n"
              "interleave: %u \n"
              "swizzle: %u \n"
              "l2Promotion: %u \n"
              "oobFill: %u \n",
              (void *)&tensorMap, tensorDataType, tensorRank, globalDim[0],
              globalDim[1], globalDim[2], globalDim[3], globalDim[4],
              globalStrides[0], globalStrides[1], globalStrides[2],
              globalStrides[3], globalStrides[4], boxDim[0], boxDim[1],
              boxDim[2], boxDim[3], boxDim[4], elementStrides[0],
              elementStrides[1], elementStrides[2], elementStrides[3],
              elementStrides[4], interleave, swizzle, l2Promotion, oobFill);
}

template <int Rank>
void mgpuGetMemRefDataAndShape(void *rawDescriptor, char **addr,
                               uint64_t *globalDim, uint64_t *globalStrides,
                               const CUtensorMapDataType tensorDataType) {
  auto descriptor =
      reinterpret_cast<StridedMemRefType<char, Rank> *>(rawDescriptor);
  *addr = descriptor->data;
  for (int i = 0; i < Rank; ++i) {
    globalDim[i] = static_cast<uint64_t>(descriptor->sizes[Rank - i - 1]);
  }
  static constexpr int elementSizeInBytes[] = {1, 2, 4, 4, 8, 8, 2,
                                               4, 8, 2, 4, 4, 4};
  for (int i = 0; i < Rank - 1; ++i) {
    globalStrides[i] = static_cast<uint64_t>(
        descriptor->strides[Rank - i - 2] * elementSizeInBytes[tensorDataType]);
  }
}
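
// Illustrative example: for a rank-2 memref with sizes = {M, N} and a unit
// innermost stride, the loops above produce globalDim = {N, M} (fastest
// varying dimension first, as the TMA encoding expects) and
// globalStrides[0] = N * elementSize, i.e. the byte stride between rows.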

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *mgpuTensorMapEncodeTiledMemref(
    int64_t tensorRank,                       // Dimensionality of tensor
    void *rankedDescriptor,                   // Ranked MemRef descriptor
    const CUtensorMapDataType tensorDataType, // Tensor data type
    CUtensorMapInterleave interleave,         // Type of interleaved layout
    CUtensorMapSwizzle swizzle,               // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion,       // L2 promotion size
    CUtensorMapFloatOOBfill oobFill,          // Padding zfill or NaN fill
    int64_t *inputBoxDims                     // Box dimensions (number of elements)
) {
  CUtensorMap tensorMap;

  uint32_t boxDim[5] = {1, 1, 1, 1, 1}, elementStrides[5] = {1, 1, 1, 1, 1};
  uint64_t globalDim[5] = {1, 1, 1, 1, 1}, globalStrides[5] = {0};
  uint32_t tensorRank32 = uint32_t(tensorRank);

  char *globalAddress = nullptr;
  switch (tensorRank) {
  case 1:
    mgpuGetMemRefDataAndShape<1>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 2:
    mgpuGetMemRefDataAndShape<2>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 3:
    mgpuGetMemRefDataAndShape<3>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 4:
    mgpuGetMemRefDataAndShape<4>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 5:
    mgpuGetMemRefDataAndShape<5>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  default:
    fprintf(
        stderr,
        "'mgpuTensorMapEncodeTiledMemref' failed with 'rank is too high'\n");
    return nullptr;
  }

  for (int64_t r = 0; r < tensorRank; ++r) {
    boxDim[r] = static_cast<uint32_t>(inputBoxDims[tensorRank - r - 1]);
  }

  ScopedContext scopedContext;
  mgpuTensorMapEncodeTiled(&tensorMap, tensorDataType, tensorRank32,
                           globalAddress, globalDim, globalStrides, boxDim,
                           elementStrides, interleave, swizzle, l2Promotion,
                           oobFill);
  // Copy the created tensor map to the device.
  CUdeviceptr dTensorMap;
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&dTensorMap, sizeof(CUtensorMap)));
  CUDA_REPORT_IF_ERROR(cuMemcpy(dTensorMap,
                                reinterpret_cast<CUdeviceptr>(&tensorMap),
                                sizeof(CUtensorMap)));
  return reinterpret_cast<void *>(dTensorMap);
}
#endif

#ifdef MLIR_ENABLE_CUDA_CUSPARSE

///
/// Wrapper methods for the cuSparse library.
///

// Some macro magic to get float/double alpha and beta on the host.
// TODO: add support for passing alpha and beta as arguments.
#define ALPHABETA(dtp, alpha, beta)                                            \
  __nv_bfloat16(alpha##16bf) = 1.0f;                                           \
  __nv_bfloat16(beta##16bf) = 1.0f;                                            \
  __half(alpha##16f) = 1.0f;                                                   \
  __half(beta##16f) = 1.0f;                                                    \
  float(alpha##f) = 1.0f;                                                      \
  float(beta##f) = 1.0f;                                                       \
  double(alpha##d) = 1.0;                                                      \
  double(beta##d) = 1.0;                                                       \
  const void *(alpha##p) = nullptr;                                            \
  const void *(beta##p) = nullptr;                                             \
  if (dtp == CUDA_R_16BF || dtp == CUDA_C_16BF) {                              \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16bf));                     \
    (beta##p) = reinterpret_cast<void *>(&(beta##16bf));                       \
  } else if (dtp == CUDA_R_16F || dtp == CUDA_C_16F) {                         \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16f));                      \
    (beta##p) = reinterpret_cast<void *>(&(beta##16f));                        \
  } else if (dtp == CUDA_R_32F || dtp == CUDA_C_32F) {                         \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##f));                        \
    (beta##p) = reinterpret_cast<void *>(&(beta##f));                          \
  } else {                                                                     \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##d));                        \
    (beta##p) = reinterpret_cast<void *>(&(beta##d));                          \
  }
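
// For example (illustrative expansion): ALPHABETA(CUDA_R_32F, alpha, beta)
// declares 16-bit, float, and double variants all initialized to one and sets
// alphap/betap to point at the float copies, so the cuSPARSE calls below
// effectively run with alpha = beta = 1.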

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseEnv() {
  // ScopedContext is for cuda initialization.
  ScopedContext scopedContext;
  assert(!cusparse_env && "client called mgpuCreateSparseEnv() twice");
  CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&cusparse_env));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseEnv() {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroy(cusparse_env));
  cusparse_env = nullptr;
}
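
// Usage note (a sketch of the contract stated above, not additional API):
// clients bracket all sparse wrapper calls with the environment calls from a
// single thread, roughly:
//
//   mgpuCreateSparseEnv();  // once, before worker threads are spawned
//   ... mgpuCreateCsr / mgpuSpMV / mgpuSpMM on any stream ...
//   mgpuDestroySparseEnv(); // once, at shutdown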

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnVec(intptr_t size, void *values, int32_t dtp, CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnVec(&vec, size, values, dTp))
  return reinterpret_cast<void *>(vec);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnVec(void *v, CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnVec(vec))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dtp,
                CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnMat(&mat, rows, cols, /*ld=*/cols,
                                               values, dTp, CUSPARSE_ORDER_ROW))
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnMat(void *m, CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnMat(mat))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
              void *colIdxs, void *values, int32_t itp, int32_t dtp,
              CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCoo(&mat, rows, cols, nnz, rowIdxs,
                                             colIdxs, values, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}

#ifdef CUSPARSE_COO_AOS // deprecated in cuSPARSE 11.2
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCooAoS(intptr_t rows, intptr_t cols, intptr_t nnz, void *idxs,
                 void *values, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCooAoS(
      &mat, rows, cols, nnz, idxs, values, iTp, CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
#endif // CUSPARSE_COO_AOS

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
              void *colIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsr(&mat, rows, cols, nnz, rowPos,
                                             colIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsc(intptr_t rows, intptr_t cols, intptr_t nnz, void *colPos,
              void *rowIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsc(&mat, rows, cols, nnz, colPos,
                                             rowIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateBsr(intptr_t brows, intptr_t bcols, intptr_t bnnz, intptr_t rBsz,
              intptr_t cBsz, void *rowPos, void *colIdxs, void *values,
              int32_t ptp, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
#if CUSPARSE_VERSION >= 12100
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateBsr(
      &mat, brows, bcols, bnnz, rBsz, cBsz, rowPos, colIdxs, values, pTp, iTp,
      CUSPARSE_INDEX_BASE_ZERO, dTp, CUSPARSE_ORDER_ROW))
#endif
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySpMat(void *m, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroySpMat(mat))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpMVBufferSize(
    int32_t ma, void *a, void *x, void *y, int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV_bufferSize(
      cusparse_env, modeA, alphap, matA, vecX, betap, vecY, cTp,
      CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSpMV(int32_t ma, void *a, void *x,
                                                   void *y, int32_t ctp,
                                                   void *buf,
                                                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV(cusparse_env, modeA, alphap, matA, vecX,
                                        betap, vecY, cTp,
                                        CUSPARSE_SPMV_ALG_DEFAULT, buf))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                   int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSpMM(int32_t ma, int32_t mb,
                                                   void *a, void *b, void *c,
                                                   int32_t ctp, void *buf,
                                                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM(cusparse_env, modeA, modeB, alphap,
                                        matA, matB, betap, matC, cTp,
                                        CUSPARSE_SPMM_ALG_DEFAULT, buf))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSDDMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                    int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SDDMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSDDMM(int32_t ma, int32_t mb,
                                                    void *a, void *b, void *c,
                                                    int32_t ctp, void *buf,
                                                    CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM(cusparse_env, modeA, modeB, alphap,
                                         matA, matB, betap, matC, cTp,
                                         CUSPARSE_SDDMM_ALG_DEFAULT, buf))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuSpGEMMCreateDescr(CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = nullptr;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_createDescr(&spgemmDesc))
  return reinterpret_cast<void *>(spgemmDesc);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMDestroyDescr(void *s, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_destroyDescr(spgemmDesc))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpGEMMWorkEstimation(
    void *s, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    intptr_t bs, void *buf, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize = bs;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_workEstimation(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize, buf))
  return newBufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpGEMMCompute(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
                  int32_t ctp, intptr_t bsz2, void *buf2, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize2 = bsz2;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_compute(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize2, buf2))
  return newBufferSize2;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMCopy(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
               int32_t ctp, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseSpGEMM_copy(cusparse_env, modeA, modeB, alphap, matA, matB, betap,
                          matC, cTp, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMatGetSize(void *m, void *r, void *c, void *n, CUstream /*stream*/) {
  cusparseConstSpMatDescr_t matDescr =
      reinterpret_cast<cusparseConstSpMatDescr_t>(m);
  int64_t *rows = reinterpret_cast<int64_t *>(r);
  int64_t *cols = reinterpret_cast<int64_t *>(c);
  int64_t *nnz = reinterpret_cast<int64_t *>(n);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMatGetSize(matDescr, rows, cols, nnz));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSetCsrPointers(void *m, void *p, void *c, void *v, CUstream /*stream*/) {
  cusparseSpMatDescr_t matDescr = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseCsrSetPointers(matDescr, p, c, v));
}

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT

///
/// Wrapper methods for the cuSparseLt library.
///

struct cusparseLtSpMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  // TODO: the following three are associated with the SpMM operator rather
  // than the sparse matrix. Create workspace buffers and pass them to the
  // SpMM execution.
  cusparseLtMatmulAlgSelection_t alg_sel;
  cusparseLtMatmulPlan_t plan;
  cusparseLtMatmulDescriptor_t matmul;
  void *values{nullptr};
};

struct cusparseLtDnMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  void *values{nullptr};
};

static_assert(sizeof(cusparseLtHandle_t) == 11024,
              "Unexpected cusparseLt handle size");
static_assert(sizeof(cusparseLtSpMatHandleAndData) == 44104,
              "Unexpected cusparseLt sparse matrix handle size");
static_assert(sizeof(cusparseLtDnMatHandleAndData) == 11032,
              "Unexpected cusparseLt dense matrix handle size");

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseLtEnv() {
  // ScopedContext is for cuda initialization.
  ScopedContext scopedContext;
  assert(!cusparseLt_initiated &&
         "client called mgpuCreateSparseLtEnv() twice");
  // Note that cuSparseLt still uses cusparseStatus_t.
  CUSPARSE_REPORT_IF_ERROR(cusparseLtInit(&cusparseLt_env));
  cusparseLt_initiated = true;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseLtEnv() {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDestroy(&cusparseLt_env));
  cusparseLt_initiated = false;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCreateCuSparseLtDnMat(void *dh, intptr_t rows, intptr_t cols, void *values,
                          int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  dnmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major when deciding lda.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDenseDescriptorInit(
      &cusparseLt_env, &(dnmat_handle->mat), rows, cols, /*lda=*/cols,
      alignment, dTp, CUSPARSE_ORDER_ROW))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtDnMat(void *dh, CUstream /*stream*/) {
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(dnmat_handle->mat)))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCusparseLtCreate2To4SpMat(void *sh, intptr_t rows, intptr_t cols,
                              void *values, int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  spmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major when deciding lda.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtStructuredDescriptorInit(
      &cusparseLt_env, &(spmat_handle->mat), rows, cols, /*ld=*/cols, alignment,
      dTp, CUSPARSE_ORDER_ROW, CUSPARSELT_SPARSITY_50_PERCENT))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtSpMat(void *sh, CUstream /*stream*/) {
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(spmat_handle->mat)))
}

// Several things are done in this stage: algorithm selection, planning, and
// returning the workspace and compressed matrix data buffer sizes.
// The parameter prune_flag indicates whether pruning and a pruning check
// happen: 0 means no pruning or pruning check, 1 means pruning, and 2 means
// pruning plus a pruning check.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCuSparseLtSpMMBufferSize(void *bs, int32_t ma, int32_t mb, void *a, void *b,
                             void *c, int32_t ctp, int32_t prune_flag,
                             CUstream stream) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  // TODO: support more advanced settings, e.g., the input right operand being
  // a sparse matrix. For now, assume matA is the sparse matrix.
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
  auto workspace_size = reinterpret_cast<size_t *>(bs);
  auto compressed_size = &(reinterpret_cast<size_t *>(bs)[1]);
  auto compressed_buffer_size = &(reinterpret_cast<size_t *>(bs)[2]);
  auto cTp = static_cast<cusparseComputeType>(ctp);

  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulDescriptorInit(
      &cusparseLt_env, &(matA->matmul), modeA, modeB, &(matA->mat),
      &(matB->mat), &(matC->mat), &(matC->mat), cTp))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSelectionInit(
      &cusparseLt_env, &(matA->alg_sel), &(matA->matmul),
      CUSPARSELT_MATMUL_ALG_DEFAULT))
  int alg = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSetAttribute(
      &cusparseLt_env, &(matA->alg_sel), CUSPARSELT_MATMUL_ALG_CONFIG_ID, &alg,
      sizeof(alg)))

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanInit(
      &cusparseLt_env, &(matA->plan), &(matA->matmul), &(matA->alg_sel)))

  // Pruning step (in-place).
  if (prune_flag > 0)
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPrune(
        &cusparseLt_env, &(matA->matmul), matA->values, matA->values,
        CUSPARSELT_PRUNE_SPMMA_STRIP, stream))

  // Check structure of A.
  // Note that this adds a synchronization on the stream.
  // TODO: Do we want that?
  if (prune_flag == 2) {
    int *dvalid = (int *)mgpuMemAlloc(sizeof(int), stream, false);
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPruneCheck(
        &cusparseLt_env, &(matA->matmul), matA->values, dvalid, stream))
    int valid = 0;
    mgpuMemcpy(&valid, dvalid, sizeof(int), stream);
    mgpuStreamSynchronize(stream);
    mgpuMemFree(dvalid, stream);
    if (valid != 0)
      fprintf(stderr, "CUSPARSE-LT: sparse matrix is not 2:4; computed results "
                      "will be invalid\n");
  }

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulGetWorkspace(
      &cusparseLt_env, &(matA->plan), workspace_size))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMACompressedSize(
      &cusparseLt_env, &(matA->plan), compressed_size, compressed_buffer_size))
}
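
// Illustrative call (hypothetical handles): with prune_flag=2 the sparse
// operand is pruned in place and verified, and bs[0..2] receive the
// workspace, compressed, and compressed-buffer sizes:
//
//   size_t sizes[3];
//   mgpuCuSparseLtSpMMBufferSize(sizes, ma, mb, spMatA, dnMatB, dnMatC,
//                                ctp, /*prune_flag=*/2, stream);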

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCuSparseLtSpMM(void *a, void *b, void *c, void *d_workspace,
                   void *dA_compressed, void *dA_compressedBuffer,
                   CUstream stream) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);

  ALPHABETA(CUDA_R_32F, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtSpMMACompress(&cusparseLt_env, &(matA->plan), (matA->values),
                              dA_compressed, dA_compressedBuffer, stream))

  // TODO: add support for multi-stream execution.
  // Perform the matrix multiplication D = A*B+C, using C == D for now.
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatmul(&cusparseLt_env, &(matA->plan), alphap, dA_compressed,
                       matB->values, betap, matC->values,
                       /*dD*/ matC->values, d_workspace, nullptr, 0))

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(matA->mat)))
  // Destroy the plan associated with the sparse matrix.
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanDestroy(&(matA->plan)))
}

#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE