MLIR 23.0.0git
CudaRuntimeWrappers.cpp
Go to the documentation of this file.
1//===- CudaRuntimeWrappers.cpp - MLIR CUDA API wrapper library ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Implements C wrappers around the CUDA library for easy linking in ORC jit.
10// Also adds some debugging helpers that are helpful when writing MLIR code to
11// run on GPUs.
12//
13//===----------------------------------------------------------------------===//
14
#include "mlir/ExecutionEngine/CRunnerUtils.h"
17#include <cstdio>
18
19#include "cuda.h"
20#include "cuda_bf16.h"
21#include "cuda_fp16.h"
22
23#ifdef MLIR_ENABLE_CUDA_CUSPARSE
24#include "cusparse.h"
25#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
26#include "cusparseLt.h"
27#endif // MLIR_ENABLE_CUDA_CUSPARSELT
28#endif // MLIR_ENABLE_CUDA_CUSPARSE
29
30#ifdef _WIN32
31#include <malloc.h>
32#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
33#else
34#define MLIR_CUDA_WRAPPERS_EXPORT __attribute__((visibility("default")))
35#endif // _WIN32
36
/// Evaluates a CUDA driver API call and, on failure, prints the failing
/// expression together with the driver's error name to stderr. Errors are
/// reported, not propagated: execution continues after the report.
#define CUDA_REPORT_IF_ERROR(expr)                                             \
  [](CUresult result) {                                                        \
    if (!result)                                                               \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)
47
48/// Helper to check if a CUDA error is due to the context being destroyed
49/// during program shutdown. Both CUDA_ERROR_DEINITIALIZED and
50/// CUDA_ERROR_CONTEXT_IS_DESTROYED indicate that the CUDA context has been
51/// torn down and any associated resources are already freed.
52static bool isCudaContextShutdownError(CUresult result) {
53 return result == CUDA_ERROR_DEINITIALIZED ||
54 result == CUDA_ERROR_CONTEXT_IS_DESTROYED;
55}
56
/// Like CUDA_REPORT_IF_ERROR, but silences errors caused by CUDA context
/// shutdown. These errors are benign when they occur during program exit,
/// as all resources are freed with the context. Used by the teardown
/// wrappers below (module unload, stream/event destroy, event synchronize).
#define CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(expr)                             \
  [](CUresult result) {                                                        \
    if (!result || isCudaContextShutdownError(result))                         \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)
70
/// Evaluates a cuSPARSE call and prints the failing expression and the
/// library's error string on failure. Intentionally a bare-brace block (not
/// the usual do/while(0) idiom): call sites in this file invoke the macro
/// without a trailing semicolon, which a do/while form would require.
#define CUSPARSE_REPORT_IF_ERROR(expr)                                         \
  {                                                                            \
    cusparseStatus_t status = (expr);                                          \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                   \
      fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr,               \
              cusparseGetErrorString(status));                                 \
    }                                                                          \
  }
79
// Device ordinal used by the wrappers; per-thread so different host threads
// can target different devices (set via mgpuSetDefaultDevice below).
thread_local static int32_t defaultDevice = 0;
81
/// Helper method that checks environment value for debugging.
/// The MLIR_CUDA_DEBUG environment variable is consulted exactly once; the
/// result is cached for the lifetime of the process.
static bool isDebugEnabled() {
  static const bool enabled = getenv("MLIR_CUDA_DEBUG") != nullptr;
  return enabled;
}
88
/// Prints a printf-style message to stderr, prefixed with file, line, and
/// function, but only when MLIR_CUDA_DEBUG is set in the environment.
#define debug_print(fmt, ...)                                                  \
  do {                                                                         \
    if (isDebugEnabled())                                                      \
      fprintf(stderr, "%s:%d:%s(): " fmt, "CudaRuntimeWrappers.cpp", __LINE__, \
              __func__, __VA_ARGS__);                                          \
  } while (0)
95
96// Returns default CUdevice
97static CUdevice getDefaultCuDevice() {
98 CUdevice device;
99 CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
100 return device;
101}
102
103// Make the primary context of the current default device current for the
104// duration
105// of the instance and restore the previous context on destruction.
107public:
109 // Static reference to CUDA primary context for device ordinal
110 // defaultDevice.
111 static CUcontext context = [] {
112 CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
113 CUcontext ctx;
114 // Note: this does not affect the current context.
116 cuDevicePrimaryCtxRetain(&ctx, getDefaultCuDevice()));
117 return ctx;
118 }();
119
120 CUDA_REPORT_IF_ERROR(cuCtxPushCurrent(context));
121 }
122
123 ~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxPopCurrent(nullptr)); }
124};
125
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
// Note that (1) Nvidia confirms the safety to share handle across multiple
// instances, and streams. (2) Clients are responsible to call the @mgpu
// environment initialization/destruction in a thread-safe manner, e.g.,
// at the beginning of the program before multi-threads are created.
// Shared cuSPARSE handle; nullptr until mgpuCreateSparseEnv() is called.
static cusparseHandle_t cusparse_env = nullptr;

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
// cusparseLtHandle_t is not a pointer type, so we need an additional flag to
// indicate whether it is initialized.
static cusparseLtHandle_t cusparseLt_env;
static bool cusparseLt_initiated = false;

#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE
141
142extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
143mgpuModuleLoad(void *data, size_t /*gpuBlobSize*/) {
144 ScopedContext scopedContext;
145 CUmodule module = nullptr;
146 CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
147 return module;
148}
149
150extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
151mgpuModuleLoadJIT(void *data, int optLevel, size_t /*assmeblySize*/) {
152 ScopedContext scopedContext;
153 CUmodule module = nullptr;
154 char jitErrorBuffer[4096] = {0};
155 CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
156 CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
157 CU_JIT_OPTIMIZATION_LEVEL};
158 void *jitOptionsVals[] = {jitErrorBuffer,
159 reinterpret_cast<void *>(sizeof(jitErrorBuffer)),
160 reinterpret_cast<void *>(optLevel)};
161
162 CUresult result =
163 cuModuleLoadDataEx(&module, data, 3, jitOptions, jitOptionsVals);
164 if (result) {
165 fprintf(stderr, "JIT compilation failed with: '%s'\n", jitErrorBuffer);
167 }
168 return module;
169}
170
/// Unloads `module`. Uses the shutdown-tolerant reporter because unloads may
/// run during program teardown, after the CUDA context is already destroyed.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuModuleUnload(module));
}
174
175extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
176mgpuModuleGetFunction(CUmodule module, const char *name) {
177 CUfunction function = nullptr;
178 CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
179 return function;
180}
181
182// The wrapper uses intptr_t instead of CUDA's unsigned int to match
183// the type of MLIR's index type. This avoids the need for casts in the
184// generated MLIR code.
185extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
186mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
187 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
188 intptr_t blockZ, int32_t smem, CUstream stream, void **params,
189 void **extra, size_t /*paramsCount*/) {
190 ScopedContext scopedContext;
191 if (smem > 0) {
192 // Avoid checking driver as it's more expensive than if statement
193 int32_t maxShmem = 0;
194 CUdevice device = getDefaultCuDevice();
195 CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
196 CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
197 &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
198 device));
199 if (maxShmem < smem) {
200 fprintf(stderr,
201 "Requested shared memory (%dkb) is larger than maximum allowed "
202 "shared memory (%dkb) for this device\n",
203 smem, maxShmem);
204 }
205 CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
206 function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
207 }
208 debug_print("Launching kernel, grid=%ld,%ld,%ld, "
209 "threads: %ld, %ld, %ld, "
210 "smem: %dkb\n",
211 gridX, gridY, gridZ, blockX, blockY, blockZ, smem);
212 CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
213 blockY, blockZ, smem, stream, params,
214 extra));
215}
216
218 ScopedContext scopedContext;
219 CUstream stream = nullptr;
220 CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
221 return stream;
222}
223
/// Destroys `stream`. Shutdown-context errors are silenced: during program
/// exit the stream is already gone along with the context.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamDestroy(CUstream stream) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuStreamDestroy(stream));
}
227
/// Blocks the host until all work queued on `stream` has completed.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuStreamSynchronize(CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
}
232
233extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamWaitEvent(CUstream stream,
234 CUevent event) {
236 cuStreamWaitEvent(stream, event, /*flags=*/0));
237}
238
240 ScopedContext scopedContext;
241 CUevent event = nullptr;
242 CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
243 return event;
244}
245
/// Destroys `event`; tolerates context-shutdown errors during program exit.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventDestroy(CUevent event) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuEventDestroy(event));
}
249
/// Blocks the host until `event` has completed; tolerates context-shutdown
/// errors during program exit.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventSynchronize(CUevent event) {
  CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(cuEventSynchronize(event));
}
253
/// Records `event` on `stream`, capturing the stream's current position.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventRecord(CUevent event,
                                                          CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuEventRecord(event, stream));
}
258
259extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
260mgpuMemAlloc(uint64_t sizeBytes, CUstream stream, bool isHostShared) {
261 ScopedContext scopedContext;
262 CUdeviceptr ptr = 0;
263 if (sizeBytes == 0)
264 return reinterpret_cast<void *>(ptr);
265
266 if (isHostShared) {
268 cuMemAllocManaged(&ptr, sizeBytes, CU_MEM_ATTACH_GLOBAL));
269 return reinterpret_cast<void *>(ptr);
270 }
271 CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
272 return reinterpret_cast<void *>(ptr);
273}
274
/// Frees device memory previously returned by mgpuMemAlloc. The free is
/// synchronous; the stream parameter is unused.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemFree(void *ptr,
                                                      CUstream /*stream*/) {
  CUDA_REPORT_IF_ERROR(cuMemFree(reinterpret_cast<CUdeviceptr>(ptr)));
}
279
/// Asynchronously copies `sizeBytes` from `src` to `dst` on `stream`.
/// cuMemcpyAsync infers the copy direction from the pointer kinds.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}
286
/// Asynchronously fills `count` 32-bit words at `dst` with `value`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD32Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}
292
/// Asynchronously fills `count` 16-bit words at `dst` with `value`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD16Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}
298
299///
300/// Helper functions for writing mlir example code
301///
302
// Allows to register byte array with the CUDA runtime. Helpful until we have
// transfer functions implemented.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegister(void *ptr, uint64_t sizeBytes) {
  ScopedContext scopedContext;
  // Page-lock (pin) the host range so the driver can transfer it directly.
  CUDA_REPORT_IF_ERROR(cuMemHostRegister(ptr, sizeBytes, /*flags=*/0));
}
310
311/// Registers a memref with the CUDA runtime. `descriptor` is a pointer to a
312/// ranked memref descriptor struct of rank `rank`. Helpful until we have
313/// transfer functions implemented.
314extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
316 int64_t elementSizeBytes) {
317 // Only densely packed tensors are currently supported.
318#ifdef _WIN32
319 int64_t *denseStrides = (int64_t *)_alloca(rank * sizeof(int64_t));
320#else
321 int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
322#endif // _WIN32
323 int64_t *sizes = descriptor->sizes;
324 for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
325 denseStrides[i] = runningStride;
326 runningStride *= sizes[i];
327 }
328 uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
329 int64_t *strides = &sizes[rank];
330 (void)strides;
331 for (unsigned i = 0; i < rank; ++i)
332 assert(strides[i] == denseStrides[i] &&
333 "Mismatch in computed dense strides");
334
335 auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
336 mgpuMemHostRegister(ptr, sizeBytes);
337}
338
339// Allows to unregister byte array with the CUDA runtime.
341 ScopedContext scopedContext;
342 CUDA_REPORT_IF_ERROR(cuMemHostUnregister(ptr));
343}
344
345/// Unregisters a memref with the CUDA runtime. `descriptor` is a pointer to a
346/// ranked memref descriptor struct of rank `rank`
347extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
349 StridedMemRefType<char, 1> *descriptor,
350 int64_t elementSizeBytes) {
351 auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
353}
354
/// Sets the device ordinal used by subsequent wrapper calls on the calling
/// thread (`defaultDevice` is thread_local).
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSetDefaultDevice(int32_t device) {
  defaultDevice = device;
}
358
359///
360/// Runtime methods using CUDA 12.0+ driver
361///
362
363#if (CUDA_VERSION >= 12000)
364
365extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchClusterKernel(
366 CUfunction function, intptr_t clusterX, intptr_t clusterY,
367 intptr_t clusterZ, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
368 intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem,
369 CUstream stream, void **params, void **extra, size_t /*paramsCount*/) {
370 ScopedContext scopedContext;
371 if (smem > 0) {
372 // Avoid checking driver as it's more expensive than if statement
373 int32_t maxShmem = 0;
374 CUdevice device = getDefaultCuDevice();
375 CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
376 CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
377 &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
378 device));
379 if (maxShmem < smem) {
380 fprintf(stderr,
381 "Requested shared memory (%dkb) is larger than maximum allowed "
382 "shared memory (%dkb) for this device\n",
383 smem, maxShmem);
384 }
385 CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
386 function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
387 }
388 CUlaunchConfig config;
389 config.gridDimX = gridX;
390 config.gridDimY = gridY;
391 config.gridDimZ = gridZ;
392 config.blockDimX = blockX;
393 config.blockDimY = blockY;
394 config.blockDimZ = blockZ;
395 config.sharedMemBytes = smem;
396 config.hStream = stream;
397 CUlaunchAttribute launchAttr[2];
398 launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
399 launchAttr[0].value.clusterDim.x = clusterX;
400 launchAttr[0].value.clusterDim.y = clusterY;
401 launchAttr[0].value.clusterDim.z = clusterZ;
402 launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
403 launchAttr[1].value.clusterSchedulingPolicyPreference =
404 CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
405 config.numAttrs = 2;
406 config.attrs = launchAttr;
407
408 debug_print("Launching kernel,"
409 "cluster: %ld, %ld, %ld, "
410 "grid=%ld,%ld,%ld, "
411 "threads: %ld, %ld, %ld, "
412 "smem: %dkb\n",
413 clusterX, clusterY, clusterZ, gridX, gridY, gridZ, blockX, blockY,
414 blockZ, smem);
415
416 CUDA_REPORT_IF_ERROR(cuLaunchKernelEx(&config, function, params, extra));
417}
418
/// Encodes a CUDA tensor-map (TMA) descriptor into `tensorMap` via the
/// driver's cuTensorMapEncodeTiled (CUDA 12.0+).
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuTensorMapEncodeTiled(
    CUtensorMap *tensorMap,             // Tensor map object
    CUtensorMapDataType tensorDataType, // Tensor data type
    cuuint32_t tensorRank,              // Dimensionality of tensor
    void *globalAddress,                // Starting address
    const cuuint64_t *globalDim,        // Tensor size (number of elements)
    const cuuint64_t *globalStrides,    // Stride size (in bytes)
    const cuuint32_t *boxDim,           // Traversal box (number of elments)
    const cuuint32_t *elementStrides,   // Traversal stride
    CUtensorMapInterleave interleave,   // Type of interleaved layout
    CUtensorMapSwizzle swizzle,         // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion, // L2 promotion size
    CUtensorMapFloatOOBfill oobFill     // Padding zfill or NaN fill
) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuTensorMapEncodeTiled(
      tensorMap, tensorDataType, tensorRank, globalAddress, globalDim,
      globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion,
      oobFill));
  // NOTE(review): the debug dump unconditionally reads five entries of each
  // array regardless of tensorRank — callers are expected to pass arrays
  // padded to 5 entries (as mgpuTensorMapEncodeTiledMemref does); confirm
  // for any other caller.
  debug_print("Created TMA descriptor\n Addr: %p\n"
              "data type : %d\n"
              "rank : %d\n"
              "globalDim[5]: %zu, %zu, %zu, %zu, %zu\n"
              "globalStrides[5]: %zu, %zu, %zu, %zu, %zu\n"
              "boxDim[5]: %u, %u, %u, %u, %u\n"
              "elementStrides[5]: %u, %u, %u, %u, %u\n"
              "interleave: %u \n"
              "swizzle: %u \n"
              "l2Promotion: %u \n"
              "oobFill: %u \n",
              (void *)&tensorMap, tensorDataType, tensorRank, globalDim[0],
              globalDim[1], globalDim[2], globalDim[3], globalDim[4],
              globalStrides[0], globalStrides[1], globalStrides[2],
              globalStrides[3], globalStrides[4], boxDim[0], boxDim[1],
              boxDim[2], boxDim[3], boxDim[4], elementStrides[0],
              elementStrides[1], elementStrides[2], elementStrides[3],
              elementStrides[4], interleave, swizzle, l2Promotion, oobFill);
}
457
/// Extracts the data pointer, sizes, and byte strides from a rank-`Rank`
/// memref descriptor. Dimension order is reversed (index 0 receives the
/// last memref dimension), and only Rank-1 strides are produced.
template <int Rank>
void mgpuGetMemRefDataAndShape(void *rawDescriptor, char **addr,
                               uint64_t *globalDim, uint64_t *globalStrides,
                               const CUtensorMapDataType tensorDataType) {
  auto descriptor =
      reinterpret_cast<StridedMemRefType<char, Rank> *>(rawDescriptor);
  *addr = descriptor->data;
  for (int i = 0; i < Rank; ++i) {
    globalDim[i] = static_cast<uint64_t>(descriptor->sizes[Rank - i - 1]);
  }
  // Element size in bytes, indexed by the integral value of
  // CUtensorMapDataType. NOTE(review): must stay in sync with the enum
  // ordering in cuda.h — verify on CUDA upgrades.
  static constexpr int elementSizeInBytes[] = {1, 2, 4, 4, 8, 8, 2,
                                               4, 8, 2, 4, 4, 4};
  // Convert element-count strides to byte strides; the stride of the last
  // memref dimension is omitted.
  for (int i = 0; i < Rank - 1; ++i) {
    globalStrides[i] = static_cast<uint64_t>(
        descriptor->strides[Rank - i - 2] * elementSizeInBytes[tensorDataType]);
  }
}
475
/// Builds a TMA descriptor for a ranked memref (rank 1-5) and uploads it to
/// device memory. Returns a device pointer to the copied CUtensorMap
/// (allocated here with cuMemAlloc; ownership passes to the caller), or
/// nullptr when the rank exceeds 5.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *mgpuTensorMapEncodeTiledMemref(
    int64_t tensorRank,                       // Dimensionality of tensor
    void *rankedDescriptor,                   // Ranked MemRef descriptor
    const CUtensorMapDataType tensorDataType, // Stride size (in bytes)
    CUtensorMapInterleave interleave,         // Type of interleaved layout
    CUtensorMapSwizzle swizzle,               // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion,       // L2 promotion size
    CUtensorMapFloatOOBfill oobFill,          // Padding zfill or NaN fill
    int64_t *inputBoxDims // Tensor size (number of elements)
) {
  CUtensorMap tensorMap;

  // All shape arrays are padded to 5 entries; unused dims default to 1.
  uint32_t boxDim[5] = {1, 1, 1, 1, 1}, elementStrides[5] = {1, 1, 1, 1, 1};
  uint64_t globalDim[5] = {1, 1, 1, 1, 1}, globalStrides[5] = {0};
  uint32_t tensorRank32 = uint32_t(tensorRank);

  char *globalAddress = nullptr;
  // Dispatch on the runtime rank: the descriptor layout depends on the
  // template rank of StridedMemRefType.
  switch (tensorRank) {
  case 1:
    mgpuGetMemRefDataAndShape<1>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 2:
    mgpuGetMemRefDataAndShape<2>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 3:
    mgpuGetMemRefDataAndShape<3>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 4:
    mgpuGetMemRefDataAndShape<4>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 5:
    mgpuGetMemRefDataAndShape<5>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  default:
    fprintf(
        stderr,
        "'mgpuTensorMapEncodeTiledMemref' failed with 'rank is too high'\n");
    return nullptr;
  }

  // Reverse the box dims to match the dimension order produced by
  // mgpuGetMemRefDataAndShape above.
  for (int64_t r = 0; r < tensorRank; ++r) {
    boxDim[r] = static_cast<uint32_t>(inputBoxDims[tensorRank - r - 1]);
  }

  ScopedContext scopedContext;
  mgpuTensorMapEncodeTiled(&tensorMap, tensorDataType, tensorRank32,
                           globalAddress, globalDim, globalStrides, boxDim,
                           elementStrides, interleave, swizzle, l2Promotion,
                           oobFill);
  // Copy created tensor map to device
  CUdeviceptr dTensorMap;
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&dTensorMap, sizeof(CUtensorMap)));
  CUDA_REPORT_IF_ERROR(cuMemcpy(dTensorMap,
                                reinterpret_cast<CUdeviceptr>(&tensorMap),
                                sizeof(CUtensorMap)));
  return reinterpret_cast<void *>(dTensorMap);
}
538#endif
539
540#ifdef MLIR_ENABLE_CUDA_CUSPARSE
541
542///
543/// Wrapper methods for the cuSparse library.
544///
545
// Some macro magic to get float/double alpha and beta on host.
// TODO: add support to passing alpha and beta as arguments
/// Declares host-side scalars alpha/beta = 1 in bf16, f16, f32, and f64, and
/// points `alpha##p` / `beta##p` at the pair matching the runtime data type
/// `dtp`. Non-(bf16/f16/f32) types fall through to the double variants.
#define ALPHABETA(dtp, alpha, beta)                                            \
  __nv_bfloat16(alpha##16bf) = 1.0f;                                           \
  __nv_bfloat16(beta##16bf) = 1.0f;                                            \
  __half(alpha##16f) = 1.0f;                                                   \
  __half(beta##16f) = 1.0f;                                                    \
  float(alpha##f) = 1.0f;                                                      \
  float(beta##f) = 1.0f;                                                       \
  double(alpha##d) = 1.0;                                                      \
  double(beta##d) = 1.0;                                                       \
  const void *(alpha##p) = nullptr;                                            \
  const void *(beta##p) = nullptr;                                             \
  if (dtp == CUDA_R_16BF || dtp == CUDA_C_16BF) {                              \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16bf));                     \
    (beta##p) = reinterpret_cast<void *>(&(beta##16bf));                       \
  } else if (dtp == CUDA_R_16F || dtp == CUDA_C_16F) {                         \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16f));                      \
    (beta##p) = reinterpret_cast<void *>(&(beta##16f));                        \
  } else if (dtp == CUDA_R_32F || dtp == CUDA_C_32F) {                         \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##f));                        \
    (beta##p) = reinterpret_cast<void *>(&(beta##f));                          \
  } else {                                                                     \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##d));                        \
    (beta##p) = reinterpret_cast<void *>(&(beta##d));                          \
  }
572
/// Initializes the shared cuSPARSE handle. Must be called exactly once, in a
/// thread-safe manner (see the note at the cusparse_env declaration).
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseEnv() {
  // ScopedContext is for cuda initialization.
  ScopedContext scopedContext;
  assert(!cusparse_env && "client called mgpuCreateSparseEnv() twice");
  CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&cusparse_env));
}
579
/// Destroys the shared cuSPARSE handle created by mgpuCreateSparseEnv and
/// resets it to nullptr so a later re-initialization is possible.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseEnv() {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroy(cusparse_env));
  cusparse_env = nullptr;
}
585
586extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
587mgpuCreateDnVec(intptr_t size, void *values, int32_t dtp, CUstream /*stream*/) {
588 cusparseDnVecDescr_t vec = nullptr;
589 auto dTp = static_cast<cudaDataType_t>(dtp);
590 CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnVec(&vec, size, values, dTp))
591 return reinterpret_cast<void *>(vec);
592}
593
/// Destroys a dense-vector descriptor created by mgpuCreateDnVec.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnVec(void *v, CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnVec(vec))
}
599
600extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
601mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dtp,
602 CUstream /*stream*/) {
603 cusparseDnMatDescr_t mat = nullptr;
604 auto dTp = static_cast<cudaDataType_t>(dtp);
605 CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnMat(&mat, rows, cols, /*ld=*/cols,
606 values, dTp, CUSPARSE_ORDER_ROW))
607 return reinterpret_cast<void *>(mat);
608}
609
/// Destroys a dense-matrix descriptor created by mgpuCreateDnMat.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnMat(void *m, CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnMat(mat))
}
615
616extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
617mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
618 void *colIdxs, void *values, int32_t itp, int32_t dtp,
619 CUstream /*stream*/) {
620 cusparseSpMatDescr_t mat = nullptr;
621 auto iTp = static_cast<cusparseIndexType_t>(itp);
622 auto dTp = static_cast<cudaDataType_t>(dtp);
623 CUSPARSE_REPORT_IF_ERROR(cusparseCreateCoo(&mat, rows, cols, nnz, rowIdxs,
624 colIdxs, values, iTp,
625 CUSPARSE_INDEX_BASE_ZERO, dTp))
626 return reinterpret_cast<void *>(mat);
627}
628
#ifdef CUSPARSE_COO_AOS // deprecated in cuSPARSE 11.2
/// Creates a COO descriptor in array-of-structures layout (interleaved
/// row/column indices in `idxs`). Only compiled against older cuSPARSE.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCooAoS(intptr_t rows, intptr_t cols, intptr_t nnz, void *idxs,
                 void *values, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCooAoS(
      &mat, rows, cols, nnz, idxs, values, iTp, CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
#endif // CUSPARSE_COO_AOS
641
642extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
643mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
644 void *colIdxs, void *values, int32_t ptp, int32_t itp,
645 int32_t dtp, CUstream /*stream*/) {
646 cusparseSpMatDescr_t mat = nullptr;
647 auto pTp = static_cast<cusparseIndexType_t>(ptp);
648 auto iTp = static_cast<cusparseIndexType_t>(itp);
649 auto dTp = static_cast<cudaDataType_t>(dtp);
650 CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsr(&mat, rows, cols, nnz, rowPos,
651 colIdxs, values, pTp, iTp,
652 CUSPARSE_INDEX_BASE_ZERO, dTp))
653 return reinterpret_cast<void *>(mat);
654}
655
656extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
657mgpuCreateCsc(intptr_t rows, intptr_t cols, intptr_t nnz, void *colPos,
658 void *rowIdxs, void *values, int32_t ptp, int32_t itp,
659 int32_t dtp, CUstream /*stream*/) {
660 cusparseSpMatDescr_t mat = nullptr;
661 auto pTp = static_cast<cusparseIndexType_t>(ptp);
662 auto iTp = static_cast<cusparseIndexType_t>(itp);
663 auto dTp = static_cast<cudaDataType_t>(dtp);
664 CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsc(&mat, rows, cols, nnz, colPos,
665 rowIdxs, values, pTp, iTp,
666 CUSPARSE_INDEX_BASE_ZERO, dTp))
667 return reinterpret_cast<void *>(mat);
668}
669
/// Creates a BSR (blocked sparse row) descriptor with block size rBsz x cBsz.
/// Requires cuSPARSE >= 12.1; with older versions the call is compiled out
/// and a null descriptor is returned.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateBsr(intptr_t brows, intptr_t bcols, intptr_t bnnz, intptr_t rBsz,
              intptr_t cBsz, void *rowPos, void *colIdxs, void *values,
              int32_t ptp, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
#if CUSPARSE_VERSION >= 12100
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateBsr(
      &mat, brows, bcols, bnnz, rBsz, cBsz, rowPos, colIdxs, values, pTp, iTp,
      CUSPARSE_INDEX_BASE_ZERO, dTp, CUSPARSE_ORDER_ROW))
#endif
  return reinterpret_cast<void *>(mat);
}
685
/// Destroys a sparse-matrix descriptor created by any of the mgpuCreate*
/// sparse constructors above.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySpMat(void *m, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroySpMat(mat))
}
691
/// Returns the workspace size (bytes) required by cusparseSpMV for
/// y = alpha*op(A)*x + beta*y with alpha=beta=1 (see ALPHABETA).
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpMVBufferSize(
    int32_t ma, void *a, void *x, void *y, int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV_bufferSize(
      cusparse_env, modeA, alphap, matA, vecX, betap, vecY, cTp,
      CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}
707
/// Computes y = op(A)*x + y (alpha=beta=1, see ALPHABETA) using the
/// workspace `buf` sized by mgpuSpMVBufferSize.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSpMV(int32_t ma, void *a, void *x,
                                                   void *y, int32_t ctp,
                                                   void *buf,
                                                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV(cusparse_env, modeA, alphap, matA, vecX,
                                        betap, vecY, cTp,
                                        CUSPARSE_SPMV_ALG_DEFAULT, buf))
}
723
725mgpuSpMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
726 int32_t ctp, CUstream /*stream*/) {
727 assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
728 cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
729 cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
730 cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
731 cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
732 cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
733 cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
734 ALPHABETA(cTp, alpha, beta)
735 size_t bufferSize = 0;
736 CUSPARSE_REPORT_IF_ERROR(cusparseSpMM_bufferSize(
737 cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
738 CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
739 return bufferSize;
740}
741
/// Computes C = op(A)*op(B) + C (alpha=beta=1, see ALPHABETA) using the
/// workspace `buf` sized by mgpuSpMMBufferSize.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSpMM(int32_t ma, int32_t mb,
                                                   void *a, void *b, void *c,
                                                   int32_t ctp, void *buf,
                                                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM(cusparse_env, modeA, modeB, alphap,
                                        matA, matB, betap, matC, cTp,
                                        CUSPARSE_SPMM_ALG_DEFAULT, buf))
}
758
760mgpuSDDMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
761 int32_t ctp, CUstream /*stream*/) {
762 assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
763 cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
764 cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
765 cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
766 cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
767 cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
768 auto cTp = static_cast<cudaDataType_t>(ctp);
769 ALPHABETA(cTp, alpha, beta)
770 size_t bufferSize = 0;
771 CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM_bufferSize(
772 cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
773 CUSPARSE_SDDMM_ALG_DEFAULT, &bufferSize))
774 return bufferSize;
775}
776
/// Computes the SDDMM C = (op(A)*op(B)) sampled on C's sparsity pattern, + C
/// (alpha=beta=1, see ALPHABETA), using workspace `buf` sized by
/// mgpuSDDMMBufferSize.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSDDMM(int32_t ma, int32_t mb,
                                                    void *a, void *b, void *c,
                                                    int32_t ctp, void *buf,
                                                    CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM(cusparse_env, modeA, modeB, alphap,
                                         matA, matB, betap, matC, cTp,
                                         CUSPARSE_SDDMM_ALG_DEFAULT, buf))
}
793
/// Creates an opaque SpGEMM descriptor used across the work-estimation /
/// compute / copy phases of a sparse-sparse matrix multiply.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuSpGEMMCreateDescr(CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = nullptr;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_createDescr(&spgemmDesc))
  return reinterpret_cast<void *>(spgemmDesc);
}
800
/// Destroys an SpGEMM descriptor created by mgpuSpGEMMCreateDescr.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMDestroyDescr(void *s, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_destroyDescr(spgemmDesc))
}
806
/// First SpGEMM phase: estimates the workspace needed for C = op(A)*op(B)
/// (alpha=beta=1, see ALPHABETA). `bs`/`buf` carry the currently available
/// buffer; the (possibly larger) required size is returned.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpGEMMWorkEstimation(
    void *s, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    intptr_t bs, void *buf, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize = bs;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_workEstimation(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize, buf))
  return newBufferSize;
}
824
826mgpuSpGEMMCompute(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
827 int32_t ctp, intptr_t bsz2, void *buf2, CUstream /*stream*/) {
828 cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
829 cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
830 cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
831 cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
832 cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
833 cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
834 auto cTp = static_cast<cudaDataType_t>(ctp);
835 ALPHABETA(cTp, alpha, beta)
836 size_t newBufferSize2 = bsz2;
837 CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_compute(
838 cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
839 CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize2, buf2))
840 return newBufferSize2;
841}
842
843extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
844mgpuSpGEMMCopy(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
845 int32_t ctp, CUstream /*stream*/) {
846 cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
847 cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
848 cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
849 cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
850 cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
851 cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
852 auto cTp = static_cast<cudaDataType_t>(ctp);
853 ALPHABETA(cTp, alpha, beta)
855 cusparseSpGEMM_copy(cusparse_env, modeA, modeB, alphap, matA, matB, betap,
856 matC, cTp, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc))
857}
858
859extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
860mgpuSpMatGetSize(void *m, void *r, void *c, void *n, CUstream /*stream*/) {
861 cusparseConstSpMatDescr_t matDescr =
862 reinterpret_cast<cusparseConstSpMatDescr_t>(m);
863 int64_t *rows = reinterpret_cast<int64_t *>(r);
864 int64_t *cols = reinterpret_cast<int64_t *>(c);
865 int64_t *nnz = reinterpret_cast<int64_t *>(n);
866 CUSPARSE_REPORT_IF_ERROR(cusparseSpMatGetSize(matDescr, rows, cols, nnz));
867}
868
869extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
870mgpuSetCsrPointers(void *m, void *p, void *c, void *v, CUstream /*stream*/) {
871 cusparseSpMatDescr_t matDescr = reinterpret_cast<cusparseSpMatDescr_t>(m);
872 CUSPARSE_REPORT_IF_ERROR(cusparseCsrSetPointers(matDescr, p, c, v));
873}
874
875#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
876
877///
878/// Wrapper methods for the cuSparseLt library.
879///
880
881struct cusparseLtSpMatHandleAndData {
882 cusparseLtMatDescriptor_t mat;
883 // TODO: the following three are associated with the SpMM operator rather than
884 // the sparse matrix. Create workspace buffers and pass them to the SpMM
885 // execution.
886 cusparseLtMatmulAlgSelection_t alg_sel;
887 cusparseLtMatmulPlan_t plan;
888 cusparseLtMatmulDescriptor_t matmul;
889 void *values{nullptr};
890};
891
892struct cusparseLtDnMatHandleAndData {
893 cusparseLtMatDescriptor_t mat;
894 void *values{nullptr};
895};
896
897static_assert(sizeof(cusparseLtHandle_t) == 11024,
898 "Unexpected cusparseLt handle size");
899static_assert(sizeof(cusparseLtSpMatHandleAndData) == 44104,
900 "Unexpected cusparseLt sparse matrix handle size");
901static_assert(sizeof(cusparseLtDnMatHandleAndData) == 11032,
902 "Unexpected cusparseLt dense matrix handle size");
903
904extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseLtEnv() {
905 // ScopedContext is for cuda initialization.
906 ScopedContext scopedContext;
907 assert(!cusparseLt_initiated &&
908 "client called mgpuCreateSparseLtEnv() twice");
909 // Note that cuSparseLt still uses cusparseStatus_t.
910 CUSPARSE_REPORT_IF_ERROR(cusparseLtInit(&cusparseLt_env));
911 cusparseLt_initiated = true;
912}
913
914extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseLtEnv() {
915 assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
916 CUSPARSE_REPORT_IF_ERROR(cusparseLtDestroy(&cusparseLt_env));
917 cusparseLt_initiated = false;
918}
919
920extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
921mgpuCreateCuSparseLtDnMat(void *dh, intptr_t rows, intptr_t cols, void *values,
922 int32_t dtp, CUstream /*stream*/) {
923 assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
924 auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
925 dnmat_handle->values = values;
926 auto dTp = static_cast<cudaDataType_t>(dtp);
927 // Assume row-major when deciding lda.
928 const uint32_t alignment = 16;
929 CUSPARSE_REPORT_IF_ERROR(cusparseLtDenseDescriptorInit(
930 &cusparseLt_env, &(dnmat_handle->mat), rows, cols, /*lda=*/cols,
931 alignment, dTp, CUSPARSE_ORDER_ROW))
932}
933
934extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
935mgpuDestroyCuSparseLtDnMat(void *dh, CUstream /*stream*/) {
936 auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
937 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(dnmat_handle->mat)))
938}
939
940extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
941mgpuCusparseLtCreate2To4SpMat(void *sh, intptr_t rows, intptr_t cols,
942 void *values, int32_t dtp, CUstream /*stream*/) {
943 assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
944 auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
945 spmat_handle->values = values;
946 auto dTp = static_cast<cudaDataType_t>(dtp);
947 // Assume row-major when deciding lda.
948 const uint32_t alignment = 16;
949 CUSPARSE_REPORT_IF_ERROR(cusparseLtStructuredDescriptorInit(
950 &cusparseLt_env, &(spmat_handle->mat), rows, cols, /*ld=*/cols, alignment,
951 dTp, CUSPARSE_ORDER_ROW, CUSPARSELT_SPARSITY_50_PERCENT))
952}
953
954extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
955mgpuDestroyCuSparseLtSpMat(void *sh, CUstream /*stream*/) {
956 auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
957 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(spmat_handle->mat)))
958}
959
960// Several things are being done in this stage, algorithm selection, planning,
961// and returning workspace and compressed matrices data buffer sizes.
962// The parameter prune_flag is used to indicate whether pruning and pruning
963// check will happen 0 means not prune or prune check, 1 means prune, 2 means
964// prune & prune check
965extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
966mgpuCuSparseLtSpMMBufferSize(void *bs, int32_t ma, int32_t mb, void *a, void *b,
967 void *c, int32_t ctp, int32_t prune_flag,
968 CUstream stream) {
969 assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
970 // TODO: support more advanced settings, e.g., the input right operand is a
971 // sparse matrix assuming matA is the sparse matrix
972 auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
973 auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
974 auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
975 auto workspace_size = reinterpret_cast<size_t *>(bs);
976 auto compressed_size = &(reinterpret_cast<size_t *>(bs)[1]);
977 auto compressed_buffer_size = &(reinterpret_cast<size_t *>(bs)[2]);
978 auto cTp = static_cast<cusparseComputeType>(ctp);
979
980 cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
981 cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
982 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulDescriptorInit(
983 &cusparseLt_env, &(matA->matmul), modeA, modeB, &(matA->mat),
984 &(matB->mat), &(matC->mat), &(matC->mat), cTp))
985 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSelectionInit(
986 &cusparseLt_env, &(matA->alg_sel), &(matA->matmul),
987 CUSPARSELT_MATMUL_ALG_DEFAULT))
988 int alg = 0;
989 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSetAttribute(
990 &cusparseLt_env, &(matA->alg_sel), CUSPARSELT_MATMUL_ALG_CONFIG_ID, &alg,
991 sizeof(alg)))
992
993 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanInit(
994 &cusparseLt_env, &(matA->plan), &(matA->matmul), &(matA->alg_sel)))
995
996 // Pruning step (in-place).
997 if (prune_flag > 0)
998 CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPrune(
999 &cusparseLt_env, &(matA->matmul), matA->values, matA->values,
1000 CUSPARSELT_PRUNE_SPMMA_STRIP, stream))
1001
1002 // Check structure of A.
1003 // Note that this adds a synchronization on the stream.
1004 // TODO: Do we want that?
1005 if (prune_flag == 2) {
1006 int *dvalid = (int *)mgpuMemAlloc(sizeof(int), stream, false);
1007 CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPruneCheck(
1008 &cusparseLt_env, &(matA->matmul), matA->values, dvalid, stream))
1009 int valid = 0;
1010 mgpuMemcpy(&valid, dvalid, sizeof(int), stream);
1011 mgpuStreamSynchronize(stream);
1012 mgpuMemFree(dvalid, stream);
1013 if (valid != 0)
1014 fprintf(stderr, "CUPARSE-LT: sparse matrix is not 2:4; computed results "
1015 "will be invalid\n");
1016 }
1017
1018 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulGetWorkspace(
1019 &cusparseLt_env, &(matA->plan), workspace_size))
1020 CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMACompressedSize(
1021 &cusparseLt_env, &(matA->plan), compressed_size, compressed_buffer_size))
1022}
1023
1024extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
1025mgpuCuSparseLtSpMM(void *a, void *b, void *c, void *d_workspace,
1026 void *dA_compressed, void *dA_compressedBuffer,
1027 CUstream stream) {
1028 assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
1029 auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
1030 auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
1031 auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
1032
1033 ALPHABETA(CUDA_R_32F, alpha, beta)
1035 cusparseLtSpMMACompress(&cusparseLt_env, &(matA->plan), (matA->values),
1036 dA_compressed, dA_compressedBuffer, stream))
1037
1038 // TODO: add support to multi-stream execution
1039 // Perform the matrix multiplication. D = A*B+C using C==D for now
1041 cusparseLtMatmul(&cusparseLt_env, &(matA->plan), alphap, dA_compressed,
1042 matB->values, betap, matC->values,
1043 /*dD*/ matC->values, d_workspace, nullptr, 0))
1044
1045 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatDescriptorDestroy(&(matA->mat)))
1046 // destroy the plan associated with the sparse matrix
1047 CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanDestroy(&(matA->plan)))
1048}
1049
1050#endif // MLIR_ENABLE_CUDA_CUSPARSELT
1051#endif // MLIR_ENABLE_CUDA_CUSPARSE
#define CUSPARSE_REPORT_IF_ERROR(expr)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamWaitEvent(CUstream stream, CUevent event)
#define MLIR_CUDA_WRAPPERS_EXPORT
MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module)
MLIR_CUDA_WRAPPERS_EXPORT void * mgpuMemAlloc(uint64_t sizeBytes, CUstream stream, bool isHostShared)
#define CUDA_REPORT_IF_ERROR(expr)
static bool isCudaContextShutdownError(CUresult result)
Helper to check if a CUDA error is due to the context being destroyed during program shutdown.
#define CUDA_REPORT_IF_ERROR_IGNORE_SHUTDOWN(expr)
Like CUDA_REPORT_IF_ERROR, but silences errors caused by CUDA context shutdown.
static thread_local int32_t defaultDevice
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType< char, 1 > *descriptor, int64_t elementSizeBytes)
Registers a memref with the CUDA runtime.
MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoadJIT(void *data, int optLevel, size_t)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostUnregister(void *ptr)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventRecord(CUevent event, CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT CUfunction mgpuModuleGetFunction(CUmodule module, const char *name)
#define debug_print(fmt,...)
MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate()
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostRegister(void *ptr, uint64_t sizeBytes)
Helper functions for writing mlir example code.
static bool isDebugEnabled()
Helper method that checks environment value for debugging.
MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamSynchronize(CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate()
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemFree(void *ptr, CUstream)
MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoad(void *data, size_t)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuSetDefaultDevice(int32_t device)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuStreamDestroy(CUstream stream)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY, intptr_t gridZ, intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem, CUstream stream, void **params, void **extra, size_t)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuMemHostUnregisterMemRef(int64_t rank, StridedMemRefType< char, 1 > *descriptor, int64_t elementSizeBytes)
Unregisters a memref with the CUDA runtime.
MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventSynchronize(CUevent event)
MLIR_CUDA_WRAPPERS_EXPORT void mgpuEventDestroy(CUevent event)
static CUdevice getDefaultCuDevice()
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
if(!isCopyOut)
StridedMemRef descriptor type with static rank.