#include "cuda_bf16.h"
#include "cuda_fp16.h"

#ifdef MLIR_ENABLE_CUDA_CUSPARSE
#include "cusparse.h"
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
#include "cusparseLt.h"
#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE

#ifdef _WIN32
#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
#else
#define MLIR_CUDA_WRAPPERS_EXPORT __attribute__((visibility("default")))
#endif // _WIN32

#define CUDA_REPORT_IF_ERROR(expr)                                            \
  [](CUresult result) {                                                       \
    if (!result)                                                              \
      return;                                                                 \
    const char *name = nullptr;                                               \
    cuGetErrorName(result, &name);                                            \
    if (!name)                                                                \
      name = "<unknown>";                                                     \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                  \
  }(expr)

#define CUSPARSE_REPORT_IF_ERROR(expr)                                        \
  {                                                                           \
    cusparseStatus_t status = (expr);                                         \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                  \
      fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr,              \
              cusparseGetErrorString(status));                                \
    }                                                                         \
  }

// Device used when no explicit device has been selected via
// mgpuSetDefaultDevice.
thread_local static int32_t defaultDevice = 0;

const char *kDebugEnvironmentVariable = "MLIR_CUDA_DEBUG";

/// Helper method that checks environment value for debugging.
bool isDebugEnabled() {
  static bool isInitialized = false;
  static bool isEnabled = false;
  if (!isInitialized) {
    isEnabled = getenv(kDebugEnvironmentVariable) != nullptr;
    isInitialized = true;
  }
  return isEnabled;
}

#define debug_print(fmt, ...)                                                 \
  do {                                                                        \
    if (isDebugEnabled())                                                     \
      fprintf(stderr, "%s:%d:%s(): " fmt, "CudaRuntimeWrappers.cpp",          \
              __LINE__, __func__, __VA_ARGS__);                               \
  } while (0)
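// Example (hypothetical): with the debug environment variable set, a call
// such as
//   debug_print("allocated %llu bytes\n", (unsigned long long)sizeBytes);
// emits "CudaRuntimeWrappers.cpp:<line>:<function>(): allocated N bytes" on
// stderr; without the variable the call is a no-op.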
// Returns the CUDA device handle for the current default device.
CUdevice getDefaultCuDevice() {
  CUdevice device;
  CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
  return device;
}

// Primary CUDA context of the default device, retained once and shared by all
// wrapper calls.
static CUcontext context = [] {
  CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
  CUcontext ctx;
  CUDA_REPORT_IF_ERROR(cuDevicePrimaryCtxRetain(&ctx, getDefaultCuDevice()));
  return ctx;
}();

#ifdef MLIR_ENABLE_CUDA_CUSPARSE
// Shared cuSPARSE handle, created by mgpuCreateSparseEnv() and destroyed by
// mgpuDestroySparseEnv().
static cusparseHandle_t cusparse_env = nullptr;

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
// cusparseLtHandle_t is not a pointer type, so an extra flag records whether
// it has been initialized.
static cusparseLtHandle_t cusparseLt_env;
static bool cusparseLt_initiated = false;
#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
mgpuModuleLoad(void *data, size_t /*gpuBlobSize*/) {
  CUmodule module = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
  return module;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoadJIT(void *data,
                                                                int optLevel) {
  CUmodule module = nullptr;
  char jitErrorBuffer[4096] = {0};
  CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
                               CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                               CU_JIT_OPTIMIZATION_LEVEL};
  void *jitOptionsVals[] = {jitErrorBuffer,
                            reinterpret_cast<void *>(sizeof(jitErrorBuffer)),
                            reinterpret_cast<void *>(optLevel)};

  CUresult result =
      cuModuleLoadDataEx(&module, data, 3, jitOptions, jitOptionsVals);
  if (result) {
    fprintf(stderr, "JIT compilation failed with: '%s'\n", jitErrorBuffer);
    CUDA_REPORT_IF_ERROR(result);
  }
  return module;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuModuleUnload(CUmodule module) {
  CUDA_REPORT_IF_ERROR(cuModuleUnload(module));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
mgpuModuleGetFunction(CUmodule module, const char *name) {
  CUfunction function = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
  return function;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
                 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
                 intptr_t blockZ, int32_t smem, CUstream stream, void **params,
                 void **extra, size_t /*paramsCount*/) {
  if (smem > 0) {
    // Only query the device when dynamic shared memory is actually requested.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%dkb) is larger than maximum allowed "
              "shared memory (%dkb) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  debug_print("Launching kernel, grid=%ld,%ld,%ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %dkb\n",
              gridX, gridY, gridZ, blockX, blockY, blockZ, smem);
  CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
                                      blockY, blockZ, smem, stream, params,
                                      extra));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate() {
  CUstream stream = nullptr;
  CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
  return stream;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate() {
  CUevent event = nullptr;
  CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
  return event;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuMemAlloc(uint64_t sizeBytes, CUstream /*stream*/, bool isHostShared) {
  CUdeviceptr ptr = 0;
  if (sizeBytes == 0)
    return reinterpret_cast<void *>(ptr);
  if (isHostShared) {
    CUDA_REPORT_IF_ERROR(
        cuMemAllocManaged(&ptr, sizeBytes, CU_MEM_ATTACH_GLOBAL));
    return reinterpret_cast<void *>(ptr);
  }
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
  return reinterpret_cast<void *>(ptr);
}
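// Hypothetical host-side driver sketch (not part of this file's API surface);
// a plausible sequence is: load the serialized module, resolve the kernel,
// create a stream, and launch.
//
//   CUmodule module = mgpuModuleLoad(cubinBlob, cubinSize);
//   CUfunction fn = mgpuModuleGetFunction(module, "my_kernel");
//   CUstream stream = mgpuStreamCreate();
//   void *kernelParams[] = {&devicePtrArg};
//   mgpuLaunchKernel(fn, /*grid=*/1, 1, 1, /*block=*/128, 1, 1, /*smem=*/0,
//                    stream, kernelParams, /*extra=*/nullptr,
//                    /*paramCount=*/1);
//   mgpuStreamSynchronize(stream);
//   mgpuStreamDestroy(stream);
//   mgpuModuleUnload(module);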
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD32Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD16Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}
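// Hypothetical usage of the memory wrappers above: fill a 256-element 32-bit
// device buffer with a constant and copy it back to host memory
// (cuMemcpyAsync infers the copy direction from unified virtual addressing).
//
//   void *dBuf = mgpuMemAlloc(256 * sizeof(uint32_t), stream,
//                             /*isHostShared=*/false);
//   mgpuMemset32(dBuf, 42u, /*count=*/256, stream);
//   mgpuMemcpy(hostBuf, dBuf, 256 * sizeof(uint32_t), stream);
//   mgpuStreamSynchronize(stream);
//   mgpuMemFree(dBuf, stream);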
/// Registers a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
                          int64_t elementSizeBytes) {
  // Only densely packed tensors are currently supported.
#ifdef _WIN32
  int64_t *denseStrides = (int64_t *)_alloca(rank * sizeof(int64_t));
#else
  int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
#endif // _WIN32
  int64_t *sizes = descriptor->sizes;
  for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
    denseStrides[i] = runningStride;
    runningStride *= sizes[i];
  }
  uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
  int64_t *strides = &sizes[rank];
  for (unsigned i = 0; i < rank; ++i)
    assert(strides[i] == denseStrides[i] &&
           "Mismatch in computed dense strides");

  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostRegister(ptr, sizeBytes);
}

/// Unregisters a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostUnregisterMemRef(int64_t rank,
                            StridedMemRefType<char, 1> *descriptor,
                            int64_t elementSizeBytes) {
  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostUnregister(ptr);
}
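// Worked example for the stride check above: a densely packed 4x8 memref of
// f32 (elementSizeBytes = 4) has sizes = {4, 8}, so the loop yields
// denseStrides = {8, 1} and sizeBytes = 4 * 8 * 4 = 128 registered bytes.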
#if (CUDA_VERSION >= 12000)

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchClusterKernel(
    CUfunction function, intptr_t clusterX, intptr_t clusterY,
    intptr_t clusterZ, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
    intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem,
    CUstream stream, void **params, void **extra, size_t /*paramsCount*/) {
  if (smem > 0) {
    // Only query the device when dynamic shared memory is actually requested.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%dkb) is larger than maximum allowed "
              "shared memory (%dkb) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  CUlaunchConfig config;
  config.gridDimX = gridX;
  config.gridDimY = gridY;
  config.gridDimZ = gridZ;
  config.blockDimX = blockX;
  config.blockDimY = blockY;
  config.blockDimZ = blockZ;
  config.sharedMemBytes = smem;
  config.hStream = stream;
  CUlaunchAttribute launchAttr[2];
  launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
  launchAttr[0].value.clusterDim.x = clusterX;
  launchAttr[0].value.clusterDim.y = clusterY;
  launchAttr[0].value.clusterDim.z = clusterZ;
  launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
  launchAttr[1].value.clusterSchedulingPolicyPreference =
      CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
  config.numAttrs = 2;
  config.attrs = launchAttr;

  debug_print("Launching cluster kernel, "
              "cluster: %ld, %ld, %ld, "
              "grid=%ld,%ld,%ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %dkb\n",
              clusterX, clusterY, clusterZ, gridX, gridY, gridZ, blockX, blockY,
              blockZ, smem);
  CUDA_REPORT_IF_ERROR(cuLaunchKernelEx(&config, function, params, extra));
}
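// Hypothetical usage (requires a cluster-capable device, sm_90 or newer):
// launch `fn` over a 4x1x1 grid of 128-thread blocks grouped into 2x1x1
// thread-block clusters.
//
//   mgpuLaunchClusterKernel(fn, /*cluster=*/2, 1, 1, /*grid=*/4, 1, 1,
//                           /*block=*/128, 1, 1, /*smem=*/0, stream,
//                           kernelParams, /*extra=*/nullptr,
//                           /*paramCount=*/1);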
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuTensorMapEncodeTiled(
    CUtensorMap *tensorMap,             // Tensor map object
    CUtensorMapDataType tensorDataType, // Tensor data type
    cuuint32_t tensorRank,              // Dimensionality of tensor
    void *globalAddress,                // Starting address
    const cuuint64_t *globalDim,        // Tensor size (number of elements)
    const cuuint64_t *globalStrides,    // Stride size (in bytes)
    const cuuint32_t *boxDim,           // Traversal box (number of elements)
    const cuuint32_t *elementStrides,   // Traversal stride
    CUtensorMapInterleave interleave,   // Type of interleaved layout
    CUtensorMapSwizzle swizzle,         // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion, // L2 promotion size
    CUtensorMapFloatOOBfill oobFill     // Padding zfill or NaN fill
) {
  CUDA_REPORT_IF_ERROR(cuTensorMapEncodeTiled(
      tensorMap, tensorDataType, tensorRank, globalAddress, globalDim,
      globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion,
      oobFill));
  debug_print("Created TMA descriptor\n Addr: %p\n"
              "data type : %d\n"
              "rank : %d\n"
              "globalDim[5]: %zu, %zu, %zu, %zu, %zu\n"
              "globalStrides[5]: %zu, %zu, %zu, %zu, %zu\n"
              "boxDim[5]: %u, %u, %u, %u, %u\n"
              "elementStrides[5]: %u, %u, %u, %u, %u\n"
              "interleave: %u\n"
              "swizzle: %u\n"
              "l2Promotion: %u\n"
              "oobFill: %u\n",
              (void *)&tensorMap, tensorDataType, tensorRank, globalDim[0],
              globalDim[1], globalDim[2], globalDim[3], globalDim[4],
              globalStrides[0], globalStrides[1], globalStrides[2],
              globalStrides[3], globalStrides[4], boxDim[0], boxDim[1],
              boxDim[2], boxDim[3], boxDim[4], elementStrides[0],
              elementStrides[1], elementStrides[2], elementStrides[3],
              elementStrides[4], interleave, swizzle, l2Promotion, oobFill);
}
template <int Rank>
void mgpuGetMemRefDataAndShape(void *rawDescriptor, char **addr,
                               uint64_t *globalDim, uint64_t *globalStrides,
                               const CUtensorMapDataType tensorDataType) {
  auto descriptor =
      reinterpret_cast<StridedMemRefType<char, Rank> *>(rawDescriptor);
  *addr = descriptor->data;
  for (int i = 0; i < Rank; ++i) {
    globalDim[i] = static_cast<uint64_t>(descriptor->sizes[Rank - i - 1]);
  }
  // Element sizes in bytes, indexed by CUtensorMapDataType.
  static constexpr int elementSizeInBytes[] = {1, 2, 4, 4, 8, 8, 2,
                                               4, 8, 2, 4, 4, 4};
  for (int i = 0; i < Rank - 1; ++i) {
    globalStrides[i] = static_cast<uint64_t>(
        descriptor->strides[Rank - i - 2] * elementSizeInBytes[tensorDataType]);
  }
}
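// Worked example: for a rank-2, densely packed 16x32 memref of f16 (element
// size 2 bytes), the loops above produce globalDim = {32, 16} (innermost
// dimension first) and globalStrides[0] = strides[0] * 2 = 64 bytes.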
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *mgpuTensorMapEncodeTiledMemref(
    int64_t tensorRank,                       // Dimensionality of tensor
    void *rankedDescriptor,                   // Ranked MemRef descriptor
    const CUtensorMapDataType tensorDataType, // Stored data type
    CUtensorMapInterleave interleave,         // Type of interleaved layout
    CUtensorMapSwizzle swizzle,               // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion,       // L2 promotion size
    CUtensorMapFloatOOBfill oobFill,          // Padding zfill or NaN fill
    int64_t *inputBoxDims                     // Tile box sizes
) {
  CUtensorMap tensorMap;

  uint32_t boxDim[5] = {1, 1, 1, 1, 1}, elementStrides[5] = {1, 1, 1, 1, 1};
  uint64_t globalDim[5] = {1, 1, 1, 1, 1}, globalStrides[5] = {0};
  uint32_t tensorRank32 = uint32_t(tensorRank);

  char *globalAddress = nullptr;
  switch (tensorRank) {
  case 1:
    mgpuGetMemRefDataAndShape<1>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 2:
    mgpuGetMemRefDataAndShape<2>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 3:
    mgpuGetMemRefDataAndShape<3>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 4:
    mgpuGetMemRefDataAndShape<4>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 5:
    mgpuGetMemRefDataAndShape<5>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  default:
    fprintf(
        stderr,
        "'mgpuTensorMapEncodeTiledMemref' failed with 'rank is too high'\n");
    return nullptr;
  }

  for (int64_t r = 0; r < tensorRank; ++r) {
    boxDim[r] = static_cast<uint32_t>(inputBoxDims[tensorRank - r - 1]);
  }

  mgpuTensorMapEncodeTiled(&tensorMap, tensorDataType, tensorRank32,
                           globalAddress, globalDim, globalStrides, boxDim,
                           elementStrides, interleave, swizzle, l2Promotion,
                           oobFill);
  // Copy the created tensor map to the device and return the device pointer.
  CUdeviceptr dTensorMap;
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&dTensorMap, sizeof(CUtensorMap)));
  CUDA_REPORT_IF_ERROR(cuMemcpy(dTensorMap,
                                reinterpret_cast<CUdeviceptr>(&tensorMap),
                                sizeof(CUtensorMap)));
  return reinterpret_cast<void *>(dTensorMap);
}

#endif // (CUDA_VERSION >= 12000)
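// Hypothetical usage: encode a TMA descriptor for a rank-2 f16 memref tiled
// into 64x64 boxes; the returned pointer is the device-resident CUtensorMap.
//
//   int64_t boxDims[2] = {64, 64};
//   void *dMap = mgpuTensorMapEncodeTiledMemref(
//       /*tensorRank=*/2, memrefDesc, CU_TENSOR_MAP_DATA_TYPE_FLOAT16,
//       CU_TENSOR_MAP_INTERLEAVE_NONE, CU_TENSOR_MAP_SWIZZLE_NONE,
//       CU_TENSOR_MAP_L2_PROMOTION_NONE, CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE,
//       boxDims);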
#ifdef MLIR_ENABLE_CUDA_CUSPARSE

///
/// Wrapper methods for the cuSPARSE library.
///

// Materializes host-side alpha/beta scalars of the data type selected by
// `dtp` and exposes them through the pointers `alpha##p` / `beta##p`.
#define ALPHABETA(dtp, alpha, beta)                                           \
  __nv_bfloat16(alpha##16bf) = 1.0f;                                          \
  __nv_bfloat16(beta##16bf) = 1.0f;                                           \
  __half(alpha##16f) = 1.0f;                                                  \
  __half(beta##16f) = 1.0f;                                                   \
  float(alpha##f) = 1.0f;                                                     \
  float(beta##f) = 1.0f;                                                      \
  double(alpha##d) = 1.0;                                                     \
  double(beta##d) = 1.0;                                                      \
  const void *(alpha##p) = nullptr;                                           \
  const void *(beta##p) = nullptr;                                            \
  if (dtp == CUDA_R_16BF || dtp == CUDA_C_16BF) {                             \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16bf));                    \
    (beta##p) = reinterpret_cast<void *>(&(beta##16bf));                      \
  } else if (dtp == CUDA_R_16F || dtp == CUDA_C_16F) {                        \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16f));                     \
    (beta##p) = reinterpret_cast<void *>(&(beta##16f));                       \
  } else if (dtp == CUDA_R_32F || dtp == CUDA_C_32F) {                        \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##f));                       \
    (beta##p) = reinterpret_cast<void *>(&(beta##f));                         \
  } else {                                                                    \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##d));                       \
    (beta##p) = reinterpret_cast<void *>(&(beta##d));                         \
  }
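// Expansion note: ALPHABETA(cTp, alpha, beta) declares host-side scalars
// alpha16bf/beta16bf, alpha16f/beta16f, alphaf/betaf and alphad/betad, all set
// to 1.0, then points alphap/betap at the pair matching the runtime data type
// cTp; the cuSPARSE calls below therefore always receive host pointers to 1.0
// for both scaling factors.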
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseEnv() {
  assert(!cusparse_env && "client called mgpuCreateSparseEnv() twice");
  CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&cusparse_env))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseEnv() {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroy(cusparse_env))
  cusparse_env = nullptr;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnVec(intptr_t size, void *values, int32_t dtp,
                CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnVec(&vec, size, values, dTp))
  return reinterpret_cast<void *>(vec);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnVec(void *v, CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnVec(vec))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dtp,
                CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnMat(&mat, rows, cols, /*ld=*/cols,
                                               values, dTp, CUSPARSE_ORDER_ROW))
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnMat(void *m, CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
              void *colIdxs, void *values, int32_t itp, int32_t dtp,
              CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCoo(&mat, rows, cols, nnz, rowIdxs,
                                             colIdxs, values, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}

#ifdef CUSPARSE_COO_AOS // deprecated in cuSPARSE 11.2
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCooAoS(intptr_t rows, intptr_t cols, intptr_t nnz, void *idxs,
                 void *values, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCooAoS(
      &mat, rows, cols, nnz, idxs, values, iTp, CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
#endif // CUSPARSE_COO_AOS
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
              void *colIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsr(&mat, rows, cols, nnz, rowPos,
                                             colIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsc(intptr_t rows, intptr_t cols, intptr_t nnz, void *colPos,
              void *rowIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsc(&mat, rows, cols, nnz, colPos,
                                             rowIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
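// Hypothetical usage: wrap a 2x3 CSR matrix with nnz = 3 whose device-resident
// arrays are rowPos = {0, 2, 3}, colIdxs = {0, 2, 1}, values = {1, 2, 3} (f32),
// passing cuSPARSE enum values as the int32 type codes expected above.
//
//   void *spMat = mgpuCreateCsr(2, 3, 3, dRowPos, dColIdxs, dValues,
//                               /*ptp=*/CUSPARSE_INDEX_32I,
//                               /*itp=*/CUSPARSE_INDEX_32I,
//                               /*dtp=*/CUDA_R_32F, stream);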
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateBsr(intptr_t brows, intptr_t bcols, intptr_t bnnz, intptr_t rBsz,
              intptr_t cBsz, void *rowPos, void *colIdxs, void *values,
              int32_t ptp, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
#if CUSPARSE_VERSION >= 12100
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateBsr(
      &mat, brows, bcols, bnnz, rBsz, cBsz, rowPos, colIdxs, values, pTp, iTp,
      CUSPARSE_INDEX_BASE_ZERO, dTp, CUSPARSE_ORDER_ROW))
#endif // CUSPARSE_VERSION >= 12100
  return reinterpret_cast<void *>(mat);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySpMat(void *m, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroySpMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpMVBufferSize(
    int32_t ma, void *a, void *x, void *y, int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV_bufferSize(
      cusparse_env, modeA, alphap, matA, vecX, betap, vecY, cTp,
      CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuSpMV(int32_t ma, void *a,
                                                   void *x, void *y,
                                                   int32_t ctp, void *buf,
                                                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV(cusparse_env, modeA, alphap, matA,
                                        vecX, betap, vecY, cTp,
                                        CUSPARSE_SPMV_ALG_DEFAULT, buf))
}
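// Hypothetical y = A * x call sequence: query the workspace size, allocate it,
// then run the multiplication with the same operand handles.
//
//   intptr_t bufSz = mgpuSpMVBufferSize(CUSPARSE_OPERATION_NON_TRANSPOSE,
//                                       spMat, dnX, dnY, CUDA_R_32F, stream);
//   void *buf = mgpuMemAlloc(bufSz, stream, /*isHostShared=*/false);
//   mgpuSpMV(CUSPARSE_OPERATION_NON_TRANSPOSE, spMat, dnX, dnY, CUDA_R_32F,
//            buf, stream);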
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                   int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMM(int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
         void *buf, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM(cusparse_env, modeA, modeB, alphap,
                                        matA, matB, betap, matC, cTp,
                                        CUSPARSE_SPMM_ALG_DEFAULT, buf))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSDDMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                    int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SDDMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSDDMM(int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
          void *buf, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM(cusparse_env, modeA, modeB, alphap,
                                         matA, matB, betap, matC, cTp,
                                         CUSPARSE_SDDMM_ALG_DEFAULT, buf))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuSpGEMMCreateDescr(CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = nullptr;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_createDescr(&spgemmDesc))
  return reinterpret_cast<void *>(spgemmDesc);
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMDestroyDescr(void *s, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_destroyDescr(spgemmDesc))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpGEMMWorkEstimation(
    void *s, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    intptr_t bs, void *buf, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize = bs;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_workEstimation(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize, buf))
  return newBufferSize;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpGEMMCompute(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
                  int32_t ctp, intptr_t bsz2, void *buf2, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize2 = bsz2;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_compute(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize2, buf2))
  return newBufferSize2;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMCopy(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
               int32_t ctp, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseSpGEMM_copy(cusparse_env, modeA, modeB, alphap, matA, matB, betap,
                          matC, cTp, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMatGetSize(void *m, void *r, void *c, void *n, CUstream /*stream*/) {
  cusparseConstSpMatDescr_t matDescr =
      reinterpret_cast<cusparseConstSpMatDescr_t>(m);
  int64_t *rows = reinterpret_cast<int64_t *>(r);
  int64_t *cols = reinterpret_cast<int64_t *>(c);
  int64_t *nnz = reinterpret_cast<int64_t *>(n);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMatGetSize(matDescr, rows, cols, nnz));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSetCsrPointers(void *m, void *p, void *c, void *v, CUstream /*stream*/) {
  cusparseSpMatDescr_t matDescr = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseCsrSetPointers(matDescr, p, c, v));
}
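// Hypothetical C = A * B SpGEMM sketch following the two-phase cuSPARSE
// protocol exposed above (each phase is first called with a null buffer to ask
// for the required size, then again with the allocated buffer):
//
//   void *desc = mgpuSpGEMMCreateDescr(stream);
//   intptr_t sz1 = mgpuSpGEMMWorkEstimation(desc, opA, opB, A, B, C, dtp,
//                                           /*bs=*/0, nullptr, stream);
//   void *buf1 = mgpuMemAlloc(sz1, stream, false);
//   mgpuSpGEMMWorkEstimation(desc, opA, opB, A, B, C, dtp, sz1, buf1, stream);
//   intptr_t sz2 = mgpuSpGEMMCompute(desc, opA, opB, A, B, C, dtp,
//                                    /*bsz2=*/0, nullptr, stream);
//   void *buf2 = mgpuMemAlloc(sz2, stream, false);
//   mgpuSpGEMMCompute(desc, opA, opB, A, B, C, dtp, sz2, buf2, stream);
//   mgpuSpMatGetSize(C, &rows, &cols, &nnz, stream);
//   // allocate C's position/index/value arrays for the reported nnz, then:
//   mgpuSetCsrPointers(C, dCPos, dCIdxs, dCVals, stream);
//   mgpuSpGEMMCopy(desc, opA, opB, A, B, C, dtp, stream);
//   mgpuSpGEMMDestroyDescr(desc, stream);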
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT

///
/// Wrapper methods for the cuSPARSELt library.
///

struct cusparseLtSpMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  // Descriptor, algorithm selection, plan and matmul handle are kept together
  // with the value buffer so that a single opaque pointer can be passed
  // around.
  cusparseLtMatmulAlgSelection_t alg_sel;
  cusparseLtMatmulPlan_t plan;
  cusparseLtMatmulDescriptor_t matmul;
  void *values{nullptr};
};

struct cusparseLtDnMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  void *values{nullptr};
};

static_assert(sizeof(cusparseLtHandle_t) == 11024,
              "Unexpected cusparseLt handle size");
static_assert(sizeof(cusparseLtSpMatHandleAndData) == 44104,
              "Unexpected cusparseLt sparse matrix handle size");
static_assert(sizeof(cusparseLtDnMatHandleAndData) == 11032,
              "Unexpected cusparseLt dense matrix handle size");
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseLtEnv() {
  assert(!cusparseLt_initiated &&
         "client called mgpuCreateSparseLtEnv() twice");
  // Note that cuSPARSELt still reports through cusparseStatus_t.
  CUSPARSE_REPORT_IF_ERROR(cusparseLtInit(&cusparseLt_env))
  cusparseLt_initiated = true;
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseLtEnv() {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDestroy(&cusparseLt_env))
  cusparseLt_initiated = false;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCreateCuSparseLtDnMat(void *dh, intptr_t rows, intptr_t cols, void *values,
                          int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  dnmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major storage when choosing the leading dimension.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDenseDescriptorInit(
      &cusparseLt_env, &(dnmat_handle->mat), rows, cols, /*ld=*/cols,
      alignment, dTp, CUSPARSE_ORDER_ROW))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtDnMat(void *dh, CUstream /*stream*/) {
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatDescriptorDestroy(&(dnmat_handle->mat)))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCusparseLtCreate2To4SpMat(void *sh, intptr_t rows, intptr_t cols,
                              void *values, int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  spmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major storage when choosing the leading dimension.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtStructuredDescriptorInit(
      &cusparseLt_env, &(spmat_handle->mat), rows, cols, /*ld=*/cols, alignment,
      dTp, CUSPARSE_ORDER_ROW, CUSPARSELT_SPARSITY_50_PERCENT))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtSpMat(void *sh, CUstream /*stream*/) {
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatDescriptorDestroy(&(spmat_handle->mat)))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCuSparseLtSpMMBufferSize(
    void *bs, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    int32_t prune_flag, CUstream stream) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
  auto workspace_size = reinterpret_cast<size_t *>(bs);
  auto compressed_size = &(reinterpret_cast<size_t *>(bs)[1]);
  auto compressed_buffer_size = &(reinterpret_cast<size_t *>(bs)[2]);
  auto cTp = static_cast<cusparseComputeType>(ctp);

  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulDescriptorInit(
      &cusparseLt_env, &(matA->matmul), modeA, modeB, &(matA->mat),
      &(matB->mat), &(matC->mat), &(matC->mat), cTp))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSelectionInit(
      &cusparseLt_env, &(matA->alg_sel), &(matA->matmul),
      CUSPARSELT_MATMUL_ALG_DEFAULT))
  int alg = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSetAttribute(
      &cusparseLt_env, &(matA->alg_sel), CUSPARSELT_MATMUL_ALG_CONFIG_ID, &alg,
      sizeof(alg)))

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanInit(
      &cusparseLt_env, &(matA->plan), &(matA->matmul), &(matA->alg_sel)))

  // Prune the sparse operand in place if requested.
  if (prune_flag > 0)
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPrune(
        &cusparseLt_env, &(matA->matmul), matA->values, matA->values,
        CUSPARSELT_PRUNE_SPMMA_STRIP, stream))

  // Check the 2:4 structure of A. Note that this synchronizes the stream.
  if (prune_flag == 2) {
    int *dvalid = (int *)mgpuMemAlloc(sizeof(int), stream, false);
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPruneCheck(
        &cusparseLt_env, &(matA->matmul), matA->values, dvalid, stream))
    int valid = 0;
    mgpuMemcpy(&valid, dvalid, sizeof(int), stream);
    mgpuStreamSynchronize(stream);
    mgpuMemFree(dvalid, stream);
    if (valid != 0)
      fprintf(stderr, "CUSPARSE-LT: sparse matrix is not 2:4; computed results "
                      "will be invalid\n");
  }

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulGetWorkspace(
      &cusparseLt_env, &(matA->plan), workspace_size))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMACompressedSize(
      &cusparseLt_env, &(matA->plan), compressed_size, compressed_buffer_size))
}
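// Note: `bs` must point at three consecutive size_t slots; on return they hold
// the matmul workspace size, the compressed-matrix size, and the compression
// scratch-buffer size, in that order, so the caller can allocate the three
// device buffers consumed by mgpuCuSparseLtSpMM.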
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCuSparseLtSpMM(void *a, void *b, void *c, void *d_workspace,
                   void *dA_compressed, void *dA_compressedBuffer,
                   CUstream stream) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);

  ALPHABETA(CUDA_R_32F, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtSpMMACompress(&cusparseLt_env, &(matA->plan), (matA->values),
                              dA_compressed, dA_compressedBuffer, stream))

  // Perform the matrix multiplication; C doubles as the output (D == C).
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatmul(&cusparseLt_env, &(matA->plan), alphap, dA_compressed,
                       matB->values, betap, matC->values,
                       /*dD=*/matC->values, d_workspace, nullptr, 0))
}

#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE