#include "cuda_bf16.h"
#include "cuda_fp16.h"

#ifdef MLIR_ENABLE_CUDA_CUSPARSE

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
#include "cusparseLt.h"

#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)

#define MLIR_CUDA_WRAPPERS_EXPORT __attribute__((visibility("default")))
#define CUDA_REPORT_IF_ERROR(expr)                                    \
  [](CUresult result) {                                               \
    if (!result)                                                      \
      return;                                                         \
    const char *name = nullptr;                                       \
    cuGetErrorName(result, &name);                                    \
    if (!name)                                                        \
      name = "<unknown>";                                             \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);          \
  }(expr)

#define CUSPARSE_REPORT_IF_ERROR(expr)                                \
  {                                                                   \
    cusparseStatus_t status = (expr);                                 \
    if (status != CUSPARSE_STATUS_SUCCESS) {                          \
      fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr,      \
              cusparseGetErrorString(status));                        \
    }                                                                 \
  }
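// Illustrative usage (a sketch added for clarity, not part of the original
// source): both macros wrap an API call and print a diagnostic on failure.
//   CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
//   CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&handle));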
static bool isInitialized = false;
static bool isEnabled = false;

#define debug_print(fmt, ...)                                                  \
  do {                                                                         \
    if (isDebugEnabled())                                                      \
      fprintf(stderr, "%s:%d:%s(): " fmt, "CudaRuntimeWrappers.cpp", __LINE__, \
              __func__, __VA_ARGS__);                                          \
  } while (0)
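// Illustrative usage (a sketch added for clarity, not original code):
// debug_print is a no-op unless isDebugEnabled() returns true, which is
// controlled by the environment variable named by kDebugEnvironmentVariable.
//   debug_print("allocating %zu bytes\n", sizeBytes);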
static CUcontext context = [] {
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
static cusparseHandle_t cusparse_env = nullptr;

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
static cusparseLtHandle_t cusparseLt_env;
static bool cusparseLt_initiated = false;
  // From mgpuModuleLoad:
  CUmodule module = nullptr;

  // From mgpuModuleLoadJIT:
  CUmodule module = nullptr;
  char jitErrorBuffer[4096] = {0};
  CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
                               CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                               CU_JIT_OPTIMIZATION_LEVEL};
  void *jitOptionsVals[] = {jitErrorBuffer,
                            reinterpret_cast<void *>(sizeof(jitErrorBuffer)),
                            reinterpret_cast<void *>(optLevel)};

      cuModuleLoadDataEx(&module, data, 3, jitOptions, jitOptionsVals);
    fprintf(stderr, "JIT compilation failed with: '%s'\n", jitErrorBuffer);
  CUfunction function = nullptr;
                 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
                 intptr_t blockZ, int32_t smem, CUstream stream,
                 void **params, void **extra, size_t) {
  int32_t maxShmem = 0;
      &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
  if (maxShmem < smem) {
            "Requested shared memory (%dkb) is larger than maximum allowed "
            "shared memory (%dkb) for this device\n",
      function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
              "threads: %ld, %ld, %ld, "
              gridX, gridY, gridZ, blockX, blockY, blockZ, smem);
                                      blockY, blockZ, smem, stream, params,
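// Illustrative host-side flow (a sketch added for clarity; it mirrors the
// wrapper API declared in this file and is not code from the original source):
//   CUmodule mod = mgpuModuleLoad(cubinData, cubinSize);
//   CUfunction fn = mgpuModuleGetFunction(mod, "kernel_name");
//   CUstream s = mgpuStreamCreate();
//   mgpuLaunchKernel(fn, gridX, gridY, gridZ, blockX, blockY, blockZ,
//                    /*smem=*/0, s, kernelParams, /*extra=*/nullptr, 0);
//   mgpuStreamSynchronize(s);
//   mgpuStreamDestroy(s);
//   mgpuModuleUnload(mod);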
  CUstream stream = nullptr;

  CUevent event = nullptr;

  return reinterpret_cast<void *>(ptr);
      cuMemAllocManaged(&ptr, sizeBytes, CU_MEM_ATTACH_GLOBAL));
  return reinterpret_cast<void *>(ptr);
  return reinterpret_cast<void *>(ptr);
mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream) {
                                    reinterpret_cast<CUdeviceptr>(src),

mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream) {
                                        value, count, stream));

mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream) {
                                        value, count, stream));
                            int64_t elementSizeBytes) {
#ifdef _WIN32
  int64_t *denseStrides = (int64_t *)_alloca(rank * sizeof(int64_t));
#else
  int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
#endif
  int64_t *sizes = descriptor->sizes;
  for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
    denseStrides[i] = runningStride;
    runningStride *= sizes[i];
  }
  uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
  int64_t *strides = &sizes[rank];
  for (unsigned i = 0; i < rank; ++i)
    assert(strides[i] == denseStrides[i] &&
           "Mismatch in computed dense strides");

  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
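// Worked example (illustrative, not from the original source): for a memref
// with sizes = {2, 3, 4} and elementSizeBytes = 4, the loop above produces
// denseStrides = {12, 4, 1}, so the registered region spans
// sizes[0] * denseStrides[0] * elementSizeBytes = 2 * 12 * 4 = 96 bytes.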
                              int64_t elementSizeBytes) {
  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
#if (CUDA_VERSION >= 12000)
    CUfunction function, intptr_t clusterX, intptr_t clusterY,
    intptr_t clusterZ, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
    intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem,
    CUstream stream, void **params, void **extra, size_t) {
  int32_t maxShmem = 0;
      &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
  if (maxShmem < smem) {
            "Requested shared memory (%dkb) is larger than maximum allowed "
            "shared memory (%dkb) for this device\n",
      function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));

  CUlaunchConfig config;
  config.gridDimX = gridX;
  config.gridDimY = gridY;
  config.gridDimZ = gridZ;
  config.blockDimX = blockX;
  config.blockDimY = blockY;
  config.blockDimZ = blockZ;
  config.sharedMemBytes = smem;
  config.hStream = stream;
  CUlaunchAttribute launchAttr[2];
  launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
  launchAttr[0].value.clusterDim.x = clusterX;
  launchAttr[0].value.clusterDim.y = clusterY;
  launchAttr[0].value.clusterDim.z = clusterZ;
  launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
  launchAttr[1].value.clusterSchedulingPolicyPreference =
      CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
  config.attrs = launchAttr;
              "cluster: %ld, %ld, %ld, "
              "threads: %ld, %ld, %ld, "
              clusterX, clusterY, clusterZ, gridX, gridY, gridZ, blockX, blockY,
    CUtensorMap *tensorMap,
    CUtensorMapDataType tensorDataType,
    cuuint32_t tensorRank,
    void *globalAddress,
    const cuuint64_t *globalDim,
    const cuuint64_t *globalStrides,
    const cuuint32_t *boxDim,
    const cuuint32_t *elementStrides,
    CUtensorMapInterleave interleave,
    CUtensorMapSwizzle swizzle,
    CUtensorMapL2promotion l2Promotion,
    CUtensorMapFloatOOBfill oobFill
      tensorMap, tensorDataType, tensorRank, globalAddress, globalDim,
      globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion,
              "globalDim[5]: %zu, %zu, %zu, %zu, %zu\n"
              "globalStrides[5]: %zu, %zu, %zu, %zu, %zu\n"
              "boxDim[5]: %u, %u, %u, %u, %u\n"
              "elementStrides[5]: %u, %u, %u, %u, %u\n"
              (void *)&tensorMap, tensorDataType, tensorRank, globalDim[0],
              globalDim[1], globalDim[2], globalDim[3], globalDim[4],
              globalStrides[0], globalStrides[1], globalStrides[2],
              globalStrides[3], globalStrides[4], boxDim[0], boxDim[1],
              boxDim[2], boxDim[3], boxDim[4], elementStrides[0],
              elementStrides[1], elementStrides[2], elementStrides[3],
              elementStrides[4], interleave, swizzle, l2Promotion, oobFill);
void mgpuGetMemRefDataAndShape(void *rawDescriptor, char **addr,
                               uint64_t *globalDim, uint64_t *globalStrides,
                               const CUtensorMapDataType tensorDataType) {
  *addr = descriptor->data;
  for (int i = 0; i < Rank; ++i) {
    globalDim[i] = static_cast<uint64_t>(descriptor->sizes[Rank - i - 1]);
  }
  static constexpr int elementSizeInBytes[] = {1, 2, 4, 4, 8, 8, 2,
  for (int i = 0; i < Rank - 1; ++i) {
    globalStrides[i] = static_cast<uint64_t>(
        descriptor->strides[Rank - i - 2] * elementSizeInBytes[tensorDataType]);
    void *rankedDescriptor,
    const CUtensorMapDataType tensorDataType,
    CUtensorMapInterleave interleave,
    CUtensorMapSwizzle swizzle,
    CUtensorMapL2promotion l2Promotion,
    CUtensorMapFloatOOBfill oobFill,
    int64_t *inputBoxDims
  CUtensorMap tensorMap;
  uint32_t boxDim[5] = {1, 1, 1, 1, 1}, elementStrides[5] = {1, 1, 1, 1, 1};
  uint64_t globalDim[5] = {1, 1, 1, 1, 1}, globalStrides[5] = {0};
  uint32_t tensorRank32 = uint32_t(tensorRank);
  char *globalAddress = nullptr;
  switch (tensorRank) {
  case 1:
    mgpuGetMemRefDataAndShape<1>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 2:
    mgpuGetMemRefDataAndShape<2>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 3:
    mgpuGetMemRefDataAndShape<3>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 4:
    mgpuGetMemRefDataAndShape<4>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 5:
    mgpuGetMemRefDataAndShape<5>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  default:
    fprintf(stderr,
            "'mgpuTensorMapEncodeTiledMemref' failed with 'rank is too high'\n");

  for (int64_t r = 0; r < tensorRank; ++r) {
    boxDim[r] = static_cast<uint32_t>(inputBoxDims[tensorRank - r - 1]);
  }

  mgpuTensorMapEncodeTiled(&tensorMap, tensorDataType, tensorRank32,
                           globalAddress, globalDim, globalStrides, boxDim,
                           elementStrides, interleave, swizzle, l2Promotion,

  CUdeviceptr dTensorMap;
      reinterpret_cast<CUdeviceptr>(&tensorMap),
      sizeof(CUtensorMap)));
  return reinterpret_cast<void *>(dTensorMap);
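// Worked example (illustrative, not from the original source): for a rank-2
// memref of shape 128x64 with inputBoxDims = {64, 32}, the code above fills
// globalDim = {64, 128} (innermost dimension first) and boxDim = {32, 64},
// encodes the CUtensorMap on the host, copies it into device memory, and
// returns the device pointer as an opaque void *.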
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
#define ALPHABETA(dtp, alpha, beta)                              \
  __nv_bfloat16(alpha##16bf) = 1.0f;                             \
  __nv_bfloat16(beta##16bf) = 1.0f;                              \
  __half(alpha##16f) = 1.0f;                                     \
  __half(beta##16f) = 1.0f;                                      \
  float(alpha##f) = 1.0f;                                        \
  float(beta##f) = 1.0f;                                         \
  double(alpha##d) = 1.0;                                        \
  double(beta##d) = 1.0;                                         \
  const void *(alpha##p) = nullptr;                              \
  const void *(beta##p) = nullptr;                               \
  if (dtp == CUDA_R_16BF || dtp == CUDA_C_16BF) {                \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16bf));       \
    (beta##p) = reinterpret_cast<void *>(&(beta##16bf));         \
  } else if (dtp == CUDA_R_16F || dtp == CUDA_C_16F) {           \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16f));        \
    (beta##p) = reinterpret_cast<void *>(&(beta##16f));          \
  } else if (dtp == CUDA_R_32F || dtp == CUDA_C_32F) {           \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##f));          \
    (beta##p) = reinterpret_cast<void *>(&(beta##f));            \
  } else {                                                       \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##d));          \
    (beta##p) = reinterpret_cast<void *>(&(beta##d));            \
  }
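// Illustrative expansion (a sketch added for clarity, not original code):
// ALPHABETA(cTp, alpha, beta) declares host-side scalars for every supported
// precision and points alphap/betap at the variant matching cTp, so a call
// such as
//   ALPHABETA(cTp, alpha, beta)
//   cusparseSpMV(handle, modeA, alphap, matA, vecX, betap, vecY, cTp,
//                CUSPARSE_SPMV_ALG_DEFAULT, buf);
// passes alpha = beta = 1 in the correct data type.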
  assert(!cusparse_env && "client called mgpuCreateSparseEnv() twice");

  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparse_env = nullptr;
mgpuCreateDnVec(intptr_t size, void *values, int32_t dtp, CUstream) {
  cusparseDnVecDescr_t vec = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  return reinterpret_cast<void *>(vec);

mgpuDestroyDnVec(void *v, CUstream) {
  cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dtp,
  cusparseDnMatDescr_t mat = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
                                            values, dTp, CUSPARSE_ORDER_ROW))
  return reinterpret_cast<void *>(mat);

mgpuDestroyDnMat(void *m, CUstream) {
  cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
              void *colIdxs, void *values, int32_t itp, int32_t dtp,
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
                                       colIdxs, values, iTp,
                                       CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
#ifdef CUSPARSE_COO_AOS
mgpuCreateCooAoS(intptr_t rows, intptr_t cols, intptr_t nnz, void *idxs,
                 void *values, int32_t itp, int32_t dtp, CUstream) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
      &mat, rows, cols, nnz, idxs, values, iTp, CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
              void *colIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
                                       colIdxs, values, pTp, iTp,
                                       CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
mgpuCreateCsc(intptr_t rows, intptr_t cols, intptr_t nnz, void *colPos,
              void *rowIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
                                       rowIdxs, values, pTp, iTp,
                                       CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
mgpuCreateBsr(intptr_t brows, intptr_t bcols, intptr_t bnnz, intptr_t rBsz,
              intptr_t cBsz, void *rowPos, void *colIdxs, void *values,
              int32_t ptp, int32_t itp, int32_t dtp, CUstream) {
  cusparseSpMatDescr_t mat = nullptr;
#if CUSPARSE_VERSION >= 12100
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
      &mat, brows, bcols, bnnz, rBsz, cBsz, rowPos, colIdxs, values, pTp, iTp,
      CUSPARSE_INDEX_BASE_ZERO, dTp, CUSPARSE_ORDER_ROW))
  return reinterpret_cast<void *>(mat);
mgpuDestroySpMat(void *m, CUstream) {
  cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
                   int32_t ma, void *a, void *x, void *y, int32_t ctp,
                   CUstream) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
      cusparse_env, modeA, alphap, matA, vecX, betap, vecY, cTp,
      CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
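// Illustrative host-side SpMV flow (a sketch under assumptions; the parameter
// order follows the fragments above and is not code from the original source):
//   mgpuCreateSparseEnv();
//   void *A = mgpuCreateCsr(m, n, nnz, dRowPos, dColIdxs, dValues,
//                           ptp, itp, dtp, stream);
//   void *x = mgpuCreateDnVec(n, dX, dtp, stream);
//   void *y = mgpuCreateDnVec(m, dY, dtp, stream);
//   intptr_t bufSz = mgpuSpMVBufferSize(modeA, A, x, y, ctp, stream);
//   void *buf = mgpuMemAlloc(bufSz, stream, /*isHostShared=*/false);
//   mgpuSpMV(modeA, A, x, y, ctp, buf, stream);
//   mgpuDestroySpMat(A, stream); mgpuDestroyDnVec(x, stream);
//   mgpuDestroyDnVec(y, stream); mgpuMemFree(buf, stream);
//   mgpuDestroySparseEnv();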
                                         void *y, int32_t ctp,
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
                                         CUSPARSE_SPMV_ALG_DEFAULT, buf))
mgpuSpMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                   int32_t ctp, CUstream) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
                                            void *a, void *b, void *c,
                                            int32_t ctp, void *buf,
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
                                       matA, matB, betap, matC, cTp,
                                       CUSPARSE_SPMM_ALG_DEFAULT, buf))
mgpuSDDMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                    int32_t ctp, CUstream) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SDDMM_ALG_DEFAULT, &bufferSize))
                                             void *a, void *b, void *c,
                                             int32_t ctp, void *buf,
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
                                        matA, matB, betap, matC, cTp,
                                        CUSPARSE_SDDMM_ALG_DEFAULT, buf))
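// Note added for clarity: cusparseSDDMM computes the sampled dense-dense
// product C = alpha * op(A) * op(B) + beta * C, where C is sparse and only
// the positions present in C's sparsity pattern are produced; alpha and beta
// come from the ALPHABETA macro above (both fixed to 1 here).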
mgpuSpGEMMCreateDescr(CUstream) {
  cusparseSpGEMMDescr_t spgemmDesc = nullptr;
  return reinterpret_cast<void *>(spgemmDesc);

mgpuSpGEMMDestroyDescr(void *s, CUstream) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
    void *s, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    intptr_t bs, void *buf, CUstream) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize = bs;
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize, buf))
  return newBufferSize;
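// Note on the SpGEMM wrappers (added for clarity; the call sequence is an
// assumption based on the cuSPARSE SpGEMM API, not text from the original
// source): the caller is expected to invoke the work-estimation entry point
// twice -- first with a null buffer to query the required size, then with an
// allocated buffer -- followed by mgpuSpGEMMCompute and finally
// mgpuSpGEMMCopy once the output matrix C has been sized via
// mgpuSpMatGetSize and its arrays attached with mgpuSetCsrPointers.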
mgpuSpGEMMCompute(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
                  int32_t ctp, intptr_t bsz2, void *buf2, CUstream) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize2 = bsz2;
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize2, buf2))
  return newBufferSize2;
mgpuSpGEMMCopy(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
               int32_t ctp, CUstream) {
  cusparseSpGEMMDescr_t spgemmDesc = reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
      cusparseSpGEMM_copy(cusparse_env, modeA, modeB, alphap, matA, matB, betap,
                          matC, cTp, CUSPARSE_SPGEMM_DEFAULT, spgemmDesc))
mgpuSpMatGetSize(void *m, void *r, void *c, void *n, CUstream) {
  cusparseConstSpMatDescr_t matDescr =
      reinterpret_cast<cusparseConstSpMatDescr_t>(m);
  int64_t *rows = reinterpret_cast<int64_t *>(r);
  int64_t *cols = reinterpret_cast<int64_t *>(c);
  int64_t *nnz = reinterpret_cast<int64_t *>(n);
mgpuSetCsrPointers(void *m, void *p, void *c, void *v, CUstream) {
  cusparseSpMatDescr_t matDescr = reinterpret_cast<cusparseSpMatDescr_t>(m);
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT

struct cusparseLtSpMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  cusparseLtMatmulAlgSelection_t alg_sel;
  cusparseLtMatmulPlan_t plan;
  cusparseLtMatmulDescriptor_t matmul;
  void *values{nullptr};
};

struct cusparseLtDnMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  void *values{nullptr};
};

static_assert(sizeof(cusparseLtHandle_t) == 11024,
              "Unexpected cusparseLt handle size");
static_assert(sizeof(cusparseLtSpMatHandleAndData) == 44104,
              "Unexpected cusparseLt sparse matrix handle size");
static_assert(sizeof(cusparseLtDnMatHandleAndData) == 11032,
              "Unexpected cusparseLt dense matrix handle size");
  assert(!cusparseLt_initiated &&
         "client called mgpuCreateSparseLtEnv() twice");
  cusparseLt_initiated = true;

  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  cusparseLt_initiated = false;
mgpuCreateCuSparseLtDnMat(void *dh, intptr_t rows, intptr_t cols, void *values,
                          int32_t dtp, CUstream) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  dnmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  const uint32_t alignment = 16;
      &cusparseLt_env, &(dnmat_handle->mat), rows, cols, cols,
      alignment, dTp, CUSPARSE_ORDER_ROW))

mgpuDestroyCuSparseLtDnMat(void *dh, CUstream) {
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
mgpuCusparseLtCreate2To4SpMat(void *sh, intptr_t rows, intptr_t cols,
                              void *values, int32_t dtp, CUstream) {
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  spmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  const uint32_t alignment = 16;
      &cusparseLt_env, &(spmat_handle->mat), rows, cols, cols, alignment,
      dTp, CUSPARSE_ORDER_ROW, CUSPARSELT_SPARSITY_50_PERCENT))

mgpuDestroyCuSparseLtSpMat(void *sh, CUstream) {
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
mgpuCuSparseLtSpMMBufferSize(void *bs, int32_t ma, int32_t mb, void *a, void *b,
                             void *c, int32_t ctp, int32_t prune_flag,
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
  auto workspace_size = reinterpret_cast<size_t *>(bs);
  auto compressed_size = &(reinterpret_cast<size_t *>(bs)[1]);
  auto compressed_buffer_size = &(reinterpret_cast<size_t *>(bs)[2]);
  auto cTp = static_cast<cusparseComputeType>(ctp);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
      &cusparseLt_env, &(matA->matmul), modeA, modeB, &(matA->mat),
      &(matB->mat), &(matC->mat), &(matC->mat), cTp))
      &cusparseLt_env, &(matA->alg_sel), &(matA->matmul),
      CUSPARSELT_MATMUL_ALG_DEFAULT))
      &cusparseLt_env, &(matA->alg_sel), CUSPARSELT_MATMUL_ALG_CONFIG_ID, &alg,
      &cusparseLt_env, &(matA->plan), &(matA->matmul), &(matA->alg_sel)))
      &cusparseLt_env, &(matA->matmul), matA->values, matA->values,
      CUSPARSELT_PRUNE_SPMMA_STRIP, stream))
  if (prune_flag == 2) {
    int *dvalid = (int *)mgpuMemAlloc(sizeof(int), stream, false);
      &cusparseLt_env, &(matA->matmul), matA->values, dvalid, stream))
    mgpuMemcpy(&valid, dvalid, sizeof(int), stream);
      fprintf(stderr, "CUSPARSE-LT: sparse matrix is not 2:4; computed results "
                      "will be invalid\n");
      &cusparseLt_env, &(matA->plan), workspace_size))
      &cusparseLt_env, &(matA->plan), compressed_size, compressed_buffer_size))
mgpuCuSparseLtSpMM(void *a, void *b, void *c, void *d_workspace,
                   void *dA_compressed, void *dA_compressedBuffer,
  assert(cusparseLt_initiated && "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);

  ALPHABETA(CUDA_R_32F, alpha, beta)
      cusparseLtSpMMACompress(&cusparseLt_env, &(matA->plan), (matA->values),
                              dA_compressed, dA_compressedBuffer, stream))
      cusparseLtMatmul(&cusparseLt_env, &(matA->plan), alphap, dA_compressed,
                       matB->values, betap, matC->values,
                       matC->values, d_workspace, nullptr, 0))