#include "cuda_bf16.h"
#include "cuda_fp16.h"
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
#include "cusparse.h"
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
#include "cusparseLt.h"
#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE
#ifdef _WIN32
#include <malloc.h>
#define MLIR_CUDA_WRAPPERS_EXPORT __declspec(dllexport)
#else
#define MLIR_CUDA_WRAPPERS_EXPORT __attribute__((visibility("default")))
#endif // _WIN32
#define CUDA_REPORT_IF_ERROR(expr)                                            \
  [](CUresult result) {                                                        \
    if (!result)                                                               \
      return;                                                                  \
    const char *name = nullptr;                                                \
    cuGetErrorName(result, &name);                                             \
    if (!name)                                                                 \
      name = "<unknown>";                                                      \
    fprintf(stderr, "'%s' failed with '%s'\n", #expr, name);                   \
  }(expr)
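// Illustrative usage (a sketch, not part of the original file): the macro
// wraps a driver call so that a failure is logged to stderr while execution
// continues, e.g.
//
//   CUDA_REPORT_IF_ERROR(cuStreamSynchronize(stream));
//
// On failure this prints: 'cuStreamSynchronize(stream)' failed with
// '<error name>'.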
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
#define CUSPARSE_REPORT_IF_ERROR(expr)                                        \
  {                                                                           \
    cusparseStatus_t status = (expr);                                         \
    if (status != CUSPARSE_STATUS_SUCCESS) {                                  \
      fprintf(stderr, "cuSPARSE '%s' failed with '%s'\n", #expr,              \
              cusparseGetErrorString(status));                                \
    }                                                                         \
  }
#endif // MLIR_ENABLE_CUDA_CUSPARSE
/// Helper method that checks the environment variable enabling debug output.
bool isDebugEnabled() {
  const char *kDebugEnvironmentVariable = "MLIR_CUDA_DEBUG";
  static bool isEnabled = getenv(kDebugEnvironmentVariable) != nullptr;
  return isEnabled;
}
#define debug_print(fmt, ...)                                                 \
  do {                                                                        \
    if (isDebugEnabled())                                                     \
      fprintf(stderr, "%s:%d:%s(): " fmt, "CudaRuntimeWrappers.cpp",          \
              __LINE__, __func__, __VA_ARGS__);                               \
  } while (0)
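// Illustrative usage (a sketch, not part of the original file): debug output
// is gated on the MLIR_CUDA_DEBUG environment variable, so a hypothetical
//
//   debug_print("allocated %llu bytes\n", sizeBytes);
//
// prints nothing unless the process was started with MLIR_CUDA_DEBUG set.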
// Returns the default CUDA device; see mgpuSetDefaultDevice.
thread_local static int32_t defaultDevice = 0;

CUdevice getDefaultCuDevice() {
  CUdevice device;
  CUDA_REPORT_IF_ERROR(cuDeviceGet(&device, /*ordinal=*/defaultDevice));
  return device;
}

// Make the primary context of the current default device current for the
// duration of the instantiation and destruction of this object.
class ScopedContext {
public:
  ScopedContext() {
    // Static reference to the CUDA primary context for device ordinal
    // `defaultDevice`, initialized exactly once.
    static CUcontext context = [] {
      CUDA_REPORT_IF_ERROR(cuInit(/*flags=*/0));
      CUcontext ctx;
      // Note: this does not affect the current context.
      CUDA_REPORT_IF_ERROR(
          cuDevicePrimaryCtxRetain(&ctx, getDefaultCuDevice()));
      return ctx;
    }();

    CUDA_REPORT_IF_ERROR(cuCtxPushCurrent(context));
  }

  ~ScopedContext() { CUDA_REPORT_IF_ERROR(cuCtxPopCurrent(nullptr)); }
};
#ifdef MLIR_ENABLE_CUDA_CUSPARSE
// Global handle for the cuSPARSE library, created by mgpuCreateSparseEnv.
static cusparseHandle_t cusparse_env = nullptr;

#ifdef MLIR_ENABLE_CUDA_CUSPARSELT
// cusparseLtHandle_t is not a pointer type, so a separate flag tracks
// whether it has been initialized.
static cusparseLtHandle_t cusparseLt_env;
static bool cusparseLt_initiated = false;
#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule
mgpuModuleLoad(void *data, size_t /*gpuBlobSize*/) {
  ScopedContext scopedContext;
  CUmodule module = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleLoadData(&module, data));
  return module;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUmodule mgpuModuleLoadJIT(void *data,
                                                                int optLevel) {
  ScopedContext scopedContext;
  CUmodule module = nullptr;
  char jitErrorBuffer[4096] = {0};
  CUjit_option jitOptions[] = {CU_JIT_ERROR_LOG_BUFFER,
                               CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                               CU_JIT_OPTIMIZATION_LEVEL};
  void *jitOptionsVals[] = {jitErrorBuffer,
                            reinterpret_cast<void *>(sizeof(jitErrorBuffer)),
                            reinterpret_cast<void *>(optLevel)};

  CUresult result =
      cuModuleLoadDataEx(&module, data, 3, jitOptions, jitOptionsVals);
  if (result) {
    fprintf(stderr, "JIT compilation failed with: '%s'\n", jitErrorBuffer);
    CUDA_REPORT_IF_ERROR(result);
  }
  return module;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUfunction
mgpuModuleGetFunction(CUmodule module, const char *name) {
  CUfunction function = nullptr;
  CUDA_REPORT_IF_ERROR(cuModuleGetFunction(&function, module, name));
  return function;
}
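// Illustrative host-side flow (a sketch, not part of the original file):
// load a compiled cubin blob and resolve a kernel by name. `cubinData`,
// `cubinSize`, and "my_kernel" are hypothetical.
//
//   CUmodule module = mgpuModuleLoad(cubinData, cubinSize);
//   CUfunction fn = mgpuModuleGetFunction(module, "my_kernel");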
// The wrapper uses intptr_t instead of CUDA's unsigned int to match the
// signless semantics of MLIR's index type.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuLaunchKernel(CUfunction function, intptr_t gridX, intptr_t gridY,
                 intptr_t gridZ, intptr_t blockX, intptr_t blockY,
                 intptr_t blockZ, int32_t smem, CUstream stream, void **params,
                 void **extra, size_t /*paramsCount*/) {
  ScopedContext scopedContext;
  if (smem > 0) {
    // Query the device limit only when dynamic shared memory is requested.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%dkb) is larger than maximum allowed "
              "shared memory (%dkb) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  debug_print("Launching kernel, grid: %ld, %ld, %ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %dkb\n",
              gridX, gridY, gridZ, blockX, blockY, blockZ, smem);
  CUDA_REPORT_IF_ERROR(cuLaunchKernel(function, gridX, gridY, gridZ, blockX,
                                      blockY, blockZ, smem, stream, params,
                                      extra));
}
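// Illustrative launch (a sketch, not part of the original file): launch the
// resolved kernel on a fresh stream with a 1-D grid; `fn` and `args` are
// hypothetical.
//
//   CUstream stream = mgpuStreamCreate();
//   mgpuLaunchKernel(fn, /*grid=*/256, 1, 1, /*block=*/128, 1, 1,
//                    /*smem=*/0, stream, args, /*extra=*/nullptr, 0);
//   mgpuStreamSynchronize(stream);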
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUstream mgpuStreamCreate() {
  ScopedContext scopedContext;
  CUstream stream = nullptr;
  CUDA_REPORT_IF_ERROR(cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING));
  return stream;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT CUevent mgpuEventCreate() {
  ScopedContext scopedContext;
  CUevent event = nullptr;
  CUDA_REPORT_IF_ERROR(cuEventCreate(&event, CU_EVENT_DISABLE_TIMING));
  return event;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuMemAlloc(uint64_t sizeBytes, CUstream stream, bool isHostShared) {
  ScopedContext scopedContext;
  CUdeviceptr ptr = 0;
  if (sizeBytes == 0)
    return reinterpret_cast<void *>(ptr);
  if (isHostShared) {
    CUDA_REPORT_IF_ERROR(
        cuMemAllocManaged(&ptr, sizeBytes, CU_MEM_ATTACH_GLOBAL));
    return reinterpret_cast<void *>(ptr);
  }
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&ptr, sizeBytes));
  return reinterpret_cast<void *>(ptr);
}
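// Illustrative usage (a sketch, not part of the original file): passing
// isHostShared=true returns managed (unified) memory accessible from both
// host and device; false returns device-only memory.
//
//   void *devBuf = mgpuMemAlloc(1024, stream, /*isHostShared=*/false);
//   void *uniBuf = mgpuMemAlloc(1024, stream, /*isHostShared=*/true);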
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemcpy(void *dst, void *src, size_t sizeBytes, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemcpyAsync(reinterpret_cast<CUdeviceptr>(dst),
                                     reinterpret_cast<CUdeviceptr>(src),
                                     sizeBytes, stream));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset32(void *dst, unsigned int value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD32Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemset16(void *dst, unsigned short value, size_t count, CUstream stream) {
  CUDA_REPORT_IF_ERROR(cuMemsetD16Async(reinterpret_cast<CUdeviceptr>(dst),
                                        value, count, stream));
}
/// Registers a memref with the CUDA runtime. `descriptor` is a pointer to a
/// ranked memref descriptor struct of rank `rank`.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostRegisterMemRef(int64_t rank, StridedMemRefType<char, 1> *descriptor,
                          int64_t elementSizeBytes) {
  // Only densely packed tensors are currently supported.
#ifdef _WIN32
  int64_t *denseStrides = (int64_t *)_alloca(rank * sizeof(int64_t));
#else
  int64_t *denseStrides = (int64_t *)alloca(rank * sizeof(int64_t));
#endif // _WIN32
  int64_t *sizes = descriptor->sizes;
  for (int64_t i = rank - 1, runningStride = 1; i >= 0; i--) {
    denseStrides[i] = runningStride;
    runningStride *= sizes[i];
  }
  uint64_t sizeBytes = sizes[0] * denseStrides[0] * elementSizeBytes;
  int64_t *strides = &sizes[rank];
  (void)strides;
  for (unsigned i = 0; i < rank; ++i)
    assert(strides[i] == denseStrides[i] &&
           "Mismatch in computed dense strides");

  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostRegister(ptr, sizeBytes);
}
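// Worked example (not part of the original file): for a 2x3x4 memref of f32
// (elementSizeBytes = 4), the dense strides computed above are {12, 4, 1}
// and sizeBytes = 2 * 12 * 4 = 96, i.e. the full contiguous allocation.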
/// Unregisters a memref with the CUDA runtime.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuMemHostUnregisterMemRef(int64_t rank,
                            StridedMemRefType<char, 1> *descriptor,
                            int64_t elementSizeBytes) {
  auto *ptr = descriptor->data + descriptor->offset * elementSizeBytes;
  mgpuMemHostUnregister(ptr);
}
#if (CUDA_VERSION >= 12000)
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuLaunchClusterKernel(
    CUfunction function, intptr_t clusterX, intptr_t clusterY,
    intptr_t clusterZ, intptr_t gridX, intptr_t gridY, intptr_t gridZ,
    intptr_t blockX, intptr_t blockY, intptr_t blockZ, int32_t smem,
    CUstream stream, void **params, void **extra, size_t /*paramsCount*/) {
  ScopedContext scopedContext;
  if (smem > 0) {
    // Query the device limit only when dynamic shared memory is requested.
    int32_t maxShmem = 0;
    CUdevice device = getDefaultCuDevice();
    CUDA_REPORT_IF_ERROR(cuDeviceGetAttribute(
        &maxShmem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN,
        device));
    if (maxShmem < smem) {
      fprintf(stderr,
              "Requested shared memory (%dkb) is larger than maximum allowed "
              "shared memory (%dkb) for this device\n",
              smem, maxShmem);
    }
    CUDA_REPORT_IF_ERROR(cuFuncSetAttribute(
        function, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem));
  }
  CUlaunchConfig config;
  config.gridDimX = gridX;
  config.gridDimY = gridY;
  config.gridDimZ = gridZ;
  config.blockDimX = blockX;
  config.blockDimY = blockY;
  config.blockDimZ = blockZ;
  config.sharedMemBytes = smem;
  config.hStream = stream;
  CUlaunchAttribute launchAttr[2];
  launchAttr[0].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_DIMENSION;
  launchAttr[0].value.clusterDim.x = clusterX;
  launchAttr[0].value.clusterDim.y = clusterY;
  launchAttr[0].value.clusterDim.z = clusterZ;
  launchAttr[1].id = CU_LAUNCH_ATTRIBUTE_CLUSTER_SCHEDULING_POLICY_PREFERENCE;
  launchAttr[1].value.clusterSchedulingPolicyPreference =
      CU_CLUSTER_SCHEDULING_POLICY_SPREAD;
  config.numAttrs = 2;
  config.attrs = launchAttr;

  debug_print("Launching kernel, "
              "cluster: %ld, %ld, %ld, "
              "grid: %ld, %ld, %ld, "
              "threads: %ld, %ld, %ld, "
              "smem: %dkb\n",
              clusterX, clusterY, clusterZ, gridX, gridY, gridZ, blockX,
              blockY, blockZ, smem);
  CUDA_REPORT_IF_ERROR(cuLaunchKernelEx(&config, function, params, extra));
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuTensorMapEncodeTiled(
    CUtensorMap *tensorMap,             // Tensor map object
    CUtensorMapDataType tensorDataType, // Tensor data type
    cuuint32_t tensorRank,              // Dimensionality of tensor
    void *globalAddress,                // Starting address
    const cuuint64_t *globalDim,        // Tensor size (number of elements)
    const cuuint64_t *globalStrides,    // Stride size (in bytes)
    const cuuint32_t *boxDim,           // Traversal box (number of elements)
    const cuuint32_t *elementStrides,   // Traversal stride
    CUtensorMapInterleave interleave,   // Type of interleaved layout
    CUtensorMapSwizzle swizzle,         // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion, // L2 promotion size
    CUtensorMapFloatOOBfill oobFill     // Padding zfill or NaN fill
) {
  ScopedContext scopedContext;
  CUDA_REPORT_IF_ERROR(cuTensorMapEncodeTiled(
      tensorMap, tensorDataType, tensorRank, globalAddress, globalDim,
      globalStrides, boxDim, elementStrides, interleave, swizzle, l2Promotion,
      oobFill));
  debug_print("Created TMA descriptor\n Addr: %p\n"
              "data type : %d\n"
              "rank : %d\n"
              "globalDim[5]: %zu, %zu, %zu, %zu, %zu\n"
              "globalStrides[5]: %zu, %zu, %zu, %zu, %zu\n"
              "boxDim[5]: %u, %u, %u, %u, %u\n"
              "elementStrides[5]: %u, %u, %u, %u, %u\n"
              "interleave: %u\n"
              "swizzle: %u\n"
              "l2Promotion: %u\n"
              "oobFill: %u\n",
              (void *)&tensorMap, tensorDataType, tensorRank, globalDim[0],
              globalDim[1], globalDim[2], globalDim[3], globalDim[4],
              globalStrides[0], globalStrides[1], globalStrides[2],
              globalStrides[3], globalStrides[4], boxDim[0], boxDim[1],
              boxDim[2], boxDim[3], boxDim[4], elementStrides[0],
              elementStrides[1], elementStrides[2], elementStrides[3],
              elementStrides[4], interleave, swizzle, l2Promotion, oobFill);
}
template <int Rank>
void mgpuGetMemRefDataAndShape(void *rawDescriptor, char **addr,
                               uint64_t *globalDim, uint64_t *globalStrides,
                               const CUtensorMapDataType tensorDataType) {
  auto descriptor =
      reinterpret_cast<StridedMemRefType<char, Rank> *>(rawDescriptor);
  *addr = descriptor->data;
  for (int i = 0; i < Rank; ++i) {
    globalDim[i] = static_cast<uint64_t>(descriptor->sizes[Rank - i - 1]);
  }
  static constexpr int elementSizeInBytes[] = {1, 2, 4, 4, 8, 8, 2,
                                               4, 8, 2, 4, 4, 4};
  for (int i = 0; i < Rank - 1; ++i) {
    globalStrides[i] = static_cast<uint64_t>(
        descriptor->strides[Rank - i - 2] * elementSizeInBytes[tensorDataType]);
  }
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *mgpuTensorMapEncodeTiledMemref(
    int64_t tensorRank,                       // Dimensionality of tensor
    void *rankedDescriptor,                   // Ranked MemRef descriptor
    const CUtensorMapDataType tensorDataType, // Tensor data type
    CUtensorMapInterleave interleave,         // Type of interleaved layout
    CUtensorMapSwizzle swizzle,               // Bank swizzling pattern
    CUtensorMapL2promotion l2Promotion,       // L2 promotion size
    CUtensorMapFloatOOBfill oobFill,          // Padding zfill or NaN fill
    int64_t *inputBoxDims                     // Box dimensions (tile sizes)
) {
  CUtensorMap tensorMap;

  uint32_t boxDim[5] = {1, 1, 1, 1, 1}, elementStrides[5] = {1, 1, 1, 1, 1};
  uint64_t globalDim[5] = {1, 1, 1, 1, 1}, globalStrides[5] = {0};
  uint32_t tensorRank32 = uint32_t(tensorRank);

  char *globalAddress = nullptr;
  switch (tensorRank) {
  case 1:
    mgpuGetMemRefDataAndShape<1>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 2:
    mgpuGetMemRefDataAndShape<2>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 3:
    mgpuGetMemRefDataAndShape<3>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 4:
    mgpuGetMemRefDataAndShape<4>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  case 5:
    mgpuGetMemRefDataAndShape<5>(rankedDescriptor, &globalAddress, globalDim,
                                 globalStrides, tensorDataType);
    break;
  default:
    fprintf(
        stderr,
        "'mgpuTensorMapEncodeTiledMemref' failed with 'rank is too high'\n");
    return nullptr;
  }

  for (int64_t r = 0; r < tensorRank; ++r) {
    boxDim[r] = static_cast<uint32_t>(inputBoxDims[tensorRank - r - 1]);
  }

  ScopedContext scopedContext;
  mgpuTensorMapEncodeTiled(&tensorMap, tensorDataType, tensorRank32,
                           globalAddress, globalDim, globalStrides, boxDim,
                           elementStrides, interleave, swizzle, l2Promotion,
                           oobFill);
  // Copy the descriptor to device memory so kernels can consume it.
  CUdeviceptr dTensorMap;
  CUDA_REPORT_IF_ERROR(cuMemAlloc(&dTensorMap, sizeof(CUtensorMap)));
  CUDA_REPORT_IF_ERROR(cuMemcpy(dTensorMap,
                                reinterpret_cast<CUdeviceptr>(&tensorMap),
                                sizeof(CUtensorMap)));
  return reinterpret_cast<void *>(dTensorMap);
}
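// Illustrative usage (a sketch, not part of the original file): encode a TMA
// descriptor for a row-major 2-D f32 memref with 64x64 tiles; `descriptor`
// and the box sizes are hypothetical.
//
//   int64_t boxDims[2] = {64, 64};
//   void *dMap = mgpuTensorMapEncodeTiledMemref(
//       /*tensorRank=*/2, descriptor, CU_TENSOR_MAP_DATA_TYPE_FLOAT32,
//       CU_TENSOR_MAP_INTERLEAVE_NONE, CU_TENSOR_MAP_SWIZZLE_NONE,
//       CU_TENSOR_MAP_L2_PROMOTION_NONE, CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE,
//       boxDims);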
#ifdef MLIR_ENABLE_CUDA_CUSPARSE

///
/// Wrapper methods for the cuSparse library.
///
// Some macro magic to get float/double alpha and beta on host.
// TODO: add support for passing alpha and beta as arguments.
#define ALPHABETA(dtp, alpha, beta)                                           \
  __nv_bfloat16(alpha##16bf) = 1.0f;                                          \
  __nv_bfloat16(beta##16bf) = 1.0f;                                           \
  __half(alpha##16f) = 1.0f;                                                  \
  __half(beta##16f) = 1.0f;                                                   \
  float(alpha##f) = 1.0f;                                                     \
  float(beta##f) = 1.0f;                                                      \
  double(alpha##d) = 1.0;                                                     \
  double(beta##d) = 1.0;                                                      \
  const void *(alpha##p) = nullptr;                                           \
  const void *(beta##p) = nullptr;                                            \
  if (dtp == CUDA_R_16BF || dtp == CUDA_C_16BF) {                             \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16bf));                    \
    (beta##p) = reinterpret_cast<void *>(&(beta##16bf));                      \
  } else if (dtp == CUDA_R_16F || dtp == CUDA_C_16F) {                        \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##16f));                     \
    (beta##p) = reinterpret_cast<void *>(&(beta##16f));                       \
  } else if (dtp == CUDA_R_32F || dtp == CUDA_C_32F) {                        \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##f));                       \
    (beta##p) = reinterpret_cast<void *>(&(beta##f));                         \
  } else {                                                                    \
    (alpha##p) = reinterpret_cast<void *>(&(alpha##d));                       \
    (beta##p) = reinterpret_cast<void *>(&(beta##d));                         \
  }
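// Illustrative expansion (not part of the original file): ALPHABETA(cTp,
// alpha, beta) declares alpha16bf/alpha16f/alphaf/alphad (and the beta
// variants), all set to 1, plus `const void *alphap/betap` pointing at the
// variant matching the runtime data type. E.g. when cTp == CUDA_R_32F:
//
//   ALPHABETA(cTp, alpha, beta) // alphap == &alphaf, betap == &betaf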
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseEnv() {
  // ScopedContext is for CUDA initialization.
  ScopedContext scopedContext;
  assert(!cusparse_env && "client called mgpuCreateSparseEnv() twice");
  CUSPARSE_REPORT_IF_ERROR(cusparseCreate(&cusparse_env))
}

extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseEnv() {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroy(cusparse_env))
  cusparse_env = nullptr;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnVec(intptr_t size, void *values, int32_t dtp,
                CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnVec(&vec, size, values, dTp))
  return reinterpret_cast<void *>(vec);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnVec(void *v, CUstream /*stream*/) {
  cusparseDnVecDescr_t vec = reinterpret_cast<cusparseDnVecDescr_t>(v);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnVec(vec))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateDnMat(intptr_t rows, intptr_t cols, void *values, int32_t dtp,
                CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = nullptr;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major order when deciding the leading dimension.
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateDnMat(&mat, rows, cols, /*ld=*/cols,
                                               values, dTp,
                                               CUSPARSE_ORDER_ROW))
  return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyDnMat(void *m, CUstream /*stream*/) {
  cusparseDnMatDescr_t mat = reinterpret_cast<cusparseDnMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroyDnMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCoo(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowIdxs,
              void *colIdxs, void *values, int32_t itp, int32_t dtp,
              CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCoo(&mat, rows, cols, nnz, rowIdxs,
                                             colIdxs, values, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
#ifdef CUSPARSE_COO_AOS // deprecated in cuSPARSE 11.2
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCooAoS(intptr_t rows, intptr_t cols, intptr_t nnz, void *idxs,
                 void *values, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCooAoS(
      &mat, rows, cols, nnz, idxs, values, iTp, CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
#endif // CUSPARSE_COO_AOS
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsr(intptr_t rows, intptr_t cols, intptr_t nnz, void *rowPos,
              void *colIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsr(&mat, rows, cols, nnz, rowPos,
                                             colIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
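// Illustrative usage (a sketch, not part of the original file): wrap
// device-resident CSR buffers of a 4x4 matrix with 9 nonzeros. `dRowPos`,
// `dColIdxs`, and `dValues` are hypothetical device pointers; the integer
// type codes mirror the cusparseIndexType_t/cudaDataType_t enum values that
// the casts above expect.
//
//   void *csr = mgpuCreateCsr(4, 4, 9, dRowPos, dColIdxs, dValues,
//                             /*ptp=*/CUSPARSE_INDEX_32I,
//                             /*itp=*/CUSPARSE_INDEX_32I,
//                             /*dtp=*/CUDA_R_32F, stream);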
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateCsc(intptr_t rows, intptr_t cols, intptr_t nnz, void *colPos,
              void *rowIdxs, void *values, int32_t ptp, int32_t itp,
              int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateCsc(&mat, rows, cols, nnz, colPos,
                                             rowIdxs, values, pTp, iTp,
                                             CUSPARSE_INDEX_BASE_ZERO, dTp))
  return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuCreateBsr(intptr_t brows, intptr_t bcols, intptr_t bnnz, intptr_t rBsz,
              intptr_t cBsz, void *rowPos, void *colIdxs, void *values,
              int32_t ptp, int32_t itp, int32_t dtp, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = nullptr;
#if CUSPARSE_VERSION >= 12100
  auto pTp = static_cast<cusparseIndexType_t>(ptp);
  auto iTp = static_cast<cusparseIndexType_t>(itp);
  auto dTp = static_cast<cudaDataType_t>(dtp);
  CUSPARSE_REPORT_IF_ERROR(cusparseCreateBsr(
      &mat, brows, bcols, bnnz, rBsz, cBsz, rowPos, colIdxs, values, pTp, iTp,
      CUSPARSE_INDEX_BASE_ZERO, dTp, CUSPARSE_ORDER_ROW))
#endif // CUSPARSE_VERSION >= 12100
  return reinterpret_cast<void *>(mat);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroySpMat(void *m, CUstream /*stream*/) {
  cusparseSpMatDescr_t mat = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseDestroySpMat(mat))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpMVBufferSize(int32_t ma, void *a, void *x, void *y, int32_t ctp,
                   CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV_bufferSize(
      cusparse_env, modeA, alphap, matA, vecX, betap, vecY, cTp,
      CUSPARSE_SPMV_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMV(int32_t ma, void *a, void *x, void *y, int32_t ctp, void *buf,
         CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnVecDescr_t vecX = reinterpret_cast<cusparseDnVecDescr_t>(x);
  cusparseDnVecDescr_t vecY = reinterpret_cast<cusparseDnVecDescr_t>(y);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMV(cusparse_env, modeA, alphap, matA,
                                        vecX, betap, vecY, cTp,
                                        CUSPARSE_SPMV_ALG_DEFAULT, buf))
}
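// Illustrative two-phase usage (a sketch, not part of the original file):
// query the workspace size first, allocate it, then run SpMV. `spMat`,
// `dnX`, and `dnY` are hypothetical descriptors created by the wrappers
// above.
//
//   intptr_t bufSz = mgpuSpMVBufferSize(ma, spMat, dnX, dnY, ctp, stream);
//   void *buf = mgpuMemAlloc(bufSz, stream, /*isHostShared=*/false);
//   mgpuSpMV(ma, spMat, dnX, dnY, ctp, buf, stream);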
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                   int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMM(int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
         void *buf, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseDnMatDescr_t matC = reinterpret_cast<cusparseDnMatDescr_t>(c);
  cudaDataType_t cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMM(cusparse_env, modeA, modeB, alphap,
                                        matA, matB, betap, matC, cTp,
                                        CUSPARSE_SPMM_ALG_DEFAULT, buf))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSDDMMBufferSize(int32_t ma, int32_t mb, void *a, void *b, void *c,
                    int32_t ctp, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t bufferSize = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM_bufferSize(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SDDMM_ALG_DEFAULT, &bufferSize))
  return bufferSize;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSDDMM(int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
          void *buf, CUstream /*stream*/) {
  assert(cusparse_env && "client did not call mgpuCreateSparseEnv()");
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseDnMatDescr_t matA = reinterpret_cast<cusparseDnMatDescr_t>(a);
  cusparseDnMatDescr_t matB = reinterpret_cast<cusparseDnMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(cusparseSDDMM(cusparse_env, modeA, modeB, alphap,
                                         matA, matB, betap, matC, cTp,
                                         CUSPARSE_SDDMM_ALG_DEFAULT, buf))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void *
mgpuSpGEMMCreateDescr(CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc = nullptr;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_createDescr(&spgemmDesc))
  return reinterpret_cast<void *>(spgemmDesc);
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMDestroyDescr(void *s, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc =
      reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_destroyDescr(spgemmDesc))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t mgpuSpGEMMWorkEstimation(
    void *s, int32_t ma, int32_t mb, void *a, void *b, void *c, int32_t ctp,
    intptr_t bs, void *buf, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc =
      reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize = bs;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_workEstimation(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize, buf))
  return newBufferSize;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT intptr_t
mgpuSpGEMMCompute(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
                  int32_t ctp, intptr_t bsz2, void *buf2,
                  CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc =
      reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  size_t newBufferSize2 = bsz2;
  CUSPARSE_REPORT_IF_ERROR(cusparseSpGEMM_compute(
      cusparse_env, modeA, modeB, alphap, matA, matB, betap, matC, cTp,
      CUSPARSE_SPGEMM_DEFAULT, spgemmDesc, &newBufferSize2, buf2))
  return newBufferSize2;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpGEMMCopy(void *s, int32_t ma, int32_t mb, void *a, void *b, void *c,
               int32_t ctp, CUstream /*stream*/) {
  cusparseSpGEMMDescr_t spgemmDesc =
      reinterpret_cast<cusparseSpGEMMDescr_t>(s);
  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  cusparseSpMatDescr_t matA = reinterpret_cast<cusparseSpMatDescr_t>(a);
  cusparseSpMatDescr_t matB = reinterpret_cast<cusparseSpMatDescr_t>(b);
  cusparseSpMatDescr_t matC = reinterpret_cast<cusparseSpMatDescr_t>(c);
  auto cTp = static_cast<cudaDataType_t>(ctp);
  ALPHABETA(cTp, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseSpGEMM_copy(cusparse_env, modeA, modeB, alphap, matA, matB,
                          betap, matC, cTp, CUSPARSE_SPGEMM_DEFAULT,
                          spgemmDesc))
}
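// Illustrative SpGEMM sequence (a sketch, not part of the original file):
// cuSPARSE's SpGEMM API is staged, so the wrappers are called in order:
// create a descriptor, estimate work (once to query the size, once with the
// buffer), compute (likewise), copy the result, then destroy the descriptor.
//
//   void *d = mgpuSpGEMMCreateDescr(stream);
//   intptr_t sz1 = mgpuSpGEMMWorkEstimation(d, ma, mb, A, B, C, ctp, 0,
//                                           nullptr, stream);
//   void *buf1 = mgpuMemAlloc(sz1, stream, false);
//   mgpuSpGEMMWorkEstimation(d, ma, mb, A, B, C, ctp, sz1, buf1, stream);
//   intptr_t sz2 = mgpuSpGEMMCompute(d, ma, mb, A, B, C, ctp, 0, nullptr,
//                                    stream);
//   void *buf2 = mgpuMemAlloc(sz2, stream, false);
//   mgpuSpGEMMCompute(d, ma, mb, A, B, C, ctp, sz2, buf2, stream);
//   mgpuSpGEMMCopy(d, ma, mb, A, B, C, ctp, stream);
//   mgpuSpGEMMDestroyDescr(d, stream);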
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSpMatGetSize(void *m, void *r, void *c, void *n, CUstream /*stream*/) {
  cusparseConstSpMatDescr_t matDescr =
      reinterpret_cast<cusparseConstSpMatDescr_t>(m);
  int64_t *rows = reinterpret_cast<int64_t *>(r);
  int64_t *cols = reinterpret_cast<int64_t *>(c);
  int64_t *nnz = reinterpret_cast<int64_t *>(n);
  CUSPARSE_REPORT_IF_ERROR(cusparseSpMatGetSize(matDescr, rows, cols, nnz))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuSetCsrPointers(void *m, void *p, void *c, void *v, CUstream /*stream*/) {
  cusparseSpMatDescr_t matDescr = reinterpret_cast<cusparseSpMatDescr_t>(m);
  CUSPARSE_REPORT_IF_ERROR(cusparseCsrSetPointers(matDescr, p, c, v))
}
#ifdef MLIR_ENABLE_CUDA_CUSPARSELT

///
/// Wrapper methods for the cuSparseLt library.
///

struct cusparseLtSpMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  // TODO: the following three are associated with the SpMM operator rather
  // than the sparse matrix. Create workspace buffers and pass them to the
  // SpMM execution.
  cusparseLtMatmulAlgSelection_t alg_sel;
  cusparseLtMatmulPlan_t plan;
  cusparseLtMatmulDescriptor_t matmul;
  void *values{nullptr};
};

struct cusparseLtDnMatHandleAndData {
  cusparseLtMatDescriptor_t mat;
  void *values{nullptr};
};
static_assert(sizeof(cusparseLtHandle_t) == 11024,
              "Unexpected cusparseLt handle size");
static_assert(sizeof(cusparseLtSpMatHandleAndData) == 44104,
              "Unexpected cusparseLt sparse matrix handle size");
static_assert(sizeof(cusparseLtDnMatHandleAndData) == 11032,
              "Unexpected cusparseLt dense matrix handle size");
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuCreateSparseLtEnv() {
  // ScopedContext is for CUDA initialization.
  ScopedContext scopedContext;
  assert(!cusparseLt_initiated &&
         "client called mgpuCreateSparseLtEnv() twice");
  CUSPARSE_REPORT_IF_ERROR(cusparseLtInit(&cusparseLt_env))
  cusparseLt_initiated = true;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void mgpuDestroySparseLtEnv() {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDestroy(&cusparseLt_env))
  cusparseLt_initiated = false;
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCreateCuSparseLtDnMat(void *dh, intptr_t rows, intptr_t cols, void *values,
                          int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  dnmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major order when deciding the leading dimension.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtDenseDescriptorInit(
      &cusparseLt_env, &(dnmat_handle->mat), rows, cols, /*ld=*/cols,
      alignment, dTp, CUSPARSE_ORDER_ROW))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtDnMat(void *dh, CUstream /*stream*/) {
  auto dnmat_handle = reinterpret_cast<cusparseLtDnMatHandleAndData *>(dh);
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatDescriptorDestroy(&(dnmat_handle->mat)))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCusparseLtCreate2To4SpMat(void *sh, intptr_t rows, intptr_t cols,
                              void *values, int32_t dtp, CUstream /*stream*/) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  spmat_handle->values = values;
  auto dTp = static_cast<cudaDataType_t>(dtp);
  // Assume row-major order when deciding the leading dimension.
  const uint32_t alignment = 16;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtStructuredDescriptorInit(
      &cusparseLt_env, &(spmat_handle->mat), rows, cols, /*ld=*/cols,
      alignment, dTp, CUSPARSE_ORDER_ROW, CUSPARSELT_SPARSITY_50_PERCENT))
}
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuDestroyCuSparseLtSpMat(void *sh, CUstream /*stream*/) {
  auto spmat_handle = reinterpret_cast<cusparseLtSpMatHandleAndData *>(sh);
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatDescriptorDestroy(&(spmat_handle->mat)))
}
// Several things are done in this stage: algorithm selection, planning, and
// computing the workspace and compressed-matrix buffer sizes. The parameter
// `prune_flag` controls pruning: 0 means no prune and no prune check, 1 means
// prune, 2 means prune and prune check.
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCuSparseLtSpMMBufferSize(void *bs, int32_t ma, int32_t mb, void *a,
                             void *b, void *c, int32_t ctp, int32_t prune_flag,
                             CUstream stream) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  // TODO: support more advanced settings, e.g., a sparse right operand.
  // For now matA is assumed to be the sparse matrix.
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);
  auto workspace_size = reinterpret_cast<size_t *>(bs);
  auto compressed_size = &(reinterpret_cast<size_t *>(bs)[1]);
  auto compressed_buffer_size = &(reinterpret_cast<size_t *>(bs)[2]);
  auto cTp = static_cast<cusparseComputeType>(ctp);

  cusparseOperation_t modeA = static_cast<cusparseOperation_t>(ma);
  cusparseOperation_t modeB = static_cast<cusparseOperation_t>(mb);
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulDescriptorInit(
      &cusparseLt_env, &(matA->matmul), modeA, modeB, &(matA->mat),
      &(matB->mat), &(matC->mat), &(matC->mat), cTp))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSelectionInit(
      &cusparseLt_env, &(matA->alg_sel), &(matA->matmul),
      CUSPARSELT_MATMUL_ALG_DEFAULT))
  int32_t alg = 0;
  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulAlgSetAttribute(
      &cusparseLt_env, &(matA->alg_sel), CUSPARSELT_MATMUL_ALG_CONFIG_ID, &alg,
      sizeof(alg)))

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulPlanInit(
      &cusparseLt_env, &(matA->plan), &(matA->matmul), &(matA->alg_sel)))

  // Pruning step (in-place).
  if (prune_flag > 0)
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPrune(
        &cusparseLt_env, &(matA->matmul), matA->values, matA->values,
        CUSPARSELT_PRUNE_SPMMA_STRIP, stream))

  // Check the structure of matA. Note that this adds a synchronization on
  // the stream.
  if (prune_flag == 2) {
    int *dvalid = (int *)mgpuMemAlloc(sizeof(int), stream, false);
    CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMAPruneCheck(
        &cusparseLt_env, &(matA->matmul), matA->values, dvalid, stream))
    int valid = 0;
    mgpuMemcpy(&valid, dvalid, sizeof(int), stream);
    mgpuStreamSynchronize(stream);
    mgpuMemFree(dvalid, stream);
    if (valid != 0)
      fprintf(stderr, "CUPARSE-LT: sparse matrix is not 2:4; computed results "
                      "will be invalid\n");
  }

  CUSPARSE_REPORT_IF_ERROR(cusparseLtMatmulGetWorkspace(
      &cusparseLt_env, &(matA->plan), workspace_size))
  CUSPARSE_REPORT_IF_ERROR(cusparseLtSpMMACompressedSize(
      &cusparseLt_env, &(matA->plan), compressed_size, compressed_buffer_size))
}
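// Illustrative usage (a sketch, not part of the original file): `bs` points
// at three consecutive size_t slots that this call fills in.
//
//   size_t sizes[3]; // {workspace, compressed, compressed buffer}
//   mgpuCuSparseLtSpMMBufferSize(sizes, ma, mb, A, B, C, ctp,
//                                /*prune_flag=*/1, stream);
//   void *workspace = mgpuMemAlloc(sizes[0], stream, false);
//   void *compressed = mgpuMemAlloc(sizes[1], stream, false);
//   void *compressedBuf = mgpuMemAlloc(sizes[2], stream, false);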
extern "C" MLIR_CUDA_WRAPPERS_EXPORT void
mgpuCuSparseLtSpMM(void *a, void *b, void *c, void *d_workspace,
                   void *dA_compressed, void *dA_compressedBuffer,
                   CUstream stream) {
  assert(cusparseLt_initiated &&
         "client did not call mgpuCreateSparseLtEnv()");
  auto matA = reinterpret_cast<cusparseLtSpMatHandleAndData *>(a);
  auto matB = reinterpret_cast<cusparseLtDnMatHandleAndData *>(b);
  auto matC = reinterpret_cast<cusparseLtDnMatHandleAndData *>(c);

  ALPHABETA(CUDA_R_32F, alpha, beta)
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtSpMMACompress(&cusparseLt_env, &(matA->plan), (matA->values),
                              dA_compressed, dA_compressedBuffer, stream))

  // TODO: add support for multi-stream execution.
  // Perform the matrix multiplication D = A*B+C, using C == D for now.
  CUSPARSE_REPORT_IF_ERROR(
      cusparseLtMatmul(&cusparseLt_env, &(matA->plan), alphap, dA_compressed,
                       matB->values, betap, matC->values,
                       /*dD=*/matC->values, d_workspace, nullptr, 0))
}

#endif // MLIR_ENABLE_CUDA_CUSPARSELT
#endif // MLIR_ENABLE_CUDA_CUSPARSE