From 5a790a38b0623ebca1c3be0fcfbe6c7997f2ec65 Mon Sep 17 00:00:00 2001
From: Zoli Somogyi
Date: Wed, 6 Mar 2024 07:56:41 +0100
Subject: [PATCH] Free cuBLAS GPU memory

I have corrected PR https://github.com/ggerganov/llama.cpp/pull/5576,
which caused a crash, and streamlined the code. Unfortunately, this does
not yet free all of the occupied GPU memory (only about 15% of it). We
still need to identify the objects that are not freed when the GPU
memory is released.
---
 ggml-cuda.cu | 21 +++++++++++++++++----
 ggml-cuda.h  |  3 +++
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 72bcec8cdb17a..8b7e093831b2f 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -8751,10 +8751,11 @@ GGML_CALL bool ggml_cublas_loaded(void) {
     return g_cublas_loaded;
 }
 
+static bool g_cublas_initialized = false;
+
 GGML_CALL void ggml_init_cublas() {
-    static bool initialized = false;
-    if (!initialized) {
+    if (!g_cublas_initialized) {
 
 #ifdef __HIP_PLATFORM_AMD__
         // Workaround for a rocBLAS bug when using multiple graphics cards:
         // https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346
@@ -8764,7 +8765,7 @@ GGML_CALL void ggml_init_cublas() {
 #endif
 
     if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
-        initialized = true;
+        g_cublas_initialized = true;
         g_cublas_loaded = false;
         fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
         return;
@@ -8835,7 +8836,7 @@ GGML_CALL void ggml_init_cublas() {
         // configure logging to stdout
         // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
 
-        initialized = true;
+        g_cublas_initialized = true;
         g_cublas_loaded = true;
     }
 }
@@ -12490,3 +12491,15 @@ GGML_CALL int ggml_backend_cuda_reg_devices() {
     }
     return device_count;
 }
+
+extern "C" GGML_CALL void ggml_free_cublas(void);
+
+GGML_CALL void ggml_free_cublas(void) {
+#ifdef GGML_USE_CUBLAS
+    for (int id = 0; id < g_device_count; ++id) {
+        CUBLAS_CHECK(cublasDestroy(g_cublas_handles[id]));
+        g_cublas_handles[id] = nullptr;
+    }
+    g_cublas_initialized = false;
+#endif
+}
diff --git a/ggml-cuda.h b/ggml-cuda.h
index b1ebd61d7fb66..eac95fd470cb2 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -17,6 +17,9 @@ extern "C" {
 
 #define GGML_CUDA_MAX_DEVICES 16
 
+// Release CUDA resources
+GGML_API GGML_CALL void ggml_free_cublas(void);
+
 // Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`.
 GGML_API GGML_CALL void ggml_init_cublas(void);
 