-
Notifications
You must be signed in to change notification settings - Fork 730
Add a CUDA memory tracker and use it in voxtral runner #15780
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
4 commits
Select commit
Hold shift + click to select a range
38049a0
Add a CUDA memory tracker and use it in voxtral runner
larryliu0820 383b91f
Define CUDA_AVAILABLE when building CUDA backend; wire GPU stats into…
larryliu0820 56f8997
Fix load time calculation
larryliu0820 59e6a65
Link cudart to extension_llm_runner
larryliu0820 File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,192 @@ | ||
| /* | ||
| * Copyright (c) Meta Platforms, Inc. and affiliates. | ||
| * All rights reserved. | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. | ||
| */ | ||
|
|
||
| #pragma once | ||
|
|
||
#include <cuda_runtime.h>

#include <algorithm>
#include <cstdint>
#include <limits>

#include <executorch/runtime/platform/log.h>
|
|
||
| namespace executorch::backends::cuda { | ||
|
|
||
| /** | ||
| * @class CudaMemoryTracker | ||
| * @brief Tracks CUDA memory usage and logs memory state at key points | ||
| * | ||
| * This class provides utilities to query and track CUDA memory usage, | ||
| * including peak memory usage and detailed memory state logging. | ||
| */ | ||
| class CudaMemoryTracker { | ||
| public: | ||
| /** | ||
| * @brief Constructor - initializes tracker and logs startup memory state | ||
| */ | ||
| CudaMemoryTracker() { | ||
| if (!query(&last_free_bytes_, &total_bytes_)) { | ||
| return; | ||
| } | ||
| available_ = true; | ||
| // Record the initial free bytes observed at startup. We'll use this as a | ||
| // baseline so reported "peak usage" reflects additional memory used | ||
| // since the tracker was created (instead of the absolute device usage, | ||
| // which may include other processes). | ||
| initial_free_bytes_ = last_free_bytes_; | ||
| min_free_bytes_ = last_free_bytes_; | ||
| log_state("startup", last_free_bytes_, total_bytes_); | ||
| } | ||
|
|
||
| /** | ||
| * @brief Logs current memory state at a tagged checkpoint | ||
| * @param tag Descriptive tag for this memory sample (e.g., "after_load") | ||
| */ | ||
| void log_sample(const char* tag) { | ||
| if (!available_) { | ||
| return; | ||
| } | ||
| size_t free_bytes = 0; | ||
| size_t total_bytes = 0; | ||
| if (!query(&free_bytes, &total_bytes)) { | ||
| return; | ||
| } | ||
| min_free_bytes_ = std::min(min_free_bytes_, free_bytes); | ||
| total_bytes_ = total_bytes; | ||
| last_free_bytes_ = free_bytes; | ||
| log_state(tag, free_bytes, total_bytes); | ||
| } | ||
|
|
||
| /** | ||
| * @brief Destructor - logs final memory state and peak usage summary | ||
| */ | ||
| ~CudaMemoryTracker() { | ||
| if (!available_) { | ||
| return; | ||
| } | ||
| size_t free_bytes = 0; | ||
| size_t total_bytes = 0; | ||
| if (!query(&free_bytes, &total_bytes)) { | ||
| return; | ||
| } | ||
| min_free_bytes_ = std::min(min_free_bytes_, free_bytes); | ||
| total_bytes_ = total_bytes; | ||
| last_free_bytes_ = free_bytes; | ||
| // Compute peak usage relative to the initial free baseline so that | ||
| // allocations by other processes present at startup are not attributed | ||
| // to this process. If for some reason initial_free_bytes_ was not set, | ||
| // fall back to absolute device usage. | ||
| double peak_mb = 0.0; | ||
| if (initial_free_bytes_ != std::numeric_limits<size_t>::max()) { | ||
| size_t used_delta = 0; | ||
| if (initial_free_bytes_ > min_free_bytes_) { | ||
| used_delta = initial_free_bytes_ - min_free_bytes_; | ||
| } | ||
| peak_mb = static_cast<double>(used_delta) / (1024.0 * 1024.0); | ||
| } else { | ||
| peak_mb = static_cast<double>(total_bytes_ - min_free_bytes_) / | ||
| (1024.0 * 1024.0); | ||
| } | ||
| const double total_mb = | ||
| static_cast<double>(total_bytes_) / (1024.0 * 1024.0); | ||
| ET_LOG( | ||
| Info, | ||
| "CUDA memory peak usage (since startup): %.2f MB, device total: %.2f MB", | ||
| peak_mb, | ||
| total_mb); | ||
| } | ||
|
|
||
| private: | ||
| /** | ||
| * @brief Queries current CUDA memory info | ||
| * @param free_bytes Output parameter for free memory in bytes | ||
| * @param total_bytes Output parameter for total memory in bytes | ||
| * @return true if query succeeded, false otherwise | ||
| */ | ||
| bool query(size_t* free_bytes, size_t* total_bytes) { | ||
| cudaError_t err = cudaMemGetInfo(free_bytes, total_bytes); | ||
| if (err != cudaSuccess) { | ||
| if (!error_logged_) { | ||
| error_logged_ = true; | ||
| ET_LOG( | ||
| Error, | ||
| "cudaMemGetInfo failed with error: %s", | ||
| cudaGetErrorString(err)); | ||
| } | ||
| available_ = false; | ||
| return false; | ||
| } | ||
| return true; | ||
| } | ||
|
|
||
| /** | ||
| * @brief Logs the current memory state | ||
| * @param tag Tag describing this log point | ||
| * @param free_bytes Current free memory in bytes | ||
| * @param total_bytes Current total memory in bytes | ||
| */ | ||
| void log_state(const char* tag, size_t free_bytes, size_t total_bytes) const { | ||
| const double used_mb = | ||
| static_cast<double>(total_bytes - free_bytes) / (1024.0 * 1024.0); | ||
| const double free_mb = static_cast<double>(free_bytes) / (1024.0 * 1024.0); | ||
| const double total_mb = | ||
| static_cast<double>(total_bytes) / (1024.0 * 1024.0); | ||
| ET_LOG( | ||
| Info, | ||
| "CUDA memory (%s): used %.2f MB, free %.2f MB, total %.2f MB", | ||
| tag, | ||
| used_mb, | ||
| free_mb, | ||
| total_mb); | ||
| } | ||
|
|
||
| bool available_{false}; | ||
| bool error_logged_{false}; | ||
| size_t last_free_bytes_{0}; | ||
| size_t total_bytes_{0}; | ||
| size_t min_free_bytes_{std::numeric_limits<size_t>::max()}; | ||
| // Baseline free bytes observed at tracker construction. Used to compute | ||
| // peak usage attributable to this process since the tracker started. | ||
| size_t initial_free_bytes_{std::numeric_limits<size_t>::max()}; | ||
|
|
||
| public: | ||
| // Simple accessors to allow other components to read last-sampled values. | ||
| // These are safe to call after a successful log_sample() invocation. | ||
| uint64_t last_free_bytes() const { | ||
| return static_cast<uint64_t>(last_free_bytes_); | ||
| } | ||
| uint64_t total_bytes() const { | ||
| return static_cast<uint64_t>(total_bytes_); | ||
| } | ||
| uint64_t min_free_bytes() const { | ||
| return static_cast<uint64_t>(min_free_bytes_); | ||
| } | ||
| uint64_t initial_free_bytes() const { | ||
| return static_cast<uint64_t>(initial_free_bytes_); | ||
| } | ||
| double peak_usage_mb() const { | ||
| // Prefer peak relative to the initial free baseline; fall back to | ||
| // absolute device peak if baseline isn't available. | ||
| if (min_free_bytes_ == std::numeric_limits<size_t>::max()) { | ||
| return 0.0; | ||
| } | ||
| if (initial_free_bytes_ != std::numeric_limits<size_t>::max()) { | ||
| size_t used_delta = 0; | ||
| if (initial_free_bytes_ > min_free_bytes_) { | ||
| used_delta = initial_free_bytes_ - min_free_bytes_; | ||
| } | ||
| return static_cast<double>(used_delta) / (1024.0 * 1024.0); | ||
| } | ||
| if (total_bytes_ == 0) { | ||
| return 0.0; | ||
| } | ||
| return static_cast<double>(total_bytes_ - min_free_bytes_) / | ||
| (1024.0 * 1024.0); | ||
| } | ||
| }; | ||
|
|
||
| } // namespace executorch::backends::cuda |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Why do we want to delete the loading time recording?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Moved it inside `load()`.