Skip to content

Commit

Permalink
fix: use Cuda cores determination from rust-gpu-tools
Browse files Browse the repository at this point in the history
Determining the number of cores is generally useful, so it was moved
to the rust-gpu-tools library. In order to set a custom GPU please
use the `RUST_GPU_TOOLS_CUSTOM_GPU` variable instead of
`BELLMAN_CUSTOM_GPU`.
  • Loading branch information
vmx committed Jan 24, 2022
1 parent c5fa04b commit eece7eb
Show file tree
Hide file tree
Showing 5 changed files with 79 additions and 64 deletions.
4 changes: 4 additions & 0 deletions Cargo.toml
Expand Up @@ -53,6 +53,7 @@ rand_chacha = "0.3"
csv = "1.1.5"
tempfile = "3.1.0"
subtle = "2.2.1"
temp-env = "0.2.0"

[build-dependencies]
blstrs = "0.4.0"
Expand Down Expand Up @@ -94,3 +95,6 @@ harness = false
members = [
"verifier-bench"
]

[patch.crates-io]
rust-gpu-tools = { git = "https://github.com/filecoin-project/rust-gpu-tools" }
4 changes: 2 additions & 2 deletions README.md
Expand Up @@ -43,13 +43,13 @@ The gpu extension contains some env vars that may be set externally to this libr
env::set_var("BELLMAN_VERIFIER", "gpu");
```

- `BELLMAN_CUSTOM_GPU`
- `RUST_GPU_TOOLS_CUSTOM_GPU`

Will allow for adding a GPU not in the tested list. This requires researching the name of the GPU device and its number of CUDA cores, given as a comma-separated list of `name:cores` entries.

```rust
// Example
env::set_var("BELLMAN_CUSTOM_GPU", "GeForce RTX 2080 Ti:4352, GeForce GTX 1060:1280");
env::set_var("RUST_GPU_TOOLS_CUSTOM_GPU", "GeForce RTX 2080 Ti:4352, GeForce GTX 1060:1280");
```

- `BELLMAN_CPU_UTILIZATION`
Expand Down
85 changes: 23 additions & 62 deletions src/gpu/utils.rs
@@ -1,63 +1,31 @@
use log::{info, warn};
use rust_gpu_tools::Device;
use std::collections::HashMap;
use std::env;

lazy_static::lazy_static! {
static ref CORE_COUNTS: HashMap<String, usize> = {
let mut core_counts : HashMap<String, usize> = vec![
// AMD
("gfx1010".to_string(), 2560),
// This value was chosen to give (approximately) empirically best performance for a Radeon Pro VII.
("gfx906".to_string(), 7400),

// NVIDIA
("Quadro RTX 6000".to_string(), 4608),
("Quadro RTX A6000".to_string(), 10752),

("TITAN RTX".to_string(), 4608),

("Tesla V100".to_string(), 5120),
("Tesla P100".to_string(), 3584),
("Tesla T4".to_string(), 2560),
("Quadro M5000".to_string(), 2048),

("GeForce RTX 3090".to_string(), 10496),
("GeForce RTX 3080".to_string(), 8704),
("GeForce RTX 3070".to_string(), 5888),

("GeForce RTX 2080 Ti".to_string(), 4352),
("GeForce RTX 2080 SUPER".to_string(), 3072),
("GeForce RTX 2080".to_string(), 2944),
("GeForce RTX 2070 SUPER".to_string(), 2560),

("GeForce GTX 1080 Ti".to_string(), 3584),
("GeForce GTX 1080".to_string(), 2560),
("GeForce GTX 2060".to_string(), 1920),
("GeForce GTX 1660 Ti".to_string(), 1536),
("GeForce GTX 1060".to_string(), 1280),
("GeForce GTX 1650 SUPER".to_string(), 1280),
("GeForce GTX 1650".to_string(), 896),
].into_iter().collect();

if let Ok(var) = env::var("BELLMAN_CUSTOM_GPU") {
for card in var.split(',') {
let splitted = card.split(':').collect::<Vec<_>>();
if splitted.len() != 2 { panic!("Invalid BELLMAN_CUSTOM_GPU!"); }
let name = splitted[0].trim().to_string();
let cores : usize = splitted[1].trim().parse().expect("Invalid BELLMAN_CUSTOM_GPU!");
info!("Adding \"{}\" to GPU list with {} CUDA cores.", name, cores);
core_counts.insert(name, cores);
}
}

core_counts
};
}
use log::{info, warn};
use rust_gpu_tools::{Device, CUDA_CORES};

const DEFAULT_CORE_COUNT: usize = 2560;
pub fn get_core_count(name: &str) -> usize {
match CORE_COUNTS.get(name) {
// Determining the number of cores was moved to rust-gpu-tools, which uses the
// `RUST_GPU_TOOLS_CUSTOM_GPU` environment variable to set custom GPUs. Users should upgrade
// using that one instead. Though using `BELLMAN_CUSTOM_GPU` is still supported for backwards
// compatibility, but will be ignored if `RUST_GPU_TOOLS_CUSTOM_GPU` is also set.
// Setting `RUST_GPU_TOOLS_CUSTOM_GPU` must happen before the first call to `CUDA_CORES`, as
// it will be initialized only once for the lifetime of the library.
if let Ok(custom_gpu) = env::var("BELLMAN_CUSTOM_GPU") {
match env::var("RUST_GPU_TOOLS_CUSTOM_GPU") {
Ok(_) => {
info!("`BELLMAN_CUSTOM_GPU` was ignored as `RUST_GPU_TOOLS_CUSTOM_GPU` is set.");
}
Err(_) => {
info!(
"Please use `RUST_GPU_TOOLS_CUSTOM_GPU` instead of `BELLMAN_CUSTOM_GPU`, \
their values are fully compatible."
);
env::set_var("RUST_GPU_TOOLS_CUSTOM_GPU", custom_gpu)
}
}
}
match CUDA_CORES.get(name) {
Some(&cores) => cores,
None => {
warn!(
Expand All @@ -77,10 +45,3 @@ pub fn dump_device_list() {
info!("Device: {:?}", d);
}
}

/// Smoke test: initialize logging (best effort) and print every detected GPU device.
#[cfg(any(feature = "cuda", feature = "opencl"))]
#[test]
pub fn test_list_devices() {
    // The logger may already have been installed by another test; ignore the result.
    drop(env_logger::try_init());
    dump_device_list();
}
25 changes: 25 additions & 0 deletions tests/envvars1.rs
@@ -0,0 +1,25 @@
#![cfg(any(feature = "cuda", feature = "opencl"))]
// This test is in its own file as the `RUST_GPU_TOOLS_CUSTOM_GPU` variable is checked only when
// `get_core_count` is accessed for the first time. Subsequent calls will return the values it was
// set to on the first call.
use std::env;

use bellperson::gpu::get_core_count;

/// Make sure that setting the `BELLMAN_CUSTOM_GPU` env var still works.
///
/// The deprecated `BELLMAN_CUSTOM_GPU` value must be forwarded into
/// `RUST_GPU_TOOLS_CUSTOM_GPU` by the first call to `get_core_count`, and the
/// custom core count must be honoured.
#[test]
fn bellman_custom_gpu_env_var() {
    temp_env::with_vars(
        vec![
            // Only the deprecated variable is set; the new one is explicitly unset.
            ("BELLMAN_CUSTOM_GPU", Some("My custom GPU:3241")),
            ("RUST_GPU_TOOLS_CUSTOM_GPU", None),
        ],
        || {
            let cores = get_core_count("My custom GPU");
            // `get_core_count` copies the deprecated value into the new variable.
            let rust_gpu_tools_custom_gpu = env::var("RUST_GPU_TOOLS_CUSTOM_GPU")
                .expect("RUST_GPU_TOOLS_CUSTOM_GPU is set after `get_core_count` was called.");
            assert_eq!(rust_gpu_tools_custom_gpu, "My custom GPU:3241");
            assert_eq!(cores, 3241, "Cores of custom GPU were set correctly");
        },
    );
}
25 changes: 25 additions & 0 deletions tests/envvars2.rs
@@ -0,0 +1,25 @@
#![cfg(any(feature = "cuda", feature = "opencl"))]
// This test is in its own file as the `RUST_GPU_TOOLS_CUSTOM_GPU` variable is checked only when
// `get_core_count` is accessed for the first time. Subsequent calls will return the values it was
// set to on the first call.
use std::env;

use bellperson::gpu::get_core_count;

/// Make sure that `BELLMAN_CUSTOM_GPU` env var is ignored if `RUST_GPU_TOOLS_CUSTOM_GPU` is
/// set.
///
/// When both variables are present, the value from `RUST_GPU_TOOLS_CUSTOM_GPU`
/// must win and the deprecated `BELLMAN_CUSTOM_GPU` value must not overwrite it.
#[test]
fn bellman_custom_gpu_env_var_ignored() {
    temp_env::with_vars(
        vec![
            // Both variables are set with conflicting core counts; the new one takes precedence.
            ("RUST_GPU_TOOLS_CUSTOM_GPU", Some("My custom GPU:444")),
            ("BELLMAN_CUSTOM_GPU", Some("My custom GPU:3242")),
        ],
        || {
            let cores = get_core_count("My custom GPU");
            // Sanity check: the new variable is still present after the call.
            env::var("RUST_GPU_TOOLS_CUSTOM_GPU")
                .expect("RUST_GPU_TOOLS_CUSTOM_GPU is set after `get_core_count` was called.");
            assert_eq!(cores, 444, "Cores of custom GPU were set correctly to the `RUST_GPU_TOOLS_CUSTOM_GPU` env var.");
        },
    );
}

0 comments on commit eece7eb

Please sign in to comment.