// framework_cuda_specs.rs — CUDA framework integration tests (60 lines, 1.66 KB)
extern crate collenchyma as co;
extern crate libc;
#[cfg(test)]
#[cfg(feature = "cuda")]
mod framework_cuda_spec {
    use co::prelude::*;
    use co::frameworks::cuda::memory::*;

    /// Instantiating the CUDA framework succeeds and exposes the hardware list.
    #[test]
    fn it_works() {
        let framework = Cuda::new();
        println!("{:?}", framework.hardwares());
    }

    /// A device (CUDA context) can be created from the first available hardware.
    #[test]
    fn it_creates_context() {
        let framework = Cuda::new();
        let selection = &framework.hardwares()[0..1];
        println!("{:?}", framework.new_device(selection));
    }

    /// Raw CUDA memory can be allocated on a freshly created device.
    #[test]
    #[allow(unused_must_use)]
    fn it_allocates_memory() {
        let sample = vec![0isize, 1, 2, -3, 4, 5, 6, 7];
        let framework = Cuda::new();
        let device = framework.new_device(&framework.hardwares()[0..1]).unwrap();
        if let DeviceType::Cuda(_) = device {
            // Cuda memory
            Memory::new(sample.len());
        }
    }

    /// Create a lot of new CUDA devices; tests for correct dropping of devices.
    #[test]
    #[allow(unused_must_use)]
    fn it_creates_a_lot_of_devices() {
        for _ in 0..256 {
            let framework = Cuda::new();
            let _ = framework.new_device(&framework.hardwares()[0..1]).unwrap();
        }
    }

    /// Allocate 128 MB blocks, dropping them in between iterations;
    /// tests for correct freeing of device memory.
    #[test]
    #[allow(unused_must_use)]
    fn it_allocates_4gb_memory_same_device() {
        let framework = Cuda::new();
        let device = framework.new_device(&framework.hardwares()[0..1]).unwrap();
        for _ in 0..256 {
            // 256 * 1024 * 128 f32 values ≈ 128 MB, dropped at end of each pass.
            let _ = &mut SharedTensor::<f32>::new(&device, &vec![256, 1024, 128]).unwrap();
        }
    }

    /// The default CUDA backend can synchronize its context without error.
    #[test]
    fn it_can_synchronize_context() {
        let backend = Backend::<Cuda>::default().unwrap();
        backend.synchronize().unwrap();
    }
}