84 changes: 84 additions & 0 deletions cscs-checks/microbenchmarks/kernel_latency/kernel_latency.py
@@ -0,0 +1,84 @@
import reframe as rfm
import reframe.utility.sanity as sn


@rfm.required_version('>=2.16-dev0')
@rfm.parameterized_test(['sync'], ['async'])
class KernelLatencyTest(rfm.RegressionTest):
    def __init__(self, kernel_version):
        super().__init__()
        self.sourcepath = 'kernel_latency.cu'
        self.build_system = 'SingleSource'
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'kesch:cn']
        self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-pgi']
        self.num_tasks = 0
        self.num_tasks_per_node = 1

        if self.current_system.name in {'dom', 'daint'}:
            self.num_gpus_per_node = 1
            gpu_arch = '60'
            self.modules = ['craype-accel-nvidia60']
            self.valid_prog_environs += ['PrgEnv-gnu']
        else:
            self.num_gpus_per_node = 16
            self.modules = ['craype-accel-nvidia35']
            gpu_arch = '37'

        self.build_system.cxxflags = ['-arch=compute_%s' % gpu_arch,
                                      '-code=sm_%s' % gpu_arch, '-std=c++11']

        if kernel_version == 'sync':
            self.build_system.cppflags = ['-D SYNCKERNEL=1']
        else:
            self.build_system.cppflags = ['-D SYNCKERNEL=0']

        self.sanity_patterns = sn.all([
            sn.assert_eq(
                sn.count(sn.findall(r'\[\S+\] Found \d+ gpu\(s\)',
                                    self.stdout)),
                self.num_tasks_assigned),
            sn.assert_eq(
                sn.count(sn.findall(r'\[\S+\] \[gpu \d+\] Kernel launch '
                                    r'latency: \S+ us', self.stdout)),
                self.num_tasks_assigned * self.num_gpus_per_node)
        ])

        self.perf_patterns = {
            'latency': sn.max(sn.extractall(
                r'\[\S+\] \[gpu \d+\] Kernel launch latency: '
                r'(?P<latency>\S+) us', self.stdout, 'latency', float))
        }
        self.sys_reference = {
            'sync': {
                'dom:gpu': {
                    'latency': (6.6, None, 0.10, 'us')
                },
                'daint:gpu': {
                    'latency': (6.6, None, 0.10, 'us')
                },
                'kesch:cn': {
                    'latency': (12.0, None, 0.10, 'us')
                },
            },
            'async': {
                'dom:gpu': {
                    'latency': (2.2, None, 0.10, 'us')
                },
                'daint:gpu': {
                    'latency': (2.2, None, 0.10, 'us')
                },
                'kesch:cn': {
                    'latency': (5.7, None, 0.10, 'us')
                },
            },
        }

        self.reference = self.sys_reference[kernel_version]

        self.maintainers = ['TM']
        self.tags = {'benchmark', 'diagnostic'}

    @property
    @sn.sanity_function
    def num_tasks_assigned(self):
        return self.job.num_tasks
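
Note on the reference tuples: ReFrame interprets them as (target, lower threshold, upper threshold, unit), with the thresholds given as fractional deviations from the target and None disabling a bound, so (6.6, None, 0.10, 'us') tolerates any latency up to 10% above 6.6 us. The snippet below is a minimal, standalone sketch of that tolerance check for illustration only; it is not ReFrame's implementation, and the helper name within_reference is made up.

# Hypothetical sketch of the tolerance check implied by a tuple such as
# (6.6, None, 0.10, 'us'); for illustration, not ReFrame's actual code.
def within_reference(measured, target, lower=None, upper=None):
    # Thresholds are fractions of the target; None means "no bound".
    if lower is not None and measured < target * (1 + lower):
        return False
    if upper is not None and measured > target * (1 + upper):
        return False
    return True

print(within_reference(7.0, 6.6, upper=0.10))  # True: 7.0 <= 7.26
print(within_reference(7.5, 6.6, upper=0.10))  # False: 7.5 > 7.26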
59 changes: 59 additions & 0 deletions cscs-checks/microbenchmarks/kernel_latency/src/kernel_latency.cu
@@ -0,0 +1,59 @@
#include <iostream>
#include <chrono>
#include <ratio>
#include <unistd.h>
#include <cuda.h>

__global__ void null_kernel() {}

int main(int argc, char* argv[]) {
    char hostname[256];
    hostname[255] = '\0';
    gethostname(hostname, 255);

    cudaError_t error;
    int gpu_count = 0;

    error = cudaGetDeviceCount(&gpu_count);
    if (error == cudaSuccess) {
        if (gpu_count <= 0) {
            std::cout << "[" << hostname << "] " << "Could not find any gpu\n";
            return 1;
        }
        std::cout << "[" << hostname << "] " << "Found " << gpu_count << " gpu(s)\n";
    } else {
        std::cout << "[" << hostname << "] " << "Error getting gpu count, exiting...\n";
        return 1;
    }

    for (int i = 0; i < gpu_count; i++) {
        cudaSetDevice(i);

        // Single kernel launch to initialize the cuda runtime
        null_kernel<<<1, 1>>>();

        auto t_start = std::chrono::system_clock::now();
        const int kernel_count = 1000;
        for (int j = 0; j < kernel_count; ++j) {
            null_kernel<<<1, 1>>>();
#if SYNCKERNEL == 1
            // Synchronous variant: wait for each kernel before launching
            // the next one, so the timing includes completion.
            cudaDeviceSynchronize();
#endif
        }

#if SYNCKERNEL != 1
        // Asynchronous variant: drain all queued launches once at the end,
        // so the loop measures launch overhead only.
        cudaDeviceSynchronize();
#endif

        auto t_end = std::chrono::system_clock::now();
        std::cout << "[" << hostname << "] " << "[gpu " << i << "] "
                  << "Kernel launch latency: "
                  << std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(t_end - t_start).count() / kernel_count
                  << " us\n";
    }

    return 0;
}
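
The sanity and performance patterns in kernel_latency.py are tightly coupled to the exact format of the lines printed above. A quick standalone way to confirm that the perf regex really matches this output (the hostname and latency value below are invented for illustration):

import re

# Perf pattern copied from kernel_latency.py, checked against a sample
# line in the format that kernel_latency.cu prints.
pattern = (r'\[\S+\] \[gpu \d+\] Kernel launch latency: '
           r'(?P<latency>\S+) us')
sample = '[nid00001] [gpu 0] Kernel launch latency: 6.42 us'

match = re.search(pattern, sample)
assert match is not None
print(float(match.group('latency')))  # 6.42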