forked from JuliaGPU/CUDA.jl
-
Notifications
You must be signed in to change notification settings - Fork 0
/
runbenchmarks.jl
96 lines (77 loc) · 2.94 KB
/
runbenchmarks.jl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# benchmark suite execution and codespeed submission
using CUDA
# Disallow implicit GPU<->CPU scalar transfers; they would silently wreck benchmark timings.
CUDA.allowscalar(false)
using BenchmarkTools
# evals==0 is used as a sentinel: tuned benchmarks always get evals >= 1, so any
# benchmark still at 0 later can be identified as untuned.
BenchmarkTools.DEFAULT_PARAMETERS.evals = 0 # to find untuned benchmarks
SUITE = BenchmarkGroup()
# NOTE: don't use spaces in benchmark names (tobami/codespeed#256)
include("kernel.jl")
include("array.jl")
@info "Warming up"
# Single warm-up evaluation per benchmark to trigger compilation before timing.
warmup(SUITE; verbose=false)
# Tuned benchmark parameters are cached across runs in the primary Julia depot.
paramsfile = joinpath(first(DEPOT_PATH), "cache", "CUDA_benchmark_params.json")
mkpath(dirname(paramsfile))
# Load cached tuning parameters when available; otherwise tune from scratch.
# Either way, the params file ends up reflecting the tuned suite.
if !isfile(paramsfile)
    @warn "No saved parameters found, will re-tune all benchmarks"
    tune!(SUITE)
    # Persist the freshly-tuned parameters. Without this save, the cache file is
    # never created on a cold run, and every subsequent run re-tunes everything.
    BenchmarkTools.save(paramsfile, params(SUITE))
else
    loadparams!(SUITE, BenchmarkTools.load(paramsfile)[1], :evals, :samples)

    # find untuned benchmarks for which we have the default evals==0
    # (i.e. benchmarks added since the params file was last written)
    function find_untuned(group::BenchmarkGroup, untuned=Dict(), prefix="")
        for (name, b) in group
            find_untuned(b, untuned, isempty(prefix) ? name : "$prefix/$name")
        end
        return untuned
    end
    function find_untuned(b::BenchmarkTools.Benchmark, untuned=Dict(), prefix="")
        if params(b).evals == 0
            untuned[prefix] = b
        end
        return untuned
    end

    untuned = find_untuned(SUITE)
    if !isempty(untuned)
        @info "Tuning parameters: $(join(keys(untuned), ", "))"
        foreach(tune!, values(untuned))
        # Update the cache so the newly-tuned benchmarks are found next run.
        BenchmarkTools.save(paramsfile, params(SUITE))
    end
end
@info "Running benchmarks"
# Execute the tuned suite; per-benchmark progress is printed as it runs.
results = run(SUITE; verbose = true)
println(stdout, results)
## submission
using JSON, HTTP
# Only publish results from CI runs on the master branch.
if get(ENV, "CODESPEED_BRANCH", nothing) == "master"
    @info "Submitting to Codespeed..."
    # Metadata shared by every individual benchmark result.
    basedata = Dict(
        "branch"      => ENV["CODESPEED_BRANCH"],
        "commitid"    => ENV["CODESPEED_COMMIT"],
        "project"     => ENV["CODESPEED_PROJECT"],
        "environment" => ENV["CODESPEED_ENVIRONMENT"],
        "executable"  => ENV["CODESPEED_EXECUTABLE"]
    )

    # convert nested groups of benchmark to flat dictionaries of results
    # (typed accumulator instead of an untyped `[]`, which would be Vector{Any})
    flat_results = Dict{String,Any}[]
    function flatten(results, prefix="")
        for (key, value) in results
            if value isa BenchmarkGroup
                flatten(value, "$prefix$key/")
            else
                @assert value isa BenchmarkTools.Trial
                # codespeed reports maxima, but those are often very noisy.
                # get rid of measurements that unnecessarily skew the distribution.
                rmskew!(value)
                # times are reported in seconds (BenchmarkTools measures ns)
                push!(flat_results,
                      Dict{String,Any}(basedata...,
                          "benchmark"    => "$prefix$key",
                          "result_value" => median(value).time / 1e9,
                          "min"          => minimum(value).time / 1e9,
                          "max"          => maximum(value).time / 1e9))
            end
        end
    end
    flatten(results)

    # Codespeed's JSON endpoint expects a form-encoded POST with the payload
    # in the `json` field. HTTP.post throws on a non-2xx response.
    HTTP.post("$(ENV["CODESPEED_SERVER"])/result/add/json/",
              ["Content-Type" => "application/x-www-form-urlencoded"],
              HTTP.URIs.escapeuri(Dict("json" => JSON.json(flat_results))))
end