open Bechamel
open Toolkit

(* This is our /protected/ function, which takes an argument and returns a
   simple function: [unit -> 'a]. The function must be wrapped in a
   [Staged.t].
   NOTE: [words] is __outside__ our [(unit -> 'a) Staged.t]. *)
let make_list words =
  Staged.stage @@ fun () ->
  let rec go n acc = if n = 0 then acc else go (n - 1) (n :: acc) in
  ignore (go ((words / 3) + 1) [])
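
(* A quick sanity check (a sketch, not part of the benchmark): [Staged.unstage]
   gives the [unit -> 'a] function back, so a single iteration can be run by
   hand:
   {[
     let f = Staged.unstage (make_list 1000) in
     f () (* builds and discards a list of (1000 / 3) + 1 = 334 elements *)
   ]} *)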

(* From our function [make_list], we make a test indexed by [args]. It is
   equivalent to a list of tests, one per value of [args]:
   {[
     let test =
       [ make_list 0
       ; make_list 10
       ; make_list 100
       ; make_list 400
       ; make_list 1000 ]
   ]} *)
let test =
  Test.make_indexed ~name:"list" ~fmt:"%s %d" ~args:[ 0; 10; 100; 400; 1000 ]
    make_list
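
(* With [~fmt:"%s %d"], each sub-test is named from [name] and its argument:
   "list 0", "list 10", ..., "list 1000". *)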

(* From our test, we can start to benchmark it!

   A benchmark runs your test multiple times. From the results given by
   [Benchmark.all], an analysis is needed to infer the cost of a single call
   of your test.

   [Bechamel] asks for 3 things:
   - what you want to record (see [instances])
   - how you want to analyse (see [ols])
   - how you want to benchmark your test (see [cfg])

   The core of [Bechamel] (see [Bechamel.Toolkit]) provides several possible
   measures, such as the [monotonic-clock] to record time performance.

   The analysis can be OLS (Ordinary Least Squares) or RANSAC. In this
   example, we use only the former.

   Finally, to launch the benchmark, we need some other details, such as:
   - should we stabilise the GC?
   - how many /runs/ you want
   - the maximum time allowed for the benchmark
   - etc.

   [raw_results] is what the benchmark produced. [results] is what the
   analysis can infer. The first is used to draw graphs or to let the user
   (with [Measurement_raw]) infer something other than what [ols] did. The
   second is mostly what you want: a synthesis of /samples/. *)
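
(* A simplified sketch of what the OLS fit does here: with [run] as the only
   predictor, if a sample with [run = 1000] took ~100_000 ns and a sample with
   [run = 2000] took ~200_000 ns, the estimated cost is ~100 ns per call. *)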
let benchmark () =
  let ols =
    Analyze.ols ~bootstrap:0 ~r_square:true ~predictors:Measure.[| run |]
  in
  let instances =
    Instance.[ minor_allocated; major_allocated; monotonic_clock ]
  in
  let cfg =
    Benchmark.cfg ~limit:2000 ~quota:(Time.second 0.5) ~kde:(Some 1000) ()
  in
  let raw_results = Benchmark.all cfg instances test in
  let results =
    List.map (fun instance -> Analyze.all ols instance raw_results) instances
  in
  let results = Analyze.merge ols instances results in
  (results, raw_results)
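
(* Register the unit of each measure (e.g. nanoseconds for the clock, words
   for the allocation counters) so that [Bechamel_notty] can label its
   output. *)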
let () =
  List.iter
    (fun v -> Bechamel_notty.Unit.add v (Measure.unit v))
    Instance.[ minor_allocated; major_allocated; monotonic_clock ]

let img (window, results) =
  Bechamel_notty.Multiple.image_of_ols_results ~rect:window
    ~predictor:Measure.run results
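
(* Finally, render the OLS results into the terminal: we size the notty image
   to the current window, falling back to an 80x1 rectangle when the size is
   unknown (e.g. when stdout is not a tty). *)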
open Notty_unix
let () =
  let window =
    match winsize Unix.stdout with
    | Some (w, h) -> { Bechamel_notty.w; h }
    | None -> { Bechamel_notty.w = 80; h = 1 }
  in
  let results, _ = benchmark () in
  img (window, results) |> eol |> output_image
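
(* A sketch of a dune stanza to build this example; the library names are
   assumptions based on the bechamel distribution:
   {[
     (executable
      (name list)
      (libraries bechamel bechamel-notty notty.unix))
   ]} *)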