#!/usr/bin/env perl
# ABSTRACT: Analyze benchmark data produced by timeall
use 5.010;
use strict;
use warnings;
use Pod::Usage;
use Getopt::Long;
use List::Util 'min', 'max';
use List::MoreUtils 'uniq';
use File::Basename;
use IO::File;
use DateTime;
use JSON;
use FindBin;
use lib "$FindBin::Bin/lib";
use Analyze::Summary;
my %FORMATTER = (
json => \&summarize_results_json,
text => \&summarize_results_text,
html => \&summarize_results_html,
html_snippet => \&summarize_results_html_snippet,
html_plot => \&summarize_results_html_plot,
);
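# One icon per diagnosis, used to build the compact per-run failure
# summaries; the empty-string key marks a clean run.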
my %DIAGNOSIS_ICON = (
SEGFAULT => '',
SPAWN_FAILED => '',
TIMED_OUT => '', # Or ⏰ or ⏱
RECEIVED_SIGNAL => '',
RECEIVED_SIGINT => '',
NONZERO_EXIT => 'Ø',
WROTE_TO_STDERR => 'E',
OUTPUT_MISMATCH => '',
unknown => '?',
mixed => '',
'' => '.',
);
MAIN();
sub MAIN {
# Process options and command line arguments
my $main_opt = process_options_and_arguments();
# Open outfile
my $out = $main_opt->{outfile};
my $out_fh = $out eq '-' ? \*STDOUT : IO::File->new($out, '>')
or die "Could not open outfile '$out': $!";
# Analyze results
analyze_timings_files($main_opt, $out_fh, @ARGV);
}
sub process_options_and_arguments {
my %opt;
GetOptions(\%opt, 'help|h|?!', 'man!', 'format=s', 'style=s', 'outfile=s',
'verbose!', 'min-time|min_time|mintime=f',
'ignore-startup|ignore_startup|ignorestartup!',
'ignore-compile|ignore_compile|ignorecompile!',
'skip-incomplete|skip_incomplete|skipincomplete!',
'show-failures|show_failures|showfailures!',
'show-relative|show_relative|showrelative!',
'show-rates|show_rates|showrates!',
'compare!', 'history!')
or pod2usage(-verbose => 0);
pod2usage(-verbose => 1) if $opt{help};
pod2usage(-verbose => 2) if $opt{man};
$opt{'min-time'} //= 1e-2;
$opt{'show-rates'} //= 1;
$opt{'show-relative'} //= $opt{compare};
$opt{outfile} //= '-';
my $suffix = (fileparse($opt{outfile}, qr/\.[^.]+$/))[2] || '.';
my $ext = lc substr $suffix, 1;
$opt{format} //= exists $FORMATTER{$ext} ? $ext : 'text';
$opt{format} = lc $opt{format};
my $formatter = $FORMATTER{$opt{format}}
or pod2usage(-msg => "Unknown output format '$opt{format}'");
$opt{formatter} = $formatter;
$opt{style} //= 'auto';
$opt{style} = lc $opt{style};
(grep {$_ eq $opt{style}} 0, 1, 'auto')
or pod2usage(-msg => "Unknown output style setting '$opt{style}'");
pod2usage(-msg => "Must list timing files to analyze") unless @ARGV;
return \%opt;
}
sub analyze_timings_files {
my ($opt, $out_fh, @files) = @_;
my $min_time = $opt->{'min-time'};
my $ignore_startup = $opt->{'ignore-startup'};
my $ignore_compile = $opt->{'ignore-compile'};
my $skip_incomplete = $opt->{'skip-incomplete'};
my $analyze_timing_data = sub {
my $data = shift;
my $startup = $data->{run}{startup} || {};
for my $test (@{$data->{times}}) {
$test->{diagnoses} = diagnose_runs($test->{raw});
$test->{compare}
= compare_scaled_times($test->{best}, $startup, $min_time,
$ignore_startup, $ignore_compile);
}
($data->{score}, $data->{test_score})
= compute_scores($data, $skip_incomplete);
$opt->{formatter}->($data, $opt, $out_fh);
};
if ($opt->{compare}) {
my $merged = merge_timing_files(@files);
$analyze_timing_data->($merged);
}
else {
for my $timing_file (@files) {
my $data = load_timing_file($timing_file);
$analyze_timing_data->($data);
}
}
}
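# Merge several timing files into one dataset for --compare mode, keying
# each compiler as "name/file_basename" so that the same compiler from
# different runs can appear side by side.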
sub merge_timing_files {
my (%times);
my (@test_order, @groups, @compilers);
my (@start_times, @end_times, @versions);
for my $file (@_) {
my $basename = fileparse($file, qr/\.[^.]+$/);
my $data = load_timing_file($file);
# Collect all values for lists that will be summarized
# (using min, max, or uniq) after all files are processed
push @groups, @{$data->{config}{groups}};
push @versions, $data->{run}{versions}{bench};
push @end_times, $data->{run}{end_time};
push @start_times, $data->{run}{start_time};
# Merge compiler groups
my $compilers = $data->{config}{compilers};
for my $compiler (@$compilers) {
next unless $compiler->{enabled};
$compiler->{run} = $basename;
$compiler->{key} = "$compiler->{name}/$basename";
push @compilers, $compiler;
}
# Merge test timings
for my $test (@{$data->{times}}) {
my $name = $test->{name};
push @test_order, $name;
my $merged = $times{$name} ||=
{
name => $name,
conf => $test->{conf},
raw => {},
best => {},
};
# Merge in any new configuration keys
for my $key (keys %{$test->{conf}}) {
$merged->{conf}{$key} //= $test->{conf}{$key};
}
for my $compiler (keys %{$test->{best}}) {
my $key = "$compiler/$basename";
$merged->{$_}{$key} = $test->{$_}{$compiler} for qw( raw best );
}
}
}
# Summarize collected lists
my $start_time = min @start_times;
my $end_time = max @end_times;
@groups = uniq @groups;
@versions = uniq sort @versions;
@test_order = uniq @test_order;
# Bad user, no biscuit! But we'll still try ....
if (@versions > 1) {
print STDERR "WARNING: Comparing results from different benchmark versions:\n";
print STDERR " $_\n" for @versions;
}
my $merged = {
config => {
groups => \@groups,
compilers => \@compilers,
},
run => {
start_time => $start_time,
end_time => $end_time,
versions => { bench => join ', ' => @versions },
},
times => [ map $times{$_}, @test_order ],
};
return $merged;
}
sub load_timing_file {
my $timing_file = shift;
my $decoder = JSON->new->utf8;
my $in_fh = $timing_file eq '-' ? \*STDIN : IO::File->new($timing_file, '<')
or die "Could not open timing file '$timing_file': $!";
local $/;
my $json = <$in_fh>;
return $decoder->decode($json);
}
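# Group each compiler's runs by scale, and reduce each scale's list of
# diagnoses to a single label ('mixed' when runs at the same scale disagree).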
sub diagnose_runs {
my ($run_info) = @_;
my %diagnoses;
while (my ($comp, $runs) = each %$run_info) {
my (%by_scale, %details);
for my $run (@$runs) {
my $scale = $run->{scale};
my $diagnosis = $run->{diagnosis} //= 'unknown';
push @{$by_scale{$scale} ||= []}, $diagnosis;
push @{$details{$scale} ||= []}, $run;
}
my %summary;
while (my ($scale, $diagnoses) = each %by_scale) {
my @diagnoses = uniq @$diagnoses;
$summary{$scale} = @diagnoses > 1 ? 'mixed' : $diagnoses[0];
}
my @sorted = map { $summary{$_} }
sort { $a <=> $b } keys %summary;
my @details = map { $details{$_} }
sort { $a <=> $b } keys %details;
$diagnoses{$comp} = {
by_scale => \%by_scale,
summary => \%summary,
details => \@details,
sorted => \@sorted,
};
}
return \%diagnoses;
}
sub most_diagnoses {
my ($tests) = @_;
my @test_maxes;
for my $test (@$tests) {
my @diagnoses = values %{$test->{diagnoses}};
push @test_maxes, max map { scalar @{$_->{sorted}} } @diagnoses;
}
return max @test_maxes;
}
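# Convert one test's raw per-scale timings into work rates per compiler,
# tracking the per-scale and global maximum rates, and record each
# compiler's peak rate along with its standing relative to those maxima.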
sub compare_scaled_times {
my ($times, $startup, $min_time, $ignore_startup, $ignore_compile) = @_;
my $max_rate = 0;
my %max_rates;
my %times_by_scale;
my %rates_by_scale;
my %rates_by_comp;
while (my ($comp, $scale_runs) = each %$times) {
my $startup_time = $ignore_startup ? $startup->{$comp} || 0 : 0;
my $compile_time = $ignore_compile ? $scale_runs->{0}{time} || 0 : 0;
my $ignore_time = max($startup_time, $compile_time);
while (my ($scale, $run) = each %$scale_runs) {
next unless $scale && defined $run && defined $run->{time};
my $time = $run->{time};
my $work = $run->{work};
# If below the minimum time after removing ignored
# startup/compile time, the calculated rate is going to be
# effectively garbage, so just skip this data point
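# (e.g. with the default min_time of 0.01s, a run measured at 0.012s
# with 0.005s of ignored startup time nets 0.007s and is dropped;
# numbers purely illustrative)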
$time -= $ignore_time;
next if $time < $min_time;
$times_by_scale{$scale}{$comp} = $time;
my $rate =
$rates_by_scale{$scale}{$comp} =
$rates_by_comp{$comp}{$scale} = $work / $time;
$max_rate = $rate if $max_rate < $rate;
$max_rates{$scale} = $rate
if !$max_rates{$scale} || $max_rates{$scale} < $rate;
}
}
my %relative_rates;
while (my ($scale, $comp_rates) = each %rates_by_scale) {
while (my ($comp, $rate) = each %$comp_rates) {
$relative_rates{$scale}{$comp} = $rate / $max_rates{$scale};
}
}
my %peak_rate;
while (my ($comp, $scale_rates) = each %rates_by_comp) {
my @sorted = sort { $scale_rates->{$b} <=> $scale_rates->{$a} } keys %$scale_rates;
my $fastest = $sorted[0];
my $rate = $scale_rates->{$fastest};
$peak_rate{$comp} = {
scale => $fastest,
rate => $rate,
relative_to_max => $rate / $max_rate,
relative_at_scale => $rate / $max_rates{$fastest},
};
}
return {
times_by_scale => \%times_by_scale,
rates_by_scale => \%rates_by_scale,
rates_by_comp => \%rates_by_comp,
relative_rates => \%relative_rates,
peak_rate => \%peak_rate,
max_rate => $max_rate,
max_rates => \%max_rates,
};
}
# Compute overall 'score' by geometric mean of relative rates to
# a standard compiler serving as the reference 1.0 value.
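# For N summarized tests, each compiler's score works out to:
#   score = 100 * (rel_1 * rel_2 * ... * rel_N) ** (1/N)
# where rel_i is that compiler's peak rate on test i divided by the
# reference compiler's peak rate, so the reference always scores 100.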
sub compute_scores {
my ($data, $skip_incomplete) = @_;
my $tests = $data->{times};
my @compilers = map { $_->{key} || $_->{name} }
@{$data->{config}{compilers}};
my $standard = $compilers[0];
my %score;
$score{$_} = 1.0 for @compilers;
my %test_score;
my $summarized = 0;
TEST: for my $test (@$tests) {
my $peak_rate = $test->{compare}{peak_rate};
# Optionally skip any test that doesn't have a peak rate
# specified for every compiler being compared
if ($skip_incomplete) {
for my $compiler (@compilers) {
next TEST unless defined $peak_rate->{$compiler}{rate};
}
}
# Can't compute scores at all if we lack a reference point
my $reference = $peak_rate->{$standard}{rate};
return unless $reference;
# Determine if test should be included in summary
my $summarize = $test->{conf}{summarize} // 1;
$summarized++ if $summarize;
for my $compiler (@compilers) {
my $rate = $peak_rate->{$compiler}{rate};
$score{$compiler} = undef, next
unless defined $rate && defined $score{$compiler};
my $relative = $rate / $reference;
$score{$compiler} *= $relative if $summarize;
$test_score{$test->{name}}{$compiler} = $relative * 100;
}
}
my $power = 1 / ($summarized || 1);
for my $compiler (@compilers) {
next unless defined $score{$compiler};
$score{$compiler} **= $power;
# To get "% of reference compiler speed"
$score{$compiler} *= 100;
}
return (\%score, \%test_score);
}
sub summarize_results_json {
my ($data, $opt, $out_fh) = @_;
my $style = $opt->{style};
$style = 1 if $style eq 'auto';
my $encoder = JSON->new->utf8->canonical;
$encoder->pretty if $style;
my $json = $encoder->encode($data);
print $out_fh $json;
}
sub summarize_results_text {
my ($data, $opt, $out_fh) = @_;
$opt->{style} = -t $out_fh
if $opt->{style} eq 'auto';
return summarize_results_text_history($data, $opt, $out_fh)
if $opt->{history};
my $style = $opt->{style};
my $RED = $style ? "\e[1;31m" : '';
my $GREEN = $style ? "\e[32m" : '';
my $YELLOW = $style ? "\e[1;33m" : '';
my $CLEAR = $style ? "\e[0m" : '';
my $s = Analyze::Summary::Compare->new(data => $data, opt => $opt);
my $longest_test = max 13, map { length } @{$s->{test_names}};
my $longest_comp = max 8, map { length } @{$s->{run_names}},
@{$s->{lang_names}}, @{$s->{comp_names}}, @{$s->{vm_names}};
my $most_diagnoses = most_diagnoses($data->{times});
$longest_comp = max $longest_comp, $most_diagnoses;
my $spacer_length = 3;
my $spacer = ' ' x $spacer_length;
my @comps = @{$s->{compilers}};
my $format = join $spacer => "%-${longest_test}s", (("%${longest_comp}s ") x @comps);
my @lang_titles = map {
    center(" $_ ",
           $s->{lang_count}{$_} * ($longest_comp + $spacer_length + 2) - $spacer_length,
           bg => '-')
} @{$s->{langs}};
my $titles = join $spacer => @lang_titles;
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $start = friendly_time($data->{run}{start_time});
my $run_at = $opt->{compare} ? '' : " run at $start";
my $showing = 'showing ' . english_list(@{$s->{showing}});
my $output = "$CLEAR\n==> perl6-bench version $data->{run}{versions}{bench}$run_at$ignore\n--- $showing\n\n";
$output .= ' ' x $longest_test . "$spacer$titles\n";
$output .= sprintf "$format\n", '', @{$s->{run_names}}
if grep {length} @{$s->{run_names}};
$output .= sprintf "$format\n", '', @{$s->{comp_names}};
$output .= sprintf "$format\n", 'TEST', @{$s->{vm_names}};
$output .= ' ' x $longest_test . $spacer . '-' x length($titles) . "\n";
my $row_height = grep $_, @$opt{qw( show-rates show-failures compare )};
my $double_space = !$style && $row_height > 1;
my %test_name_shown;
my $show_test_name = sub {
my $test = shift;
my $name = $test_name_shown{$test->{name}}++ ? '' : $test->{name};
$output .= sprintf "%-${longest_test}s", $name;
};
my $icon_set = sub {
my @icons = map { $DIAGNOSIS_ICON{$_}
// $DIAGNOSIS_ICON{unknown} } @_;
join '' => @icons;
};
for my $test (@{$data->{times}}) {
$output .= "\n" if $double_space;
if ($opt->{'show-rates'}) {
$show_test_name->($test);
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $peak = $test->{compare}{peak_rate}{$key};
if (defined $peak && defined $peak->{rate}) {
$output .= sprintf "$spacer%${longest_comp}.0f/s", $peak->{rate};
}
else {
$output .= sprintf "$spacer%${longest_comp}s ", '-- ';
}
}
$output .= "\n";
}
if ($opt->{'show-relative'}) {
$show_test_name->($test);
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $peak = $test->{compare}{peak_rate}{$key};
my $rel = $peak->{relative_to_max};
if ($rel) {
$rel = 1 / $rel;
my $color = $rel < 2 ? $GREEN :
$rel < 10 ? $YELLOW :
$RED ;
$output .= sprintf "$spacer$color%${longest_comp}.1fx $CLEAR", $rel;
}
else {
# XXXX: May have to make this based on key instead of name
my $conf = $test->{conf};
my $is_skip = !defined $conf->{$comp->{group}}
|| (grep { $_ eq $comp->{name} } @{$conf->{skip} || []})
|| (exists $conf->{skip} && !defined $conf->{skip});
$output .= sprintf "$spacer$RED%${longest_comp}s $CLEAR",
$is_skip ? 'SKIP' : 'FAIL';
}
}
$output .= "\n";
}
if ($opt->{'show-failures'}) {
my $len = $longest_comp + 2;
my %icon_sets;
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $diags = $test->{diagnoses}{$key};
my @icon_sets;
if ($opt->{verbose}) {
my $by_scale = $diags->{by_scale};
my @scales = sort { $a <=> $b } keys %$by_scale;
for my $scale (@scales) {
my $diag = $by_scale->{$scale};
push @icon_sets, $icon_set->(@$diag);
}
}
else {
my $diag = $diags->{sorted};
push @icon_sets, $icon_set->(@$diag);
}
$icon_sets{$key} = \@icon_sets;
}
if ($opt->{verbose} && $opt->{compare}) {
my $max = max map { scalar @$_ } values %icon_sets;
for my $i (0 .. $max - 1) {
$show_test_name->($test);
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $icons = $icon_sets{$key}[$i] || '';
$output .= sprintf "$spacer%${len}s", $icons;
}
$output .= "\n";
}
}
else {
$show_test_name->($test);
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
$output .= sprintf "$spacer%${len}s",
join ',' => @{$icon_sets{$key}};
}
$output .= "\n";
}
}
}
my $scores = $data->{score};
if ($scores && $opt->{'show-relative'}) {
$output .= ' ' x $longest_test . $spacer . '=' x length($titles) . "\n";
$output .= sprintf "%-${longest_test}s", 'SUMMARY SCORE';
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $score = $scores->{$key};
if (defined $score) {
my $color = $score > 50 ? $GREEN :
$score > 10 ? $YELLOW :
$RED ;
$output .= sprintf "$spacer$color%${longest_comp}.1f $CLEAR", $score;
}
else {
$output .= sprintf "$spacer%${longest_comp}s ", '-- ';
}
}
$output .= "\n";
}
print $out_fh $output;
}
sub summarize_results_text_history {
my ($data, $opt, $out_fh) = @_;
my $scores = $data->{score}
or die "Can't show history without comparison scores!";
my $style = $opt->{style};
my $RED = $style ? "\e[1;31m" : '';
my $GREEN = $style ? "\e[32m" : '';
my $YELLOW = $style ? "\e[1;33m" : '';
my $CLEAR = $style ? "\e[0m" : '';
my $s = Analyze::Summary->new(data => $data, opt => $opt);
my @comp_names = uniq map { $_->{name} } @{$s->{compilers}};
my $longest_comp = max 8, map { length } @comp_names;
my %column;
my $col = 0;
$column{$_} = ++$col for @comp_names;
my $spacer_length = 3;
my $spacer = ' ' x $spacer_length;
my $format = join $spacer => "%-10s", (("%${longest_comp}s ") x $col);
$format .= "\n";
my @date_sorted = sort { ($a->{commit_time} || 0)
<=> ($b->{commit_time} || 0) } @{$s->{compilers}};
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $start = friendly_time($data->{run}{start_time});
my $run_at = $opt->{compare} ? '' : " run at $start";
my $skip = $opt->{'skip-incomplete'} ? ' (skipping incomplete data)' : '';
my $output = "$CLEAR\n==> perl6-bench version $data->{run}{versions}{bench}$run_at$ignore\n";
$output .= "--- showing HISTORICAL SCORES$skip\n\n";
$output .= sprintf $format, 'DATE', @comp_names;
# Put scores into columns by compiler name, allowing multiple scores
# for different compilers on the same date to appear in the same row.
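# e.g. if two compilers were both benchmarked on 2013-02-14, the output
# contains a single row for that date (values purely illustrative):
#   2013-02-14      42.7      95.3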
my @row;
my $old_date = '';
my $new_row = sub {
my $date = shift;
$old_date = $date;
$output .= sprintf($format, @row) if @row;
@row = ($date, ('') x $col);
};
for my $comp (@date_sorted) {
my $commit = $comp->{commit_time} || 0;
my $date = DateTime->from_epoch(epoch => $commit)->ymd;
$new_row->($date) if $old_date ne $date;
my $key = $comp->{key} || $comp->{name};
my $score = $scores->{$key};
my $column = $column{$comp->{name}};
# Don't let new scores on the same date overwrite old ones
$new_row->($date) if $row[$column] ne '';
if (defined $score) {
my $color = $score > 50 ? $GREEN :
$score > 10 ? $YELLOW :
$RED ;
$row[$column] = sprintf "$color%${longest_comp}.1f$CLEAR", $score;
}
else {
$row[$column] = sprintf "$RED%${longest_comp}s$CLEAR", '--';
}
}
$output .= sprintf $format, @row if @row;
print $out_fh $output;
}
sub center {
my ($string, $length, %opts) = @_;
my $bg_char = $opts{bg} || ' ';
my $output = $bg_char x ($length / length $bg_char);
my $left = int(($length - length $string) / 2);
substr($output, $left, length $string, $string);
return $output;
}
sub summarize_results_html {
my ($data, $opt, $out_fh) = @_;
# Default to including style in full HTML pages
$opt->{style} = 1
if $opt->{style} eq 'auto';
print $out_fh <<'HEADER';
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Perl Bench Summary</title>
</head>
<body>
HEADER
summarize_results_html_snippet($data, $opt, $out_fh);
print $out_fh <<'FOOTER';
</body>
</html>
FOOTER
}
sub summarize_results_html_snippet {
my ($data, $opt, $out_fh) = @_;
my $html = '';
# Default to no style info if just generating an HTML snippet
$opt->{style} = 0
if $opt->{style} eq 'auto';
if ($opt->{style}) {
$html .= <<'CSS';
<style type="text/css">
.bench_summary { font-family: sans-serif; }
.bench_summary caption { font-style: italic; }
.bench_summary td { padding-left: .4em; padding-right: .4em; }
.bench_summary th { padding-left: .4em; padding-right: .4em; }
.bench_ver { font-family: monospace; }
.bench_start_time { font-family: monospace; }
.bench_language { text-align: center; border-bottom: 1px solid #999; border-left: .4em solid #fff; border-right: .4em solid #fff; }
.bench_run { text-align: center; padding-top: .1em; }
.bench_compiler { text-align: center; padding-top: .1em; }
.bench_vm { text-align: center; padding-top: .1em; }
.bench_time_row td { }
.bench_compare_row td { }
.bench_diagnoses_row td { vertical-align: top; }
.bench_spacer_row { height: .7em; }
.bench_rate { text-align: right; font-family: monospace; }
.bench_time { text-align: right; font-family: monospace; }
.bench_no_time { text-align: right; font-family: monospace; }
.bench_diagnoses { text-align: right; font-family: monospace; }
.bench_good { text-align: right; font-family: monospace; background-color: #3f7; color: black; }
.bench_bad { text-align: right; font-family: monospace; background-color: #ee0; color: black; }
.bench_ugly { text-align: right; font-family: monospace; background-color: #f55; color: white; }
.bench_skip { text-align: right; font-family: monospace; background-color: #666; color: white; }
.bench_fail { text-align: right; font-family: monospace; background-color: #f55; color: white; }
</style>
CSS
}
return summarize_results_html_history($html, $data, $opt, $out_fh)
if $opt->{history};
my $s = Analyze::Summary::Compare->new(data => $data, opt => $opt);
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $start = friendly_time($data->{run}{start_time});
my $run_at = $opt->{compare} ? '' : qq{ run at <span class="bench_start_time">$start</span>};
my $showing = 'showing ' . english_list(@{$s->{showing}});
$showing =~ s/\((\S+?)\)/(<strong>$1<\/strong>)/g;
$html .= qq{<table class="bench_summary" cellspacing="0" cellpadding="0">\n};
$html .= qq{<caption>perl6-bench version <span class="bench_ver">$data->{run}{versions}{bench}</span>$run_at$ignore<br>$showing</caption>\n};
$html .= "<tr><th></th>\n" . join('' => map qq{ <th class="bench_language" colspan="$s->{lang_count}{$_}">$_</th>\n} => @{$s->{langs}}) . "</tr>\n";
$html .= "<tr><th></th>\n" . join('' => map qq{ <th class="bench_run">$_</th>\n} => @{$s->{run_names}}) . "</tr>\n" if grep {length} @{$s->{run_names}};
$html .= "<tr><th></th>\n" . join('' => map qq{ <th class="bench_compiler">$_</th>\n} => @{$s->{comp_names}}) . "</tr>\n";
$html .= "<tr><th>TEST</th>\n" . join('' => map qq{ <th class="bench_vm">$_</th>\n} => @{$s->{vm_names}}) . "</tr>\n";
my $row_height = grep $_, @$opt{qw( show-rates show-failures compare )};
my $double_space = $row_height > 1;
my %test_name_shown;
my $show_test_name = sub {
my ($test, $class) = @_;
my $name = $test_name_shown{$test->{name}}++ ? '' : $test->{name};
$html .= qq{<tr class="$class"><td>$name</td>\n};
};
my $icon_set = sub {
my @icons = map { my $icon = $DIAGNOSIS_ICON{$_}
// $DIAGNOSIS_ICON{unknown};
qq{<span title="$_">$icon</span>}; } @_;
join '' => @icons;
};
my $detailed_icons = sub {
my @icons;
for my $run (@_) {
my $diag = $run->{diagnosis};
my $icon = $DIAGNOSIS_ICON{$diag}
// $DIAGNOSIS_ICON{unknown};
my $hover = $diag ? "$diag: $run->{reason}" : 'ok';
push @icons, qq{<span title="$hover">$icon</span>};
}
join '' => @icons;
};
my @comps = @{$s->{compilers}};
for my $test (@{$data->{times}}) {
if ($opt->{'show-rates'}) {
$show_test_name->($test, 'bench_time_row');
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $peak = $test->{compare}{peak_rate}{$key};
if (defined $peak && defined $peak->{rate}) {
$html .= sprintf qq{ <td class="bench_rate">%.0f/s</td>\n}, $peak->{rate};
}
else {
$html .= qq{ <td class="bench_no_time">--</td>\n};
}
}
$html .= "</tr>\n";
}
if ($opt->{'show-relative'}) {
$show_test_name->($test, 'bench_compare_row');
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $peak = $test->{compare}{peak_rate}{$key};
my $rel = $peak->{relative_to_max};
if ($rel) {
$rel = 1 / $rel;
my $class = $rel < 2 ? 'bench_good' :
$rel < 10 ? 'bench_bad' :
'bench_ugly' ;
$html .= sprintf qq{ <td class="$class">%.1fx</td>\n}, $rel;
}
else {
# XXXX: May have to make this based on key instead of name
my $conf = $test->{conf};
my $is_skip = !defined $conf->{$comp->{group}}
|| (grep { $_ eq $comp->{name} } @{$conf->{skip} || []})
|| (exists $conf->{skip} && !defined $conf->{skip});
my $class = $is_skip ? 'bench_skip' : 'bench_fail';
my $message = $is_skip ? 'SKIP' : 'FAIL';
$html .= qq{ <td class="$class">$message</td>\n};
}
}
$html .= "</tr>\n";
}
if ($opt->{'show-failures'}) {
$show_test_name->($test, 'bench_diagnoses_row');
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $diags = $test->{diagnoses}{$key};
my (@icon_sets, $icons);
if ($opt->{verbose}) {
my $details = $diags->{details};
my @icon_sets;
for my $run_set (@$details) {
push @icon_sets, $detailed_icons->(@$run_set);
}
$icons = join '<br>' => @icon_sets;
}
else {
my $diag = $diags->{sorted};
$icons = $icon_set->(@$diag);
}
$html .= qq{ <td class="bench_diagnoses">$icons</td>\n};
}
$html .= "</tr>\n";
}
$html .= qq{<tr class="bench_spacer_row"></tr>\n}
if $double_space;
}
my $scores = $data->{score};
if ($scores && $opt->{'show-relative'}) {
$html .= qq{<tr class="bench_spacer_row"></tr>\n};
$html .= qq{<tr class="bench_compare_row"><th>SUMMARY SCORE</th>\n};
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $score = $scores->{$key};
if (defined $score) {
my $class = $score > 50 ? 'bench_good' :
$score > 10 ? 'bench_bad' :
'bench_ugly' ;
$html .= sprintf qq{ <td class="$class">%.1f</td>\n}, $score;
}
else {
$html .= qq{ <td class="bench_skip">--</td>\n};
}
}
$html .= qq{</tr>\n};
}
$html .= "</table>\n";
print $out_fh $html;
}
sub summarize_results_html_history {
my ($html, $data, $opt, $out_fh) = @_;
my $scores = $data->{score}
or die "Can't show history without comparison scores!";
my $s = Analyze::Summary->new(data => $data, opt => $opt);
my @comp_names = uniq map { $_->{name} } @{$s->{compilers}};
my %column;
my $col = 0;
$column{$_} = ++$col for @comp_names;
my @date_sorted = sort { ($a->{commit_time} || 0)
<=> ($b->{commit_time} || 0) } @{$s->{compilers}};
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $start = friendly_time($data->{run}{start_time});
my $run_at = $opt->{compare} ? '' : qq{ run at <span class="bench_start_time">$start</span>};
my $skip = $opt->{'skip-incomplete'} ? ' (skipping incomplete data)' : '';
my $showing = "showing HISTORICAL SCORES$skip";
# XXXX: WIP
$html .= qq{<table class="bench_summary" cellspacing="0" cellpadding="0">\n};
$html .= qq{<caption>perl6-bench version <span class="bench_ver">$data->{run}{versions}{bench}</span>$run_at$ignore<br>$showing</caption>\n};
$html .= join "\n" => '<tr><th>DATE</th>', (map qq{ <th class="bench_compiler">$_</th>} => @comp_names), "</tr>\n";
# Put scores into columns by compiler name, allowing multiple scores
# for different compilers on the same date to appear in the same row.
my @row;
my $old_date = '';
my $new_row = sub {
my $date = shift;
$old_date = $date;
if (@row) {
$_ ||= ' <td></td>' for @row;
$html .= join "\n" => '<tr>', @row, "</tr>\n";
}
@row = (" <td>$date</td>", ('') x $col);
};
for my $comp (@date_sorted) {
my $commit = $comp->{commit_time} || 0;
my $date = DateTime->from_epoch(epoch => $commit)->ymd;
$new_row->($date) if $old_date ne $date;
my $key = $comp->{key} || $comp->{name};
my $score = $scores->{$key};
my $column = $column{$comp->{name}};
# Don't let new scores on the same date overwrite old ones
$new_row->($date) if $row[$column] ne '';
if (defined $score) {
my $class = $score > 50 ? 'bench_good' :
$score > 10 ? 'bench_bad' :
'bench_ugly' ;
$row[$column] = sprintf qq{ <td class="$class">%.1f</td>}, $score;
}
else {
$row[$column] = sprintf qq{ <td class="bench_ugly">%.1f</td>}, '--';
}
}
if (@row) {
$_ ||= ' <td></td>' for @row;
$html .= join "\n" => '<tr>', @row, "</tr>\n";
}
$html .= "</table>\n";
print $out_fh $html;
}
sub plot_header {
return <<'PLOT_HEADER';
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>Benchmark Plots</title>
<link rel="stylesheet" type="text/css" href="jqplot/jquery.jqplot.min.css" />
<!--[if lt IE 9]><script language="javascript" type="text/javascript" src="jqplot/excanvas.min.js"></script><![endif]-->
<script type="text/javascript" src="jqplot/jquery.min.js"></script>
<script type="text/javascript" src="jqplot/jquery.jqplot.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.barRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.canvasAxisTickRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.canvasAxisLabelRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.canvasTextRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.categoryAxisRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.dateAxisRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.enhancedLegendRenderer.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.highlighter.min.js"></script>
<script type="text/javascript" src="jqplot/plugins/jqplot.logAxisRenderer.min.js"></script>
<script type="text/javascript">
function handleToggle (ev) {
var d=ev.data, s=d.series, replot=d.replot, plot=d.plot, speed=d.speed, sidx=s.index, showing=false;
if (s.canvas._elem.is(':hidden') || !s.show) { showing = true }
var doLegendToggle = function() {
if (replot) {
var opts = {};
if ($.isPlainObject(replot)) { $.extend(true, opts, replot) }
plot.replot(opts);
if (showing && speed) {
var s = plot.series[sidx];
if (s.shadowCanvas._elem) {
s.shadowCanvas._elem.hide().fadeIn(speed);
}
s.canvas._elem.hide().fadeIn(speed);
s.canvas._elem.nextAll('.jqplot-point-label.jqplot-series-'+s.index).hide().fadeIn(speed);
}
} else {
var s = plot.series[sidx];
if (s.canvas._elem.is(':hidden') || !s.show) {
if (typeof plot.options.legend.showSwatches === 'undefined' || plot.options.legend.showSwatches === true) {
plot.legend._elem.find('td').eq(sidx * 2).addClass('jqplot-series-hidden');
}
if (typeof plot.options.legend.showLabels === 'undefined' || plot.options.legend.showLabels === true) {
plot.legend._elem.find('td').eq((sidx * 2) + 1).addClass('jqplot-series-hidden');
}
} else {
if (typeof plot.options.legend.showSwatches === 'undefined' || plot.options.legend.showSwatches === true) {
plot.legend._elem.find('td').eq(sidx * 2).removeClass('jqplot-series-hidden');
}
if (typeof plot.options.legend.showLabels === 'undefined' || plot.options.legend.showLabels === true) {
plot.legend._elem.find('td').eq((sidx * 2) + 1).removeClass('jqplot-series-hidden');
}
}
}
};
s.toggleDisplay(ev, doLegendToggle);
};
charts = [];
function toggle_comp (idx, v) {
for (var i = 0; i < charts.length; i++) {
var c = charts[i];
var s = c.series[idx];
if (!!v === s.show) continue;
var l = c.legend;
handleToggle({ data: {
plot: c,
series: s,
speed: l.seriesToggle,
replot: l.seriesToggleReplot
} });
}
return v;
}
function do_plot (chart_div, title, series_data, series_labels, opt) {
var series_conf = [];
for (var i in series_labels) {
series_conf.push({ label: series_labels[i] });
}
var rows = 10;
if (series_data.length < rows) rows = series_data.length;
charts.push( $.jqplot(chart_div, series_data, {
title: title,
series: series_conf,
highlighter: {
show: true,
yvalues: 2,
tooltipAxes: "y",
tooltipLocation: "n",
useAxesFormatters: false,
formatString: "<!--%.0f--><strong>%.1f</strong> per second<br><strong>%.0f</strong>x slower than fastest<br><strong>%.0f</strong>x slower than global fastest",
tooltipContentEditor: function (str, i, j) {
return '<span class="highlight-series-label">' + series_labels[i] + '</span><br>' + str;
}
},
legend: {
renderer: $.jqplot.EnhancedLegendRenderer,
rendererOptions: {numberRows: rows},
show: true,
placement: "outsideGrid"
},
axes: {
xaxis: {
label: opt.x_label,
labelRenderer: $.jqplot.CanvasAxisLabelRenderer,
renderer: opt.linear_x ? $.jqplot.LinearAxisRenderer : $.jqplot.LogAxisRenderer,
base: 2,
tickRenderer: $.jqplot.CanvasAxisTickRenderer,
tickOptions: { angle: -30 }
},
yaxis: {
label: opt.y_label,
labelRenderer: $.jqplot.CanvasAxisLabelRenderer,
renderer: $.jqplot.LogAxisRenderer,
base: 2
}
}
}) );
}
function do_summary_plot (chart_div, title, series_data, series_labels, opt) {
var series_conf = [];
for (var i in series_labels) {
series_conf.push({ label: series_labels[i] });
}
var rows = 10;
if (series_data.length < rows) rows = series_data.length;
charts.push( $.jqplot(chart_div, series_data, {
title: title,
series: series_conf,
highlighter: {
show: true,
yvalues: 1,
tooltipAxes: "y",
tooltipLocation: "n",
useAxesFormatters: false,
formatString: "<!--%d--><strong>%.1f</strong><br>%s"
},
legend: {
renderer: $.jqplot.EnhancedLegendRenderer,
rendererOptions: {numberRows: rows},
show: true,
placement: "outsideGrid"
},
axes: {
xaxis: {
label: opt.x_label,
labelRenderer: $.jqplot.CanvasAxisLabelRenderer,
renderer: $.jqplot.DateAxisRenderer,
tickRenderer: $.jqplot.CanvasAxisTickRenderer,
tickOptions: { angle: -30 }
},
yaxis: {
label: opt.y_label,
labelRenderer: $.jqplot.CanvasAxisLabelRenderer,
renderer: $.jqplot.LinearAxisRenderer,
pad: 0
}
}
}) );
}
function do_bar (chart_div, title, bar_data, tick_labels, opt) {
var data = [];
for (var i in bar_data) {
data.push([ bar_data[i] ]);
}
var series_conf = [];
for (var i in tick_labels) {
series_conf.push({ label: tick_labels[i] });
}
var rows = 10;
if (rows > data.length) rows = data.length;
charts.push( $.jqplot(chart_div, data, {
title: title,
seriesDefaults: {
renderer: $.jqplot.BarRenderer
},
series: series_conf,
highlighter: {
show: true,
showMarker: false,
tooltipAxes: "y",
tooltipLocation: "n",
formatString: "<strong>%.1f</strong> per second",
tooltipContentEditor: function (str, i, j) {
return '<span class="highlight-series-label">' + tick_labels[i] + '</span><br>' + str;
}
},
legend: {
renderer: $.jqplot.EnhancedLegendRenderer,
rendererOptions: {numberRows: rows},
show: true,
placement: "outsideGrid"
},
axes: {
xaxis: {
tickOptions: {showGridline: false},
numberTicks: 3,
min: 0.5,
max: 1.5,
showTicks: false
},
yaxis: {
label: opt.y_label,
labelRenderer: $.jqplot.CanvasAxisLabelRenderer,
renderer: $.jqplot.LogAxisRenderer,
base: 2
}
}
}) );
}
</script>
<style type="text/css">
.bench_ver { font-family: monospace; }
.bench_start_time { font-family: monospace; }
.jqplot-highlighter-tooltip,
.jqplot-canvasOverlay-tooltip { background: #eee !important; }
.highlight-series-label { color: blue; font-weight: bold; }
.jqplot-highlighter-tooltip {
top: auto !important;
left: auto !important;
bottom: 0 !important;
right: 0 !important;
}
#toggleform {
text-align: center;
position: fixed;
top: 0;
right: 0.5em;
border: .2em #CFCFCF;
border-style: none solid;
width: 18em;
z-index: 1000;
background: rgba(255,255,255,0.85);
}
#toggleswitches {
display: none;
margin: 0;
padding: 0;
}
#toggleswitches div {
display: inline-block;
text-align: left;
}
#toggleswitches label {
display: block;
font-size: smaller;
margin: 0;
padding: 0.1em;
}
#toggleswitches input {
vertical-align: middle;
padding-right: .1em;
}
#togglebutton {
color: #FFFFFF;
background: #CFCFCF;
margin: 0;
padding: 0.2em 0;
width: 100%;
font-weight: bold;
text-align: center;
cursor: pointer;
}
.chartwrapper {
max-width: 900px;
position: relative;
padding: 0;
margin: 3em 1em;
overflow: hidden;
}
.source {
font-size: smaller;
text-align: right;
position: absolute;
top: 0.25em;
right: 0;
cursor: pointer;
z-index: 100;
color: #3F3F3F;
}
.sources {
display: none;
text-align: left;
background: rgba(255,255,255,0.9);
border: 1px solid #DFDFDF;
padding: 0.25em;
}
.sources div {
margin: 0.25em;
}
</style>
</head>
<body>
PLOT_HEADER
}
sub plot_footer {
return <<'PLOT_FOOTER';
</body>
</html>
PLOT_FOOTER
}
sub summarize_results_html_plot {
my ($data, $opt, $out_fh) = @_;
return summarize_results_html_plot_history($data, $opt, $out_fh)
if $opt->{history};
my $html = plot_header();
my $s = Analyze::Summary->new(data => $data, opt => $opt);
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $run_at = $opt->{compare} ? '' : qq{ run at <span class="bench_start_time">} . friendly_time($data->{run}{start_time}) . qq{</span>};
$html .= qq{<h2>perl6-bench version <span class="bench_ver">$data->{run}{versions}{bench}</span>$run_at$ignore</h2>\n\n};
my @comps = @{$s->{compilers}};
my $labels = join ', ' => map {'"' . ($_->{key} || $_->{name}) . '"'} @comps;
$html .= qq^
<form autocomplete="off" id="toggleform">
<div id="toggleswitches">
<div>^;
for (0..$#comps) {
my $comp = $comps[$_];
my $label = $comp->{key} || $comp->{name};
$html .= qq^<label><input type="checkbox" value="$_" checked="checked" onclick="toggle_comp(this.value, this.checked); return true;">$label</label>^;
}
$html .= qq^
</div>
</div>
<div id="togglebutton" onclick="\$('#toggleswitches').slideToggle()">Toggles</div>
</form>^;
for my $test (@{$data->{times}}) {
# In compare mode, the 'empty' test data is just residual noise
next if $opt->{compare} && $test->{name} eq 'empty';
my $source = '';
for (qw(perl6 nqp perl5)) {
my $name = $_;
next unless my $code = $test->{conf}{$name};
next if ref $code;
$code =~ s/&/&amp;/g;
$code =~ s/</&lt;/g;
$code =~ s/>/&gt;/g;
if ($name =~ s/(\d)$/ $1/) {
$name = ucfirst $name;
} else {
$name = uc $name;
}
$source .= qq^<div>$name: <code>$code</code></div>^;
}
if ($source) {
$source = qq^<div class="source" onclick="\$(this).find('.sources').slideToggle()">[ Code ]<div class="sources">$source</div></div>^;
}
$html .= <<"CHART_TOP";
<div class="chartwrapper">
$source
<div id="chart_$test->{name}" style="position:relative"></div>
<script type="text/javascript">
\$(function () {
var labels = [$labels];
var data = [
CHART_TOP
my $chart_type;
my $compare = $test->{compare};
my $max_rate = $compare->{max_rate};
my $max_rates = $compare->{max_rates};
my $by_scale = $compare->{rates_by_scale};
if (1 == keys %$by_scale) {
$chart_type = 'bar';
my ($rates) = values %$by_scale;
my @data_points;
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
push @data_points, $rates->{$key} || 0;
}
$html .= join ', ' => @data_points;
}
else {
$chart_type = 'plot';
my @data_rows;
for my $comp (@comps) {
my $key = $comp->{key} || $comp->{name};
my $scale_rates = $compare->{rates_by_comp}{$key};
my $rel_rates = $compare->{relative_rates};
my @data_points;
if ($scale_rates) {
for my $scale (sort { $a <=> $b } keys %$scale_rates) {
my $max_rel = $max_rates->{$scale} / $scale_rates->{$scale};
my $max_rel_global = $max_rate / $scale_rates->{$scale};
push @data_points, "[$scale,$scale_rates->{$scale},$max_rel,$max_rel_global]";
}
}
else {
push @data_points, "[]";
}
push @data_rows, ' [' . join(',' => @data_points) . ']';
}
$html .= join ",\n" => @data_rows;
}
my $conf = $test->{conf};
my $is_linear = ($conf->{scaling} // '') eq 'linear' ? 1 : 0;
my $x_label = $conf->{x_label} // 'Iterations per Run';
my $y_label = $conf->{y_label} // 'Iterations per Second';
$html .= <<"CHART_BOTTOM";
];
var opt = {
linear_x: $is_linear,
x_label: "$x_label",
y_label: "$y_label"
};
do_$chart_type("chart_$test->{name}", "$test->{name}", data, labels, opt);
});
</script>
</div>
CHART_BOTTOM
}
$html .= plot_footer();
print $out_fh $html;
}
sub summarize_results_html_plot_history {
my ($data, $opt, $out_fh) = @_;
my $summary_scores = $data->{score}
or die "Can't show history without comparison scores!";
my $html = plot_header();
my $s = Analyze::Summary->new(data => $data, opt => $opt);
my @ignore = @{$s->{ignoring}};
my $ignore = @ignore ? ' (ignoring ' . join(' and ' => @ignore) . ')' : '';
my $run_at = $opt->{compare} ? '' : qq{ run at <span class="bench_start_time">} . friendly_time($data->{run}{start_time}) . qq{</span>};
$html .= qq{<h2>perl6-bench version <span class="bench_ver">$data->{run}{versions}{bench}</span>$run_at$ignore</h2>\n\n};
my @compilers = @{$s->{compilers}};
my @comp_names = uniq map { $_->{name} } @compilers;
my $labels = join ', ' => map qq{"$_"} => @comp_names;
my @date_sorted = sort { ($a->{commit_time} || 0)
<=> ($b->{commit_time} || 0) } @compilers;
my %row;
my $row = 0;
$row{$_} = $row++ for @comp_names;
my $make_chart = sub {
my ($scores, $type, $name, $title) = @_;
$title ||= $name;
$html .= <<"CHART_TOP";
<div class="chartwrapper">
<div id="chart_$name" style="position:relative"></div>
<script type="text/javascript">
\$(function () {
var labels = [$labels];
var data = [
CHART_TOP
my @data_rows;
for my $comp (@date_sorted) {
my $data_row = $data_rows[$row{$comp->{name}}] ||= [];
my $key = $comp->{key} || $comp->{name};
my $score = $scores->{$key};
next unless defined $score;
my $commit = $comp->{commit_time} || 0;
my $date = DateTime->from_epoch(epoch => $commit)->ymd;
push @$data_row, qq{["$date",$score,"$key"]};
}
my @rows;
for my $data_row (@data_rows) {
push @$data_row, "[]" unless @$data_row;
push @rows, ' [' . join(',' => @$data_row) . ']';
}
$html .= join ",\n" => @rows;
my $y_label = ucfirst($type) . ' Score';
$html .= <<"CHART_BOTTOM";
];
var opt = {
x_label: "Commit Date",
y_label: "$y_label"
};
do_summary_plot("chart_$name", "$title", data, labels, opt);
});
</script>
</div>
CHART_BOTTOM
};
$make_chart->($summary_scores, qw( summary summary SUMMARY ));
for my $test (@{$data->{times}}) {
my $name = $test->{name};
# In compare mode, the 'empty' test data is just residual noise
next if $opt->{compare} && $name eq 'empty';
my $test_scores = $data->{test_score}{$name} or next;
$make_chart->($test_scores, 'test', $name);
}
$html .= plot_footer();
print $out_fh $html;
}
sub friendly_time {
my $time = shift;
my $dt = DateTime->from_epoch(epoch => $time);
my $friendly = $dt->ymd . ' ' . $dt->hms;
return $friendly;
}
sub english_list {
return '' if @_ == 0;
return $_[0] if @_ == 1;
return "$_[0] and $_[1]" if @_ == 2;
my $last = pop;
return join ', ', @_, "and $last";
}
__END__
=head1 NAME
analyze -- Analyze benchmark data produced by timeall
=head1 SYNOPSIS
analyze [--help|-h|-?] [--man]
[--format=text|json|html|html_snippet|html_plot]
[--style=0|1|auto] [--outfile=path/to/file.ext]
[--min-time=0.01]
[--ignore-startup] [--ignore-compile] [--skip-incomplete]
[--show-rates] [--show-relative] [--show-failures] [--verbose]
[--compare] [--history]
path/to/timing_file.json [path/to/second_timing_file.json ...]
=head1 DESCRIPTION
After benchmarking a number of implementations of Perl-family languages
against each other with the F<timeall> program, F<analyze> converts the raw
timing data to human-friendly summary information in various formats.
=head1 OPTIONS
=over 4
=item --help|-h|-?
Get basic help for this program
=item --man
Display this program's entire manpage
=item --format=text|json|html|html_snippet|html_plot
Format the summary output in a particular format. If the C<--outfile> option
is set, then the default output format is based on the lowercased extension
of the output filename. Otherwise the default is C<text>, which outputs a
text-rendered summary table with ANSI coloring. HTML output is also available,
either in full document form (C<html>), or just a snippet containing the
summary table (C<html_snippet>). For best visualization of performance at
different scales, use C<html_plot> to get JavaScript/HTML5 output that will
produce plotted result data. To save the results in computer-friendly
form, use the C<json> format.
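
For example, this hypothetical invocation (file names are illustrative)
writes a plotted HTML report:

    analyze --format=html_plot --outfile=plots.html timings.json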
=item --style=0|1|auto
Select whether style settings are included in the output. C<0> turns off
style output, C<1> turns it on, and C<auto> (the default) tries to DWIM.
For text output, this option selects whether ANSI color codes are used to
highlight entries in the summary table; C<auto> turns on ANSI color whenever
the output is a TTY. For HTML output, this determines whether a CSS style
block is added to the HTML (element C<class> attributes are always output).
C<auto> defaults to adding CSS to full HTML documents (format C<html>), and
I<not> adding it to HTML snippets (format C<html_snippet>). If style is
on (or auto) for JSON output (format C<json>), the result will be
pretty-printed; otherwise it will be output in compact form.
=item --outfile=path/to/file.ext|-
Write the summary report to a particular path and file, or to STDOUT (the
default) if C<--outfile> is set to C<-> (a single hyphen). If this option
is set and C<--format> is not, then the summary format defaults to the
lowercased extension (F<ext> in F<path/to/file.ext>) of the C<--outfile>.
=item --min-time=0.01
Set the noise floor for test timings, below which timing data will be
ignored to avoid nonsensical rate calculations and wildly oscillating
plots. This filter considers the times after ignoring startup and/or
compile times, and defaults to 0.01 seconds.
=item --ignore-startup
Ignore (subtract out) the startup time of the compiler from each benchmark
result, so that runtime performance can be compared more directly.
=item --ignore-compile
Ignore (subtract out) the time the compiler spends compiling the test
itself from each benchmark result, so that runtime performance can be
compared more directly. Only works for scalable tests, because it uses
runtime at C<SCALE = 0> as a portable proxy for true compile time.
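
As a sketch of the internal adjustment (variable names illustrative;
assumes both ignore options are enabled), each measured time is reduced
by the larger of the startup time and the C<SCALE = 0> run time before
rates are computed:

    $time -= max($startup_time, $scale0_time);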
=item --skip-incomplete
When computing summary scores, skip any incomplete test data (tests that
have timing data for some compilers but not others). This enables summary
comparison of compilers that can't all complete every test. This can occur
because of bugs, old versions of compilers that don't support current syntax,
or compilers/languages that lack certain language features (NQP being the
most common example of this).
=item --compare
When processing multiple timing files, compare times across all timing files
at once, rather than analyzing each timing file individually.
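
For example (file names are illustrative):

    analyze --compare rakudo-before.json rakudo-after.json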
=item --show-rates
Show actual benchmark processing rates rather than normalized scores
(or, if C<--show-relative> is on, in addition to them). Defaults to on;
use C<--no-show-rates> to turn this off.
=item --show-relative
Show relative benchmark processing rates rather than actual values
(or, if C<--show-rates> is on, in addition to them). Defaults to on if
C<--compare> is set and off otherwise; use C<--no-show-relative> to turn
this off.
=item --show-failures
Show information about test failures. Used especially when performing
stress testing, to show the failure modes exhibited by a failing compiler.
=item --verbose
Show more detailed information about test failures, especially useful when
a compiler has mixed failure modes on a single test (e.g. sometimes giving
wrong results, sometimes dying with an error, sometimes segfaulting).
=item --history
Format the data in a way that emphasizes the collective history of a
compiler's peak performance across multiple compiler versions, rather
than the individual scaling curves of each compiler version. This has
no effect when the output format is JSON, which always carries all of
the available data.
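
For example, to chart scores across a series of runs (file names are
illustrative):

    analyze --compare --history --outfile=history.html run-1.json run-2.json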
=back
=head1 AUTHOR
Geoffrey Broadwell
=cut