Skip to content

Commit 17d7ec2

Browse files
committed
Move all code in run_benchmarks.rb to CLI.run method
1 parent f9ce1d3 commit 17d7ec2

File tree

5 files changed

+472
-130
lines changed

5 files changed

+472
-130
lines changed

lib/benchmark_runner.rb

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,15 +80,20 @@ def render_graph(json_path)
8080

8181
# Checked system - error or return info if the command fails
8282
def check_call(command, env: {}, raise_error: true, quiet: false)
83+
quiet ||= ENV['BENCHMARK_QUIET'] == '1'
8384
puts("+ #{command}") unless quiet
8485

8586
result = {}
8687

87-
result[:success] = system(env, command)
88+
if quiet
89+
result[:success] = system(env, command, out: File::NULL, err: File::NULL)
90+
else
91+
result[:success] = system(env, command)
92+
end
8893
result[:status] = $?
8994

9095
unless result[:success]
91-
puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}"
96+
puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}" unless quiet
9297
raise RuntimeError.new if raise_error
9398
end
9499

lib/benchmark_runner/cli.rb

Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
# frozen_string_literal: true
2+
3+
require 'fileutils'
4+
require_relative '../argument_parser'
5+
require_relative '../cpu_config'
6+
require_relative '../benchmark_runner'
7+
require_relative '../benchmark_suite'
8+
require_relative '../results_table_builder'
9+
10+
module BenchmarkRunner
  # Command-line entry point for the benchmark harness.
  #
  # Orchestrates the full pipeline that used to live in run_benchmarks.rb:
  # CPU setup, running every configured Ruby executable through the
  # benchmark suite, building the results table, and writing JSON/CSV/TXT
  # output (plus an optional graph).
  class CLI
    # Parsed command-line options (result of ArgumentParser.parse).
    attr_reader :args

    # Parse +argv+ and run the whole benchmark pipeline.
    # Convenience wrapper so scripts can call `BenchmarkRunner::CLI.run(ARGV)`.
    def self.run(argv = ARGV)
      args = ArgumentParser.parse(argv)
      new(args).run
    end

    # @param args [Object] parsed options struct from ArgumentParser
    def initialize(args)
      @args = args
    end

    # Run benchmarks for every configured executable, then write and print
    # the results. Exits the process with status 1 if any benchmark failed.
    def run
      # Pin CPU frequency / turbo state for stable measurements.
      CPUConfig.configure_for_benchmarking(turbo: args.turbo)

      # Create the output directory
      FileUtils.mkdir_p(args.out_path)

      # name => output of `ruby -v` for each executable under test
      ruby_descriptions = {}

      # Benchmark with and without YJIT
      bench_start_time = Time.now.to_f
      bench_data = {}
      bench_failures = {}
      args.executables.each do |name, executable|
        # Capture the interpreter's version banner for reports/JSON output.
        ruby_descriptions[name] = `#{executable.shelljoin} -v`.chomp

        suite = BenchmarkSuite.new(
          ruby: executable,
          ruby_description: ruby_descriptions[name],
          categories: args.categories,
          name_filters: args.name_filters,
          out_path: args.out_path,
          harness: args.harness,
          pre_init: args.with_pre_init,
          no_pinning: args.no_pinning
        )
        bench_data[name], failures = suite.run
        # Make it easier to query later.
        bench_failures[name] = failures unless failures.empty?
      end

      bench_end_time = Time.now.to_f
      bench_total_time = (bench_end_time - bench_start_time).to_i
      puts("Total time spent benchmarking: #{bench_total_time}s")

      if !bench_failures.empty?
        puts("Failed benchmarks: #{bench_failures.map { |k, v| v.size }.sum}")
      end

      puts

      # Build results table
      builder = ResultsTableBuilder.new(
        executable_names: ruby_descriptions.keys,
        bench_data: bench_data,
        include_rss: args.rss
      )
      table, format = builder.build

      output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)

      # Save the raw data as JSON
      out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)

      # Save data as CSV so we can produce tables/graphs in a spreadsheet program
      # NOTE: we don't do any number formatting for the output file because
      # we don't want to lose any precision
      BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)

      # Save the output in a text file that we can easily refer to
      output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
      out_txt_path = output_path + ".txt"
      File.open(out_txt_path, "w") { |f| f.write output_str }

      # Print the table to the console, with numbers truncated
      puts(output_str)

      # Print JSON and PNG file names
      puts
      puts "Output:"
      puts out_json_path

      if args.graph
        puts BenchmarkRunner.render_graph(out_json_path)
      end

      # Surface failures last and signal them via the exit status so CI
      # pipelines can detect a partially-failed run.
      if !bench_failures.empty?
        puts "\nFailed benchmarks:"
        bench_failures.each do |name, data|
          puts " #{name}: #{data.keys.join(", ")}"
        end
        exit(1)
      end
    end
  end
end

run_benchmarks.rb

Lines changed: 2 additions & 96 deletions
Original file line numberDiff line numberDiff line change
@@ -1,99 +1,5 @@
11
#!/usr/bin/env ruby
22

3-
require 'pathname'
4-
require 'fileutils'
5-
require 'csv'
6-
require 'json'
7-
require 'shellwords'
8-
require 'rbconfig'
9-
require 'etc'
10-
require 'yaml'
11-
require_relative 'lib/cpu_config'
12-
require_relative 'lib/benchmark_runner'
13-
require_relative 'lib/benchmark_suite'
14-
require_relative 'lib/argument_parser'
15-
require_relative 'lib/results_table_builder'
3+
require_relative 'lib/benchmark_runner/cli'
164

17-
args = ArgumentParser.parse(ARGV)
18-
19-
CPUConfig.configure_for_benchmarking(turbo: args.turbo)
20-
21-
# Create the output directory
22-
FileUtils.mkdir_p(args.out_path)
23-
24-
ruby_descriptions = {}
25-
26-
# Benchmark with and without YJIT
27-
bench_start_time = Time.now.to_f
28-
bench_data = {}
29-
bench_failures = {}
30-
args.executables.each do |name, executable|
31-
ruby_descriptions[name] = `#{executable.shelljoin} -v`.chomp
32-
33-
suite = BenchmarkSuite.new(
34-
ruby: executable,
35-
ruby_description: ruby_descriptions[name],
36-
categories: args.categories,
37-
name_filters: args.name_filters,
38-
out_path: args.out_path,
39-
harness: args.harness,
40-
pre_init: args.with_pre_init,
41-
no_pinning: args.no_pinning
42-
)
43-
bench_data[name], failures = suite.run
44-
# Make it easier to query later.
45-
bench_failures[name] = failures unless failures.empty?
46-
end
47-
48-
bench_end_time = Time.now.to_f
49-
bench_total_time = (bench_end_time - bench_start_time).to_i
50-
puts("Total time spent benchmarking: #{bench_total_time}s")
51-
52-
if !bench_failures.empty?
53-
puts("Failed benchmarks: #{bench_failures.map { |k, v| v.size }.sum}")
54-
end
55-
56-
puts
57-
58-
# Build results table
59-
builder = ResultsTableBuilder.new(
60-
executable_names: ruby_descriptions.keys,
61-
bench_data: bench_data,
62-
include_rss: args.rss
63-
)
64-
table, format = builder.build
65-
66-
output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)
67-
68-
# Save the raw data as JSON
69-
out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
70-
71-
# Save data as CSV so we can produce tables/graphs in a spreasheet program
72-
# NOTE: we don't do any number formatting for the output file because
73-
# we don't want to lose any precision
74-
BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
75-
76-
# Save the output in a text file that we can easily refer to
77-
output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
78-
out_txt_path = output_path + ".txt"
79-
File.open(out_txt_path, "w") { |f| f.write output_str }
80-
81-
# Print the table to the console, with numbers truncated
82-
puts(output_str)
83-
84-
# Print JSON and PNG file names
85-
puts
86-
puts "Output:"
87-
puts out_json_path
88-
89-
if args.graph
90-
puts BenchmarkRunner.render_graph(out_json_path)
91-
end
92-
93-
if !bench_failures.empty?
94-
puts "\nFailed benchmarks:"
95-
bench_failures.each do |name, data|
96-
puts " #{name}: #{data.keys.join(", ")}"
97-
end
98-
exit(1)
99-
end
5+
BenchmarkRunner::CLI.run(ARGV)

0 commit comments

Comments
 (0)