diff --git a/lib/benchmark_runner.rb b/lib/benchmark_runner.rb
index 754afd8e..8d5732c2 100644
--- a/lib/benchmark_runner.rb
+++ b/lib/benchmark_runner.rb
@@ -3,40 +3,105 @@
 require 'csv'
 require 'json'
 require 'rbconfig'
+require_relative 'table_formatter'
 
 # Extracted helper methods from run_benchmarks.rb for testing
 module BenchmarkRunner
-  module_function
+  class << self
+    # Determine output path - either use the override or find a free file number
+    def output_path(out_path_dir, out_override: nil)
+      if out_override
+        out_override
+      else
+        # If no out path is specified, find a free file index for the output files
+        file_no = free_file_no(out_path_dir)
+        File.join(out_path_dir, "output_%03d" % file_no)
+      end
+    end
 
-  # Find the first available file number for output files
-  def free_file_no(directory)
-    (1..).each do |file_no|
-      out_path = File.join(directory, "output_%03d.csv" % file_no)
-      return file_no unless File.exist?(out_path)
+    # Write benchmark data to JSON file
+    def write_json(output_path, ruby_descriptions, bench_data)
+      out_json_path = "#{output_path}.json"
+      out_data = {
+        metadata: ruby_descriptions,
+        raw_data: bench_data,
+      }
+      File.write(out_json_path, JSON.generate(out_data))
+      out_json_path
     end
-  end
 
-  # Render a graph from JSON benchmark data
-  def render_graph(json_path)
-    png_path = json_path.sub(/\.json$/, '.png')
-    require_relative 'graph_renderer'
-    GraphRenderer.render(json_path, png_path)
-  end
+    # Write benchmark results to CSV file
+    def write_csv(output_path, ruby_descriptions, table)
+      out_csv_path = "#{output_path}.csv"
+
+      CSV.open(out_csv_path, "wb") do |csv|
+        ruby_descriptions.each do |key, value|
+          csv << [key, value]
+        end
+        csv << []
+        table.each do |row|
+          csv << row
+        end
+      end
+
+      out_csv_path
+    end
+
+    # Build output text string with metadata, table, and legend
+    def build_output_text(ruby_descriptions, table, format, bench_failures)
+      base_name, *other_names = ruby_descriptions.keys
+
+      output_str = +""
+
+      ruby_descriptions.each do |key, value|
+        output_str << "#{key}: #{value}\n"
+      end
 
-  # Checked system - error or return info if the command fails
-  def check_call(command, env: {}, raise_error: true, quiet: false)
-    puts("+ #{command}") unless quiet
+      output_str << "\n"
+      output_str << TableFormatter.new(table, format, bench_failures).to_s + "\n"
 
-    result = {}
+      unless other_names.empty?
+        output_str << "Legend:\n"
+        other_names.each do |name|
+          output_str << "- #{name} 1st itr: ratio of #{base_name}/#{name} time for the first benchmarking iteration.\n"
+          output_str << "- #{base_name}/#{name}: ratio of #{base_name}/#{name} time. Higher is better for #{name}. Above 1 represents a speedup.\n"
+        end
+      end
 
-    result[:success] = system(env, command)
-    result[:status] = $?
+      output_str
+    end
 
-    unless result[:success]
-      puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}"
-      raise RuntimeError.new if raise_error
+    # Render a graph from JSON benchmark data
+    def render_graph(json_path)
+      png_path = json_path.sub(/\.json$/, '.png')
+      require_relative 'graph_renderer'
+      GraphRenderer.render(json_path, png_path)
     end
 
-    result
+    # Checked system - error or return info if the command fails
+    def check_call(command, env: {}, raise_error: true, quiet: false)
+      puts("+ #{command}") unless quiet
+
+      result = {}
+
+      result[:success] = system(env, command)
+      result[:status] = $?
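+      # $? is the Process::Status of the child process that system() just ran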
+
+      unless result[:success]
+        puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}"
+        raise RuntimeError.new if raise_error
+      end
+
+      result
+    end
+
+    private
+
+    def free_file_no(directory)
+      (1..).each do |file_no|
+        out_path = File.join(directory, "output_%03d.csv" % file_no)
+        return file_no unless File.exist?(out_path)
+      end
+    end
   end
 end
diff --git a/run_benchmarks.rb b/run_benchmarks.rb
index 8309cdf7..b78e4402 100755
--- a/run_benchmarks.rb
+++ b/run_benchmarks.rb
@@ -11,7 +11,6 @@
 require_relative 'lib/cpu_config'
 require_relative 'lib/benchmark_runner'
 require_relative 'lib/benchmark_suite'
-require_relative 'lib/table_formatter'
 require_relative 'lib/argument_parser'
 require_relative 'lib/results_table_builder'
 
@@ -57,65 +56,25 @@
 puts
 
 # Build results table
-all_names = args.executables.keys
-base_name, *other_names = all_names
 builder = ResultsTableBuilder.new(
-  executable_names: all_names,
+  executable_names: ruby_descriptions.keys,
   bench_data: bench_data,
   include_rss: args.rss
 )
 table, format = builder.build
 
-output_path = nil
-if args.out_override
-  output_path = args.out_override
-else
-  # If no out path is specified, find a free file index for the output files
-  file_no = BenchmarkRunner.free_file_no(args.out_path)
-  output_path = File.join(args.out_path, "output_%03d" % file_no)
-end
+output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)
 
 # Save the raw data as JSON
-out_json_path = output_path + ".json"
-File.open(out_json_path, "w") do |file|
-  out_data = {
-    metadata: ruby_descriptions,
-    raw_data: bench_data,
-  }
-  json_str = JSON.generate(out_data)
-  file.write json_str
-end
+out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
 
 # Save data as CSV so we can produce tables/graphs in a spreadsheet program
 # NOTE: we don't do any number formatting for the output file because
 # we don't want to lose any precision
-output_rows = []
-ruby_descriptions.each do |key, value|
-  output_rows.append([key, value])
-end
-output_rows.append([])
-output_rows.concat(table)
-out_tbl_path = output_path + ".csv"
-CSV.open(out_tbl_path, "wb") do |csv|
-  output_rows.each do |row|
-    csv << row
-  end
-end
+BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
 
 # Save the output in a text file that we can easily refer to
-output_str = ""
-ruby_descriptions.each do |key, value|
-  output_str << "#{key}: #{value}\n"
-end
-output_str += "\n"
-output_str += TableFormatter.new(table, format, bench_failures).to_s + "\n"
-unless other_names.empty?
-  output_str << "Legend:\n"
-  other_names.each do |name|
-    output_str << "- #{name} 1st itr: ratio of #{base_name}/#{name} time for the first benchmarking iteration.\n"
-    output_str << "- #{base_name}/#{name}: ratio of #{base_name}/#{name} time. Higher is better for #{name}. Above 1 represents a speedup.\n"
-  end
-end
+output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
 out_txt_path = output_path + ".txt"
 File.open(out_txt_path, "w") { |f| f.write output_str }
diff --git a/test/benchmark_runner_test.rb b/test/benchmark_runner_test.rb
index c4203f81..8a8485e1 100644
--- a/test/benchmark_runner_test.rb
+++ b/test/benchmark_runner_test.rb
@@ -9,46 +9,6 @@
 require 'yaml'
 
 describe BenchmarkRunner do
-  describe '.free_file_no' do
-    it 'returns 1 when no files exist' do
-      Dir.mktmpdir do |dir|
-        file_no = BenchmarkRunner.free_file_no(dir)
-        assert_equal 1, file_no
-      end
-    end
-
-    it 'returns next available number when files exist' do
-      Dir.mktmpdir do |dir|
-        FileUtils.touch(File.join(dir, 'output_001.csv'))
-        FileUtils.touch(File.join(dir, 'output_002.csv'))
-
-        file_no = BenchmarkRunner.free_file_no(dir)
-        assert_equal 3, file_no
-      end
-    end
-
-    it 'finds first gap in numbering' do
-      Dir.mktmpdir do |dir|
-        FileUtils.touch(File.join(dir, 'output_001.csv'))
-        FileUtils.touch(File.join(dir, 'output_003.csv'))
-
-        file_no = BenchmarkRunner.free_file_no(dir)
-        assert_equal 2, file_no
-      end
-    end
-
-    it 'handles triple digit numbers' do
-      Dir.mktmpdir do |dir|
-        (1..100).each do |i|
-          FileUtils.touch(File.join(dir, 'output_%03d.csv' % i))
-        end
-
-        file_no = BenchmarkRunner.free_file_no(dir)
-        assert_equal 101, file_no
-      end
-    end
-  end
-
   describe '.check_call' do
     it 'runs a successful command and returns success status' do
       result = nil
@@ -158,6 +118,383 @@
     end
   end
 
+  describe '.output_path' do
+    it 'returns the override path when provided' do
+      Dir.mktmpdir do |dir|
+        override = '/custom/path/output'
+        result = BenchmarkRunner.output_path(dir, out_override: override)
+        assert_equal override, result
+      end
+    end
+
+    it 'generates path with first available file number when no override' do
+      Dir.mktmpdir do |dir|
+        result = BenchmarkRunner.output_path(dir)
+        expected = File.join(dir, 'output_001')
+        assert_equal expected, result
+      end
+    end
+
+    it 'uses next available file number when files exist' do
+      Dir.mktmpdir do |dir|
+        FileUtils.touch(File.join(dir, 'output_001.csv'))
+        FileUtils.touch(File.join(dir, 'output_002.csv'))
+
+        result = BenchmarkRunner.output_path(dir)
+        expected = File.join(dir, 'output_003')
+        assert_equal expected, result
+      end
+    end
+
+    it 'finds first gap in numbering when files are non-sequential' do
+      Dir.mktmpdir do |dir|
+        FileUtils.touch(File.join(dir, 'output_001.csv'))
+        FileUtils.touch(File.join(dir, 'output_003.csv'))
+
+        result = BenchmarkRunner.output_path(dir)
+        expected = File.join(dir, 'output_002')
+        assert_equal expected, result
+      end
+    end
+
+    it 'prefers override even when files exist' do
+      Dir.mktmpdir do |dir|
+        FileUtils.touch(File.join(dir, 'output_001.csv'))
+
+        override = '/override/path'
+        result = BenchmarkRunner.output_path(dir, out_override: override)
+        assert_equal override, result
+      end
+    end
+
+    it 'handles triple digit file numbers' do
+      Dir.mktmpdir do |dir|
+        (1..100).each do |i|
+          FileUtils.touch(File.join(dir, 'output_%03d.csv' % i))
+        end
+
+        result = BenchmarkRunner.output_path(dir)
+        expected = File.join(dir, 'output_101')
+        assert_equal expected, result
+      end
+    end
+  end
+
+  describe '.write_csv' do
+    it 'writes CSV file with metadata and table data' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_001')
+        ruby_descriptions = {
+          'ruby-base' => 'ruby 3.3.0',
+          'ruby-yjit' => 'ruby 3.3.0 +YJIT'
+        }
+        table = [
+          ['Benchmark', 'ruby-base', 'ruby-yjit'],
+          ['fib', '1.5', '1.0'],
+          ['matmul', '2.0', '1.8']
+        ]
+
+        result_path = BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
+
+        expected_path = File.join(dir, 'output_001.csv')
+        assert_equal expected_path, result_path
+        assert File.exist?(expected_path)
+
+        csv_rows = CSV.read(expected_path)
+        assert_equal 'ruby-base', csv_rows[0][0]
+        assert_equal 'ruby 3.3.0', csv_rows[0][1]
+        assert_equal 'ruby-yjit', csv_rows[1][0]
+        assert_equal 'ruby 3.3.0 +YJIT', csv_rows[1][1]
+        assert_equal [], csv_rows[2]
+        assert_equal table, csv_rows[3..5]
+      end
+    end
+
+    it 'returns the CSV file path' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_test')
+        result_path = BenchmarkRunner.write_csv(output_path, {}, [])
+
+        assert_equal File.join(dir, 'output_test.csv'), result_path
+      end
+    end
+
+    it 'handles empty metadata and table' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_empty')
+
+        result_path = BenchmarkRunner.write_csv(output_path, {}, [])
+
+        assert File.exist?(result_path)
+        csv_rows = CSV.read(result_path)
+        assert_equal [[]], csv_rows
+      end
+    end
+
+    it 'handles single metadata entry' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_single')
+        ruby_descriptions = { 'ruby' => 'ruby 3.3.0' }
+        table = [['Benchmark', 'Time'], ['test', '1.0']]
+
+        result_path = BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
+
+        csv_rows = CSV.read(result_path)
+        assert_equal 'ruby', csv_rows[0][0]
+        assert_equal 'ruby 3.3.0', csv_rows[0][1]
+        assert_equal [], csv_rows[1]
+        assert_equal table, csv_rows[2..3]
+      end
+    end
+
+    it 'preserves precision in numeric strings' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_precision')
+        table = [['Benchmark', 'Time'], ['test', '1.234567890123']]
+
+        result_path = BenchmarkRunner.write_csv(output_path, {}, table)
+
+        csv_rows = CSV.read(result_path)
+        assert_equal '1.234567890123', csv_rows[2][1]
+      end
+    end
+
+    it 'overwrites existing CSV file' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_overwrite')
+
+        # Write first version
+        BenchmarkRunner.write_csv(output_path, { 'v1' => 'first' }, [['old']])
+
+        # Write second version
+        new_descriptions = { 'v2' => 'second' }
+        new_table = [['new']]
+        result_path = BenchmarkRunner.write_csv(output_path, new_descriptions, new_table)
+
+        csv_rows = CSV.read(result_path)
+        assert_equal 'v2', csv_rows[0][0]
+        assert_equal 'second', csv_rows[0][1]
+      end
+    end
+  end
+
+  describe '.write_json' do
+    it 'writes JSON file with metadata and raw data' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_001')
+        ruby_descriptions = {
+          'ruby-base' => 'ruby 3.3.0',
+          'ruby-yjit' => 'ruby 3.3.0 +YJIT'
+        }
+        bench_data = {
+          'ruby-base' => { 'fib' => { 'time' => 1.5 } },
+          'ruby-yjit' => { 'fib' => { 'time' => 1.0 } }
+        }
+
+        result_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
+
+        expected_path = File.join(dir, 'output_001.json')
+        assert_equal expected_path, result_path
+        assert File.exist?(expected_path)
+
+        json_content = JSON.parse(File.read(expected_path))
+        assert_equal ruby_descriptions, json_content['metadata']
+        assert_equal bench_data, json_content['raw_data']
+      end
+    end
+
+    it 'returns the JSON file path' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_test')
+        result_path = BenchmarkRunner.write_json(output_path, {}, {})
+
+        assert_equal File.join(dir, 'output_test.json'), result_path
+      end
+    end
+
+    it 'handles empty metadata and bench data' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_empty')
+
+        result_path = BenchmarkRunner.write_json(output_path, {}, {})
+
+        assert File.exist?(result_path)
+        json_content = JSON.parse(File.read(result_path))
+        assert_equal({}, json_content['metadata'])
+        assert_equal({}, json_content['raw_data'])
+      end
+    end
+
+    it 'handles nested benchmark data structures' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_nested')
+        ruby_descriptions = { 'ruby' => 'ruby 3.3.0' }
+        bench_data = {
+          'ruby' => {
+            'benchmark1' => {
+              'time' => 1.5,
+              'rss' => 12345,
+              'iterations' => [1.4, 1.5, 1.6]
+            }
+          }
+        }
+
+        result_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
+
+        json_content = JSON.parse(File.read(result_path))
+        assert_equal bench_data, json_content['raw_data']
+      end
+    end
+
+    it 'overwrites existing JSON file' do
+      Dir.mktmpdir do |dir|
+        output_path = File.join(dir, 'output_overwrite')
+
+        # Write first version
+        BenchmarkRunner.write_json(output_path, { 'v' => '1' }, { 'd' => '1' })
+
+        # Write second version
+        new_metadata = { 'v' => '2' }
+        new_data = { 'd' => '2' }
+        result_path = BenchmarkRunner.write_json(output_path, new_metadata, new_data)
+
+        json_content = JSON.parse(File.read(result_path))
+        assert_equal new_metadata, json_content['metadata']
+        assert_equal new_data, json_content['raw_data']
+      end
+    end
+  end
+
+  describe '.build_output_text' do
+    it 'builds output text with metadata, table, and legend' do
+      ruby_descriptions = {
+        'ruby-base' => 'ruby 3.3.0',
+        'ruby-yjit' => 'ruby 3.3.0 +YJIT'
+      }
+      table = [
+        ['bench', 'ruby-base (ms)', 'stddev (%)', 'ruby-yjit (ms)', 'stddev (%)'],
+        ['fib', '100.0', '5.0', '50.0', '3.0']
+      ]
+      format = ['%s', '%.1f', '%.1f', '%.1f', '%.1f']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      assert_includes result, 'ruby-base: ruby 3.3.0'
+      assert_includes result, 'ruby-yjit: ruby 3.3.0 +YJIT'
+      assert_includes result, 'Legend:'
+      assert_includes result, '- ruby-yjit 1st itr: ratio of ruby-base/ruby-yjit time for the first benchmarking iteration.'
+      assert_includes result, '- ruby-base/ruby-yjit: ratio of ruby-base/ruby-yjit time. Higher is better for ruby-yjit. Above 1 represents a speedup.'
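+      # All legend ratios use the first key in ruby_descriptions as the base executable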
+    end
+
+    it 'includes formatted table in output' do
+      ruby_descriptions = { 'ruby' => 'ruby 3.3.0' }
+      table = [
+        ['bench', 'ruby (ms)', 'stddev (%)'],
+        ['fib', '100.0', '5.0']
+      ]
+      format = ['%s', '%.1f', '%.1f']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      # Should contain table headers
+      assert_includes result, 'bench'
+      assert_includes result, 'ruby (ms)'
+      # Should contain table data
+      assert_includes result, 'fib'
+      assert_includes result, '100.0'
+    end
+
+    it 'omits legend when no other executables' do
+      ruby_descriptions = { 'ruby' => 'ruby 3.3.0' }
+      table = [['bench', 'ruby (ms)'], ['fib', '100.0']]
+      format = ['%s', '%.1f']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      refute_includes result, 'Legend:'
+    end
+
+    it 'handles multiple other executables in legend' do
+      ruby_descriptions = {
+        'ruby' => 'ruby 3.3.0',
+        'ruby-yjit' => 'ruby 3.3.0 +YJIT',
+        'ruby-rjit' => 'ruby 3.3.0 +RJIT'
+      }
+      table = [['bench', 'ruby (ms)', 'ruby-yjit (ms)', 'ruby-rjit (ms)']]
+      format = ['%s', '%.1f', '%.1f', '%.1f']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      assert_includes result, 'ruby-yjit 1st itr'
+      assert_includes result, 'ruby-rjit 1st itr'
+      assert_includes result, 'ruby/ruby-yjit'
+      assert_includes result, 'ruby/ruby-rjit'
+    end
+
+    it 'includes benchmark failures in formatted output' do
+      ruby_descriptions = { 'ruby' => 'ruby 3.3.0' }
+      table = [['bench', 'ruby (ms)'], ['fib', '100.0']]
+      format = ['%s', '%.1f']
+      bench_failures = {
+        'ruby' => { 'failed_bench' => 'error message' }
+      }
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      # TableFormatter handles displaying failures; just verify the call succeeds
+      assert_kind_of String, result
+      assert result.length > 0
+    end
+
+    it 'handles empty ruby_descriptions' do
+      ruby_descriptions = {}
+      table = [['bench']]
+      format = ['%s']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      assert_kind_of String, result
+      assert result.start_with?("\n") # Should start with newline after empty descriptions
+    end
+
+    it 'preserves order of ruby_descriptions' do
+      ruby_descriptions = {
+        'ruby-a' => 'version A',
+        'ruby-b' => 'version B',
+        'ruby-c' => 'version C'
+      }
+      table = [['bench']]
+      format = ['%s']
+      bench_failures = {}
+
+      result = BenchmarkRunner.build_output_text(
+        ruby_descriptions, table, format, bench_failures
+      )
+
+      lines = result.lines
+      assert_includes lines[0], 'ruby-a: version A'
+      assert_includes lines[1], 'ruby-b: version B'
+      assert_includes lines[2], 'ruby-c: version C'
+    end
+  end
+
   describe '.render_graph' do
     it 'delegates to GraphRenderer and returns calculated png_path' do
       Dir.mktmpdir do |dir|
diff --git a/test/run_benchmarks_integration_test.rb b/test/run_benchmarks_integration_test.rb
index 2c21cb0a..f6bbaa7b 100644
--- a/test/run_benchmarks_integration_test.rb
+++ b/test/run_benchmarks_integration_test.rb
@@ -34,10 +34,11 @@
       File.write(File.join(tmpdir, 'output_001.csv'), 'test')
       File.write(File.join(tmpdir, 'output_002.csv'), 'test')
 
-      # The free_file_no function should find the next number
+      # The output_path function should find the next number
       require_relative '../lib/benchmark_runner'
-      file_no = BenchmarkRunner.free_file_no(tmpdir)
-      assert_equal 3, file_no
+      output_path = BenchmarkRunner.output_path(tmpdir)
+      expected_path = File.join(tmpdir, 'output_003')
+      assert_equal expected_path, output_path
     end
   end
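
Usage sketch (not part of the patch): after this refactor, run_benchmarks.rb drives the extracted helpers roughly as below. The variables args, ruby_descriptions, bench_data, table, format, and bench_failures are assumed to be built earlier in the script, as in the hunks above.

    output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)
    out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
    out_csv_path = BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
    output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
    out_txt_path = output_path + ".txt"
    File.open(out_txt_path, "w") { |f| f.write output_str }

Each writer appends its own extension to the shared base path and returns the full path it wrote, so callers never re-derive output file names.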