diff --git a/cmd/tools/modules/testing/common.v b/cmd/tools/modules/testing/common.v
index de9f5de486739c..4b2e743e776bd7 100644
--- a/cmd/tools/modules/testing/common.v
+++ b/cmd/tools/modules/testing/common.v
@@ -56,6 +56,11 @@ fn get_all_processes() []string {
 	}
 }
 
+pub enum ActionMode {
+	compile
+	compile_and_run
+}
+
 pub struct TestSession {
 pub mut:
 	files       []string
@@ -77,6 +82,8 @@ pub mut:
 	failed_cmds shared []string
 	reporter    Reporter = Reporter(NormalReporter{})
 	hash        string // used as part of the name of the temporary directory created for tests, to ease cleanup
+	//
+	exec_mode ActionMode = .compile // .compile_and_run only for `v test`
 }
 
 pub fn (mut ts TestSession) add_failed_cmd(cmd string) {
@@ -137,7 +144,7 @@ pub fn (mut ts TestSession) print_messages() {
 		// first send *all events* to the output reporter, so it can then process them however it wants:
 		ts.reporter.report(ts.nmessage_idx, rmessage)
 
-		if rmessage.kind in [.cmd_begin, .cmd_end] {
+		if rmessage.kind in [.cmd_begin, .cmd_end, .compile_begin, .compile_end] {
 			// The following events are sent before the test framework has determined
 			// what the full completion status is. They can also be repeated multiple times,
 			// for tests that are flaky and need repeating.
@@ -432,13 +439,20 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 	is_vet := ts.vargs.contains('vet')
 	produces_file_output := !(is_fmt || is_vet)
 
-	if relative_file.ends_with('js.v') {
+	if relative_file.ends_with('.js.v') {
 		if produces_file_output {
 			cmd_options << ' -b js'
 			run_js = true
 		}
 	}
 
+	if relative_file.ends_with('.c.v') {
+		if produces_file_output {
+			cmd_options << ' -b c'
+			run_js = false
+		}
+	}
+
 	if relative_file.contains('global') && !is_fmt {
 		cmd_options << ' -enable-globals'
 	}
@@ -451,6 +465,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 		flow_id: thread_id.str()
 	}
 	normalised_relative_file := relative_file.replace('\\', '/')
+
 	// Ensure that the generated binaries will be stored in a *unique*, fresh, per-test folder,
 	// inside the common session temporary folder, used for all the tests.
 	// This is done to provide a clean working environment, for each test, that will not contain
@@ -467,10 +482,15 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 		os.mkdir_all(test_folder_path) or {}
 	}
 	fname := os.file_name(file)
+	// There are test files ending with `_test.v`, `_test.c.v` and `_test.js.v`.
+	mut fname_without_extension := fname.all_before_last('.v')
+	if fname_without_extension.ends_with('.c') {
+		fname_without_extension = fname_without_extension.all_before_last('.c')
+	}
 	generated_binary_fname := if os.user_os() == 'windows' && !run_js {
-		fname.all_before_last('.v') + '.exe'
+		fname_without_extension + '.exe'
 	} else {
-		fname.all_before_last('.v')
+		fname_without_extension
 	}
 	generated_binary_fpath := os.join_path_single(test_folder_path, generated_binary_fname)
 	if produces_file_output {
@@ -479,18 +499,29 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 		}
 		cmd_options << ' -o ${os.quoted_path(generated_binary_fpath)}'
 	}
-	cmd := '${os.quoted_path(ts.vexe)} ${cmd_options.join(' ')} ${os.quoted_path(file)}'
+	defer {
+		if produces_file_output && ts.rm_binaries {
+			os.rmdir_all(test_folder_path) or {}
+		}
+	}
+
+	cmd := '${os.quoted_path(ts.vexe)} -skip-running ${cmd_options.join(' ')} ${os.quoted_path(file)}'
+	run_cmd := if run_js { 'node ${generated_binary_fpath}' } else { generated_binary_fpath }
 	ts.benchmark.step()
 	tls_bench.step()
 	if relative_file.replace('\\', '/') in ts.skip_files {
 		ts.benchmark.skip()
 		tls_bench.skip()
 		if !testing.hide_skips {
-			ts.append_message(.skip, tls_bench.step_message_skip(normalised_relative_file),
-				mtc)
+			ts.append_message(.skip, tls_bench.step_message_with_label_and_duration(benchmark.b_skip,
+				normalised_relative_file, 0,
+				preparation: 1 * time.microsecond
+			), mtc)
 		}
 		return pool.no_result
 	}
+	mut compile_cmd_duration := time.Duration(0)
+	mut cmd_duration := time.Duration(0)
 
 	if ts.show_stats {
 		ts.reporter.divider()
@@ -505,7 +536,7 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 		}
 
 		mut status := res.exit_code
-		mut cmd_duration := d_cmd.elapsed()
+		cmd_duration = d_cmd.elapsed()
 		ts.append_message_with_duration(.cmd_end, '', cmd_duration, mtc)
 
 		if status != 0 {
@@ -536,40 +567,49 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 					goto test_passed_system
 				}
 			}
-			// most probably compiler error
 			if res.output.contains(': error: ') {
 				ts.append_message(.cannot_compile, 'Cannot compile file ${file}', mtc)
 			}
-
 			ts.benchmark.fail()
 			tls_bench.fail()
 			ts.add_failed_cmd(cmd)
 			return pool.no_result
-		} else {
-			test_passed_system:
-			ts.benchmark.ok()
-			tls_bench.ok()
 		}
 	} else {
 		if testing.show_start {
 			ts.append_message(.info, '                 starting ${relative_file} ...', mtc)
 		}
-		ts.append_message(.cmd_begin, cmd, mtc)
-		d_cmd := time.new_stopwatch()
-		mut r := os.execute(cmd)
-		mut cmd_duration := d_cmd.elapsed()
-		ts.append_message_with_duration(.cmd_end, r.output, cmd_duration, mtc)
-
-		if r.exit_code < 0 {
+		ts.append_message(.compile_begin, cmd, mtc)
+		compile_d_cmd := time.new_stopwatch()
+		mut compile_r := os.execute(cmd)
+		compile_cmd_duration = compile_d_cmd.elapsed()
+		ts.append_message_with_duration(.compile_end, compile_r.output, compile_cmd_duration,
+			mtc)
+		if compile_r.exit_code != 0 {
 			ts.benchmark.fail()
 			tls_bench.fail()
-			ts.append_message_with_duration(.fail, tls_bench.step_message_fail(normalised_relative_file),
-				cmd_duration, mtc)
+			ts.append_message_with_duration(.fail, tls_bench.step_message_with_label_and_duration(benchmark.b_fail,
+				normalised_relative_file, cmd_duration,
+				preparation: compile_cmd_duration
+			), cmd_duration, mtc)
 			ts.add_failed_cmd(cmd)
 			return pool.no_result
 		}
+		tls_bench.step_restart()
+		ts.benchmark.step_restart()
+		if ts.exec_mode == .compile {
+			unsafe {
+				goto test_passed_execute
+			}
+		}
+		//
+		ts.append_message(.cmd_begin, run_cmd, mtc)
+		d_cmd := time.new_stopwatch()
+		mut r := os.execute(run_cmd)
+		cmd_duration = d_cmd.elapsed()
+		ts.append_message_with_duration(.cmd_end, r.output, cmd_duration, mtc)
 
 		if r.exit_code != 0 {
 			details := get_test_details(file)
 			os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
@@ -578,9 +618,9 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 					mtc)
 				os.setenv('VTEST_RETRY', '${retry}', true)
 
-				ts.append_message(.cmd_begin, cmd, mtc)
+				ts.append_message(.cmd_begin, run_cmd, mtc)
 				d_cmd_2 := time.new_stopwatch()
-				r = os.execute(cmd)
+				r = os.execute(run_cmd)
 				cmd_duration = d_cmd_2.elapsed()
 				ts.append_message_with_duration(.cmd_end, r.output, cmd_duration, mtc)
 
@@ -601,21 +641,24 @@ fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
 			ts.benchmark.fail()
 			tls_bench.fail()
 			ending_newline := if r.output.ends_with('\n') { '\n' } else { '' }
-			ts.append_message_with_duration(.fail, tls_bench.step_message_fail('${normalised_relative_file}\n${r.output.trim_space()}${ending_newline}'),
-				cmd_duration, mtc)
+			ts.append_message_with_duration(.fail, tls_bench.step_message_with_label_and_duration(benchmark.b_fail,
+				'${normalised_relative_file}\n${r.output.trim_space()}${ending_newline}',
+				cmd_duration,
+				preparation: compile_cmd_duration
+			), cmd_duration, mtc)
 			ts.add_failed_cmd(cmd)
-		} else {
-			test_passed_execute:
-			ts.benchmark.ok()
-			tls_bench.ok()
-			if !testing.hide_oks {
-				ts.append_message_with_duration(.ok, tls_bench.step_message_ok(normalised_relative_file),
-					cmd_duration, mtc)
-			}
+			return pool.no_result
 		}
 	}
-	if produces_file_output && ts.rm_binaries {
-		os.rmdir_all(test_folder_path) or {}
+	test_passed_system:
+	test_passed_execute:
+	ts.benchmark.ok()
+	tls_bench.ok()
+	if !testing.hide_oks {
+		ts.append_message_with_duration(.ok, tls_bench.step_message_with_label_and_duration(benchmark.b_ok,
+			normalised_relative_file, cmd_duration,
+			preparation: compile_cmd_duration
+		), cmd_duration, mtc)
 	}
 	return pool.no_result
 }
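Note (illustrative, not part of the patch): `exec_mode` defaults to `.compile`, so existing TestSession users keep the old compile-only behaviour, and only callers that opt in (like `v test` below) get the separate run phase. A minimal sketch of both modes, assuming code that can import the `testing` tool module from cmd/tools/modules, and a hypothetical `foo_test.v`:

    import testing

    fn main() {
        // Compile-only session (the default .compile exec_mode):
        mut ts := testing.new_test_session('-b c', true)
        ts.files << 'foo_test.v'
        ts.test()

        // Compile *and* run each produced test binary,
        // as `v test` and `v test-self` configure it below:
        mut ts2 := testing.new_test_session('-b c', true)
        ts2.exec_mode = .compile_and_run
        ts2.files << 'foo_test.v'
        ts2.test()
    }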
diff --git a/cmd/tools/modules/testing/output.v b/cmd/tools/modules/testing/output.v
index b3b569d282a084..be6ee04be3b567 100644
--- a/cmd/tools/modules/testing/output.v
+++ b/cmd/tools/modules/testing/output.v
@@ -3,6 +3,8 @@ module testing
 import time
 
 pub enum MessageKind {
+	compile_begin // sent right before *each* _test.v file compilation, the resulting status is not known yet, but the _test.v file itself is
+	compile_end   // sent right after *each* _test.v file compilation, the message contains the output of that compilation
 	cmd_begin // sent right before *each* _test.v file execution, the resulting status is not known yet, but the _test.v file itself is
 	cmd_end   // sent right after *each* _test.v file execution, the message contains the output of that execution
 	//
@@ -21,7 +23,7 @@ pub:
 	kind    MessageKind // see the MessageKind declaration above
 	file    string // the _test.v file that the message is about
 	when    time.Time // when was the message sent (messages are sent by the execution threads at the *end* of each event)
-	flow_id string // the messages of each thread, producing LogMessage, will have all the same unique flowid. Messages by other threads will have other flowid. If you use VJOBS=1 to serialise the execution, then all messages will have the same flowid.
+	flow_id string // the messages of each thread producing LogMessage will all have the same unique flow_id. Messages by other threads will have different flow_id values. If you use VJOBS=1 to serialise the execution, then all messages will have the same flow_id.
 	took    time.Duration // the duration of the event, that this message describes
 	message string // the actual message text; the result of the event, that the message describes; most reporters could ignore this, since it could be reconstructed by the other fields
 }
diff --git a/cmd/tools/modules/testing/output_normal.v b/cmd/tools/modules/testing/output_normal.v
index 674564f5033a7b..0427bdd6d6c922 100644
--- a/cmd/tools/modules/testing/output_normal.v
+++ b/cmd/tools/modules/testing/output_normal.v
@@ -1,5 +1,6 @@
 module testing
 
+import time
 import term
 
 pub const empty = term.header(' ', ' ')
@@ -8,6 +9,9 @@ pub const empty = term.header(' ', ' ')
 // It is used by default by `v test .`
 // It was extracted from the original non-customisable output implementation directly in cmd/tools/modules/testing/common.v
 pub struct NormalReporter {
+mut:
+	runtime  time.Duration
+	comptime time.Duration
 }
 
 pub fn (r NormalReporter) session_start(message string, mut ts TestSession) {
@@ -15,13 +19,19 @@ pub fn (r NormalReporter) session_start(message string, mut ts TestSession) {
 }
 
 pub fn (r NormalReporter) session_stop(message string, mut ts TestSession) {
-	println(ts.benchmark.total_message(message))
+	println('${ts.benchmark.total_message(message)} Comptime: ${r.comptime.microseconds() / 1000} ms. Runtime: ${r.runtime.microseconds() / 1000} ms.')
 }
 
 // the most general form; it may be useful for other reporters
 // in the normal one, it currently does nothing
-pub fn (r NormalReporter) report(index int, message LogMessage) {
+pub fn (mut r NormalReporter) report(index int, message LogMessage) {
 	// eprintln('> ${@METHOD} index: $index | message: $message')
+	if message.kind == .compile_end {
+		r.comptime += message.took
+	}
+	if message.kind == .cmd_end {
+		r.runtime += message.took
+	}
 }
 
 pub fn (r NormalReporter) report_stop() {
diff --git a/cmd/tools/vtest-self.v b/cmd/tools/vtest-self.v
index 5261992fa4a839..e7e1425b5f7262 100644
--- a/cmd/tools/vtest-self.v
+++ b/cmd/tools/vtest-self.v
@@ -352,6 +352,7 @@ fn main() {
 	}
 	testing.eheader(title)
 	mut tsession := testing.new_test_session(cmd_prefix, true)
+	tsession.exec_mode = .compile_and_run
 	tsession.files << all_test_files.filter(!it.contains('testdata' + os.path_separator))
 	tsession.skip_files << skip_test_files
 
diff --git a/cmd/tools/vtest.v b/cmd/tools/vtest.v
index 00ecf0f93522be..e2855d4f41cfba 100644
--- a/cmd/tools/vtest.v
+++ b/cmd/tools/vtest.v
@@ -32,9 +32,10 @@ fn main() {
 		exit(1)
 	}
 	backend_pos := args_before.index('-b')
-	backend := if backend_pos == -1 { '.c' } else { args_before[backend_pos + 1] } // this giant mess because closures are not implemented
+	backend := if backend_pos == -1 { '.c' } else { args_before[backend_pos + 1] }
 
 	mut ts := testing.new_test_session(args_before.join(' '), true)
+	ts.exec_mode = .compile_and_run
 	ts.fail_fast = ctx.fail_fast
 	for targ in args_after {
 		if os.is_dir(targ) {
@@ -152,6 +153,9 @@ fn (mut ctx Context) should_test(path string, backend string) ShouldTestStatus {
 	if path.ends_with('_test.v') {
 		return ctx.should_test_when_it_contains_matching_fns(path, backend)
 	}
+	if path.ends_with('_test.c.v') {
+		return ctx.should_test_when_it_contains_matching_fns(path, backend)
+	}
 	if path.ends_with('_test.js.v') {
 		if testing.is_node_present {
 			return ctx.should_test_when_it_contains_matching_fns(path, backend)
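Note (illustrative only): NormalReporter above simply sums the `took` durations of the two new event kinds. A custom reporter can aggregate them the same way; a minimal sketch, assuming it lives inside the same `testing` module, so that LogMessage is in scope (a complete Reporter would also need the remaining interface methods):

    module testing

    import time

    // TimingReporter is hypothetical; it only tracks the two totals.
    pub struct TimingReporter {
    mut:
        comptime time.Duration // sum of all .compile_end durations
        runtime  time.Duration // sum of all .cmd_end durations
    }

    pub fn (mut r TimingReporter) report(index int, message LogMessage) {
        match message.kind {
            .compile_end { r.comptime += message.took }
            .cmd_end { r.runtime += message.took }
            else {}
        }
    }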
diff --git a/vlib/benchmark/benchmark.v b/vlib/benchmark/benchmark.v
index c5c36114e04069..6103d3c97ba97f 100644
--- a/vlib/benchmark/benchmark.v
+++ b/vlib/benchmark/benchmark.v
@@ -70,6 +70,15 @@ pub fn (mut b Benchmark) step() {
 	}
 }
 
+// step_restart will restart the internal step timer.
+// Note that the step count will *stay the same*.
+// This method is useful when you want to do some optional preparation
+// after you have called .step(), so that the time for that optional
+// preparation will *not* be added to the duration of the step.
+pub fn (mut b Benchmark) step_restart() {
+	b.step_timer.restart()
+}
+
 // fail increases the fail count by 1 and stops the internal timer.
 pub fn (mut b Benchmark) fail() {
 	b.step_timer.stop()
@@ -140,8 +149,15 @@ pub fn (mut b Benchmark) record_measure(label string) i64 {
 	return res
 }
 
+// MessageOptions allows passing an optional preparation time to each of the label methods.
+// If it is set, the preparation (compile) time will be shown before the measured runtime.
+@[params]
+pub struct MessageOptions {
+	preparation time.Duration // the duration of the preparation time for the step
+}
+
 // step_message_with_label_and_duration returns a string describing the current step.
-pub fn (b &Benchmark) step_message_with_label_and_duration(label string, msg string, sduration time.Duration) string {
+pub fn (b &Benchmark) step_message_with_label_and_duration(label string, msg string, sduration time.Duration, opts MessageOptions) string {
 	timed_line := b.tdiff_in_ms(msg, sduration.microseconds())
 	if b.nexpected_steps > 1 {
 		mut sprogress := ''
@@ -170,34 +186,38 @@ pub fn (b &Benchmark) step_message_with_label_and_duration(label string, msg str
 				'${b.cstep:4d}/${b.nexpected_steps:4d}'
 			}
 		}
+		if opts.preparation > 0 {
+			return '${label:-5s} [${sprogress}] C: ${f64(opts.preparation.microseconds()) / 1_000.0:6.1F} ms, R: ${timed_line}'
+		}
 		return '${label:-5s} [${sprogress}] ${timed_line}'
 	}
 	return '${label:-5s}${timed_line}'
 }
 
 // step_message_with_label returns a string describing the current step using current time as duration.
-pub fn (b &Benchmark) step_message_with_label(label string, msg string) string {
-	return b.step_message_with_label_and_duration(label, msg, b.step_timer.elapsed())
+pub fn (b &Benchmark) step_message_with_label(label string, msg string, opts MessageOptions) string {
+	return b.step_message_with_label_and_duration(label, msg, b.step_timer.elapsed(),
+		opts)
 }
 
 // step_message returns a string describing the current step.
-pub fn (b &Benchmark) step_message(msg string) string {
-	return b.step_message_with_label('', msg)
+pub fn (b &Benchmark) step_message(msg string, opts MessageOptions) string {
+	return b.step_message_with_label('', msg, opts)
 }
 
 // step_message_ok returns a string describing the current step with a standard "OK" label.
-pub fn (b &Benchmark) step_message_ok(msg string) string {
-	return b.step_message_with_label(benchmark.b_ok, msg)
+pub fn (b &Benchmark) step_message_ok(msg string, opts MessageOptions) string {
+	return b.step_message_with_label(benchmark.b_ok, msg, opts)
 }
 
 // step_message_fail returns a string describing the current step with a standard "FAIL" label.
-pub fn (b &Benchmark) step_message_fail(msg string) string {
-	return b.step_message_with_label(benchmark.b_fail, msg)
+pub fn (b &Benchmark) step_message_fail(msg string, opts MessageOptions) string {
+	return b.step_message_with_label(benchmark.b_fail, msg, opts)
 }
 
 // step_message_skip returns a string describing the current step with a standard "SKIP" label.
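Note (usage sketch, not part of the patch): since `MessageOptions` is marked @[params], all existing call sites compile unchanged, while new call sites can pass the preparation (compile) time as a trailing parameter. For example, with a hypothetical measured `comptime` value:

    import benchmark
    import time

    fn main() {
        mut b := benchmark.new_benchmark()
        b.set_total_expected_steps(10) // the C:/R: split is shown only when the expected step count is known
        b.step()
        comptime := 2 * time.millisecond // hypothetical: the measured compile duration
        b.step_restart() // exclude that preparation from the step duration itself
        // ... run the compiled test here ...
        println(b.step_message_ok('foo_test.v', preparation: comptime))
        // prints something like: OK    [   1/  10] C:    2.0 ms, R:     0.012 ms foo_test.v
    }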
-pub fn (b &Benchmark) step_message_skip(msg string) string {
-	return b.step_message_with_label(benchmark.b_skip, msg)
+pub fn (b &Benchmark) step_message_skip(msg string, opts MessageOptions) string {
+	return b.step_message_with_label(benchmark.b_skip, msg, opts)
 }
 
 // total_message returns a string with total summary of the benchmark run.
@@ -221,7 +241,7 @@ pub fn (b &Benchmark) total_message(msg string) string {
 			njobs_label = ', on ${term.colorize(term.bold, b.njobs.str())} parallel jobs'
 		}
 	}
-	tmsg += '${b.ntotal} total. ${term.colorize(term.bold, 'Runtime:')} ${b.bench_timer.elapsed().microseconds() / 1000} ms${njobs_label}.\n'
+	tmsg += '${b.ntotal} total. ${term.colorize(term.bold, 'Elapsed time:')} ${b.bench_timer.elapsed().microseconds() / 1000} ms${njobs_label}.'
 	return tmsg
 }
diff --git a/vlib/v/help/build/build.txt b/vlib/v/help/build/build.txt
index 1cb9cbcaf85177..75e3d986218da5 100644
--- a/vlib/v/help/build/build.txt
+++ b/vlib/v/help/build/build.txt
@@ -149,6 +149,12 @@ NB: the build flags are shared with the run command too:
    -profile-no-inline
       Skip [inline] functions when profiling.
 
+   -skip-running
+      Skip the automatic running of a _test.v or .vsh file. Useful for debugging and testing.
+      V's testing program `v test` uses that option to measure and report independently the
+      comptime (compilation duration) of each _test.v file, and then the duration of its runtime.
+      Note: the same effect can be achieved by setting the environment variable VNORUN to 1.
+
    -skip-unused
       Skip generating C/JS code for functions, that are provably not used by your project.
       This speeds up compilation, and reduces the generated output size.
diff --git a/vlib/v/pref/pref.c.v b/vlib/v/pref/pref.c.v
index 16e7af9d3d9119..11babff7d561e1 100644
--- a/vlib/v/pref/pref.c.v
+++ b/vlib/v/pref/pref.c.v
@@ -427,6 +427,9 @@ pub fn parse_args_and_show_errors(known_external_commands []string, args []strin
 		'-silent' {
 			res.output_mode = .silent
 		}
+		'-skip-running' {
+			res.skip_running = true
+		}
 		'-cstrict' {
 			res.is_cstrict = true
 		}
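Note (usage, illustrative): outside of `v test`, the same two-phase flow can be driven by hand. Per the help text above, `-skip-running` (or setting VNORUN=1 in the environment) compiles a _test.v file without executing it; a sketch with hypothetical paths:

    import os

    fn main() {
        // Compile only; the new flag suppresses the automatic test run:
        compile_res := os.execute('v -skip-running -o foo_test_bin foo_test.v')
        if compile_res.exit_code != 0 {
            eprintln(compile_res.output)
            exit(1)
        }
        // Then execute the produced binary separately, timing it however you like:
        run_res := os.execute(os.quoted_path(os.real_path('foo_test_bin')))
        println(run_res.output)
    }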