common.v
module testing
import os
import os.cmdline
import time
import term
import benchmark
import sync
import sync.pool
import v.pref
import v.util.vtest
import runtime
pub const github_job = os.getenv('GITHUB_JOB')
pub const runner_os = os.getenv('RUNNER_OS') // GitHub runner OS
pub const show_start = os.getenv('VTEST_SHOW_START') == '1'
pub const hide_skips = os.getenv('VTEST_HIDE_SKIP') == '1'
pub const hide_oks = os.getenv('VTEST_HIDE_OK') == '1'
pub const fail_fast = os.getenv('VTEST_FAIL_FAST') == '1'
pub const fail_flaky = os.getenv('VTEST_FAIL_FLAKY') == '1'
pub const test_only = os.getenv('VTEST_ONLY').split_any(',')
pub const test_only_fn = os.getenv('VTEST_ONLY_FN').split_any(',')
// TODO: this !!!*reliably*!!! fails compilation of `v cmd/tools/vbuild-examples.v` with a cgen error, without `-no-parallel`:
// pub const fail_retry_delay_ms = os.getenv_opt('VTEST_FAIL_RETRY_DELAY_MS') or { '500' }.int() * time.millisecond
// Note, it works with `-no-parallel`, and it works when that whole expr is inside a function, like below:
pub const fail_retry_delay_ms = get_fail_retry_delay_ms()
pub const is_node_present = os.execute('node --version').exit_code == 0
pub const all_processes = get_all_processes()
pub const header_bytes_to_search_for_module_main = 500
fn get_fail_retry_delay_ms() time.Duration {
return os.getenv_opt('VTEST_FAIL_RETRY_DELAY_MS') or { '500' }.int() * time.millisecond
}
fn get_all_processes() []string {
$if windows {
// TODO
return []
} $else {
return os.execute('ps ax').output.split_any('\r\n')
}
}
pub struct TestSession {
pub mut:
files []string
skip_files []string
vexe string
vroot string
vtmp_dir string
vargs string
fail_fast bool
benchmark benchmark.Benchmark
rm_binaries bool = true
silent_mode bool
show_stats bool
progress_mode bool
root_relative bool // used by CI runs, so that the output is stable everywhere
nmessages chan LogMessage // many publishers, single consumer/printer
nmessage_idx int // currently printed message index
failed_cmds shared []string
reporter Reporter = Reporter(NormalReporter{})
hash string // used in the names of temporary directories and files, to prevent collisions, when tests create files and folders.
}
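
// add_failed_cmd records a failed command, so that it can be shown in the final list of failures.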
pub fn (mut ts TestSession) add_failed_cmd(cmd string) {
lock ts.failed_cmds {
ts.failed_cmds << cmd
}
}
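
// show_list_of_failed_tests passes all the recorded failed commands to the session reporter.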
pub fn (mut ts TestSession) show_list_of_failed_tests() {
rlock ts.failed_cmds {
ts.reporter.list_of_failed_commands(ts.failed_cmds)
}
}
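
// MessageThreadContext identifies the file and the flow (thread), that produced a given message.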
struct MessageThreadContext {
mut:
file string
flow_id string
}
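
// append_message queues a new LogMessage, to be shown by the message printing thread.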
fn (mut ts TestSession) append_message(kind MessageKind, msg string, mtc MessageThreadContext) {
ts.nmessages <- LogMessage{
file: mtc.file
flow_id: mtc.flow_id
message: msg
kind: kind
when: time.now()
}
}
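
// append_message_with_duration queues a new LogMessage, that also records how long the described command took.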
fn (mut ts TestSession) append_message_with_duration(kind MessageKind, msg string, d time.Duration, mtc MessageThreadContext) {
ts.nmessages <- LogMessage{
file: mtc.file
flow_id: mtc.flow_id
message: msg
kind: kind
when: time.now()
took: d
}
}
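
// session_start notifies the session reporter, that the test session is starting.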
pub fn (mut ts TestSession) session_start(message string) {
ts.reporter.session_start(message, mut ts)
}
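
// session_stop notifies the session reporter, that the test session is over.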
pub fn (mut ts TestSession) session_stop(message string) {
ts.reporter.session_stop(message, mut ts)
}
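
// print_messages is intended to run in a separate thread. It consumes the channel
// of log messages, forwards each of them to the session reporter, and stops, when
// it receives a .sentinel message.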
pub fn (mut ts TestSession) print_messages() {
mut test_idx := 0
mut print_msg_time := time.new_stopwatch()
for {
// get a message from the channel of messages to be printed:
mut rmessage := <-ts.nmessages
ts.nmessage_idx++
// first send *all events* to the output reporter, so it can then process them however it wants:
ts.reporter.report(ts.nmessage_idx, rmessage)
if rmessage.kind in [.cmd_begin, .cmd_end] {
// The following events are sent before the test framework has determined
// what the full completion status is. They can also be repeated multiple times,
// for tests that are flaky and need repeating.
continue
}
if rmessage.kind == .sentinel {
// a sentinel for stopping the printing thread
if !ts.silent_mode && ts.progress_mode {
ts.reporter.report_stop()
}
return
}
if rmessage.kind != .info {
// info events can also be repeated, and should not affect the test counter,
// that is used in the TMP placeholder replacements below:
test_idx++
}
msg := rmessage.message.replace_each([
'TMP1',
'${test_idx:1d}',
'TMP2',
'${test_idx:2d}',
'TMP3',
'${test_idx:3d}',
'TMP4',
'${test_idx:4d}',
])
is_ok := rmessage.kind == .ok
//
time_passed := print_msg_time.elapsed().seconds()
if time_passed > 10 && ts.silent_mode && is_ok {
// Even if OK tests are suppressed,
// show *at least* 1 result every 10 seconds,
// otherwise the CI can seem stuck ...
ts.reporter.progress(ts.nmessage_idx, msg)
print_msg_time.restart()
continue
}
if ts.progress_mode {
if is_ok && !ts.silent_mode {
ts.reporter.update_last_line(ts.nmessage_idx, msg)
} else {
ts.reporter.update_last_line_and_move_to_next(ts.nmessage_idx, msg)
}
continue
}
if !ts.silent_mode || !is_ok {
// normal expanded mode, or failures in -silent mode
ts.reporter.message(ts.nmessage_idx, msg)
continue
}
}
}
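
// new_test_session creates a new TestSession, given the V compiler arguments in
// `_vargs`. When `will_compile` is true, it also prepares a list of files, that
// are known to require skipping on the current OS/CI job.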
pub fn new_test_session(_vargs string, will_compile bool) TestSession {
mut skip_files := []string{}
if will_compile {
// Skip the call_v_from_c files. They need special instructions for compilation.
// Check the README.md for detailed information.
skip_files << 'examples/call_v_from_c/v_test_print.v'
skip_files << 'examples/call_v_from_c/v_test_math.v'
// Skip the compilation of the coroutines examples for now, since the Photon wrapper
// is only available on macos for now, and it is not yet trivial to
// build/install on the CI:
skip_files << 'examples/coroutines/simple_coroutines.v'
skip_files << 'examples/coroutines/coroutines_bench.v'
$if msvc {
skip_files << 'vlib/v/tests/const_comptime_eval_before_vinit_test.v' // _constructor used
skip_files << 'vlib/v/tests/project_with_cpp_code/compiling_cpp_files_with_a_cplusplus_compiler_test.v'
}
$if solaris {
skip_files << 'examples/gg/gg2.v'
skip_files << 'examples/pico/pico.v'
skip_files << 'examples/sokol/fonts.v'
skip_files << 'examples/sokol/drawing.v'
}
$if macos {
skip_files << 'examples/database/mysql.v'
skip_files << 'examples/database/orm.v'
skip_files << 'examples/database/psql/customer.v'
}
$if windows {
skip_files << 'examples/database/mysql.v'
skip_files << 'examples/database/orm.v'
skip_files << 'examples/smtp/mail.v' // requires OpenSSL
skip_files << 'examples/websocket/ping.v' // requires OpenSSL
skip_files << 'examples/websocket/client-server/client.v' // requires OpenSSL
skip_files << 'examples/websocket/client-server/server.v' // requires OpenSSL
skip_files << 'vlib/v/tests/websocket_logger_interface_should_compile_test.v' // requires OpenSSL
$if tinyc {
skip_files << 'examples/database/orm.v' // try fix it
}
}
$if windows {
// TODO: remove when closures on windows are supported...
skip_files << 'examples/pendulum-simulation/animation.v'
skip_files << 'examples/pendulum-simulation/full.v'
skip_files << 'examples/pendulum-simulation/parallel.v'
skip_files << 'examples/pendulum-simulation/parallel_with_iw.v'
skip_files << 'examples/pendulum-simulation/sequential.v'
if testing.github_job == 'tcc' {
// TODO: fix these by adding declarations for the missing functions in the prebuilt tcc
skip_files << 'vlib/net/mbedtls/mbedtls_compiles_test.v'
skip_files << 'vlib/net/ssl/ssl_compiles_test.v'
}
}
if testing.runner_os != 'Linux' || testing.github_job != 'tcc' {
skip_files << 'examples/c_interop_wkhtmltopdf.v' // needs installation of wkhtmltopdf from https://github.com/wkhtmltopdf/packaging/releases
skip_files << 'examples/call_v_from_python/test.v' // the example only makes sense to be compiled when python is installed
skip_files << 'examples/call_v_from_ruby/test.v' // the example only makes sense to be compiled when ruby is installed
skip_files << 'vlib/vweb/vweb_app_test.v' // imports the `sqlite` module, which in turn includes sqlite3.h
}
$if !macos {
skip_files << 'examples/macos_tray/tray.v'
}
if testing.github_job == 'ubuntu-docker-musl' {
skip_files << 'vlib/net/openssl/openssl_compiles_test.c.v'
skip_files << 'vlib/x/ttf/ttf_test.v'
}
if testing.github_job == 'tests-sanitize-memory-clang' {
skip_files << 'vlib/net/openssl/openssl_compiles_test.c.v'
}
if testing.github_job != 'misc-tooling' {
// These examples need .h files that are produced from the supplied .glsl files,
// using the shader compiler tools from https://github.com/floooh/sokol-tools-bin/archive/pre-feb2021-api-changes.tar.gz
skip_files << 'examples/sokol/simple_shader_glsl/simple_shader.v'
skip_files << 'examples/sokol/02_cubes_glsl/cube_glsl.v'
skip_files << 'examples/sokol/03_march_tracing_glsl/rt_glsl.v'
skip_files << 'examples/sokol/04_multi_shader_glsl/rt_glsl.v'
skip_files << 'examples/sokol/05_instancing_glsl/rt_glsl.v'
// Skip obj_viewer code in the CI
skip_files << 'examples/sokol/06_obj_viewer/show_obj.v'
// skip the audio examples too on most CI jobs
skip_files << 'examples/sokol/sounds/melody.v'
skip_files << 'examples/sokol/sounds/wav_player.v'
skip_files << 'examples/sokol/sounds/simple_sin_tones.v'
}
// examples/wasm/mandelbrot/mandelbrot.v requires special compilation flags: `-b wasm -os browser`, skip it for now:
skip_files << 'examples/wasm/mandelbrot/mandelbrot.v'
// TODO: always build the wasm_builder in the future, not just when it was built manually before:
wasm_builder_executable := $if !windows {
'cmd/tools/builders/wasm_builder'
} $else {
'cmd/tools/builders/wasm_builder.exe'
}
if !os.exists(wasm_builder_executable) {
skip_files << os.join_path('cmd/tools/builders/wasm_builder.v')
}
}
vargs := _vargs.replace('-progress', '')
vexe := pref.vexe_path()
vroot := os.dir(vexe)
hash := '${sync.thread_id().hex()}_${time.sys_mono_now()}'
new_vtmp_dir := setup_new_vtmp_folder(hash)
if term.can_show_color_on_stderr() {
os.setenv('VCOLORS', 'always', true)
}
mut ts := TestSession{
vexe: vexe
vroot: vroot
skip_files: skip_files
fail_fast: testing.fail_fast
show_stats: '-stats' in vargs.split(' ')
vargs: vargs
vtmp_dir: new_vtmp_dir
hash: hash
silent_mode: _vargs.contains('-silent')
progress_mode: _vargs.contains('-progress')
}
ts.handle_test_runner_option()
return ts
}
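
// handle_test_runner_option validates the `-test-runner` option, and selects the
// corresponding Reporter implementation for the session.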
fn (mut ts TestSession) handle_test_runner_option() {
test_runner := cmdline.option(os.args, '-test-runner', 'normal')
if test_runner !in pref.supported_test_runners {
eprintln('v test: `-test-runner ${test_runner}` is not one of the supported test runners: ${pref.supported_test_runners_list()}')
}
test_runner_implementation_file := os.join_path(ts.vroot, 'cmd/tools/modules/testing/output_${test_runner}.v')
if !os.exists(test_runner_implementation_file) {
eprintln('v test: using `-test-runner ${test_runner}` needs ${test_runner_implementation_file} to exist, and contain a valid testing.Reporter implementation for that runner. See `cmd/tools/modules/testing/output_dump.v` for an example.')
exit(1)
}
match test_runner {
'normal' {
// default, nothing to do
}
'dump' {
ts.reporter = DumpReporter{}
}
'teamcity' {
ts.reporter = TeamcityReporter{}
}
else {
dump('just set ts.reporter to an instance of your own struct here')
}
}
}
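
// init sorts the files of the session, and initialises its benchmark.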
pub fn (mut ts TestSession) init() {
ts.files.sort()
ts.benchmark = benchmark.new_benchmark_no_cstep()
}
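
// add registers a single file, that will be processed later by TestSession.test().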
pub fn (mut ts TestSession) add(file string) {
ts.files << file
}
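
// test runs all the added files, distributing them over a pool of worker threads,
// while a separate printing thread consumes and displays the produced messages.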
pub fn (mut ts TestSession) test() {
// Ensure that the .tmp.c files generated from compiling _test.v files
// are easy to delete at the end, *without* affecting the existing ones.
current_wd := os.getwd()
if current_wd == os.wd_at_startup && current_wd == ts.vroot {
ts.root_relative = true
}
//
ts.init()
mut remaining_files := []string{}
for dot_relative_file in ts.files {
file := os.real_path(dot_relative_file)
$if windows {
if file.contains('sqlite') || file.contains('httpbin') {
continue
}
}
$if !macos {
if file.contains('customer') {
continue
}
}
$if msvc {
if file.contains('asm') {
continue
}
}
remaining_files << dot_relative_file
}
remaining_files = vtest.filter_vtest_only(remaining_files, fix_slashes: false)
ts.files = remaining_files
ts.benchmark.set_total_expected_steps(remaining_files.len)
mut njobs := runtime.nr_jobs()
if remaining_files.len < njobs {
njobs = remaining_files.len
}
ts.benchmark.njobs = njobs
mut pool_of_test_runners := pool.new_pool_processor(callback: worker_trunner)
// ensure that the nmessages queue/channel has enough capacity for handling many messages across threads, without blocking
ts.nmessages = chan LogMessage{cap: 10000}
ts.nmessage_idx = 0
printing_thread := spawn ts.print_messages()
pool_of_test_runners.set_shared_context(ts)
ts.reporter.worker_threads_start(remaining_files, mut ts)
// all the testing happens here:
pool_of_test_runners.work_on_pointers(unsafe { remaining_files.pointers() })
//
ts.benchmark.stop()
ts.append_message(.sentinel, '', MessageThreadContext{ flow_id: '-1' }) // send the sentinel
printing_thread.wait()
ts.reporter.worker_threads_finish(mut ts)
ts.reporter.divider()
ts.show_list_of_failed_tests()
// cleanup generated .tmp.c files after successful tests:
if ts.benchmark.nfail == 0 {
if ts.rm_binaries {
os.rmdir_all(ts.vtmp_dir) or {}
}
}
// remove empty session folders:
if os.ls(ts.vtmp_dir) or { [] }.len == 0 {
os.rmdir_all(ts.vtmp_dir) or {}
}
}
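
// worker_trunner is the callback, that each worker thread from the pool runs for
// every test file. It compiles/runs the file, retries failures for files marked
// with `// vtest retry:`, and reports the results over the message channel.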
fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
mut ts := unsafe { &TestSession(p.get_shared_context()) }
if ts.fail_fast {
if ts.failed_cmds.len > 0 {
return pool.no_result
}
}
// tls_bench is used to format the step messages/timings
mut tls_bench := unsafe { &benchmark.Benchmark(p.get_thread_context(idx)) }
if isnil(tls_bench) {
tls_bench = benchmark.new_benchmark_pointer()
tls_bench.set_total_expected_steps(ts.benchmark.nexpected_steps)
p.set_thread_context(idx, tls_bench)
}
tls_bench.no_cstep = true
tls_bench.njobs = ts.benchmark.njobs
mut relative_file := os.real_path(p.get_item[string](idx))
mut cmd_options := [ts.vargs]
mut run_js := false
is_fmt := ts.vargs.contains('fmt')
is_vet := ts.vargs.contains('vet')
produces_file_output := !(is_fmt || is_vet)
if relative_file.ends_with('js.v') {
if produces_file_output {
cmd_options << ' -b js'
run_js = true
}
}
if relative_file.contains('global') && !is_fmt {
cmd_options << ' -enable-globals'
}
if ts.root_relative {
relative_file = relative_file.replace(ts.vroot + os.path_separator, '')
}
file := os.real_path(relative_file)
mtc := MessageThreadContext{
file: file
flow_id: thread_id.str()
}
normalised_relative_file := relative_file.replace('\\', '/')
// Ensure that the generated binaries will be stored in the temporary folder.
// Remove them after a test passes/fails.
fname := os.file_name(file)
generated_binary_fname := if os.user_os() == 'windows' && !run_js {
'${fname.all_before_last('.v')}_${ts.hash}.exe'
} else {
'${fname.all_before_last('.v')}_${ts.hash}'
}
generated_binary_fpath := os.join_path_single(ts.vtmp_dir, generated_binary_fname)
if produces_file_output {
if ts.rm_binaries {
os.rm(generated_binary_fpath) or {}
}
cmd_options << ' -o ${os.quoted_path(generated_binary_fpath)}'
}
cmd := '${os.quoted_path(ts.vexe)} ${cmd_options.join(' ')} ${os.quoted_path(file)}'
ts.benchmark.step()
tls_bench.step()
if relative_file.replace('\\', '/') in ts.skip_files {
ts.benchmark.skip()
tls_bench.skip()
if !testing.hide_skips {
ts.append_message(.skip, tls_bench.step_message_skip(normalised_relative_file),
mtc)
}
return pool.no_result
}
if ts.show_stats {
ts.reporter.divider()
ts.append_message(.cmd_begin, cmd, mtc)
d_cmd := time.new_stopwatch()
mut res := os.execute(cmd)
if res.exit_code != 0 {
eprintln(res.output)
} else {
println(res.output)
}
mut status := res.exit_code
mut cmd_duration := d_cmd.elapsed()
ts.append_message_with_duration(.cmd_end, '', cmd_duration, mtc)
if status != 0 {
details := get_test_details(file)
os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
for retry := 1; retry <= details.retry; retry++ {
ts.append_message(.info, ' [stats] retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...',
mtc)
os.setenv('VTEST_RETRY', '${retry}', true)
ts.append_message(.cmd_begin, cmd, mtc)
d_cmd_2 := time.new_stopwatch()
status = os.system(cmd)
cmd_duration = d_cmd_2.elapsed()
ts.append_message_with_duration(.cmd_end, '', cmd_duration, mtc)
if status == 0 {
unsafe {
goto test_passed_system
}
}
time.sleep(testing.fail_retry_delay_ms)
}
if details.flaky && !testing.fail_flaky {
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .',
mtc)
unsafe {
goto test_passed_system
}
}
// most probably a compiler error
if res.output.contains(': error: ') {
ts.append_message(.cannot_compile, 'Cannot compile file ${file}', mtc)
}
ts.benchmark.fail()
tls_bench.fail()
ts.add_failed_cmd(cmd)
return pool.no_result
} else {
test_passed_system:
ts.benchmark.ok()
tls_bench.ok()
}
} else {
if testing.show_start {
ts.append_message(.info, ' starting ${relative_file} ...',
mtc)
}
ts.append_message(.cmd_begin, cmd, mtc)
d_cmd := time.new_stopwatch()
mut r := os.execute(cmd)
mut cmd_duration := d_cmd.elapsed()
ts.append_message_with_duration(.cmd_end, r.output, cmd_duration, mtc)
if r.exit_code < 0 {
ts.benchmark.fail()
tls_bench.fail()
ts.append_message_with_duration(.fail, tls_bench.step_message_fail(normalised_relative_file),
cmd_duration, mtc)
ts.add_failed_cmd(cmd)
return pool.no_result
}
if r.exit_code != 0 {
details := get_test_details(file)
os.setenv('VTEST_RETRY_MAX', '${details.retry}', true)
for retry := 1; retry <= details.retry; retry++ {
ts.append_message(.info, ' retrying ${retry}/${details.retry} of ${relative_file} ; known flaky: ${details.flaky} ...',
mtc)
os.setenv('VTEST_RETRY', '${retry}', true)
ts.append_message(.cmd_begin, cmd, mtc)
d_cmd_2 := time.new_stopwatch()
r = os.execute(cmd)
cmd_duration = d_cmd_2.elapsed()
ts.append_message_with_duration(.cmd_end, r.output, cmd_duration, mtc)
if r.exit_code == 0 {
unsafe {
goto test_passed_execute
}
}
time.sleep(testing.fail_retry_delay_ms)
}
if details.flaky && !testing.fail_flaky {
ts.append_message(.info, ' *FAILURE* of the known flaky test file ${relative_file} is ignored, since VTEST_FAIL_FLAKY is 0 . Retry count: ${details.retry} .',
mtc)
unsafe {
goto test_passed_execute
}
}
ts.benchmark.fail()
tls_bench.fail()
ending_newline := if r.output.ends_with('\n') { '\n' } else { '' }
ts.append_message_with_duration(.fail, tls_bench.step_message_fail('${normalised_relative_file}\n${r.output.trim_space()}${ending_newline}'),
cmd_duration, mtc)
ts.add_failed_cmd(cmd)
} else {
test_passed_execute:
ts.benchmark.ok()
tls_bench.ok()
if !testing.hide_oks {
ts.append_message_with_duration(.ok, tls_bench.step_message_ok(normalised_relative_file),
cmd_duration, mtc)
}
}
}
if produces_file_output && ts.rm_binaries {
os.rm(generated_binary_fpath) or {}
}
return pool.no_result
}
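
// vlib_should_be_present exits with an error, if `parent_dir` does not contain a vlib/ subfolder.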
pub fn vlib_should_be_present(parent_dir string) {
vlib_dir := os.join_path_single(parent_dir, 'vlib')
if !os.is_dir(vlib_dir) {
eprintln('${vlib_dir} is missing, it must be next to the V executable')
exit(1)
}
}
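
// prepare_test_session creates a new TestSession for all the .v files found in
// `folder` (ignoring testdata/, modules/ and preludes/). Files from `oskipped`,
// and files that do not belong to `module main`, are marked as skipped.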
pub fn prepare_test_session(zargs string, folder string, oskipped []string, main_label string) TestSession {
vexe := pref.vexe_path()
parent_dir := os.dir(vexe)
vlib_should_be_present(parent_dir)
vargs := zargs.replace(vexe, '')
eheader(main_label)
if vargs.len > 0 {
eprintln('v compiler args: "${vargs}"')
}
mut session := new_test_session(vargs, true)
files := os.walk_ext(os.join_path_single(parent_dir, folder), '.v')
mut mains := []string{}
mut skipped := oskipped.clone()
next_file: for f in files {
fnormalised := f.replace('\\', '/')
// Note: `testdata` is the preferred name for a folder that contains V code,
// which you *do not want* the test framework to find incidentally, for example
// module import tests, or subtests that are compiled/run by other parent tests
// in specific configurations.
if fnormalised.contains('testdata/') || fnormalised.contains('modules/')
|| fnormalised.contains('preludes/') {
continue
}
$if windows {
// skip process/command examples on windows. TODO: remove the need for this, fix os.Command
if fnormalised.ends_with('examples/process/command.v') {
continue
}
}
c := os.read_file(f) or { panic(err) }
start := c#[0..testing.header_bytes_to_search_for_module_main]
if start.contains('module ') && !start.contains('module main') {
skipped_f := f.replace(os.join_path_single(parent_dir, ''), '')
skipped << skipped_f
}
for skip_prefix in oskipped {
skip_folder := skip_prefix + '/'
if fnormalised.starts_with(skip_folder) {
continue next_file
}
}
mains << f
}
session.files << mains
session.skip_files << skipped
return session
}
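
// FnTestSetupCb is the type of the callback, used by v_build_failing_skipped, to
// let its callers customize the test session, before it is run.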
pub type FnTestSetupCb = fn (mut session TestSession)
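
// v_build_failing_skipped builds all the .v files in `folder`, except the ones in
// `oskipped`, and returns true, if at least one of the builds failed.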
pub fn v_build_failing_skipped(zargs string, folder string, oskipped []string, cb FnTestSetupCb) bool {
main_label := 'Building ${folder} ...'
finish_label := 'building ${folder}'
mut session := prepare_test_session(zargs, folder, oskipped, main_label)
cb(mut session)
session.test()
eprintln(session.benchmark.total_message(finish_label))
return session.failed_cmds.len > 0
}
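
// build_v_cmd_failed runs the given build command, and returns true, if it failed.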
pub fn build_v_cmd_failed(cmd string) bool {
res := os.execute(cmd)
if res.exit_code < 0 {
return true
}
if res.exit_code != 0 {
eprintln('')
eprintln(res.output)
return true
}
return false
}
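
// building_any_v_binaries_failed rebuilds the V compiler itself in several common
// configurations (-g, -prod -g, -cg, -prod -cg, -prod), and returns true, if any
// of the builds failed.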
pub fn building_any_v_binaries_failed() bool {
eheader('Building V binaries...')
eprintln('VFLAGS is: "' + os.getenv('VFLAGS') + '"')
vexe := pref.vexe_path()
parent_dir := os.dir(vexe)
vlib_should_be_present(parent_dir)
os.chdir(parent_dir) or { panic(err) }
mut failed := false
v_build_commands := ['${vexe} -o v_g -g cmd/v',
'${vexe} -o v_prod_g -prod -g cmd/v', '${vexe} -o v_cg -cg cmd/v',
'${vexe} -o v_prod_cg -prod -cg cmd/v', '${vexe} -o v_prod -prod cmd/v']
mut bmark := benchmark.new_benchmark()
for cmd in v_build_commands {
bmark.step()
if build_v_cmd_failed(cmd) {
bmark.fail()
failed = true
eprintln(bmark.step_message_fail('command: ${cmd} . See details above ^^^^^^^'))
eprintln('')
continue
}
bmark.ok()
if !testing.hide_oks {
eprintln(bmark.step_message_ok('command: ${cmd}'))
}
}
bmark.stop()
eprintln(term.h_divider('-'))
eprintln(bmark.total_message('building v binaries'))
return failed
}

// setup_new_vtmp_folder creates a new nested folder inside VTMP, then resets VTMP
// to it, so that V programs/tests will write their temporary files to the new
// location. The new nested folder and its contents will be removed after all
// tests/programs succeed.
pub fn setup_new_vtmp_folder(hash string) string {
new_vtmp_dir := os.join_path(os.vtmp_dir(), 'tsession_${hash}')
os.mkdir_all(new_vtmp_dir) or { panic(err) }
os.setenv('VTMP', new_vtmp_dir, true)
return new_vtmp_dir
}
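
// TestDetails contains the per-file overrides, that are parsed from magic comments
// like `// vtest retry: 3` and `// vtest flaky: true` inside a test file.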
pub struct TestDetails {
pub mut:
retry int
flaky bool // when flaky tests fail, the whole run is still considered successful, unless VTEST_FAIL_FLAKY is 1
}
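
// get_test_details parses the `// vtest retry:` and `// vtest flaky:` magic
// comments from the given file.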
pub fn get_test_details(file string) TestDetails {
mut res := TestDetails{}
lines := os.read_lines(file) or { [] }
for line in lines {
if line.starts_with('// vtest retry:') {
res.retry = line.all_after(':').trim_space().int()
}
if line.starts_with('// vtest flaky:') {
res.flaky = line.all_after(':').trim_space().bool()
}
}
return res
}
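
// find_started_process returns the first line from the process snapshot (taken
// once at startup), that matches `pname`, or an error, if there is no match.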
pub fn find_started_process(pname string) !string {
for line in testing.all_processes {
if line.contains(pname) {
return line
}
}
return error('could not find process matching ${pname}')
}
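
// eheader prints `msg` as a left-aligned header line to stderr.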
pub fn eheader(msg string) {
eprintln(term.header_left(msg, '-'))
}
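
// header prints `msg` as a left-aligned header line to stdout, then flushes stdout.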
pub fn header(msg string) {
println(term.header_left(msg, '-'))
flush_stdout()
}