Skip to content

Commit

Permalink
Logfile output from tests; summarise in make check
Browse files Browse the repository at this point in the history
Add an optional --logfile argument to std::test::test_main and to
compiletest.

Use these features and the new 'check-summary.py' script to
summarise all the tests performed by the 'check' target. This is
a short term fix for #2075.
  • Loading branch information
Grahame Bowland authored and brson committed Apr 4, 2012
1 parent 3aed498 commit 5cc050b
Show file tree
Hide file tree
Showing 6 changed files with 122 additions and 26 deletions.
1 change: 1 addition & 0 deletions mk/clean.mk
Expand Up @@ -31,6 +31,7 @@ clean-misc:
$(Q)rm -f $(CRATE_DEPFILES:%.d=%.d.tmp)
$(Q)rm -Rf $(DOCS)
$(Q)rm -Rf $(GENERATED)
$(Q)rm -f tmp/*.log
$(Q)rm -f rustllvm/$(CFG_RUSTLLVM) rustllvm/rustllvmbits.a
$(Q)rm -f rt/$(CFG_RUNTIME)
$(Q)find rustllvm rt -name '*.[odasS]' -delete
Expand Down
55 changes: 36 additions & 19 deletions mk/tests.mk
Expand Up @@ -66,9 +66,11 @@ endif
# Main test targets
######################################################################

check: tidy all check-stage2 \
check: tidy all check-stage2
$(S)src/etc/check-summary.py tmp/*.log

check-full: tidy all check-stage1 check-stage2 check-stage3 \
check-full: tidy all check-stage1 check-stage2 check-stage3
$(S)src/etc/check-summary.py tmp/*.log

# Run the tidy script in multiple parts to avoid huge 'echo' commands
ifdef CFG_NOTIDY
Expand Down Expand Up @@ -120,7 +122,6 @@ tidy:
| xargs -n 10 python $(S)src/etc/tidy.py
endif


######################################################################
# Extracting tests for docs
######################################################################
Expand Down Expand Up @@ -232,7 +233,8 @@ $(3)/test/coretest.stage$(1)-$(2)$$(X): \
check-stage$(1)-T-$(2)-H-$(3)-core-dummy: \
$(3)/test/coretest.stage$(1)-$(2)$$(X)
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-core.log

# Rules for the standard library test runner

Expand All @@ -245,7 +247,8 @@ $(3)/test/stdtest.stage$(1)-$(2)$$(X): \
check-stage$(1)-T-$(2)-H-$(3)-std-dummy: \
$(3)/test/stdtest.stage$(1)-$(2)$$(X)
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-std.log

# Rules for the rustc test runner

Expand All @@ -260,7 +263,8 @@ $(3)/test/rustctest.stage$(1)-$(2)$$(X): \
check-stage$(1)-T-$(2)-H-$(3)-rustc-dummy: \
$(3)/test/rustctest.stage$(1)-$(2)$$(X)
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rustc.log

# Rules for the rustdoc test runner

Expand All @@ -276,7 +280,8 @@ $(3)/test/rustdoctest.stage$(1)-$(2)$$(X): \
check-stage$(1)-T-$(2)-H-$(3)-rustdoc-dummy: \
$(3)/test/rustdoctest.stage$(1)-$(2)$$(X)
@$$(call E, run: $$<)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS)
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) $$(TESTARGS) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rustdoc.log

# Rules for the cfail/rfail/rpass/bench/perf test runner

Expand Down Expand Up @@ -365,87 +370,98 @@ check-stage$(1)-T-$(2)-H-$(3)-cfail-dummy: \
$$(CFAIL_TESTS)
@$$(call E, run cfail: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(CFAIL_ARGS$(1)-T-$(2)-H-$(3))
$$(CFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-cfail.log

check-stage$(1)-T-$(2)-H-$(3)-rfail-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(RFAIL_TESTS)
@$$(call E, run rfail: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(RFAIL_ARGS$(1)-T-$(2)-H-$(3))
$$(RFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rfail.log

check-stage$(1)-T-$(2)-H-$(3)-rpass-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(RPASS_TESTS)
@$$(call E, run rpass: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(RPASS_ARGS$(1)-T-$(2)-H-$(3))
$$(RPASS_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-rpass.log

check-stage$(1)-T-$(2)-H-$(3)-bench-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(BENCH_TESTS)
@$$(call E, run bench: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(BENCH_ARGS$(1)-T-$(2)-H-$(3))
$$(BENCH_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-bench.log

check-stage$(1)-T-$(2)-H-$(3)-perf-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(BENCH_TESTS)
@$$(call E, perf: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(PERF_ARGS$(1)-T-$(2)-H-$(3))
$$(PERF_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-perf.log

check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(RPASS_TESTS)
@$$(call E, run pretty-rpass: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(PRETTY_RPASS_ARGS$(1)-T-$(2)-H-$(3))
$$(PRETTY_RPASS_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass.log

check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(RFAIL_TESTS)
@$$(call E, run pretty-rfail: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(PRETTY_RFAIL_ARGS$(1)-T-$(2)-H-$(3))
$$(PRETTY_RFAIL_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail.log

check-stage$(1)-T-$(2)-H-$(3)-pretty-bench-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(BENCH_TESTS)
@$$(call E, run pretty-bench: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(PRETTY_BENCH_ARGS$(1)-T-$(2)-H-$(3))
$$(PRETTY_BENCH_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-bench.log

check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
$$(PRETTY_TESTS)
@$$(call E, run pretty-pretty: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(PRETTY_PRETTY_ARGS$(1)-T-$(2)-H-$(3))
$$(PRETTY_PRETTY_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty.log

check-stage$(1)-T-$(2)-H-$(3)-doc-tutorial-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
doc-tutorial-extract$(3)
@$$(call E, run doc-tutorial: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(DOC_TUTORIAL_ARGS$(1)-T-$(2)-H-$(3))
$$(DOC_TUTORIAL_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-doc-tutorial.log

check-stage$(1)-T-$(2)-H-$(3)-doc-ref-dummy: \
$$(HBIN$(1)_H_$(3))/compiletest$$(X) \
$$(SREQ$(1)_T_$(2)_H_$(3)) \
doc-ref-extract$(3)
@$$(call E, run doc-ref: $$<)
$$(Q)$$(call CFG_RUN_CTEST,$(1),$$<,$(3)) \
$$(DOC_REF_ARGS$(1)-T-$(2)-H-$(3))
$$(DOC_REF_ARGS$(1)-T-$(2)-H-$(3)) \
--logfile tmp/check-stage$(1)-T-$(2)-H-$(3)-doc-ref.log

endef

Expand Down Expand Up @@ -489,7 +505,8 @@ $(3)/test/$$(FT_DRIVER)-$(2)$$(X): \
$(3)/test/$$(FT_DRIVER)-$(2).out: \
$(3)/test/$$(FT_DRIVER)-$(2)$$(X) \
$$(SREQ2_T_$(2)_H_$(3))
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3))
$$(Q)$$(call CFG_RUN_TEST,$$<,$(2),$(3)) \
--logfile tmp/$$(FT_DRIVER)-$(2).log

check-fast-T-$(2)-H-$(3): tidy \
check-stage2-T-$(2)-H-$(3)-rustc \
Expand Down
3 changes: 3 additions & 0 deletions src/compiletest/common.rs
Expand Up @@ -33,6 +33,9 @@ type config = {
// Only run tests that match this filter
filter: option<str>,

// Write out a parseable log of tests that were run
logfile: option<str>,

// A command line to prefix program execution with,
// for running under valgrind
runtool: option<str>,
Expand Down
12 changes: 10 additions & 2 deletions src/compiletest/compiletest.rs
Expand Up @@ -35,7 +35,8 @@ fn parse_config(args: [str]) -> config {
getopts::reqopt("stage-id"),
getopts::reqopt("mode"), getopts::optflag("ignored"),
getopts::optopt("runtool"), getopts::optopt("rustcflags"),
getopts::optflag("verbose")];
getopts::optflag("verbose"),
getopts::optopt("logfile")];

check (vec::is_not_empty(args));
let args_ = vec::tail(args);
Expand All @@ -58,6 +59,7 @@ fn parse_config(args: [str]) -> config {
if vec::len(match.free) > 0u {
option::some(match.free[0])
} else { option::none },
logfile: getopts::opt_maybe_str(match, "logfile"),
runtool: getopts::opt_maybe_str(match, "runtool"),
rustcflags: getopts::opt_maybe_str(match, "rustcflags"),
verbose: getopts::opt_present(match, "verbose")};
Expand Down Expand Up @@ -121,7 +123,13 @@ fn test_opts(config: config) -> test::test_opts {
option::some(s) { option::some(s) }
option::none { option::none }
},
run_ignored: config.run_ignored}
run_ignored: config.run_ignored,
logfile:
alt config.logfile {
option::some(s) { option::some(s) }
option::none { option::none }
}
}
}

fn make_tests(config: config) -> [test::test_desc] {
Expand Down
32 changes: 32 additions & 0 deletions src/etc/check-summary.py
@@ -0,0 +1,32 @@
#!/usr/bin/env python

"""Summarise the per-test logfiles produced by the test runners.

Each logfile contains one line per test of the form 'status testname'
(status is one of "ok", "failed", "ignored").  Used by the 'check'
make target to print an overall pass/fail/ignore count.
"""

import sys


def summarise(fname):
    """Parse one logfile into a dict mapping status -> list of test names.

    Blank lines are skipped; a line with no space yields an empty test
    name rather than crashing on a malformed log.
    """
    summary = {}
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(fname) as fd:
        for line in fd:
            line = line.strip()
            if not line:
                continue
            status, _, test = line.partition(' ')
            summary.setdefault(status, []).append(test)
    return summary


def count(summaries, status):
    # Total number of tests with the given status across all logfiles.
    return sum(len(summary.get(status, [])) for _, summary in summaries)


def main(argv):
    """Read every logfile named on the command line and print a summary."""
    logfiles = argv[1:]
    summaries = [(fname, summarise(fname)) for fname in logfiles]
    ok = count(summaries, 'ok')
    failed = count(summaries, 'failed')
    ignored = count(summaries, 'ignored')
    print("summary of %d test logs: %d passed; %d failed; %d ignored" %
          (len(logfiles), ok, failed, ignored))
    # NOTE(review): like the original, this exits 0 even when tests failed;
    # callers relying on 'make check' semantics may want sys.exit(1) here.
    if failed > 0:
        print("failed tests:")
        for fname, summary in summaries:
            failures = summary.get('failed', [])
            if failures:
                print("    %s:" % fname)
                for test in failures:
                    print("        %s" % test)


if __name__ == '__main__':
    main(sys.argv)
45 changes: 40 additions & 5 deletions src/libstd/test.rs
Expand Up @@ -57,14 +57,15 @@ fn test_main(args: [str], tests: [test_desc]) {
if !run_tests_console(opts, tests) { fail "Some tests failed"; }
}

type test_opts = {filter: option<str>, run_ignored: bool};
type test_opts = {filter: option<str>, run_ignored: bool,
logfile: option<str>};

type opt_res = either<test_opts, str>;

// Parses command line arguments into test options
fn parse_opts(args: [str]) -> opt_res {
let args_ = vec::tail(args);
let opts = [getopts::optflag("ignored")];
let opts = [getopts::optflag("ignored"), getopts::optopt("logfile")];
let match =
alt getopts::getopts(args_, opts) {
ok(m) { m }
Expand All @@ -77,8 +78,10 @@ fn parse_opts(args: [str]) -> opt_res {
} else { option::none };

let run_ignored = getopts::opt_present(match, "ignored");
let logfile = getopts::opt_maybe_str(match, "logfile");

let test_opts = {filter: filter, run_ignored: run_ignored};
let test_opts = {filter: filter, run_ignored: run_ignored,
logfile: logfile};

ret either::left(test_opts);
}
Expand All @@ -87,6 +90,7 @@ enum test_result { tr_ok, tr_failed, tr_ignored, }

type console_test_state =
@{out: io::writer,
log_out: option<io::writer>,
use_color: bool,
mut total: uint,
mut passed: uint,
Expand All @@ -106,6 +110,12 @@ fn run_tests_console(opts: test_opts,
}
te_wait(test) { st.out.write_str(#fmt["test %s ... ", test.name]); }
te_result(test, result) {
alt st.log_out {
some(f) {
write_log(f, result, test);
}
none {}
}
alt result {
tr_ok {
st.passed += 1u;
Expand All @@ -128,8 +138,21 @@ fn run_tests_console(opts: test_opts,
}
}

let log_out = alt opts.logfile {
some(path) {
alt io::file_writer(path, [io::create, io::truncate]) {
result::ok(w) { some(w) }
result::err(s) {
fail(#fmt("can't open output file: %s", s))
}
}
}
none { none }
};

let st =
@{out: io::stdout(),
log_out: log_out,
use_color: use_color(),
mut total: 0u,
mut passed: 0u,
Expand All @@ -156,6 +179,15 @@ fn run_tests_console(opts: test_opts,

ret success;

// Append one machine-parseable "status testname" line for a finished
// test to the --logfile writer.  The status keywords emitted here
// ("ok" / "failed" / "ignored") are the tokens check-summary.py keys on.
fn write_log(out: io::writer, result: test_result, test: test_desc) {
out.write_line(#fmt("%s %s",
alt result {
tr_ok { "ok" }
tr_failed { "failed" }
tr_ignored { "ignored" }
}, test.name));
}

fn write_ok(out: io::writer, use_color: bool) {
write_pretty(out, "ok", term::color_green, use_color);
}
Expand Down Expand Up @@ -209,6 +241,7 @@ fn should_sort_failures_before_printing_them() {
let st =
@{out: writer,
log_out: option::none,
use_color: false,
mut total: 0u,
mut passed: 0u,
Expand Down Expand Up @@ -466,7 +499,8 @@ mod tests {
// When we run ignored tests the test filter should filter out all the
// unignored tests and flip the ignore flag on the rest to false

let opts = {filter: option::none, run_ignored: true};
let opts = {filter: option::none, run_ignored: true,
logfile: option::none};
let tests =
[{name: "1", fn: fn~() { }, ignore: true, should_fail: false},
{name: "2", fn: fn~() { }, ignore: false, should_fail: false}];
Expand All @@ -479,7 +513,8 @@ mod tests {

#[test]
fn sort_tests() {
let opts = {filter: option::none, run_ignored: false};
let opts = {filter: option::none, run_ignored: false,
logfile: option::none};

let names =
["sha1::test", "int::test_to_str", "int::test_pow",
Expand Down

0 comments on commit 5cc050b

Please sign in to comment.