@@ -18,10 +18,6 @@ source test/common.sh
 
 export PYTHONPATH=. # current dir
 
-one() {
-  "$@"
-}
-
 # For auto-complete
 unit() {
   #$py "$@"
@@ -46,97 +42,131 @@ banner() {
   echo ----
 }
 
-# geez wtf!
-repro() {
-  rm -v *.pytrace || true
-  delete-pyc
-
-  #local t=osh/cmd_parse_test.py
-  #local t='native/fastlex_test.py LexTest.testBug'
-  local t='core/id_kind_test.py TokensTest.testEquality'
-
-  # with shebang
-  #py=''
-  local py='_devbuild/cpython-instrumented/python'
-
-  #local prefix='uftrace record -d one.uftrace'
-  local prefix=''
-  #local prefix='gdb --args'
-
-  set +o errexit
-
-  banner 'FIRST'
-  $prefix $py $t
-  local first=$?
-
-  banner 'SECOND'
-  # Fails the second time
-  $prefix $py $t
-  local second=$?
-
-  echo "first $first second $second"
-  #$PY_273 -V
-}
-
-_log-one() {
-  local name=$1
-  $name > _tmp/unit/${name}.log.txt 2>&1
-}
-
-_all() {
-  mkdir -p _tmp/unit
-
-  # NOTE: build and test have small unit tests
-  # TODO: Add opy.
-  for t in {build,test,native,asdl,core,osh,tools}/*_test.py; do
-    # NOTE: This test hasn't passed in awhile. It uses strings as output.
-    if [[ $t == *arith_parse_test.py ]]; then
-      continue
-    fi
-    echo $t
-
-    mkdir -p _tmp/unit/$(dirname $t)
-    run-task-with-status _tmp/unit/${t}.task.txt $0 _log-one $t
-  done
-}
-
-# spec-runner looks at .task.txt and .stats.txt. We don't need that. We just
-# time, status, and a link to the .txt file.
-_html-summary() {
-  find _tmp/unit -name '*.task.txt' | awk '
-  { path = $0
-    getline < path
-    status = $1
-    wall_secs = $2
-
-    if (status == 0) {
-      num_passed += 1
-    } else {
-      num_failed = 1
-      print path " failed"
-    }
-  }
-  END {
-    if (num_failed == 0) {
-      print ""
-      print "ALL " num_passed " TESTS PASSED"
-    } else {
-      exit(1) # Fail
-    }
-  }
-  '
-}
-
-html-summary() {
-  _html-summary
-}
-
-all() {
-  time $0 _all
-  html-summary
-  # TODO: Also get rid of osh highlighting!
-}
+tests-to-run() {
+  # TODO: Add opy.
+  for t in {build,test,native,asdl,core,osh,tools}/*_test.py; do
+    # NOTE: This test hasn't passed in awhile. It uses strings as output.
+    if [[ $t == *arith_parse_test.py ]]; then
+      continue
+    fi
+    echo $t
+  done
+}
+
+# Exits 255 if a test fails.
+run-test-and-maybe-abort() {
+  local t=$1
+  echo
+  echo "[$t]"
+  if ! $t >/dev/null; then
+    echo
+    echo "*** $t FAILED ***"
+    echo
+    return 255 # xargs aborts
+  fi
+  #echo "OK $t"
+}
+
+all() {
+  time tests-to-run | xargs -n 1 -- $0 run-test-and-maybe-abort
+  echo
+  echo "All unit tests passed."
+}
+
+# TODO: Use benchmarks/time.py to make a table.
+# all should just enumerate tasks
+#
+# tests-to-run | xargs -n 1 $0 check-for-success
+
+run-test-and-log() {
+  local tasks_csv=$1
+  local t=$2
+
+  # NOTE: $t is assumed to be a relative path here!
+  local log=_tmp/unit/$t.txt
+  mkdir -p $(dirname $log)
+
+  benchmarks/time.py --out $tasks_csv \
+    --field $t --field "$t.txt" -- \
+    $t > $log
+}
+
+run-for-release() {
+  local out_dir=_tmp/unit
+  mkdir -p $out_dir
+  rm -r -f $out_dir/*
+
+  local tasks_csv=$out_dir/TASKS.csv
+
+  local status=0
+
+  # TODO: I need to write a schema too? Or change csv2html.py to support HREF
+  # in NullSchema.
+
+  echo 'status,elapsed_secs,test,test_HREF' > $tasks_csv
+  time tests-to-run | xargs -n 1 -- $0 run-test-and-log $tasks_csv || status=1
+
+  if test $status -ne 0; then
+    cat $tasks_csv
+    echo
+    echo "*** Some tests failed. See $tasks_csv ***"
+    echo
+
+    return $status
+  fi
+
+  #tree _tmp/unit
+  echo
+  echo "All unit tests passed."
+}
+
+source benchmarks/common.sh
+
+# TODO: It would be nice to have timestamps of the underlying CSV files and
+# timestamp of running the report. This is useful for benchmarks too.
+
+print-report() {
+  local in_dir=${1:-_tmp/unit}
+  local base_url='../../web'
+
+  # NOTE: Using benchmarks for now.
+  cat <<EOF
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>Unit Test Results</title>
+    <script type="text/javascript" src="$base_url/table/table-sort.js"></script>
+    <link rel="stylesheet" type="text/css" href="$base_url/table/table-sort.css" />
+    <link rel="stylesheet" type="text/css" href="$base_url/benchmarks.css" />
+
+  </head>
+  <body>
+    <p id="home-link">
+      <a href="/">oilshell.org</a>
+    </p>
+    <h2>Unit Test Results</h2>
+
+EOF
+  csv2html $in_dir/TASKS.csv
+
+  cat <<EOF
+  </body>
+</html>
+EOF
+}
+
+# Presentation changes:
+#
+# - elapsed seconds -> milliseconds
+# - Need a link to the log for the test name (done, but no schema)
+# - schema for right-justifying numbers
+
+write-report() {
+  local out=_tmp/unit/index.html
+  print-report > $out
+  echo "Wrote $out"
+}
 
 "$@"
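
For reference, a minimal sketch of how the new CSV-based flow above would be driven by hand, assuming this diff applies to test/unit.sh and that benchmarks/time.py, csv2html, and the web/ assets exist as referenced in the hunk:

  test/unit.sh run-for-release   # runs every test via run-test-and-log; writes _tmp/unit/TASKS.csv and one .txt log per test
  test/unit.sh write-report      # print-report renders _tmp/unit/index.html from TASKS.csv via csv2html

For quick local runs, `test/unit.sh all` is the shorter path: tests-to-run is piped to xargs, and run-test-and-maybe-abort returns 255 on failure so xargs stops at the first failing test.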