Permalink
Browse files

Use benchmarks/testdata/configure for profiling.

This is the largest file, and it shows the bottlenecks more clearly now
that I've optimized the lexer and ASDL type construction.
  • Loading branch information...
Andy Chu
Andy Chu committed Nov 27, 2017
1 parent 37b0fc5 commit 268155d90296d1d3ea10a69d75ebf85e370d7c75
Showing with 19 additions and 14 deletions.
  1. +19 −14 benchmarks/pytrace.sh
View
@@ -13,38 +13,43 @@ set -o errexit
# Alpine's abuild: a large real-world shell script used as benchmark input.
readonly ABUILD=~/git/alpine/abuild/abuild
# Run abuild under oil's osh (executes it; -h just prints help quickly).
readonly -a RUN_ABUILD=(bin/oil.py osh $ABUILD -h)
# Parse-only (-n) invocation hard-coded to $ABUILD; suppress AST output.
readonly -a PARSE_ABUILD=(bin/oil.py osh --ast-format none -n $ABUILD)
# Parse-only invocation with the input file left off, so callers append it.
readonly -a OSH_PARSE=(bin/oil.py osh --ast-format none -n)
#
# Use Python's cProfile, which uses _lsprof. This is pretty fast.
#
# ~2.7 seconds (no tracing)
# Time a full run of abuild under osh (array defined at top of file).
time-run-abuild() { time "${RUN_ABUILD[@]}"; }
# Old: ~2.7 seconds (no tracing)
# 2017/11/27, After ASDL optimization: 0.72 seconds.
time-run-abuild() {
  # Time a full run of abuild under osh.
  time "${RUN_ABUILD[@]}"
}
time-parse-abuild() {
  # Time parsing (not executing) the abuild script.
  time "${OSH_PARSE[@]}" $ABUILD
}
# NOTE(review): a stale, truncated copy of the old time-parse-abuild() (its
# closing brace was lost in the diff merge) sat here and left the file with
# unbalanced braces; removed so the script parses.
# 3.8 seconds. So less than 2x overhead.
cprofile-osh-parse() {
  # Parse a shell script under Python's cProfile and save the stats.
  # $1: input script (default: $ABUILD)
  # $2: output .cprofile path (default: abuild.cprofile)
  local in=${1:-$ABUILD}
  local out=${2:-abuild.cprofile}
  time python -m cProfile -o $out "${OSH_PARSE[@]}" $in
  ls -l $out
}
# 3.8 seconds. So less than 2x overhead.
cprofile-parse-abuild() {
  # Profile parsing of the abuild script; writes _tmp/abuild.cprofile.
  # NOTE(review): the diff merge left the old inline cProfile body in front of
  # this delegating call, so profiling would have run twice; removed the stale
  # copy and kept the delegation the commit intended.
  cprofile-osh-parse $ABUILD _tmp/abuild.cprofile
}
cprofile-parse-configure() {
  # Profile parsing of the large configure script (best for finding hotspots).
  local input=benchmarks/testdata/configure
  local profile_out=_tmp/configure.cprofile
  cprofile-osh-parse "$input" "$profile_out"
}
# Yeah I understand from this why Chrome Tracing / Flame Graphs are better.
# This format doesn't respect the stack!
# cumtime: bin/oil.py is the top, obviously
print-cprofile() {
  # Dump a saved cProfile stats file, sorted by total time.
  # $1: path to a .cprofile file (default: _tmp/abuild.cprofile)
  # NOTE(review): the diff merge had left both the old hard-coded
  # pstats.Stats("abuild.cprofile") line and a stray closing quote in here,
  # giving an unterminated single-quoted string; reconstructed the
  # parameterized version the commit intended.
  local profile=${1:-_tmp/abuild.cprofile}
  python -c '
import pstats
import sys
p = pstats.Stats(sys.argv[1])
p.sort_stats("tottime").print_stats()
' $profile
}
#

0 comments on commit 268155d

Please sign in to comment.