testing: implement -benchtime=100x
When running benchmarks with profilers and trying to
compare one run against another, it is very useful to be
able to force each run to execute exactly the same number
of iterations.

Discussion on the proposal issue #24735 led to the decision
to overload -benchtime, so that instead of saying
-benchtime 10s to run a benchmark for 10 seconds,
you say -benchtime 100x to run a benchmark 100 times.
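
To make the new syntax concrete, the motivating workflow looks
something like this (the benchmark name and profile file names are
invented for illustration):

	go test -bench=BenchmarkDecode -benchtime=100x -cpuprofile=old.prof
	(edit the code under test)
	go test -bench=BenchmarkDecode -benchtime=100x -cpuprofile=new.prof

Both runs execute exactly 100 iterations, so the two profiles can be
compared directly instead of being normalized by iteration count.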

Fixes #24735.

Change-Id: Id17c5bd18bd09987bb48ed12420d61ae9e200fd7
Reviewed-on: https://go-review.googlesource.com/c/139258
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
rsc committed Oct 12, 2018
1 parent 56131cb commit 8e0aea1
Showing 4 changed files with 63 additions and 20 deletions.
2 changes: 2 additions & 0 deletions src/cmd/go/alldocs.go

(Generated file; diff not rendered on the commit page.)

2 changes: 2 additions & 0 deletions src/cmd/go/internal/test/test.go
@@ -212,6 +212,8 @@ const testFlag2 = `
 	    Run enough iterations of each benchmark to take t, specified
 	    as a time.Duration (for example, -benchtime 1h30s).
 	    The default is 1 second (1s).
+	    The special syntax Nx means to run the benchmark N times
+	    (for example, -benchtime 100x).
 
 	-count n
 	    Run each test and benchmark n times (default 1).
75 changes: 57 additions & 18 deletions src/testing/benchmark.go
@@ -10,15 +10,50 @@ import (
 	"internal/race"
 	"os"
 	"runtime"
+	"strconv"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
 )
 
 var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
-var benchTime = flag.Duration("test.benchtime", 1*time.Second, "run each benchmark for duration `d`")
+var benchTime = benchTimeFlag{d: 1 * time.Second}
 var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
 
+func init() {
+	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
+}
+
+type benchTimeFlag struct {
+	d time.Duration
+	n int
+}
+
+func (f *benchTimeFlag) String() string {
+	if f.n > 0 {
+		return fmt.Sprintf("%dx", f.n)
+	}
+	return time.Duration(f.d).String()
+}
+
+func (f *benchTimeFlag) Set(s string) error {
+	if strings.HasSuffix(s, "x") {
+		n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
+		if err != nil || n <= 0 {
+			return fmt.Errorf("invalid count")
+		}
+		*f = benchTimeFlag{n: int(n)}
+		return nil
+	}
+	d, err := time.ParseDuration(s)
+	if err != nil || d <= 0 {
+		return fmt.Errorf("invalid duration")
+	}
+	*f = benchTimeFlag{d: d}
+	return nil
+}
+
 // Global lock to ensure only one benchmark runs at a time.
 var benchmarkLock sync.Mutex
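
As a standalone sketch (not part of this commit), the following program
exercises the Set method above on a few inputs; the driver loop and
sample strings are invented for illustration:

	package main

	import (
		"fmt"
		"strconv"
		"strings"
		"time"
	)

	// benchTimeFlag and Set are copied from the diff above.
	type benchTimeFlag struct {
		d time.Duration
		n int
	}

	func (f *benchTimeFlag) Set(s string) error {
		if strings.HasSuffix(s, "x") {
			n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
			if err != nil || n <= 0 {
				return fmt.Errorf("invalid count")
			}
			*f = benchTimeFlag{n: int(n)}
			return nil
		}
		d, err := time.ParseDuration(s)
		if err != nil || d <= 0 {
			return fmt.Errorf("invalid duration")
		}
		*f = benchTimeFlag{d: d}
		return nil
	}

	func main() {
		for _, s := range []string{"100x", "1h30s", "0x", "junk"} {
			var f benchTimeFlag
			err := f.Set(s)
			// "100x" sets n=100; "1h30s" sets d=1h0m30s;
			// "0x" and "junk" are rejected.
			fmt.Printf("%q -> n=%d d=%v err=%v\n", s, f.n, f.d, err)
		}
	}

Registering the value with flag.Var, as the init function above does,
is what lets the single -test.benchtime flag accept either form.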

@@ -53,7 +88,7 @@ type B struct {
 	previousN        int           // number of iterations in the previous run
 	previousDuration time.Duration // total duration of the previous run
 	benchFunc        func(b *B)
-	benchTime        time.Duration
+	benchTime        benchTimeFlag
 	bytes            int64
 	missingBytes     bool // one of the subbenchmarks does not have bytes set.
 	timerOn          bool
@@ -273,21 +308,25 @@ func (b *B) launch() {
 	}()
 
 	// Run the benchmark for at least the specified amount of time.
-	d := b.benchTime
-	for n := 1; !b.failed && b.duration < d && n < 1e9; {
-		last := n
-		// Predict required iterations.
-		n = int(d.Nanoseconds())
-		if nsop := b.nsPerOp(); nsop != 0 {
-			n /= int(nsop)
+	if b.benchTime.n > 0 {
+		b.runN(b.benchTime.n)
+	} else {
+		d := b.benchTime.d
+		for n := 1; !b.failed && b.duration < d && n < 1e9; {
+			last := n
+			// Predict required iterations.
+			n = int(d.Nanoseconds())
+			if nsop := b.nsPerOp(); nsop != 0 {
+				n /= int(nsop)
+			}
+			// Run more iterations than we think we'll need (1.2x).
+			// Don't grow too fast in case we had timing errors previously.
+			// Be sure to run at least one more than last time.
+			n = max(min(n+n/5, 100*last), last+1)
+			// Round up to something easy to read.
+			n = roundUp(n)
+			b.runN(n)
 		}
-		// Run more iterations than we think we'll need (1.2x).
-		// Don't grow too fast in case we had timing errors previously.
-		// Be sure to run at least one more than last time.
-		n = max(min(n+n/5, 100*last), last+1)
-		// Round up to something easy to read.
-		n = roundUp(n)
-		b.runN(n)
 	}
 	b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
 }
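
As a rough standalone simulation (invented for illustration, not the
commit's code) of the time-based ramp in the else branch, assume a
benchmark costing a constant 10µs per iteration under the default
-benchtime of 1s:

	package main

	import "fmt"

	func minInt(a, b int) int {
		if a < b {
			return a
		}
		return b
	}

	func maxInt(a, b int) int {
		if a > b {
			return a
		}
		return b
	}

	func main() {
		const nsop = 10000   // assumed cost: 10µs per iteration
		const d = 1000000000 // -benchtime 1s, in nanoseconds
		duration := nsop     // pretend the initial n=1 run happened
		for n := 1; duration < d && n < 1e9; {
			last := n
			n = d / nsop // predict required iterations
			// Grow ~1.2x but cap at 100*last, as in the loop above.
			// (The real loop also rounds n up via roundUp.)
			n = maxInt(minInt(n+n/5, 100*last), last+1)
			duration = n * nsop // pretend we ran n iterations
			fmt.Printf("run %d iterations (simulated total %dms)\n", n, duration/1000000)
		}
	}

This ramps 1 -> 100 -> 10000 -> 120000 and stops once the simulated
total exceeds 1s; with -benchtime=100x the new branch instead calls
b.runN(100) exactly once, which is what makes profiled runs repeatable.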
@@ -416,7 +455,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, error), benchmarks []InternalBenchmark) bool {
 				b.Run(Benchmark.Name, Benchmark.F)
 			}
 		},
-		benchTime: *benchTime,
+		benchTime: benchTime,
 		context:   ctx,
 	}
 	main.runN(1)
@@ -653,7 +692,7 @@ func Benchmark(f func(b *B)) BenchmarkResult {
 			w:      discard{},
 		},
 		benchFunc: f,
-		benchTime: *benchTime,
+		benchTime: benchTime,
 	}
 	if b.run1() {
 		b.run()
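
For context, here is a minimal standalone use of the Benchmark entry
point changed above (the benchmarked body is invented; with default
flags b.N comes from the timed loop, and after flag.Parse with
-test.benchtime=100x it would be exactly 100):

	package main

	import (
		"fmt"
		"testing"
	)

	func main() {
		// testing.Benchmark chooses b.N with the same benchTime
		// logic shown in launch above.
		r := testing.Benchmark(func(b *testing.B) {
			s := make([]int, 0, 1024)
			for i := 0; i < b.N; i++ {
				s = append(s[:0], i)
			}
			_ = s
		})
		fmt.Println(r.N, "iterations:", r)
	}
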
4 changes: 2 additions & 2 deletions src/testing/sub_test.go
@@ -17,7 +17,7 @@ import (
 
 func init() {
 	// Make benchmark tests run 10* faster.
-	*benchTime = 100 * time.Millisecond
+	benchTime.d = 100 * time.Millisecond
 }
 
 func TestTestContext(t *T) {
@@ -593,7 +593,7 @@ func TestBRun(t *T) {
 				chatty: tc.chatty,
 			},
 			benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
-			benchTime: time.Microsecond,
+			benchTime: benchTimeFlag{d: 1 * time.Microsecond},
 		}
 		root.runN(1)
 		if ok != !tc.failed {
