Commit 8e0aea16 authored by Russ Cox

testing: implement -benchtime=100x

When running benchmarks with profilers and trying to
compare one run against another, it is very useful to be
able to force each run to execute exactly the same number
of iterations.

Discussion on the proposal issue #24735 led to the decision
to overload -benchtime, so that instead of saying
-benchtime 10s to run a benchmark for 10 seconds,
you say -benchtime 100x to run a benchmark 100 times.

Fixes #24735.

Change-Id: Id17c5bd18bd09987bb48ed12420d61ae9e200fd7
Reviewed-on: https://go-review.googlesource.com/c/139258
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
parent 56131cbd
......@@ -2659,6 +2659,8 @@
// Run enough iterations of each benchmark to take t, specified
// as a time.Duration (for example, -benchtime 1h30s).
// The default is 1 second (1s).
// The special syntax Nx means to run the benchmark N times
// (for example, -benchtime 100x).
//
// -count n
// Run each test and benchmark n times (default 1).
......
......@@ -212,6 +212,8 @@ const testFlag2 = `
Run enough iterations of each benchmark to take t, specified
as a time.Duration (for example, -benchtime 1h30s).
The default is 1 second (1s).
The special syntax Nx means to run the benchmark N times
(for example, -benchtime 100x).
-count n
Run each test and benchmark n times (default 1).
......
......@@ -10,15 +10,50 @@ import (
"internal/race"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
var matchBenchmarks = flag.String("test.bench", "", "run only benchmarks matching `regexp`")
var benchTime = flag.Duration("test.benchtime", 1*time.Second, "run each benchmark for duration `d`")
var benchTime = benchTimeFlag{d: 1 * time.Second}
var benchmarkMemory = flag.Bool("test.benchmem", false, "print memory allocations for benchmarks")
// init registers benchTime as the -test.benchtime flag. A custom
// flag.Value is used (instead of flag.Duration) so the flag can accept
// both a plain duration ("10s") and an exact iteration count ("100x").
func init() {
	flag.Var(&benchTime, "test.benchtime", "run each benchmark for duration `d`")
}
type benchTimeFlag struct {
d time.Duration
n int
}
func (f *benchTimeFlag) String() string {
if f.n > 0 {
return fmt.Sprintf("%dx", f.n)
}
return time.Duration(f.d).String()
}
func (f *benchTimeFlag) Set(s string) error {
if strings.HasSuffix(s, "x") {
n, err := strconv.ParseInt(s[:len(s)-1], 10, 0)
if err != nil || n <= 0 {
return fmt.Errorf("invalid count")
}
*f = benchTimeFlag{n: int(n)}
return nil
}
d, err := time.ParseDuration(s)
if err != nil || d <= 0 {
return fmt.Errorf("invalid duration")
}
*f = benchTimeFlag{d: d}
return nil
}
// Global lock to ensure only one benchmark runs at a time.
var benchmarkLock sync.Mutex
......@@ -53,7 +88,7 @@ type B struct {
previousN int // number of iterations in the previous run
previousDuration time.Duration // total duration of the previous run
benchFunc func(b *B)
benchTime time.Duration
benchTime benchTimeFlag
bytes int64
missingBytes bool // one of the subbenchmarks does not have bytes set.
timerOn bool
......@@ -273,21 +308,25 @@ func (b *B) launch() {
}()
// Run the benchmark for at least the specified amount of time.
d := b.benchTime
for n := 1; !b.failed && b.duration < d && n < 1e9; {
last := n
// Predict required iterations.
n = int(d.Nanoseconds())
if nsop := b.nsPerOp(); nsop != 0 {
n /= int(nsop)
if b.benchTime.n > 0 {
b.runN(b.benchTime.n)
} else {
d := b.benchTime.d
for n := 1; !b.failed && b.duration < d && n < 1e9; {
last := n
// Predict required iterations.
n = int(d.Nanoseconds())
if nsop := b.nsPerOp(); nsop != 0 {
n /= int(nsop)
}
// Run more iterations than we think we'll need (1.2x).
// Don't grow too fast in case we had timing errors previously.
// Be sure to run at least one more than last time.
n = max(min(n+n/5, 100*last), last+1)
// Round up to something easy to read.
n = roundUp(n)
b.runN(n)
}
// Run more iterations than we think we'll need (1.2x).
// Don't grow too fast in case we had timing errors previously.
// Be sure to run at least one more than last time.
n = max(min(n+n/5, 100*last), last+1)
// Round up to something easy to read.
n = roundUp(n)
b.runN(n)
}
b.result = BenchmarkResult{b.N, b.duration, b.bytes, b.netAllocs, b.netBytes}
}
......@@ -416,7 +455,7 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
b.Run(Benchmark.Name, Benchmark.F)
}
},
benchTime: *benchTime,
benchTime: benchTime,
context: ctx,
}
main.runN(1)
......@@ -653,7 +692,7 @@ func Benchmark(f func(b *B)) BenchmarkResult {
w: discard{},
},
benchFunc: f,
benchTime: *benchTime,
benchTime: benchTime,
}
if b.run1() {
b.run()
......
......@@ -17,7 +17,7 @@ import (
func init() {
// Make benchmark tests run 10* faster.
*benchTime = 100 * time.Millisecond
benchTime.d = 100 * time.Millisecond
}
func TestTestContext(t *T) {
......@@ -593,7 +593,7 @@ func TestBRun(t *T) {
chatty: tc.chatty,
},
benchFunc: func(b *B) { ok = b.Run("test", tc.f) }, // Use Run to catch failure.
benchTime: time.Microsecond,
benchTime: benchTimeFlag{d: 1 * time.Microsecond},
}
root.runN(1)
if ok != !tc.failed {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment