Commit f04d5836 authored by Dmitry Vyukov

testing: parallelize tests over count

Currently all package tests are executed once
with Parallel tests executed in parallel.
Then this process is repeated count*cpu times.
Tests are not parallelized over count*cpu.
Parallelizing over cpu is not possible as
GOMAXPROCS is a global setting. But it is
possible for count.

Parallelize over count.

Brings down testing of my package with -count=100
from 10s to 0.3s.

Change-Id: I76d8322adeb8c5c6e70b99af690291fd69d6402a
Reviewed-on: https://go-review.googlesource.com/44830
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
parent f7aa454c
...@@ -427,44 +427,46 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e ...@@ -427,44 +427,46 @@ func runBenchmarks(importPath string, matchString func(pat, str string) (bool, e
// processBench runs bench b for the configured CPU counts and prints the results. // processBench runs bench b for the configured CPU counts and prints the results.
func (ctx *benchContext) processBench(b *B) { func (ctx *benchContext) processBench(b *B) {
for i, procs := range cpuList { for i, procs := range cpuList {
runtime.GOMAXPROCS(procs) for j := uint(0); j < *count; j++ {
benchName := benchmarkName(b.name, procs) runtime.GOMAXPROCS(procs)
fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName) benchName := benchmarkName(b.name, procs)
// Recompute the running time for all but the first iteration. fmt.Fprintf(b.w, "%-*s\t", ctx.maxLen, benchName)
if i > 0 { // Recompute the running time for all but the first iteration.
b = &B{ if i > 0 || j > 0 {
common: common{ b = &B{
signal: make(chan bool), common: common{
name: b.name, signal: make(chan bool),
w: b.w, name: b.name,
chatty: b.chatty, w: b.w,
}, chatty: b.chatty,
benchFunc: b.benchFunc, },
benchTime: b.benchTime, benchFunc: b.benchFunc,
benchTime: b.benchTime,
}
b.run1()
}
r := b.doBench()
if b.failed {
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason
// the benchmark failed.
fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output)
continue
}
results := r.String()
if *benchmarkMemory || b.showAllocResult {
results += "\t" + r.MemString()
}
fmt.Fprintln(b.w, results)
// Unlike with tests, we ignore the -chatty flag and always print output for
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
b.trimOutput()
fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output)
}
if p := runtime.GOMAXPROCS(-1); p != procs {
fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
} }
b.run1()
}
r := b.doBench()
if b.failed {
// The output could be very long here, but probably isn't.
// We print it all, regardless, because we don't want to trim the reason
// the benchmark failed.
fmt.Fprintf(b.w, "--- FAIL: %s\n%s", benchName, b.output)
continue
}
results := r.String()
if *benchmarkMemory || b.showAllocResult {
results += "\t" + r.MemString()
}
fmt.Fprintln(b.w, results)
// Unlike with tests, we ignore the -chatty flag and always print output for
// benchmarks since the output generation time will skew the results.
if len(b.output) > 0 {
b.trimOutput()
fmt.Fprintf(b.w, "--- BENCH: %s\n%s", benchName, b.output)
}
if p := runtime.GOMAXPROCS(-1); p != procs {
fmt.Fprintf(os.Stderr, "testing: %s left GOMAXPROCS set to %d\n", benchName, p)
} }
} }
} }
......
...@@ -991,27 +991,29 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT ...@@ -991,27 +991,29 @@ func runTests(matchString func(pat, str string) (bool, error), tests []InternalT
ok = true ok = true
for _, procs := range cpuList { for _, procs := range cpuList {
runtime.GOMAXPROCS(procs) runtime.GOMAXPROCS(procs)
ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run")) for i := uint(0); i < *count; i++ {
t := &T{ ctx := newTestContext(*parallel, newMatcher(matchString, *match, "-test.run"))
common: common{ t := &T{
signal: make(chan bool), common: common{
barrier: make(chan bool), signal: make(chan bool),
w: os.Stdout, barrier: make(chan bool),
chatty: *chatty, w: os.Stdout,
}, chatty: *chatty,
context: ctx, },
} context: ctx,
tRunner(t, func(t *T) {
for _, test := range tests {
t.Run(test.Name, test.F)
} }
// Run catching the signal rather than the tRunner as a separate tRunner(t, func(t *T) {
// goroutine to avoid adding a goroutine during the sequential for _, test := range tests {
// phase as this pollutes the stacktrace output when aborting. t.Run(test.Name, test.F)
go func() { <-t.signal }() }
}) // Run catching the signal rather than the tRunner as a separate
ok = ok && !t.Failed() // goroutine to avoid adding a goroutine during the sequential
ran = ran || t.ran // phase as this pollutes the stacktrace output when aborting.
go func() { <-t.signal }()
})
ok = ok && !t.Failed()
ran = ran || t.ran
}
} }
return ran, ok return ran, ok
} }
...@@ -1167,13 +1169,9 @@ func parseCpuList() { ...@@ -1167,13 +1169,9 @@ func parseCpuList() {
fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val) fmt.Fprintf(os.Stderr, "testing: invalid value %q for -test.cpu\n", val)
os.Exit(1) os.Exit(1)
} }
for i := uint(0); i < *count; i++ { cpuList = append(cpuList, cpu)
cpuList = append(cpuList, cpu)
}
} }
if cpuList == nil { if cpuList == nil {
for i := uint(0); i < *count; i++ { cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
cpuList = append(cpuList, runtime.GOMAXPROCS(-1))
}
} }
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment