Commit 7b767f4e authored by Dmitry Vyukov

internal/race: add package

Factor out duplicated race thunks from the sync, syscall, net
and fmt packages into a separate package and use it.

Fixes #8593

Change-Id: I156869c50946277809f6b509463752e7f7d28cdb
Reviewed-on: https://go-review.googlesource.com/14870Reviewed-by: 's avatarBrad Fitzpatrick <bradfitz@golang.org>
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent e9081b3c
This diff is collapsed.
......@@ -7,6 +7,7 @@ package fmt_test
import (
"bytes"
. "fmt"
"internal/race"
"io"
"math"
"reflect"
......@@ -982,7 +983,7 @@ func TestCountMallocs(t *testing.T) {
t.Skip("skipping malloc count in short mode")
case runtime.GOMAXPROCS(0) > 1:
t.Skip("skipping; GOMAXPROCS>1")
case raceenabled:
case race.Enabled:
t.Skip("skipping malloc count under race detector")
}
for _, mt := range mallocTest {
......
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package fmt_test
const raceenabled = false
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package fmt_test
const raceenabled = true
......@@ -39,9 +39,10 @@ var pkgDeps = map[string][]string{
"runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
"runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
"sync": {"runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
"internal/race": {"runtime", "unsafe"},
"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"sync/atomic": {"unsafe"},
"unsafe": {},
"L0": {
"errors",
......@@ -131,7 +132,7 @@ var pkgDeps = map[string][]string{
// End of linear dependency definitions.
// Operating system access.
"syscall": {"L0", "unicode/utf16"},
"syscall": {"L0", "internal/race", "unicode/utf16"},
"internal/syscall/unix": {"L0", "syscall"},
"internal/syscall/windows": {"L0", "syscall"},
"internal/syscall/windows/registry": {"L0", "syscall", "unicode/utf16"},
......@@ -278,7 +279,7 @@ var pkgDeps = map[string][]string{
// Basic networking.
// Because net must be used by any package that wants to
// do networking portably, it must have a small dependency set: just L0+basic os.
"net": {"L0", "CGO", "math/rand", "os", "sort", "syscall", "time", "internal/syscall/windows", "internal/singleflight"},
"net": {"L0", "CGO", "math/rand", "os", "sort", "syscall", "time", "internal/syscall/windows", "internal/singleflight", "internal/race"},
// NET enables use of basic network-related packages.
"NET": {
......
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package race contains helper functions for manually instrumenting code for the race detector.
The runtime package intentionally exports these functions only in the race build;
this package exports them unconditionally but without the "race" build tag they are no-ops.
*/
package race
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package race
import (
"unsafe"
)
const Enabled = false
func Acquire(addr unsafe.Pointer) {
}
func Release(addr unsafe.Pointer) {
}
func ReleaseMerge(addr unsafe.Pointer) {
}
func Disable() {
}
func Enable() {
}
func Read(addr unsafe.Pointer) {
}
func Write(addr unsafe.Pointer) {
}
func ReadRange(addr unsafe.Pointer, len int) {
}
func WriteRange(addr unsafe.Pointer, len int) {
}
// Copyright 2012 The Go Authors. All rights reserved.
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package sync
package race
import (
"runtime"
"unsafe"
)
const raceenabled = true
const Enabled = true
func raceAcquire(addr unsafe.Pointer) {
func Acquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
func raceRelease(addr unsafe.Pointer) {
func Release(addr unsafe.Pointer) {
runtime.RaceRelease(addr)
}
func raceReleaseMerge(addr unsafe.Pointer) {
func ReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
func raceDisable() {
func Disable() {
runtime.RaceDisable()
}
func raceEnable() {
func Enable() {
runtime.RaceEnable()
}
func raceRead(addr unsafe.Pointer) {
func Read(addr unsafe.Pointer) {
runtime.RaceRead(addr)
}
func raceWrite(addr unsafe.Pointer) {
func Write(addr unsafe.Pointer) {
runtime.RaceWrite(addr)
}
func ReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
func WriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
......@@ -5,6 +5,7 @@
package net
import (
"internal/race"
"os"
"runtime"
"sync"
......@@ -461,8 +462,8 @@ func (fd *netFD) Read(buf []byte) (int, error) {
n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
})
if raceenabled {
raceAcquire(unsafe.Pointer(&ioSync))
if race.Enabled {
race.Acquire(unsafe.Pointer(&ioSync))
}
err = fd.eofError(n, err)
if _, ok := err.(syscall.Errno); ok {
......@@ -504,8 +505,8 @@ func (fd *netFD) Write(buf []byte) (int, error) {
return 0, err
}
defer fd.writeUnlock()
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
if race.Enabled {
race.ReleaseMerge(unsafe.Pointer(&ioSync))
}
o := &fd.wop
o.InitBuf(buf)
......
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
// +build windows
package net
import (
"runtime"
"unsafe"
)
const raceenabled = true
func raceAcquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
func raceReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
func raceReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
func raceWriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
// +build windows
package net
import (
"unsafe"
)
const raceenabled = false
func raceAcquire(addr unsafe.Pointer) {
}
func raceReleaseMerge(addr unsafe.Pointer) {
}
func raceReadRange(addr unsafe.Pointer, len int) {
}
func raceWriteRange(addr unsafe.Pointer, len int) {
}
......@@ -5,6 +5,7 @@
package sync
import (
"internal/race"
"sync/atomic"
"unsafe"
)
......@@ -51,12 +52,12 @@ func NewCond(l Locker) *Cond {
//
func (c *Cond) Wait() {
c.checker.check()
if raceenabled {
raceDisable()
if race.Enabled {
race.Disable()
}
atomic.AddUint32(&c.waiters, 1)
if raceenabled {
raceEnable()
if race.Enabled {
race.Enable()
}
c.L.Unlock()
runtime_Syncsemacquire(&c.sema)
......@@ -81,14 +82,14 @@ func (c *Cond) Broadcast() {
func (c *Cond) signalImpl(all bool) {
c.checker.check()
if raceenabled {
raceDisable()
if race.Enabled {
race.Disable()
}
for {
old := atomic.LoadUint32(&c.waiters)
if old == 0 {
if raceenabled {
raceEnable()
if race.Enabled {
race.Enable()
}
return
}
......@@ -97,8 +98,8 @@ func (c *Cond) signalImpl(all bool) {
new = 0
}
if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
if raceenabled {
raceEnable()
if race.Enabled {
race.Enable()
}
runtime_Syncsemrelease(&c.sema, old-new)
return
......
......@@ -7,5 +7,3 @@ package sync
// Export for testing.
var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease
const RaceEnabled = raceenabled
......@@ -11,6 +11,7 @@
package sync
import (
"internal/race"
"sync/atomic"
"unsafe"
)
......@@ -41,8 +42,8 @@ const (
func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
if raceenabled {
raceAcquire(unsafe.Pointer(m))
if race.Enabled {
race.Acquire(unsafe.Pointer(m))
}
return
}
......@@ -85,8 +86,8 @@ func (m *Mutex) Lock() {
}
}
if raceenabled {
raceAcquire(unsafe.Pointer(m))
if race.Enabled {
race.Acquire(unsafe.Pointer(m))
}
}
......@@ -97,9 +98,9 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
if raceenabled {
if race.Enabled {
_ = m.state
raceRelease(unsafe.Pointer(m))
race.Release(unsafe.Pointer(m))
}
// Fast path: drop lock bit.
......
......@@ -5,6 +5,7 @@
package sync
import (
"internal/race"
"runtime"
"sync/atomic"
"unsafe"
......@@ -59,7 +60,7 @@ type poolLocal struct {
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
if raceenabled {
if race.Enabled {
// Under race detector the Pool degenerates into no-op.
// It's conforming, simple and does not introduce excessive
// happens-before edges between unrelated goroutines.
......@@ -91,7 +92,7 @@ func (p *Pool) Put(x interface{}) {
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
if raceenabled {
if race.Enabled {
if p.New != nil {
return p.New()
}
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package sync
import (
"unsafe"
)
const raceenabled = false
func raceAcquire(addr unsafe.Pointer) {
}
func raceRelease(addr unsafe.Pointer) {
}
func raceReleaseMerge(addr unsafe.Pointer) {
}
func raceDisable() {
}
func raceEnable() {
}
func raceRead(addr unsafe.Pointer) {
}
func raceWrite(addr unsafe.Pointer) {
}
......@@ -5,6 +5,7 @@
package sync
import (
"internal/race"
"sync/atomic"
"unsafe"
)
......@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
if raceenabled {
if race.Enabled {
_ = rw.w.state
raceDisable()
race.Disable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem)
}
if raceenabled {
raceEnable()
raceAcquire(unsafe.Pointer(&rw.readerSem))
if race.Enabled {
race.Enable()
race.Acquire(unsafe.Pointer(&rw.readerSem))
}
}
......@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
if raceenabled {
if race.Enabled {
_ = rw.w.state
raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
raceDisable()
race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
race.Disable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
raceEnable()
race.Enable()
panic("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
......@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() {
runtime_Semrelease(&rw.writerSem)
}
}
if raceenabled {
raceEnable()
if race.Enabled {
race.Enable()
}
}
......@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() {
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
if raceenabled {
if race.Enabled {
_ = rw.w.state
raceDisable()
race.Disable()
}
// First, resolve competition with other writers.
rw.w.Lock()
......@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() {
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem)
}
if raceenabled {
raceEnable()
raceAcquire(unsafe.Pointer(&rw.readerSem))
raceAcquire(unsafe.Pointer(&rw.writerSem))
if race.Enabled {
race.Enable()
race.Acquire(unsafe.Pointer(&rw.readerSem))
race.Acquire(unsafe.Pointer(&rw.writerSem))
}
}
......@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() {
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
if raceenabled {
if race.Enabled {
_ = rw.w.state
raceRelease(unsafe.Pointer(&rw.readerSem))
raceRelease(unsafe.Pointer(&rw.writerSem))
raceDisable()
race.Release(unsafe.Pointer(&rw.readerSem))
race.Release(unsafe.Pointer(&rw.writerSem))
race.Disable()
}
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
raceEnable()
race.Enable()
panic("sync: Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
......@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() {
}
// Allow other writers to proceed.
rw.w.Unlock()
if raceenabled {
raceEnable()
if race.Enabled {
race.Enable()
}
}
......
......@@ -5,6 +5,7 @@
package sync
import (
"internal/race"
"sync/atomic"
"unsafe"
)
......@@ -46,24 +47,24 @@ func (wg *WaitGroup) state() *uint64 {
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
statep := wg.state()
if raceenabled {
if race.Enabled {
_ = *statep // trigger nil deref early
if delta < 0 {
// Synchronize decrements with Wait.
raceReleaseMerge(unsafe.Pointer(wg))
race.ReleaseMerge(unsafe.Pointer(wg))
}
raceDisable()
defer raceEnable()
race.Disable()
defer race.Enable()
}
state := atomic.AddUint64(statep, uint64(delta)<<32)
v := int32(state >> 32)
w := uint32(state)
if raceenabled {
if race.Enabled {
if delta > 0 && v == int32(delta) {
// The first increment must be synchronized with Wait.
// Need to model this as a read, because there can be
// several concurrent wg.counter transitions from 0.
raceRead(unsafe.Pointer(&wg.sema))
race.Read(unsafe.Pointer(&wg.sema))
}
}
if v < 0 {
......@@ -98,9 +99,9 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
statep := wg.state()
if raceenabled {
if race.Enabled {
_ = *statep // trigger nil deref early
raceDisable()
race.Disable()
}
for {
state := atomic.LoadUint64(statep)
......@@ -108,28 +109,28 @@ func (wg *WaitGroup) Wait() {
w := uint32(state)
if v == 0 {
// Counter is 0, no need to wait.
if raceenabled {
raceEnable()
raceAcquire(unsafe.Pointer(wg))
if race.Enabled {
race.Enable()
race.Acquire(unsafe.Pointer(wg))
}
return
}
// Increment waiters count.
if atomic.CompareAndSwapUint64(statep, state, state+1) {
if raceenabled && w == 0 {
if race.Enabled && w == 0 {
// Wait must be synchronized with the first Add.
// Need to model this is as a write to race with the read in Add.
// As a consequence, can do the write only for the first waiter,
// otherwise concurrent Waits will race with each other.
raceWrite(unsafe.Pointer(&wg.sema))
race.Write(unsafe.Pointer(&wg.sema))
}
runtime_Semacquire(&wg.sema)
if *statep != 0 {
panic("sync: WaitGroup is reused before previous Wait has returned")
}
if raceenabled {
raceEnable()
raceAcquire(unsafe.Pointer(wg))
if race.Enabled {
race.Enable()
race.Acquire(unsafe.Pointer(wg))
}
return
}
......
......@@ -5,6 +5,7 @@
package sync_test
import (
"internal/race"
"runtime"
. "sync"
"sync/atomic"
......@@ -48,7 +49,7 @@ func TestWaitGroup(t *testing.T) {
}
func knownRacy(t *testing.T) {
if RaceEnabled {
if race.Enabled {
t.Skip("skipping known-racy test under the race detector")
}
}
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package syscall
import (
"runtime"
"unsafe"
)
const raceenabled = true
func raceAcquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
func raceReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
func raceReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
func raceWriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package syscall
import (
"unsafe"
)
const raceenabled = false
func raceAcquire(addr unsafe.Pointer) {
}
func raceReleaseMerge(addr unsafe.Pointer) {
}
func raceReadRange(addr unsafe.Pointer, len int) {
}
func raceWriteRange(addr unsafe.Pointer, len int) {
}
......@@ -7,6 +7,7 @@
package syscall
import (
"internal/race"
"runtime"
"sync"
"unsafe"
......@@ -158,12 +159,12 @@ func (s Signal) String() string {
func Read(fd int, p []byte) (n int, err error) {
n, err = read(fd, p)
if raceenabled {
if race.Enabled {
if n > 0 {
raceWriteRange(unsafe.Pointer(&p[0]), n)
race.WriteRange(unsafe.Pointer(&p[0]), n)
}
if err == nil {
raceAcquire(unsafe.Pointer(&ioSync))
race.Acquire(unsafe.Pointer(&ioSync))
}
}
if msanenabled && n > 0 {
......@@ -173,12 +174,12 @@ func Read(fd int, p []byte) (n int, err error) {
}
func Write(fd int, p []byte) (n int, err error) {
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
if race.Enabled {
race.ReleaseMerge(unsafe.Pointer(&ioSync))
}
n, err = write(fd, p)
if raceenabled && n > 0 {
raceReadRange(unsafe.Pointer(&p[0]), n)
if race.Enabled && n > 0 {
race.ReadRange(unsafe.Pointer(&p[0]), n)
}
if msanenabled && n > 0 {
msanRead(unsafe.Pointer(&p[0]), n)
......@@ -320,8 +321,8 @@ func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
}
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
if race.Enabled {
race.ReleaseMerge(unsafe.Pointer(&ioSync))
}
return sendfile(outfd, infd, offset, count)
}
......
......@@ -8,6 +8,7 @@ package syscall
import (
errorspkg "errors"
"internal/race"
"sync"
"unicode/utf16"
"unsafe"
......@@ -304,11 +305,11 @@ func Read(fd Handle, p []byte) (n int, err error) {
}
return 0, e
}
if raceenabled {
if race.Enabled {
if done > 0 {
raceWriteRange(unsafe.Pointer(&p[0]), int(done))
race.WriteRange(unsafe.Pointer(&p[0]), int(done))
}
raceAcquire(unsafe.Pointer(&ioSync))
race.Acquire(unsafe.Pointer(&ioSync))
}
if msanenabled && done > 0 {
msanWrite(unsafe.Pointer(&p[0]), int(done))
......@@ -317,16 +318,16 @@ func Read(fd Handle, p []byte) (n int, err error) {
}
func Write(fd Handle, p []byte) (n int, err error) {
if raceenabled {
raceReleaseMerge(unsafe.Pointer(&ioSync))
if race.Enabled {
race.ReleaseMerge(unsafe.Pointer(&ioSync))
}
var done uint32
e := WriteFile(fd, p, &done, nil)
if e != nil {
return 0, e
}
if raceenabled && done > 0 {
raceReadRange(unsafe.Pointer(&p[0]), int(done))
if race.Enabled && done > 0 {
race.ReadRange(unsafe.Pointer(&p[0]), int(done))
}
if msanenabled && done > 0 {
msanRead(unsafe.Pointer(&p[0]), int(done))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.