Commit 7b767f4e authored by Dmitry Vyukov

internal/race: add package

Factor out duplicated race thunks from sync, syscall net
and fmt packages into a separate package and use it.

Fixes #8593

Change-Id: I156869c50946277809f6b509463752e7f7d28cdb
Reviewed-on: https://go-review.googlesource.com/14870
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent e9081b3c
This diff is collapsed.
...@@ -7,6 +7,7 @@ package fmt_test ...@@ -7,6 +7,7 @@ package fmt_test
import ( import (
"bytes" "bytes"
. "fmt" . "fmt"
"internal/race"
"io" "io"
"math" "math"
"reflect" "reflect"
...@@ -982,7 +983,7 @@ func TestCountMallocs(t *testing.T) { ...@@ -982,7 +983,7 @@ func TestCountMallocs(t *testing.T) {
t.Skip("skipping malloc count in short mode") t.Skip("skipping malloc count in short mode")
case runtime.GOMAXPROCS(0) > 1: case runtime.GOMAXPROCS(0) > 1:
t.Skip("skipping; GOMAXPROCS>1") t.Skip("skipping; GOMAXPROCS>1")
case raceenabled: case race.Enabled:
t.Skip("skipping malloc count under race detector") t.Skip("skipping malloc count under race detector")
} }
for _, mt := range mallocTest { for _, mt := range mallocTest {
......
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package fmt_test
// raceenabled reports whether the race detector is active.
// This file carries the !race build tag, so it is always false here;
// the race build provides the true counterpart.
const raceenabled = false
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package fmt_test
// raceenabled reports whether the race detector is active.
// This file carries the race build tag, so it is always true here;
// the !race build provides the false counterpart.
const raceenabled = true
...@@ -39,9 +39,10 @@ var pkgDeps = map[string][]string{ ...@@ -39,9 +39,10 @@ var pkgDeps = map[string][]string{
"runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"}, "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
"runtime/internal/sys": {}, "runtime/internal/sys": {},
"runtime/internal/atomic": {"unsafe", "runtime/internal/sys"}, "runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
"sync": {"runtime", "sync/atomic", "unsafe"}, "internal/race": {"runtime", "unsafe"},
"sync/atomic": {"unsafe"}, "sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
"unsafe": {}, "sync/atomic": {"unsafe"},
"unsafe": {},
"L0": { "L0": {
"errors", "errors",
...@@ -131,7 +132,7 @@ var pkgDeps = map[string][]string{ ...@@ -131,7 +132,7 @@ var pkgDeps = map[string][]string{
// End of linear dependency definitions. // End of linear dependency definitions.
// Operating system access. // Operating system access.
"syscall": {"L0", "unicode/utf16"}, "syscall": {"L0", "internal/race", "unicode/utf16"},
"internal/syscall/unix": {"L0", "syscall"}, "internal/syscall/unix": {"L0", "syscall"},
"internal/syscall/windows": {"L0", "syscall"}, "internal/syscall/windows": {"L0", "syscall"},
"internal/syscall/windows/registry": {"L0", "syscall", "unicode/utf16"}, "internal/syscall/windows/registry": {"L0", "syscall", "unicode/utf16"},
...@@ -278,7 +279,7 @@ var pkgDeps = map[string][]string{ ...@@ -278,7 +279,7 @@ var pkgDeps = map[string][]string{
// Basic networking. // Basic networking.
// Because net must be used by any package that wants to // Because net must be used by any package that wants to
// do networking portably, it must have a small dependency set: just L0+basic os. // do networking portably, it must have a small dependency set: just L0+basic os.
"net": {"L0", "CGO", "math/rand", "os", "sort", "syscall", "time", "internal/syscall/windows", "internal/singleflight"}, "net": {"L0", "CGO", "math/rand", "os", "sort", "syscall", "time", "internal/syscall/windows", "internal/singleflight", "internal/race"},
// NET enables use of basic network-related packages. // NET enables use of basic network-related packages.
"NET": { "NET": {
......
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package race contains helper functions for manually instrumenting code
for the race detector.

The runtime package intentionally exports these functions only when the
race detector is enabled (the "race" build tag). This package exports
them unconditionally; without the "race" build tag they are no-ops, so
callers may use them without their own build-tag plumbing.
*/
package race
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package race
import (
"unsafe"
)
// Enabled reports whether the race detector is active.
// This file carries the !race build tag, so it is always false here
// and every function below is a no-op.
const Enabled = false
// Acquire is a no-op without the race detector.
func Acquire(addr unsafe.Pointer) {
}
// Release is a no-op without the race detector.
func Release(addr unsafe.Pointer) {
}
// ReleaseMerge is a no-op without the race detector.
func ReleaseMerge(addr unsafe.Pointer) {
}
// Disable is a no-op without the race detector.
func Disable() {
}
// Enable is a no-op without the race detector.
func Enable() {
}
// Read is a no-op without the race detector.
func Read(addr unsafe.Pointer) {
}
// Write is a no-op without the race detector.
func Write(addr unsafe.Pointer) {
}
// ReadRange is a no-op without the race detector.
func ReadRange(addr unsafe.Pointer, len int) {
}
// WriteRange is a no-op without the race detector.
func WriteRange(addr unsafe.Pointer, len int) {
}
// Copyright 2012 The Go Authors. All rights reserved. // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build race // +build race
package sync package race
import ( import (
"runtime" "runtime"
"unsafe" "unsafe"
) )
const raceenabled = true const Enabled = true
func raceAcquire(addr unsafe.Pointer) { func Acquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr) runtime.RaceAcquire(addr)
} }
func raceRelease(addr unsafe.Pointer) { func Release(addr unsafe.Pointer) {
runtime.RaceRelease(addr) runtime.RaceRelease(addr)
} }
func raceReleaseMerge(addr unsafe.Pointer) { func ReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr) runtime.RaceReleaseMerge(addr)
} }
func raceDisable() { func Disable() {
runtime.RaceDisable() runtime.RaceDisable()
} }
func raceEnable() { func Enable() {
runtime.RaceEnable() runtime.RaceEnable()
} }
func raceRead(addr unsafe.Pointer) { func Read(addr unsafe.Pointer) {
runtime.RaceRead(addr) runtime.RaceRead(addr)
} }
func raceWrite(addr unsafe.Pointer) { func Write(addr unsafe.Pointer) {
runtime.RaceWrite(addr) runtime.RaceWrite(addr)
} }
func ReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
func WriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package net package net
import ( import (
"internal/race"
"os" "os"
"runtime" "runtime"
"sync" "sync"
...@@ -461,8 +462,8 @@ func (fd *netFD) Read(buf []byte) (int, error) { ...@@ -461,8 +462,8 @@ func (fd *netFD) Read(buf []byte) (int, error) {
n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error { n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil) return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
}) })
if raceenabled { if race.Enabled {
raceAcquire(unsafe.Pointer(&ioSync)) race.Acquire(unsafe.Pointer(&ioSync))
} }
err = fd.eofError(n, err) err = fd.eofError(n, err)
if _, ok := err.(syscall.Errno); ok { if _, ok := err.(syscall.Errno); ok {
...@@ -504,8 +505,8 @@ func (fd *netFD) Write(buf []byte) (int, error) { ...@@ -504,8 +505,8 @@ func (fd *netFD) Write(buf []byte) (int, error) {
return 0, err return 0, err
} }
defer fd.writeUnlock() defer fd.writeUnlock()
if raceenabled { if race.Enabled {
raceReleaseMerge(unsafe.Pointer(&ioSync)) race.ReleaseMerge(unsafe.Pointer(&ioSync))
} }
o := &fd.wop o := &fd.wop
o.InitBuf(buf) o.InitBuf(buf)
......
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
// +build windows
package net
import (
"runtime"
"unsafe"
)
// raceenabled reports whether the race detector is active; this file is
// compiled only under the race build tag, so it is always true here.
const raceenabled = true
// raceAcquire is a thin wrapper around runtime.RaceAcquire,
// recording a synchronization acquire on addr.
func raceAcquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
// raceReleaseMerge is a thin wrapper around runtime.RaceReleaseMerge,
// recording a merged synchronization release on addr.
func raceReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
// raceReadRange reports the read of the len bytes at addr to the
// race detector via runtime.RaceReadRange.
func raceReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
// raceWriteRange reports the write of the len bytes at addr to the
// race detector via runtime.RaceWriteRange.
func raceWriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
// +build windows
package net
import (
"unsafe"
)
// raceenabled reports whether the race detector is active; this file is
// compiled only without the race build tag, so it is always false here
// and the functions below are no-ops kept so call sites compile.
const raceenabled = false
// raceAcquire is a no-op without the race detector.
func raceAcquire(addr unsafe.Pointer) {
}
// raceReleaseMerge is a no-op without the race detector.
func raceReleaseMerge(addr unsafe.Pointer) {
}
// raceReadRange is a no-op without the race detector.
func raceReadRange(addr unsafe.Pointer, len int) {
}
// raceWriteRange is a no-op without the race detector.
func raceWriteRange(addr unsafe.Pointer, len int) {
}
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package sync package sync
import ( import (
"internal/race"
"sync/atomic" "sync/atomic"
"unsafe" "unsafe"
) )
...@@ -51,12 +52,12 @@ func NewCond(l Locker) *Cond { ...@@ -51,12 +52,12 @@ func NewCond(l Locker) *Cond {
// //
func (c *Cond) Wait() { func (c *Cond) Wait() {
c.checker.check() c.checker.check()
if raceenabled { if race.Enabled {
raceDisable() race.Disable()
} }
atomic.AddUint32(&c.waiters, 1) atomic.AddUint32(&c.waiters, 1)
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
} }
c.L.Unlock() c.L.Unlock()
runtime_Syncsemacquire(&c.sema) runtime_Syncsemacquire(&c.sema)
...@@ -81,14 +82,14 @@ func (c *Cond) Broadcast() { ...@@ -81,14 +82,14 @@ func (c *Cond) Broadcast() {
func (c *Cond) signalImpl(all bool) { func (c *Cond) signalImpl(all bool) {
c.checker.check() c.checker.check()
if raceenabled { if race.Enabled {
raceDisable() race.Disable()
} }
for { for {
old := atomic.LoadUint32(&c.waiters) old := atomic.LoadUint32(&c.waiters)
if old == 0 { if old == 0 {
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
} }
return return
} }
...@@ -97,8 +98,8 @@ func (c *Cond) signalImpl(all bool) { ...@@ -97,8 +98,8 @@ func (c *Cond) signalImpl(all bool) {
new = 0 new = 0
} }
if atomic.CompareAndSwapUint32(&c.waiters, old, new) { if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
} }
runtime_Syncsemrelease(&c.sema, old-new) runtime_Syncsemrelease(&c.sema, old-new)
return return
......
...@@ -7,5 +7,3 @@ package sync ...@@ -7,5 +7,3 @@ package sync
// Export for testing. // Export for testing.
var Runtime_Semacquire = runtime_Semacquire var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease var Runtime_Semrelease = runtime_Semrelease
const RaceEnabled = raceenabled
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
package sync package sync
import ( import (
"internal/race"
"sync/atomic" "sync/atomic"
"unsafe" "unsafe"
) )
...@@ -41,8 +42,8 @@ const ( ...@@ -41,8 +42,8 @@ const (
func (m *Mutex) Lock() { func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex. // Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) { if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
if raceenabled { if race.Enabled {
raceAcquire(unsafe.Pointer(m)) race.Acquire(unsafe.Pointer(m))
} }
return return
} }
...@@ -85,8 +86,8 @@ func (m *Mutex) Lock() { ...@@ -85,8 +86,8 @@ func (m *Mutex) Lock() {
} }
} }
if raceenabled { if race.Enabled {
raceAcquire(unsafe.Pointer(m)) race.Acquire(unsafe.Pointer(m))
} }
} }
...@@ -97,9 +98,9 @@ func (m *Mutex) Lock() { ...@@ -97,9 +98,9 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then // It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it. // arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() { func (m *Mutex) Unlock() {
if raceenabled { if race.Enabled {
_ = m.state _ = m.state
raceRelease(unsafe.Pointer(m)) race.Release(unsafe.Pointer(m))
} }
// Fast path: drop lock bit. // Fast path: drop lock bit.
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package sync package sync
import ( import (
"internal/race"
"runtime" "runtime"
"sync/atomic" "sync/atomic"
"unsafe" "unsafe"
...@@ -59,7 +60,7 @@ type poolLocal struct { ...@@ -59,7 +60,7 @@ type poolLocal struct {
// Put adds x to the pool. // Put adds x to the pool.
func (p *Pool) Put(x interface{}) { func (p *Pool) Put(x interface{}) {
if raceenabled { if race.Enabled {
// Under race detector the Pool degenerates into no-op. // Under race detector the Pool degenerates into no-op.
// It's conforming, simple and does not introduce excessive // It's conforming, simple and does not introduce excessive
// happens-before edges between unrelated goroutines. // happens-before edges between unrelated goroutines.
...@@ -91,7 +92,7 @@ func (p *Pool) Put(x interface{}) { ...@@ -91,7 +92,7 @@ func (p *Pool) Put(x interface{}) {
// If Get would otherwise return nil and p.New is non-nil, Get returns // If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New. // the result of calling p.New.
func (p *Pool) Get() interface{} { func (p *Pool) Get() interface{} {
if raceenabled { if race.Enabled {
if p.New != nil { if p.New != nil {
return p.New() return p.New()
} }
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package sync
import (
"unsafe"
)
// raceenabled reports whether the race detector is active; this file is
// compiled only without the race build tag, so it is always false here
// and the functions below are no-ops kept so call sites compile.
const raceenabled = false
// raceAcquire is a no-op without the race detector.
func raceAcquire(addr unsafe.Pointer) {
}
// raceRelease is a no-op without the race detector.
func raceRelease(addr unsafe.Pointer) {
}
// raceReleaseMerge is a no-op without the race detector.
func raceReleaseMerge(addr unsafe.Pointer) {
}
// raceDisable is a no-op without the race detector.
func raceDisable() {
}
// raceEnable is a no-op without the race detector.
func raceEnable() {
}
// raceRead is a no-op without the race detector.
func raceRead(addr unsafe.Pointer) {
}
// raceWrite is a no-op without the race detector.
func raceWrite(addr unsafe.Pointer) {
}
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package sync package sync
import ( import (
"internal/race"
"sync/atomic" "sync/atomic"
"unsafe" "unsafe"
) )
...@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30 ...@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading. // RLock locks rw for reading.
func (rw *RWMutex) RLock() { func (rw *RWMutex) RLock() {
if raceenabled { if race.Enabled {
_ = rw.w.state _ = rw.w.state
raceDisable() race.Disable()
} }
if atomic.AddInt32(&rw.readerCount, 1) < 0 { if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it. // A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem) runtime_Semacquire(&rw.readerSem)
} }
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
raceAcquire(unsafe.Pointer(&rw.readerSem)) race.Acquire(unsafe.Pointer(&rw.readerSem))
} }
} }
...@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() { ...@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading // It is a run-time error if rw is not locked for reading
// on entry to RUnlock. // on entry to RUnlock.
func (rw *RWMutex) RUnlock() { func (rw *RWMutex) RUnlock() {
if raceenabled { if race.Enabled {
_ = rw.w.state _ = rw.w.state
raceReleaseMerge(unsafe.Pointer(&rw.writerSem)) race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
raceDisable() race.Disable()
} }
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 { if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders { if r+1 == 0 || r+1 == -rwmutexMaxReaders {
raceEnable() race.Enable()
panic("sync: RUnlock of unlocked RWMutex") panic("sync: RUnlock of unlocked RWMutex")
} }
// A writer is pending. // A writer is pending.
...@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() { ...@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() {
runtime_Semrelease(&rw.writerSem) runtime_Semrelease(&rw.writerSem)
} }
} }
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
} }
} }
...@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() { ...@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() {
// a blocked Lock call excludes new readers from acquiring // a blocked Lock call excludes new readers from acquiring
// the lock. // the lock.
func (rw *RWMutex) Lock() { func (rw *RWMutex) Lock() {
if raceenabled { if race.Enabled {
_ = rw.w.state _ = rw.w.state
raceDisable() race.Disable()
} }
// First, resolve competition with other writers. // First, resolve competition with other writers.
rw.w.Lock() rw.w.Lock()
...@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() { ...@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() {
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 { if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem) runtime_Semacquire(&rw.writerSem)
} }
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
raceAcquire(unsafe.Pointer(&rw.readerSem)) race.Acquire(unsafe.Pointer(&rw.readerSem))
raceAcquire(unsafe.Pointer(&rw.writerSem)) race.Acquire(unsafe.Pointer(&rw.writerSem))
} }
} }
...@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() { ...@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() {
// goroutine. One goroutine may RLock (Lock) an RWMutex and then // goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it. // arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() { func (rw *RWMutex) Unlock() {
if raceenabled { if race.Enabled {
_ = rw.w.state _ = rw.w.state
raceRelease(unsafe.Pointer(&rw.readerSem)) race.Release(unsafe.Pointer(&rw.readerSem))
raceRelease(unsafe.Pointer(&rw.writerSem)) race.Release(unsafe.Pointer(&rw.writerSem))
raceDisable() race.Disable()
} }
// Announce to readers there is no active writer. // Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders) r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders { if r >= rwmutexMaxReaders {
raceEnable() race.Enable()
panic("sync: Unlock of unlocked RWMutex") panic("sync: Unlock of unlocked RWMutex")
} }
// Unblock blocked readers, if any. // Unblock blocked readers, if any.
...@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() { ...@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() {
} }
// Allow other writers to proceed. // Allow other writers to proceed.
rw.w.Unlock() rw.w.Unlock()
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
} }
} }
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package sync package sync
import ( import (
"internal/race"
"sync/atomic" "sync/atomic"
"unsafe" "unsafe"
) )
...@@ -46,24 +47,24 @@ func (wg *WaitGroup) state() *uint64 { ...@@ -46,24 +47,24 @@ func (wg *WaitGroup) state() *uint64 {
// See the WaitGroup example. // See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) { func (wg *WaitGroup) Add(delta int) {
statep := wg.state() statep := wg.state()
if raceenabled { if race.Enabled {
_ = *statep // trigger nil deref early _ = *statep // trigger nil deref early
if delta < 0 { if delta < 0 {
// Synchronize decrements with Wait. // Synchronize decrements with Wait.
raceReleaseMerge(unsafe.Pointer(wg)) race.ReleaseMerge(unsafe.Pointer(wg))
} }
raceDisable() race.Disable()
defer raceEnable() defer race.Enable()
} }
state := atomic.AddUint64(statep, uint64(delta)<<32) state := atomic.AddUint64(statep, uint64(delta)<<32)
v := int32(state >> 32) v := int32(state >> 32)
w := uint32(state) w := uint32(state)
if raceenabled { if race.Enabled {
if delta > 0 && v == int32(delta) { if delta > 0 && v == int32(delta) {
// The first increment must be synchronized with Wait. // The first increment must be synchronized with Wait.
// Need to model this as a read, because there can be // Need to model this as a read, because there can be
// several concurrent wg.counter transitions from 0. // several concurrent wg.counter transitions from 0.
raceRead(unsafe.Pointer(&wg.sema)) race.Read(unsafe.Pointer(&wg.sema))
} }
} }
if v < 0 { if v < 0 {
...@@ -98,9 +99,9 @@ func (wg *WaitGroup) Done() { ...@@ -98,9 +99,9 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero. // Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() { func (wg *WaitGroup) Wait() {
statep := wg.state() statep := wg.state()
if raceenabled { if race.Enabled {
_ = *statep // trigger nil deref early _ = *statep // trigger nil deref early
raceDisable() race.Disable()
} }
for { for {
state := atomic.LoadUint64(statep) state := atomic.LoadUint64(statep)
...@@ -108,28 +109,28 @@ func (wg *WaitGroup) Wait() { ...@@ -108,28 +109,28 @@ func (wg *WaitGroup) Wait() {
w := uint32(state) w := uint32(state)
if v == 0 { if v == 0 {
// Counter is 0, no need to wait. // Counter is 0, no need to wait.
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
raceAcquire(unsafe.Pointer(wg)) race.Acquire(unsafe.Pointer(wg))
} }
return return
} }
// Increment waiters count. // Increment waiters count.
if atomic.CompareAndSwapUint64(statep, state, state+1) { if atomic.CompareAndSwapUint64(statep, state, state+1) {
if raceenabled && w == 0 { if race.Enabled && w == 0 {
// Wait must be synchronized with the first Add. // Wait must be synchronized with the first Add.
// Need to model this is as a write to race with the read in Add. // Need to model this is as a write to race with the read in Add.
// As a consequence, can do the write only for the first waiter, // As a consequence, can do the write only for the first waiter,
// otherwise concurrent Waits will race with each other. // otherwise concurrent Waits will race with each other.
raceWrite(unsafe.Pointer(&wg.sema)) race.Write(unsafe.Pointer(&wg.sema))
} }
runtime_Semacquire(&wg.sema) runtime_Semacquire(&wg.sema)
if *statep != 0 { if *statep != 0 {
panic("sync: WaitGroup is reused before previous Wait has returned") panic("sync: WaitGroup is reused before previous Wait has returned")
} }
if raceenabled { if race.Enabled {
raceEnable() race.Enable()
raceAcquire(unsafe.Pointer(wg)) race.Acquire(unsafe.Pointer(wg))
} }
return return
} }
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
package sync_test package sync_test
import ( import (
"internal/race"
"runtime" "runtime"
. "sync" . "sync"
"sync/atomic" "sync/atomic"
...@@ -48,7 +49,7 @@ func TestWaitGroup(t *testing.T) { ...@@ -48,7 +49,7 @@ func TestWaitGroup(t *testing.T) {
} }
func knownRacy(t *testing.T) { func knownRacy(t *testing.T) {
if RaceEnabled { if race.Enabled {
t.Skip("skipping known-racy test under the race detector") t.Skip("skipping known-racy test under the race detector")
} }
} }
......
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package syscall
import (
"runtime"
"unsafe"
)
// raceenabled reports whether the race detector is active; this file is
// compiled only under the race build tag, so it is always true here.
const raceenabled = true
// raceAcquire is a thin wrapper around runtime.RaceAcquire,
// recording a synchronization acquire on addr.
func raceAcquire(addr unsafe.Pointer) {
runtime.RaceAcquire(addr)
}
// raceReleaseMerge is a thin wrapper around runtime.RaceReleaseMerge,
// recording a merged synchronization release on addr.
func raceReleaseMerge(addr unsafe.Pointer) {
runtime.RaceReleaseMerge(addr)
}
// raceReadRange reports the read of the len bytes at addr to the
// race detector via runtime.RaceReadRange.
func raceReadRange(addr unsafe.Pointer, len int) {
runtime.RaceReadRange(addr, len)
}
// raceWriteRange reports the write of the len bytes at addr to the
// race detector via runtime.RaceWriteRange.
func raceWriteRange(addr unsafe.Pointer, len int) {
runtime.RaceWriteRange(addr, len)
}
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
package syscall
import (
"unsafe"
)
// raceenabled reports whether the race detector is active; this file is
// compiled only without the race build tag, so it is always false here
// and the functions below are no-ops kept so call sites compile.
const raceenabled = false
// raceAcquire is a no-op without the race detector.
func raceAcquire(addr unsafe.Pointer) {
}
// raceReleaseMerge is a no-op without the race detector.
func raceReleaseMerge(addr unsafe.Pointer) {
}
// raceReadRange is a no-op without the race detector.
func raceReadRange(addr unsafe.Pointer, len int) {
}
// raceWriteRange is a no-op without the race detector.
func raceWriteRange(addr unsafe.Pointer, len int) {
}
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
package syscall package syscall
import ( import (
"internal/race"
"runtime" "runtime"
"sync" "sync"
"unsafe" "unsafe"
...@@ -158,12 +159,12 @@ func (s Signal) String() string { ...@@ -158,12 +159,12 @@ func (s Signal) String() string {
func Read(fd int, p []byte) (n int, err error) { func Read(fd int, p []byte) (n int, err error) {
n, err = read(fd, p) n, err = read(fd, p)
if raceenabled { if race.Enabled {
if n > 0 { if n > 0 {
raceWriteRange(unsafe.Pointer(&p[0]), n) race.WriteRange(unsafe.Pointer(&p[0]), n)
} }
if err == nil { if err == nil {
raceAcquire(unsafe.Pointer(&ioSync)) race.Acquire(unsafe.Pointer(&ioSync))
} }
} }
if msanenabled && n > 0 { if msanenabled && n > 0 {
...@@ -173,12 +174,12 @@ func Read(fd int, p []byte) (n int, err error) { ...@@ -173,12 +174,12 @@ func Read(fd int, p []byte) (n int, err error) {
} }
func Write(fd int, p []byte) (n int, err error) { func Write(fd int, p []byte) (n int, err error) {
if raceenabled { if race.Enabled {
raceReleaseMerge(unsafe.Pointer(&ioSync)) race.ReleaseMerge(unsafe.Pointer(&ioSync))
} }
n, err = write(fd, p) n, err = write(fd, p)
if raceenabled && n > 0 { if race.Enabled && n > 0 {
raceReadRange(unsafe.Pointer(&p[0]), n) race.ReadRange(unsafe.Pointer(&p[0]), n)
} }
if msanenabled && n > 0 { if msanenabled && n > 0 {
msanRead(unsafe.Pointer(&p[0]), n) msanRead(unsafe.Pointer(&p[0]), n)
...@@ -320,8 +321,8 @@ func Socketpair(domain, typ, proto int) (fd [2]int, err error) { ...@@ -320,8 +321,8 @@ func Socketpair(domain, typ, proto int) (fd [2]int, err error) {
} }
func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
if raceenabled { if race.Enabled {
raceReleaseMerge(unsafe.Pointer(&ioSync)) race.ReleaseMerge(unsafe.Pointer(&ioSync))
} }
return sendfile(outfd, infd, offset, count) return sendfile(outfd, infd, offset, count)
} }
......
...@@ -8,6 +8,7 @@ package syscall ...@@ -8,6 +8,7 @@ package syscall
import ( import (
errorspkg "errors" errorspkg "errors"
"internal/race"
"sync" "sync"
"unicode/utf16" "unicode/utf16"
"unsafe" "unsafe"
...@@ -304,11 +305,11 @@ func Read(fd Handle, p []byte) (n int, err error) { ...@@ -304,11 +305,11 @@ func Read(fd Handle, p []byte) (n int, err error) {
} }
return 0, e return 0, e
} }
if raceenabled { if race.Enabled {
if done > 0 { if done > 0 {
raceWriteRange(unsafe.Pointer(&p[0]), int(done)) race.WriteRange(unsafe.Pointer(&p[0]), int(done))
} }
raceAcquire(unsafe.Pointer(&ioSync)) race.Acquire(unsafe.Pointer(&ioSync))
} }
if msanenabled && done > 0 { if msanenabled && done > 0 {
msanWrite(unsafe.Pointer(&p[0]), int(done)) msanWrite(unsafe.Pointer(&p[0]), int(done))
...@@ -317,16 +318,16 @@ func Read(fd Handle, p []byte) (n int, err error) { ...@@ -317,16 +318,16 @@ func Read(fd Handle, p []byte) (n int, err error) {
} }
func Write(fd Handle, p []byte) (n int, err error) { func Write(fd Handle, p []byte) (n int, err error) {
if raceenabled { if race.Enabled {
raceReleaseMerge(unsafe.Pointer(&ioSync)) race.ReleaseMerge(unsafe.Pointer(&ioSync))
} }
var done uint32 var done uint32
e := WriteFile(fd, p, &done, nil) e := WriteFile(fd, p, &done, nil)
if e != nil { if e != nil {
return 0, e return 0, e
} }
if raceenabled && done > 0 { if race.Enabled && done > 0 {
raceReadRange(unsafe.Pointer(&p[0]), int(done)) race.ReadRange(unsafe.Pointer(&p[0]), int(done))
} }
if msanenabled && done > 0 { if msanenabled && done > 0 {
msanRead(unsafe.Pointer(&p[0]), int(done)) msanRead(unsafe.Pointer(&p[0]), int(done))
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment