path: root/libgo/go/sync
author     Ian Lance Taylor <iant@golang.org>   2017-01-14 00:05:42 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2017-01-14 00:05:42 +0000
commit  c2047754c300b68c05d65faa8dc2925fe67b71b4 (patch)
tree    e183ae81a1f48a02945cb6de463a70c5be1b06f6 /libgo/go/sync
parent  829afb8f05602bb31c9c597b24df7377fed4f059 (diff)
libgo: update to Go 1.8 release candidate 1
Compiler changes:
    * Change map assignment to use mapassign and assign value directly.
    * Change string iteration to use decoderune, faster for ASCII strings.
    * Change makeslice to take int, and use makeslice64 for larger values.
    * Add new noverflow field to hmap struct used for maps.

Unresolved problems, to be fixed later:
    * Commented out test in go/types/sizes_test.go that doesn't compile.
    * Commented out reflect.TestStructOf test for padding after zero-sized field.

Reviewed-on: https://go-review.googlesource.com/35231

gotools/:
    Updates for Go 1.8rc1.
    * Makefile.am (go_cmd_go_files): Add bug.go.
    (s-zdefaultcc): Write defaultPkgConfig.
    * Makefile.in: Rebuild.

From-SVN: r244456
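The string-iteration change listed above is visible only in the generated code: a plain range loop over a string, as in this minimal sketch, is what the compiler now lowers to the runtime decoderune helper, which has a fast path for ASCII. No source changes are required.

package main

import "fmt"

// countRunes ranges over a string rune by rune. With the Go 1.8 compiler
// change noted above, this loop calls the runtime's decoderune helper; the
// source itself is unchanged.
func countRunes(s string) int {
	n := 0
	for range s {
		n++
	}
	return n
}

func main() {
	fmt.Println(countRunes("hello, 世界")) // 9
}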
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic.c          20
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go    31
-rw-r--r--  libgo/go/sync/cond_test.go              4
-rw-r--r--  libgo/go/sync/example_pool_test.go     45
-rw-r--r--  libgo/go/sync/mutex.go                  8
-rw-r--r--  libgo/go/sync/mutex_test.go           108
-rw-r--r--  libgo/go/sync/pool.go                  81
-rw-r--r--  libgo/go/sync/pool_test.go              3
-rw-r--r--  libgo/go/sync/runtime.go                3
-rw-r--r--  libgo/go/sync/rwmutex.go                4
-rw-r--r--  libgo/go/sync/rwmutex_test.go          42
11 files changed, 250 insertions, 99 deletions
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
index f0ba57b3cca..7e04027c3f1 100644
--- a/libgo/go/sync/atomic/atomic.c
+++ b/libgo/go/sync/atomic/atomic.c
@@ -25,6 +25,8 @@ int64_t SwapInt64 (int64_t *, int64_t)
int64_t
SwapInt64 (int64_t *addr, int64_t new)
{
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -45,6 +47,8 @@ uint64_t SwapUint64 (uint64_t *, uint64_t)
uint64_t
SwapUint64 (uint64_t *addr, uint64_t new)
{
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
}
@@ -85,6 +89,8 @@ _Bool CompareAndSwapInt64 (int64_t *, int64_t, int64_t)
_Bool
CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new)
{
+ if (((uintptr_t) val & 7) != 0)
+ val = NULL;
return __sync_bool_compare_and_swap (val, old, new);
}
@@ -105,6 +111,8 @@ _Bool CompareAndSwapUint64 (uint64_t *, uint64_t, uint64_t)
_Bool
CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new)
{
+ if (((uintptr_t) val & 7) != 0)
+ val = NULL;
return __sync_bool_compare_and_swap (val, old, new);
}
@@ -155,6 +163,8 @@ int64_t AddInt64 (int64_t *, int64_t)
int64_t
AddInt64 (int64_t *val, int64_t delta)
{
+ if (((uintptr_t) val & 7) != 0)
+ val = NULL;
return __sync_add_and_fetch (val, delta);
}
@@ -165,6 +175,8 @@ uint64_t AddUint64 (uint64_t *, uint64_t)
uint64_t
AddUint64 (uint64_t *val, uint64_t delta)
{
+ if (((uintptr_t) val & 7) != 0)
+ val = NULL;
return __sync_add_and_fetch (val, delta);
}
@@ -202,6 +214,8 @@ LoadInt64 (int64_t *addr)
{
int64_t v;
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, v))
v = *addr;
@@ -232,6 +246,8 @@ LoadUint64 (uint64_t *addr)
{
uint64_t v;
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, v))
v = *addr;
@@ -291,6 +307,8 @@ StoreInt64 (int64_t *addr, int64_t val)
{
int64_t v;
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, val))
v = *addr;
@@ -319,6 +337,8 @@ StoreUint64 (uint64_t *addr, uint64_t val)
{
uint64_t v;
+ if (((uintptr_t) addr & 7) != 0)
+ addr = NULL;
v = *addr;
while (! __sync_bool_compare_and_swap (addr, v, val))
v = *addr;
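The guard added throughout atomic.c above forces a fault on misaligned 64-bit operands: on 32-bit targets gccgo cannot perform a 64-bit atomic operation on an address that is not 8-byte aligned, so the pointer is replaced with NULL and the following atomic operation crashes immediately instead of silently tearing, which is what TestUnaligned64 below expects. Go callers avoid this by keeping 64-bit fields 8-byte aligned; a minimal sketch of the usual convention, relying on the documented guarantee that the first word of an allocated struct is 64-bit aligned:

package main

import (
	"fmt"
	"sync/atomic"
)

// counter keeps its 64-bit field first, so atomic.AddUint64 is safe even on
// 32-bit platforms, where the C guard above would otherwise force a crash.
type counter struct {
	n    uint64
	name string
}

func main() {
	c := &counter{name: "requests"}
	atomic.AddUint64(&c.n, 1)
	fmt.Println(c.name, atomic.LoadUint64(&c.n))
}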
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 5a33d7fac0d..6d0831c3f9d 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -1226,10 +1226,12 @@ func TestStoreLoadSeqCst32(t *testing.T) {
}
his := LoadInt32(&ack[he][i%3])
if (my != i && my != i-1) || (his != i && his != i-1) {
- t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
}
if my != i && his != i {
- t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
}
StoreInt32(&ack[me][(i-1)%3], -1)
}
@@ -1269,10 +1271,12 @@ func TestStoreLoadSeqCst64(t *testing.T) {
}
his := LoadInt64(&ack[he][i%3])
if (my != i && my != i-1) || (his != i && his != i-1) {
- t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ t.Errorf("invalid values: %d/%d (%d)", my, his, i)
+ break
}
if my != i && his != i {
- t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ t.Errorf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ break
}
StoreInt64(&ack[me][(i-1)%3], -1)
}
@@ -1317,7 +1321,8 @@ func TestStoreLoadRelAcq32(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float32(i) {
- t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
}
}
}
@@ -1365,7 +1370,8 @@ func TestStoreLoadRelAcq64(t *testing.T) {
d1 := X.data1
d2 := X.data2
if d1 != i || d2 != float64(i) {
- t.Fatalf("incorrect data: %d/%g (%d)", d1, d2, i)
+ t.Errorf("incorrect data: %d/%g (%d)", d1, d2, i)
+ break
}
}
}
@@ -1389,11 +1395,16 @@ func TestUnaligned64(t *testing.T) {
// Unaligned 64-bit atomics on 32-bit systems are
// a continual source of pain. Test that on 32-bit systems they crash
// instead of failing silently.
- if unsafe.Sizeof(int(0)) != 4 {
- t.Skip("test only runs on 32-bit systems")
- }
- t.Skip("skipping test for gccgo")
+ switch runtime.GOARCH {
+ default:
+ if unsafe.Sizeof(int(0)) != 4 {
+ t.Skip("test only runs on 32-bit systems")
+ }
+ case "amd64p32":
+ // amd64p32 can handle unaligned atomics.
+ t.Skipf("test not needed on %v", runtime.GOARCH)
+ }
x := make([]uint32, 4)
p := (*uint64)(unsafe.Pointer(&x[1])) // misaligned
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
index 7b0729571ce..9019f8f1028 100644
--- a/libgo/go/sync/cond_test.go
+++ b/libgo/go/sync/cond_test.go
@@ -137,7 +137,7 @@ func TestRace(t *testing.T) {
x = 1
c.Wait()
if x != 2 {
- t.Fatal("want 2")
+ t.Error("want 2")
}
x = 3
c.Signal()
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
if x == 2 {
c.Wait()
if x != 3 {
- t.Fatal("want 3")
+ t.Error("want 3")
}
break
}
diff --git a/libgo/go/sync/example_pool_test.go b/libgo/go/sync/example_pool_test.go
new file mode 100644
index 00000000000..8288d41e8c0
--- /dev/null
+++ b/libgo/go/sync/example_pool_test.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "sync"
+ "time"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ // The Pool's New function should generally only return pointer
+ // types, since a pointer can be put into the return interface
+ // value without an allocation:
+ return new(bytes.Buffer)
+ },
+}
+
+// timeNow is a fake version of time.Now for tests.
+func timeNow() time.Time {
+ return time.Unix(1136214245, 0)
+}
+
+func Log(w io.Writer, key, val string) {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ // Replace this with time.Now() in a real logger.
+ b.WriteString(timeNow().UTC().Format(time.RFC3339))
+ b.WriteByte(' ')
+ b.WriteString(key)
+ b.WriteByte('=')
+ b.WriteString(val)
+ w.Write(b.Bytes())
+ bufPool.Put(b)
+}
+
+func ExamplePool() {
+ Log(os.Stdout, "path", "/search?q=flowers")
+ // Output: 2006-01-02T15:04:05Z path=/search?q=flowers
+}
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 90892793f0a..8c9366f4fe1 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -16,6 +16,8 @@ import (
"unsafe"
)
+func throw(string) // provided by runtime
+
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
// the zero value for a Mutex is an unlocked mutex.
@@ -74,7 +76,7 @@ func (m *Mutex) Lock() {
// The goroutine has been woken from sleep,
// so we need to reset the flag in either case.
if new&mutexWoken == 0 {
- panic("sync: inconsistent mutex state")
+ throw("sync: inconsistent mutex state")
}
new &^= mutexWoken
}
@@ -82,7 +84,7 @@ func (m *Mutex) Lock() {
if old&mutexLocked == 0 {
break
}
- runtime_Semacquire(&m.sema)
+ runtime_SemacquireMutex(&m.sema)
awoke = true
iter = 0
}
@@ -108,7 +110,7 @@ func (m *Mutex) Unlock() {
// Fast path: drop lock bit.
new := atomic.AddInt32(&m.state, -mutexLocked)
if (new+mutexLocked)&mutexLocked == 0 {
- panic("sync: unlock of unlocked mutex")
+ throw("sync: unlock of unlocked mutex")
}
old := new
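Replacing panic with the runtime's throw makes mutex misuse a fatal, unrecoverable error: a deferred recover no longer observes it, which is why the recover-based tests below are rewritten to exercise misuse in a child process. A small sketch of the difference, assuming Go 1.8 behavior:

package main

import "sync"

// With the change above, the deferred recover never runs for the unlock
// error: the runtime throw aborts the process with
//   fatal error: sync: unlock of unlocked mutex
// rather than raising a recoverable panic as before.
func main() {
	defer func() {
		if r := recover(); r != nil {
			println("recovered:", r.(string)) // not reached on Go 1.8+
		}
	}()
	var mu sync.Mutex
	mu.Unlock()
}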
diff --git a/libgo/go/sync/mutex_test.go b/libgo/go/sync/mutex_test.go
index 91a4855cb1f..88dbccf3add 100644
--- a/libgo/go/sync/mutex_test.go
+++ b/libgo/go/sync/mutex_test.go
@@ -7,7 +7,12 @@
package sync_test
import (
+ "fmt"
+ "internal/testenv"
+ "os"
+ "os/exec"
"runtime"
+ "strings"
. "sync"
"testing"
)
@@ -61,6 +66,10 @@ func HammerMutex(m *Mutex, loops int, cdone chan bool) {
}
func TestMutex(t *testing.T) {
+ if n := runtime.SetMutexProfileFraction(1); n != 0 {
+ t.Logf("got mutexrate %d expected 0", n)
+ }
+ defer runtime.SetMutexProfileFraction(0)
m := new(Mutex)
c := make(chan bool)
for i := 0; i < 10; i++ {
@@ -71,17 +80,98 @@ func TestMutex(t *testing.T) {
}
}
-func TestMutexPanic(t *testing.T) {
- defer func() {
- if recover() == nil {
- t.Fatalf("unlock of unlocked mutex did not panic")
+var misuseTests = []struct {
+ name string
+ f func()
+}{
+ {
+ "Mutex.Unlock",
+ func() {
+ var mu Mutex
+ mu.Unlock()
+ },
+ },
+ {
+ "Mutex.Unlock2",
+ func() {
+ var mu Mutex
+ mu.Lock()
+ mu.Unlock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock",
+ func() {
+ var mu RWMutex
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock2",
+ func() {
+ var mu RWMutex
+ mu.RLock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.Unlock3",
+ func() {
+ var mu RWMutex
+ mu.Lock()
+ mu.Unlock()
+ mu.Unlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock",
+ func() {
+ var mu RWMutex
+ mu.RUnlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock2",
+ func() {
+ var mu RWMutex
+ mu.Lock()
+ mu.RUnlock()
+ },
+ },
+ {
+ "RWMutex.RUnlock3",
+ func() {
+ var mu RWMutex
+ mu.RLock()
+ mu.RUnlock()
+ mu.RUnlock()
+ },
+ },
+}
+
+func init() {
+ if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
+ for _, test := range misuseTests {
+ if test.name == os.Args[2] {
+ test.f()
+ fmt.Printf("test completed\n")
+ os.Exit(0)
+ }
}
- }()
+ fmt.Printf("unknown test\n")
+ os.Exit(0)
+ }
+}
- var mu Mutex
- mu.Lock()
- mu.Unlock()
- mu.Unlock()
+func TestMutexMisuse(t *testing.T) {
+ testenv.MustHaveExec(t)
+ for _, test := range misuseTests {
+ out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
+ if err == nil || !strings.Contains(string(out), "unlocked") {
+ t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
+ }
+ }
}
func BenchmarkMutexUncontended(b *testing.B) {
diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go
index bf29d88c5cb..0acdbde096f 100644
--- a/libgo/go/sync/pool.go
+++ b/libgo/go/sync/pool.go
@@ -61,29 +61,49 @@ type poolLocal struct {
pad [128]byte // Prevents false sharing.
}
+// from runtime
+func fastrand() uint32
+
+var poolRaceHash [128]uint64
+
+// poolRaceAddr returns an address to use as the synchronization point
+// for race detector logic. We don't use the actual pointer stored in x
+// directly, for fear of conflicting with other synchronization on that address.
+// Instead, we hash the pointer to get an index into poolRaceHash.
+// See discussion on golang.org/cl/31589.
+func poolRaceAddr(x interface{}) unsafe.Pointer {
+ ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
+ h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
+ return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
+}
+
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
- if race.Enabled {
- // Under race detector the Pool degenerates into no-op.
- // It's conforming, simple and does not introduce excessive
- // happens-before edges between unrelated goroutines.
- return
- }
if x == nil {
return
}
+ if race.Enabled {
+ if fastrand()%4 == 0 {
+ // Randomly drop x on floor.
+ return
+ }
+ race.ReleaseMerge(poolRaceAddr(x))
+ race.Disable()
+ }
l := p.pin()
if l.private == nil {
l.private = x
x = nil
}
runtime_procUnpin()
- if x == nil {
- return
+ if x != nil {
+ l.Lock()
+ l.shared = append(l.shared, x)
+ l.Unlock()
+ }
+ if race.Enabled {
+ race.Enable()
}
- l.Lock()
- l.shared = append(l.shared, x)
- l.Unlock()
}
// Get selects an arbitrary item from the Pool, removes it from the
@@ -96,29 +116,34 @@ func (p *Pool) Put(x interface{}) {
// the result of calling p.New.
func (p *Pool) Get() interface{} {
if race.Enabled {
- if p.New != nil {
- return p.New()
- }
- return nil
+ race.Disable()
}
l := p.pin()
x := l.private
l.private = nil
runtime_procUnpin()
- if x != nil {
- return x
+ if x == nil {
+ l.Lock()
+ last := len(l.shared) - 1
+ if last >= 0 {
+ x = l.shared[last]
+ l.shared = l.shared[:last]
+ }
+ l.Unlock()
+ if x == nil {
+ x = p.getSlow()
+ }
}
- l.Lock()
- last := len(l.shared) - 1
- if last >= 0 {
- x = l.shared[last]
- l.shared = l.shared[:last]
+ if race.Enabled {
+ race.Enable()
+ if x != nil {
+ race.Acquire(poolRaceAddr(x))
+ }
}
- l.Unlock()
- if x != nil {
- return x
+ if x == nil && p.New != nil {
+ x = p.New()
}
- return p.getSlow()
+ return x
}
func (p *Pool) getSlow() (x interface{}) {
@@ -140,10 +165,6 @@ func (p *Pool) getSlow() (x interface{}) {
}
l.Unlock()
}
-
- if x == nil && p.New != nil {
- x = p.New()
- }
return x
}
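The Pool changes above replace the old "no-op under the race detector" behavior: Put and Get now stay functional with -race, randomly dropping one item in four and recording release/acquire events on a hashed address rather than on the stored pointer itself. A standalone sketch of that hashing idea follows; the names raceHashTable and hashedSyncAddr are illustrative only, while the constants mirror poolRaceAddr above.

package main

import (
	"fmt"
	"unsafe"
)

// Like poolRaceAddr in the patch, the pointer is hashed into a small fixed
// table and the table slot, not the pointer itself, is used as the race
// detector synchronization point, avoiding conflicts with other
// synchronization on the stored address.
var raceHashTable [128]uint64

func hashedSyncAddr(p unsafe.Pointer) unsafe.Pointer {
	h := uint32((uint64(uint32(uintptr(p))) * 0x85ebca6b) >> 16)
	return unsafe.Pointer(&raceHashTable[h%uint32(len(raceHashTable))])
}

func main() {
	x := new(int)
	fmt.Printf("%p -> %p\n", x, hashedSyncAddr(unsafe.Pointer(x)))
}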
diff --git a/libgo/go/sync/pool_test.go b/libgo/go/sync/pool_test.go
index 051bb175338..f92e181a6b1 100644
--- a/libgo/go/sync/pool_test.go
+++ b/libgo/go/sync/pool_test.go
@@ -128,7 +128,8 @@ func TestPoolStress(t *testing.T) {
p.Put(v)
v = p.Get()
if v != nil && v.(int) != 0 {
- t.Fatalf("expect 0, got %v", v)
+ t.Errorf("expect 0, got %v", v)
+ break
}
}
done <- true
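Several hunks in this change (here, in atomic_test.go, and in cond_test.go) convert t.Fatalf to t.Errorf plus a break or return. Fatal and Fatalf call runtime.Goexit and must only be used from the goroutine running the test, so worker goroutines report with Errorf and exit on their own. A minimal sketch of that reporting pattern:

package sync_test

import (
	"sync"
	"testing"
)

// TestWorkersSketch shows the pattern the hunks above adopt: only the test
// goroutine may call Fatalf, so workers use Errorf and return.
func TestWorkersSketch(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if i < 0 { // placeholder failure condition
				t.Errorf("worker %d failed", i) // not Fatalf: wrong goroutine
				return
			}
		}(i)
	}
	wg.Wait()
}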
diff --git a/libgo/go/sync/runtime.go b/libgo/go/sync/runtime.go
index 96c56c85224..4d22ce6b0da 100644
--- a/libgo/go/sync/runtime.go
+++ b/libgo/go/sync/runtime.go
@@ -13,6 +13,9 @@ import "unsafe"
// library and should not be used directly.
func runtime_Semacquire(s *uint32)
+// SemacquireMutex is like Semacquire, but for profiling contended Mutexes.
+func runtime_SemacquireMutex(*uint32)
+
// Semrelease atomically increments *s and notifies a waiting goroutine
// if one is blocked in Semacquire.
// It is intended as a simple wakeup primitive for use by the synchronization
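runtime_SemacquireMutex exists so that contended Mutex.Lock calls can be attributed to the mutex contention profile introduced in Go 1.8; TestMutex above enables sampling with runtime.SetMutexProfileFraction(1). A small sketch, assuming the standard runtime/pprof API, of how a program reads that profile:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
)

func main() {
	// Sample every contention event; blocking Lock calls routed through
	// SemacquireMutex end up in this profile.
	runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(0)

	var mu sync.Mutex
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			mu.Unlock()
		}()
	}
	wg.Wait()

	pprof.Lookup("mutex").WriteTo(os.Stdout, 1)
}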
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 6734360e37a..71064eeeba3 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -61,7 +61,7 @@ func (rw *RWMutex) RUnlock() {
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
race.Enable()
- panic("sync: RUnlock of unlocked RWMutex")
+ throw("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
@@ -115,7 +115,7 @@ func (rw *RWMutex) Unlock() {
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
race.Enable()
- panic("sync: Unlock of unlocked RWMutex")
+ throw("sync: Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
diff --git a/libgo/go/sync/rwmutex_test.go b/libgo/go/sync/rwmutex_test.go
index f625bc3a585..0436f97239c 100644
--- a/libgo/go/sync/rwmutex_test.go
+++ b/libgo/go/sync/rwmutex_test.go
@@ -155,48 +155,6 @@ func TestRLocker(t *testing.T) {
}
}
-func TestUnlockPanic(t *testing.T) {
- defer func() {
- if recover() == nil {
- t.Fatalf("unlock of unlocked RWMutex did not panic")
- }
- }()
- var mu RWMutex
- mu.Unlock()
-}
-
-func TestUnlockPanic2(t *testing.T) {
- defer func() {
- if recover() == nil {
- t.Fatalf("unlock of unlocked RWMutex did not panic")
- }
- }()
- var mu RWMutex
- mu.RLock()
- mu.Unlock()
-}
-
-func TestRUnlockPanic(t *testing.T) {
- defer func() {
- if recover() == nil {
- t.Fatalf("read unlock of unlocked RWMutex did not panic")
- }
- }()
- var mu RWMutex
- mu.RUnlock()
-}
-
-func TestRUnlockPanic2(t *testing.T) {
- defer func() {
- if recover() == nil {
- t.Fatalf("read unlock of unlocked RWMutex did not panic")
- }
- }()
- var mu RWMutex
- mu.Lock()
- mu.RUnlock()
-}
-
func BenchmarkRWMutexUncontended(b *testing.B) {
type PaddedRWMutex struct {
RWMutex