author    | Ian Lance Taylor <iant@golang.org> | 2020-01-02 15:05:27 -0800
committer | Ian Lance Taylor <iant@golang.org> | 2020-01-21 23:53:22 -0800
commit    | 5a8ea165926cb0737ab03bc48c18dc5198ab5305 (patch)
tree      | 962dc3357c57f019f85658f99e2e753e30201c27 /libgo/go/runtime/internal
parent    | 6ac6529e155c9baa0aaaed7aca06bd38ebda5b43 (diff)
libgo: update to Go1.14beta1
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/214297
Diffstat (limited to 'libgo/go/runtime/internal')
-rw-r--r-- | libgo/go/runtime/internal/atomic/atomic.c          |  20
-rw-r--r-- | libgo/go/runtime/internal/atomic/atomic_test.go    | 127
-rw-r--r-- | libgo/go/runtime/internal/atomic/bench_test.go     |  40
-rw-r--r-- | libgo/go/runtime/internal/atomic/gccgo.go          |   6
-rw-r--r-- | libgo/go/runtime/internal/sys/intrinsics.go        |  19
-rw-r--r-- | libgo/go/runtime/internal/sys/intrinsics_common.go | 143
6 files changed, 328 insertions, 27 deletions
diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c
index 17c83a28c1c..8ae4d7b619d 100644
--- a/libgo/go/runtime/internal/atomic/atomic.c
+++ b/libgo/go/runtime/internal/atomic/atomic.c
@@ -26,6 +26,16 @@ Loadp (void *ptr)
   return __atomic_load_n ((void **) ptr, __ATOMIC_SEQ_CST);
 }
 
+uint8_t Load8 (uint8_t *ptr)
+  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load8")
+  __attribute__ ((no_split_stack));
+
+uint8_t
+Load8 (uint8_t *ptr)
+{
+  return __atomic_load_n (ptr, __ATOMIC_SEQ_CST);
+}
+
 uint64_t Load64 (uint64_t *ptr)
   __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load64")
   __attribute__ ((no_split_stack));
@@ -238,6 +248,16 @@ Store (uint32_t *ptr, uint32_t val)
   __atomic_store_n (ptr, val, __ATOMIC_SEQ_CST);
 }
 
+void Store8 (uint8_t *ptr, uint8_t val)
+  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store8")
+  __attribute__ ((no_split_stack));
+
+void
+Store8 (uint8_t *ptr, uint8_t val)
+{
+  __atomic_store_n (ptr, val, __ATOMIC_SEQ_CST);
+}
+
 void Store64 (uint64_t *ptr, uint64_t val)
   __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store64")
   __attribute__ ((no_split_stack));
diff --git a/libgo/go/runtime/internal/atomic/atomic_test.go b/libgo/go/runtime/internal/atomic/atomic_test.go
index 0ba75447e8c..0c1125c5583 100644
--- a/libgo/go/runtime/internal/atomic/atomic_test.go
+++ b/libgo/go/runtime/internal/atomic/atomic_test.go
@@ -86,14 +86,8 @@ func TestUnaligned64(t *testing.T) {
 	// a continual source of pain. Test that on 32-bit systems they crash
 	// instead of failing silently.
 
-	switch runtime.GOARCH {
-	default:
-		if unsafe.Sizeof(int(0)) != 4 {
-			t.Skip("test only runs on 32-bit systems")
-		}
-	case "amd64p32":
-		// amd64p32 can handle unaligned atomics.
-		t.Skipf("test not needed on %v", runtime.GOARCH)
+	if unsafe.Sizeof(int(0)) != 4 {
+		t.Skip("test only runs on 32-bit systems")
 	}
 
 	x := make([]uint32, 4)
@@ -109,3 +103,120 @@ func TestUnaligned64(t *testing.T) {
 	shouldPanic(t, "Xchg64", func() { atomic.Xchg64(up64, 1) })
 	shouldPanic(t, "Cas64", func() { atomic.Cas64(up64, 1, 2) })
 }
+
+func TestAnd8(t *testing.T) {
+	// Basic sanity check.
+	x := uint8(0xff)
+	for i := uint8(0); i < 8; i++ {
+		atomic.And8(&x, ^(1 << i))
+		if r := uint8(0xff) << (i + 1); x != r {
+			t.Fatalf("clearing bit %#x: want %#x, got %#x", uint8(1<<i), r, x)
+		}
+	}
+
+	// Set every bit in array to 1.
+	a := make([]uint8, 1<<12)
+	for i := range a {
+		a[i] = 0xff
+	}
+
+	// Clear array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 8; i++ {
+		m := ^uint8(1 << i)
+		go func() {
+			for i := range a {
+				atomic.And8(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 8; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally cleared.
+	for i, v := range a {
+		if v != 0 {
+			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
+		}
+	}
+}
+
+func TestOr8(t *testing.T) {
+	// Basic sanity check.
+	x := uint8(0)
+	for i := uint8(0); i < 8; i++ {
+		atomic.Or8(&x, 1<<i)
+		if r := (uint8(1) << (i + 1)) - 1; x != r {
+			t.Fatalf("setting bit %#x: want %#x, got %#x", uint8(1)<<i, r, x)
+		}
+	}
+
+	// Start with every bit in array set to 0.
+	a := make([]uint8, 1<<12)
+
+	// Set every bit in array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 8; i++ {
+		m := uint8(1 << i)
+		go func() {
+			for i := range a {
+				atomic.Or8(&a[i], m)
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 8; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally set.
+	for i, v := range a {
+		if v != 0xff {
+			t.Fatalf("a[%v] not fully set: want %#x, got %#x", i, uint8(0xff), v)
+		}
+	}
+}
+
+func TestBitwiseContended(t *testing.T) {
+	// Start with every bit in array set to 0.
+	a := make([]uint8, 16)
+
+	// Iterations to try.
+	N := 1 << 16
+	if testing.Short() {
+		N = 1 << 10
+	}
+
+	// Set and then clear every bit in the array bit-by-bit in different goroutines.
+	done := make(chan bool)
+	for i := 0; i < 8; i++ {
+		m := uint8(1 << i)
+		go func() {
+			for n := 0; n < N; n++ {
+				for i := range a {
+					atomic.Or8(&a[i], m)
+					if atomic.Load8(&a[i])&m != m {
+						t.Errorf("a[%v] bit %#x not set", i, m)
+					}
+					atomic.And8(&a[i], ^m)
+					if atomic.Load8(&a[i])&m != 0 {
+						t.Errorf("a[%v] bit %#x not clear", i, m)
+					}
+				}
+			}
+			done <- true
+		}()
+	}
+	for i := 0; i < 8; i++ {
+		<-done
+	}
+
+	// Check that the array has been totally cleared.
+	for i, v := range a {
+		if v != 0 {
+			t.Fatalf("a[%v] not cleared: want %#x, got %#x", i, uint8(0), v)
+		}
+	}
+}
diff --git a/libgo/go/runtime/internal/atomic/bench_test.go b/libgo/go/runtime/internal/atomic/bench_test.go
index 083a75cb075..de71b0f2c7b 100644
--- a/libgo/go/runtime/internal/atomic/bench_test.go
+++ b/libgo/go/runtime/internal/atomic/bench_test.go
@@ -43,6 +43,46 @@ func BenchmarkAtomicStore(b *testing.B) {
 	}
 }
 
+func BenchmarkAnd8(b *testing.B) {
+	var x [512]uint8 // give byte its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.And8(&x[255], uint8(i))
+	}
+}
+
+func BenchmarkAnd8Parallel(b *testing.B) {
+	var x [512]uint8 // give byte its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint8(0)
+		for pb.Next() {
+			atomic.And8(&x[255], i)
+			i++
+		}
+	})
+}
+
+func BenchmarkOr8(b *testing.B) {
+	var x [512]uint8 // give byte its own cache line
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.Or8(&x[255], uint8(i))
+	}
+}
+
+func BenchmarkOr8Parallel(b *testing.B) {
+	var x [512]uint8 // give byte its own cache line
+	sink = &x
+	b.RunParallel(func(pb *testing.PB) {
+		i := uint8(0)
+		for pb.Next() {
+			atomic.Or8(&x[255], i)
+			i++
+		}
+	})
+}
+
 func BenchmarkXadd(b *testing.B) {
 	var x uint32
 	ptr := &x
diff --git a/libgo/go/runtime/internal/atomic/gccgo.go b/libgo/go/runtime/internal/atomic/gccgo.go
index e5edbfb17f1..4df83464636 100644
--- a/libgo/go/runtime/internal/atomic/gccgo.go
+++ b/libgo/go/runtime/internal/atomic/gccgo.go
@@ -15,6 +15,9 @@ func Load(ptr *uint32) uint32
 func Loadp(ptr unsafe.Pointer) unsafe.Pointer
 
 //go:noescape
+func Load8(ptr *uint8) uint8
+
+//go:noescape
 func Load64(ptr *uint64) uint64
 
 //go:noescape
@@ -56,6 +59,9 @@ func CasRel(ptr *uint32, old, new uint32) bool
 func Store(ptr *uint32, val uint32)
 
 //go:noescape
+func Store8(ptr *uint8, val uint8)
+
+//go:noescape
 func Store64(ptr *uint64, val uint64)
 
 //go:noescape
diff --git a/libgo/go/runtime/internal/sys/intrinsics.go b/libgo/go/runtime/internal/sys/intrinsics.go
index 6906938e574..d25f3507fc4 100644
--- a/libgo/go/runtime/internal/sys/intrinsics.go
+++ b/libgo/go/runtime/internal/sys/intrinsics.go
@@ -37,25 +37,6 @@ func Ctz8(x uint8) int {
 	return int(ntz8tab[x])
 }
 
-var ntz8tab = [256]uint8{
-	0x08, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x07, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
-}
-
 //extern __builtin_bswap64
 func bswap64(uint64) uint64
diff --git a/libgo/go/runtime/internal/sys/intrinsics_common.go b/libgo/go/runtime/internal/sys/intrinsics_common.go
new file mode 100644
index 00000000000..818d75ecc53
--- /dev/null
+++ b/libgo/go/runtime/internal/sys/intrinsics_common.go
@@ -0,0 +1,143 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// Copied from math/bits to avoid dependence.
+
+var len8tab = [256]uint8{
+	0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
+	0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
+	0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+	0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
+	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+	0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+	0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+}
+
+var ntz8tab = [256]uint8{
+	0x08, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x07, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+	0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+}
+
+// len64 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len64(x uint64) (n int) {
+	if x >= 1<<32 {
+		x >>= 32
+		n = 32
+	}
+	if x >= 1<<16 {
+		x >>= 16
+		n += 16
+	}
+	if x >= 1<<8 {
+		x >>= 8
+		n += 8
+	}
+	return n + int(len8tab[x])
+}
+
+// --- OnesCount ---
+
+const m0 = 0x5555555555555555 // 01010101 ...
+const m1 = 0x3333333333333333 // 00110011 ...
+const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
+
+// OnesCount64 returns the number of one bits ("population count") in x.
+func OnesCount64(x uint64) int {
+	// Implementation: Parallel summing of adjacent bits.
+	// See "Hacker's Delight", Chap. 5: Counting Bits.
+	// The following pattern shows the general approach:
+	//
+	//   x = x>>1&(m0&m) + x&(m0&m)
+	//   x = x>>2&(m1&m) + x&(m1&m)
+	//   x = x>>4&(m2&m) + x&(m2&m)
+	//   x = x>>8&(m3&m) + x&(m3&m)
+	//   x = x>>16&(m4&m) + x&(m4&m)
+	//   x = x>>32&(m5&m) + x&(m5&m)
+	//   return int(x)
+	//
+	// Masking (& operations) can be left away when there's no
+	// danger that a field's sum will carry over into the next
+	// field: Since the result cannot be > 64, 8 bits is enough
+	// and we can ignore the masks for the shifts by 8 and up.
+	// Per "Hacker's Delight", the first line can be simplified
+	// more, but it saves at best one instruction, so we leave
+	// it alone for clarity.
+	const m = 1<<64 - 1
+	x = x>>1&(m0&m) + x&(m0&m)
+	x = x>>2&(m1&m) + x&(m1&m)
+	x = (x>>4 + x) & (m2 & m)
+	x += x >> 8
+	x += x >> 16
+	x += x >> 32
+	return int(x) & (1<<7 - 1)
+}
+
+var deBruijn64tab = [64]byte{
+	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+
+// TrailingZeros64 returns the number of trailing zero bits in x; the result is 64 for x == 0.
+func TrailingZeros64(x uint64) int {
+	if x == 0 {
+		return 64
+	}
+	// If popcount is fast, replace code below with return popcount(^x & (x - 1)).
+	//
+	// x & -x leaves only the right-most bit set in the word. Let k be the
+	// index of that bit. Since only a single bit is set, the value is two
+	// to the power of k. Multiplying by a power of two is equivalent to
+	// left shifting, in this case by k bits. The de Bruijn (64 bit) constant
+	// is such that all six bit, consecutive substrings are distinct.
+	// Therefore, if we have a left shifted version of this constant we can
+	// find by how many bits it was shifted by looking at which six bit
+	// substring ended up at the top of the word.
+	// (Knuth, volume 4, section 7.3.1)
+	return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)])
+}
+
+// LeadingZeros64 returns the number of leading zero bits in x; the result is 64 for x == 0.
+func LeadingZeros64(x uint64) int { return 64 - Len64(x) }
+
+// LeadingZeros8 returns the number of leading zero bits in x; the result is 8 for x == 0.
+func LeadingZeros8(x uint8) int { return 8 - Len8(x) }
+
+// TrailingZeros8 returns the number of trailing zero bits in x; the result is 8 for x == 0.
+func TrailingZeros8(x uint8) int {
+	return int(ntz8tab[x])
+}
+
+// Len8 returns the minimum number of bits required to represent x; the result is 0 for x == 0.
+func Len8(x uint8) int {
+	return int(len8tab[x])
+}
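
The bit-manipulation helpers added in intrinsics_common.go rely on two classic tricks: isolating the lowest set bit and multiplying by a de Bruijn constant to find its index, and a parallel (SWAR) summation for population count. The sketch below is not part of the commit; it re-implements both tricks under lowercase illustrative names and cross-checks them against math/bits, which is the package the runtime copies from.

```go
package main

import (
	"fmt"
	"math/bits"
)

const deBruijn64 = 0x03f79d71b4ca8b09

var deBruijn64tab = [64]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

// trailingZeros64 isolates the lowest set bit with x&-x, multiplies by the
// de Bruijn constant so that a distinct 6-bit pattern lands in the top six
// bits, and uses that pattern as an index into the lookup table.
func trailingZeros64(x uint64) int {
	if x == 0 {
		return 64
	}
	return int(deBruijn64tab[(x&-x)*deBruijn64>>(64-6)])
}

// onesCount64 sums adjacent bit fields in parallel (SWAR), following the
// "Hacker's Delight" scheme quoted in the runtime source: masks are only
// needed for the first few steps, since the count fits in 8 bits.
func onesCount64(x uint64) int {
	const m0 = 0x5555555555555555
	const m1 = 0x3333333333333333
	const m2 = 0x0f0f0f0f0f0f0f0f
	x = x>>1&m0 + x&m0
	x = x>>2&m1 + x&m1
	x = (x>>4 + x) & m2
	x += x >> 8
	x += x >> 16
	x += x >> 32
	return int(x) & (1<<7 - 1)
}

func main() {
	for _, x := range []uint64{0, 1, 0x80, 0xdeadbeef, 1 << 63, ^uint64(0)} {
		fmt.Printf("x=%#x tz=%d (bits: %d) pop=%d (bits: %d)\n",
			x, trailingZeros64(x), bits.TrailingZeros64(x),
			onesCount64(x), bits.OnesCount64(x))
	}
}
```

Both helpers agree with math/bits for these probe values, including the x == 0 edge case, which is the behaviour the exported TrailingZeros64 and OnesCount64 document.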
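
The new Load8/Store8/And8/Or8 entry points give the runtime atomic read-modify-write on single bytes; TestAnd8 and TestOr8 exercise them by letting eight goroutines each own one bit of every array element. runtime/internal/atomic cannot be imported by ordinary programs, so the sketch below only emulates that test invariant with a hypothetical or32 helper built from a sync/atomic compare-and-swap loop on uint32 values; it illustrates the semantics the tests check, not the runtime's implementation.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// or32 atomically ORs mask into *p using a compare-and-swap retry loop,
// the portable stand-in here for the runtime's Or8.
func or32(p *uint32, mask uint32) {
	for {
		old := atomic.LoadUint32(p)
		if atomic.CompareAndSwapUint32(p, old, old|mask) {
			return
		}
	}
}

func main() {
	a := make([]uint32, 1<<12) // every element starts with all bits clear

	// Eight goroutines each set one distinct bit in every element.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		mask := uint32(1) << i
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range a {
				or32(&a[j], mask)
			}
		}()
	}
	wg.Wait()

	// Every element should now have its low 8 bits set, as in TestOr8.
	for i, v := range a {
		if v != 0xff {
			fmt.Printf("a[%d] = %#x, want 0xff\n", i, v)
			return
		}
	}
	fmt.Println("all elements fully set")
}
```

With byte-wide atomics as added by this commit, each element needs only a single byte rather than a uint32, which is what And8/Or8 buy the runtime's bitmap-style data structures.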