author    Ian Lance Taylor <ian@gcc.gnu.org>  2012-10-23 04:31:11 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2012-10-23 04:31:11 +0000
commit    4ccad563d2a3559f0557bfb177bcf45144219bdf (patch)
tree      46bb86f514fbf6bad82da48e69a18fb09d878834 /libgo/go/runtime
parent    0b7463235f0e23c624d1911c9b15f531108cc5a6 (diff)
libgo: Update to current sources.
From-SVN: r192704
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/complex_test.go       67
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go     15
-rw-r--r--  libgo/go/runtime/crash_test.go        109
-rw-r--r--  libgo/go/runtime/debug.go              25
-rw-r--r--  libgo/go/runtime/debug/stack_test.go    2
-rw-r--r--  libgo/go/runtime/error.go              32
-rw-r--r--  libgo/go/runtime/export_test.go        36
-rw-r--r--  libgo/go/runtime/gc_test.go            19
-rw-r--r--  libgo/go/runtime/iface_test.go        138
-rw-r--r--  libgo/go/runtime/lfstack_test.go      130
-rw-r--r--  libgo/go/runtime/parfor_test.go       125
-rw-r--r--  libgo/go/runtime/pprof/pprof.go        76
-rw-r--r--  libgo/go/runtime/proc_test.go          26
-rw-r--r--  libgo/go/runtime/string_test.go        45
-rw-r--r--  libgo/go/runtime/vlop_arm_test.go      70
15 files changed, 909 insertions, 6 deletions
diff --git a/libgo/go/runtime/complex_test.go b/libgo/go/runtime/complex_test.go
new file mode 100644
index 00000000000..f41e6a35701
--- /dev/null
+++ b/libgo/go/runtime/complex_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "math/cmplx"
+ "testing"
+)
+
+var result complex128
+
+func BenchmarkComplex128DivNormal(b *testing.B) {
+ d := 15 + 2i
+ n := 32 + 3i
+ res := 0i
+ for i := 0; i < b.N; i++ {
+ n += 0.1i
+ res += n / d
+ }
+ result = res
+}
+
+func BenchmarkComplex128DivNisNaN(b *testing.B) {
+ d := cmplx.NaN()
+ n := 32 + 3i
+ res := 0i
+ for i := 0; i < b.N; i++ {
+ n += 0.1i
+ res += n / d
+ }
+ result = res
+}
+
+func BenchmarkComplex128DivDisNaN(b *testing.B) {
+ d := 15 + 2i
+ n := cmplx.NaN()
+ res := 0i
+ for i := 0; i < b.N; i++ {
+ d += 0.1i
+ res += n / d
+ }
+ result = res
+}
+
+func BenchmarkComplex128DivNisInf(b *testing.B) {
+ d := 15 + 2i
+ n := cmplx.Inf()
+ res := 0i
+ for i := 0; i < b.N; i++ {
+ d += 0.1i
+ res += n / d
+ }
+ result = res
+}
+
+func BenchmarkComplex128DivDisInf(b *testing.B) {
+ d := cmplx.Inf()
+ n := 32 + 3i
+ res := 0i
+ for i := 0; i < b.N; i++ {
+ n += 0.1i
+ res += n / d
+ }
+ result = res
+}
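
A note on the pattern used above: each benchmark stores its accumulated quotient into the package-level result sink so the compiler cannot prove the divisions dead and elide the loop. A minimal standalone sketch of the same idiom (illustrative only, not part of this commit):

package main

import "fmt"

// sink is read only in main, but assigning to it keeps the work in
// divLoop observable, so an optimizing compiler cannot delete the loop.
var sink complex128

func divLoop(n int) {
	d := 15 + 2i
	num := 32 + 3i
	res := 0i
	for i := 0; i < n; i++ {
		num += 0.1i
		res += num / d
	}
	sink = res
}

func main() {
	divLoop(1000)
	fmt.Println(sink)
}
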
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
new file mode 100644
index 00000000000..91c4bdb0358
--- /dev/null
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -0,0 +1,15 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo
+
+package runtime_test
+
+import (
+ "testing"
+)
+
+func TestCgoCrashHandler(t *testing.T) {
+ testCrashHandler(t, &crashTest{Cgo: true})
+}
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
new file mode 100644
index 00000000000..465b2d70964
--- /dev/null
+++ b/libgo/go/runtime/crash_test.go
@@ -0,0 +1,109 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "text/template"
+)
+
+type crashTest struct {
+ Cgo bool
+}
+
+// This test runs a separate program, because it is testing
+// both main (m0) and non-main threads (m).
+
+func testCrashHandler(t *testing.T, ct *crashTest) {
+ if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+ // TODO(brainman): do not know why this test fails on freebsd
+ // TODO(jsing): figure out why this causes delayed failures
+ // on NetBSD - http://golang.org/issue/3954
+ t.Logf("skipping test on %q", runtime.GOOS)
+ return
+ }
+
+ st := template.Must(template.New("crashSource").Parse(crashSource))
+
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatalf("failed to create temp directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "main.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("failed to create %v: %v", src, err)
+ }
+ err = st.Execute(f, ct)
+ if err != nil {
+ f.Close()
+ t.Fatalf("failed to execute template: %v", err)
+ }
+ f.Close()
+
+ got, err := exec.Command("go", "run", src).CombinedOutput()
+ if err != nil {
+ t.Fatalf("program exited with error: %v\n%v", err, string(got))
+ }
+ want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n"
+ if string(got) != string(want) {
+ t.Fatalf("expected %q, but got %q", string(want), string(got))
+ }
+}
+
+func TestCrashHandler(t *testing.T) {
+ testCrashHandler(t, &crashTest{Cgo: false})
+}
+
+const crashSource = `
+package main
+
+import (
+ "fmt"
+ "runtime"
+)
+
+{{if .Cgo}}
+import "C"
+{{end}}
+
+func test(name string) {
+ defer func() {
+ if x := recover(); x != nil {
+ fmt.Printf(" recovered")
+ }
+ fmt.Printf(" done\n")
+ }()
+ fmt.Printf("%s:", name)
+ var s *string
+ _ = *s
+ fmt.Print("SHOULD NOT BE HERE")
+}
+
+func testInNewThread(name string) {
+ c := make(chan bool)
+ go func() {
+ runtime.LockOSThread()
+ test(name)
+ c <- true
+ }()
+ <-c
+}
+
+func main() {
+ runtime.LockOSThread()
+ test("main")
+ testInNewThread("new-thread")
+ testInNewThread("second-new-thread")
+ test("main-again")
+}
+`
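
The generated program's core behaviour can be seen in isolation with a minimal standalone sketch (nothing beyond the standard library is assumed): a nil-pointer dereference raises a run-time panic, and a recover inside a deferred function regains control, so the final print is never reached.

package main

import "fmt"

func main() {
	defer func() {
		if x := recover(); x != nil {
			fmt.Println("recovered:", x) // x describes the runtime error
		}
	}()
	var s *string
	_ = *s // nil dereference panics here
	fmt.Println("SHOULD NOT BE HERE")
}
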
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
index b802fc63f71..e9d7601710c 100644
--- a/libgo/go/runtime/debug.go
+++ b/libgo/go/runtime/debug.go
@@ -138,6 +138,31 @@ func CPUProfile() []byte
// SetCPUProfileRate directly.
func SetCPUProfileRate(hz int)
+// SetBlockProfileRate controls the fraction of goroutine blocking events
+// that are reported in the blocking profile. The profiler aims to sample
+// an average of one blocking event per rate nanoseconds spent blocked.
+//
+// To include every blocking event in the profile, pass rate = 1.
+// To turn off profiling entirely, pass rate <= 0.
+func SetBlockProfileRate(rate int)
+
+// BlockProfileRecord describes blocking events originated
+// at a particular call sequence (stack trace).
+type BlockProfileRecord struct {
+ Count int64
+ Cycles int64
+ StackRecord
+}
+
+// BlockProfile returns n, the number of records in the current blocking profile.
+// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
+// If len(p) < n, BlockProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.blockprofile flag instead
+// of calling BlockProfile directly.
+func BlockProfile(p []BlockProfileRecord) (n int, ok bool)
+
// Stack formats a stack trace of the calling goroutine into buf
// and returns the number of bytes written to buf.
// If all is true, Stack formats stack traces of all other goroutines
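
A usage sketch for the two additions above (not part of the commit; it relies on the "block" profile that this same patch registers in runtime/pprof): enable sampling, block once on a mutex, then dump the profile.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"time"
)

func main() {
	runtime.SetBlockProfileRate(1) // rate = 1: record every blocking event

	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(10 * time.Millisecond)
		mu.Unlock()
	}()
	mu.Lock() // blocks until the goroutine unlocks; this event is sampled
	mu.Unlock()

	// Write the blocking profile in its debug=1 text form.
	pprof.Lookup("block").WriteTo(os.Stdout, 1)
}
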
diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go
index f33f5072b40..bbd662618fd 100644
--- a/libgo/go/runtime/debug/stack_test.go
+++ b/libgo/go/runtime/debug/stack_test.go
@@ -36,7 +36,7 @@ func (t T) method() []byte {
func TestStack(t *testing.T) {
b := T(0).method()
lines := strings.Split(string(b), "\n")
- if len(lines) <= 6 {
+ if len(lines) < 6 {
t.Fatal("too few lines")
}
n := 0
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index d3913ec27b8..f7f81e95d3e 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -57,9 +57,41 @@ func NewTypeAssertionError(ps1, ps2, ps3 *string, pmeth *string, ret *interface{
if pmeth != nil {
meth = *pmeth
}
+
+ // For gccgo, strip out quoted strings.
+ s1 = unquote(s1)
+ s2 = unquote(s2)
+ s3 = unquote(s3)
+
*ret = &TypeAssertionError{s1, s2, s3, meth}
}
+// Remove quoted strings from gccgo reflection strings.
+func unquote(s string) string {
+ ls := len(s)
+ var i int
+ for i = 0; i < ls; i++ {
+ if s[i] == '\t' {
+ break
+ }
+ }
+ if i == ls {
+ return s
+ }
+ var q bool
+ r := make([]byte, len(s))
+ j := 0
+ for i = 0; i < ls; i++ {
+ if s[i] == '\t' {
+ q = !q
+ } else if !q {
+ r[j] = s[i]
+ j++
+ }
+ }
+ return string(r[:j])
+}
+
// An errorString represents a runtime error described by a single string.
type errorString string
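
To see what unquote does, here is a standalone copy with a worked input (illustrative; the real function is unexported in the runtime package). The toggling logic implies that gccgo brackets the quoted parts of its reflection strings with tab characters, and the function drops each bracketed section along with the tabs themselves:

package main

import "fmt"

func unquote(s string) string {
	ls := len(s)
	var i int
	for i = 0; i < ls; i++ {
		if s[i] == '\t' {
			break
		}
	}
	if i == ls {
		return s // no tabs: common fast path, no allocation
	}
	var q bool
	r := make([]byte, len(s))
	j := 0
	for i = 0; i < ls; i++ {
		if s[i] == '\t' {
			q = !q // each tab toggles "inside a quoted section"
		} else if !q {
			r[j] = s[i]
			j++
		}
	}
	return string(r[:j])
}

func main() {
	fmt.Println(unquote("main.T\tquoted part\t.String")) // main.T.String
}
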
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index c603e1b0d79..7551ab802ca 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -23,3 +23,39 @@ func golockedOSThread() bool
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = golockedOSThread
+
+type LFNode struct {
+ Next *LFNode
+ Pushcnt uintptr
+}
+
+func lfstackpush(head *uint64, node *LFNode)
+func lfstackpop2(head *uint64) *LFNode
+
+var LFStackPush = lfstackpush
+var LFStackPop = lfstackpop2
+
+type ParFor struct {
+ body *byte
+ done uint32
+ Nthr uint32
+ nthrmax uint32
+ thrseq uint32
+ Cnt uint32
+ Ctx *byte
+ wait bool
+}
+
+func parforalloc2(nthrmax uint32) *ParFor
+func parforsetup2(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
+func parfordo(desc *ParFor)
+func parforiters(desc *ParFor, tid uintptr) (uintptr, uintptr)
+
+var NewParFor = parforalloc2
+var ParForSetup = parforsetup2
+var ParForDo = parfordo
+
+func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
+ begin, end := parforiters(desc, uintptr(tid))
+ return uint32(begin), uint32(end)
+}
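
The file above follows Go's export_test.go convention: aliases defined in a _test.go file inside the package itself are compiled only during that package's tests, which lets the external runtime_test package reach unexported internals. A two-file sketch of the pattern under hypothetical names (mypkg, doWork):

// mypkg/work.go
package mypkg

func doWork() int { return 42 } // unexported: invisible outside the package

// mypkg/export_test.go — compiled only when testing mypkg, so the alias
// is visible to the external test package mypkg_test without widening
// the public API seen by ordinary importers.
package mypkg

var DoWork = doWork
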
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index 7770e499ad3..56dd93819e1 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -26,6 +26,7 @@ func TestGcSys(t *testing.T) {
}
// Should only be using a few MB.
+ // We allocated 100 MB or (if not short) 1 GB.
runtime.ReadMemStats(memstats)
if sys > memstats.Sys {
sys = 0
@@ -33,7 +34,7 @@ func TestGcSys(t *testing.T) {
sys = memstats.Sys - sys
}
t.Logf("used %d extra bytes", sys)
- if sys > 4<<20 {
+ if sys > 16<<20 {
t.Fatalf("using too much memory: %d bytes", sys)
}
}
@@ -41,3 +42,19 @@ func TestGcSys(t *testing.T) {
func workthegc() []byte {
return make([]byte, 1029)
}
+
+func TestGcDeepNesting(t *testing.T) {
+ type T [2][2][2][2][2][2][2][2][2][2]*int
+ a := new(T)
+
+ // Prevent the compiler from applying escape analysis.
+ // This makes sure new(T) is allocated on the heap, not on the stack.
+ t.Logf("%p", a)
+
+ a[0][0][0][0][0][0][0][0][0][0] = new(int)
+ *a[0][0][0][0][0][0][0][0][0][0] = 13
+ runtime.GC()
+ if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
+ t.Fail()
+ }
+}
diff --git a/libgo/go/runtime/iface_test.go b/libgo/go/runtime/iface_test.go
new file mode 100644
index 00000000000..bca0ea0ee75
--- /dev/null
+++ b/libgo/go/runtime/iface_test.go
@@ -0,0 +1,138 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "testing"
+)
+
+type I1 interface {
+ Method1()
+}
+
+type I2 interface {
+ Method1()
+ Method2()
+}
+
+type TS uint16
+type TM uintptr
+type TL [2]uintptr
+
+func (TS) Method1() {}
+func (TS) Method2() {}
+func (TM) Method1() {}
+func (TM) Method2() {}
+func (TL) Method1() {}
+func (TL) Method2() {}
+
+var (
+ e interface{}
+ e_ interface{}
+ i1 I1
+ i2 I2
+ ts TS
+ tm TM
+ tl TL
+)
+
+func BenchmarkConvT2ESmall(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = ts
+ }
+}
+
+func BenchmarkConvT2EUintptr(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = tm
+ }
+}
+
+func BenchmarkConvT2ELarge(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ e = tl
+ }
+}
+
+func BenchmarkConvT2ISmall(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i1 = ts
+ }
+}
+
+func BenchmarkConvT2IUintptr(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i1 = tm
+ }
+}
+
+func BenchmarkConvT2ILarge(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i1 = tl
+ }
+}
+
+func BenchmarkConvI2E(b *testing.B) {
+ i2 = tm
+ for i := 0; i < b.N; i++ {
+ e = i2
+ }
+}
+
+func BenchmarkConvI2I(b *testing.B) {
+ i2 = tm
+ for i := 0; i < b.N; i++ {
+ i1 = i2
+ }
+}
+
+func BenchmarkAssertE2T(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ tm = e.(TM)
+ }
+}
+
+func BenchmarkAssertE2TLarge(b *testing.B) {
+ e = tl
+ for i := 0; i < b.N; i++ {
+ tl = e.(TL)
+ }
+}
+
+func BenchmarkAssertE2I(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ i1 = e.(I1)
+ }
+}
+
+func BenchmarkAssertI2T(b *testing.B) {
+ i1 = tm
+ for i := 0; i < b.N; i++ {
+ tm = i1.(TM)
+ }
+}
+
+func BenchmarkAssertI2I(b *testing.B) {
+ i1 = tm
+ for i := 0; i < b.N; i++ {
+ i2 = i1.(I2)
+ }
+}
+
+func BenchmarkAssertI2E(b *testing.B) {
+ i1 = tm
+ for i := 0; i < b.N; i++ {
+ e = i1.(interface{})
+ }
+}
+
+func BenchmarkAssertE2E(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ e_ = e
+ }
+}
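
The benchmark names above encode the conversion direction: T is a concrete type, E the empty interface, I a non-empty interface, and Small/Uintptr/Large refer to the size of the concrete value. A standalone sketch of each operation (illustrative only):

package main

import "fmt"

type I interface{ M() }
type T int

func (T) M() {}

func main() {
	var t T = 7
	var e interface{} = t // ConvT2E: concrete value into interface{}
	var i I = t           // ConvT2I: concrete value into a non-empty interface
	e = i                 // ConvI2E: non-empty interface widened to interface{}
	i = e.(I)             // AssertE2I: method-set check at run time
	t = i.(T)             // AssertI2T: dynamic type check at run time
	fmt.Println(t, i, e)
}
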
diff --git a/libgo/go/runtime/lfstack_test.go b/libgo/go/runtime/lfstack_test.go
new file mode 100644
index 00000000000..505aae60551
--- /dev/null
+++ b/libgo/go/runtime/lfstack_test.go
@@ -0,0 +1,130 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "math/rand"
+ . "runtime"
+ "testing"
+ "unsafe"
+)
+
+type MyNode struct {
+ LFNode
+ data int
+}
+
+func fromMyNode(node *MyNode) *LFNode {
+ return (*LFNode)(unsafe.Pointer(node))
+}
+
+func toMyNode(node *LFNode) *MyNode {
+ return (*MyNode)(unsafe.Pointer(node))
+}
+
+func TestLFStack(t *testing.T) {
+ stack := new(uint64)
+ // Need to keep additional references to nodes; the stack is not all that type-safe.
+ var nodes []*MyNode
+
+ // Check the stack is initially empty.
+ if LFStackPop(stack) != nil {
+ t.Fatalf("stack is not empty")
+ }
+
+ // Push one element.
+ node := &MyNode{data: 42}
+ nodes = append(nodes, node)
+ LFStackPush(stack, fromMyNode(node))
+
+ // Push another.
+ node = &MyNode{data: 43}
+ nodes = append(nodes, node)
+ LFStackPush(stack, fromMyNode(node))
+
+ // Pop one element.
+ node = toMyNode(LFStackPop(stack))
+ if node == nil {
+ t.Fatalf("stack is empty")
+ }
+ if node.data != 43 {
+ t.Fatalf("no lifo")
+ }
+
+ // Pop another.
+ node = toMyNode(LFStackPop(stack))
+ if node == nil {
+ t.Fatalf("stack is empty")
+ }
+ if node.data != 42 {
+ t.Fatalf("no lifo")
+ }
+
+ // Check the stack is empty again.
+ if LFStackPop(stack) != nil {
+ t.Fatalf("stack is not empty")
+ }
+ if *stack != 0 {
+ t.Fatalf("stack is not empty")
+ }
+}
+
+func TestLFStackStress(t *testing.T) {
+ const K = 100
+ P := 4 * GOMAXPROCS(-1)
+ N := 100000
+ if testing.Short() {
+ N /= 10
+ }
+ // Create 2 stacks.
+ stacks := [2]*uint64{new(uint64), new(uint64)}
+ // Need to keep additional references to nodes; the stack is not all that type-safe.
+ var nodes []*MyNode
+ // Push K elements randomly onto the stacks.
+ sum := 0
+ for i := 0; i < K; i++ {
+ sum += i
+ node := &MyNode{data: i}
+ nodes = append(nodes, node)
+ LFStackPush(stacks[i%2], fromMyNode(node))
+ }
+ c := make(chan bool, P)
+ for p := 0; p < P; p++ {
+ go func() {
+ r := rand.New(rand.NewSource(rand.Int63()))
+ // Pop a node from a random stack, then push it onto a random stack.
+ for i := 0; i < N; i++ {
+ node := toMyNode(LFStackPop(stacks[r.Intn(2)]))
+ if node != nil {
+ LFStackPush(stacks[r.Intn(2)], fromMyNode(node))
+ }
+ }
+ c <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-c
+ }
+ // Pop all elements from both stacks, and verify that nothing is lost.
+ sum2 := 0
+ cnt := 0
+ for i := 0; i < 2; i++ {
+ for {
+ node := toMyNode(LFStackPop(stacks[i]))
+ if node == nil {
+ break
+ }
+ cnt++
+ sum2 += node.data
+ node.Next = nil
+ }
+ }
+ if cnt != K {
+ t.Fatalf("Wrong number of nodes %d/%d", cnt, K)
+ }
+ if sum2 != sum {
+ t.Fatalf("Wrong sum %d/%d", sum2, sum)
+ }
+}
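
The conversions in fromMyNode and toMyNode are valid because LFNode is the first field of MyNode, so a *MyNode and the address of its embedded LFNode are the same address. A standalone sketch of that layout rule (Node and My are hypothetical names):

package main

import (
	"fmt"
	"unsafe"
)

type Node struct{ next *Node }

type My struct {
	Node // embedded first, so &m.Node equals the address of m itself
	data int
}

func main() {
	m := &My{data: 7}
	n := (*Node)(unsafe.Pointer(m))  // *My -> *Node, as in fromMyNode
	back := (*My)(unsafe.Pointer(n)) // *Node -> *My, as in toMyNode
	fmt.Println(unsafe.Offsetof(m.Node), back.data) // 0 7
}
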
diff --git a/libgo/go/runtime/parfor_test.go b/libgo/go/runtime/parfor_test.go
new file mode 100644
index 00000000000..0547db02096
--- /dev/null
+++ b/libgo/go/runtime/parfor_test.go
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ . "runtime"
+ "testing"
+ "unsafe"
+)
+
+// Simple serial sanity test for parallelfor.
+func TestParFor(t *testing.T) {
+ const P = 1
+ const N = 20
+ data := make([]uint64, N)
+ for i := uint64(0); i < N; i++ {
+ data[i] = i
+ }
+ desc := NewParFor(P)
+ ParForSetup(desc, P, N, nil, true, func(desc *ParFor, i uint32) {
+ data[i] = data[i]*data[i] + 1
+ })
+ ParForDo(desc)
+ for i := uint64(0); i < N; i++ {
+ if data[i] != i*i+1 {
+ t.Fatalf("Wrong element %d: %d", i, data[i])
+ }
+ }
+}
+
+// Test that nonblocking parallelfor does not block.
+func TestParFor2(t *testing.T) {
+ const P = 7
+ const N = 1003
+ data := make([]uint64, N)
+ for i := uint64(0); i < N; i++ {
+ data[i] = i
+ }
+ desc := NewParFor(P)
+ ParForSetup(desc, P, N, (*byte)(unsafe.Pointer(&data)), false, func(desc *ParFor, i uint32) {
+ d := *(*[]uint64)(unsafe.Pointer(desc.Ctx))
+ d[i] = d[i]*d[i] + 1
+ })
+ for p := 0; p < P; p++ {
+ ParForDo(desc)
+ }
+ for i := uint64(0); i < N; i++ {
+ if data[i] != i*i+1 {
+ t.Fatalf("Wrong element %d: %d", i, data[i])
+ }
+ }
+}
+
+// Test that iterations are properly distributed.
+func TestParForSetup(t *testing.T) {
+ const P = 11
+ const N = 101
+ desc := NewParFor(P)
+ for n := uint32(0); n < N; n++ {
+ for p := uint32(1); p <= P; p++ {
+ ParForSetup(desc, p, n, nil, true, func(desc *ParFor, i uint32) {})
+ sum := uint32(0)
+ size0 := uint32(0)
+ end0 := uint32(0)
+ for i := uint32(0); i < p; i++ {
+ begin, end := ParForIters(desc, i)
+ size := end - begin
+ sum += size
+ if i == 0 {
+ size0 = size
+ if begin != 0 {
+ t.Fatalf("incorrect begin: %d (n=%d, p=%d)", begin, n, p)
+ }
+ } else {
+ if size != size0 && size != size0+1 {
+ t.Fatalf("incorrect size: %d/%d (n=%d, p=%d)", size, size0, n, p)
+ }
+ if begin != end0 {
+ t.Fatalf("incorrect begin/end: %d/%d (n=%d, p=%d)", begin, end0, n, p)
+ }
+ }
+ end0 = end
+ }
+ if sum != n {
+ t.Fatalf("incorrect sum: %d/%d (p=%d)", sum, n, p)
+ }
+ }
+ }
+}
+
+// Test parallel parallelfor.
+func TestParForParallel(t *testing.T) {
+ if GOARCH != "amd64" {
+ t.Log("temporarily disabled, see http://golang.org/issue/4155")
+ return
+ }
+
+ N := uint64(1e7)
+ if testing.Short() {
+ N /= 10
+ }
+ data := make([]uint64, N)
+ for i := uint64(0); i < N; i++ {
+ data[i] = i
+ }
+ P := GOMAXPROCS(-1)
+ desc := NewParFor(uint32(P))
+ ParForSetup(desc, uint32(P), uint32(N), nil, true, func(desc *ParFor, i uint32) {
+ data[i] = data[i]*data[i] + 1
+ })
+ for p := 1; p < P; p++ {
+ go ParForDo(desc)
+ }
+ ParForDo(desc)
+ for i := uint64(0); i < N; i++ {
+ if data[i] != i*i+1 {
+ t.Fatalf("Wrong element %d: %d", i, data[i])
+ }
+ }
+
+ data, desc = nil, nil
+ GC()
+}
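
For readers unfamiliar with the runtime's parfor: ParForSetup splits the iteration space [0,n) across nthr participants, and ParForDo must then be called once by each participant, which the tests above arrange with one goroutine per P. A plain-Go analogue of the contract being tested — a static split without parfor's work stealing; parallelFor is a hypothetical helper, not the runtime API:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// parallelFor invokes body(i) exactly once for every i in [0,n),
// statically partitioned across GOMAXPROCS worker goroutines.
func parallelFor(n int, body func(i int)) {
	p := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	for w := 0; w < p; w++ {
		begin, end := n*w/p, n*(w+1)/p
		wg.Add(1)
		go func(b, e int) {
			defer wg.Done()
			for i := b; i < e; i++ {
				body(i)
			}
		}(begin, end)
	}
	wg.Wait()
}

func main() {
	data := make([]uint64, 20)
	for i := range data {
		data[i] = uint64(i)
	}
	parallelFor(len(data), func(i int) { data[i] = data[i]*data[i] + 1 })
	fmt.Println(data)
}
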
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 8cc15390c14..952ccf6d898 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -36,6 +36,7 @@ import (
// goroutine - stack traces of all current goroutines
// heap - a sampling of all heap allocations
// threadcreate - stack traces that led to the creation of new OS threads
+// block - stack traces that led to blocking on synchronization primitives
//
// These predefined profiles maintain themselves and panic on an explicit
// Add or Remove method call.
@@ -76,6 +77,12 @@ var heapProfile = &Profile{
write: writeHeap,
}
+var blockProfile = &Profile{
+ name: "block",
+ count: countBlock,
+ write: writeBlock,
+}
+
func lockProfiles() {
profiles.mu.Lock()
if profiles.m == nil {
@@ -84,6 +91,7 @@ func lockProfiles() {
"goroutine": goroutineProfile,
"threadcreate": threadcreateProfile,
"heap": heapProfile,
+ "block": blockProfile,
}
}
}
@@ -352,26 +360,26 @@ func WriteHeapProfile(w io.Writer) error {
// countHeap returns the number of records in the heap profile.
func countHeap() int {
- n, _ := runtime.MemProfile(nil, false)
+ n, _ := runtime.MemProfile(nil, true)
return n
}
// writeHeap writes the current runtime heap profile to w.
func writeHeap(w io.Writer, debug int) error {
- // Find out how many records there are (MemProfile(nil, false)),
+ // Find out how many records there are (MemProfile(nil, true)),
// allocate that many records, and get the data.
// There's a race—more records might be added between
// the two calls—so allocate a few extra records for safety
// and also try again if we're very unlucky.
// The loop should only execute one iteration in the common case.
var p []runtime.MemProfileRecord
- n, ok := runtime.MemProfile(nil, false)
+ n, ok := runtime.MemProfile(nil, true)
for {
// Allocate room for a slightly bigger profile,
// in case a few more entries have been added
// since the call to MemProfile.
p = make([]runtime.MemProfileRecord, n+50)
- n, ok = runtime.MemProfile(p, false)
+ n, ok = runtime.MemProfile(p, true)
if ok {
p = p[0:n]
break
@@ -431,11 +439,14 @@ func writeHeap(w io.Writer, debug int) error {
fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
+ fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
+ fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
+ fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
@@ -597,3 +608,60 @@ func StopCPUProfile() {
runtime.SetCPUProfileRate(0)
<-cpu.done
}
+
+type byCycles []runtime.BlockProfileRecord
+
+func (x byCycles) Len() int { return len(x) }
+func (x byCycles) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x byCycles) Less(i, j int) bool { return x[i].Cycles > x[j].Cycles }
+
+// countBlock returns the number of records in the blocking profile.
+func countBlock() int {
+ n, _ := runtime.BlockProfile(nil)
+ return n
+}
+
+// writeBlock writes the current blocking profile to w.
+func writeBlock(w io.Writer, debug int) error {
+ var p []runtime.BlockProfileRecord
+ n, ok := runtime.BlockProfile(nil)
+ for {
+ p = make([]runtime.BlockProfileRecord, n+50)
+ n, ok = runtime.BlockProfile(p)
+ if ok {
+ p = p[:n]
+ break
+ }
+ }
+
+ sort.Sort(byCycles(p))
+
+ b := bufio.NewWriter(w)
+ var tw *tabwriter.Writer
+ w = b
+ if debug > 0 {
+ tw = tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+ w = tw
+ }
+
+ fmt.Fprintf(w, "--- contention:\n")
+ fmt.Fprintf(w, "cycles/second=%v\n", runtime_cyclesPerSecond())
+ for i := range p {
+ r := &p[i]
+ fmt.Fprintf(w, "%v %v @", r.Cycles, r.Count)
+ for _, pc := range r.Stack() {
+ fmt.Fprintf(w, " %#x", pc)
+ }
+ fmt.Fprint(w, "\n")
+ if debug > 0 {
+ printStackRecord(w, r.Stack(), false)
+ }
+ }
+
+ if tw != nil {
+ tw.Flush()
+ }
+ return b.Flush()
+}
+
+func runtime_cyclesPerSecond() int64
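
writeBlock reuses the grow-and-retry idiom already described in writeHeap's comment: records can be added between the sizing call and the copying call, so over-allocate and loop until the snapshot fits. The idiom in isolation (a sketch; snapshotBlocks is a hypothetical name):

package main

import (
	"fmt"
	"runtime"
)

func snapshotBlocks() []runtime.BlockProfileRecord {
	n, _ := runtime.BlockProfile(nil) // sizing call: how many records now?
	for {
		// Pad by 50 in case a few more records appear before the copy.
		p := make([]runtime.BlockProfileRecord, n+50)
		var ok bool
		n, ok = runtime.BlockProfile(p)
		if ok {
			return p[:n] // the snapshot fit; trim and return
		}
		// Raced with new records: n was updated, so try again.
	}
}

func main() {
	fmt.Println(len(snapshotBlocks()), "block profile records")
}
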
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 32111080a54..1d51c5271e3 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -123,3 +123,29 @@ func BenchmarkSyscallWork(b *testing.B) {
<-c
}
}
+
+func BenchmarkCreateGoroutines(b *testing.B) {
+ benchmarkCreateGoroutines(b, 1)
+}
+
+func BenchmarkCreateGoroutinesParallel(b *testing.B) {
+ benchmarkCreateGoroutines(b, runtime.GOMAXPROCS(-1))
+}
+
+func benchmarkCreateGoroutines(b *testing.B, procs int) {
+ c := make(chan bool)
+ var f func(n int)
+ f = func(n int) {
+ if n == 0 {
+ c <- true
+ return
+ }
+ go f(n - 1)
+ }
+ for i := 0; i < procs; i++ {
+ go f(b.N / procs)
+ }
+ for i := 0; i < procs; i++ {
+ <-c
+ }
+}
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
new file mode 100644
index 00000000000..8f13f0f428b
--- /dev/null
+++ b/libgo/go/runtime/string_test.go
@@ -0,0 +1,45 @@
+package runtime_test
+
+import (
+ "testing"
+)
+
+func BenchmarkCompareStringEqual(b *testing.B) {
+ bytes := []byte("Hello Gophers!")
+ s1, s2 := string(bytes), string(bytes)
+ for i := 0; i < b.N; i++ {
+ if s1 != s2 {
+ b.Fatal("s1 != s2")
+ }
+ }
+}
+
+func BenchmarkCompareStringIdentical(b *testing.B) {
+ s1 := "Hello Gophers!"
+ s2 := s1
+ for i := 0; i < b.N; i++ {
+ if s1 != s2 {
+ b.Fatal("s1 != s2")
+ }
+ }
+}
+
+func BenchmarkCompareStringSameLength(b *testing.B) {
+ s1 := "Hello Gophers!"
+ s2 := "Hello, Gophers"
+ for i := 0; i < b.N; i++ {
+ if s1 == s2 {
+ b.Fatal("s1 == s2")
+ }
+ }
+}
+
+func BenchmarkCompareStringDifferentLength(b *testing.B) {
+ s1 := "Hello Gophers!"
+ s2 := "Hello, Gophers!"
+ for i := 0; i < b.N; i++ {
+ if s1 == s2 {
+ b.Fatal("s1 == s2")
+ }
+ }
+}
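
The four benchmarks isolate the fast paths a string comparison can take: unequal lengths reject immediately, identical string headers accept immediately, and only the remaining cases pay for a byte-by-byte comparison. The inputs side by side (a sketch of the cases, not of the runtime's comparison routine):

package main

import "fmt"

func main() {
	b := []byte("Hello Gophers!")
	s1, s2 := string(b), string(b) // equal contents, distinct backing arrays
	identical := s1                // same data pointer and length as s1
	diffLen := "Hello, Gophers!"   // different length: cheapest reject
	sameLen := "Hello, Gophers"    // same length: must inspect bytes

	fmt.Println(s1 == s2, s1 == identical, s1 == diffLen, s1 == sameLen)
	// true true false false
}
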
diff --git a/libgo/go/runtime/vlop_arm_test.go b/libgo/go/runtime/vlop_arm_test.go
new file mode 100644
index 00000000000..f672f1a0de3
--- /dev/null
+++ b/libgo/go/runtime/vlop_arm_test.go
@@ -0,0 +1,70 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import "testing"
+
+// arm soft division benchmarks adapted from
+// http://ridiculousfish.com/files/division_benchmarks.tar.gz
+
+const numeratorsSize = 1 << 21
+
+var numerators = randomNumerators()
+
+type randstate struct {
+ hi, lo uint32
+}
+
+func (r *randstate) rand() uint32 {
+ r.hi = r.hi<<16 + r.hi>>16
+ r.hi += r.lo
+ r.lo += r.hi
+ return r.hi
+}
+
+func randomNumerators() []uint32 {
+ numerators := make([]uint32, numeratorsSize)
+ random := &randstate{2147483563, 2147483563 ^ 0x49616E42}
+ for i := range numerators {
+ numerators[i] = random.rand()
+ }
+ return numerators
+}
+
+func bmUint32Div(divisor uint32, b *testing.B) {
+ var sum uint32
+ for i := 0; i < b.N; i++ {
+ sum += numerators[i&(numeratorsSize-1)] / divisor
+ }
+}
+
+func BenchmarkUint32Div7(b *testing.B) { bmUint32Div(7, b) }
+func BenchmarkUint32Div37(b *testing.B) { bmUint32Div(37, b) }
+func BenchmarkUint32Div123(b *testing.B) { bmUint32Div(123, b) }
+func BenchmarkUint32Div763(b *testing.B) { bmUint32Div(763, b) }
+func BenchmarkUint32Div1247(b *testing.B) { bmUint32Div(1247, b) }
+func BenchmarkUint32Div9305(b *testing.B) { bmUint32Div(9305, b) }
+func BenchmarkUint32Div13307(b *testing.B) { bmUint32Div(13307, b) }
+func BenchmarkUint32Div52513(b *testing.B) { bmUint32Div(52513, b) }
+func BenchmarkUint32Div60978747(b *testing.B) { bmUint32Div(60978747, b) }
+func BenchmarkUint32Div106956295(b *testing.B) { bmUint32Div(106956295, b) }
+
+func bmUint32Mod(divisor uint32, b *testing.B) {
+ var sum uint32
+ for i := 0; i < b.N; i++ {
+ sum += numerators[i&(numeratorsSize-1)] % divisor
+ }
+}
+
+func BenchmarkUint32Mod7(b *testing.B) { bmUint32Mod(7, b) }
+func BenchmarkUint32Mod37(b *testing.B) { bmUint32Mod(37, b) }
+func BenchmarkUint32Mod123(b *testing.B) { bmUint32Mod(123, b) }
+func BenchmarkUint32Mod763(b *testing.B) { bmUint32Mod(763, b) }
+func BenchmarkUint32Mod1247(b *testing.B) { bmUint32Mod(1247, b) }
+func BenchmarkUint32Mod9305(b *testing.B) { bmUint32Mod(9305, b) }
+func BenchmarkUint32Mod13307(b *testing.B) { bmUint32Mod(13307, b) }
+func BenchmarkUint32Mod52513(b *testing.B) { bmUint32Mod(52513, b) }
+func BenchmarkUint32Mod60978747(b *testing.B) { bmUint32Mod(60978747, b) }
+func BenchmarkUint32Mod106956295(b *testing.B) { bmUint32Mod(106956295, b) }
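
One detail worth noting in bmUint32Div and bmUint32Mod above: numeratorsSize is a power of two, so the index expression i&(numeratorsSize-1) is an inexpensive i%numeratorsSize, keeping the indexing cost out of the division being measured. A quick check of that identity (illustrative):

package main

import "fmt"

func main() {
	const size = 1 << 21 // a power of two, as numeratorsSize is above
	for _, i := range []int{0, 5, size - 1, size, size + 5} {
		// For a power-of-two size, masking equals the modulo operation.
		fmt.Println(i&(size-1) == i%size) // true for every i >= 0
	}
}
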