path: root/libgo/go/runtime
author    Ian Lance Taylor <ian@gcc.gnu.org>    2014-07-19 08:53:52 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2014-07-19 08:53:52 +0000
commit    00d86ac99f5dd6afa5bbd7c38ffe1c585edd2387 (patch)
tree      b988e32ea14a3dc1b4718b1fdfa47bab087ae96c /libgo/go/runtime
parent    bcf2fc6ee0a7edbe7de4299f28b66527c07bb0a2 (diff)
libgo: Update to Go 1.3 release.
From-SVN: r212837
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/chan_test.go            |  52
-rw-r--r--  libgo/go/runtime/crash_test.go           |  96
-rw-r--r--  libgo/go/runtime/debug/garbage.go        |  16
-rw-r--r--  libgo/go/runtime/debug/heapdump_test.go  |  33
-rw-r--r--  libgo/go/runtime/extern.go               |  14
-rw-r--r--  libgo/go/runtime/gc_test.go              |  83
-rw-r--r--  libgo/go/runtime/map_test.go             |  40
-rw-r--r--  libgo/go/runtime/memmove_test.go         |  80
-rw-r--r--  libgo/go/runtime/mfinal_test.go          | 164
-rw-r--r--  libgo/go/runtime/mgc0.go                 |  12
-rw-r--r--  libgo/go/runtime/norace_test.go          |  36
-rw-r--r--  libgo/go/runtime/pprof/pprof.go          |   2
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go     |   2
-rw-r--r--  libgo/go/runtime/proc_test.go            |  23
-rw-r--r--  libgo/go/runtime/runtime_test.go         |  82
-rw-r--r--  libgo/go/runtime/type.go                 |   1
16 files changed, 605 insertions, 131 deletions
diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go
index 782176c8836..ce4b3962717 100644
--- a/libgo/go/runtime/chan_test.go
+++ b/libgo/go/runtime/chan_test.go
@@ -431,27 +431,15 @@ func TestMultiConsumer(t *testing.T) {
}
func BenchmarkChanNonblocking(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
myc := make(chan int)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc:
- default:
- }
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ select {
+ case <-myc:
+ default:
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ })
}
func BenchmarkSelectUncontended(b *testing.B) {
@@ -713,23 +701,11 @@ func BenchmarkChanCreation(b *testing.B) {
func BenchmarkChanSem(b *testing.B) {
type Empty struct{}
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(0)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- myc := make(chan Empty, procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- myc <- Empty{}
- <-myc
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ myc := make(chan Empty, runtime.GOMAXPROCS(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ myc <- Empty{}
+ <-myc
+ }
+ })
}
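
The rewrites in this file all follow one pattern: the hand-rolled fan-out (a GOMAXPROCS-sized pool of goroutines counting down a shared atomic) is replaced by testing.B.RunParallel, new in Go 1.3, which partitions b.N across goroutines for you. A minimal sketch of the idiom outside this patch (BenchmarkParallelIdiom and its atomic-increment body are placeholders):

package runtime_test

import (
	"sync/atomic"
	"testing"
)

var sink int64

// BenchmarkParallelIdiom shows the RunParallel shape used throughout
// this commit: the testing package starts GOMAXPROCS goroutines and
// each one pulls iterations from pb until b.N is exhausted.
func BenchmarkParallelIdiom(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomic.AddInt64(&sink, 1)
		}
	})
}

Running with go test -bench=ParallelIdiom -cpu=1,2,4 shows the behavior across processor counts, which is what the deleted procs/CallsPerSched scaffolding simulated by hand.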
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index d8bfdbdad6d..39e04345a10 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -9,6 +9,7 @@ import (
"os"
"os/exec"
"path/filepath"
+ "runtime"
"strings"
"testing"
"text/template"
@@ -32,6 +33,10 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
func executeTest(t *testing.T, templ string, data interface{}) string {
t.Skip("gccgo does not have a go command")
+ if runtime.GOOS == "nacl" {
+ t.Skip("skipping on nacl")
+ }
+
checkStaleRuntime(t)
st := template.Must(template.New("crashSource").Parse(templ))
@@ -112,8 +117,9 @@ func TestLockedDeadlock2(t *testing.T) {
func TestGoexitDeadlock(t *testing.T) {
output := executeTest(t, goexitDeadlockSource, nil)
- if output != "" {
- t.Fatalf("expected no output, got:\n%s", output)
+ want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
@@ -133,6 +139,34 @@ func TestThreadExhaustion(t *testing.T) {
}
}
+func TestRecursivePanic(t *testing.T) {
+ output := executeTest(t, recursivePanicSource, nil)
+ want := `wrap: bad
+panic: again
+
+`
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+
+}
+
+func TestGoexitCrash(t *testing.T) {
+ output := executeTest(t, goexitExitSource, nil)
+ want := "no goroutines (main called runtime.Goexit) - deadlock!"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ }
+}
+
+func TestGoNil(t *testing.T) {
+ output := executeTest(t, goNilSource, nil)
+ want := "go of nil func value"
+ if !strings.Contains(output, want) {
+ t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
+ }
+}
+
const crashSource = `
package main
@@ -273,3 +307,61 @@ func main() {
}
}
`
+
+const recursivePanicSource = `
+package main
+
+import (
+ "fmt"
+)
+
+func main() {
+ func() {
+ defer func() {
+ fmt.Println(recover())
+ }()
+ var x [8192]byte
+ func(x [8192]byte) {
+ defer func() {
+ if err := recover(); err != nil {
+ panic("wrap: " + err.(string))
+ }
+ }()
+ panic("bad")
+ }(x)
+ }()
+ panic("again")
+}
+`
+
+const goexitExitSource = `
+package main
+
+import (
+ "runtime"
+ "time"
+)
+
+func main() {
+ go func() {
+ time.Sleep(time.Millisecond)
+ }()
+ i := 0
+ runtime.SetFinalizer(&i, func(p *int) {})
+ runtime.GC()
+ runtime.Goexit()
+}
+`
+
+const goNilSource = `
+package main
+
+func main() {
+ defer func() {
+ recover()
+ }()
+ var f func()
+ go f()
+ select{}
+}
+`
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index a724fdf8f60..edb3643871e 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -135,3 +135,19 @@ func SetMaxStack(bytes int) int {
func SetMaxThreads(threads int) int {
return setMaxThreads(threads)
}
+
+// SetPanicOnFault controls the runtime's behavior when a program faults
+// at an unexpected (non-nil) address. Such faults are typically caused by
+// bugs such as runtime memory corruption, so the default response is to crash
+// the program. Programs working with memory-mapped files or unsafe
+// manipulation of memory may cause faults at non-nil addresses in less
+// dramatic situations; SetPanicOnFault allows such programs to request
+// that the runtime trigger only a panic, not a crash.
+// SetPanicOnFault applies only to the current goroutine.
+// It returns the previous setting.
+func SetPanicOnFault(enabled bool) bool
+
+// WriteHeapDump writes a description of the heap and the objects in
+// it to the given file descriptor.
+// The heap dump format is defined at http://golang.org/s/go13heapdump.
+func WriteHeapDump(fd uintptr)
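
SetPanicOnFault is per-goroutine and returns the previous setting, so the natural shape is save, enable, defer-restore, recover. A hedged sketch (readMapped and the truncated-mapping scenario are hypothetical; the save/restore/recover idiom is the point, and TestSetPanicOnFault below uses the same save/restore step):

package main

import (
	"fmt"
	"runtime/debug"
)

// readMapped reads one byte through p. While SetPanicOnFault(true) is
// in effect, a fault at a non-nil address (say, into a truncated
// memory-mapped file) becomes a recoverable panic on this goroutine
// instead of a process crash.
func readMapped(p *byte) (b byte, err error) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("fault while reading mapping: %v", r)
		}
	}()
	return *p, nil
}

func main() {
	x := byte(7)
	fmt.Println(readMapped(&x)) // 7 <nil>
}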
diff --git a/libgo/go/runtime/debug/heapdump_test.go b/libgo/go/runtime/debug/heapdump_test.go
new file mode 100644
index 00000000000..9201901151f
--- /dev/null
+++ b/libgo/go/runtime/debug/heapdump_test.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package debug
+
+import (
+ "io/ioutil"
+ "os"
+ "runtime"
+ "testing"
+)
+
+func TestWriteHeapDumpNonempty(t *testing.T) {
+ if runtime.GOOS == "nacl" {
+ t.Skip("WriteHeapDump is not available on NaCl.")
+ }
+ f, err := ioutil.TempFile("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+ WriteHeapDump(f.Fd())
+ fi, err := f.Stat()
+ if err != nil {
+ t.Fatalf("Stat failed: %v", err)
+ }
+ const minSize = 1
+ if size := fi.Size(); size < minSize {
+ t.Fatalf("Heap dump size %d bytes, expected at least %d bytes", size, minSize)
+ }
+}
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index 85b69cfdcfc..333d4fd000f 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -36,6 +36,9 @@ a comma-separated list of name=val pairs. Supported names are:
length of the pause. Setting gctrace=2 emits the same summary but also
repeats each collection.
+ gcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots
+ that it thinks are dead.
+
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
@@ -76,6 +79,11 @@ func Gosched()
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine.
+//
+// Calling Goexit from the main goroutine terminates that goroutine
+// without func main returning. Since func main has not returned,
+// the program continues execution of other goroutines.
+// If all other goroutines exit, the program crashes.
func Goexit()
// Caller reports file and line number information about function invocations on
@@ -182,10 +190,8 @@ func GOROOT() string {
}
// Version returns the Go tree's version string.
-// It is either a sequence number or, when possible,
-// a release tag like "release.2010-03-04".
-// A trailing + indicates that the tree had local modifications
-// at the time of the build.
+// It is either the commit hash and date at the time of the build or,
+// when possible, a release tag like "go1.3".
func Version() string {
return theVersion
}
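
The expanded Goexit comment is easiest to see from a goroutine other than main, where the documented behavior (deferred calls run, only the calling goroutine terminates) is uncontroversial. A small self-contained illustration:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		defer close(done) // deferred calls still run on Goexit
		fmt.Println("working")
		runtime.Goexit()
		fmt.Println("never reached")
	}()
	<-done // main keeps running; only the worker terminated
}

Calling Goexit from main instead, as the new doc text says, leaves the program running until the remaining goroutines exit, at which point it crashes with the deadlock message checked by TestGoexitDeadlock and TestGoexitCrash in crash_test.go above.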
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index 1b3ccbf7d93..5a1e9b89c42 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -9,6 +9,7 @@ import (
"runtime"
"runtime/debug"
"testing"
+ "time"
)
func TestGcSys(t *testing.T) {
@@ -153,3 +154,85 @@ func TestGcRescan(t *testing.T) {
}
}
}
+
+func TestGcLastTime(t *testing.T) {
+ ms := new(runtime.MemStats)
+ t0 := time.Now().UnixNano()
+ runtime.GC()
+ t1 := time.Now().UnixNano()
+ runtime.ReadMemStats(ms)
+ last := int64(ms.LastGC)
+ if t0 > last || last > t1 {
+ t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
+ }
+}
+
+func BenchmarkSetTypeNoPtr1(b *testing.B) {
+ type NoPtr1 struct {
+ p uintptr
+ }
+ var p *NoPtr1
+ for i := 0; i < b.N; i++ {
+ p = &NoPtr1{}
+ }
+ _ = p
+}
+func BenchmarkSetTypeNoPtr2(b *testing.B) {
+ type NoPtr2 struct {
+ p, q uintptr
+ }
+ var p *NoPtr2
+ for i := 0; i < b.N; i++ {
+ p = &NoPtr2{}
+ }
+ _ = p
+}
+func BenchmarkSetTypePtr1(b *testing.B) {
+ type Ptr1 struct {
+ p *byte
+ }
+ var p *Ptr1
+ for i := 0; i < b.N; i++ {
+ p = &Ptr1{}
+ }
+ _ = p
+}
+func BenchmarkSetTypePtr2(b *testing.B) {
+ type Ptr2 struct {
+ p, q *byte
+ }
+ var p *Ptr2
+ for i := 0; i < b.N; i++ {
+ p = &Ptr2{}
+ }
+ _ = p
+}
+
+func BenchmarkAllocation(b *testing.B) {
+ type T struct {
+ x, y *byte
+ }
+ ngo := runtime.GOMAXPROCS(0)
+ work := make(chan bool, b.N+ngo)
+ result := make(chan *T)
+ for i := 0; i < b.N; i++ {
+ work <- true
+ }
+ for i := 0; i < ngo; i++ {
+ work <- false
+ }
+ for i := 0; i < ngo; i++ {
+ go func() {
+ var x *T
+ for <-work {
+ for i := 0; i < 1000; i++ {
+ x = &T{}
+ }
+ }
+ result <- x
+ }()
+ }
+ for i := 0; i < ngo; i++ {
+ <-result
+ }
+}
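
TestGcLastTime pins down the units of MemStats.LastGC: wall-clock nanoseconds since the Unix epoch, directly comparable to time.Now().UnixNano(). A short usage sketch:

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	// LastGC is nanoseconds since 1970, so it converts cleanly:
	fmt.Println("last GC at", time.Unix(0, int64(ms.LastGC)))
}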
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index fe5d3ad86c9..d9690253582 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -449,3 +449,43 @@ func TestMapIterOrder(t *testing.T) {
}
}
}
+
+func TestMapStringBytesLookup(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("skipping for gccgo")
+ }
+ // Use large string keys to avoid small-allocation coalescing,
+ // which can cause AllocsPerRun to report lower counts than it should.
+ m := map[string]int{
+ "1000000000000000000000000000000000000000000000000": 1,
+ "2000000000000000000000000000000000000000000000000": 2,
+ }
+ buf := []byte("1000000000000000000000000000000000000000000000000")
+ if x := m[string(buf)]; x != 1 {
+ t.Errorf(`m[string([]byte("1"))] = %d, want 1`, x)
+ }
+ buf[0] = '2'
+ if x := m[string(buf)]; x != 2 {
+ t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
+ }
+
+ var x int
+ n := testing.AllocsPerRun(100, func() {
+ x += m[string(buf)]
+ })
+ if n != 0 {
+ t.Errorf("AllocsPerRun for m[string(buf)] = %v, want 0", n)
+ }
+
+ x = 0
+ n = testing.AllocsPerRun(100, func() {
+ y, ok := m[string(buf)]
+ if !ok {
+ panic("!ok")
+ }
+ x += y
+ })
+ if n != 0 {
+ t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n)
+ }
+}
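
Two ingredients of this test are reusable on their own: testing.AllocsPerRun, which reports the average number of allocations per call of a function, and the gc compiler optimization that lets a map lookup of the form m[string(buf)] avoid materializing a heap string. AllocsPerRun is an ordinary function, usable outside test binaries; a minimal sketch:

package main

import (
	"fmt"
	"testing"
)

func main() {
	m := map[string]int{"key": 1}
	buf := []byte("key")
	var x int
	n := testing.AllocsPerRun(100, func() {
		// Lookup-only []byte-to-string conversion: the gc compiler
		// elides the allocation; gccgo did not at the time, hence
		// the skip added above.
		x += m[string(buf)]
	})
	fmt.Println("allocs per run:", n, "sum:", x)
}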
diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go
index 5c01aac97a9..540f0feb549 100644
--- a/libgo/go/runtime/memmove_test.go
+++ b/libgo/go/runtime/memmove_test.go
@@ -161,3 +161,83 @@ func BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }
func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }
func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }
func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }
+
+func BenchmarkClearFat32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [32]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [64]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat128(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [128]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat256(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [256]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat512(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [512]byte
+ _ = x
+ }
+}
+func BenchmarkClearFat1024(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x [1024]byte
+ _ = x
+ }
+}
+
+func BenchmarkCopyFat32(b *testing.B) {
+ var x [32 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat64(b *testing.B) {
+ var x [64 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat128(b *testing.B) {
+ var x [128 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat256(b *testing.B) {
+ var x [256 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat512(b *testing.B) {
+ var x [512 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
+func BenchmarkCopyFat1024(b *testing.B) {
+ var x [1024 / 4]uint32
+ for i := 0; i < b.N; i++ {
+ y := x
+ _ = y
+ }
+}
diff --git a/libgo/go/runtime/mfinal_test.go b/libgo/go/runtime/mfinal_test.go
index ffcffbd4bef..b47f83c3923 100644
--- a/libgo/go/runtime/mfinal_test.go
+++ b/libgo/go/runtime/mfinal_test.go
@@ -6,10 +6,9 @@ package runtime_test
import (
"runtime"
- "sync"
- "sync/atomic"
"testing"
"time"
+ "unsafe"
)
type Tintptr *int // assignable to *int
@@ -112,50 +111,133 @@ func TestFinalizerZeroSizedStruct(t *testing.T) {
}
func BenchmarkFinalizer(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- var wg sync.WaitGroup
- wg.Add(procs)
- for p := 0; p < procs; p++ {
- go func() {
- var data [CallsPerSched]*int
- for i := 0; i < CallsPerSched; i++ {
- data[i] = new(int)
+ const Batch = 1000
+ b.RunParallel(func(pb *testing.PB) {
+ var data [Batch]*int
+ for i := 0; i < Batch; i++ {
+ data[i] = new(int)
+ }
+ for pb.Next() {
+ for i := 0; i < Batch; i++ {
+ runtime.SetFinalizer(data[i], fin)
}
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for i := 0; i < CallsPerSched; i++ {
- runtime.SetFinalizer(data[i], fin)
- }
- for i := 0; i < CallsPerSched; i++ {
- runtime.SetFinalizer(data[i], nil)
- }
+ for i := 0; i < Batch; i++ {
+ runtime.SetFinalizer(data[i], nil)
}
- wg.Done()
- }()
- }
- wg.Wait()
+ }
+ })
}
func BenchmarkFinalizerRun(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- var wg sync.WaitGroup
- wg.Add(procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for i := 0; i < CallsPerSched; i++ {
- v := new(int)
- runtime.SetFinalizer(v, fin)
- }
- runtime.GC()
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ v := new(int)
+ runtime.SetFinalizer(v, fin)
+ }
+ })
+}
+
+// One chunk must be exactly one sizeclass in size.
+// It should be a sizeclass not used much by others, so we
+// have a greater chance of finding adjacent ones.
+// size class 19: 320 byte objects, 25 per page, 1 page alloc at a time
+const objsize = 320
+
+type objtype [objsize]byte
+
+func adjChunks() (*objtype, *objtype) {
+ var s []*objtype
+
+ for {
+ c := new(objtype)
+ for _, d := range s {
+ if uintptr(unsafe.Pointer(c))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(d)) {
+ return c, d
}
- wg.Done()
- }()
+ if uintptr(unsafe.Pointer(d))+unsafe.Sizeof(*c) == uintptr(unsafe.Pointer(c)) {
+ return d, c
+ }
+ }
+ s = append(s, c)
+ }
+}
+
+// Make sure an empty slice on the stack doesn't pin the next object in memory.
+func TestEmptySlice(t *testing.T) {
+ if true { // disable until bug 7564 is fixed.
+ return
+ }
+ x, y := adjChunks()
+
+ // the pointer inside xs points to y.
+ xs := x[objsize:] // change objsize to objsize-1 and the test passes
+
+ fin := make(chan bool, 1)
+ runtime.SetFinalizer(y, func(z *objtype) { fin <- true })
+ runtime.GC()
+ select {
+ case <-fin:
+ case <-time.After(4 * time.Second):
+ t.Errorf("finalizer of next object in memory didn't run")
+ }
+ xsglobal = xs // keep empty slice alive until here
+}
+
+var xsglobal []byte
+
+func adjStringChunk() (string, *objtype) {
+ b := make([]byte, objsize)
+ for {
+ s := string(b)
+ t := new(objtype)
+ p := *(*uintptr)(unsafe.Pointer(&s))
+ q := uintptr(unsafe.Pointer(t))
+ if p+objsize == q {
+ return s, t
+ }
+ }
+}
+
+// Make sure an empty string on the stack doesn't pin the next object in memory.
+func TestEmptyString(t *testing.T) {
+ if runtime.Compiler == "gccgo" {
+ t.Skip("skipping for gccgo")
+ }
+
+ x, y := adjStringChunk()
+
+ ss := x[objsize:] // change objsize to objsize-1 and the test passes
+ fin := make(chan bool, 1)
+ // set finalizer on string contents of y
+ runtime.SetFinalizer(y, func(z *objtype) { fin <- true })
+ runtime.GC()
+ select {
+ case <-fin:
+ case <-time.After(4 * time.Second):
+ t.Errorf("finalizer of next string in memory didn't run")
}
- wg.Wait()
+ ssglobal = ss // keep 0-length string live until here
+}
+
+var ssglobal string
+
+// Test for issue 7656.
+func TestFinalizerOnGlobal(t *testing.T) {
+ runtime.SetFinalizer(Foo1, func(p *Object1) {})
+ runtime.SetFinalizer(Foo2, func(p *Object2) {})
+ runtime.SetFinalizer(Foo1, nil)
+ runtime.SetFinalizer(Foo2, nil)
}
+
+type Object1 struct {
+ Something []byte
+}
+
+type Object2 struct {
+ Something byte
+}
+
+var (
+ Foo2 = &Object2{}
+ Foo1 = &Object1{}
+)
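
The new tests lean on SetFinalizer semantics worth restating: a finalizer is associated with the pointer passed in, runs at some point after the object becomes unreachable, and is not guaranteed to run at all. A minimal hedged sketch (the channel-plus-timeout shape mirrors the 4-second pattern in the tests above):

package main

import (
	"fmt"
	"runtime"
	"time"
)

type resource struct{ id int }

func main() {
	done := make(chan struct{})
	r := &resource{id: 42}
	runtime.SetFinalizer(r, func(r *resource) {
		fmt.Println("finalized resource", r.id)
		close(done)
	})
	r = nil      // drop the only reference
	runtime.GC() // encourages, but does not force, the finalizer to run
	select {
	case <-done:
	case <-time.After(4 * time.Second):
		fmt.Println("finalizer did not run (it is not guaranteed to)")
	}
}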
diff --git a/libgo/go/runtime/mgc0.go b/libgo/go/runtime/mgc0.go
index b1505466222..624485d18bf 100644
--- a/libgo/go/runtime/mgc0.go
+++ b/libgo/go/runtime/mgc0.go
@@ -9,7 +9,19 @@ func gc_m_ptr(ret *interface{}) {
*ret = (*m)(nil)
}
+// Called from C. Returns the Go type *g.
+func gc_g_ptr(ret *interface{}) {
+ *ret = (*g)(nil)
+}
+
// Called from C. Returns the Go type *itab.
func gc_itab_ptr(ret *interface{}) {
*ret = (*itab)(nil)
}
+
+func timenow() (sec int64, nsec int32)
+
+func gc_unixnanotime(now *int64) {
+ sec, nsec := timenow()
+ *now = sec*1e9 + int64(nsec)
+}
diff --git a/libgo/go/runtime/norace_test.go b/libgo/go/runtime/norace_test.go
index a3d5b00860c..3b171877a6f 100644
--- a/libgo/go/runtime/norace_test.go
+++ b/libgo/go/runtime/norace_test.go
@@ -9,7 +9,6 @@ package runtime_test
import (
"runtime"
- "sync/atomic"
"testing"
)
@@ -31,28 +30,17 @@ func BenchmarkSyscallExcessWork(b *testing.B) {
}
func benchmarkSyscall(b *testing.B, work, excess int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1) * excess
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- foo := 42
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- runtime.Entersyscall()
- for i := 0; i < work; i++ {
- foo *= 2
- foo /= 2
- }
- runtime.Exitsyscall()
- }
+ b.SetParallelism(excess)
+ b.RunParallel(func(pb *testing.PB) {
+ foo := 42
+ for pb.Next() {
+ runtime.Entersyscall()
+ for i := 0; i < work; i++ {
+ foo *= 2
+ foo /= 2
}
- c <- foo == 42
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ runtime.Exitsyscall()
+ }
+ _ = foo
+ })
}
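
b.SetParallelism(n) raises RunParallel's goroutine count to n*GOMAXPROCS, which is exactly what the excess factor above needs: more goroutines than processors, so some are always parked in syscalls. A sketch with the syscall body replaced by a sleep (BenchmarkOversubscribed is hypothetical):

package runtime_test

import (
	"testing"
	"time"
)

// BenchmarkOversubscribed runs 4*GOMAXPROCS goroutines; useful when
// the loop body blocks rather than burns CPU.
func BenchmarkOversubscribed(b *testing.B) {
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			time.Sleep(time.Microsecond)
		}
	})
}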
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 98080457cb5..26aa0b8be5c 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -20,7 +20,7 @@ import (
"text/tabwriter"
)
-// BUG(rsc): Profiles are incomplete and inaccuate on NetBSD and OS X.
+// BUG(rsc): Profiles are incomplete and inaccurate on NetBSD and OS X.
// See http://golang.org/issue/6047 for details.
// A Profile is a collection of stack traces showing the call sequences
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index cce60e1be3b..f714472fd55 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !nacl
+
package pprof_test
import (
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index bdcb199d727..4f364dc4636 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -373,24 +373,11 @@ func TestSchedLocalQueueSteal(t *testing.T) {
}
func benchmarkStackGrowth(b *testing.B, rec int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- stackGrowthRecursive(rec)
- }
- }
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ stackGrowthRecursive(rec)
+ }
+ })
}
func BenchmarkStackGrowth(b *testing.B) {
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index 1702298aed1..7dae95a2d4f 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -10,9 +10,11 @@ import (
// "os"
// "os/exec"
// . "runtime"
+ "runtime/debug"
// "strconv"
// "strings"
"testing"
+ "unsafe"
)
var errf error
@@ -95,10 +97,10 @@ func BenchmarkDeferMany(b *testing.B) {
// The value reported will include the padding between runtime.gogo and the
// next function in memory. That's fine.
func TestRuntimeGogoBytes(t *testing.T) {
- // TODO(brainman): delete when issue 6973 is fixed.
- if GOOS == "windows" {
- t.Skip("skipping broken test on windows")
+ if GOOS == "nacl" {
+ t.Skip("skipping on nacl")
}
+
dir, err := ioutil.TempDir("", "go-build")
if err != nil {
t.Fatalf("failed to create temp directory: %v", err)
@@ -134,3 +136,77 @@ func TestRuntimeGogoBytes(t *testing.T) {
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
SetCPUProfileRate(0)
}
+
+// Addresses to test for faulting behavior.
+// This is less a test of SetPanicOnFault and more a check that
+// the operating system and the runtime can process these faults
+// correctly. That is, we're indirectly testing that without SetPanicOnFault
+// these would manage to turn into ordinary crashes.
+// Note that these are truncated on 32-bit systems, so the bottom 32 bits
+// of the larger addresses must themselves be invalid addresses.
+// We might get unlucky and the OS might have mapped one of these
+// addresses, but probably not: they're all in the first page, very high
+// addresses that normally an OS would reserve for itself, or malformed
+// addresses. Even so, we might have to remove one or two on different
+// systems. We will see.
+
+var faultAddrs = []uint64{
+ // low addresses
+ 0,
+ 1,
+ 0xfff,
+ // high (kernel) addresses
+ // or else malformed.
+ 0xffffffffffffffff,
+ 0xfffffffffffff001,
+ // no 0xffffffffffff0001; 0xffff0001 is mapped for 32-bit user space on OS X
+ // no 0xfffffffffff00001; 0xfff00001 is mapped for 32-bit user space sometimes on Linux
+ 0xffffffffff000001,
+ 0xfffffffff0000001,
+ 0xffffffff00000001,
+ 0xfffffff000000001,
+ 0xffffff0000000001,
+ 0xfffff00000000001,
+ 0xffff000000000001,
+ 0xfff0000000000001,
+ 0xff00000000000001,
+ 0xf000000000000001,
+ 0x8000000000000001,
+}
+
+func TestSetPanicOnFault(t *testing.T) {
+ // This currently results in a fault in the signal trampoline on
+ // dragonfly/386 - see issue 7421.
+ if GOOS == "dragonfly" && GOARCH == "386" {
+ t.Skip("skipping test on dragonfly/386")
+ }
+
+ old := debug.SetPanicOnFault(true)
+ defer debug.SetPanicOnFault(old)
+
+ for _, addr := range faultAddrs {
+ if Compiler == "gccgo" && GOARCH == "386" && (addr&0xff000000) != 0 {
+ // On gccgo these addresses can be used for
+ // the thread stack.
+ continue
+ }
+ testSetPanicOnFault(t, uintptr(addr))
+ }
+}
+
+func testSetPanicOnFault(t *testing.T, addr uintptr) {
+ if GOOS == "nacl" {
+ t.Skip("nacl doesn't seem to fault on high addresses")
+ }
+
+ defer func() {
+ if err := recover(); err == nil {
+ t.Fatalf("did not find error in recover")
+ }
+ }()
+
+ var p *int
+ p = (*int)(unsafe.Pointer(addr))
+ println(*p)
+ t.Fatalf("still here - should have faulted on address %#x", addr)
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index eba34e4a6ba..1211f222575 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -27,6 +27,7 @@ type rtype struct {
string *string
*uncommonType
ptrToThis *rtype
+ zero unsafe.Pointer
}
type _method struct {