summaryrefslogtreecommitdiff
path: root/libgo/go/runtime/mprof.go
diff options
context:
space:
mode:
Diffstat (limited to 'libgo/go/runtime/mprof.go')
-rw-r--r-- libgo/go/runtime/mprof.go | 100
1 files changed, 93 insertions, 7 deletions
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index e1900176637..1bfdc39b624 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -31,6 +31,7 @@ const (
// profile types
memProfile bucketType = 1 + iota
blockProfile
+ mutexProfile
// size of bucket hash table
buckHashSize = 179999
@@ -49,10 +50,14 @@ type bucketType int
//
// Per-call-stack profiling information.
// Lookup by hashing call stack into a linked-list hash table.
+//
+// No heap pointers.
+//
+//go:notinheap
type bucket struct {
next *bucket
allnext *bucket
- typ bucketType // memBucket or blockBucket
+ typ bucketType // memProfile or blockProfile (includes mutexProfile)
hash uintptr
size uintptr
nstk uintptr
@@ -92,7 +97,7 @@ type memRecord struct {
}
// A blockRecord is the bucket data for a bucket of type blockProfile,
-// part of the blocking profile.
+// which is used in blocking and mutex profiles.
type blockRecord struct {
count int64
cycles int64
@@ -101,6 +106,7 @@ type blockRecord struct {
var (
mbuckets *bucket // memory profile buckets
bbuckets *bucket // blocking profile buckets
+ xbuckets *bucket // mutex profile buckets
buckhash *[179999]*bucket
bucketmem uintptr
)
@@ -113,7 +119,7 @@ func newBucket(typ bucketType, nstk int) *bucket {
throw("invalid profile bucket type")
case memProfile:
size += unsafe.Sizeof(memRecord{})
- case blockProfile:
+ case blockProfile, mutexProfile:
size += unsafe.Sizeof(blockRecord{})
}
@@ -141,7 +147,7 @@ func (b *bucket) mp() *memRecord {
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile {
+ if b.typ != blockProfile && b.typ != mutexProfile {
throw("bad use of bucket.bp")
}
data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
@@ -193,6 +199,9 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
if typ == memProfile {
b.allnext = mbuckets
mbuckets = b
+ } else if typ == mutexProfile {
+ b.allnext = xbuckets
+ xbuckets = b
} else {
b.allnext = bbuckets
bbuckets = b
@@ -297,10 +306,20 @@ func blockevent(cycles int64, skip int) {
if cycles <= 0 {
cycles = 1
}
+ if blocksampled(cycles) {
+ saveblockevent(cycles, skip+1, blockProfile, &blockprofilerate)
+ }
+}
+
+func blocksampled(cycles int64) bool {
rate := int64(atomic.Load64(&blockprofilerate))
- if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
- return
+ if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
+ return false
}
+ return true
+}
+
+func saveblockevent(cycles int64, skip int, which bucketType, ratep *uint64) {
gp := getg()
var nstk int
var stk [maxStack]location
@@ -312,12 +331,43 @@ func blockevent(cycles int64, skip int) {
nstk = callers(skip, stk[:])
}
lock(&proflock)
- b := stkbucket(blockProfile, 0, stk[:nstk], true)
+ b := stkbucket(which, 0, stk[:nstk], true)
b.bp().count++
b.bp().cycles += cycles
unlock(&proflock)
}
+var mutexprofilerate uint64 // fraction sampled
+
+// SetMutexProfileFraction controls the fraction of mutex contention events
+// that are reported in the mutex profile. On average 1/rate events are
+// reported. The previous rate is returned.
+//
+// To turn off profiling entirely, pass rate 0.
+// To just read the current rate, pass rate -1.
+// (For n>1 the details of sampling may change.)
+func SetMutexProfileFraction(rate int) int {
+ if rate < 0 {
+ return int(mutexprofilerate)
+ }
+ old := mutexprofilerate
+ atomic.Store64(&mutexprofilerate, uint64(rate))
+ return int(old)
+}
+
+//go:linkname mutexevent sync.event
+func mutexevent(cycles int64, skip int) {
+ if cycles < 0 {
+ cycles = 0
+ }
+ rate := int64(atomic.Load64(&mutexprofilerate))
+ // TODO(pjw): measure impact of always calling fastrand vs using something
+ // like malloc.go:nextSample()
+ if rate > 0 && int64(fastrand())%rate == 0 {
+ saveblockevent(cycles, skip+1, mutexProfile, &mutexprofilerate)
+ }
+}
+
// Go interface to profile data.
// A StackRecord describes a single execution stack.
@@ -514,6 +564,42 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
return
}
+// MutexProfile returns n, the number of records in the current mutex profile.
+// If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
+// Otherwise, MutexProfile does not change p, and returns n, false.
+//
+// Most clients should use the runtime/pprof package
+// instead of calling MutexProfile directly.
+func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&proflock)
+ for b := xbuckets; b != nil; b = b.allnext {
+ n++
+ }
+ if n <= len(p) {
+ ok = true
+ for b := xbuckets; b != nil; b = b.allnext {
+ bp := b.bp()
+ r := &p[0]
+ r.Count = int64(bp.count)
+ r.Cycles = bp.cycles
+ i := 0
+ var loc location
+ for i, loc = range b.stk() {
+ if i >= len(r.Stack0) {
+ break
+ }
+ r.Stack0[i] = loc.pc
+ }
+ for ; i < len(r.Stack0); i++ {
+ r.Stack0[i] = 0
+ }
+ p = p[1:]
+ }
+ }
+ unlock(&proflock)
+ return
+}
+
// ThreadCreateProfile returns n, the number of records in the thread creation profile.
// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.