path: root/libgo/go/runtime/malloc.go
author Ian Lance Taylor <ian@gcc.gnu.org> 2019-09-12 23:22:53 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org> 2019-09-12 23:22:53 +0000
commit 656297e1fec9a127ff742df16958ee279ccacec5 (patch)
tree 24347a35dacea36ce742c32c17420f3e31f17e3d /libgo/go/runtime/malloc.go
parent d6ecb707cc5a58816d27908a7aa324c4b0bc67bb (diff)
libgo: update to Go1.13
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/194698
From-SVN: r275691
Diffstat (limited to 'libgo/go/runtime/malloc.go')
-rw-r--r-- libgo/go/runtime/malloc.go | 53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index cee5f6bc4de..0eee55ef97e 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -335,12 +335,21 @@ const (
var physPageSize uintptr
// physHugePageSize is the size in bytes of the OS's default physical huge
-// page size whose allocation is opaque to the application.
+// page size whose allocation is opaque to the application. It is assumed
+// and verified to be a power of two.
//
// If set, this must be set by the OS init code (typically in osinit) before
// mallocinit. However, setting it at all is optional, and leaving the default
// value is always safe (though potentially less efficient).
-var physHugePageSize uintptr
+//
+// Since physHugePageSize is always assumed to be a power of two,
+// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
+// The purpose of physHugePageShift is to avoid doing divisions in
+// performance critical functions.
+var (
+ physHugePageSize uintptr
+ physHugePageShift uint
+)
// OS memory management abstraction layer
//
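The comment added in the hunk above is the point of this change: because physHugePageSize is verified to be a power of two, hot paths can use physHugePageShift to replace division and modulo with a shift and a mask. A minimal standalone sketch of that trick, with illustrative values (a 2 MiB huge page is assumed; this is not the runtime code):

package main

import "fmt"

func main() {
	var hugePageSize uintptr = 1 << 21 // assumed 2 MiB huge page
	var hugePageShift uint = 21        // hugePageSize == 1 << hugePageShift

	addr := uintptr(0x4a3f1c2b) // an arbitrary example address

	// Division by a power of two becomes a right shift ...
	pageIndex := addr >> hugePageShift // same as addr / hugePageSize
	// ... and rounding down to a page boundary becomes a mask.
	pageBase := addr &^ (hugePageSize - 1) // same as addr - addr%hugePageSize

	fmt.Printf("index %d, base %#x\n", pageIndex, pageBase)
}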
@@ -443,6 +452,17 @@ func mallocinit() {
print("system page size (", physPageSize, ") must be a power of 2\n")
throw("bad system page size")
}
+ if physHugePageSize&(physHugePageSize-1) != 0 {
+ print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
+ throw("bad system huge page size")
+ }
+ if physHugePageSize != 0 {
+ // Since physHugePageSize is a power of 2, it suffices to increase
+ // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
+ for 1<<physHugePageShift != physHugePageSize {
+ physHugePageShift++
+ }
+ }
// Initialize the heap.
mheap_.init()
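Two properties make the new checks cheap. A nonzero power of two has exactly one bit set, so x&(x-1) clears it and yields zero; zero itself also passes that test, which is why the shift-computing loop is guarded by a separate physHugePageSize != 0 check. A hedged standalone sketch of both steps (names are illustrative, not runtime identifiers):

package main

import "fmt"

// log2PowerOfTwo mirrors the mallocinit loop: assuming x is a nonzero
// power of two, count how far 1 must be shifted left to reach it.
func log2PowerOfTwo(x uintptr) uint {
	var shift uint
	for 1<<shift != x {
		shift++
	}
	return shift
}

func main() {
	for _, x := range []uintptr{1 << 12, 1 << 21, 3 << 20} {
		isPow2 := x != 0 && x&(x-1) == 0
		fmt.Printf("%#x power of two: %v\n", x, isPow2)
		if isPow2 {
			fmt.Println("  shift:", log2PowerOfTwo(x))
		}
	}
}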
@@ -877,7 +897,22 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if debug.sbrk != 0 {
align := uintptr(16)
if typ != nil {
- align = uintptr(typ.align)
+ // TODO(austin): This should be just
+ // align = uintptr(typ.align)
+ // but that's only 4 on 32-bit platforms,
+ // even if there's a uint64 field in typ (see #599).
+ // This causes 64-bit atomic accesses to panic.
+ // Hence, we use stricter alignment that matches
+ // the normal allocator better.
+ if size&7 == 0 {
+ align = 8
+ } else if size&3 == 0 {
+ align = 4
+ } else if size&1 == 0 {
+ align = 2
+ } else {
+ align = 1
+ }
}
return persistentalloc(size, align, &memstats.other_sys)
}
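The hunk above replaces typ.align with an alignment derived from the low bits of the requested size, because typ.align is capped at 4 on 32-bit platforms even when typ contains a uint64 field, and under-aligned 64-bit atomic accesses panic there. A standalone sketch of the same selection rule (illustrative, not the runtime code):

package main

import "fmt"

// alignForSize picks the largest of 8, 4, 2, 1 that evenly divides size,
// mirroring the debug.sbrk fallback in the diff above.
func alignForSize(size uintptr) uintptr {
	switch {
	case size&7 == 0:
		return 8
	case size&3 == 0:
		return 4
	case size&1 == 0:
		return 2
	default:
		return 1
	}
}

func main() {
	for _, size := range []uintptr{24, 12, 6, 7} {
		fmt.Printf("size %2d -> align %d\n", size, alignForSize(size))
	}
}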
@@ -1076,8 +1111,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if rate := MemProfileRate; rate > 0 {
- if rate != 1 && int32(size) < c.next_sample {
- c.next_sample -= int32(size)
+ if rate != 1 && size < c.next_sample {
+ c.next_sample -= size
} else {
mp := acquirem()
profilealloc(mp, x, size)
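Widening next_sample from int32 to uintptr removes the int32(size) conversion, which silently wraps for allocations of 2 GiB and larger on 64-bit platforms. A tiny sketch of the hazard the old comparison had (illustrative values):

package main

import "fmt"

func main() {
	size := uintptr(1) << 31 // a 2 GiB allocation on a 64-bit platform
	fmt.Println(int32(size)) // -2147483648: the old int32 comparison saw a negative size
	fmt.Println(size)        // 2147483648: the widened uintptr keeps the real value
}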
@@ -1180,7 +1215,7 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
// processes, the distance between two samples follows the exponential
// distribution (exp(MemProfileRate)), so the best return value is a random
// number taken from an exponential distribution whose mean is MemProfileRate.
-func nextSample() int32 {
+func nextSample() uintptr {
if GOOS == "plan9" {
// Plan 9 doesn't support floating point in note handler.
if g := getg(); g == g.m.gsignal {
@@ -1188,7 +1223,7 @@ func nextSample() int32 {
}
}
- return fastexprand(MemProfileRate)
+ return uintptr(fastexprand(MemProfileRate))
}
// fastexprand returns a random number from an exponential distribution with
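The surrounding comments explain the sampling model: allocations are treated as a Poisson process, so the distance to the next sampled byte should be an exponential variate with mean MemProfileRate. The runtime's fastexprand implements this with a fixed-point log2 table so it stays allocation- and FPU-friendly; a hedged sketch of the same inverse-transform idea using the standard library instead (math/rand and math.Log are stand-ins for the runtime's fastrand and table lookup):

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// nextSampleDistance draws from an exponential distribution with the given
// mean by inverse-transform sampling: -mean * ln(U) for U uniform in (0, 1].
func nextSampleDistance(mean float64) uintptr {
	u := 1 - rand.Float64() // (0, 1]; avoids ln(0)
	return uintptr(-mean * math.Log(u))
}

func main() {
	const mean = 512 * 1024 // MemProfileRate's default, in bytes
	var sum uint64
	const n = 100000
	for i := 0; i < n; i++ {
		sum += uint64(nextSampleDistance(mean))
	}
	fmt.Printf("empirical mean: %d bytes (expected ~%d)\n", sum/n, mean)
}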
@@ -1223,14 +1258,14 @@ func fastexprand(mean int) int32 {
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
-func nextSampleNoFP() int32 {
+func nextSampleNoFP() uintptr {
// Set first allocation sample size.
rate := MemProfileRate
if rate > 0x3fffffff { // make 2*rate not overflow
rate = 0x3fffffff
}
if rate != 0 {
- return int32(fastrand() % uint32(2*rate))
+ return uintptr(fastrand() % uint32(2*rate))
}
return 0
}
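nextSampleNoFP keeps the same mean without floating point: fastrand() % uint32(2*rate) is (approximately) uniform on [0, 2*rate), and a uniform variate on that interval has mean rate, so the average sampling period matches the exponential path even though the distribution's shape does not. A quick standalone check of that claim (math/rand stands in for the runtime's fastrand):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const rate = 512 * 1024 // stand-in for MemProfileRate
	var sum uint64
	const n = 1000000
	for i := 0; i < n; i++ {
		// Uniform on [0, 2*rate): the mean is rate.
		sum += uint64(rand.Uint32() % uint32(2*rate))
	}
	fmt.Printf("empirical mean: %d (expected ~%d)\n", sum/n, rate)
}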