libgo/go/runtime/mcache.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This is a temporary mcache.go for gccgo.
// At some point it will be replaced by the one in the gc runtime package.

import "unsafe"

type mcachelist struct {
	list  *mlink
	nlist uint32
}

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
//
// mcaches are allocated from non-GC'd memory, so any heap pointers
// must be specially handled.
//
//go:notinheap
type mcache struct {
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	next_sample      int32   // trigger heap sample after allocating this many bytes
	local_cachealloc uintptr // bytes allocated (or freed) from cache since last lock of heap

	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.go.

	// tiny points to the beginning of the current tiny block, or
	// nil if there is no current tiny block.
	//
	// tiny is a heap pointer. Since mcache is in non-GC'd memory,
	// we handle it by clearing it in releaseAll during mark
	// termination.
	tiny     unsafe.Pointer
	tinysize uintptr

	// The rest is not accessed on every malloc.
	alloc [_NumSizeClasses]*mspan     // spans to allocate from
	free  [_NumSizeClasses]mcachelist // lists of explicitly freed objects

	// Local allocator stats, flushed during GC.
	local_nlookup    uintptr                  // number of pointer lookups
	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
}
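
// Illustrative sketch, not part of the original file: how a small,
// pointer-free allocation could be carved from the current tiny block,
// assuming tiny tracks the next free byte and tinysize the bytes left.
// The real logic (including alignment of the returned pointer, which is
// ignored here for brevity) lives in the allocator in malloc.go.
func (c *mcache) tinyAllocSketch(size uintptr) unsafe.Pointer {
	if c.tiny == nil || size > c.tinysize {
		// Caller would start a new tiny block or fall back to a size-class span.
		return nil
	}
	p := c.tiny
	c.tiny = unsafe.Pointer(uintptr(c.tiny) + size)
	c.tinysize -= size
	return p
}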

type mtypes struct {
	compression byte
	data        uintptr
}

type special struct {
	next   *special
	offset uint16
	kind   byte
}

type mspan struct {
	next     *mspan // next span in list, or nil if none
	prev     *mspan // previous span's next field, or list head's first field if none
	start    uintptr
	npages   uintptr // number of pages in span
	freelist *mlink

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC

	sweepgen    uint32
	ref         uint16
	sizeclass   uint8   // size class
	incache     bool    // being used by an mcache
	state       uint8   // mspaninuse etc
	needzero    uint8   // needs to be zeroed before allocation
	elemsize    uintptr // computed from sizeclass or from npages
	unusedsince int64   // first time spotted by gc in mspanfree state
	npreleased  uintptr // number of pages released to the os
	limit       uintptr // end of data in span
	types       mtypes
	speciallock mutex    // guards specials list
	specials    *special // linked list of special records sorted by offset.
	freebuf     *mlink
}
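
// Illustrative sketch, not part of the original file: classifying a span's
// sweep state against the heap's sweep generation, following the rules in
// the sweepgen comment above. The hSweepgen parameter stands in for
// h->sweepgen and is an assumption of this sketch.
func sweepStateSketch(s *mspan, hSweepgen uint32) string {
	switch s.sweepgen {
	case hSweepgen - 2:
		return "needs sweeping"
	case hSweepgen - 1:
		return "being swept"
	case hSweepgen:
		return "swept and ready to use"
	default:
		return "unknown"
	}
}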

type mlink struct {
	next *mlink
}
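
// Illustrative sketch, not part of the original file: pushing a freed object
// onto an mcachelist. Because the mcache is per-P, the owning P can do this
// without locking; a real runtime would eventually return these objects to
// the central free lists (for example, when the cache is flushed during GC).
func (l *mcachelist) pushSketch(v *mlink) {
	v.next = l.list
	l.list = v
	l.nlist++
}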