Diffstat (limited to 'libgo/go/reflect/type.go')
-rw-r--r--  libgo/go/reflect/type.go | 503
1 file changed, 327 insertions, 176 deletions
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 5cdfba55648..e488938a13f 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -12,7 +12,7 @@
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
-// http://golang.org/doc/articles/laws_of_reflection.html
+// https://golang.org/doc/articles/laws_of_reflection.html
package reflect
import (
@@ -90,16 +90,16 @@ type Type interface {
// Kind returns the specific kind of this type.
Kind() Kind
- // Implements returns true if the type implements the interface type u.
+ // Implements reports whether the type implements the interface type u.
Implements(u Type) bool
- // AssignableTo returns true if a value of the type is assignable to type u.
+ // AssignableTo reports whether a value of the type is assignable to type u.
AssignableTo(u Type) bool
- // ConvertibleTo returns true if a value of the type is convertible to type u.
+ // ConvertibleTo reports whether a value of the type is convertible to type u.
ConvertibleTo(u Type) bool
- // Comparable returns true if values of this type are comparable.
+ // Comparable reports whether values of this type are comparable.
Comparable() bool
// Methods applicable only to some types, depending on Kind.
@@ -123,7 +123,7 @@ type Type interface {
// It panics if the type's Kind is not Chan.
ChanDir() ChanDir
- // IsVariadic returns true if a function type's final input parameter
+ // IsVariadic reports whether a function type's final input parameter
// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
// implicit actual type []T.
//
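
A minimal usage sketch of IsVariadic and the implicit []T final parameter (assuming only the standard reflect and fmt packages):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    t := reflect.TypeOf(fmt.Sprintln) // func(a ...interface{}) string
    fmt.Println(t.IsVariadic())       // true
    fmt.Println(t.In(t.NumIn() - 1))  // []interface {}
}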
@@ -204,9 +204,9 @@ type Type interface {
// See golang.org/issue/4876 for more details.
/*
- * These data structures are known to the compiler (../../cmd/gc/reflect.c).
+ * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
* A few are known to ../runtime/type.go to convey to debuggers.
- * They are also known to ../runtime/type.h.
+ * They are also known to ../runtime/type.go.
*/
// A Kind represents the specific kind of type that a Type represents.
@@ -255,8 +255,8 @@ type rtype struct {
size uintptr
hash uint32 // hash of type; avoids computation in hash tables
- hashfn func(unsafe.Pointer, uintptr) // hash function
- equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) // equality function
+ hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
+ equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
gc unsafe.Pointer // garbage collection data
string *string // string form; unnecessary but undeniably useful
@@ -392,7 +392,7 @@ type Method struct {
// method name. It is empty for upper case (exported) method names.
// The combination of PkgPath and Name uniquely identifies a method
// in a method set.
- // See http://golang.org/ref/spec#Uniqueness_of_identifiers
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
Name string
PkgPath string
@@ -548,7 +548,7 @@ func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
return
}
-// TODO(rsc): 6g supplies these, but they are not
+// TODO(rsc): gc supplies these, but they are not
// as efficient as they could be: they have commonType
// as the receiver instead of *rtype.
func (t *rtype) NumMethod() int {
@@ -758,7 +758,7 @@ type StructField struct {
// Name is the field name.
// PkgPath is the package path that qualifies a lower case (unexported)
// field name. It is empty for upper case (exported) field names.
- // See http://golang.org/ref/spec#Uniqueness_of_identifiers
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
Name string
PkgPath string
@@ -784,8 +784,11 @@ type StructTag string
// If the tag does not have the conventional format, the value
// returned by Get is unspecified.
func (tag StructTag) Get(key string) string {
+ // When modifying this code, also update the validateStructTag code
+ // in golang.org/x/tools/cmd/vet/structtag.go.
+
for tag != "" {
- // skip leading space
+ // Skip leading space.
i := 0
for i < len(tag) && tag[i] == ' ' {
i++
@@ -795,19 +798,21 @@ func (tag StructTag) Get(key string) string {
break
}
- // scan to colon.
- // a space or a quote is a syntax error
+ // Scan to colon. A space, a quote or a control character is a syntax error.
+ // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
+ // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
+ // as it is simpler to inspect the tag's bytes than the tag's runes.
i = 0
- for i < len(tag) && tag[i] != ' ' && tag[i] != ':' && tag[i] != '"' {
+ for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
i++
}
- if i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
+ if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
break
}
name := string(tag[:i])
tag = tag[i+1:]
- // scan quoted string to find value
+ // Scan quoted string to find value.
i = 1
for i < len(tag) && tag[i] != '"' {
if tag[i] == '\\' {
@@ -822,7 +827,10 @@ func (tag StructTag) Get(key string) string {
tag = tag[i+1:]
if key == name {
- value, _ := strconv.Unquote(qvalue)
+ value, err := strconv.Unquote(qvalue)
+ if err != nil {
+ break
+ }
return value
}
}
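
A minimal sketch of StructTag.Get on a conventionally formatted tag (the struct, field, and keys below are made up for illustration):

package main

import (
    "fmt"
    "reflect"
)

type user struct {
    Name string `json:"name,omitempty" xml:"name"`
}

func main() {
    f, _ := reflect.TypeOf(user{}).FieldByName("Name")
    fmt.Println(f.Tag.Get("json")) // name,omitempty
    fmt.Println(f.Tag.Get("xml"))  // name
    fmt.Println(f.Tag.Get("db"))   // "" (key not present)
}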
@@ -1024,8 +1032,8 @@ func (t *structType) FieldByName(name string) (f StructField, present bool) {
return t.FieldByNameFunc(func(s string) bool { return s == name })
}
-// TypeOf returns the reflection Type of the value in the interface{}.
-// TypeOf(nil) returns nil.
+// TypeOf returns the reflection Type that represents the dynamic type of i.
+// If i is a nil interface value, TypeOf returns nil.
func TypeOf(i interface{}) Type {
eface := *(*emptyInterface)(unsafe.Pointer(&i))
return toType(eface.typ)
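
A short sketch of the documented behavior (standard reflect API only): TypeOf reports the dynamic type, and a nil interface value yields a nil Type.

package main

import (
    "fmt"
    "reflect"
)

func main() {
    var i interface{} = 3.14
    fmt.Println(reflect.TypeOf(i))          // float64
    fmt.Println(reflect.TypeOf(nil) == nil) // true
}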
@@ -1109,7 +1117,8 @@ func (t *rtype) ptrTo() *rtype {
return r.(*rtype)
}
- // initialize p using *byte's ptrType as a prototype.
+ // Create a new ptrType starting with the description
+ // of an *unsafe.Pointer.
p = new(ptrType)
var iptr interface{} = (*unsafe.Pointer)(nil)
prototype := *(**ptrType)(unsafe.Pointer(&iptr))
@@ -1145,6 +1154,7 @@ func (t *rtype) ptrTo() *rtype {
q := canonicalize(&p.rtype)
p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
+ ptrMap.m[t] = p
ptrMap.Unlock()
return &p.rtype
}
@@ -1211,7 +1221,7 @@ func (t *rtype) Comparable() bool {
}
}
-// implements returns true if the type V implements the interface type T.
+// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
if T.Kind() != Interface {
return false
@@ -1232,7 +1242,7 @@ func implements(T, V *rtype) bool {
// methods along the way, or else V does not implement T.
// This lets us run the scan in overall linear time instead of
// the quadratic time a naive search would require.
- // See also ../runtime/iface.c.
+ // See also ../runtime/iface.go.
if V.Kind() == Interface {
v := (*interfaceType)(unsafe.Pointer(V))
i := 0
@@ -1265,9 +1275,9 @@ func implements(T, V *rtype) bool {
return false
}
-// directlyAssignable returns true if a value x of type V can be directly
+// directlyAssignable reports whether a value x of type V can be directly
// assigned (using memmove) to a value of type T.
-// http://golang.org/doc/go_spec.html#Assignability
+// https://golang.org/doc/go_spec.html#Assignability
// Ignoring the interface rules (implemented elsewhere)
// and the ideal constant rules (no ideal constants at run time).
func directlyAssignable(T, V *rtype) bool {
@@ -1445,6 +1455,14 @@ type chanGC struct {
end uintptr // _GC_END
}
+// The funcLookupCache caches FuncOf lookups.
+// FuncOf does not share the common lookupCache since cacheKey is not
+// sufficient to represent functions unambiguously.
+var funcLookupCache struct {
+ sync.RWMutex
+ m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+}
+
// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
@@ -1485,6 +1503,7 @@ func ChanOf(dir ChanDir, t Type) Type {
prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := new(chanType)
*ch = *prototype
+ ch.dir = uintptr(dir)
ch.string = &s
// gccgo uses a different hash.
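
A minimal sketch of ChanOf with the element direction now carried through via ch.dir (standard reflect API):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    t := reflect.TypeOf(0)
    fmt.Println(reflect.ChanOf(reflect.RecvDir, t)) // <-chan int
    fmt.Println(reflect.ChanOf(reflect.SendDir, t)) // chan<- int
    fmt.Println(reflect.ChanOf(reflect.BothDir, t)) // chan int
}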
@@ -1545,9 +1564,8 @@ func MapOf(key, elem Type) Type {
// Make a map type.
var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
- prototype := *(**mapType)(unsafe.Pointer(&imap))
mt := new(mapType)
- *mt = *prototype
+ *mt = **(**mapType)(unsafe.Pointer(&imap))
mt.string = &s
// gccgo uses a different hash
@@ -1576,154 +1594,243 @@ func MapOf(key, elem Type) Type {
return cachePut(ckey, &mt.rtype)
}
-// gcProg is a helper type for generatation of GC pointer info.
-type gcProg struct {
- gc []byte
- size uintptr // size of type in bytes
- hasPtr bool
-}
+// FuncOf returns the function type with the given argument and result types.
+// For example if k represents int and e represents string,
+// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
+//
+// The variadic argument controls whether the function is variadic. FuncOf
+// panics if in[len(in)-1] does not represent a slice and variadic is
+// true.
+func FuncOf(in, out []Type, variadic bool) Type {
+ if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
+ panic("reflect.FuncOf: last arg of variadic func must be slice")
+ }
+
+ // Make a func type.
+ var ifunc interface{} = (func())(nil)
+ prototype := *(**funcType)(unsafe.Pointer(&ifunc))
+ ft := new(funcType)
+ *ft = *prototype
+
+ // Build a hash and minimally populate ft.
+ var hash uint32 = 8
+ var fin, fout []*rtype
+ shift := uint(1)
+ for _, in := range in {
+ t := in.(*rtype)
+ fin = append(fin, t)
+ hash += t.hash << shift
+ shift++
+ }
+ shift = 2
+ for _, out := range out {
+ t := out.(*rtype)
+ fout = append(fout, t)
+ hash += t.hash << shift
+ shift++
+ }
+ if variadic {
+ hash++
+ }
+ hash <<= 4
+ ft.hash = hash
+ ft.in = fin
+ ft.out = fout
+ ft.dotdotdot = variadic
-func (gc *gcProg) append(v byte) {
- gc.align(unsafe.Sizeof(uintptr(0)))
- gc.appendWord(v)
-}
+ // Look in cache.
+ funcLookupCache.RLock()
+ for _, t := range funcLookupCache.m[hash] {
+ if haveIdenticalUnderlyingType(&ft.rtype, t) {
+ funcLookupCache.RUnlock()
+ return t
+ }
+ }
+ funcLookupCache.RUnlock()
-// Appends t's type info to the current program.
-func (gc *gcProg) appendProg(t *rtype) {
- gc.align(uintptr(t.align))
- if !t.pointers() {
- gc.size += t.size
- return
+ // Not in cache, lock and retry.
+ funcLookupCache.Lock()
+ defer funcLookupCache.Unlock()
+ if funcLookupCache.m == nil {
+ funcLookupCache.m = make(map[uint32][]*rtype)
}
- switch t.Kind() {
- default:
- panic("reflect: non-pointer type marked as having pointers")
- case Ptr, UnsafePointer, Chan, Func, Map:
- gc.appendWord(bitsPointer)
- case Slice:
- gc.appendWord(bitsPointer)
- gc.appendWord(bitsScalar)
- gc.appendWord(bitsScalar)
- case String:
- gc.appendWord(bitsPointer)
- gc.appendWord(bitsScalar)
- case Array:
- c := t.Len()
- e := t.Elem().common()
- for i := 0; i < c; i++ {
- gc.appendProg(e)
+ for _, t := range funcLookupCache.m[hash] {
+ if haveIdenticalUnderlyingType(&ft.rtype, t) {
+ return t
}
- case Interface:
- gc.appendWord(bitsMultiWord)
- if t.NumMethod() == 0 {
- gc.appendWord(bitsEface)
- } else {
- gc.appendWord(bitsIface)
- }
- case Struct:
- c := t.NumField()
- for i := 0; i < c; i++ {
- gc.appendProg(t.Field(i).Type.common())
- }
- gc.align(uintptr(t.align))
}
+
+ str := funcStr(ft)
+
+ // Populate the remaining fields of ft and store in cache.
+ ft.string = &str
+ ft.uncommonType = nil
+ ft.ptrToThis = nil
+
+ // TODO(cmang): Generate GC data for funcs.
+ ft.gc = unsafe.Pointer(&ptrDataGCProg)
+
+ funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
+
+ return toType(&ft.rtype)
}
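
A minimal sketch of the FuncOf API added above, mirroring the doc comment's k/e example (standard reflect API):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    k := reflect.TypeOf(0)  // int
    e := reflect.TypeOf("") // string
    fmt.Println(reflect.FuncOf([]reflect.Type{k}, []reflect.Type{e}, false)) // func(int) string

    // Variadic: the last input type must be a slice.
    in := []reflect.Type{k, reflect.TypeOf([]string(nil))}
    fmt.Println(reflect.FuncOf(in, nil, true)) // func(int, ...string)
}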
-func (gc *gcProg) appendWord(v byte) {
- ptrsize := unsafe.Sizeof(uintptr(0))
- if gc.size%ptrsize != 0 {
- panic("reflect: unaligned GC program")
+// funcStr builds a string representation of a funcType.
+func funcStr(ft *funcType) string {
+ repr := make([]byte, 0, 64)
+ repr = append(repr, "func("...)
+ for i, t := range ft.in {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ if ft.dotdotdot && i == len(ft.in)-1 {
+ repr = append(repr, "..."...)
+ repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
+ } else {
+ repr = append(repr, *t.string...)
+ }
}
- nptr := gc.size / ptrsize
- for uintptr(len(gc.gc)) < nptr/2+1 {
- gc.gc = append(gc.gc, 0x44) // BitsScalar
+ repr = append(repr, ')')
+ if l := len(ft.out); l == 1 {
+ repr = append(repr, ' ')
+ } else if l > 1 {
+ repr = append(repr, " ("...)
}
- gc.gc[nptr/2] &= ^(3 << ((nptr%2)*4 + 2))
- gc.gc[nptr/2] |= v << ((nptr%2)*4 + 2)
- gc.size += ptrsize
- if v == bitsPointer {
- gc.hasPtr = true
+ for i, t := range ft.out {
+ if i > 0 {
+ repr = append(repr, ", "...)
+ }
+ repr = append(repr, *t.string...)
}
+ if len(ft.out) > 1 {
+ repr = append(repr, ')')
+ }
+ return string(repr)
}
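
The string form funcStr builds matches what the exported Type.String reports for an equivalent function value; a quick sketch (the function literal is only for illustration):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    f := func(int, ...string) (bool, error) { return false, nil }
    fmt.Println(reflect.TypeOf(f).String()) // func(int, ...string) (bool, error)
}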
-func (gc *gcProg) finalize() (unsafe.Pointer, bool) {
- if gc.size == 0 {
- return nil, false
- }
- ptrsize := unsafe.Sizeof(uintptr(0))
- gc.align(ptrsize)
- nptr := gc.size / ptrsize
- for uintptr(len(gc.gc)) < nptr/2+1 {
- gc.gc = append(gc.gc, 0x44) // BitsScalar
- }
- // If number of words is odd, repeat the mask twice.
- // Compiler does the same.
- if nptr%2 != 0 {
- for i := uintptr(0); i < nptr; i++ {
- gc.appendWord(extractGCWord(gc.gc, i))
+// isReflexive reports whether the == operation on the type is reflexive.
+// That is, x == x for all values x of type t.
+func isReflexive(t *rtype) bool {
+ switch t.Kind() {
+ case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
+ return true
+ case Float32, Float64, Complex64, Complex128, Interface:
+ return false
+ case Array:
+ tt := (*arrayType)(unsafe.Pointer(t))
+ return isReflexive(tt.elem)
+ case Struct:
+ tt := (*structType)(unsafe.Pointer(t))
+ for _, f := range tt.fields {
+ if !isReflexive(f.typ) {
+ return false
+ }
}
+ return true
+ default:
+ // Func, Map, Slice, Invalid
+ panic("isReflexive called on non-key type " + t.String())
}
- return unsafe.Pointer(&gc.gc[0]), gc.hasPtr
-}
-
-func extractGCWord(gc []byte, i uintptr) byte {
- return (gc[i/2] >> ((i%2)*4 + 2)) & 3
-}
-
-func (gc *gcProg) align(a uintptr) {
- gc.size = align(gc.size, a)
}
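
The non-reflexive case this guards against is visible with NaN keys: == is not reflexive for floats, so the map runtime must treat such keys specially. A small sketch:

package main

import (
    "fmt"
    "math"
)

func main() {
    nan := math.NaN()
    fmt.Println(nan == nan) // false: == is not reflexive for Float64

    m := map[float64]int{}
    m[nan] = 1
    m[nan] = 2
    fmt.Println(len(m)) // 2: each NaN insertion creates a distinct key
}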
-// These constants must stay in sync with ../runtime/mgc0.h.
-const (
- bitsScalar = 1
- bitsPointer = 2
- bitsMultiWord = 3
-
- bitsIface = 2
- bitsEface = 3
-)
-
// Make sure these routines stay in sync with ../../runtime/hashmap.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
- bucketSize = 8
- maxKeySize = 128
- maxValSize = 128
+ bucketSize uintptr = 8
+ maxKeySize uintptr = 128
+ maxValSize uintptr = 128
)
func bucketOf(ktyp, etyp *rtype) *rtype {
+ // See comment on hmap.overflow in ../runtime/hashmap.go.
+ var kind uint8
+ if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
+ ktyp.size <= maxKeySize && etyp.size <= maxValSize {
+ kind = kindNoPointers
+ }
+
if ktyp.size > maxKeySize {
ktyp = PtrTo(ktyp).(*rtype)
}
if etyp.size > maxValSize {
etyp = PtrTo(etyp).(*rtype)
}
- ptrsize := unsafe.Sizeof(uintptr(0))
- var gc gcProg
- // topbits
- for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrsize); i++ {
- gc.append(bitsScalar)
- }
- // keys
- for i := 0; i < bucketSize; i++ {
- gc.appendProg(ktyp)
- }
- // values
- for i := 0; i < bucketSize; i++ {
- gc.appendProg(etyp)
- }
- // overflow
- gc.append(bitsPointer)
- if runtime.GOARCH == "amd64p32" {
- gc.append(bitsScalar)
+ // Prepare GC data if any.
+ // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
+ // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
+ // Normally the enforced limit on pointer maps is 16 bytes,
+ // but larger ones are acceptable, 33 bytes isn't too big,
+ // and it's easier to generate a pointer bitmap than a GC program.
+ // Note that since the key and value are known to be <= 128 bytes,
+ // they're guaranteed to have bitmaps instead of GC programs.
+ // var gcdata *byte
+ var ptrdata uintptr
+ var overflowPad uintptr
+
+ // On NaCl, pad if needed to make overflow end at the proper struct alignment.
+ // On other systems, align > ptrSize is not possible.
+ if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
+ overflowPad = ptrSize
+ }
+ size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
+ if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
+ panic("reflect: bad size computation in MapOf")
+ }
+
+ if kind != kindNoPointers {
+ nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
+ mask := make([]byte, (nptr+7)/8)
+ base := bucketSize / ptrSize
+
+ if ktyp.kind&kindNoPointers == 0 {
+ if ktyp.kind&kindGCProg != 0 {
+ panic("reflect: unexpected GC program in MapOf")
+ }
+ kmask := (*[16]byte)(unsafe.Pointer( /*ktyp.gcdata*/ nil))
+ for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
+ if (kmask[i/8]>>(i%8))&1 != 0 {
+ for j := uintptr(0); j < bucketSize; j++ {
+ word := base + j*ktyp.size/ptrSize + i
+ mask[word/8] |= 1 << (word % 8)
+ }
+ }
+ }
+ }
+ base += bucketSize * ktyp.size / ptrSize
+
+ if etyp.kind&kindNoPointers == 0 {
+ if etyp.kind&kindGCProg != 0 {
+ panic("reflect: unexpected GC program in MapOf")
+ }
+ emask := (*[16]byte)(unsafe.Pointer( /*etyp.gcdata*/ nil))
+ for i := uintptr(0); i < etyp.size/ptrSize; i++ {
+ if (emask[i/8]>>(i%8))&1 != 0 {
+ for j := uintptr(0); j < bucketSize; j++ {
+ word := base + j*etyp.size/ptrSize + i
+ mask[word/8] |= 1 << (word % 8)
+ }
+ }
+ }
+ }
+ base += bucketSize * etyp.size / ptrSize
+ base += overflowPad / ptrSize
+
+ word := base
+ mask[word/8] |= 1 << (word % 8)
+ // gcdata = &mask[0]
+ ptrdata = (word + 1) * ptrSize
+
+ // overflow word must be last
+ if ptrdata != size {
+ panic("reflect: bad layout computation in MapOf")
+ }
}
b := new(rtype)
- b.size = gc.size
+ // b.size = gc.size
// b.gc[0], _ = gc.finalize()
b.kind |= kindGCProg
s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
@@ -1863,24 +1970,25 @@ func SliceOf(t Type) Type {
return cachePut(ckey, &slice.rtype)
}
+// See cmd/compile/internal/gc/reflect.go for derivation of constant.
+const maxPtrmaskBytes = 2048
+
// ArrayOf returns the array type with the given count and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
-//
-// TODO(rsc): Unexported for now. Export once the alg field is set correctly
-// for the type. This may require significant work.
-//
-// TODO(rsc): TestArrayOf is also disabled. Re-enable.
-func arrayOf(count int, elem Type) Type {
+func ArrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
+ // call SliceOf here as it calls cacheGet/cachePut.
+ // ArrayOf also calls cacheGet/cachePut and thus may modify the state of
+ // the lookupCache mutex.
slice := SliceOf(elem)
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(count)}
- if slice := cacheGet(ckey); slice != nil {
- return slice
+ if array := cacheGet(ckey); array != nil {
+ return array
}
// Look in known types.
@@ -1891,7 +1999,6 @@ func arrayOf(count int, elem Type) Type {
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := new(arrayType)
*array = *prototype
- // TODO: Set extra kind bits correctly.
array.string = &s
// gccgo uses a different hash.
@@ -1908,19 +2015,70 @@ func arrayOf(count int, elem Type) Type {
panic("reflect.ArrayOf: array size would exceed virtual address space")
}
array.size = typ.size * uintptr(count)
+ // if count > 0 && typ.ptrdata != 0 {
+ // array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
+ // }
array.align = typ.align
array.fieldAlign = typ.fieldAlign
- // TODO: array.alg
- // TODO: array.gc
- // TODO:
array.uncommonType = nil
array.ptrToThis = nil
array.len = uintptr(count)
array.slice = slice.(*rtype)
+ array.kind &^= kindNoPointers
+ switch {
+ case typ.kind&kindNoPointers != 0 || array.size == 0:
+ // No pointers.
+ array.kind |= kindNoPointers
+ gc := [...]uintptr{array.size, _GC_END}
+ array.gc = unsafe.Pointer(&gc[0])
+
+ case count == 1:
+ // In memory, 1-element array looks just like the element.
+ array.kind |= typ.kind & kindGCProg
+ array.gc = typ.gc
+
+ default:
+ gc := []uintptr{array.size, _GC_ARRAY_START, 0, uintptr(count), typ.size}
+ gc = appendGCProgram(gc, typ)
+ gc = append(gc, _GC_ARRAY_NEXT, _GC_END)
+ array.gc = unsafe.Pointer(&gc[0])
+ }
+
+ array.kind &^= kindDirectIface
+
+ array.hashfn = func(p unsafe.Pointer, size uintptr) uintptr {
+ ret := uintptr(0)
+ for i := 0; i < count; i++ {
+ ret *= 33
+ ret += typ.hashfn(p, typ.size)
+ p = unsafe.Pointer(uintptr(p) + typ.size)
+ }
+ return ret
+ }
+
+ array.equalfn = func(p1, p2 unsafe.Pointer, size uintptr) bool {
+ for i := 0; i < count; i++ {
+ if !typ.equalfn(p1, p2, typ.size) {
+ return false
+ }
+ p1 = unsafe.Pointer(uintptr(p1) + typ.size)
+ p2 = unsafe.Pointer(uintptr(p2) + typ.size)
+ }
+ return true
+ }
+
return cachePut(ckey, &array.rtype)
}
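
A minimal sketch of the newly exported ArrayOf (standard reflect API):

package main

import (
    "fmt"
    "reflect"
)

func main() {
    at := reflect.ArrayOf(5, reflect.TypeOf(0))
    fmt.Println(at)                                 // [5]int
    fmt.Println(at.Len(), at.Elem())                // 5 int
    fmt.Println(reflect.New(at).Elem().Interface()) // [0 0 0 0 0]
}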
+func appendVarint(x []byte, v uintptr) []byte {
+ for ; v >= 0x80; v >>= 7 {
+ x = append(x, byte(v|0x80))
+ }
+ x = append(x, byte(v))
+ return x
+}
+
// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
@@ -1969,56 +2127,49 @@ type bitVector struct {
data []byte
}
-// append a bit pair to the bitmap.
-func (bv *bitVector) append2(bits uint8) {
- // assume bv.n is a multiple of 2, since append2 is the only operation.
+// append a bit to the bitmap.
+func (bv *bitVector) append(bit uint8) {
if bv.n%8 == 0 {
bv.data = append(bv.data, 0)
}
- bv.data[bv.n/8] |= bits << (bv.n % 8)
- bv.n += 2
+ bv.data[bv.n/8] |= bit << (bv.n % 8)
+ bv.n++
}
-func addTypeBits(bv *bitVector, offset *uintptr, t *rtype) {
- *offset = align(*offset, uintptr(t.align))
+func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
if t.kind&kindNoPointers != 0 {
- *offset += t.size
return
}
switch Kind(t.kind & kindMask) {
case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
// 1 pointer at start of representation
- for bv.n < 2*uint32(*offset/uintptr(ptrSize)) {
- bv.append2(bitsScalar)
+ for bv.n < uint32(offset/uintptr(ptrSize)) {
+ bv.append(0)
}
- bv.append2(bitsPointer)
+ bv.append(1)
case Interface:
// 2 pointers
- for bv.n < 2*uint32(*offset/uintptr(ptrSize)) {
- bv.append2(bitsScalar)
+ for bv.n < uint32(offset/uintptr(ptrSize)) {
+ bv.append(0)
}
- bv.append2(bitsPointer)
- bv.append2(bitsPointer)
+ bv.append(1)
+ bv.append(1)
case Array:
// repeat inner type
tt := (*arrayType)(unsafe.Pointer(t))
for i := 0; i < int(tt.len); i++ {
- addTypeBits(bv, offset, tt.elem)
+ addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
}
case Struct:
// apply fields
tt := (*structType)(unsafe.Pointer(t))
- start := *offset
for i := range tt.fields {
f := &tt.fields[i]
- off := start + f.offset
- addTypeBits(bv, &off, f.typ)
+ addTypeBits(bv, offset+f.offset, f.typ)
}
}
-
- *offset += t.size
}