From af146490bb04205107cb23e301ec7a8ff927b5fc Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Sat, 31 Oct 2015 00:59:47 +0000 Subject: runtime: Remove now unnecessary pad field from ParFor. It is not needed due to the removal of the ctx field. Reviewed-on: https://go-review.googlesource.com/16525 From-SVN: r229616 --- libgo/go/reflect/all_test.go | 672 +++++++++++++++++++++++++++++++++++++-- libgo/go/reflect/example_test.go | 15 + libgo/go/reflect/export_test.go | 24 +- libgo/go/reflect/type.go | 503 +++++++++++++++++++---------- libgo/go/reflect/value.go | 114 ++++--- 5 files changed, 1082 insertions(+), 246 deletions(-) (limited to 'libgo/go/reflect') diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go index bda87867c74..33ee9ed83cc 100644 --- a/libgo/go/reflect/all_test.go +++ b/libgo/go/reflect/all_test.go @@ -15,6 +15,7 @@ import ( . "reflect" "runtime" "sort" + "strconv" "strings" "sync" "testing" @@ -1052,6 +1053,11 @@ func TestChan(t *testing.T) { ok = cv.TrySend(ValueOf(6)) if !ok { t.Errorf("TrySend on empty chan failed") + select { + case x := <-c: + t.Errorf("TrySend failed but it did send %d", x) + default: + } } else { if i = <-c; i != 6 { t.Errorf("TrySend 6, recv %d", i) @@ -1376,7 +1382,7 @@ func selectWatcher() { for { time.Sleep(1 * time.Second) selectWatch.Lock() - if selectWatch.info != nil && time.Since(selectWatch.now) > 1*time.Second { + if selectWatch.info != nil && time.Since(selectWatch.now) > 10*time.Second { fmt.Fprintf(os.Stderr, "TestSelect:\n%s blocked indefinitely\n", fmtSelect(selectWatch.info)) panic("select stuck") } @@ -1501,6 +1507,17 @@ func TestCallWithStruct(t *testing.T) { } } +func BenchmarkCall(b *testing.B) { + fv := ValueOf(func(a, b string) {}) + b.ReportAllocs() + b.RunParallel(func(pb *testing.PB) { + args := []Value{ValueOf("a"), ValueOf("b")} + for pb.Next() { + fv.Call(args) + } + }) +} + func TestMakeFunc(t *testing.T) { f := dummy fv := MakeFunc(TypeOf(f), func(in []Value) []Value { return in }) @@ -2726,6 +2743,8 @@ var tagGetTests = []struct { {`protobuf:"PB(1,2)"`, `rotobuf`, ``}, {`protobuf:"PB(1,2)" json:"name"`, `json`, `name`}, {`protobuf:"PB(1,2)" json:"name"`, `protobuf`, `PB(1,2)`}, + {`k0:"values contain spaces" k1:"and\ttabs"`, "k0", "values contain spaces"}, + {`k0:"values contain spaces" k1:"and\ttabs"`, "k1", "and\ttabs"}, } func TestTagGet(t *testing.T) { @@ -3377,26 +3396,243 @@ func checkSameType(t *testing.T, x, y interface{}) { } func TestArrayOf(t *testing.T) { - // TODO(rsc): Finish ArrayOf and enable-test. 
- t.Skip("ArrayOf is not finished (and not exported)") - // check construction and use of type not in binary - type T int - at := ArrayOf(10, TypeOf(T(1))) - v := New(at).Elem() - for i := 0; i < v.Len(); i++ { - v.Index(i).Set(ValueOf(T(i))) - } - s := fmt.Sprint(v.Interface()) - want := "[0 1 2 3 4 5 6 7 8 9]" - if s != want { - t.Errorf("constructed array = %s, want %s", s, want) + for _, table := range []struct { + n int + value func(i int) interface{} + comparable bool + want string + }{ + { + n: 0, + value: func(i int) interface{} { type Tint int; return Tint(i) }, + comparable: true, + want: "[]", + }, + { + n: 10, + value: func(i int) interface{} { type Tint int; return Tint(i) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) interface{} { type Tfloat float64; return Tfloat(i) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) interface{} { type Tstring string; return Tstring(strconv.Itoa(i)) }, + comparable: true, + want: "[0 1 2 3 4 5 6 7 8 9]", + }, + { + n: 10, + value: func(i int) interface{} { type Tstruct struct{ V int }; return Tstruct{i} }, + comparable: true, + want: "[{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}]", + }, + { + n: 10, + value: func(i int) interface{} { type Tint int; return []Tint{Tint(i)} }, + comparable: false, + want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]", + }, + { + n: 10, + value: func(i int) interface{} { type Tint int; return [1]Tint{Tint(i)} }, + comparable: true, + want: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]", + }, + { + n: 10, + value: func(i int) interface{} { type Tstruct struct{ V [1]int }; return Tstruct{[1]int{i}} }, + comparable: true, + want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]", + }, + { + n: 10, + value: func(i int) interface{} { type Tstruct struct{ V []int }; return Tstruct{[]int{i}} }, + comparable: false, + want: "[{[0]} {[1]} {[2]} {[3]} {[4]} {[5]} {[6]} {[7]} {[8]} {[9]}]", + }, + { + n: 10, + value: func(i int) interface{} { type TstructUV struct{ U, V int }; return TstructUV{i, i} }, + comparable: true, + want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]", + }, + { + n: 10, + value: func(i int) interface{} { + type TstructUV struct { + U int + V float64 + } + return TstructUV{i, float64(i)} + }, + comparable: true, + want: "[{0 0} {1 1} {2 2} {3 3} {4 4} {5 5} {6 6} {7 7} {8 8} {9 9}]", + }, + } { + at := ArrayOf(table.n, TypeOf(table.value(0))) + v := New(at).Elem() + vok := New(at).Elem() + vnot := New(at).Elem() + for i := 0; i < v.Len(); i++ { + v.Index(i).Set(ValueOf(table.value(i))) + vok.Index(i).Set(ValueOf(table.value(i))) + j := i + if i+1 == v.Len() { + j = i + 1 + } + vnot.Index(i).Set(ValueOf(table.value(j))) // make it differ only by last element + } + s := fmt.Sprint(v.Interface()) + if s != table.want { + t.Errorf("constructed array = %s, want %s", s, table.want) + } + + if table.comparable != at.Comparable() { + t.Errorf("constructed array (%#v) is comparable=%v, want=%v", v.Interface(), at.Comparable(), table.comparable) + } + if table.comparable { + if table.n > 0 { + if DeepEqual(vnot.Interface(), v.Interface()) { + t.Errorf( + "arrays (%#v) compare ok (but should not)", + v.Interface(), + ) + } + } + if !DeepEqual(vok.Interface(), v.Interface()) { + t.Errorf( + "arrays (%#v) compare NOT-ok (but should)", + v.Interface(), + ) + } + } } // check that type already in binary is found + type T int checkSameType(t, Zero(ArrayOf(5, TypeOf(T(1)))).Interface(), [5]T{}) } +func 
TestArrayOfGC(t *testing.T) { + type T *uintptr + tt := TypeOf(T(nil)) + const n = 100 + var x []interface{} + for i := 0; i < n; i++ { + v := New(ArrayOf(n, tt)).Elem() + for j := 0; j < v.Len(); j++ { + p := new(uintptr) + *p = uintptr(i*n + j) + v.Index(j).Set(ValueOf(p).Convert(tt)) + } + x = append(x, v.Interface()) + } + runtime.GC() + + for i, xi := range x { + v := ValueOf(xi) + for j := 0; j < v.Len(); j++ { + k := v.Index(j).Elem().Interface() + if k != uintptr(i*n+j) { + t.Errorf("lost x[%d][%d] = %d, want %d", i, j, k, i*n+j) + } + } + } +} + +func TestArrayOfAlg(t *testing.T) { + at := ArrayOf(6, TypeOf(byte(0))) + v1 := New(at).Elem() + v2 := New(at).Elem() + if v1.Interface() != v1.Interface() { + t.Errorf("constructed array %v not equal to itself", v1.Interface()) + } + v1.Index(5).Set(ValueOf(byte(1))) + if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 { + t.Errorf("constructed arrays %v and %v should not be equal", i1, i2) + } + + at = ArrayOf(6, TypeOf([]int(nil))) + v1 = New(at).Elem() + shouldPanic(func() { _ = v1.Interface() == v1.Interface() }) +} + +func TestArrayOfGenericAlg(t *testing.T) { + at1 := ArrayOf(5, TypeOf(string(""))) + at := ArrayOf(6, at1) + v1 := New(at).Elem() + v2 := New(at).Elem() + if v1.Interface() != v1.Interface() { + t.Errorf("constructed array %v not equal to itself", v1.Interface()) + } + + v1.Index(0).Index(0).Set(ValueOf("abc")) + v2.Index(0).Index(0).Set(ValueOf("efg")) + if i1, i2 := v1.Interface(), v2.Interface(); i1 == i2 { + t.Errorf("constructed arrays %v and %v should not be equal", i1, i2) + } + + v1.Index(0).Index(0).Set(ValueOf("abc")) + v2.Index(0).Index(0).Set(ValueOf((v1.Index(0).Index(0).String() + " ")[:3])) + if i1, i2 := v1.Interface(), v2.Interface(); i1 != i2 { + t.Errorf("constructed arrays %v and %v should be equal", i1, i2) + } + + // Test hash + m := MakeMap(MapOf(at, TypeOf(int(0)))) + m.SetMapIndex(v1, ValueOf(1)) + if i1, i2 := v1.Interface(), v2.Interface(); !m.MapIndex(v2).IsValid() { + t.Errorf("constructed arrays %v and %v have different hashes", i1, i2) + } +} + +func TestArrayOfDirectIface(t *testing.T) { + t.Skip("skipping test because gccgo uses a different directiface value") + { + type T [1]*byte + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(ArrayOf(1, PtrTo(TypeOf(int8(0))))).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 != 0 { + t.Errorf("got p1=%v. want=%v", p1, nil) + } + + if p2 != 0 { + t.Errorf("got p2=%v. want=%v", p2, nil) + } + } + { + type T [0]*byte + i1 := Zero(TypeOf(T{})).Interface() + v1 := ValueOf(&i1).Elem() + p1 := v1.InterfaceData()[1] + + i2 := Zero(ArrayOf(0, PtrTo(TypeOf(int8(0))))).Interface() + v2 := ValueOf(&i2).Elem() + p2 := v2.InterfaceData()[1] + + if p1 == 0 { + t.Errorf("got p1=%v. want=not-%v", p1, nil) + } + + if p2 == 0 { + t.Errorf("got p2=%v. 
want=not-%v", p2, nil) + } + } +} + func TestSliceOf(t *testing.T) { // check construction and use of type not in binary type T int @@ -3489,6 +3725,26 @@ func TestChanOf(t *testing.T) { checkSameType(t, Zero(ChanOf(BothDir, TypeOf(T1(1)))).Interface(), (chan T1)(nil)) } +func TestChanOfDir(t *testing.T) { + // check construction and use of type not in binary + type T string + crt := ChanOf(RecvDir, TypeOf(T(""))) + cst := ChanOf(SendDir, TypeOf(T(""))) + + // check that type already in binary is found + type T1 int + checkSameType(t, Zero(ChanOf(RecvDir, TypeOf(T1(1)))).Interface(), (<-chan T1)(nil)) + checkSameType(t, Zero(ChanOf(SendDir, TypeOf(T1(1)))).Interface(), (chan<- T1)(nil)) + + // check String form of ChanDir + if crt.ChanDir().String() != "<-chan" { + t.Errorf("chan dir: have %q, want %q", crt.ChanDir().String(), "<-chan") + } + if cst.ChanDir().String() != "chan<-" { + t.Errorf("chan dir: have %q, want %q", cst.ChanDir().String(), "chan<-") + } +} + func TestChanOfGC(t *testing.T) { done := make(chan bool, 1) go func() { @@ -3632,6 +3888,67 @@ func TestMapOfGCValues(t *testing.T) { } } +func TestTypelinksSorted(t *testing.T) { + var last string + for i, n := range TypeLinks() { + if n < last { + t.Errorf("typelinks not sorted: %q [%d] > %q [%d]", last, i-1, n, i) + } + last = n + } +} + +func TestFuncOf(t *testing.T) { + // check construction and use of type not in binary + type K string + type V float64 + + fn := func(args []Value) []Value { + if len(args) != 1 { + t.Errorf("args == %v, want exactly one arg", args) + } else if args[0].Type() != TypeOf(K("")) { + t.Errorf("args[0] is type %v, want %v", args[0].Type, TypeOf(K(""))) + } else if args[0].String() != "gopher" { + t.Errorf("args[0] = %q, want %q", args[0].String(), "gopher") + } + return []Value{ValueOf(V(3.14))} + } + v := MakeFunc(FuncOf([]Type{TypeOf(K(""))}, []Type{TypeOf(V(0))}, false), fn) + + outs := v.Call([]Value{ValueOf(K("gopher"))}) + if len(outs) != 1 { + t.Fatalf("v.Call returned %v, want exactly one result", outs) + } else if outs[0].Type() != TypeOf(V(0)) { + t.Fatalf("c.Call[0] is type %v, want %v", outs[0].Type, TypeOf(V(0))) + } + f := outs[0].Float() + if f != 3.14 { + t.Errorf("constructed func returned %f, want %f", f, 3.14) + } + + // check that types already in binary are found + type T1 int + testCases := []struct { + in, out []Type + variadic bool + want interface{} + }{ + {in: []Type{TypeOf(T1(0))}, want: (func(T1))(nil)}, + {in: []Type{TypeOf(int(0))}, want: (func(int))(nil)}, + {in: []Type{SliceOf(TypeOf(int(0)))}, variadic: true, want: (func(...int))(nil)}, + {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false)}, want: (func(int) bool)(nil)}, + {in: []Type{TypeOf(int(0))}, out: []Type{TypeOf(false), TypeOf("")}, want: (func(int) (bool, string))(nil)}, + } + for _, tt := range testCases { + checkSameType(t, Zero(FuncOf(tt.in, tt.out, tt.variadic)).Interface(), tt.want) + } + + // check that variadic requires last element be a slice. 
+ FuncOf([]Type{TypeOf(1), TypeOf(""), SliceOf(TypeOf(false))}, nil, true) + shouldPanic(func() { FuncOf([]Type{TypeOf(0), TypeOf(""), TypeOf(false)}, nil, true) }) + shouldPanic(func() { FuncOf(nil, nil, true) }) +} + type B1 struct { X int Y int @@ -4077,15 +4394,16 @@ func TestCallGC(t *testing.T) { } type funcLayoutTest struct { - rcvr, t Type - argsize, retOffset uintptr - stack []byte + rcvr, t Type + size, argsize, retOffset uintptr + stack []byte // pointer bitmap: 1 is pointer, 0 is scalar (or uninitialized) + gc []byte } var funcLayoutTests []funcLayoutTest func init() { - var argAlign = PtrSize + var argAlign uintptr = PtrSize if runtime.GOARCH == "amd64p32" { argAlign = 2 * PtrSize } @@ -4097,24 +4415,28 @@ func init() { funcLayoutTest{ nil, ValueOf(func(a, b string) string { return "" }).Type(), + 6 * PtrSize, 4 * PtrSize, 4 * PtrSize, - []byte{BitsPointer, BitsScalar, BitsPointer}, + []byte{1, 0, 1}, + []byte{1, 0, 1, 0, 1}, }) var r []byte if PtrSize == 4 { - r = []byte{BitsScalar, BitsScalar, BitsScalar, BitsPointer} + r = []byte{0, 0, 0, 1} } else { - r = []byte{BitsScalar, BitsScalar, BitsPointer} + r = []byte{0, 0, 1} } funcLayoutTests = append(funcLayoutTests, funcLayoutTest{ nil, ValueOf(func(a, b, c uint32, p *byte, d uint16) {}).Type(), + roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign), roundup(3*4, PtrSize) + PtrSize + 2, roundup(roundup(3*4, PtrSize)+PtrSize+2, argAlign), r, + r, }) funcLayoutTests = append(funcLayoutTests, @@ -4123,7 +4445,9 @@ func init() { ValueOf(func(a map[int]int, b uintptr, c interface{}) {}).Type(), 4 * PtrSize, 4 * PtrSize, - []byte{BitsPointer, BitsScalar, BitsPointer, BitsPointer}, + 4 * PtrSize, + []byte{1, 0, 1, 1}, + []byte{1, 0, 1, 1}, }) type S struct { @@ -4136,23 +4460,66 @@ func init() { ValueOf(func(a S) {}).Type(), 4 * PtrSize, 4 * PtrSize, - []byte{BitsScalar, BitsScalar, BitsPointer, BitsPointer}, + 4 * PtrSize, + []byte{0, 0, 1, 1}, + []byte{0, 0, 1, 1}, }) funcLayoutTests = append(funcLayoutTests, funcLayoutTest{ ValueOf((*byte)(nil)).Type(), ValueOf(func(a uintptr, b *int) {}).Type(), + roundup(3*PtrSize, argAlign), 3 * PtrSize, roundup(3*PtrSize, argAlign), - []byte{BitsPointer, BitsScalar, BitsPointer}, + []byte{1, 0, 1}, + []byte{1, 0, 1}, + }) + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func(a uintptr) {}).Type(), + roundup(PtrSize, argAlign), + PtrSize, + roundup(PtrSize, argAlign), + []byte{}, + []byte{}, + }) + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + nil, + ValueOf(func() uintptr { return 0 }).Type(), + PtrSize, + 0, + 0, + []byte{}, + []byte{}, + }) + + funcLayoutTests = append(funcLayoutTests, + funcLayoutTest{ + ValueOf(uintptr(0)).Type(), + ValueOf(func(a uintptr) {}).Type(), + 2 * PtrSize, + 2 * PtrSize, + 2 * PtrSize, + []byte{1}, + []byte{1}, + // Note: this one is tricky, as the receiver is not a pointer. But we + // pass the receiver by reference to the autogenerated pointer-receiver + // version of the function. 
}) } func TestFuncLayout(t *testing.T) { t.Skip("gccgo does not use funcLayout") for _, lt := range funcLayoutTests { - _, argsize, retOffset, stack := FuncLayout(lt.t, lt.rcvr) + typ, argsize, retOffset, stack, gc, ptrs := FuncLayout(lt.t, lt.rcvr) + if typ.Size() != lt.size { + t.Errorf("funcLayout(%v, %v).size=%d, want %d", lt.t, lt.rcvr, typ.Size(), lt.size) + } if argsize != lt.argsize { t.Errorf("funcLayout(%v, %v).argsize=%d, want %d", lt.t, lt.rcvr, argsize, lt.argsize) } @@ -4162,5 +4529,260 @@ func TestFuncLayout(t *testing.T) { if !bytes.Equal(stack, lt.stack) { t.Errorf("funcLayout(%v, %v).stack=%v, want %v", lt.t, lt.rcvr, stack, lt.stack) } + if !bytes.Equal(gc, lt.gc) { + t.Errorf("funcLayout(%v, %v).gc=%v, want %v", lt.t, lt.rcvr, gc, lt.gc) + } + if ptrs && len(stack) == 0 || !ptrs && len(stack) > 0 { + t.Errorf("funcLayout(%v, %v) pointers flag=%v, want %v", lt.t, lt.rcvr, ptrs, !ptrs) + } + } +} + +func verifyGCBits(t *testing.T, typ Type, bits []byte) { + heapBits := GCBits(New(typ).Interface()) + if !bytes.Equal(heapBits, bits) { + t.Errorf("heapBits incorrect for %v\nhave %v\nwant %v", typ, heapBits, bits) + } +} + +func verifyGCBitsSlice(t *testing.T, typ Type, cap int, bits []byte) { + // Creating a slice causes the runtime to repeat a bitmap, + // which exercises a different path from making the compiler + // repeat a bitmap for a small array or executing a repeat in + // a GC program. + val := MakeSlice(typ, 0, cap) + data := NewAt(ArrayOf(cap, typ), unsafe.Pointer(val.Pointer())) + heapBits := GCBits(data.Interface()) + // Repeat the bitmap for the slice size, trimming scalars in + // the last element. + bits = rep(cap, bits) + for len(bits) > 2 && bits[len(bits)-1] == 0 { + bits = bits[:len(bits)-1] + } + if !bytes.Equal(heapBits, bits) { + t.Errorf("heapBits incorrect for make(%v, 0, %v)\nhave %v\nwant %v", typ, cap, heapBits, bits) + } +} + +func TestGCBits(t *testing.T) { + t.Skip("gccgo does not use gcbits yet") + + verifyGCBits(t, TypeOf((*byte)(nil)), []byte{1}) + + // Building blocks for types seen by the compiler (like [2]Xscalar). + // The compiler will create the type structures for the derived types, + // including their GC metadata. + type Xscalar struct{ x uintptr } + type Xptr struct{ x *byte } + type Xptrscalar struct { + *byte + uintptr + } + type Xscalarptr struct { + uintptr + *byte + } + type Xbigptrscalar struct { + _ [100]*byte + _ [100]uintptr + } + + var Tscalar, Tint64, Tptr, Tscalarptr, Tptrscalar, Tbigptrscalar Type + { + // Building blocks for types constructed by reflect. + // This code is in a separate block so that code below + // cannot accidentally refer to these. + // The compiler must NOT see types derived from these + // (for example, [2]Scalar must NOT appear in the program), + // or else reflect will use it instead of having to construct one. + // The goal is to test the construction. 
+ type Scalar struct{ x uintptr } + type Ptr struct{ x *byte } + type Ptrscalar struct { + *byte + uintptr + } + type Scalarptr struct { + uintptr + *byte + } + type Bigptrscalar struct { + _ [100]*byte + _ [100]uintptr + } + type Int64 int64 + Tscalar = TypeOf(Scalar{}) + Tint64 = TypeOf(Int64(0)) + Tptr = TypeOf(Ptr{}) + Tscalarptr = TypeOf(Scalarptr{}) + Tptrscalar = TypeOf(Ptrscalar{}) + Tbigptrscalar = TypeOf(Bigptrscalar{}) + } + + empty := []byte{} + + verifyGCBits(t, TypeOf(Xscalar{}), empty) + verifyGCBits(t, Tscalar, empty) + verifyGCBits(t, TypeOf(Xptr{}), lit(1)) + verifyGCBits(t, Tptr, lit(1)) + verifyGCBits(t, TypeOf(Xscalarptr{}), lit(0, 1)) + verifyGCBits(t, Tscalarptr, lit(0, 1)) + verifyGCBits(t, TypeOf(Xptrscalar{}), lit(1)) + verifyGCBits(t, Tptrscalar, lit(1)) + + verifyGCBits(t, TypeOf([0]Xptr{}), empty) + verifyGCBits(t, ArrayOf(0, Tptr), empty) + verifyGCBits(t, TypeOf([1]Xptrscalar{}), lit(1)) + verifyGCBits(t, ArrayOf(1, Tptrscalar), lit(1)) + verifyGCBits(t, TypeOf([2]Xscalar{}), empty) + verifyGCBits(t, ArrayOf(2, Tscalar), empty) + verifyGCBits(t, TypeOf([10000]Xscalar{}), empty) + verifyGCBits(t, ArrayOf(10000, Tscalar), empty) + verifyGCBits(t, TypeOf([2]Xptr{}), lit(1, 1)) + verifyGCBits(t, ArrayOf(2, Tptr), lit(1, 1)) + verifyGCBits(t, TypeOf([10000]Xptr{}), rep(10000, lit(1))) + verifyGCBits(t, ArrayOf(10000, Tptr), rep(10000, lit(1))) + verifyGCBits(t, TypeOf([2]Xscalarptr{}), lit(0, 1, 0, 1)) + verifyGCBits(t, ArrayOf(2, Tscalarptr), lit(0, 1, 0, 1)) + verifyGCBits(t, TypeOf([10000]Xscalarptr{}), rep(10000, lit(0, 1))) + verifyGCBits(t, ArrayOf(10000, Tscalarptr), rep(10000, lit(0, 1))) + verifyGCBits(t, TypeOf([2]Xptrscalar{}), lit(1, 0, 1)) + verifyGCBits(t, ArrayOf(2, Tptrscalar), lit(1, 0, 1)) + verifyGCBits(t, TypeOf([10000]Xptrscalar{}), rep(10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(10000, Tptrscalar), rep(10000, lit(1, 0))) + verifyGCBits(t, TypeOf([1][10000]Xptrscalar{}), rep(10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(1, ArrayOf(10000, Tptrscalar)), rep(10000, lit(1, 0))) + verifyGCBits(t, TypeOf([2][10000]Xptrscalar{}), rep(2*10000, lit(1, 0))) + verifyGCBits(t, ArrayOf(2, ArrayOf(10000, Tptrscalar)), rep(2*10000, lit(1, 0))) + verifyGCBits(t, TypeOf([4]Xbigptrscalar{}), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1)))) + verifyGCBits(t, ArrayOf(4, Tbigptrscalar), join(rep(3, join(rep(100, lit(1)), rep(100, lit(0)))), rep(100, lit(1)))) + + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 0, empty) + verifyGCBitsSlice(t, SliceOf(Tptr), 0, empty) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 1, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptrscalar), 1, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 2, lit(0)) + verifyGCBitsSlice(t, SliceOf(Tscalar), 2, lit(0)) + verifyGCBitsSlice(t, TypeOf([]Xscalar{}), 10000, lit(0)) + verifyGCBitsSlice(t, SliceOf(Tscalar), 10000, lit(0)) + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 2, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptr), 2, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xptr{}), 10000, lit(1)) + verifyGCBitsSlice(t, SliceOf(Tptr), 10000, lit(1)) + verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 2, lit(0, 1)) + verifyGCBitsSlice(t, SliceOf(Tscalarptr), 2, lit(0, 1)) + verifyGCBitsSlice(t, TypeOf([]Xscalarptr{}), 10000, lit(0, 1)) + verifyGCBitsSlice(t, SliceOf(Tscalarptr), 10000, lit(0, 1)) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 2, lit(1, 0)) + verifyGCBitsSlice(t, SliceOf(Tptrscalar), 2, lit(1, 0)) + verifyGCBitsSlice(t, TypeOf([]Xptrscalar{}), 10000, lit(1, 0)) + verifyGCBitsSlice(t, 
SliceOf(Tptrscalar), 10000, lit(1, 0)) + verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 1, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 1, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, TypeOf([][10000]Xptrscalar{}), 2, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, SliceOf(ArrayOf(10000, Tptrscalar)), 2, rep(10000, lit(1, 0))) + verifyGCBitsSlice(t, TypeOf([]Xbigptrscalar{}), 4, join(rep(100, lit(1)), rep(100, lit(0)))) + verifyGCBitsSlice(t, SliceOf(Tbigptrscalar), 4, join(rep(100, lit(1)), rep(100, lit(0)))) + + verifyGCBits(t, TypeOf((chan [100]Xscalar)(nil)), lit(1)) + verifyGCBits(t, ChanOf(BothDir, ArrayOf(100, Tscalar)), lit(1)) + + verifyGCBits(t, TypeOf((func([10000]Xscalarptr))(nil)), lit(1)) + verifyGCBits(t, FuncOf([]Type{ArrayOf(10000, Tscalarptr)}, nil, false), lit(1)) + + verifyGCBits(t, TypeOf((map[[10000]Xscalarptr]Xscalar)(nil)), lit(1)) + verifyGCBits(t, MapOf(ArrayOf(10000, Tscalarptr), Tscalar), lit(1)) + + verifyGCBits(t, TypeOf((*[10000]Xscalar)(nil)), lit(1)) + verifyGCBits(t, PtrTo(ArrayOf(10000, Tscalar)), lit(1)) + + verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1)) + verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1)) + + hdr := make([]byte, 8/PtrSize) + + verifyMapBucket := func(t *testing.T, k, e Type, m interface{}, want []byte) { + verifyGCBits(t, MapBucketOf(k, e), want) + verifyGCBits(t, CachedBucketOf(TypeOf(m)), want) + } + verifyMapBucket(t, + Tscalar, Tptr, + map[Xscalar]Xptr(nil), + join(hdr, rep(8, lit(0)), rep(8, lit(1)), lit(1))) + verifyMapBucket(t, + Tscalarptr, Tptr, + map[Xscalarptr]Xptr(nil), + join(hdr, rep(8, lit(0, 1)), rep(8, lit(1)), lit(1))) + verifyMapBucket(t, Tint64, Tptr, + map[int64]Xptr(nil), + join(hdr, rep(8, rep(8/PtrSize, lit(0))), rep(8, lit(1)), naclpad(), lit(1))) + verifyMapBucket(t, + Tscalar, Tscalar, + map[Xscalar]Xscalar(nil), + empty) + verifyMapBucket(t, + ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar), + map[[2]Xscalarptr][3]Xptrscalar(nil), + join(hdr, rep(8*2, lit(0, 1)), rep(8*3, lit(1, 0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar), + map[[64 / PtrSize]Xscalarptr][64 / PtrSize]Xptrscalar(nil), + join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8*64/PtrSize, lit(1, 0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize, Tptrscalar), + map[[64/PtrSize + 1]Xscalarptr][64 / PtrSize]Xptrscalar(nil), + join(hdr, rep(8, lit(1)), rep(8*64/PtrSize, lit(1, 0)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/PtrSize, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar), + map[[64 / PtrSize]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil), + join(hdr, rep(8*64/PtrSize, lit(0, 1)), rep(8, lit(1)), lit(1))) + verifyMapBucket(t, + ArrayOf(64/PtrSize+1, Tscalarptr), ArrayOf(64/PtrSize+1, Tptrscalar), + map[[64/PtrSize + 1]Xscalarptr][64/PtrSize + 1]Xptrscalar(nil), + join(hdr, rep(8, lit(1)), rep(8, lit(1)), lit(1))) +} + +func naclpad() []byte { + if runtime.GOARCH == "amd64p32" { + return lit(0) + } + return nil +} + +func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) } +func join(b ...[]byte) []byte { return bytes.Join(b, nil) } +func lit(x ...byte) []byte { return x } + +func TestTypeOfTypeOf(t *testing.T) { + // Check that all the type constructors return concrete *rtype implementations. + // It's difficult to test directly because the reflect package is only at arm's length. + // The easiest thing to do is just call a function that crashes if it doesn't get an *rtype. 
+ check := func(name string, typ Type) { + if underlying := TypeOf(typ).String(); underlying != "*reflect.rtype" { + t.Errorf("%v returned %v, not *reflect.rtype", name, underlying) + } + } + + type T struct{ int } + check("TypeOf", TypeOf(T{})) + + check("ArrayOf", ArrayOf(10, TypeOf(T{}))) + check("ChanOf", ChanOf(BothDir, TypeOf(T{}))) + check("FuncOf", FuncOf([]Type{TypeOf(T{})}, nil, false)) + check("MapOf", MapOf(TypeOf(T{}), TypeOf(T{}))) + check("PtrTo", PtrTo(TypeOf(T{}))) + check("SliceOf", SliceOf(TypeOf(T{}))) +} + +type XM struct{} + +func (*XM) String() string { return "" } + +func TestPtrToMethods(t *testing.T) { + var y struct{ XM } + yp := New(TypeOf(y)).Interface() + _, ok := yp.(fmt.Stringer) + if !ok { + t.Fatal("does not implement Stringer, but should") } } diff --git a/libgo/go/reflect/example_test.go b/libgo/go/reflect/example_test.go index cca28eeece8..8ebf9765b8d 100644 --- a/libgo/go/reflect/example_test.go +++ b/libgo/go/reflect/example_test.go @@ -6,6 +6,8 @@ package reflect_test import ( "fmt" + "io" + "os" "reflect" ) @@ -64,3 +66,16 @@ func ExampleStructTag() { // Output: // blue gopher } + +func ExampleTypeOf() { + // As interface types are only used for static typing, a + // common idiom to find the reflection Type for an interface + // type Foo is to use a *Foo value. + writerType := reflect.TypeOf((*io.Writer)(nil)).Elem() + + fileType := reflect.TypeOf((*os.File)(nil)) + fmt.Println(fileType.Implements(writerType)) + + // Output: + // true +} diff --git a/libgo/go/reflect/export_test.go b/libgo/go/reflect/export_test.go index 49c45e82b2e..89473d352a7 100644 --- a/libgo/go/reflect/export_test.go +++ b/libgo/go/reflect/export_test.go @@ -15,13 +15,29 @@ func IsRO(v Value) bool { return v.flag&flagRO != 0 } -var ArrayOf = arrayOf var CallGC = &callGC const PtrSize = ptrSize -const BitsPointer = bitsPointer -const BitsScalar = bitsScalar -func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte) { +func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, stack []byte, gc []byte, ptrs bool) { return } + +func TypeLinks() []string { + return nil +} + +var GCBits = gcbits + +// Will be provided by runtime eventually. +func gcbits(interface{}) []byte { + return nil +} + +func MapBucketOf(x, y Type) Type { + return nil +} + +func CachedBucketOf(m Type) Type { + return nil +} diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go index 5cdfba55648..e488938a13f 100644 --- a/libgo/go/reflect/type.go +++ b/libgo/go/reflect/type.go @@ -12,7 +12,7 @@ // for that type. // // See "The Laws of Reflection" for an introduction to reflection in Go: -// http://golang.org/doc/articles/laws_of_reflection.html +// https://golang.org/doc/articles/laws_of_reflection.html package reflect import ( @@ -90,16 +90,16 @@ type Type interface { // Kind returns the specific kind of this type. Kind() Kind - // Implements returns true if the type implements the interface type u. + // Implements reports whether the type implements the interface type u. Implements(u Type) bool - // AssignableTo returns true if a value of the type is assignable to type u. + // AssignableTo reports whether a value of the type is assignable to type u. AssignableTo(u Type) bool - // ConvertibleTo returns true if a value of the type is convertible to type u. + // ConvertibleTo reports whether a value of the type is convertible to type u. ConvertibleTo(u Type) bool - // Comparable returns true if values of this type are comparable. 
+ // Comparable reports whether values of this type are comparable. Comparable() bool // Methods applicable only to some types, depending on Kind. @@ -123,7 +123,7 @@ type Type interface { // It panics if the type's Kind is not Chan. ChanDir() ChanDir - // IsVariadic returns true if a function type's final input parameter + // IsVariadic reports whether a function type's final input parameter // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's // implicit actual type []T. // @@ -204,9 +204,9 @@ type Type interface { // See golang.org/issue/4876 for more details. /* - * These data structures are known to the compiler (../../cmd/gc/reflect.c). + * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go). * A few are known to ../runtime/type.go to convey to debuggers. - * They are also known to ../runtime/type.h. + * They are also known to ../runtime/type.go. */ // A Kind represents the specific kind of type that a Type represents. @@ -255,8 +255,8 @@ type rtype struct { size uintptr hash uint32 // hash of type; avoids computation in hash tables - hashfn func(unsafe.Pointer, uintptr) // hash function - equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) // equality function + hashfn func(unsafe.Pointer, uintptr) uintptr // hash function + equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function gc unsafe.Pointer // garbage collection data string *string // string form; unnecessary but undeniably useful @@ -392,7 +392,7 @@ type Method struct { // method name. It is empty for upper case (exported) method names. // The combination of PkgPath and Name uniquely identifies a method // in a method set. - // See http://golang.org/ref/spec#Uniqueness_of_identifiers + // See https://golang.org/ref/spec#Uniqueness_of_identifiers Name string PkgPath string @@ -548,7 +548,7 @@ func (t *uncommonType) MethodByName(name string) (m Method, ok bool) { return } -// TODO(rsc): 6g supplies these, but they are not +// TODO(rsc): gc supplies these, but they are not // as efficient as they could be: they have commonType // as the receiver instead of *rtype. func (t *rtype) NumMethod() int { @@ -758,7 +758,7 @@ type StructField struct { // Name is the field name. // PkgPath is the package path that qualifies a lower case (unexported) // field name. It is empty for upper case (exported) field names. - // See http://golang.org/ref/spec#Uniqueness_of_identifiers + // See https://golang.org/ref/spec#Uniqueness_of_identifiers Name string PkgPath string @@ -784,8 +784,11 @@ type StructTag string // If the tag does not have the conventional format, the value // returned by Get is unspecified. func (tag StructTag) Get(key string) string { + // When modifying this code, also update the validateStructTag code + // in golang.org/x/tools/cmd/vet/structtag.go. + for tag != "" { - // skip leading space + // Skip leading space. i := 0 for i < len(tag) && tag[i] == ' ' { i++ @@ -795,19 +798,21 @@ func (tag StructTag) Get(key string) string { break } - // scan to colon. - // a space or a quote is a syntax error + // Scan to colon. A space, a quote or a control character is a syntax error. + // Strictly speaking, control chars include the range [0x7f, 0x9f], not just + // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters + // as it is simpler to inspect the tag's bytes than the tag's runes. 
i = 0 - for i < len(tag) && tag[i] != ' ' && tag[i] != ':' && tag[i] != '"' { + for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f { i++ } - if i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { + if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' { break } name := string(tag[:i]) tag = tag[i+1:] - // scan quoted string to find value + // Scan quoted string to find value. i = 1 for i < len(tag) && tag[i] != '"' { if tag[i] == '\\' { @@ -822,7 +827,10 @@ func (tag StructTag) Get(key string) string { tag = tag[i+1:] if key == name { - value, _ := strconv.Unquote(qvalue) + value, err := strconv.Unquote(qvalue) + if err != nil { + break + } return value } } @@ -1024,8 +1032,8 @@ func (t *structType) FieldByName(name string) (f StructField, present bool) { return t.FieldByNameFunc(func(s string) bool { return s == name }) } -// TypeOf returns the reflection Type of the value in the interface{}. -// TypeOf(nil) returns nil. +// TypeOf returns the reflection Type that represents the dynamic type of i. +// If i is a nil interface value, TypeOf returns nil. func TypeOf(i interface{}) Type { eface := *(*emptyInterface)(unsafe.Pointer(&i)) return toType(eface.typ) @@ -1109,7 +1117,8 @@ func (t *rtype) ptrTo() *rtype { return r.(*rtype) } - // initialize p using *byte's ptrType as a prototype. + // Create a new ptrType starting with the description + // of an *unsafe.Pointer. p = new(ptrType) var iptr interface{} = (*unsafe.Pointer)(nil) prototype := *(**ptrType)(unsafe.Pointer(&iptr)) @@ -1145,6 +1154,7 @@ func (t *rtype) ptrTo() *rtype { q := canonicalize(&p.rtype) p = (*ptrType)(unsafe.Pointer(q.(*rtype))) + ptrMap.m[t] = p ptrMap.Unlock() return &p.rtype } @@ -1211,7 +1221,7 @@ func (t *rtype) Comparable() bool { } } -// implements returns true if the type V implements the interface type T. +// implements reports whether the type V implements the interface type T. func implements(T, V *rtype) bool { if T.Kind() != Interface { return false @@ -1232,7 +1242,7 @@ func implements(T, V *rtype) bool { // methods along the way, or else V does not implement T. // This lets us run the scan in overall linear time instead of // the quadratic time a naive search would require. - // See also ../runtime/iface.c. + // See also ../runtime/iface.go. if V.Kind() == Interface { v := (*interfaceType)(unsafe.Pointer(V)) i := 0 @@ -1265,9 +1275,9 @@ func implements(T, V *rtype) bool { return false } -// directlyAssignable returns true if a value x of type V can be directly +// directlyAssignable reports whether a value x of type V can be directly // assigned (using memmove) to a value of type T. -// http://golang.org/doc/go_spec.html#Assignability +// https://golang.org/doc/go_spec.html#Assignability // Ignoring the interface rules (implemented elsewhere) // and the ideal constant rules (no ideal constants at run time). func directlyAssignable(T, V *rtype) bool { @@ -1445,6 +1455,14 @@ type chanGC struct { end uintptr // _GC_END } +// The funcLookupCache caches FuncOf lookups. +// FuncOf does not share the common lookupCache since cacheKey is not +// sufficient to represent functions unambiguously. +var funcLookupCache struct { + sync.RWMutex + m map[uint32][]*rtype // keyed by hash calculated in FuncOf +} + // ChanOf returns the channel type with the given direction and element type. // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int. 
// @@ -1485,6 +1503,7 @@ func ChanOf(dir ChanDir, t Type) Type { prototype := *(**chanType)(unsafe.Pointer(&ichan)) ch := new(chanType) *ch = *prototype + ch.dir = uintptr(dir) ch.string = &s // gccgo uses a different hash. @@ -1545,9 +1564,8 @@ func MapOf(key, elem Type) Type { // Make a map type. var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil) - prototype := *(**mapType)(unsafe.Pointer(&imap)) mt := new(mapType) - *mt = *prototype + *mt = **(**mapType)(unsafe.Pointer(&imap)) mt.string = &s // gccgo uses a different hash @@ -1576,154 +1594,243 @@ func MapOf(key, elem Type) Type { return cachePut(ckey, &mt.rtype) } -// gcProg is a helper type for generatation of GC pointer info. -type gcProg struct { - gc []byte - size uintptr // size of type in bytes - hasPtr bool -} +// FuncOf returns the function type with the given argument and result types. +// For example if k represents int and e represents string, +// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string. +// +// The variadic argument controls whether the function is variadic. FuncOf +// panics if the in[len(in)-1] does not represent a slice and variadic is +// true. +func FuncOf(in, out []Type, variadic bool) Type { + if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) { + panic("reflect.FuncOf: last arg of variadic func must be slice") + } + + // Make a func type. + var ifunc interface{} = (func())(nil) + prototype := *(**funcType)(unsafe.Pointer(&ifunc)) + ft := new(funcType) + *ft = *prototype + + // Build a hash and minimally populate ft. + var hash uint32 = 8 + var fin, fout []*rtype + shift := uint(1) + for _, in := range in { + t := in.(*rtype) + fin = append(fin, t) + hash += t.hash << shift + shift++ + } + shift = 2 + for _, out := range out { + t := out.(*rtype) + fout = append(fout, t) + hash += t.hash << shift + shift++ + } + if variadic { + hash++ + } + hash <<= 4 + ft.hash = hash + ft.in = fin + ft.out = fout + ft.dotdotdot = variadic -func (gc *gcProg) append(v byte) { - gc.align(unsafe.Sizeof(uintptr(0))) - gc.appendWord(v) -} + // Look in cache. + funcLookupCache.RLock() + for _, t := range funcLookupCache.m[hash] { + if haveIdenticalUnderlyingType(&ft.rtype, t) { + funcLookupCache.RUnlock() + return t + } + } + funcLookupCache.RUnlock() -// Appends t's type info to the current program. -func (gc *gcProg) appendProg(t *rtype) { - gc.align(uintptr(t.align)) - if !t.pointers() { - gc.size += t.size - return + // Not in cache, lock and retry. + funcLookupCache.Lock() + defer funcLookupCache.Unlock() + if funcLookupCache.m == nil { + funcLookupCache.m = make(map[uint32][]*rtype) } - switch t.Kind() { - default: - panic("reflect: non-pointer type marked as having pointers") - case Ptr, UnsafePointer, Chan, Func, Map: - gc.appendWord(bitsPointer) - case Slice: - gc.appendWord(bitsPointer) - gc.appendWord(bitsScalar) - gc.appendWord(bitsScalar) - case String: - gc.appendWord(bitsPointer) - gc.appendWord(bitsScalar) - case Array: - c := t.Len() - e := t.Elem().common() - for i := 0; i < c; i++ { - gc.appendProg(e) + for _, t := range funcLookupCache.m[hash] { + if haveIdenticalUnderlyingType(&ft.rtype, t) { + return t } - case Interface: - gc.appendWord(bitsMultiWord) - if t.NumMethod() == 0 { - gc.appendWord(bitsEface) - } else { - gc.appendWord(bitsIface) - } - case Struct: - c := t.NumField() - for i := 0; i < c; i++ { - gc.appendProg(t.Field(i).Type.common()) - } - gc.align(uintptr(t.align)) } + + str := funcStr(ft) + + // Populate the remaining fields of ft and store in cache. 
+ ft.string = &str + ft.uncommonType = nil + ft.ptrToThis = nil + + // TODO(cmang): Generate GC data for funcs. + ft.gc = unsafe.Pointer(&ptrDataGCProg) + + funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype) + + return toType(&ft.rtype) } -func (gc *gcProg) appendWord(v byte) { - ptrsize := unsafe.Sizeof(uintptr(0)) - if gc.size%ptrsize != 0 { - panic("reflect: unaligned GC program") +// funcStr builds a string representation of a funcType. +func funcStr(ft *funcType) string { + repr := make([]byte, 0, 64) + repr = append(repr, "func("...) + for i, t := range ft.in { + if i > 0 { + repr = append(repr, ", "...) + } + if ft.dotdotdot && i == len(ft.in)-1 { + repr = append(repr, "..."...) + repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...) + } else { + repr = append(repr, *t.string...) + } } - nptr := gc.size / ptrsize - for uintptr(len(gc.gc)) < nptr/2+1 { - gc.gc = append(gc.gc, 0x44) // BitsScalar + repr = append(repr, ')') + if l := len(ft.out); l == 1 { + repr = append(repr, ' ') + } else if l > 1 { + repr = append(repr, " ("...) } - gc.gc[nptr/2] &= ^(3 << ((nptr%2)*4 + 2)) - gc.gc[nptr/2] |= v << ((nptr%2)*4 + 2) - gc.size += ptrsize - if v == bitsPointer { - gc.hasPtr = true + for i, t := range ft.out { + if i > 0 { + repr = append(repr, ", "...) + } + repr = append(repr, *t.string...) } + if len(ft.out) > 1 { + repr = append(repr, ')') + } + return string(repr) } -func (gc *gcProg) finalize() (unsafe.Pointer, bool) { - if gc.size == 0 { - return nil, false - } - ptrsize := unsafe.Sizeof(uintptr(0)) - gc.align(ptrsize) - nptr := gc.size / ptrsize - for uintptr(len(gc.gc)) < nptr/2+1 { - gc.gc = append(gc.gc, 0x44) // BitsScalar - } - // If number of words is odd, repeat the mask twice. - // Compiler does the same. - if nptr%2 != 0 { - for i := uintptr(0); i < nptr; i++ { - gc.appendWord(extractGCWord(gc.gc, i)) +// isReflexive reports whether the == operation on the type is reflexive. +// That is, x == x for all values x of type t. +func isReflexive(t *rtype) bool { + switch t.Kind() { + case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer: + return true + case Float32, Float64, Complex64, Complex128, Interface: + return false + case Array: + tt := (*arrayType)(unsafe.Pointer(t)) + return isReflexive(tt.elem) + case Struct: + tt := (*structType)(unsafe.Pointer(t)) + for _, f := range tt.fields { + if !isReflexive(f.typ) { + return false + } } + return true + default: + // Func, Map, Slice, Invalid + panic("isReflexive called on non-key type " + t.String()) } - return unsafe.Pointer(&gc.gc[0]), gc.hasPtr -} - -func extractGCWord(gc []byte, i uintptr) byte { - return (gc[i/2] >> ((i%2)*4 + 2)) & 3 -} - -func (gc *gcProg) align(a uintptr) { - gc.size = align(gc.size, a) } -// These constants must stay in sync with ../runtime/mgc0.h. -const ( - bitsScalar = 1 - bitsPointer = 2 - bitsMultiWord = 3 - - bitsIface = 2 - bitsEface = 3 -) - // Make sure these routines stay in sync with ../../runtime/hashmap.go! // These types exist only for GC, so we only fill out GC relevant info. // Currently, that's just size and the GC program. We also fill in string // for possible debugging use. const ( - bucketSize = 8 - maxKeySize = 128 - maxValSize = 128 + bucketSize uintptr = 8 + maxKeySize uintptr = 128 + maxValSize uintptr = 128 ) func bucketOf(ktyp, etyp *rtype) *rtype { + // See comment on hmap.overflow in ../runtime/hashmap.go. 
+ var kind uint8 + if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 && + ktyp.size <= maxKeySize && etyp.size <= maxValSize { + kind = kindNoPointers + } + if ktyp.size > maxKeySize { ktyp = PtrTo(ktyp).(*rtype) } if etyp.size > maxValSize { etyp = PtrTo(etyp).(*rtype) } - ptrsize := unsafe.Sizeof(uintptr(0)) - var gc gcProg - // topbits - for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrsize); i++ { - gc.append(bitsScalar) - } - // keys - for i := 0; i < bucketSize; i++ { - gc.appendProg(ktyp) - } - // values - for i := 0; i < bucketSize; i++ { - gc.appendProg(etyp) - } - // overflow - gc.append(bitsPointer) - if runtime.GOARCH == "amd64p32" { - gc.append(bitsScalar) + // Prepare GC data if any. + // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes, + // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap. + // Normally the enforced limit on pointer maps is 16 bytes, + // but larger ones are acceptable, 33 bytes isn't too too big, + // and it's easier to generate a pointer bitmap than a GC program. + // Note that since the key and value are known to be <= 128 bytes, + // they're guaranteed to have bitmaps instead of GC programs. + // var gcdata *byte + var ptrdata uintptr + var overflowPad uintptr + + // On NaCl, pad if needed to make overflow end at the proper struct alignment. + // On other systems, align > ptrSize is not possible. + if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) { + overflowPad = ptrSize + } + size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize + if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 { + panic("reflect: bad size computation in MapOf") + } + + if kind != kindNoPointers { + nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize + mask := make([]byte, (nptr+7)/8) + base := bucketSize / ptrSize + + if ktyp.kind&kindNoPointers == 0 { + if ktyp.kind&kindGCProg != 0 { + panic("reflect: unexpected GC program in MapOf") + } + kmask := (*[16]byte)(unsafe.Pointer( /*ktyp.gcdata*/ nil)) + for i := uintptr(0); i < ktyp.size/ptrSize; i++ { + if (kmask[i/8]>>(i%8))&1 != 0 { + for j := uintptr(0); j < bucketSize; j++ { + word := base + j*ktyp.size/ptrSize + i + mask[word/8] |= 1 << (word % 8) + } + } + } + } + base += bucketSize * ktyp.size / ptrSize + + if etyp.kind&kindNoPointers == 0 { + if etyp.kind&kindGCProg != 0 { + panic("reflect: unexpected GC program in MapOf") + } + emask := (*[16]byte)(unsafe.Pointer( /*etyp.gcdata*/ nil)) + for i := uintptr(0); i < etyp.size/ptrSize; i++ { + if (emask[i/8]>>(i%8))&1 != 0 { + for j := uintptr(0); j < bucketSize; j++ { + word := base + j*etyp.size/ptrSize + i + mask[word/8] |= 1 << (word % 8) + } + } + } + } + base += bucketSize * etyp.size / ptrSize + base += overflowPad / ptrSize + + word := base + mask[word/8] |= 1 << (word % 8) + // gcdata = &mask[0] + ptrdata = (word + 1) * ptrSize + + // overflow word must be last + if ptrdata != size { + panic("reflect: bad layout computation in MapOf") + } } b := new(rtype) - b.size = gc.size + // b.size = gc.size // b.gc[0], _ = gc.finalize() b.kind |= kindGCProg s := "bucket(" + *ktyp.string + "," + *etyp.string + ")" @@ -1863,24 +1970,25 @@ func SliceOf(t Type) Type { return cachePut(ckey, &slice.rtype) } +// See cmd/compile/internal/gc/reflect.go for derivation of constant. +const maxPtrmaskBytes = 2048 + // ArrayOf returns the array type with the given count and element type. 
// For example, if t represents int, ArrayOf(5, t) represents [5]int. // // If the resulting type would be larger than the available address space, // ArrayOf panics. -// -// TODO(rsc): Unexported for now. Export once the alg field is set correctly -// for the type. This may require significant work. -// -// TODO(rsc): TestArrayOf is also disabled. Re-enable. -func arrayOf(count int, elem Type) Type { +func ArrayOf(count int, elem Type) Type { typ := elem.(*rtype) + // call SliceOf here as it calls cacheGet/cachePut. + // ArrayOf also calls cacheGet/cachePut and thus may modify the state of + // the lookupCache mutex. slice := SliceOf(elem) // Look in cache. ckey := cacheKey{Array, typ, nil, uintptr(count)} - if slice := cacheGet(ckey); slice != nil { - return slice + if array := cacheGet(ckey); array != nil { + return array } // Look in known types. @@ -1891,7 +1999,6 @@ func arrayOf(count int, elem Type) Type { prototype := *(**arrayType)(unsafe.Pointer(&iarray)) array := new(arrayType) *array = *prototype - // TODO: Set extra kind bits correctly. array.string = &s // gccgo uses a different hash. @@ -1908,19 +2015,70 @@ func arrayOf(count int, elem Type) Type { panic("reflect.ArrayOf: array size would exceed virtual address space") } array.size = typ.size * uintptr(count) + // if count > 0 && typ.ptrdata != 0 { + // array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata + // } array.align = typ.align array.fieldAlign = typ.fieldAlign - // TODO: array.alg - // TODO: array.gc - // TODO: array.uncommonType = nil array.ptrToThis = nil array.len = uintptr(count) array.slice = slice.(*rtype) + array.kind &^= kindNoPointers + switch { + case typ.kind&kindNoPointers != 0 || array.size == 0: + // No pointers. + array.kind |= kindNoPointers + gc := [...]uintptr{array.size, _GC_END} + array.gc = unsafe.Pointer(&gc[0]) + + case count == 1: + // In memory, 1-element array looks just like the element. + array.kind |= typ.kind & kindGCProg + array.gc = typ.gc + + default: + gc := []uintptr{array.size, _GC_ARRAY_START, 0, uintptr(count), typ.size} + gc = appendGCProgram(gc, typ) + gc = append(gc, _GC_ARRAY_NEXT, _GC_END) + array.gc = unsafe.Pointer(&gc[0]) + } + + array.kind &^= kindDirectIface + + array.hashfn = func(p unsafe.Pointer, size uintptr) uintptr { + ret := uintptr(0) + for i := 0; i < count; i++ { + ret *= 33 + ret += typ.hashfn(p, typ.size) + p = unsafe.Pointer(uintptr(p) + typ.size) + } + return ret + } + + array.equalfn = func(p1, p2 unsafe.Pointer, size uintptr) bool { + for i := 0; i < count; i++ { + if !typ.equalfn(p1, p2, typ.size) { + return false + } + p1 = unsafe.Pointer(uintptr(p1) + typ.size) + p2 = unsafe.Pointer(uintptr(p2) + typ.size) + } + return true + } + return cachePut(ckey, &array.rtype) } +func appendVarint(x []byte, v uintptr) []byte { + for ; v >= 0x80; v >>= 7 { + x = append(x, byte(v|0x80)) + } + x = append(x, byte(v)) + return x +} + // toType converts from a *rtype to a Type that can be returned // to the client of package reflect. In gc, the only concern is that // a nil *rtype must be replaced by a nil Type, but in gccgo this @@ -1969,56 +2127,49 @@ type bitVector struct { data []byte } -// append a bit pair to the bitmap. -func (bv *bitVector) append2(bits uint8) { - // assume bv.n is a multiple of 2, since append2 is the only operation. +// append a bit to the bitmap. 
+func (bv *bitVector) append(bit uint8) { if bv.n%8 == 0 { bv.data = append(bv.data, 0) } - bv.data[bv.n/8] |= bits << (bv.n % 8) - bv.n += 2 + bv.data[bv.n/8] |= bit << (bv.n % 8) + bv.n++ } -func addTypeBits(bv *bitVector, offset *uintptr, t *rtype) { - *offset = align(*offset, uintptr(t.align)) +func addTypeBits(bv *bitVector, offset uintptr, t *rtype) { if t.kind&kindNoPointers != 0 { - *offset += t.size return } switch Kind(t.kind & kindMask) { case Chan, Func, Map, Ptr, Slice, String, UnsafePointer: // 1 pointer at start of representation - for bv.n < 2*uint32(*offset/uintptr(ptrSize)) { - bv.append2(bitsScalar) + for bv.n < uint32(offset/uintptr(ptrSize)) { + bv.append(0) } - bv.append2(bitsPointer) + bv.append(1) case Interface: // 2 pointers - for bv.n < 2*uint32(*offset/uintptr(ptrSize)) { - bv.append2(bitsScalar) + for bv.n < uint32(offset/uintptr(ptrSize)) { + bv.append(0) } - bv.append2(bitsPointer) - bv.append2(bitsPointer) + bv.append(1) + bv.append(1) case Array: // repeat inner type tt := (*arrayType)(unsafe.Pointer(t)) for i := 0; i < int(tt.len); i++ { - addTypeBits(bv, offset, tt.elem) + addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem) } case Struct: // apply fields tt := (*structType)(unsafe.Pointer(t)) - start := *offset for i := range tt.fields { f := &tt.fields[i] - off := start + f.offset - addTypeBits(bv, &off, f.typ) + addTypeBits(bv, offset+f.offset, f.typ) } } - - *offset += t.size } diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go index 7cc4f7f8bfd..a924d8639a2 100644 --- a/libgo/go/reflect/value.go +++ b/libgo/go/reflect/value.go @@ -10,7 +10,7 @@ import ( "unsafe" ) -const ptrSize = unsafe.Sizeof((*byte)(nil)) +const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const const cannotSet = "cannot set value obtained from unexported struct field" // Value is the reflection interface to a Go value. @@ -30,6 +30,10 @@ const cannotSet = "cannot set value obtained from unexported struct field" // A Value can be used concurrently by multiple goroutines provided that // the underlying Go value can be used concurrently for the equivalent // direct operations. +// +// Using == on two Values does not compare the underlying values +// they represent, but rather the contents of the Value structs. +// To compare two Values, compare the results of the Interface method. type Value struct { // typ holds the type of the value represented by a Value. typ *rtype @@ -104,7 +108,7 @@ func packEface(v Value) interface{} { // TODO: pass safe boolean from valueInterface so // we don't need to copy if safe==true? c := unsafe_New(t) - memmove(c, ptr, t.size) + typedmemmove(t, c, ptr) ptr = c } e.word = ptr @@ -173,7 +177,7 @@ type emptyInterface struct { // nonEmptyInterface is the header for a interface value with methods. type nonEmptyInterface struct { - // see ../runtime/iface.c:/Itab + // see ../runtime/iface.go:/Itab itab *struct { typ *rtype // dynamic concrete type fun [100000]unsafe.Pointer // method table @@ -261,7 +265,7 @@ func (v Value) runes() []rune { return *(*[]rune)(v.ptr) } -// CanAddr returns true if the value's address can be obtained with Addr. +// CanAddr reports whether the value's address can be obtained with Addr. // Such values are called addressable. A value is addressable if it is // an element of a slice, an element of an addressable array, // a field of an addressable struct, or the result of dereferencing a pointer. 
@@ -270,11 +274,11 @@ func (v Value) CanAddr() bool { return v.flag&flagAddr != 0 } -// CanSet returns true if the value of v can be changed. +// CanSet reports whether the value of v can be changed. // A Value can be changed only if it is addressable and was not // obtained by the use of unexported struct fields. // If CanSet returns false, calling Set or any type-specific -// setter (e.g., SetBool, SetInt64) will panic. +// setter (e.g., SetBool, SetInt) will panic. func (v Value) CanSet() bool { return v.flag&(flagAddr|flagRO) == flagAddr } @@ -295,8 +299,8 @@ func (v Value) Call(in []Value) []Value { // CallSlice calls the variadic function v with the input arguments in, // assigning the slice in[len(in)-1] to v's final variadic argument. -// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]...). -// Call panics if v's Kind is not Func or if v is not variadic. +// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...). +// CallSlice panics if v's Kind is not Func or if v is not variadic. // It returns the output results as Values. // As in Go, each input argument must be assignable to the // type of the function's corresponding input parameter. @@ -622,7 +626,7 @@ func (v Value) Field(i int) Value { // Either flagIndir is set and v.ptr points at struct, // or flagIndir is not set and v.ptr is the actual struct data. // In the former case, we want v.ptr + offset. - // In the latter case, we must be have field.offset = 0, + // In the latter case, we must have field.offset = 0, // so v.ptr + field.offset is still okay. ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset) return Value{typ, ptr, fl} @@ -716,7 +720,7 @@ func (v Value) Index(i int) Value { } tt := (*sliceType)(unsafe.Pointer(v.typ)) typ := tt.elem - val := unsafe.Pointer(uintptr(s.Data) + uintptr(i)*typ.size) + val := arrayAt(s.Data, i, typ.size) fl := flagAddr | flagIndir | v.flag&flagRO | flag(typ.Kind()) return Value{typ, val, fl} @@ -725,7 +729,7 @@ func (v Value) Index(i int) Value { if uint(i) >= uint(s.Len) { panic("reflect: string index out of range") } - p := unsafe.Pointer(uintptr(s.Data) + uintptr(i)) + p := arrayAt(s.Data, i, 1) fl := v.flag&flagRO | flag(Uint8) | flagIndir return Value{uint8Type, p, fl} } @@ -752,7 +756,7 @@ func (v Value) Int() int64 { panic(&ValueError{"reflect.Value.Int", v.kind()}) } -// CanInterface returns true if Interface can be used without panicking. +// CanInterface reports whether Interface can be used without panicking. func (v Value) CanInterface() bool { if v.flag == 0 { panic(&ValueError{"reflect.Value.CanInterface", Invalid}) @@ -849,7 +853,7 @@ func (v Value) IsNil() bool { panic(&ValueError{"reflect.Value.IsNil", v.kind()}) } -// IsValid returns true if v represents a value. +// IsValid reports whether v represents a value. // It returns false if v is the zero Value. // If IsValid returns false, all other methods except String panic. // Most functions and methods never return an invalid value. @@ -920,7 +924,7 @@ func (v Value) MapIndex(key Value) Value { // Copy result so future changes to the map // won't change the underlying value. c := unsafe_New(typ) - memmove(c, e, typ.size) + typedmemmove(typ, c, e) return Value{typ, c, fl | flagIndir} } else { return Value{typ, *(*unsafe.Pointer)(e), fl} @@ -958,7 +962,7 @@ func (v Value) MapKeys() []Value { // Copy result so future changes to the map // won't change the underlying value. 
c := unsafe_New(keyType) - memmove(c, key, keyType.size) + typedmemmove(keyType, c, key) a[i] = Value{keyType, c, fl | flagIndir} } else { a[i] = Value{keyType, *(*unsafe.Pointer)(key), fl} @@ -1026,7 +1030,7 @@ func (v Value) NumField() int { return len(tt.fields) } -// OverflowComplex returns true if the complex128 x cannot be represented by v's type. +// OverflowComplex reports whether the complex128 x cannot be represented by v's type. // It panics if v's Kind is not Complex64 or Complex128. func (v Value) OverflowComplex(x complex128) bool { k := v.kind() @@ -1039,7 +1043,7 @@ func (v Value) OverflowComplex(x complex128) bool { panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()}) } -// OverflowFloat returns true if the float64 x cannot be represented by v's type. +// OverflowFloat reports whether the float64 x cannot be represented by v's type. // It panics if v's Kind is not Float32 or Float64. func (v Value) OverflowFloat(x float64) bool { k := v.kind() @@ -1059,7 +1063,7 @@ func overflowFloat32(x float64) bool { return math.MaxFloat32 < x && x <= math.MaxFloat64 } -// OverflowInt returns true if the int64 x cannot be represented by v's type. +// OverflowInt reports whether the int64 x cannot be represented by v's type. // It panics if v's Kind is not Int, Int8, int16, Int32, or Int64. func (v Value) OverflowInt(x int64) bool { k := v.kind() @@ -1072,7 +1076,7 @@ func (v Value) OverflowInt(x int64) bool { panic(&ValueError{"reflect.Value.OverflowInt", v.kind()}) } -// OverflowUint returns true if the uint64 x cannot be represented by v's type. +// OverflowUint reports whether the uint64 x cannot be represented by v's type. // It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64. func (v Value) OverflowUint(x uint64) bool { k := v.kind() @@ -1194,7 +1198,7 @@ func (v Value) Set(x Value) { } x = x.assignTo("reflect.Set", v.typ, target) if x.flag&flagIndir != 0 { - memmove(v.ptr, x.ptr, v.typ.size) + typedmemmove(v.typ, v.ptr, x.ptr) } else { *(*unsafe.Pointer)(v.ptr) = x.ptr } @@ -1408,7 +1412,7 @@ func (v Value) Slice(i, j int) Value { if i < 0 || j < i || j > s.Len { panic("reflect.Value.Slice: string slice index out of bounds") } - t := stringHeader{unsafe.Pointer(uintptr(s.Data) + uintptr(i)), j - i} + t := stringHeader{arrayAt(s.Data, i, 1), j - i} return Value{v.typ, unsafe.Pointer(&t), v.flag} } @@ -1424,7 +1428,7 @@ func (v Value) Slice(i, j int) Value { s.Len = j - i s.Cap = cap - i if cap-i > 0 { - s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) + s.Data = arrayAt(base, i, typ.elem.Size()) } else { // do not advance pointer, to avoid pointing beyond end of slice s.Data = base @@ -1476,7 +1480,7 @@ func (v Value) Slice3(i, j, k int) Value { s.Len = j - i s.Cap = k - i if k-i > 0 { - s.Data = unsafe.Pointer(uintptr(base) + uintptr(i)*typ.elem.Size()) + s.Data = arrayAt(base, i, typ.elem.Size()) } else { // do not advance pointer, to avoid pointing beyond end of slice s.Data = base @@ -1490,6 +1494,8 @@ func (v Value) Slice3(i, j, k int) Value { // String is a special case because of Go's String method convention. // Unlike the other getters, it does not panic if v's Kind is not String. // Instead, it returns a string of the form "" where T is v's type. +// The fmt package treats Values specially. It does not call their String +// method implicitly but instead prints the concrete values they hold. 
func (v Value) String() string { switch k := v.kind(); k { case Invalid: @@ -1515,7 +1521,7 @@ func (v Value) TryRecv() (x Value, ok bool) { // TrySend attempts to send x on the channel v but will not block. // It panics if v's Kind is not Chan. -// It returns true if the value was sent, false otherwise. +// It reports whether the value was sent. // As in Go, x's value must be assignable to the channel's element type. func (v Value) TrySend(x Value) bool { v.mustBe(Chan) @@ -1633,6 +1639,12 @@ func typesMustMatch(what string, t1, t2 Type) { } } +// arrayAt returns the i-th element of p, a C-array whose elements are +// eltSize wide (in bytes). +func arrayAt(p unsafe.Pointer, i int, eltSize uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + uintptr(i)*eltSize) +} + // grow grows the slice s so that it can hold extra more values, allocating // more capacity if needed. It also returns the old and new slice lengths. func grow(s Value, extra int) (Value, int, int) { @@ -1708,27 +1720,23 @@ func Copy(dst, src Value) int { se := src.typ.Elem() typesMustMatch("reflect.Copy", de, se) - n := dst.Len() - if sn := src.Len(); n > sn { - n = sn - } - - // Copy via memmove. - var da, sa unsafe.Pointer + var ds, ss sliceHeader if dk == Array { - da = dst.ptr + ds.Data = dst.ptr + ds.Len = dst.Len() + ds.Cap = ds.Len } else { - da = (*sliceHeader)(dst.ptr).Data + ds = *(*sliceHeader)(dst.ptr) } - if src.flag&flagIndir == 0 { - sa = unsafe.Pointer(&src.ptr) - } else if sk == Array { - sa = src.ptr + if sk == Array { + ss.Data = src.ptr + ss.Len = src.Len() + ss.Cap = ss.Len } else { - sa = (*sliceHeader)(src.ptr).Data + ss = *(*sliceHeader)(src.ptr) } - memmove(da, sa, uintptr(n)*de.Size()) - return n + + return typedslicecopy(de.common(), ds, ss) } // A runtimeSelect is a single case passed to rselect. @@ -2269,7 +2277,7 @@ func cvtDirect(v Value, typ Type) Value { if f&flagAddr != 0 { // indirect, mutable word - make a copy c := unsafe_New(t) - memmove(c, ptr, t.size) + typedmemmove(t, c, ptr) ptr = c f &^= flagAddr } @@ -2311,17 +2319,41 @@ func chansend(t *rtype, ch unsafe.Pointer, val unsafe.Pointer, nb bool) bool func makechan(typ *rtype, size uint64) (ch unsafe.Pointer) func makemap(t *rtype) (m unsafe.Pointer) + +//go:noescape func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer) + func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer) + +//go:noescape func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer) + +// m escapes into the return value, but the caller of mapiterinit +// doesn't let the return value escape. +//go:noescape func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer + +//go:noescape func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer) + +//go:noescape func mapiternext(it unsafe.Pointer) + +//go:noescape func maplen(m unsafe.Pointer) int func call(typ *rtype, fnaddr unsafe.Pointer, isInterface bool, isMethod bool, params *unsafe.Pointer, results *unsafe.Pointer) func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer) +// typedmemmove copies a value of type t to dst from src. +//go:noescape +func typedmemmove(t *rtype, dst, src unsafe.Pointer) + +// typedslicecopy copies a slice of elemType values from src to dst, +// returning the number of elements copied. +//go:noescape +func typedslicecopy(elemType *rtype, dst, src sliceHeader) int + //go:noescape //extern memmove func memmove(adst, asrc unsafe.Pointer, n uintptr) -- cgit v1.2.3
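The final hunks in value.go replace raw memmove calls with typedmemmove and typedslicecopy, so the runtime observes typed writes (and can apply write barriers for pointer-bearing elements); the Copy implementation is rewritten around sliceHeader for the same reason. The user-visible contract of reflect.Copy is unchanged by this; a short sketch of that contract (illustrative only, not from the patch):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	src := []string{"a", "b", "c", "d"}
	dst := make([]string, 3)

	// reflect.Copy copies min(len(dst), len(src)) elements, like the
	// built-in copy. After this patch the copy goes through
	// typedslicecopy, so the pointer slots inside the string headers
	// are written with the garbage collector's knowledge.
	n := reflect.Copy(reflect.ValueOf(dst), reflect.ValueOf(src))
	fmt.Println(n, dst) // 3 [a b c]
}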