libgo: update to go1.8rc2
[official-gcc.git] / libgo / go / reflect / type.go
blob 29d89f7176d6501e45d9a002dcd75ba945905127
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
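//
// A minimal caller-side sketch of the typical use described above (the
// variable x and its type are hypothetical):
//
//     x := []int{1, 2, 3}
//     t := reflect.TypeOf(x)  // a Type describing []int
//     fmt.Println(t.Kind())   // "slice"
//     fmt.Println(t.Elem())   // "int"
//     v := reflect.ValueOf(x) // a Value holding the run-time data
//     fmt.Println(v.Len())    // 3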
16 package reflect
18 import (
19 "strconv"
20 "sync"
21 "unsafe"
24 // Type is the representation of a Go type.
26 // Not all methods apply to all kinds of types. Restrictions,
27 // if any, are noted in the documentation for each method.
28 // Use the Kind method to find out the kind of type before
29 // calling kind-specific methods. Calling a method
30 // inappropriate to the kind of type causes a run-time panic.
32 // Type values are comparable, such as with the == operator.
33 // Two Type values are equal if they represent identical types.
34 type Type interface {
35 // Methods applicable to all types.
37 // Align returns the alignment in bytes of a value of
38 // this type when allocated in memory.
39 Align() int
41 // FieldAlign returns the alignment in bytes of a value of
42 // this type when used as a field in a struct.
43 FieldAlign() int
45 // Method returns the i'th method in the type's method set.
46 // It panics if i is not in the range [0, NumMethod()).
48 // For a non-interface type T or *T, the returned Method's Type and Func
49 // fields describe a function whose first argument is the receiver.
51 // For an interface type, the returned Method's Type field gives the
52 // method signature, without a receiver, and the Func field is nil.
53 Method(int) Method
55 // MethodByName returns the method with that name in the type's
56 // method set and a boolean indicating if the method was found.
58 // For a non-interface type T or *T, the returned Method's Type and Func
59 // fields describe a function whose first argument is the receiver.
61 // For an interface type, the returned Method's Type field gives the
62 // method signature, without a receiver, and the Func field is nil.
63 MethodByName(string) (Method, bool)
65 // NumMethod returns the number of exported methods in the type's method set.
66 NumMethod() int
68 // Name returns the type's name within its package.
69 // It returns an empty string for unnamed types.
70 Name() string
72 // PkgPath returns a named type's package path, that is, the import path
73 // that uniquely identifies the package, such as "encoding/base64".
74 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
75 // the package path will be the empty string.
76 PkgPath() string
78 // Size returns the number of bytes needed to store
79 // a value of the given type; it is analogous to unsafe.Sizeof.
80 Size() uintptr
82 // String returns a string representation of the type.
83 // The string representation may use shortened package names
84 // (e.g., base64 instead of "encoding/base64") and is not
85 // guaranteed to be unique among types. To test for type identity,
86 // compare the Types directly.
87 String() string
89 // Used internally by gccgo; returns the string form with quoting retained.
90 rawString() string
92 // Kind returns the specific kind of this type.
93 Kind() Kind
95 // Implements reports whether the type implements the interface type u.
96 Implements(u Type) bool
98 // AssignableTo reports whether a value of the type is assignable to type u.
99 AssignableTo(u Type) bool
101 // ConvertibleTo reports whether a value of the type is convertible to type u.
102 ConvertibleTo(u Type) bool
104 // Comparable reports whether values of this type are comparable.
105 Comparable() bool
107 // Methods applicable only to some types, depending on Kind.
108 // The methods allowed for each kind are:
110 // Int*, Uint*, Float*, Complex*: Bits
111 // Array: Elem, Len
112 // Chan: ChanDir, Elem
113 // Func: In, NumIn, Out, NumOut, IsVariadic.
114 // Map: Key, Elem
115 // Ptr: Elem
116 // Slice: Elem
117 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
119 // Bits returns the size of the type in bits.
120 // It panics if the type's Kind is not one of the
121 // sized or unsized Int, Uint, Float, or Complex kinds.
122 Bits() int
124 // ChanDir returns a channel type's direction.
125 // It panics if the type's Kind is not Chan.
126 ChanDir() ChanDir
128 // IsVariadic reports whether a function type's final input parameter
129 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
130 // implicit actual type []T.
132 // For concreteness, if t represents func(x int, y ...float64), then
134 // t.NumIn() == 2
135 // t.In(0) is the reflect.Type for "int"
136 // t.In(1) is the reflect.Type for "[]float64"
137 // t.IsVariadic() == true
139 // IsVariadic panics if the type's Kind is not Func.
140 IsVariadic() bool
142 // Elem returns a type's element type.
143 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
144 Elem() Type
146 // Field returns a struct type's i'th field.
147 // It panics if the type's Kind is not Struct.
148 // It panics if i is not in the range [0, NumField()).
149 Field(i int) StructField
151 // FieldByIndex returns the nested field corresponding
152 // to the index sequence. It is equivalent to calling Field
153 // successively for each index i.
154 // It panics if the type's Kind is not Struct.
155 FieldByIndex(index []int) StructField
157 // FieldByName returns the struct field with the given name
158 // and a boolean indicating if the field was found.
159 FieldByName(name string) (StructField, bool)
161 // FieldByNameFunc returns the struct field with a name
162 // that satisfies the match function and a boolean indicating if
163 // the field was found.
165 // FieldByNameFunc considers the fields in the struct itself
166 // and then the fields in any anonymous structs, in breadth first order,
167 // stopping at the shallowest nesting depth containing one or more
168 // fields satisfying the match function. If multiple fields at that depth
169 // satisfy the match function, they cancel each other
170 // and FieldByNameFunc returns no match.
171 // This behavior mirrors Go's handling of name lookup in
172 // structs containing anonymous fields.
173 FieldByNameFunc(match func(string) bool) (StructField, bool)
175 // In returns the type of a function type's i'th input parameter.
176 // It panics if the type's Kind is not Func.
177 // It panics if i is not in the range [0, NumIn()).
178 In(i int) Type
180 // Key returns a map type's key type.
181 // It panics if the type's Kind is not Map.
182 Key() Type
184 // Len returns an array type's length.
185 // It panics if the type's Kind is not Array.
186 Len() int
188 // NumField returns a struct type's field count.
189 // It panics if the type's Kind is not Struct.
190 NumField() int
192 // NumIn returns a function type's input parameter count.
193 // It panics if the type's Kind is not Func.
194 NumIn() int
196 // NumOut returns a function type's output parameter count.
197 // It panics if the type's Kind is not Func.
198 NumOut() int
200 // Out returns the type of a function type's i'th output parameter.
201 // It panics if the type's Kind is not Func.
202 // It panics if i is not in the range [0, NumOut()).
203 Out(i int) Type
205 common() *rtype
206 uncommon() *uncommonType
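// A minimal caller-side sketch of the kind-specific methods above, applied
// to a function type (the function literal is hypothetical):
//
//     t := reflect.TypeOf(func(x int, y ...float64) string { return "" })
//     // t.Kind() == reflect.Func
//     // t.NumIn() == 2, t.NumOut() == 1
//     // t.In(1) is the reflect.Type for "[]float64"
//     // t.IsVariadic() == true
//     // t.Out(0) is the reflect.Type for "string"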
209 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
210 // if the names are equal, even if they are unexported names originating
211 // in different packages. The practical effect of this is that the result of
212 // t.FieldByName("x") is not well defined if the struct type t contains
213 // multiple fields named x (embedded from different packages).
214 // FieldByName may return one of the fields named x or may report that there are none.
215 // See golang.org/issue/4876 for more details.
217 /*
218 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
219 * A few are known to ../runtime/type.go to convey to debuggers.
220 * They are also known to ../runtime/type.go.
221 */
223 // A Kind represents the specific kind of type that a Type represents.
224 // The zero Kind is not a valid kind.
225 type Kind uint
227 const (
228 Invalid Kind = iota
229 Bool
230 Int
231 Int8
232 Int16
233 Int32
234 Int64
235 Uint
236 Uint8
237 Uint16
238 Uint32
239 Uint64
240 Uintptr
241 Float32
242 Float64
243 Complex64
244 Complex128
245 Array
246 Chan
247 Func
248 Interface
249 Map
250 Ptr
251 Slice
252 String
253 Struct
254 UnsafePointer
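// A minimal sketch of checking the Kind before calling kind-specific
// methods, as the Type documentation above requires (the values passed to
// TypeOf are hypothetical):
//
//     for _, x := range []interface{}{[]byte(nil), map[string]int(nil), 0} {
//         t := reflect.TypeOf(x)
//         switch t.Kind() {
//         case reflect.Slice, reflect.Map:
//             fmt.Println(t.Kind(), "of", t.Elem())
//         default:
//             fmt.Println(t.Kind())
//         }
//     }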
257 // rtype is the common implementation of most values.
258 // It is embedded in other, public struct types, but always
259 // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
260 // so that code cannot convert from, say, *arrayType to *ptrType.
261 type rtype struct {
262 kind uint8 // enumeration for C
263 align int8 // alignment of variable with this type
264 fieldAlign uint8 // alignment of struct field with this type
265 _ uint8 // unused/padding
266 size uintptr
267 hash uint32 // hash of type; avoids computation in hash tables
269 hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
270 equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
272 gc unsafe.Pointer // garbage collection data
273 string *string // string form; unnecessary but undeniably useful
274 *uncommonType // (relatively) uncommon fields
275 ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
278 // Method on non-interface type
279 type method struct {
280 name *string // name of method
281 pkgPath *string // nil for exported Names; otherwise import path
282 mtyp *rtype // method type (without receiver)
283 typ *rtype // .(*FuncType) underneath (with receiver)
284 tfn unsafe.Pointer // fn used for normal method call
287 // uncommonType is present only for types with names or methods
288 // (if T is a named type, the uncommonTypes for T and *T have methods).
289 // Using a pointer to this struct reduces the overall size required
290 // to describe an unnamed type with no methods.
291 type uncommonType struct {
292 name *string // name of type
293 pkgPath *string // import path; nil for built-in types like int, string
294 methods []method // methods associated with type
297 // ChanDir represents a channel type's direction.
298 type ChanDir int
300 const (
301 RecvDir ChanDir = 1 << iota // <-chan
302 SendDir // chan<-
303 BothDir = RecvDir | SendDir // chan
306 // arrayType represents a fixed array type.
307 type arrayType struct {
308 rtype `reflect:"array"`
309 elem *rtype // array element type
310 slice *rtype // slice type
311 len uintptr
314 // chanType represents a channel type.
315 type chanType struct {
316 rtype `reflect:"chan"`
317 elem *rtype // channel element type
318 dir uintptr // channel direction (ChanDir)
321 // funcType represents a function type.
322 type funcType struct {
323 rtype `reflect:"func"`
324 dotdotdot bool // last input parameter is ...
325 in []*rtype // input parameter types
326 out []*rtype // output parameter types
329 // imethod represents a method on an interface type
330 type imethod struct {
331 name *string // name of method
332 pkgPath *string // nil for exported Names; otherwise import path
333 typ *rtype // .(*FuncType) underneath
336 // interfaceType represents an interface type.
337 type interfaceType struct {
338 rtype `reflect:"interface"`
339 methods []imethod // sorted by hash
342 // mapType represents a map type.
343 type mapType struct {
344 rtype `reflect:"map"`
345 key *rtype // map key type
346 elem *rtype // map element (value) type
347 bucket *rtype // internal bucket structure
348 hmap *rtype // internal map header
349 keysize uint8 // size of key slot
350 indirectkey uint8 // store ptr to key instead of key itself
351 valuesize uint8 // size of value slot
352 indirectvalue uint8 // store ptr to value instead of value itself
353 bucketsize uint16 // size of bucket
354 reflexivekey bool // true if k==k for all keys
355 needkeyupdate bool // true if we need to update key on an overwrite
358 // ptrType represents a pointer type.
359 type ptrType struct {
360 rtype `reflect:"ptr"`
361 elem *rtype // pointer element (pointed at) type
364 // sliceType represents a slice type.
365 type sliceType struct {
366 rtype `reflect:"slice"`
367 elem *rtype // slice element type
370 // Struct field
371 type structField struct {
372 name *string // nil for embedded fields
373 pkgPath *string // nil for exported Names; otherwise import path
374 typ *rtype // type of field
375 tag *string // nil if no tag
376 offset uintptr // byte offset of field within struct
379 // structType represents a struct type.
380 type structType struct {
381 rtype `reflect:"struct"`
382 fields []structField // sorted by offset
385 // NOTE: These are copied from ../runtime/mgc0.h.
386 // They must be kept in sync.
387 const (
388 _GC_END = iota
389 _GC_PTR
390 _GC_APTR
391 _GC_ARRAY_START
392 _GC_ARRAY_NEXT
393 _GC_CALL
394 _GC_CHAN_PTR
395 _GC_STRING
396 _GC_EFACE
397 _GC_IFACE
398 _GC_SLICE
399 _GC_REGION
400 _GC_NUM_INSTR
403 /*
404 * The compiler knows the exact layout of all the data structures above.
405 * The compiler does not know about the data structures and methods below.
406 */
408 // Method represents a single method.
409 type Method struct {
410 // Name is the method name.
411 // PkgPath is the package path that qualifies a lower case (unexported)
412 // method name. It is empty for upper case (exported) method names.
413 // The combination of PkgPath and Name uniquely identifies a method
414 // in a method set.
415 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
416 Name string
417 PkgPath string
419 Type Type // method type
420 Func Value // func with receiver as first argument
421 Index int // index for Type.Method
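// A minimal caller-side sketch of walking a method set with Method and
// MethodByName (the type T and its method are hypothetical, declared at
// package level):
//
//     type T struct{}
//     func (T) Hello() string { return "hi" }
//
//     t := reflect.TypeOf(T{})
//     for i := 0; i < t.NumMethod(); i++ {
//         m := t.Method(i)
//         fmt.Println(m.Index, m.Name, m.Type)
//     }
//     if m, ok := t.MethodByName("Hello"); ok {
//         out := m.Func.Call([]reflect.Value{reflect.ValueOf(T{})})
//         fmt.Println(out[0].String()) // "hi"
//     }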
424 const (
425 kindDirectIface = 1 << 5
426 kindGCProg = 1 << 6 // Type.gc points to GC program
427 kindNoPointers = 1 << 7
428 kindMask = (1 << 5) - 1
431 func (k Kind) String() string {
432 if int(k) < len(kindNames) {
433 return kindNames[k]
435 return "kind" + strconv.Itoa(int(k))
438 var kindNames = []string{
439 Invalid: "invalid",
440 Bool: "bool",
441 Int: "int",
442 Int8: "int8",
443 Int16: "int16",
444 Int32: "int32",
445 Int64: "int64",
446 Uint: "uint",
447 Uint8: "uint8",
448 Uint16: "uint16",
449 Uint32: "uint32",
450 Uint64: "uint64",
451 Uintptr: "uintptr",
452 Float32: "float32",
453 Float64: "float64",
454 Complex64: "complex64",
455 Complex128: "complex128",
456 Array: "array",
457 Chan: "chan",
458 Func: "func",
459 Interface: "interface",
460 Map: "map",
461 Ptr: "ptr",
462 Slice: "slice",
463 String: "string",
464 Struct: "struct",
465 UnsafePointer: "unsafe.Pointer",
468 func (t *uncommonType) uncommon() *uncommonType {
469 return t
472 func (t *uncommonType) PkgPath() string {
473 if t == nil || t.pkgPath == nil {
474 return ""
476 return *t.pkgPath
479 func (t *uncommonType) Name() string {
480 if t == nil || t.name == nil {
481 return ""
483 return *t.name
486 func (t *rtype) rawString() string { return *t.string }
488 func (t *rtype) String() string {
489 // For gccgo, strip out quoted strings.
490 s := *t.string
491 var q bool
492 r := make([]byte, len(s))
493 j := 0
494 for i := 0; i < len(s); i++ {
495 if s[i] == '\t' {
496 q = !q
497 } else if !q {
498 r[j] = s[i]
499 j++
502 return string(r[:j])
505 func (t *rtype) Size() uintptr { return t.size }
507 func (t *rtype) Bits() int {
508 if t == nil {
509 panic("reflect: Bits of nil Type")
511 k := t.Kind()
512 if k < Int || k > Complex128 {
513 panic("reflect: Bits of non-arithmetic Type " + t.String())
515 return int(t.size) * 8
518 func (t *rtype) Align() int { return int(t.align) }
520 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
522 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
524 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
526 func (t *rtype) common() *rtype { return t }
528 func (t *uncommonType) Method(i int) (m Method) {
529 if t == nil || i < 0 || i >= len(t.methods) {
530 panic("reflect: Method index out of range")
532 found := false
533 for mi := range t.methods {
534 if t.methods[mi].pkgPath == nil {
535 if i == 0 {
536 i = mi
537 found = true
538 break
539 }
540 i--
543 if !found {
544 panic("reflect: Method index out of range")
547 p := &t.methods[i]
548 if p.name != nil {
549 m.Name = *p.name
551 fl := flag(Func)
552 if p.pkgPath != nil {
553 m.PkgPath = *p.pkgPath
554 fl |= flagStickyRO
556 mt := p.typ
557 m.Type = toType(mt)
558 x := new(unsafe.Pointer)
559 *x = unsafe.Pointer(&p.tfn)
560 m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
561 m.Index = i
562 return
565 func (t *uncommonType) NumMethod() int {
566 if t == nil {
567 return 0
569 c := 0
570 for i := range t.methods {
571 if t.methods[i].pkgPath == nil {
572 c++
575 return c
578 func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
579 if t == nil {
580 return
582 var p *method
583 for i := range t.methods {
584 p = &t.methods[i]
585 if p.pkgPath == nil && p.name != nil && *p.name == name {
586 return t.Method(i), true
589 return
592 // TODO(rsc): gc supplies these, but they are not
593 // as efficient as they could be: they have commonType
594 // as the receiver instead of *rtype.
595 func (t *rtype) NumMethod() int {
596 if t.Kind() == Interface {
597 tt := (*interfaceType)(unsafe.Pointer(t))
598 return tt.NumMethod()
600 return t.uncommonType.NumMethod()
603 func (t *rtype) Method(i int) (m Method) {
604 if t.Kind() == Interface {
605 tt := (*interfaceType)(unsafe.Pointer(t))
606 return tt.Method(i)
608 return t.uncommonType.Method(i)
611 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
612 if t.Kind() == Interface {
613 tt := (*interfaceType)(unsafe.Pointer(t))
614 return tt.MethodByName(name)
616 return t.uncommonType.MethodByName(name)
619 func (t *rtype) PkgPath() string {
620 return t.uncommonType.PkgPath()
623 func (t *rtype) Name() string {
624 return t.uncommonType.Name()
627 func (t *rtype) ChanDir() ChanDir {
628 if t.Kind() != Chan {
629 panic("reflect: ChanDir of non-chan type")
631 tt := (*chanType)(unsafe.Pointer(t))
632 return ChanDir(tt.dir)
635 func (t *rtype) IsVariadic() bool {
636 if t.Kind() != Func {
637 panic("reflect: IsVariadic of non-func type")
639 tt := (*funcType)(unsafe.Pointer(t))
640 return tt.dotdotdot
643 func (t *rtype) Elem() Type {
644 switch t.Kind() {
645 case Array:
646 tt := (*arrayType)(unsafe.Pointer(t))
647 return toType(tt.elem)
648 case Chan:
649 tt := (*chanType)(unsafe.Pointer(t))
650 return toType(tt.elem)
651 case Map:
652 tt := (*mapType)(unsafe.Pointer(t))
653 return toType(tt.elem)
654 case Ptr:
655 tt := (*ptrType)(unsafe.Pointer(t))
656 return toType(tt.elem)
657 case Slice:
658 tt := (*sliceType)(unsafe.Pointer(t))
659 return toType(tt.elem)
661 panic("reflect: Elem of invalid type")
664 func (t *rtype) Field(i int) StructField {
665 if t.Kind() != Struct {
666 panic("reflect: Field of non-struct type")
668 tt := (*structType)(unsafe.Pointer(t))
669 return tt.Field(i)
672 func (t *rtype) FieldByIndex(index []int) StructField {
673 if t.Kind() != Struct {
674 panic("reflect: FieldByIndex of non-struct type")
676 tt := (*structType)(unsafe.Pointer(t))
677 return tt.FieldByIndex(index)
680 func (t *rtype) FieldByName(name string) (StructField, bool) {
681 if t.Kind() != Struct {
682 panic("reflect: FieldByName of non-struct type")
684 tt := (*structType)(unsafe.Pointer(t))
685 return tt.FieldByName(name)
688 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
689 if t.Kind() != Struct {
690 panic("reflect: FieldByNameFunc of non-struct type")
692 tt := (*structType)(unsafe.Pointer(t))
693 return tt.FieldByNameFunc(match)
696 func (t *rtype) In(i int) Type {
697 if t.Kind() != Func {
698 panic("reflect: In of non-func type")
700 tt := (*funcType)(unsafe.Pointer(t))
701 return toType(tt.in[i])
704 func (t *rtype) Key() Type {
705 if t.Kind() != Map {
706 panic("reflect: Key of non-map type")
708 tt := (*mapType)(unsafe.Pointer(t))
709 return toType(tt.key)
712 func (t *rtype) Len() int {
713 if t.Kind() != Array {
714 panic("reflect: Len of non-array type")
716 tt := (*arrayType)(unsafe.Pointer(t))
717 return int(tt.len)
720 func (t *rtype) NumField() int {
721 if t.Kind() != Struct {
722 panic("reflect: NumField of non-struct type")
724 tt := (*structType)(unsafe.Pointer(t))
725 return len(tt.fields)
728 func (t *rtype) NumIn() int {
729 if t.Kind() != Func {
730 panic("reflect: NumIn of non-func type")
732 tt := (*funcType)(unsafe.Pointer(t))
733 return len(tt.in)
736 func (t *rtype) NumOut() int {
737 if t.Kind() != Func {
738 panic("reflect: NumOut of non-func type")
740 tt := (*funcType)(unsafe.Pointer(t))
741 return len(tt.out)
744 func (t *rtype) Out(i int) Type {
745 if t.Kind() != Func {
746 panic("reflect: Out of non-func type")
748 tt := (*funcType)(unsafe.Pointer(t))
749 return toType(tt.out[i])
752 func (d ChanDir) String() string {
753 switch d {
754 case SendDir:
755 return "chan<-"
756 case RecvDir:
757 return "<-chan"
758 case BothDir:
759 return "chan"
761 return "ChanDir" + strconv.Itoa(int(d))
764 // Method returns the i'th method in the type's method set.
765 func (t *interfaceType) Method(i int) (m Method) {
766 if i < 0 || i >= len(t.methods) {
767 return
769 p := &t.methods[i]
770 m.Name = *p.name
771 if p.pkgPath != nil {
772 m.PkgPath = *p.pkgPath
774 m.Type = toType(p.typ)
775 m.Index = i
776 return
779 // NumMethod returns the number of interface methods in the type's method set.
780 func (t *interfaceType) NumMethod() int { return len(t.methods) }
782 // MethodByName returns the method with the given name in the type's method set.
783 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
784 if t == nil {
785 return
787 var p *imethod
788 for i := range t.methods {
789 p = &t.methods[i]
790 if *p.name == name {
791 return t.Method(i), true
794 return
797 // A StructField describes a single field in a struct.
798 type StructField struct {
799 // Name is the field name.
800 Name string
801 // PkgPath is the package path that qualifies a lower case (unexported)
802 // field name. It is empty for upper case (exported) field names.
803 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
804 PkgPath string
806 Type Type // field type
807 Tag StructTag // field tag string
808 Offset uintptr // offset within struct, in bytes
809 Index []int // index sequence for Type.FieldByIndex
810 Anonymous bool // is an embedded field
813 // A StructTag is the tag string in a struct field.
815 // By convention, tag strings are a concatenation of
816 // optionally space-separated key:"value" pairs.
817 // Each key is a non-empty string consisting of non-control
818 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
819 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
820 // characters and Go string literal syntax.
821 type StructTag string
823 // Get returns the value associated with key in the tag string.
824 // If there is no such key in the tag, Get returns the empty string.
825 // If the tag does not have the conventional format, the value
826 // returned by Get is unspecified. To determine whether a tag is
827 // explicitly set to the empty string, use Lookup.
828 func (tag StructTag) Get(key string) string {
829 v, _ := tag.Lookup(key)
830 return v
833 // Lookup returns the value associated with key in the tag string.
834 // If the key is present in the tag the value (which may be empty)
835 // is returned. Otherwise the returned value will be the empty string.
836 // The ok return value reports whether the value was explicitly set in
837 // the tag string. If the tag does not have the conventional format,
838 // the value returned by Lookup is unspecified.
839 func (tag StructTag) Lookup(key string) (value string, ok bool) {
840 // When modifying this code, also update the validateStructTag code
841 // in cmd/vet/structtag.go.
843 for tag != "" {
844 // Skip leading space.
845 i := 0
846 for i < len(tag) && tag[i] == ' ' {
847 i++
849 tag = tag[i:]
850 if tag == "" {
851 break
854 // Scan to colon. A space, a quote or a control character is a syntax error.
855 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
856 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
857 // as it is simpler to inspect the tag's bytes than the tag's runes.
858 i = 0
859 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
862 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
863 break
865 name := string(tag[:i])
866 tag = tag[i+1:]
868 // Scan quoted string to find value.
869 i = 1
870 for i < len(tag) && tag[i] != '"' {
871 if tag[i] == '\\' {
872 i++
873 }
874 i++
876 if i >= len(tag) {
877 break
879 qvalue := string(tag[:i+1])
880 tag = tag[i+1:]
882 if key == name {
883 value, err := strconv.Unquote(qvalue)
884 if err != nil {
885 break
887 return value, true
890 return "", false
893 // Field returns the i'th struct field.
894 func (t *structType) Field(i int) (f StructField) {
895 if i < 0 || i >= len(t.fields) {
896 panic("reflect: Field index out of bounds")
898 p := &t.fields[i]
899 f.Type = toType(p.typ)
900 if p.name != nil {
901 f.Name = *p.name
902 } else {
903 t := f.Type
904 if t.Kind() == Ptr {
905 t = t.Elem()
907 f.Name = t.Name()
908 f.Anonymous = true
910 if p.pkgPath != nil {
911 f.PkgPath = *p.pkgPath
913 if p.tag != nil {
914 f.Tag = StructTag(*p.tag)
916 f.Offset = p.offset
918 // NOTE(rsc): This is the only allocation in the interface
919 // presented by a reflect.Type. It would be nice to avoid,
920 // at least in the common cases, but we need to make sure
921 // that misbehaving clients of reflect cannot affect other
922 // uses of reflect. One possibility is CL 5371098, but we
923 // postponed that ugliness until there is a demonstrated
924 // need for the performance. This is issue 2320.
925 f.Index = []int{i}
926 return
929 // TODO(gri): Should there be an error/bool indicator if the index
930 // is wrong for FieldByIndex?
932 // FieldByIndex returns the nested field corresponding to index.
933 func (t *structType) FieldByIndex(index []int) (f StructField) {
934 f.Type = toType(&t.rtype)
935 for i, x := range index {
936 if i > 0 {
937 ft := f.Type
938 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
939 ft = ft.Elem()
941 f.Type = ft
943 f = f.Type.Field(x)
945 return
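// A minimal caller-side sketch of FieldByIndex on nested structs (the
// types inner and outer are hypothetical):
//
//     type inner struct{ N int }
//     type outer struct{ In inner }
//     t := reflect.TypeOf(outer{})
//     f := t.FieldByIndex([]int{0, 0}) // outer.In.N
//     fmt.Println(f.Name, f.Type)      // "N int"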
948 // A fieldScan represents an item on the fieldByNameFunc scan work list.
949 type fieldScan struct {
950 typ *structType
951 index []int
954 // FieldByNameFunc returns the struct field with a name that satisfies the
955 // match function and a boolean to indicate if the field was found.
956 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
957 // This uses the same condition that the Go language does: there must be a unique instance
958 // of the match at a given depth level. If there are multiple instances of a match at the
959 // same depth, they annihilate each other and inhibit any possible match at a lower level.
960 // The algorithm is breadth first search, one depth level at a time.
962 // The current and next slices are work queues:
963 // current lists the fields to visit on this depth level,
964 // and next lists the fields on the next lower level.
965 current := []fieldScan{}
966 next := []fieldScan{{typ: t}}
968 // nextCount records the number of times an embedded type has been
969 // encountered and considered for queueing in the 'next' slice.
970 // We only queue the first one, but we increment the count on each.
971 // If a struct type T can be reached more than once at a given depth level,
972 // then it annihilates itself and need not be considered at all when we
973 // process that next depth level.
974 var nextCount map[*structType]int
976 // visited records the structs that have been considered already.
977 // Embedded pointer fields can create cycles in the graph of
978 // reachable embedded types; visited avoids following those cycles.
979 // It also avoids duplicated effort: if we didn't find the field in an
980 // embedded type T at level 2, we won't find it in one at level 4 either.
981 visited := map[*structType]bool{}
983 for len(next) > 0 {
984 current, next = next, current[:0]
985 count := nextCount
986 nextCount = nil
988 // Process all the fields at this depth, now listed in 'current'.
989 // The loop queues embedded fields found in 'next', for processing during the next
990 // iteration. The multiplicity of the 'current' field counts is recorded
991 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
992 for _, scan := range current {
993 t := scan.typ
994 if visited[t] {
995 // We've looked through this type before, at a higher level.
996 // That higher level would shadow the lower level we're now at,
997 // so this one can't be useful to us. Ignore it.
998 continue
1000 visited[t] = true
1001 for i := range t.fields {
1002 f := &t.fields[i]
1003 // Find name and type for field f.
1004 var fname string
1005 var ntyp *rtype
1006 if f.name != nil {
1007 fname = *f.name
1008 } else {
1009 // Anonymous field of type T or *T.
1010 // Name taken from type.
1011 ntyp = f.typ
1012 if ntyp.Kind() == Ptr {
1013 ntyp = ntyp.Elem().common()
1015 fname = ntyp.Name()
1018 // Does it match?
1019 if match(fname) {
1020 // Potential match
1021 if count[t] > 1 || ok {
1022 // Name appeared multiple times at this level: annihilate.
1023 return StructField{}, false
1025 result = t.Field(i)
1026 result.Index = nil
1027 result.Index = append(result.Index, scan.index...)
1028 result.Index = append(result.Index, i)
1029 ok = true
1030 continue
1033 // Queue embedded struct fields for processing with next level,
1034 // but only if we haven't seen a match yet at this level and only
1035 // if the embedded types haven't already been queued.
1036 if ok || ntyp == nil || ntyp.Kind() != Struct {
1037 continue
1039 ntyp = toType(ntyp).common()
1040 styp := (*structType)(unsafe.Pointer(ntyp))
1041 if nextCount[styp] > 0 {
1042 nextCount[styp] = 2 // exact multiple doesn't matter
1043 continue
1045 if nextCount == nil {
1046 nextCount = map[*structType]int{}
1048 nextCount[styp] = 1
1049 if count[t] > 1 {
1050 nextCount[styp] = 2 // exact multiple doesn't matter
1052 var index []int
1053 index = append(index, scan.index...)
1054 index = append(index, i)
1055 next = append(next, fieldScan{styp, index})
1058 if ok {
1059 break
1062 return
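// A minimal caller-side sketch of the breadth-first, annihilating match
// described above (the types A, B, and S are hypothetical):
//
//     type A struct{ X int }
//     type B struct{ X int }
//     type S struct {
//         A
//         B
//     }
//     t := reflect.TypeOf(S{})
//     eq := func(name string) func(string) bool {
//         return func(s string) bool { return s == name }
//     }
//     _, ok := t.FieldByNameFunc(eq("X")) // false: two fields X at the same depth annihilate
//     _, ok = t.FieldByNameFunc(eq("A"))  // true: the embedded field A itself matches
//     _ = ok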
1065 // FieldByName returns the struct field with the given name
1066 // and a boolean to indicate if the field was found.
1067 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1068 // Quick check for top-level name, or struct without anonymous fields.
1069 hasAnon := false
1070 if name != "" {
1071 for i := range t.fields {
1072 tf := &t.fields[i]
1073 if tf.name == nil {
1074 hasAnon = true
1075 continue
1077 if *tf.name == name {
1078 return t.Field(i), true
1082 if !hasAnon {
1083 return
1085 return t.FieldByNameFunc(func(s string) bool { return s == name })
1088 // TypeOf returns the reflection Type that represents the dynamic type of i.
1089 // If i is a nil interface value, TypeOf returns nil.
1090 func TypeOf(i interface{}) Type {
1091 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1092 return toType(eface.typ)
1095 // ptrMap is the cache for PtrTo.
1096 var ptrMap struct {
1097 sync.RWMutex
1098 m map[*rtype]*ptrType
1101 // garbage collection bytecode program for pointer to memory without pointers.
1102 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1103 type ptrDataGC struct {
1104 width uintptr // sizeof(ptr)
1105 op uintptr // _GC_APTR
1106 off uintptr // 0
1107 end uintptr // _GC_END
1110 var ptrDataGCProg = ptrDataGC{
1111 width: unsafe.Sizeof((*byte)(nil)),
1112 op: _GC_APTR,
1113 off: 0,
1114 end: _GC_END,
1117 // garbage collection bytecode program for pointer to memory with pointers.
1118 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1119 type ptrGC struct {
1120 width uintptr // sizeof(ptr)
1121 op uintptr // _GC_PTR
1122 off uintptr // 0
1123 elemgc unsafe.Pointer // element gc type
1124 end uintptr // _GC_END
1127 // PtrTo returns the pointer type with element t.
1128 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1129 func PtrTo(t Type) Type {
1130 return t.(*rtype).ptrTo()
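// A minimal caller-side sketch (the variable x is hypothetical):
//
//     var x int
//     pt := reflect.PtrTo(reflect.TypeOf(x))
//     // pt == reflect.TypeOf(&x)
//     // pt.Kind() == reflect.Ptr, pt.Elem().Kind() == reflect.Int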
1133 func (t *rtype) ptrTo() *rtype {
1134 if p := t.ptrToThis; p != nil {
1135 return p
1138 // Check the cache.
1139 ptrMap.RLock()
1140 if m := ptrMap.m; m != nil {
1141 if p := m[t]; p != nil {
1142 ptrMap.RUnlock()
1143 return &p.rtype
1146 ptrMap.RUnlock()
1148 ptrMap.Lock()
1149 if ptrMap.m == nil {
1150 ptrMap.m = make(map[*rtype]*ptrType)
1152 p := ptrMap.m[t]
1153 if p != nil {
1154 // some other goroutine won the race and created it
1155 ptrMap.Unlock()
1156 return &p.rtype
1159 s := "*" + *t.string
1161 canonicalTypeLock.RLock()
1162 r, ok := canonicalType[s]
1163 canonicalTypeLock.RUnlock()
1164 if ok {
1165 ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
1166 ptrMap.Unlock()
1167 return r.(*rtype)
1170 // Create a new ptrType starting with the description
1171 // of an *unsafe.Pointer.
1172 var iptr interface{} = (*unsafe.Pointer)(nil)
1173 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1174 pp := *prototype
1176 pp.string = &s
1178 // For the type structures linked into the binary, the
1179 // compiler provides a good hash of the string.
1180 // Create a good hash for the new string by using
1181 // the FNV-1 hash's mixing function to combine the
1182 // old hash and the new "*".
1183 // p.hash = fnv1(t.hash, '*')
1184 // This is the gccgo version.
1185 pp.hash = (t.hash << 4) + 9
1187 pp.uncommonType = nil
1188 pp.ptrToThis = nil
1189 pp.elem = t
1191 if t.kind&kindNoPointers != 0 {
1192 pp.gc = unsafe.Pointer(&ptrDataGCProg)
1193 } else {
1194 pp.gc = unsafe.Pointer(&ptrGC{
1195 width: pp.size,
1196 op: _GC_PTR,
1197 off: 0,
1198 elemgc: t.gc,
1199 end: _GC_END,
1203 q := canonicalize(&pp.rtype)
1204 p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
1206 ptrMap.m[t] = p
1207 ptrMap.Unlock()
1208 return &p.rtype
1211 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1212 func fnv1(x uint32, list ...byte) uint32 {
1213 for _, b := range list {
1214 x = x*16777619 ^ uint32(b)
1216 return x
1219 func (t *rtype) Implements(u Type) bool {
1220 if u == nil {
1221 panic("reflect: nil type passed to Type.Implements")
1223 if u.Kind() != Interface {
1224 panic("reflect: non-interface type passed to Type.Implements")
1226 return implements(u.(*rtype), t)
1229 func (t *rtype) AssignableTo(u Type) bool {
1230 if u == nil {
1231 panic("reflect: nil type passed to Type.AssignableTo")
1233 uu := u.(*rtype)
1234 return directlyAssignable(uu, t) || implements(uu, t)
1237 func (t *rtype) ConvertibleTo(u Type) bool {
1238 if u == nil {
1239 panic("reflect: nil type passed to Type.ConvertibleTo")
1241 uu := u.(*rtype)
1242 return convertOp(uu, t) != nil
1245 func (t *rtype) Comparable() bool {
1246 switch t.Kind() {
1247 case Bool, Int, Int8, Int16, Int32, Int64,
1248 Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
1249 Float32, Float64, Complex64, Complex128,
1250 Chan, Interface, Ptr, String, UnsafePointer:
1251 return true
1253 case Func, Map, Slice:
1254 return false
1256 case Array:
1257 return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
1259 case Struct:
1260 tt := (*structType)(unsafe.Pointer(t))
1261 for i := range tt.fields {
1262 if !tt.fields[i].typ.Comparable() {
1263 return false
1266 return true
1268 default:
1269 panic("reflect: impossible")
1273 // implements reports whether the type V implements the interface type T.
1274 func implements(T, V *rtype) bool {
1275 if T.Kind() != Interface {
1276 return false
1278 t := (*interfaceType)(unsafe.Pointer(T))
1279 if len(t.methods) == 0 {
1280 return true
1283 // The same algorithm applies in both cases, but the
1284 // method tables for an interface type and a concrete type
1285 // are different, so the code is duplicated.
1286 // In both cases the algorithm is a linear scan over the two
1287 // lists - T's methods and V's methods - simultaneously.
1288 // Since method tables are stored in a unique sorted order
1289 // (alphabetical, with no duplicate method names), the scan
1290 // through V's methods must hit a match for each of T's
1291 // methods along the way, or else V does not implement T.
1292 // This lets us run the scan in overall linear time instead of
1293 // the quadratic time a naive search would require.
1294 // See also ../runtime/iface.go.
1295 if V.Kind() == Interface {
1296 v := (*interfaceType)(unsafe.Pointer(V))
1297 i := 0
1298 for j := 0; j < len(v.methods); j++ {
1299 tm := &t.methods[i]
1300 vm := &v.methods[j]
1301 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
1302 if i++; i >= len(t.methods) {
1303 return true
1307 return false
1310 v := V.uncommon()
1311 if v == nil {
1312 return false
1314 i := 0
1315 for j := 0; j < len(v.methods); j++ {
1316 tm := &t.methods[i]
1317 vm := &v.methods[j]
1318 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
1319 if i++; i >= len(t.methods) {
1320 return true
1324 return false
1327 // directlyAssignable reports whether a value x of type V can be directly
1328 // assigned (using memmove) to a value of type T.
1329 // https://golang.org/doc/go_spec.html#Assignability
1330 // Ignoring the interface rules (implemented elsewhere)
1331 // and the ideal constant rules (no ideal constants at run time).
1332 func directlyAssignable(T, V *rtype) bool {
1333 // x's type V is identical to T?
1334 if T == V {
1335 return true
1338 // Otherwise at least one of T and V must be unnamed
1339 // and they must have the same kind.
1340 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1341 return false
1344 // x's type T and V must have identical underlying types.
1345 return haveIdenticalUnderlyingType(T, V, true)
1348 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1349 if cmpTags {
1350 return T == V
1353 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1354 return false
1357 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1360 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1361 if T == V {
1362 return true
1365 kind := T.Kind()
1366 if kind != V.Kind() {
1367 return false
1370 // Non-composite types of equal kind have same underlying type
1371 // (the predefined instance of the type).
1372 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1373 return true
1376 // Composite types.
1377 switch kind {
1378 case Array:
1379 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1381 case Chan:
1382 // Special case:
1383 // x is a bidirectional channel value, T is a channel type,
1384 // and x's type V and T have identical element types.
1385 if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
1386 return true
1389 // Otherwise continue test for identical underlying type.
1390 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1392 case Func:
1393 t := (*funcType)(unsafe.Pointer(T))
1394 v := (*funcType)(unsafe.Pointer(V))
1395 if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
1396 return false
1398 for i, typ := range t.in {
1399 if !haveIdenticalType(typ, v.in[i], cmpTags) {
1400 return false
1403 for i, typ := range t.out {
1404 if !haveIdenticalType(typ, v.out[i], cmpTags) {
1405 return false
1408 return true
1410 case Interface:
1411 t := (*interfaceType)(unsafe.Pointer(T))
1412 v := (*interfaceType)(unsafe.Pointer(V))
1413 if len(t.methods) == 0 && len(v.methods) == 0 {
1414 return true
1416 // Might have the same methods but still
1417 // need a run time conversion.
1418 return false
1420 case Map:
1421 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1423 case Ptr, Slice:
1424 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1426 case Struct:
1427 t := (*structType)(unsafe.Pointer(T))
1428 v := (*structType)(unsafe.Pointer(V))
1429 if len(t.fields) != len(v.fields) {
1430 return false
1432 for i := range t.fields {
1433 tf := &t.fields[i]
1434 vf := &v.fields[i]
1435 if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
1436 return false
1438 if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
1439 return false
1441 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1442 return false
1444 if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
1445 return false
1447 if tf.offset != vf.offset {
1448 return false
1451 return true
1454 return false
1457 // The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
1458 var lookupCache struct {
1459 sync.RWMutex
1460 m map[cacheKey]*rtype
1463 // A cacheKey is the key for use in the lookupCache.
1464 // Four values describe any of the types we are looking for:
1465 // type kind, one or two subtypes, and an extra integer.
1466 type cacheKey struct {
1467 kind Kind
1468 t1 *rtype
1469 t2 *rtype
1470 extra uintptr
1473 // cacheGet looks for a type under the key k in the lookupCache.
1474 // If it finds one, it returns that type.
1475 // If not, it returns nil with the cache locked.
1476 // The caller is expected to use cachePut to unlock the cache.
1477 func cacheGet(k cacheKey) Type {
1478 lookupCache.RLock()
1479 t := lookupCache.m[k]
1480 lookupCache.RUnlock()
1481 if t != nil {
1482 return t
1485 lookupCache.Lock()
1486 t = lookupCache.m[k]
1487 if t != nil {
1488 lookupCache.Unlock()
1489 return t
1492 if lookupCache.m == nil {
1493 lookupCache.m = make(map[cacheKey]*rtype)
1496 return nil
1499 // cachePut stores the given type in the cache, unlocks the cache,
1500 // and returns the type. It is expected that the cache is locked
1501 // because cacheGet returned nil.
1502 func cachePut(k cacheKey, t *rtype) Type {
1503 t = toType(t).common()
1504 lookupCache.m[k] = t
1505 lookupCache.Unlock()
1506 return t
1509 // garbage collection bytecode program for chan.
1510 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1511 type chanGC struct {
1512 width uintptr // sizeof(map)
1513 op uintptr // _GC_CHAN_PTR
1514 off uintptr // 0
1515 typ *rtype // map type
1516 end uintptr // _GC_END
1519 // The funcLookupCache caches FuncOf lookups.
1520 // FuncOf does not share the common lookupCache since cacheKey is not
1521 // sufficient to represent functions unambiguously.
1522 var funcLookupCache struct {
1523 sync.RWMutex
1524 m map[uint32][]*rtype // keyed by hash calculated in FuncOf
1527 // ChanOf returns the channel type with the given direction and element type.
1528 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1530 // The gc runtime imposes a limit of 64 kB on channel element types.
1531 // If t's size is equal to or exceeds this limit, ChanOf panics.
1532 func ChanOf(dir ChanDir, t Type) Type {
1533 typ := t.(*rtype)
1535 // Look in cache.
1536 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1537 if ch := cacheGet(ckey); ch != nil {
1538 return ch
1541 // This restriction is imposed by the gc compiler and the runtime.
1542 if typ.size >= 1<<16 {
1543 lookupCache.Unlock()
1544 panic("reflect.ChanOf: element size too large")
1547 // Look in known types.
1548 // TODO: Precedence when constructing string.
1549 var s string
1550 switch dir {
1551 default:
1552 lookupCache.Unlock()
1553 panic("reflect.ChanOf: invalid dir")
1554 case SendDir:
1555 s = "chan<- " + *typ.string
1556 case RecvDir:
1557 s = "<-chan " + *typ.string
1558 case BothDir:
1559 s = "chan " + *typ.string
1562 // Make a channel type.
1563 var ichan interface{} = (chan unsafe.Pointer)(nil)
1564 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1565 ch := *prototype
1566 ch.dir = uintptr(dir)
1567 ch.string = &s
1569 // gccgo uses a different hash.
1570 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1571 ch.hash = 0
1572 if dir&SendDir != 0 {
1573 ch.hash += 1
1575 if dir&RecvDir != 0 {
1576 ch.hash += 2
1578 ch.hash += typ.hash << 2
1579 ch.hash <<= 3
1580 ch.hash += 15
1582 ch.elem = typ
1583 ch.uncommonType = nil
1584 ch.ptrToThis = nil
1586 ch.gc = unsafe.Pointer(&chanGC{
1587 width: ch.size,
1588 op: _GC_CHAN_PTR,
1589 off: 0,
1590 typ: &ch.rtype,
1591 end: _GC_END,
1594 // INCORRECT. Uncomment to check that TestChanOfGC fails when ch.gc is wrong.
1595 // ch.gc = unsafe.Pointer(&badGC{width: ch.size, end: _GC_END})
1597 return cachePut(ckey, &ch.rtype)
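// A minimal caller-side sketch of ChanOf (the element type is hypothetical):
//
//     t := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(0))
//     // t.String() == "<-chan int"
//     // t.ChanDir() == reflect.RecvDir, t.Elem().Kind() == reflect.Int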
1600 func ismapkey(*rtype) bool // implemented in runtime
1602 // MapOf returns the map type with the given key and element types.
1603 // For example, if k represents int and e represents string,
1604 // MapOf(k, e) represents map[int]string.
1606 // If the key type is not a valid map key type (that is, if it does
1607 // not implement Go's == operator), MapOf panics.
1608 func MapOf(key, elem Type) Type {
1609 ktyp := key.(*rtype)
1610 etyp := elem.(*rtype)
1612 if !ismapkey(ktyp) {
1613 panic("reflect.MapOf: invalid key type " + ktyp.String())
1616 // Look in cache.
1617 ckey := cacheKey{Map, ktyp, etyp, 0}
1618 if mt := cacheGet(ckey); mt != nil {
1619 return mt
1622 // Look in known types.
1623 s := "map[" + *ktyp.string + "]" + *etyp.string
1625 // Make a map type.
1626 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1627 mt := **(**mapType)(unsafe.Pointer(&imap))
1628 mt.string = &s
1630 // gccgo uses a different hash
1631 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1632 mt.hash = ktyp.hash + etyp.hash + 2 + 14
1634 mt.key = ktyp
1635 mt.elem = etyp
1636 mt.uncommonType = nil
1637 mt.ptrToThis = nil
1639 mt.bucket = bucketOf(ktyp, etyp)
1640 if ktyp.size > maxKeySize {
1641 mt.keysize = uint8(ptrSize)
1642 mt.indirectkey = 1
1643 } else {
1644 mt.keysize = uint8(ktyp.size)
1645 mt.indirectkey = 0
1647 if etyp.size > maxValSize {
1648 mt.valuesize = uint8(ptrSize)
1649 mt.indirectvalue = 1
1650 } else {
1651 mt.valuesize = uint8(etyp.size)
1652 mt.indirectvalue = 0
1654 mt.bucketsize = uint16(mt.bucket.size)
1655 mt.reflexivekey = isReflexive(ktyp)
1656 mt.needkeyupdate = needKeyUpdate(ktyp)
1658 return cachePut(ckey, &mt.rtype)
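// A minimal caller-side sketch of MapOf (key and element types are
// hypothetical):
//
//     t := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
//     // t.String() == "map[string]int"
//     // reflect.MapOf(reflect.TypeOf([]int(nil)), reflect.TypeOf(0)) would panic:
//     // a slice does not support ==, so it is not a valid map key type.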
1661 // FuncOf returns the function type with the given argument and result types.
1662 // For example if k represents int and e represents string,
1663 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1665 // The variadic argument controls whether the function is variadic. FuncOf
1666 // panics if in[len(in)-1] does not represent a slice and variadic is
1667 // true.
1668 func FuncOf(in, out []Type, variadic bool) Type {
1669 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1670 panic("reflect.FuncOf: last arg of variadic func must be slice")
1673 // Make a func type.
1674 var ifunc interface{} = (func())(nil)
1675 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1676 ft := new(funcType)
1677 *ft = *prototype
1679 // Build a hash and minimally populate ft.
1680 var hash uint32
1681 var fin, fout []*rtype
1682 shift := uint(1)
1683 for _, in := range in {
1684 t := in.(*rtype)
1685 fin = append(fin, t)
1686 hash += t.hash << shift
1687 shift++
1689 shift = 2
1690 for _, out := range out {
1691 t := out.(*rtype)
1692 fout = append(fout, t)
1693 hash += t.hash << shift
1694 shift++
1696 if variadic {
1697 hash++
1699 hash <<= 4
1700 hash += 8
1701 ft.hash = hash
1702 ft.in = fin
1703 ft.out = fout
1704 ft.dotdotdot = variadic
1706 // Look in cache.
1707 funcLookupCache.RLock()
1708 for _, t := range funcLookupCache.m[hash] {
1709 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1710 funcLookupCache.RUnlock()
1711 return t
1714 funcLookupCache.RUnlock()
1716 // Not in cache, lock and retry.
1717 funcLookupCache.Lock()
1718 defer funcLookupCache.Unlock()
1719 if funcLookupCache.m == nil {
1720 funcLookupCache.m = make(map[uint32][]*rtype)
1722 for _, t := range funcLookupCache.m[hash] {
1723 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1724 return t
1728 str := funcStr(ft)
1730 // Populate the remaining fields of ft and store in cache.
1731 ft.string = &str
1732 ft.uncommonType = nil
1733 ft.ptrToThis = nil
1735 // TODO(cmang): Generate GC data for funcs.
1736 ft.gc = unsafe.Pointer(&ptrDataGCProg)
1738 funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
1740 return toType(&ft.rtype)
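// A minimal caller-side sketch of FuncOf, paired with MakeFunc from
// value.go (argument and result types are hypothetical):
//
//     ft := reflect.FuncOf([]reflect.Type{reflect.TypeOf(0)}, []reflect.Type{reflect.TypeOf("")}, false)
//     // ft.String() == "func(int) string"
//     fn := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
//         return []reflect.Value{reflect.ValueOf(strconv.Itoa(int(args[0].Int())))}
//     })
//     out := fn.Call([]reflect.Value{reflect.ValueOf(42)})
//     // out[0].String() == "42"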
1743 // funcStr builds a string representation of a funcType.
1744 func funcStr(ft *funcType) string {
1745 repr := make([]byte, 0, 64)
1746 repr = append(repr, "func("...)
1747 for i, t := range ft.in {
1748 if i > 0 {
1749 repr = append(repr, ", "...)
1751 if ft.dotdotdot && i == len(ft.in)-1 {
1752 repr = append(repr, "..."...)
1753 repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1754 } else {
1755 repr = append(repr, *t.string...)
1758 repr = append(repr, ')')
1759 if l := len(ft.out); l == 1 {
1760 repr = append(repr, ' ')
1761 } else if l > 1 {
1762 repr = append(repr, " ("...)
1764 for i, t := range ft.out {
1765 if i > 0 {
1766 repr = append(repr, ", "...)
1768 repr = append(repr, *t.string...)
1770 if len(ft.out) > 1 {
1771 repr = append(repr, ')')
1773 return string(repr)
1776 // isReflexive reports whether the == operation on the type is reflexive.
1777 // That is, x == x for all values x of type t.
1778 func isReflexive(t *rtype) bool {
1779 switch t.Kind() {
1780 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1781 return true
1782 case Float32, Float64, Complex64, Complex128, Interface:
1783 return false
1784 case Array:
1785 tt := (*arrayType)(unsafe.Pointer(t))
1786 return isReflexive(tt.elem)
1787 case Struct:
1788 tt := (*structType)(unsafe.Pointer(t))
1789 for _, f := range tt.fields {
1790 if !isReflexive(f.typ) {
1791 return false
1794 return true
1795 default:
1796 // Func, Map, Slice, Invalid
1797 panic("isReflexive called on non-key type " + t.String())
1801 // needKeyUpdate reports whether map overwrites require the key to be copied.
1802 func needKeyUpdate(t *rtype) bool {
1803 switch t.Kind() {
1804 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1805 return false
1806 case Float32, Float64, Complex64, Complex128, Interface, String:
1807 // Float keys can be updated from +0 to -0.
1808 // String keys can be updated to use a smaller backing store.
1809 // Interfaces might have floats or strings in them.
1810 return true
1811 case Array:
1812 tt := (*arrayType)(unsafe.Pointer(t))
1813 return needKeyUpdate(tt.elem)
1814 case Struct:
1815 tt := (*structType)(unsafe.Pointer(t))
1816 for _, f := range tt.fields {
1817 if needKeyUpdate(f.typ) {
1818 return true
1821 return false
1822 default:
1823 // Func, Map, Slice, Invalid
1824 panic("needKeyUpdate called on non-key type " + t.String())
1828 // Make sure these routines stay in sync with ../../runtime/hashmap.go!
1829 // These types exist only for GC, so we only fill out GC relevant info.
1830 // Currently, that's just size and the GC program. We also fill in string
1831 // for possible debugging use.
1832 const (
1833 bucketSize uintptr = 8
1834 maxKeySize uintptr = 128
1835 maxValSize uintptr = 128
1838 func bucketOf(ktyp, etyp *rtype) *rtype {
1839 // See comment on hmap.overflow in ../runtime/hashmap.go.
1840 var kind uint8
1841 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
1842 ktyp.size <= maxKeySize && etyp.size <= maxValSize {
1843 kind = kindNoPointers
1846 if ktyp.size > maxKeySize {
1847 ktyp = PtrTo(ktyp).(*rtype)
1849 if etyp.size > maxValSize {
1850 etyp = PtrTo(etyp).(*rtype)
1853 // Prepare GC data if any.
1854 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1855 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1856 // Normally the enforced limit on pointer maps is 16 bytes,
1857 // but larger ones are acceptable, 33 bytes isn't too big,
1858 // and it's easier to generate a pointer bitmap than a GC program.
1859 // Note that since the key and value are known to be <= 128 bytes,
1860 // they're guaranteed to have bitmaps instead of GC programs.
1861 // var gcdata *byte
1862 // var ptrdata uintptr
1864 size := bucketSize
1865 size = align(size, uintptr(ktyp.fieldAlign))
1866 size += bucketSize * ktyp.size
1867 size = align(size, uintptr(etyp.fieldAlign))
1868 size += bucketSize * etyp.size
1870 maxAlign := uintptr(ktyp.fieldAlign)
1871 if maxAlign < uintptr(etyp.fieldAlign) {
1872 maxAlign = uintptr(etyp.fieldAlign)
1874 if maxAlign > ptrSize {
1875 size = align(size, maxAlign)
1876 size += align(ptrSize, maxAlign) - ptrSize
1879 ovoff := size
1880 size += ptrSize
1881 if maxAlign < ptrSize {
1882 maxAlign = ptrSize
1885 var gcPtr unsafe.Pointer
1886 if kind != kindNoPointers {
1887 gc := []uintptr{size}
1888 base := bucketSize
1889 base = align(base, uintptr(ktyp.fieldAlign))
1890 if ktyp.kind&kindNoPointers == 0 {
1891 gc = append(gc, _GC_ARRAY_START, base, bucketSize, ktyp.size)
1892 gc = appendGCProgram(gc, ktyp, 0)
1893 gc = append(gc, _GC_ARRAY_NEXT)
1895 base += ktyp.size * bucketSize
1896 base = align(base, uintptr(etyp.fieldAlign))
1897 if etyp.kind&kindNoPointers == 0 {
1898 gc = append(gc, _GC_ARRAY_START, base, bucketSize, etyp.size)
1899 gc = appendGCProgram(gc, etyp, 0)
1900 gc = append(gc, _GC_ARRAY_NEXT)
1902 gc = append(gc, _GC_APTR, ovoff, _GC_END)
1903 gcPtr = unsafe.Pointer(&gc[0])
1904 } else {
1905 // No pointers in bucket.
1906 gc := [...]uintptr{size, _GC_END}
1907 gcPtr = unsafe.Pointer(&gc[0])
1910 b := &rtype{
1911 align: int8(maxAlign),
1912 fieldAlign: uint8(maxAlign),
1913 size: size,
1914 kind: kind,
1915 gc: gcPtr,
1917 s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
1918 b.string = &s
1919 return b
1922 // Take the GC program for "t" and append it to the GC program "gc".
1923 func appendGCProgram(gc []uintptr, t *rtype, offset uintptr) []uintptr {
1924 p := t.gc
1925 p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0))) // skip size
1926 loop:
1927 for {
1928 var argcnt int
1929 switch *(*uintptr)(p) {
1930 case _GC_END:
1931 // Note: _GC_END not included in append
1932 break loop
1933 case _GC_ARRAY_NEXT:
1934 argcnt = 0
1935 case _GC_APTR, _GC_STRING, _GC_EFACE, _GC_IFACE:
1936 argcnt = 1
1937 case _GC_PTR, _GC_CALL, _GC_CHAN_PTR, _GC_SLICE:
1938 argcnt = 2
1939 case _GC_ARRAY_START, _GC_REGION:
1940 argcnt = 3
1941 default:
1942 panic("unknown GC program op for " + *t.string + ": " + strconv.FormatUint(*(*uint64)(p), 10))
1944 for i := 0; i < argcnt+1; i++ {
1945 v := *(*uintptr)(p)
1946 if i == 1 {
1947 v += offset
1949 gc = append(gc, v)
1950 p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0)))
1953 return gc
1955 func hMapOf(bucket *rtype) *rtype {
1956 ptrsize := unsafe.Sizeof(uintptr(0))
1958 // make gc program & compute hmap size
1959 gc := make([]uintptr, 1) // first entry is size, filled in at the end
1960 offset := unsafe.Sizeof(uint(0)) // count
1961 offset += unsafe.Sizeof(uint32(0)) // flags
1962 offset += unsafe.Sizeof(uint32(0)) // hash0
1963 offset += unsafe.Sizeof(uint8(0)) // B
1964 offset += unsafe.Sizeof(uint8(0)) // keysize
1965 offset += unsafe.Sizeof(uint8(0)) // valuesize
1966 offset = (offset + 1) / 2 * 2
1967 offset += unsafe.Sizeof(uint16(0)) // bucketsize
1968 offset = (offset + ptrsize - 1) / ptrsize * ptrsize
1969 // gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // buckets
1970 offset += ptrsize
1971 // gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // oldbuckets
1972 offset += ptrsize
1973 offset += ptrsize // nevacuate
1974 gc = append(gc, _GC_END)
1975 gc[0] = offset
1977 h := new(rtype)
1978 h.size = offset
1979 // h.gc = unsafe.Pointer(&gc[0])
1980 s := "hmap(" + *bucket.string + ")"
1981 h.string = &s
1982 return h
1983 }
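// Illustrative sketch, not part of the upstream source: the expression
// (offset + ptrsize - 1) / ptrsize * ptrsize above rounds the running header
// offset up to pointer alignment, e.g. with ptrsize == 8 an offset of 14
// becomes 16, so the bucket pointers that follow are pointer-aligned.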
1985 // garbage collection bytecode program for slice of non-zero-length values.
1986 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1987 type sliceGC struct {
1988 width uintptr // sizeof(slice)
1989 op uintptr // _GC_SLICE
1990 off uintptr // 0
1991 elemgc unsafe.Pointer // element gc program
1992 end uintptr // _GC_END
1995 // garbage collection bytecode program for slice of zero-length values.
1996 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1997 type sliceEmptyGC struct {
1998 width uintptr // sizeof(slice)
1999 op uintptr // _GC_APTR
2000 off uintptr // 0
2001 end uintptr // _GC_END
2004 var sliceEmptyGCProg = sliceEmptyGC{
2005 width: unsafe.Sizeof([]byte(nil)),
2006 op: _GC_APTR,
2007 off: 0,
2008 end: _GC_END,
2011 // SliceOf returns the slice type with element type t.
2012 // For example, if t represents int, SliceOf(t) represents []int.
2013 func SliceOf(t Type) Type {
2014 typ := t.(*rtype)
2016 // Look in cache.
2017 ckey := cacheKey{Slice, typ, nil, 0}
2018 if slice := cacheGet(ckey); slice != nil {
2019 return slice
2022 // Look in known types.
2023 s := "[]" + *typ.string
2025 // Make a slice type.
2026 var islice interface{} = ([]unsafe.Pointer)(nil)
2027 prototype := *(**sliceType)(unsafe.Pointer(&islice))
2028 slice := *prototype
2029 slice.string = &s
2031 // gccgo uses a different hash.
2032 // slice.hash = fnv1(typ.hash, '[')
2033 slice.hash = typ.hash + 1 + 13
2035 slice.elem = typ
2036 slice.uncommonType = nil
2037 slice.ptrToThis = nil
2039 if typ.size == 0 {
2040 slice.gc = unsafe.Pointer(&sliceEmptyGCProg)
2041 } else {
2042 slice.gc = unsafe.Pointer(&sliceGC{
2043 width: slice.size,
2044 op: _GC_SLICE,
2045 off: 0,
2046 elemgc: typ.gc,
2047 end: _GC_END,
2051 // INCORRECT. Uncomment to check that TestSliceOfOfGC fails when slice.gc is wrong.
2052 // slice.gc = unsafe.Pointer(&badGC{width: slice.size, end: _GC_END})
2054 return cachePut(ckey, &slice.rtype)
2055 }
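// exampleSliceOf is an illustrative sketch, not part of the upstream source:
// it shows the caching behavior of SliceOf, where repeated calls with the
// same element type return the identical Type value, so == comparison works.
func exampleSliceOf() bool {
	a := SliceOf(TypeOf(int(0))) // a.String() == "[]int"
	b := SliceOf(TypeOf(int(0))) // second call is served from the cache
	return a == b && a.Elem().Kind() == Int
}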
2057 // The structLookupCache caches StructOf lookups.
2058 // StructOf does not share the common lookupCache since we need to pin
2059 // the memory associated with *structTypeFixedN.
2060 var structLookupCache struct {
2061 sync.RWMutex
2062 m map[uint32][]interface {
2063 common() *rtype
2064 } // keyed by hash calculated in StructOf
2067 // StructOf returns the struct type containing fields.
2068 // The Offset and Index fields are ignored and computed as they would be
2069 // by the compiler.
2071 // StructOf currently does not generate wrapper methods for embedded fields.
2072 // This limitation may be lifted in a future version.
2073 func StructOf(fields []StructField) Type {
2074 var (
2075 hash = uint32(0)
2076 size uintptr
2077 typalign int8
2079 fs = make([]structField, len(fields))
2080 repr = make([]byte, 0, 64)
2081 fset = map[string]struct{}{} // fields' names
2083 hasPtr = false // records whether at least one struct field contains pointers
2086 lastzero := uintptr(0)
2087 repr = append(repr, "struct {"...)
2088 for i, field := range fields {
2089 if field.Type == nil {
2090 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2092 f := runtimeStructField(field)
2093 ft := f.typ
2094 if ft.pointers() {
2095 hasPtr = true
2098 name := ""
2099 // Update string and hash
2100 hash = (hash << 1) + ft.hash
2101 if f.name != nil {
2102 name = *f.name
2103 repr = append(repr, (" " + name)...)
2104 } else {
2105 // Embedded field
2106 repr = append(repr, " ?"...)
2107 if f.typ.Kind() == Ptr {
2108 // Embedded ** and *interface{} are illegal
2109 elem := ft.Elem()
2110 if k := elem.Kind(); k == Ptr || k == Interface {
2111 panic("reflect.StructOf: illegal anonymous field type " + ft.String())
2113 name = elem.String()
2114 } else {
2115 name = ft.String()
2117 // TODO(sbinet) check for syntactically impossible type names?
2119 switch f.typ.Kind() {
2120 case Interface:
2121 ift := (*interfaceType)(unsafe.Pointer(ft))
2122 if len(ift.methods) > 0 {
2123 panic("reflect.StructOf: embedded field with methods not supported")
2125 case Ptr:
2126 ptr := (*ptrType)(unsafe.Pointer(ft))
2127 if unt := ptr.uncommon(); unt != nil {
2128 if len(unt.methods) > 0 {
2129 panic("reflect.StructOf: embedded field with methods not supported")
2132 if unt := ptr.elem.uncommon(); unt != nil {
2133 if len(unt.methods) > 0 {
2134 panic("reflect.StructOf: embedded field with methods not supported")
2137 default:
2138 if unt := ft.uncommon(); unt != nil {
2139 if len(unt.methods) > 0 {
2140 panic("reflect.StructOf: embedded field with methods not supported")
2145 if _, dup := fset[name]; dup {
2146 panic("reflect.StructOf: duplicate field " + name)
2148 fset[name] = struct{}{}
2150 repr = append(repr, (" " + ft.String())...)
2151 if f.tag != nil {
2152 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
2154 if i < len(fields)-1 {
2155 repr = append(repr, ';')
2158 f.offset = align(size, uintptr(ft.fieldAlign))
2159 if int8(ft.fieldAlign) > typalign {
2160 typalign = int8(ft.fieldAlign)
2162 size = f.offset + ft.size
2164 if ft.size == 0 {
2165 lastzero = size
2168 fs[i] = f
2171 if size > 0 && lastzero == size {
2172 // This is a non-zero sized struct that ends in a
2173 // zero-sized field. We add an extra byte of padding,
2174 // to ensure that taking the address of the final
2175 // zero-sized field can't manufacture a pointer to the
2176 // next object in the heap. See issue 9401.
2177 size++
2180 if len(fs) > 0 {
2181 repr = append(repr, ' ')
2183 repr = append(repr, '}')
2184 hash <<= 2
2185 str := string(repr)
2187 // Round the size up to be a multiple of the alignment.
2188 size = align(size, uintptr(typalign))
2190 // Make the struct type.
2191 var istruct interface{} = struct{}{}
2192 prototype := *(**structType)(unsafe.Pointer(&istruct))
2193 typ := new(structType)
2194 *typ = *prototype
2195 typ.fields = fs
2197 // Look in cache
2198 structLookupCache.RLock()
2199 for _, st := range structLookupCache.m[hash] {
2200 t := st.common()
2201 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2202 structLookupCache.RUnlock()
2203 return t
2206 structLookupCache.RUnlock()
2208 // not in cache, lock and retry
2209 structLookupCache.Lock()
2210 defer structLookupCache.Unlock()
2211 if structLookupCache.m == nil {
2212 structLookupCache.m = make(map[uint32][]interface {
2213 common() *rtype
2216 for _, st := range structLookupCache.m[hash] {
2217 t := st.common()
2218 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2219 return t
2223 typ.string = &str
2224 typ.hash = hash
2225 typ.size = size
2226 typ.align = typalign
2227 typ.fieldAlign = uint8(typalign)
2228 if !hasPtr {
2229 typ.kind |= kindNoPointers
2230 gc := [...]uintptr{size, _GC_END}
2231 typ.gc = unsafe.Pointer(&gc[0])
2232 } else {
2233 typ.kind &^= kindNoPointers
2234 gc := []uintptr{size}
2235 for _, ft := range fs {
2236 gc = appendGCProgram(gc, ft.typ, ft.offset)
2238 gc = append(gc, _GC_END)
2239 typ.gc = unsafe.Pointer(&gc[0])
2242 typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2243 ret := seed
2244 for _, ft := range typ.fields {
2245 o := unsafe.Pointer(uintptr(p) + ft.offset)
2246 ret = ft.typ.hashfn(o, ret)
2248 return ret
2251 typ.equalfn = func(p, q unsafe.Pointer) bool {
2252 for _, ft := range typ.fields {
2253 pi := unsafe.Pointer(uintptr(p) + ft.offset)
2254 qi := unsafe.Pointer(uintptr(q) + ft.offset)
2255 if !ft.typ.equalfn(pi, qi) {
2256 return false
2259 return true
2262 typ.kind &^= kindDirectIface
2263 typ.uncommonType = nil
2264 typ.ptrToThis = nil
2266 structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
2267 return &typ.rtype
2268 }
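// exampleStructOf is an illustrative sketch, not part of the upstream source:
// it shows the representation string, field offsets and rounded-up size that
// StructOf computes for a simple two-field struct.
func exampleStructOf() bool {
	st := StructOf([]StructField{
		{Name: "A", Type: TypeOf(int64(0))},
		{Name: "B", Type: TypeOf(false)},
	})
	b, _ := st.FieldByName("B")
	// A occupies [0,8), B sits at offset 8, and the total size is rounded
	// up to the struct's alignment of 8.
	return st.String() == "struct { A int64; B bool }" &&
		b.Offset == 8 && st.Size() == 16
}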
2270 func runtimeStructField(field StructField) structField {
2271 var name *string
2272 if field.Name == "" {
2273 t := field.Type.(*rtype)
2274 if t.Kind() == Ptr {
2275 t = t.Elem().(*rtype)
2277 } else if field.PkgPath == "" {
2278 s := field.Name
2279 name = &s
2280 b0 := s[0]
2281 if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
2282 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
2286 var pkgPath *string
2287 if field.PkgPath != "" {
2288 s := field.PkgPath
2289 pkgPath = &s
2290 // This could work with gccgo but we panic to be
2291 // compatible with gc.
2292 panic("reflect: creating a name with a package path is not supported")
2295 var tag *string
2296 if field.Tag != "" {
2297 s := string(field.Tag)
2298 tag = &s
2301 return structField{
2302 name: name,
2303 pkgPath: pkgPath,
2304 typ: field.Type.common(),
2305 tag: tag,
2306 offset: 0,
2307 }
2308 }
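// For illustration, not part of the upstream source: a field such as
// {Name: "A", Type: TypeOf(0), Tag: `json:"a"`} comes back with its tag
// preserved, while {Name: "a", Type: TypeOf(0)} panics above because an
// unexported field name requires a PkgPath, and any non-empty PkgPath
// panics as unsupported.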
2310 // ArrayOf returns the array type with the given count and element type.
2311 // For example, if t represents int, ArrayOf(5, t) represents [5]int.
2313 // If the resulting type would be larger than the available address space,
2314 // ArrayOf panics.
2315 func ArrayOf(count int, elem Type) Type {
2316 typ := elem.(*rtype)
2317 // Call SliceOf here, before the cacheGet/cachePut calls below: SliceOf
2318 // itself calls cacheGet/cachePut and thus changes the locked/unlocked
2319 // state of the lookupCache mutex, so it must not run between them.
2320 slice := SliceOf(elem)
2322 // Look in cache.
2323 ckey := cacheKey{Array, typ, nil, uintptr(count)}
2324 if array := cacheGet(ckey); array != nil {
2325 return array
2328 // Look in known types.
2329 s := "[" + strconv.Itoa(count) + "]" + *typ.string
2331 // Make an array type.
2332 var iarray interface{} = [1]unsafe.Pointer{}
2333 prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2334 array := *prototype
2335 array.string = &s
2337 // gccgo uses a different hash.
2338 // array.hash = fnv1(typ.hash, '[')
2339 // for n := uint32(count); n > 0; n >>= 8 {
2340 // array.hash = fnv1(array.hash, byte(n))
2341 // }
2342 // array.hash = fnv1(array.hash, ']')
2343 array.hash = typ.hash + 1 + 13
2345 array.elem = typ
2346 array.ptrToThis = nil
// Guard zero-size element types: the address-space overflow check divides
// by typ.size, which would otherwise panic with a divide-by-zero.
if typ.size > 0 {
	max := ^uintptr(0) / typ.size
	if uintptr(count) > max {
		panic("reflect.ArrayOf: array size would exceed virtual address space")
	}
}
2351 array.size = typ.size * uintptr(count)
2352 // if count > 0 && typ.ptrdata != 0 {
2353 // array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
2354 // }
2355 array.align = typ.align
2356 array.fieldAlign = typ.fieldAlign
2357 array.uncommonType = nil
2358 array.len = uintptr(count)
2359 array.slice = slice.(*rtype)
2361 array.kind &^= kindNoPointers
2362 switch {
2363 case typ.kind&kindNoPointers != 0 || array.size == 0:
2364 // No pointers.
2365 array.kind |= kindNoPointers
2366 gc := [...]uintptr{array.size, _GC_END}
2367 array.gc = unsafe.Pointer(&gc[0])
2369 case count == 1:
2370 // In memory, 1-element array looks just like the element.
2371 array.kind |= typ.kind & kindGCProg
2372 array.gc = typ.gc
2374 default:
2375 gc := []uintptr{array.size, _GC_ARRAY_START, 0, uintptr(count), typ.size}
2376 gc = appendGCProgram(gc, typ, 0)
2377 gc = append(gc, _GC_ARRAY_NEXT, _GC_END)
2378 array.gc = unsafe.Pointer(&gc[0])
2381 array.kind &^= kindDirectIface
2383 array.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
2384 ret := seed
2385 for i := 0; i < count; i++ {
2386 ret = typ.hashfn(p, ret)
2387 p = unsafe.Pointer(uintptr(p) + typ.size)
2389 return ret
2392 array.equalfn = func(p1, p2 unsafe.Pointer) bool {
2393 for i := 0; i < count; i++ {
2394 if !typ.equalfn(p1, p2) {
2395 return false
2397 p1 = unsafe.Pointer(uintptr(p1) + typ.size)
2398 p2 = unsafe.Pointer(uintptr(p2) + typ.size)
2400 return true
2403 return cachePut(ckey, &array.rtype)
2404 }
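// exampleArrayOf is an illustrative sketch, not part of the upstream source:
// the array type built above reports the element count and count*elemsize as
// its size, and repeated calls are served from the cache.
func exampleArrayOf() bool {
	at := ArrayOf(5, TypeOf(int32(0))) // describes [5]int32
	return at.Len() == 5 &&
		at.Size() == 5*4 &&
		at == ArrayOf(5, TypeOf(int32(0)))
}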
2406 func appendVarint(x []byte, v uintptr) []byte {
2407 for ; v >= 0x80; v >>= 7 {
2408 x = append(x, byte(v|0x80))
2410 x = append(x, byte(v))
2411 return x
2412 }
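// For illustration, not part of the upstream source: the loop above encodes
// the value in little-endian base-128 groups, setting the high bit of every
// byte except the last as a continuation flag. For example, 300 = 2*128 + 44,
// so appendVarint(nil, 300) returns []byte{0xac, 0x02} (44|0x80 == 0xac).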
2414 // toType converts from a *rtype to a Type that can be returned
2415 // to the client of package reflect. In gc, the only concern is that
2416 // a nil *rtype must be replaced by a nil Type, but in gccgo this
2417 // function takes care of ensuring that multiple *rtype for the same
2418 // type are coalesced into a single Type.
2419 var canonicalType = make(map[string]Type)
2421 var canonicalTypeLock sync.RWMutex
2423 func canonicalize(t Type) Type {
2424 if t == nil {
2425 return nil
2427 s := t.rawString()
2428 canonicalTypeLock.RLock()
2429 if r, ok := canonicalType[s]; ok {
2430 canonicalTypeLock.RUnlock()
2431 return r
2433 canonicalTypeLock.RUnlock()
2434 canonicalTypeLock.Lock()
2435 if r, ok := canonicalType[s]; ok {
2436 canonicalTypeLock.Unlock()
2437 return r
2439 canonicalType[s] = t
2440 canonicalTypeLock.Unlock()
2441 return t
2444 func toType(p *rtype) Type {
2445 if p == nil {
2446 return nil
2448 return canonicalize(p)
2449 }
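// For illustration, not part of the upstream source: canonicalize uses a
// read-lock fast path and then re-checks the map under the write lock, so
// concurrent first-time callers for the same rawString agree on a single
// winner; that is what keeps toType(a) == toType(b) true whenever a and b
// describe the identical type.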
2451 // ifaceIndir reports whether t is stored indirectly in an interface value.
2452 func ifaceIndir(t *rtype) bool {
2453 return t.kind&kindDirectIface == 0
2456 // Layout matches runtime.BitVector (well enough).
2457 type bitVector struct {
2458 n uint32 // number of bits
2459 data []byte
2462 // append a bit to the bitmap.
2463 func (bv *bitVector) append(bit uint8) {
2464 if bv.n%8 == 0 {
2465 bv.data = append(bv.data, 0)
2467 bv.data[bv.n/8] |= bit << (bv.n % 8)
2468 bv.n++
2469 }
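// For illustration, not part of the upstream source: starting from a zero
// bitVector, append(1), append(0), append(1) leaves bv.n == 3 and
// bv.data[0] == 0b101; addTypeBits below sets bit i when the i'th
// pointer-sized word of the type contains a pointer.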
2471 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2472 if t.kind&kindNoPointers != 0 {
2473 return
2476 switch Kind(t.kind & kindMask) {
2477 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
2478 // 1 pointer at start of representation
2479 for bv.n < uint32(offset/uintptr(ptrSize)) {
2480 bv.append(0)
2482 bv.append(1)
2484 case Interface:
2485 // 2 pointers
2486 for bv.n < uint32(offset/uintptr(ptrSize)) {
2487 bv.append(0)
2489 bv.append(1)
2490 bv.append(1)
2492 case Array:
2493 // repeat inner type
2494 tt := (*arrayType)(unsafe.Pointer(t))
2495 for i := 0; i < int(tt.len); i++ {
2496 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2499 case Struct:
2500 // apply fields
2501 tt := (*structType)(unsafe.Pointer(t))
2502 for i := range tt.fields {
2503 f := &tt.fields[i]
2504 addTypeBits(bv, offset+f.offset, f.typ)