compiler, runtime: drop size arguments to hash/equal functions
libgo/go/reflect/type.go
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package reflect implements run-time reflection, allowing a program to
6 // manipulate objects with arbitrary types. The typical use is to take a value
7 // with static type interface{} and extract its dynamic type information by
8 // calling TypeOf, which returns a Type.
9 //
10 // A call to ValueOf returns a Value representing the run-time data.
11 // Zero takes a Type and returns a Value representing a zero value
12 // for that type.
14 // See "The Laws of Reflection" for an introduction to reflection in Go:
15 // https://golang.org/doc/articles/laws_of_reflection.html
16 package reflect
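// Illustrative sketch, not part of this source file: the package comment above
// describes TypeOf, ValueOf, and Zero. A minimal program using those three
// exported entry points (assuming only the public reflect API) might look like:
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		x := 3.4
//		t := reflect.TypeOf(x)  // dynamic type: float64
//		v := reflect.ValueOf(x) // run-time data
//		z := reflect.Zero(t)    // zero Value of the same type
//		fmt.Println(t, v.Float(), z.Float()) // float64 3.4 0
//	}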
18 import (
19 "strconv"
20 "sync"
21 "unsafe"
24 // Type is the representation of a Go type.
26 // Not all methods apply to all kinds of types. Restrictions,
27 // if any, are noted in the documentation for each method.
28 // Use the Kind method to find out the kind of type before
29 // calling kind-specific methods. Calling a method
30 // inappropriate to the kind of type causes a run-time panic.
31 type Type interface {
32 // Methods applicable to all types.
34 // Align returns the alignment in bytes of a value of
35 // this type when allocated in memory.
36 Align() int
38 // FieldAlign returns the alignment in bytes of a value of
39 // this type when used as a field in a struct.
40 FieldAlign() int
42 // Method returns the i'th method in the type's method set.
43 // It panics if i is not in the range [0, NumMethod()).
45 // For a non-interface type T or *T, the returned Method's Type and Func
46 // fields describe a function whose first argument is the receiver.
48 // For an interface type, the returned Method's Type field gives the
49 // method signature, without a receiver, and the Func field is nil.
50 Method(int) Method
52 // MethodByName returns the method with that name in the type's
53 // method set and a boolean indicating if the method was found.
55 // For a non-interface type T or *T, the returned Method's Type and Func
56 // fields describe a function whose first argument is the receiver.
58 // For an interface type, the returned Method's Type field gives the
59 // method signature, without a receiver, and the Func field is nil.
60 MethodByName(string) (Method, bool)
62 // NumMethod returns the number of methods in the type's method set.
63 NumMethod() int
65 // Name returns the type's name within its package.
66 // It returns an empty string for unnamed types.
67 Name() string
69 // PkgPath returns a named type's package path, that is, the import path
70 // that uniquely identifies the package, such as "encoding/base64".
71 // If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
72 // the package path will be the empty string.
73 PkgPath() string
75 // Size returns the number of bytes needed to store
76 // a value of the given type; it is analogous to unsafe.Sizeof.
77 Size() uintptr
79 // String returns a string representation of the type.
80 // The string representation may use shortened package names
81 // (e.g., base64 instead of "encoding/base64") and is not
82 // guaranteed to be unique among types. To test for equality,
83 // compare the Types directly.
84 String() string
86 // Used internally by gccgo; the returned string retains the quoting.
87 rawString() string
89 // Kind returns the specific kind of this type.
90 Kind() Kind
92 // Implements reports whether the type implements the interface type u.
93 Implements(u Type) bool
95 // AssignableTo reports whether a value of the type is assignable to type u.
96 AssignableTo(u Type) bool
98 // ConvertibleTo reports whether a value of the type is convertible to type u.
99 ConvertibleTo(u Type) bool
101 // Comparable reports whether values of this type are comparable.
102 Comparable() bool
104 // Methods applicable only to some types, depending on Kind.
105 // The methods allowed for each kind are:
107 // Int*, Uint*, Float*, Complex*: Bits
108 // Array: Elem, Len
109 // Chan: ChanDir, Elem
110 // Func: In, NumIn, Out, NumOut, IsVariadic.
111 // Map: Key, Elem
112 // Ptr: Elem
113 // Slice: Elem
114 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
116 // Bits returns the size of the type in bits.
117 // It panics if the type's Kind is not one of the
118 // sized or unsized Int, Uint, Float, or Complex kinds.
119 Bits() int
121 // ChanDir returns a channel type's direction.
122 // It panics if the type's Kind is not Chan.
123 ChanDir() ChanDir
125 // IsVariadic reports whether a function type's final input parameter
126 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
127 // implicit actual type []T.
129 // For concreteness, if t represents func(x int, y ... float64), then
131 // t.NumIn() == 2
132 // t.In(0) is the reflect.Type for "int"
133 // t.In(1) is the reflect.Type for "[]float64"
134 // t.IsVariadic() == true
136 // IsVariadic panics if the type's Kind is not Func.
137 IsVariadic() bool
139 // Elem returns a type's element type.
140 // It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
141 Elem() Type
143 // Field returns a struct type's i'th field.
144 // It panics if the type's Kind is not Struct.
145 // It panics if i is not in the range [0, NumField()).
146 Field(i int) StructField
148 // FieldByIndex returns the nested field corresponding
149 // to the index sequence. It is equivalent to calling Field
150 // successively for each index i.
151 // It panics if the type's Kind is not Struct.
152 FieldByIndex(index []int) StructField
154 // FieldByName returns the struct field with the given name
155 // and a boolean indicating if the field was found.
156 FieldByName(name string) (StructField, bool)
158 // FieldByNameFunc returns the first struct field with a name
159 // that satisfies the match function and a boolean indicating if
160 // the field was found.
161 FieldByNameFunc(match func(string) bool) (StructField, bool)
163 // In returns the type of a function type's i'th input parameter.
164 // It panics if the type's Kind is not Func.
165 // It panics if i is not in the range [0, NumIn()).
166 In(i int) Type
168 // Key returns a map type's key type.
169 // It panics if the type's Kind is not Map.
170 Key() Type
172 // Len returns an array type's length.
173 // It panics if the type's Kind is not Array.
174 Len() int
176 // NumField returns a struct type's field count.
177 // It panics if the type's Kind is not Struct.
178 NumField() int
180 // NumIn returns a function type's input parameter count.
181 // It panics if the type's Kind is not Func.
182 NumIn() int
184 // NumOut returns a function type's output parameter count.
185 // It panics if the type's Kind is not Func.
186 NumOut() int
188 // Out returns the type of a function type's i'th output parameter.
189 // It panics if the type's Kind is not Func.
190 // It panics if i is not in the range [0, NumOut()).
191 Out(i int) Type
193 common() *rtype
194 uncommon() *uncommonType
197 // BUG(rsc): FieldByName and related functions consider struct field names to be equal
198 // if the names are equal, even if they are unexported names originating
199 // in different packages. The practical effect of this is that the result of
200 // t.FieldByName("x") is not well defined if the struct type t contains
201 // multiple fields named x (embedded from different packages).
202 // FieldByName may return one of the fields named x or may report that there are none.
203 // See golang.org/issue/4876 for more details.
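// Illustrative sketch, not part of this source file: the Type interface above
// splits its methods into general ones and kind-specific ones that panic on the
// wrong kind. A hedged example of guarding kind-specific calls with Kind
// (describe is a hypothetical helper; assumes "reflect" and "strconv" imported):
//
//	func describe(t reflect.Type) string {
//		switch t.Kind() {
//		case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
//			return t.Kind().String() + " of " + t.Elem().String()
//		case reflect.Struct:
//			return "struct with " + strconv.Itoa(t.NumField()) + " fields"
//		default:
//			return t.String()
//		}
//	}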
205 /*
206 * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
207 * A few are known to ../runtime/type.go to convey to debuggers.
208 * They are also known to ../runtime/type.go.
209 */
211 // A Kind represents the specific kind of type that a Type represents.
212 // The zero Kind is not a valid kind.
213 type Kind uint
215 const (
216 Invalid Kind = iota
217 Bool
218 Int
219 Int8
220 Int16
221 Int32
222 Int64
223 Uint
224 Uint8
225 Uint16
226 Uint32
227 Uint64
228 Uintptr
229 Float32
230 Float64
231 Complex64
232 Complex128
233 Array
234 Chan
235 Func
236 Interface
237 Map
238 Ptr
239 Slice
240 String
241 Struct
242 UnsafePointer
245 // rtype is the common implementation of most values.
246 // It is embedded in other, public struct types, but always
247 // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
248 // so that code cannot convert from, say, *arrayType to *ptrType.
249 type rtype struct {
250 kind uint8 // enumeration for C
251 align int8 // alignment of variable with this type
252 fieldAlign uint8 // alignment of struct field with this type
253 _ uint8 // unused/padding
254 size uintptr
255 hash uint32 // hash of type; avoids computation in hash tables
257 hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
258 equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
260 gc unsafe.Pointer // garbage collection data
261 string *string // string form; unnecessary but undeniably useful
262 *uncommonType // (relatively) uncommon fields
263 ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
266 // Method on non-interface type
267 type method struct {
268 name *string // name of method
269 pkgPath *string // nil for exported Names; otherwise import path
270 mtyp *rtype // method type (without receiver)
271 typ *rtype // .(*FuncType) underneath (with receiver)
272 tfn unsafe.Pointer // fn used for normal method call
275 // uncommonType is present only for types with names or methods
276 // (if T is a named type, the uncommonTypes for T and *T have methods).
277 // Using a pointer to this struct reduces the overall size required
278 // to describe an unnamed type with no methods.
279 type uncommonType struct {
280 name *string // name of type
281 pkgPath *string // import path; nil for built-in types like int, string
282 methods []method // methods associated with type
285 // ChanDir represents a channel type's direction.
286 type ChanDir int
288 const (
289 RecvDir ChanDir = 1 << iota // <-chan
290 SendDir // chan<-
291 BothDir = RecvDir | SendDir // chan
294 // arrayType represents a fixed array type.
295 type arrayType struct {
296 rtype `reflect:"array"`
297 elem *rtype // array element type
298 slice *rtype // slice type
299 len uintptr
302 // chanType represents a channel type.
303 type chanType struct {
304 rtype `reflect:"chan"`
305 elem *rtype // channel element type
306 dir uintptr // channel direction (ChanDir)
309 // funcType represents a function type.
310 type funcType struct {
311 rtype `reflect:"func"`
312 dotdotdot bool // last input parameter is ...
313 in []*rtype // input parameter types
314 out []*rtype // output parameter types
317 // imethod represents a method on an interface type
318 type imethod struct {
319 name *string // name of method
320 pkgPath *string // nil for exported Names; otherwise import path
321 typ *rtype // .(*FuncType) underneath
324 // interfaceType represents an interface type.
325 type interfaceType struct {
326 rtype `reflect:"interface"`
327 methods []imethod // sorted by hash
330 // mapType represents a map type.
331 type mapType struct {
332 rtype `reflect:"map"`
333 key *rtype // map key type
334 elem *rtype // map element (value) type
335 bucket *rtype // internal bucket structure
336 hmap *rtype // internal map header
337 keysize uint8 // size of key slot
338 indirectkey uint8 // store ptr to key instead of key itself
339 valuesize uint8 // size of value slot
340 indirectvalue uint8 // store ptr to value instead of value itself
341 bucketsize uint16 // size of bucket
342 reflexivekey bool // true if k==k for all keys
343 needkeyupdate bool // true if we need to update key on an overwrite
346 // ptrType represents a pointer type.
347 type ptrType struct {
348 rtype `reflect:"ptr"`
349 elem *rtype // pointer element (pointed at) type
352 // sliceType represents a slice type.
353 type sliceType struct {
354 rtype `reflect:"slice"`
355 elem *rtype // slice element type
358 // Struct field
359 type structField struct {
360 name *string // nil for embedded fields
361 pkgPath *string // nil for exported Names; otherwise import path
362 typ *rtype // type of field
363 tag *string // nil if no tag
364 offset uintptr // byte offset of field within struct
367 // structType represents a struct type.
368 type structType struct {
369 rtype `reflect:"struct"`
370 fields []structField // sorted by offset
373 // NOTE: These are copied from ../runtime/mgc0.h.
374 // They must be kept in sync.
375 const (
376 _GC_END = iota
377 _GC_PTR
378 _GC_APTR
379 _GC_ARRAY_START
380 _GC_ARRAY_NEXT
381 _GC_CALL
382 _GC_CHAN_PTR
383 _GC_STRING
384 _GC_EFACE
385 _GC_IFACE
386 _GC_SLICE
387 _GC_REGION
388 _GC_NUM_INSTR
391 /*
392 * The compiler knows the exact layout of all the data structures above.
393 * The compiler does not know about the data structures and methods below.
394 */
396 // Method represents a single method.
397 type Method struct {
398 // Name is the method name.
399 // PkgPath is the package path that qualifies a lower case (unexported)
400 // method name. It is empty for upper case (exported) method names.
401 // The combination of PkgPath and Name uniquely identifies a method
402 // in a method set.
403 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
404 Name string
405 PkgPath string
407 Type Type // method type
408 Func Value // func with receiver as first argument
409 Index int // index for Type.Method
412 const (
413 kindDirectIface = 1 << 5
414 kindGCProg = 1 << 6 // Type.gc points to GC program
415 kindNoPointers = 1 << 7
416 kindMask = (1 << 5) - 1
419 func (k Kind) String() string {
420 if int(k) < len(kindNames) {
421 return kindNames[k]
423 return "kind" + strconv.Itoa(int(k))
426 var kindNames = []string{
427 Invalid: "invalid",
428 Bool: "bool",
429 Int: "int",
430 Int8: "int8",
431 Int16: "int16",
432 Int32: "int32",
433 Int64: "int64",
434 Uint: "uint",
435 Uint8: "uint8",
436 Uint16: "uint16",
437 Uint32: "uint32",
438 Uint64: "uint64",
439 Uintptr: "uintptr",
440 Float32: "float32",
441 Float64: "float64",
442 Complex64: "complex64",
443 Complex128: "complex128",
444 Array: "array",
445 Chan: "chan",
446 Func: "func",
447 Interface: "interface",
448 Map: "map",
449 Ptr: "ptr",
450 Slice: "slice",
451 String: "string",
452 Struct: "struct",
453 UnsafePointer: "unsafe.Pointer",
456 func (t *uncommonType) uncommon() *uncommonType {
457 return t
460 func (t *uncommonType) PkgPath() string {
461 if t == nil || t.pkgPath == nil {
462 return ""
464 return *t.pkgPath
467 func (t *uncommonType) Name() string {
468 if t == nil || t.name == nil {
469 return ""
471 return *t.name
474 func (t *rtype) rawString() string { return *t.string }
476 func (t *rtype) String() string {
477 // For gccgo, strip out quoted strings.
478 s := *t.string
479 var q bool
480 r := make([]byte, len(s))
481 j := 0
482 for i := 0; i < len(s); i++ {
483 if s[i] == '\t' {
484 q = !q
485 } else if !q {
486 r[j] = s[i]
487 j++
488 }
489 }
490 return string(r[:j])
493 func (t *rtype) Size() uintptr { return t.size }
495 func (t *rtype) Bits() int {
496 if t == nil {
497 panic("reflect: Bits of nil Type")
499 k := t.Kind()
500 if k < Int || k > Complex128 {
501 panic("reflect: Bits of non-arithmetic Type " + t.String())
503 return int(t.size) * 8
506 func (t *rtype) Align() int { return int(t.align) }
508 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
510 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
512 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
514 func (t *rtype) common() *rtype { return t }
516 func (t *uncommonType) Method(i int) (m Method) {
517 if t == nil || i < 0 || i >= len(t.methods) {
518 panic("reflect: Method index out of range")
520 found := false
521 for mi := range t.methods {
522 if t.methods[mi].pkgPath == nil {
523 if i == 0 {
524 i = mi
525 found = true
526 break
527 }
528 i--
529 }
530 }
531 if !found {
532 panic("reflect: Method index out of range")
535 p := &t.methods[i]
536 if p.name != nil {
537 m.Name = *p.name
539 fl := flag(Func)
540 if p.pkgPath != nil {
541 m.PkgPath = *p.pkgPath
542 fl |= flagStickyRO
544 mt := p.typ
545 m.Type = toType(mt)
546 x := new(unsafe.Pointer)
547 *x = unsafe.Pointer(&p.tfn)
548 m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
549 m.Index = i
550 return
553 func (t *uncommonType) NumMethod() int {
554 if t == nil {
555 return 0
557 c := 0
558 for i := range t.methods {
559 if t.methods[i].pkgPath == nil {
560 c++
561 }
562 }
563 return c
566 func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
567 if t == nil {
568 return
570 var p *method
571 for i := range t.methods {
572 p = &t.methods[i]
573 if p.pkgPath == nil && p.name != nil && *p.name == name {
574 return t.Method(i), true
577 return
580 // TODO(rsc): gc supplies these, but they are not
581 // as efficient as they could be: they have commonType
582 // as the receiver instead of *rtype.
583 func (t *rtype) NumMethod() int {
584 if t.Kind() == Interface {
585 tt := (*interfaceType)(unsafe.Pointer(t))
586 return tt.NumMethod()
588 return t.uncommonType.NumMethod()
591 func (t *rtype) Method(i int) (m Method) {
592 if t.Kind() == Interface {
593 tt := (*interfaceType)(unsafe.Pointer(t))
594 return tt.Method(i)
596 return t.uncommonType.Method(i)
599 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
600 if t.Kind() == Interface {
601 tt := (*interfaceType)(unsafe.Pointer(t))
602 return tt.MethodByName(name)
604 return t.uncommonType.MethodByName(name)
607 func (t *rtype) PkgPath() string {
608 return t.uncommonType.PkgPath()
611 func (t *rtype) Name() string {
612 return t.uncommonType.Name()
615 func (t *rtype) ChanDir() ChanDir {
616 if t.Kind() != Chan {
617 panic("reflect: ChanDir of non-chan type")
619 tt := (*chanType)(unsafe.Pointer(t))
620 return ChanDir(tt.dir)
623 func (t *rtype) IsVariadic() bool {
624 if t.Kind() != Func {
625 panic("reflect: IsVariadic of non-func type")
627 tt := (*funcType)(unsafe.Pointer(t))
628 return tt.dotdotdot
631 func (t *rtype) Elem() Type {
632 switch t.Kind() {
633 case Array:
634 tt := (*arrayType)(unsafe.Pointer(t))
635 return toType(tt.elem)
636 case Chan:
637 tt := (*chanType)(unsafe.Pointer(t))
638 return toType(tt.elem)
639 case Map:
640 tt := (*mapType)(unsafe.Pointer(t))
641 return toType(tt.elem)
642 case Ptr:
643 tt := (*ptrType)(unsafe.Pointer(t))
644 return toType(tt.elem)
645 case Slice:
646 tt := (*sliceType)(unsafe.Pointer(t))
647 return toType(tt.elem)
649 panic("reflect: Elem of invalid type")
652 func (t *rtype) Field(i int) StructField {
653 if t.Kind() != Struct {
654 panic("reflect: Field of non-struct type")
656 tt := (*structType)(unsafe.Pointer(t))
657 return tt.Field(i)
660 func (t *rtype) FieldByIndex(index []int) StructField {
661 if t.Kind() != Struct {
662 panic("reflect: FieldByIndex of non-struct type")
664 tt := (*structType)(unsafe.Pointer(t))
665 return tt.FieldByIndex(index)
668 func (t *rtype) FieldByName(name string) (StructField, bool) {
669 if t.Kind() != Struct {
670 panic("reflect: FieldByName of non-struct type")
672 tt := (*structType)(unsafe.Pointer(t))
673 return tt.FieldByName(name)
676 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
677 if t.Kind() != Struct {
678 panic("reflect: FieldByNameFunc of non-struct type")
680 tt := (*structType)(unsafe.Pointer(t))
681 return tt.FieldByNameFunc(match)
684 func (t *rtype) In(i int) Type {
685 if t.Kind() != Func {
686 panic("reflect: In of non-func type")
688 tt := (*funcType)(unsafe.Pointer(t))
689 return toType(tt.in[i])
692 func (t *rtype) Key() Type {
693 if t.Kind() != Map {
694 panic("reflect: Key of non-map type")
696 tt := (*mapType)(unsafe.Pointer(t))
697 return toType(tt.key)
700 func (t *rtype) Len() int {
701 if t.Kind() != Array {
702 panic("reflect: Len of non-array type")
704 tt := (*arrayType)(unsafe.Pointer(t))
705 return int(tt.len)
708 func (t *rtype) NumField() int {
709 if t.Kind() != Struct {
710 panic("reflect: NumField of non-struct type")
712 tt := (*structType)(unsafe.Pointer(t))
713 return len(tt.fields)
716 func (t *rtype) NumIn() int {
717 if t.Kind() != Func {
718 panic("reflect: NumIn of non-func type")
720 tt := (*funcType)(unsafe.Pointer(t))
721 return len(tt.in)
724 func (t *rtype) NumOut() int {
725 if t.Kind() != Func {
726 panic("reflect: NumOut of non-func type")
728 tt := (*funcType)(unsafe.Pointer(t))
729 return len(tt.out)
732 func (t *rtype) Out(i int) Type {
733 if t.Kind() != Func {
734 panic("reflect: Out of non-func type")
736 tt := (*funcType)(unsafe.Pointer(t))
737 return toType(tt.out[i])
740 func (d ChanDir) String() string {
741 switch d {
742 case SendDir:
743 return "chan<-"
744 case RecvDir:
745 return "<-chan"
746 case BothDir:
747 return "chan"
749 return "ChanDir" + strconv.Itoa(int(d))
752 // Method returns the i'th method in the type's method set.
753 func (t *interfaceType) Method(i int) (m Method) {
754 if i < 0 || i >= len(t.methods) {
755 return
757 p := &t.methods[i]
758 m.Name = *p.name
759 if p.pkgPath != nil {
760 m.PkgPath = *p.pkgPath
762 m.Type = toType(p.typ)
763 m.Index = i
764 return
767 // NumMethod returns the number of interface methods in the type's method set.
768 func (t *interfaceType) NumMethod() int { return len(t.methods) }
770 // MethodByName returns the method with the given name in the type's method set.
771 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
772 if t == nil {
773 return
775 var p *imethod
776 for i := range t.methods {
777 p = &t.methods[i]
778 if *p.name == name {
779 return t.Method(i), true
782 return
785 // A StructField describes a single field in a struct.
786 type StructField struct {
787 // Name is the field name.
788 Name string
789 // PkgPath is the package path that qualifies a lower case (unexported)
790 // field name. It is empty for upper case (exported) field names.
791 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
792 PkgPath string
794 Type Type // field type
795 Tag StructTag // field tag string
796 Offset uintptr // offset within struct, in bytes
797 Index []int // index sequence for Type.FieldByIndex
798 Anonymous bool // is an embedded field
801 // A StructTag is the tag string in a struct field.
803 // By convention, tag strings are a concatenation of
804 // optionally space-separated key:"value" pairs.
805 // Each key is a non-empty string consisting of non-control
806 // characters other than space (U+0020 ' '), quote (U+0022 '"'),
807 // and colon (U+003A ':'). Each value is quoted using U+0022 '"'
808 // characters and Go string literal syntax.
809 type StructTag string
811 // Get returns the value associated with key in the tag string.
812 // If there is no such key in the tag, Get returns the empty string.
813 // If the tag does not have the conventional format, the value
814 // returned by Get is unspecified. To determine whether a tag is
815 // explicitly set to the empty string, use Lookup.
816 func (tag StructTag) Get(key string) string {
817 v, _ := tag.Lookup(key)
818 return v
821 // Lookup returns the value associated with key in the tag string.
822 // If the key is present in the tag the value (which may be empty)
823 // is returned. Otherwise the returned value will be the empty string.
824 // The ok return value reports whether the value was explicitly set in
825 // the tag string. If the tag does not have the conventional format,
826 // the value returned by Lookup is unspecified.
827 func (tag StructTag) Lookup(key string) (value string, ok bool) {
828 // When modifying this code, also update the validateStructTag code
829 // in golang.org/x/tools/cmd/vet/structtag.go.
831 for tag != "" {
832 // Skip leading space.
833 i := 0
834 for i < len(tag) && tag[i] == ' ' {
835 i++
836 }
837 tag = tag[i:]
838 if tag == "" {
839 break
842 // Scan to colon. A space, a quote or a control character is a syntax error.
843 // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
844 // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
845 // as it is simpler to inspect the tag's bytes than the tag's runes.
846 i = 0
847 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
848 i++
849 }
850 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
851 break
853 name := string(tag[:i])
854 tag = tag[i+1:]
856 // Scan quoted string to find value.
857 i = 1
858 for i < len(tag) && tag[i] != '"' {
859 if tag[i] == '\\' {
860 i++
861 }
862 i++
863 }
864 if i >= len(tag) {
865 break
867 qvalue := string(tag[:i+1])
868 tag = tag[i+1:]
870 if key == name {
871 value, err := strconv.Unquote(qvalue)
872 if err != nil {
873 break
875 return value, true
878 return "", false
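// Illustrative sketch, not part of this source file: Get and Lookup above parse
// the conventional key:"value" tag syntax. Example usage with a hypothetical
// User struct (assumes "fmt" and "reflect" imported):
//
//	type User struct {
//		Name string `json:"name" xml:"name"`
//	}
//
//	f, _ := reflect.TypeOf(User{}).FieldByName("Name")
//	fmt.Println(f.Tag.Get("json")) // name
//	v, ok := f.Tag.Lookup("yaml")  // "", false: key not present
//	fmt.Println(v, ok)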
881 // Field returns the i'th struct field.
882 func (t *structType) Field(i int) (f StructField) {
883 if i < 0 || i >= len(t.fields) {
884 panic("reflect: Field index out of bounds")
886 p := &t.fields[i]
887 f.Type = toType(p.typ)
888 if p.name != nil {
889 f.Name = *p.name
890 } else {
891 t := f.Type
892 if t.Kind() == Ptr {
893 t = t.Elem()
895 f.Name = t.Name()
896 f.Anonymous = true
898 if p.pkgPath != nil {
899 f.PkgPath = *p.pkgPath
901 if p.tag != nil {
902 f.Tag = StructTag(*p.tag)
904 f.Offset = p.offset
906 // NOTE(rsc): This is the only allocation in the interface
907 // presented by a reflect.Type. It would be nice to avoid,
908 // at least in the common cases, but we need to make sure
909 // that misbehaving clients of reflect cannot affect other
910 // uses of reflect. One possibility is CL 5371098, but we
911 // postponed that ugliness until there is a demonstrated
912 // need for the performance. This is issue 2320.
913 f.Index = []int{i}
914 return
917 // TODO(gri): Should there be an error/bool indicator if the index
918 // is wrong for FieldByIndex?
920 // FieldByIndex returns the nested field corresponding to index.
921 func (t *structType) FieldByIndex(index []int) (f StructField) {
922 f.Type = toType(&t.rtype)
923 for i, x := range index {
924 if i > 0 {
925 ft := f.Type
926 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
927 ft = ft.Elem()
929 f.Type = ft
931 f = f.Type.Field(x)
933 return
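// Illustrative sketch, not part of this source file: FieldByIndex walks an index
// sequence through nested struct types, dereferencing pointer-to-struct along the
// way. With hypothetical Inner and Outer types:
//
//	type Inner struct{ N int }
//	type Outer struct{ In Inner }
//
//	t := reflect.TypeOf(Outer{})
//	f := t.FieldByIndex([]int{0, 0}) // Outer.In, then Inner.N
//	fmt.Println(f.Name, f.Type)      // N int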
936 // A fieldScan represents an item on the fieldByNameFunc scan work list.
937 type fieldScan struct {
938 typ *structType
939 index []int
942 // FieldByNameFunc returns the struct field with a name that satisfies the
943 // match function and a boolean to indicate if the field was found.
944 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
945 // This uses the same condition that the Go language does: there must be a unique instance
946 // of the match at a given depth level. If there are multiple instances of a match at the
947 // same depth, they annihilate each other and inhibit any possible match at a lower level.
948 // The algorithm is breadth first search, one depth level at a time.
950 // The current and next slices are work queues:
951 // current lists the fields to visit on this depth level,
952 // and next lists the fields on the next lower level.
953 current := []fieldScan{}
954 next := []fieldScan{{typ: t}}
956 // nextCount records the number of times an embedded type has been
957 // encountered and considered for queueing in the 'next' slice.
958 // We only queue the first one, but we increment the count on each.
959 // If a struct type T can be reached more than once at a given depth level,
960 // then it annihilates itself and need not be considered at all when we
961 // process that next depth level.
962 var nextCount map[*structType]int
964 // visited records the structs that have been considered already.
965 // Embedded pointer fields can create cycles in the graph of
966 // reachable embedded types; visited avoids following those cycles.
967 // It also avoids duplicated effort: if we didn't find the field in an
968 // embedded type T at level 2, we won't find it in one at level 4 either.
969 visited := map[*structType]bool{}
971 for len(next) > 0 {
972 current, next = next, current[:0]
973 count := nextCount
974 nextCount = nil
976 // Process all the fields at this depth, now listed in 'current'.
977 // The loop queues embedded fields found in 'next', for processing during the next
978 // iteration. The multiplicity of the 'current' field counts is recorded
979 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
980 for _, scan := range current {
981 t := scan.typ
982 if visited[t] {
983 // We've looked through this type before, at a higher level.
984 // That higher level would shadow the lower level we're now at,
985 // so this one can't be useful to us. Ignore it.
986 continue
988 visited[t] = true
989 for i := range t.fields {
990 f := &t.fields[i]
991 // Find name and type for field f.
992 var fname string
993 var ntyp *rtype
994 if f.name != nil {
995 fname = *f.name
996 } else {
997 // Anonymous field of type T or *T.
998 // Name taken from type.
999 ntyp = f.typ
1000 if ntyp.Kind() == Ptr {
1001 ntyp = ntyp.Elem().common()
1003 fname = ntyp.Name()
1006 // Does it match?
1007 if match(fname) {
1008 // Potential match
1009 if count[t] > 1 || ok {
1010 // Name appeared multiple times at this level: annihilate.
1011 return StructField{}, false
1013 result = t.Field(i)
1014 result.Index = nil
1015 result.Index = append(result.Index, scan.index...)
1016 result.Index = append(result.Index, i)
1017 ok = true
1018 continue
1021 // Queue embedded struct fields for processing with next level,
1022 // but only if we haven't seen a match yet at this level and only
1023 // if the embedded types haven't already been queued.
1024 if ok || ntyp == nil || ntyp.Kind() != Struct {
1025 continue
1027 ntyp = toType(ntyp).common()
1028 styp := (*structType)(unsafe.Pointer(ntyp))
1029 if nextCount[styp] > 0 {
1030 nextCount[styp] = 2 // exact multiple doesn't matter
1031 continue
1033 if nextCount == nil {
1034 nextCount = map[*structType]int{}
1036 nextCount[styp] = 1
1037 if count[t] > 1 {
1038 nextCount[styp] = 2 // exact multiple doesn't matter
1040 var index []int
1041 index = append(index, scan.index...)
1042 index = append(index, i)
1043 next = append(next, fieldScan{styp, index})
1046 if ok {
1047 break
1050 return
1053 // FieldByName returns the struct field with the given name
1054 // and a boolean to indicate if the field was found.
1055 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1056 // Quick check for top-level name, or struct without anonymous fields.
1057 hasAnon := false
1058 if name != "" {
1059 for i := range t.fields {
1060 tf := &t.fields[i]
1061 if tf.name == nil {
1062 hasAnon = true
1063 continue
1065 if *tf.name == name {
1066 return t.Field(i), true
1070 if !hasAnon {
1071 return
1073 return t.FieldByNameFunc(func(s string) bool { return s == name })
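// Illustrative sketch, not part of this source file: FieldByName first checks the
// top level and only falls back to the breadth-first FieldByNameFunc scan for
// fields promoted from anonymous (embedded) fields. With hypothetical types:
//
//	type Base struct{ ID int }
//	type Derived struct {
//		Base // embedded; ID is promoted
//		Name string
//	}
//
//	f, ok := reflect.TypeOf(Derived{}).FieldByName("ID")
//	fmt.Println(ok, f.Index) // true [0 0]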
1076 // TypeOf returns the reflection Type that represents the dynamic type of i.
1077 // If i is a nil interface value, TypeOf returns nil.
1078 func TypeOf(i interface{}) Type {
1079 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1080 return toType(eface.typ)
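// Illustrative sketch, not part of this source file: as documented above, TypeOf
// returns nil for a nil interface value (assumes "fmt", "io", "reflect" imported):
//
//	var err error
//	fmt.Println(reflect.TypeOf(err))    // <nil>
//	fmt.Println(reflect.TypeOf(io.EOF)) // *errors.errorString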
1083 // ptrMap is the cache for PtrTo.
1084 var ptrMap struct {
1085 sync.RWMutex
1086 m map[*rtype]*ptrType
1089 // garbage collection bytecode program for pointer to memory without pointers.
1090 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1091 type ptrDataGC struct {
1092 width uintptr // sizeof(ptr)
1093 op uintptr // _GC_APTR
1094 off uintptr // 0
1095 end uintptr // _GC_END
1098 var ptrDataGCProg = ptrDataGC{
1099 width: unsafe.Sizeof((*byte)(nil)),
1100 op: _GC_APTR,
1101 off: 0,
1102 end: _GC_END,
1105 // garbage collection bytecode program for pointer to memory with pointers.
1106 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1107 type ptrGC struct {
1108 width uintptr // sizeof(ptr)
1109 op uintptr // _GC_PTR
1110 off uintptr // 0
1111 elemgc unsafe.Pointer // element gc type
1112 end uintptr // _GC_END
1115 // PtrTo returns the pointer type with element t.
1116 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1117 func PtrTo(t Type) Type {
1118 return t.(*rtype).ptrTo()
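// Illustrative sketch, not part of this source file: PtrTo builds the pointer
// type for any Type:
//
//	t := reflect.TypeOf(0)     // int
//	pt := reflect.PtrTo(t)     // *int
//	fmt.Println(pt, pt.Elem()) // *int int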
1121 func (t *rtype) ptrTo() *rtype {
1122 if p := t.ptrToThis; p != nil {
1123 return p
1126 // Check the cache.
1127 ptrMap.RLock()
1128 if m := ptrMap.m; m != nil {
1129 if p := m[t]; p != nil {
1130 ptrMap.RUnlock()
1131 return &p.rtype
1134 ptrMap.RUnlock()
1136 ptrMap.Lock()
1137 if ptrMap.m == nil {
1138 ptrMap.m = make(map[*rtype]*ptrType)
1140 p := ptrMap.m[t]
1141 if p != nil {
1142 // some other goroutine won the race and created it
1143 ptrMap.Unlock()
1144 return &p.rtype
1147 s := "*" + *t.string
1149 canonicalTypeLock.RLock()
1150 r, ok := canonicalType[s]
1151 canonicalTypeLock.RUnlock()
1152 if ok {
1153 ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
1154 ptrMap.Unlock()
1155 return r.(*rtype)
1158 // Create a new ptrType starting with the description
1159 // of an *unsafe.Pointer.
1160 p = new(ptrType)
1161 var iptr interface{} = (*unsafe.Pointer)(nil)
1162 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1163 *p = *prototype
1165 p.string = &s
1167 // For the type structures linked into the binary, the
1168 // compiler provides a good hash of the string.
1169 // Create a good hash for the new string by using
1170 // the FNV-1 hash's mixing function to combine the
1171 // old hash and the new "*".
1172 // p.hash = fnv1(t.hash, '*')
1173 // This is the gccgo version.
1174 p.hash = (t.hash << 4) + 9
1176 p.uncommonType = nil
1177 p.ptrToThis = nil
1178 p.elem = t
1180 if t.kind&kindNoPointers != 0 {
1181 p.gc = unsafe.Pointer(&ptrDataGCProg)
1182 } else {
1183 p.gc = unsafe.Pointer(&ptrGC{
1184 width: p.size,
1185 op: _GC_PTR,
1186 off: 0,
1187 elemgc: t.gc,
1188 end: _GC_END,
1192 q := canonicalize(&p.rtype)
1193 p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
1195 ptrMap.m[t] = p
1196 ptrMap.Unlock()
1197 return &p.rtype
1200 // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
1201 func fnv1(x uint32, list ...byte) uint32 {
1202 for _, b := range list {
1203 x = x*16777619 ^ uint32(b)
1205 return x
1208 func (t *rtype) Implements(u Type) bool {
1209 if u == nil {
1210 panic("reflect: nil type passed to Type.Implements")
1212 if u.Kind() != Interface {
1213 panic("reflect: non-interface type passed to Type.Implements")
1215 return implements(u.(*rtype), t)
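// Illustrative sketch, not part of this source file: obtaining an interface Type
// to pass to Implements typically goes through a pointer-to-interface, since an
// interface value itself only carries the dynamic type (assumes "fmt", "reflect",
// "time" imported):
//
//	stringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
//	fmt.Println(reflect.TypeOf(time.Second).Implements(stringerType)) // true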
1218 func (t *rtype) AssignableTo(u Type) bool {
1219 if u == nil {
1220 panic("reflect: nil type passed to Type.AssignableTo")
1222 uu := u.(*rtype)
1223 return directlyAssignable(uu, t) || implements(uu, t)
1226 func (t *rtype) ConvertibleTo(u Type) bool {
1227 if u == nil {
1228 panic("reflect: nil type passed to Type.ConvertibleTo")
1230 uu := u.(*rtype)
1231 return convertOp(uu, t) != nil
1234 func (t *rtype) Comparable() bool {
1235 switch t.Kind() {
1236 case Bool, Int, Int8, Int16, Int32, Int64,
1237 Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
1238 Float32, Float64, Complex64, Complex128,
1239 Chan, Interface, Ptr, String, UnsafePointer:
1240 return true
1242 case Func, Map, Slice:
1243 return false
1245 case Array:
1246 return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
1248 case Struct:
1249 tt := (*structType)(unsafe.Pointer(t))
1250 for i := range tt.fields {
1251 if !tt.fields[i].typ.Comparable() {
1252 return false
1255 return true
1257 default:
1258 panic("reflect: impossible")
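// Illustrative sketch, not part of this source file: Comparable mirrors the
// language rules for the == operator:
//
//	fmt.Println(reflect.TypeOf(1).Comparable())                    // true
//	fmt.Println(reflect.TypeOf([]int{}).Comparable())              // false: slice
//	fmt.Println(reflect.TypeOf([2]string{}).Comparable())          // true: array of comparable
//	fmt.Println(reflect.TypeOf(struct{ F func() }{}).Comparable()) // false: func field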
1262 // implements reports whether the type V implements the interface type T.
1263 func implements(T, V *rtype) bool {
1264 if T.Kind() != Interface {
1265 return false
1267 t := (*interfaceType)(unsafe.Pointer(T))
1268 if len(t.methods) == 0 {
1269 return true
1272 // The same algorithm applies in both cases, but the
1273 // method tables for an interface type and a concrete type
1274 // are different, so the code is duplicated.
1275 // In both cases the algorithm is a linear scan over the two
1276 // lists - T's methods and V's methods - simultaneously.
1277 // Since method tables are stored in a unique sorted order
1278 // (alphabetical, with no duplicate method names), the scan
1279 // through V's methods must hit a match for each of T's
1280 // methods along the way, or else V does not implement T.
1281 // This lets us run the scan in overall linear time instead of
1282 // the quadratic time a naive search would require.
1283 // See also ../runtime/iface.go.
1284 if V.Kind() == Interface {
1285 v := (*interfaceType)(unsafe.Pointer(V))
1286 i := 0
1287 for j := 0; j < len(v.methods); j++ {
1288 tm := &t.methods[i]
1289 vm := &v.methods[j]
1290 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
1291 if i++; i >= len(t.methods) {
1292 return true
1296 return false
1299 v := V.uncommon()
1300 if v == nil {
1301 return false
1303 i := 0
1304 for j := 0; j < len(v.methods); j++ {
1305 tm := &t.methods[i]
1306 vm := &v.methods[j]
1307 if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
1308 if i++; i >= len(t.methods) {
1309 return true
1313 return false
1316 // directlyAssignable reports whether a value x of type V can be directly
1317 // assigned (using memmove) to a value of type T.
1318 // https://golang.org/doc/go_spec.html#Assignability
1319 // Ignoring the interface rules (implemented elsewhere)
1320 // and the ideal constant rules (no ideal constants at run time).
1321 func directlyAssignable(T, V *rtype) bool {
1322 // x's type V is identical to T?
1323 if T == V {
1324 return true
1327 // Otherwise at least one of T and V must be unnamed
1328 // and they must have the same kind.
1329 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1330 return false
1333 // x's type T and V must have identical underlying types.
1334 return haveIdenticalUnderlyingType(T, V)
1337 func haveIdenticalUnderlyingType(T, V *rtype) bool {
1338 if T == V {
1339 return true
1342 kind := T.Kind()
1343 if kind != V.Kind() {
1344 return false
1347 // Non-composite types of equal kind have same underlying type
1348 // (the predefined instance of the type).
1349 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1350 return true
1353 // Composite types.
1354 switch kind {
1355 case Array:
1356 return T.Elem() == V.Elem() && T.Len() == V.Len()
1358 case Chan:
1359 // Special case:
1360 // x is a bidirectional channel value, T is a channel type,
1361 // and x's type V and T have identical element types.
1362 if V.ChanDir() == BothDir && T.Elem() == V.Elem() {
1363 return true
1366 // Otherwise continue test for identical underlying type.
1367 return V.ChanDir() == T.ChanDir() && T.Elem() == V.Elem()
1369 case Func:
1370 t := (*funcType)(unsafe.Pointer(T))
1371 v := (*funcType)(unsafe.Pointer(V))
1372 if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
1373 return false
1375 for i, typ := range t.in {
1376 if typ != v.in[i] {
1377 return false
1380 for i, typ := range t.out {
1381 if typ != v.out[i] {
1382 return false
1385 return true
1387 case Interface:
1388 t := (*interfaceType)(unsafe.Pointer(T))
1389 v := (*interfaceType)(unsafe.Pointer(V))
1390 if len(t.methods) == 0 && len(v.methods) == 0 {
1391 return true
1393 // Might have the same methods but still
1394 // need a run time conversion.
1395 return false
1397 case Map:
1398 return T.Key() == V.Key() && T.Elem() == V.Elem()
1400 case Ptr, Slice:
1401 return T.Elem() == V.Elem()
1403 case Struct:
1404 t := (*structType)(unsafe.Pointer(T))
1405 v := (*structType)(unsafe.Pointer(V))
1406 if len(t.fields) != len(v.fields) {
1407 return false
1409 for i := range t.fields {
1410 tf := &t.fields[i]
1411 vf := &v.fields[i]
1412 if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
1413 return false
1415 if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
1416 return false
1418 if tf.typ != vf.typ {
1419 return false
1421 if tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
1422 return false
1424 if tf.offset != vf.offset {
1425 return false
1428 return true
1431 return false
1434 // The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
1435 var lookupCache struct {
1436 sync.RWMutex
1437 m map[cacheKey]*rtype
1440 // A cacheKey is the key for use in the lookupCache.
1441 // Four values describe any of the types we are looking for:
1442 // type kind, one or two subtypes, and an extra integer.
1443 type cacheKey struct {
1444 kind Kind
1445 t1 *rtype
1446 t2 *rtype
1447 extra uintptr
1450 // cacheGet looks for a type under the key k in the lookupCache.
1451 // If it finds one, it returns that type.
1452 // If not, it returns nil with the cache locked.
1453 // The caller is expected to use cachePut to unlock the cache.
1454 func cacheGet(k cacheKey) Type {
1455 lookupCache.RLock()
1456 t := lookupCache.m[k]
1457 lookupCache.RUnlock()
1458 if t != nil {
1459 return t
1462 lookupCache.Lock()
1463 t = lookupCache.m[k]
1464 if t != nil {
1465 lookupCache.Unlock()
1466 return t
1469 if lookupCache.m == nil {
1470 lookupCache.m = make(map[cacheKey]*rtype)
1473 return nil
1476 // cachePut stores the given type in the cache, unlocks the cache,
1477 // and returns the type. It is expected that the cache is locked
1478 // because cacheGet returned nil.
1479 func cachePut(k cacheKey, t *rtype) Type {
1480 t = toType(t).common()
1481 lookupCache.m[k] = t
1482 lookupCache.Unlock()
1483 return t
1486 // garbage collection bytecode program for chan.
1487 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1488 type chanGC struct {
1489 width uintptr // sizeof(map)
1490 op uintptr // _GC_CHAN_PTR
1491 off uintptr // 0
1492 typ *rtype // map type
1493 end uintptr // _GC_END
1496 // The funcLookupCache caches FuncOf lookups.
1497 // FuncOf does not share the common lookupCache since cacheKey is not
1498 // sufficient to represent functions unambiguously.
1499 var funcLookupCache struct {
1500 sync.RWMutex
1501 m map[uint32][]*rtype // keyed by hash calculated in FuncOf
1504 // ChanOf returns the channel type with the given direction and element type.
1505 // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
1507 // The gc runtime imposes a limit of 64 kB on channel element types.
1508 // If t's size is equal to or exceeds this limit, ChanOf panics.
1509 func ChanOf(dir ChanDir, t Type) Type {
1510 typ := t.(*rtype)
1512 // Look in cache.
1513 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1514 if ch := cacheGet(ckey); ch != nil {
1515 return ch
1518 // This restriction is imposed by the gc compiler and the runtime.
1519 if typ.size >= 1<<16 {
1520 lookupCache.Unlock()
1521 panic("reflect.ChanOf: element size too large")
1524 // Look in known types.
1525 // TODO: Precedence when constructing string.
1526 var s string
1527 switch dir {
1528 default:
1529 lookupCache.Unlock()
1530 panic("reflect.ChanOf: invalid dir")
1531 case SendDir:
1532 s = "chan<- " + *typ.string
1533 case RecvDir:
1534 s = "<-chan " + *typ.string
1535 case BothDir:
1536 s = "chan " + *typ.string
1539 // Make a channel type.
1540 var ichan interface{} = (chan unsafe.Pointer)(nil)
1541 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1542 ch := new(chanType)
1543 *ch = *prototype
1544 ch.dir = uintptr(dir)
1545 ch.string = &s
1547 // gccgo uses a different hash.
1548 // ch.hash = fnv1(typ.hash, 'c', byte(dir))
1549 ch.hash = 0
1550 if dir&SendDir != 0 {
1551 ch.hash += 1
1553 if dir&RecvDir != 0 {
1554 ch.hash += 2
1556 ch.hash += typ.hash << 2
1557 ch.hash <<= 3
1558 ch.hash += 15
1560 ch.elem = typ
1561 ch.uncommonType = nil
1562 ch.ptrToThis = nil
1564 ch.gc = unsafe.Pointer(&chanGC{
1565 width: ch.size,
1566 op: _GC_CHAN_PTR,
1567 off: 0,
1568 typ: &ch.rtype,
1569 end: _GC_END,
1572 // INCORRECT. Uncomment to check that TestChanOfGC fails when ch.gc is wrong.
1573 // ch.gc = unsafe.Pointer(&badGC{width: ch.size, end: _GC_END})
1575 return cachePut(ckey, &ch.rtype)
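// Illustrative sketch, not part of this source file: ChanOf combined with
// MakeChan constructs and uses a channel type at run time:
//
//	ct := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(0)) // chan int
//	ch := reflect.MakeChan(ct, 1)
//	ch.Send(reflect.ValueOf(42))
//	v, _ := ch.Recv()
//	fmt.Println(ct, v.Int()) // chan int 42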
1578 func ismapkey(*rtype) bool // implemented in runtime
1580 // MapOf returns the map type with the given key and element types.
1581 // For example, if k represents int and e represents string,
1582 // MapOf(k, e) represents map[int]string.
1584 // If the key type is not a valid map key type (that is, if it does
1585 // not implement Go's == operator), MapOf panics.
1586 func MapOf(key, elem Type) Type {
1587 ktyp := key.(*rtype)
1588 etyp := elem.(*rtype)
1590 if !ismapkey(ktyp) {
1591 panic("reflect.MapOf: invalid key type " + ktyp.String())
1594 // Look in cache.
1595 ckey := cacheKey{Map, ktyp, etyp, 0}
1596 if mt := cacheGet(ckey); mt != nil {
1597 return mt
1600 // Look in known types.
1601 s := "map[" + *ktyp.string + "]" + *etyp.string
1603 // Make a map type.
1604 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1605 mt := new(mapType)
1606 *mt = **(**mapType)(unsafe.Pointer(&imap))
1607 mt.string = &s
1609 // gccgo uses a different hash
1610 // mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1611 mt.hash = ktyp.hash + etyp.hash + 2 + 14
1613 mt.key = ktyp
1614 mt.elem = etyp
1615 mt.uncommonType = nil
1616 mt.ptrToThis = nil
1618 mt.bucket = bucketOf(ktyp, etyp)
1619 if ktyp.size > maxKeySize {
1620 mt.keysize = uint8(ptrSize)
1621 mt.indirectkey = 1
1622 } else {
1623 mt.keysize = uint8(ktyp.size)
1624 mt.indirectkey = 0
1626 if etyp.size > maxValSize {
1627 mt.valuesize = uint8(ptrSize)
1628 mt.indirectvalue = 1
1629 } else {
1630 mt.valuesize = uint8(etyp.size)
1631 mt.indirectvalue = 0
1633 mt.bucketsize = uint16(mt.bucket.size)
1634 mt.reflexivekey = isReflexive(ktyp)
1635 mt.needkeyupdate = needKeyUpdate(ktyp)
1637 return cachePut(ckey, &mt.rtype)
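// Illustrative sketch, not part of this source file: MapOf with MakeMap builds a
// map type and value at run time:
//
//	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0)) // map[string]int
//	m := reflect.MakeMap(mt)
//	m.SetMapIndex(reflect.ValueOf("a"), reflect.ValueOf(1))
//	fmt.Println(mt, m.MapIndex(reflect.ValueOf("a")).Int()) // map[string]int 1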
1640 // FuncOf returns the function type with the given argument and result types.
1641 // For example if k represents int and e represents string,
1642 // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
1644 // The variadic argument controls whether the function is variadic. FuncOf
1645 // panics if the in[len(in)-1] does not represent a slice and variadic is
1646 // true.
1647 func FuncOf(in, out []Type, variadic bool) Type {
1648 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1649 panic("reflect.FuncOf: last arg of variadic func must be slice")
1652 // Make a func type.
1653 var ifunc interface{} = (func())(nil)
1654 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1655 ft := new(funcType)
1656 *ft = *prototype
1658 // Build a hash and minimally populate ft.
1659 var hash uint32 = 8
1660 var fin, fout []*rtype
1661 shift := uint(1)
1662 for _, in := range in {
1663 t := in.(*rtype)
1664 fin = append(fin, t)
1665 hash += t.hash << shift
1666 shift++
1668 shift = 2
1669 for _, out := range out {
1670 t := out.(*rtype)
1671 fout = append(fout, t)
1672 hash += t.hash << shift
1673 shift++
1675 if variadic {
1676 hash++
1678 hash <<= 4
1679 ft.hash = hash
1680 ft.in = fin
1681 ft.out = fout
1682 ft.dotdotdot = variadic
1684 // Look in cache.
1685 funcLookupCache.RLock()
1686 for _, t := range funcLookupCache.m[hash] {
1687 if haveIdenticalUnderlyingType(&ft.rtype, t) {
1688 funcLookupCache.RUnlock()
1689 return t
1692 funcLookupCache.RUnlock()
1694 // Not in cache, lock and retry.
1695 funcLookupCache.Lock()
1696 defer funcLookupCache.Unlock()
1697 if funcLookupCache.m == nil {
1698 funcLookupCache.m = make(map[uint32][]*rtype)
1700 for _, t := range funcLookupCache.m[hash] {
1701 if haveIdenticalUnderlyingType(&ft.rtype, t) {
1702 return t
1706 str := funcStr(ft)
1708 // Populate the remaining fields of ft and store in cache.
1709 ft.string = &str
1710 ft.uncommonType = nil
1711 ft.ptrToThis = nil
1713 // TODO(cmang): Generate GC data for funcs.
1714 ft.gc = unsafe.Pointer(&ptrDataGCProg)
1716 funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
1718 return toType(&ft.rtype)
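// Illustrative sketch, not part of this source file: FuncOf paired with MakeFunc
// builds a callable function value from a run-time constructed type (assumes
// "fmt", "reflect", "strconv" imported):
//
//	ft := reflect.FuncOf(
//		[]reflect.Type{reflect.TypeOf(0)},  // in: int
//		[]reflect.Type{reflect.TypeOf("")}, // out: string
//		false)                              // not variadic
//	fn := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
//		return []reflect.Value{reflect.ValueOf(strconv.Itoa(int(args[0].Int())))}
//	})
//	out := fn.Call([]reflect.Value{reflect.ValueOf(7)})
//	fmt.Println(ft, out[0].String()) // func(int) string 7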
1721 // funcStr builds a string representation of a funcType.
1722 func funcStr(ft *funcType) string {
1723 repr := make([]byte, 0, 64)
1724 repr = append(repr, "func("...)
1725 for i, t := range ft.in {
1726 if i > 0 {
1727 repr = append(repr, ", "...)
1729 if ft.dotdotdot && i == len(ft.in)-1 {
1730 repr = append(repr, "..."...)
1731 repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1732 } else {
1733 repr = append(repr, *t.string...)
1736 repr = append(repr, ')')
1737 if l := len(ft.out); l == 1 {
1738 repr = append(repr, ' ')
1739 } else if l > 1 {
1740 repr = append(repr, " ("...)
1742 for i, t := range ft.out {
1743 if i > 0 {
1744 repr = append(repr, ", "...)
1746 repr = append(repr, *t.string...)
1748 if len(ft.out) > 1 {
1749 repr = append(repr, ')')
1751 return string(repr)
1754 // isReflexive reports whether the == operation on the type is reflexive.
1755 // That is, x == x for all values x of type t.
1756 func isReflexive(t *rtype) bool {
1757 switch t.Kind() {
1758 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
1759 return true
1760 case Float32, Float64, Complex64, Complex128, Interface:
1761 return false
1762 case Array:
1763 tt := (*arrayType)(unsafe.Pointer(t))
1764 return isReflexive(tt.elem)
1765 case Struct:
1766 tt := (*structType)(unsafe.Pointer(t))
1767 for _, f := range tt.fields {
1768 if !isReflexive(f.typ) {
1769 return false
1772 return true
1773 default:
1774 // Func, Map, Slice, Invalid
1775 panic("isReflexive called on non-key type " + t.String())
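// Illustrative sketch, not part of this source file: the non-reflexive cases
// above are visible through ordinary map behavior. A NaN float key can be
// inserted repeatedly but never retrieved, because NaN != NaN (assumes "math"):
//
//	m := map[float64]int{}
//	m[math.NaN()] = 1
//	m[math.NaN()] = 2
//	fmt.Println(len(m), m[math.NaN()]) // 2 0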
1779 // needKeyUpdate reports whether map overwrites require the key to be copied.
1780 func needKeyUpdate(t *rtype) bool {
1781 switch t.Kind() {
1782 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
1783 return false
1784 case Float32, Float64, Complex64, Complex128, Interface, String:
1785 // Float keys can be updated from +0 to -0.
1786 // String keys can be updated to use a smaller backing store.
1787 // Interfaces might have floats or strings in them.
1788 return true
1789 case Array:
1790 tt := (*arrayType)(unsafe.Pointer(t))
1791 return needKeyUpdate(tt.elem)
1792 case Struct:
1793 tt := (*structType)(unsafe.Pointer(t))
1794 for _, f := range tt.fields {
1795 if needKeyUpdate(f.typ) {
1796 return true
1799 return false
1800 default:
1801 // Func, Map, Slice, Invalid
1802 panic("needKeyUpdate called on non-key type " + t.String())
1806 // Make sure these routines stay in sync with ../../runtime/hashmap.go!
1807 // These types exist only for GC, so we only fill out GC relevant info.
1808 // Currently, that's just size and the GC program. We also fill in string
1809 // for possible debugging use.
1810 const (
1811 bucketSize uintptr = 8
1812 maxKeySize uintptr = 128
1813 maxValSize uintptr = 128
1816 func bucketOf(ktyp, etyp *rtype) *rtype {
1817 // See comment on hmap.overflow in ../runtime/hashmap.go.
1818 var kind uint8
1819 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
1820 ktyp.size <= maxKeySize && etyp.size <= maxValSize {
1821 kind = kindNoPointers
1824 if ktyp.size > maxKeySize {
1825 ktyp = PtrTo(ktyp).(*rtype)
1827 if etyp.size > maxValSize {
1828 etyp = PtrTo(etyp).(*rtype)
1831 // Prepare GC data if any.
1832 // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
1833 // or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
1834 // Normally the enforced limit on pointer maps is 16 bytes,
1835 // but larger ones are acceptable, 33 bytes isn't too big,
1836 // and it's easier to generate a pointer bitmap than a GC program.
1837 // Note that since the key and value are known to be <= 128 bytes,
1838 // they're guaranteed to have bitmaps instead of GC programs.
1839 // var gcdata *byte
1840 // var ptrdata uintptr
1842 size := bucketSize
1843 size = align(size, uintptr(ktyp.fieldAlign))
1844 size += bucketSize * ktyp.size
1845 size = align(size, uintptr(etyp.fieldAlign))
1846 size += bucketSize * etyp.size
1848 maxAlign := uintptr(ktyp.fieldAlign)
1849 if maxAlign < uintptr(etyp.fieldAlign) {
1850 maxAlign = uintptr(etyp.fieldAlign)
1852 if maxAlign > ptrSize {
1853 size = align(size, maxAlign)
1854 size += align(ptrSize, maxAlign) - ptrSize
1857 ovoff := size
1858 size += ptrSize
1859 if maxAlign < ptrSize {
1860 maxAlign = ptrSize
1863 var gcPtr unsafe.Pointer
1864 if kind != kindNoPointers {
1865 gc := []uintptr{size}
1866 base := bucketSize
1867 base = align(base, uintptr(ktyp.fieldAlign))
1868 if ktyp.kind&kindNoPointers == 0 {
1869 gc = append(gc, _GC_ARRAY_START, base, bucketSize, ktyp.size)
1870 gc = appendGCProgram(gc, ktyp, 0)
1871 gc = append(gc, _GC_ARRAY_NEXT)
1873 base += ktyp.size * bucketSize
1874 base = align(base, uintptr(etyp.fieldAlign))
1875 if etyp.kind&kindNoPointers == 0 {
1876 gc = append(gc, _GC_ARRAY_START, base, bucketSize, etyp.size)
1877 gc = appendGCProgram(gc, etyp, 0)
1878 gc = append(gc, _GC_ARRAY_NEXT)
1880 gc = append(gc, _GC_APTR, ovoff, _GC_END)
1881 gcPtr = unsafe.Pointer(&gc[0])
1882 } else {
1883 // No pointers in bucket.
1884 gc := [...]uintptr{size, _GC_END}
1885 gcPtr = unsafe.Pointer(&gc[0])
1888 b := new(rtype)
1889 b.align = int8(maxAlign)
1890 b.fieldAlign = uint8(maxAlign)
1891 b.size = size
1892 b.kind = kind
1893 b.gc = gcPtr
1894 s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
1895 b.string = &s
1896 return b
1899 // Take the GC program for "t" and append it to the GC program "gc".
1900 func appendGCProgram(gc []uintptr, t *rtype, offset uintptr) []uintptr {
1901 p := t.gc
1902 p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0))) // skip size
1903 loop:
1904 for {
1905 var argcnt int
1906 switch *(*uintptr)(p) {
1907 case _GC_END:
1908 // Note: _GC_END not included in append
1909 break loop
1910 case _GC_ARRAY_NEXT:
1911 argcnt = 0
1912 case _GC_APTR, _GC_STRING, _GC_EFACE, _GC_IFACE:
1913 argcnt = 1
1914 case _GC_PTR, _GC_CALL, _GC_CHAN_PTR, _GC_SLICE:
1915 argcnt = 2
1916 case _GC_ARRAY_START, _GC_REGION:
1917 argcnt = 3
1918 default:
1919 panic("unknown GC program op for " + *t.string + ": " + strconv.FormatUint(*(*uint64)(p), 10))
1921 for i := 0; i < argcnt+1; i++ {
1922 v := *(*uintptr)(p)
1923 if i == 1 {
1924 v += offset
1926 gc = append(gc, v)
1927 p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0)))
1930 return gc
1932 func hMapOf(bucket *rtype) *rtype {
1933 ptrsize := unsafe.Sizeof(uintptr(0))
1935 // make gc program & compute hmap size
1936 gc := make([]uintptr, 1) // first entry is size, filled in at the end
1937 offset := unsafe.Sizeof(uint(0)) // count
1938 offset += unsafe.Sizeof(uint32(0)) // flags
1939 offset += unsafe.Sizeof(uint32(0)) // hash0
1940 offset += unsafe.Sizeof(uint8(0)) // B
1941 offset += unsafe.Sizeof(uint8(0)) // keysize
1942 offset += unsafe.Sizeof(uint8(0)) // valuesize
1943 offset = (offset + 1) / 2 * 2
1944 offset += unsafe.Sizeof(uint16(0)) // bucketsize
1945 offset = (offset + ptrsize - 1) / ptrsize * ptrsize
1946 // gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // buckets
1947 offset += ptrsize
1948 // gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // oldbuckets
1949 offset += ptrsize
1950 offset += ptrsize // nevacuate
1951 gc = append(gc, _GC_END)
1952 gc[0] = offset
1954 h := new(rtype)
1955 h.size = offset
1956 // h.gc = unsafe.Pointer(&gc[0])
1957 s := "hmap(" + *bucket.string + ")"
1958 h.string = &s
1959 return h
1962 // garbage collection bytecode program for slice of non-zero-length values.
1963 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1964 type sliceGC struct {
1965 width uintptr // sizeof(slice)
1966 op uintptr // _GC_SLICE
1967 off uintptr // 0
1968 elemgc unsafe.Pointer // element gc program
1969 end uintptr // _GC_END
1972 // garbage collection bytecode program for slice of zero-length values.
1973 // See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
1974 type sliceEmptyGC struct {
1975 width uintptr // sizeof(slice)
1976 op uintptr // _GC_APTR
1977 off uintptr // 0
1978 end uintptr // _GC_END
1981 var sliceEmptyGCProg = sliceEmptyGC{
1982 width: unsafe.Sizeof([]byte(nil)),
1983 op: _GC_APTR,
1984 off: 0,
1985 end: _GC_END,

// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice := cacheGet(ckey); slice != nil {
		return slice
	}

	// Look in known types.
	s := "[]" + *typ.string

	// Make a slice type.
	var islice interface{} = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := new(sliceType)
	*slice = *prototype
	slice.string = &s

	// gccgo uses a different hash.
	// slice.hash = fnv1(typ.hash, '[')
	slice.hash = typ.hash + 1 + 13

	slice.elem = typ
	slice.uncommonType = nil
	slice.ptrToThis = nil

	if typ.size == 0 {
		slice.gc = unsafe.Pointer(&sliceEmptyGCProg)
	} else {
		slice.gc = unsafe.Pointer(&sliceGC{
			width:  slice.size,
			op:     _GC_SLICE,
			off:    0,
			elemgc: typ.gc,
			end:    _GC_END,
		})
	}

	// INCORRECT. Uncomment to check that TestSliceOfOfGC fails when slice.gc is wrong.
	// slice.gc = unsafe.Pointer(&badGC{width: slice.size, end: _GC_END})

	return cachePut(ckey, &slice.rtype)
}
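
// Example (illustrative, not part of this file): client code reaches SliceOf
// through the public reflect API.
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		t := reflect.SliceOf(reflect.TypeOf(int(0))) // the type []int
//		s := reflect.MakeSlice(t, 0, 4)
//		s = reflect.Append(s, reflect.ValueOf(42))
//		fmt.Println(t, s.Index(0).Int()) // []int 42
//	}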

// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.RWMutex
	m map[uint32][]interface {
		common() *rtype
	} // keyed by hash calculated in StructOf
}

// StructOf returns the struct type containing fields.
// The Offset and Index fields are ignored and computed as they would be
// by the compiler.
//
// StructOf currently does not generate wrapper methods for embedded fields.
// This limitation may be lifted in a future version.
func StructOf(fields []StructField) Type {
	var (
		hash     = uint32(0)
		size     uintptr
		typalign int8

		fs   = make([]structField, len(fields))
		repr = make([]byte, 0, 64)
		fset = map[string]struct{}{} // fields' names

		hasPtr = false // records whether at least one struct-field is a pointer
	)

	repr = append(repr, "struct {"...)
	for i, field := range fields {
		if field.Type == nil {
			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
		}
		f := runtimeStructField(field)
		ft := f.typ
		if ft.pointers() {
			hasPtr = true
		}

		name := ""
		// Update string and hash
		hash = (hash << 1) + ft.hash
		if f.name != nil {
			name = *f.name
			repr = append(repr, (" " + name)...)
		} else {
			// Embedded field
			repr = append(repr, " ?"...)
			if f.typ.Kind() == Ptr {
				// Embedded ** and *interface{} are illegal
				elem := ft.Elem()
				if k := elem.Kind(); k == Ptr || k == Interface {
					panic("reflect.StructOf: illegal anonymous field type " + ft.String())
				}
				name = elem.String()
			} else {
				name = ft.String()
			}
			// TODO(sbinet) check for syntactically impossible type names?

			switch f.typ.Kind() {
			case Interface:
				ift := (*interfaceType)(unsafe.Pointer(ft))
				if len(ift.methods) > 0 {
					panic("reflect.StructOf: embedded field with methods not supported")
				}
			case Ptr:
				ptr := (*ptrType)(unsafe.Pointer(ft))
				if unt := ptr.uncommon(); unt != nil {
					if len(unt.methods) > 0 {
						panic("reflect.StructOf: embedded field with methods not supported")
					}
				}
				if unt := ptr.elem.uncommon(); unt != nil {
					if len(unt.methods) > 0 {
						panic("reflect.StructOf: embedded field with methods not supported")
					}
				}
			default:
				if unt := ft.uncommon(); unt != nil {
					if len(unt.methods) > 0 {
						panic("reflect.StructOf: embedded field with methods not supported")
					}
				}
			}
		}
		if _, dup := fset[name]; dup {
			panic("reflect.StructOf: duplicate field " + name)
		}
		fset[name] = struct{}{}

		repr = append(repr, (" " + ft.String())...)
		if f.tag != nil {
			repr = append(repr, (" " + strconv.Quote(*f.tag))...)
		}
		if i < len(fields)-1 {
			repr = append(repr, ';')
		}

		f.offset = align(size, uintptr(ft.fieldAlign))
		if int8(ft.fieldAlign) > typalign {
			typalign = int8(ft.fieldAlign)
		}
		size = f.offset + ft.size

		fs[i] = f
	}

	if len(fs) > 0 {
		repr = append(repr, ' ')
	}
	repr = append(repr, '}')
	hash <<= 2
	str := string(repr)

	// Round the size up to be a multiple of the alignment.
	size = align(size, uintptr(typalign))

	// Make the struct type.
	var istruct interface{} = struct{}{}
	prototype := *(**structType)(unsafe.Pointer(&istruct))
	typ := new(structType)
	*typ = *prototype
	typ.fields = fs

	// Look in cache
	structLookupCache.RLock()
	for _, st := range structLookupCache.m[hash] {
		t := st.common()
		if haveIdenticalUnderlyingType(&typ.rtype, t) {
			structLookupCache.RUnlock()
			return t
		}
	}
	structLookupCache.RUnlock()

	// not in cache, lock and retry
	structLookupCache.Lock()
	defer structLookupCache.Unlock()
	if structLookupCache.m == nil {
		structLookupCache.m = make(map[uint32][]interface {
			common() *rtype
		})
	}
	for _, st := range structLookupCache.m[hash] {
		t := st.common()
		if haveIdenticalUnderlyingType(&typ.rtype, t) {
			return t
		}
	}

	typ.string = &str
	typ.hash = hash
	typ.size = size
	typ.align = typalign
	typ.fieldAlign = uint8(typalign)
	if !hasPtr {
		typ.kind |= kindNoPointers
		gc := [...]uintptr{size, _GC_END}
		typ.gc = unsafe.Pointer(&gc[0])
	} else {
		typ.kind &^= kindNoPointers
		gc := []uintptr{size}
		for _, ft := range fs {
			gc = appendGCProgram(gc, ft.typ, ft.offset)
		}
		gc = append(gc, _GC_END)
		typ.gc = unsafe.Pointer(&gc[0])
	}

	typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
		ret := seed
		for _, ft := range typ.fields {
			o := unsafe.Pointer(uintptr(p) + ft.offset)
			ret = ft.typ.hashfn(o, ret)
		}
		return ret
	}

	typ.equalfn = func(p, q unsafe.Pointer) bool {
		for _, ft := range typ.fields {
			pi := unsafe.Pointer(uintptr(p) + ft.offset)
			qi := unsafe.Pointer(uintptr(q) + ft.offset)
			if !ft.typ.equalfn(pi, qi) {
				return false
			}
		}
		return true
	}

	typ.kind &^= kindDirectIface
	typ.uncommonType = nil
	typ.ptrToThis = nil

	structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
	return &typ.rtype
}
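
// Example (illustrative, not part of this file): building and using a struct
// type at run time via the public API.
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		t := reflect.StructOf([]reflect.StructField{
//			{Name: "Height", Type: reflect.TypeOf(float64(0))},
//			{Name: "Age", Type: reflect.TypeOf(int(0)), Tag: `json:"age"`},
//		})
//		v := reflect.New(t).Elem()
//		v.Field(0).SetFloat(1.85)
//		v.Field(1).SetInt(27)
//		fmt.Println(t)             // the synthesized struct type
//		fmt.Println(v.Interface()) // {1.85 27}
//	}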

func runtimeStructField(field StructField) structField {
	var name *string
	if field.Name == "" {
		t := field.Type.(*rtype)
		if t.Kind() == Ptr {
			t = t.Elem().(*rtype)
		}
	} else if field.PkgPath == "" {
		s := field.Name
		name = &s
		b0 := s[0]
		if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
			panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
		}
	}

	var pkgPath *string
	if field.PkgPath != "" {
		s := field.PkgPath
		pkgPath = &s
		// This could work with gccgo but we panic to be
		// compatible with gc.
		panic("reflect: creating a name with a package path is not supported")
	}

	var tag *string
	if field.Tag != "" {
		s := string(field.Tag)
		tag = &s
	}

	return structField{
		name:    name,
		pkgPath: pkgPath,
		typ:     field.Type.common(),
		tag:     tag,
		offset:  0,
	}
}
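
// Illustrative behaviour of the checks above (hypothetical call sites; this
// helper is internal to the package):
//
//	runtimeStructField(StructField{Name: "Count", Type: TypeOf(0)})                  // accepted: exported name
//	runtimeStructField(StructField{Name: "count", Type: TypeOf(0)})                  // panics: unexported name without a PkgPath
//	runtimeStructField(StructField{Name: "count", PkgPath: "main", Type: TypeOf(0)}) // panics: names with a package path are not supported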

// ArrayOf returns the array type with the given count and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
	typ := elem.(*rtype)
	// call SliceOf here as it calls cacheGet/cachePut.
	// ArrayOf also calls cacheGet/cachePut and thus may modify the state of
	// the lookupCache mutex.
	slice := SliceOf(elem)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(count)}
	if array := cacheGet(ckey); array != nil {
		return array
	}

	// Look in known types.
	s := "[" + strconv.Itoa(count) + "]" + *typ.string

	// Make an array type.
	var iarray interface{} = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := new(arrayType)
	*array = *prototype
	array.string = &s

	// gccgo uses a different hash.
	// array.hash = fnv1(typ.hash, '[')
	// for n := uint32(count); n > 0; n >>= 8 {
	//	array.hash = fnv1(array.hash, byte(n))
	// }
	// array.hash = fnv1(array.hash, ']')
	array.hash = typ.hash + 1 + 13

	array.elem = typ
	array.ptrToThis = nil
	max := ^uintptr(0) / typ.size
	if uintptr(count) > max {
		panic("reflect.ArrayOf: array size would exceed virtual address space")
	}
	array.size = typ.size * uintptr(count)
	// if count > 0 && typ.ptrdata != 0 {
	//	array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
	// }
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.uncommonType = nil
	array.len = uintptr(count)
	array.slice = slice.(*rtype)

	array.kind &^= kindNoPointers
	switch {
	case typ.kind&kindNoPointers != 0 || array.size == 0:
		// No pointers.
		array.kind |= kindNoPointers
		gc := [...]uintptr{array.size, _GC_END}
		array.gc = unsafe.Pointer(&gc[0])

	case count == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gc = typ.gc

	default:
		gc := []uintptr{array.size, _GC_ARRAY_START, 0, uintptr(count), typ.size}
		gc = appendGCProgram(gc, typ, 0)
		gc = append(gc, _GC_ARRAY_NEXT, _GC_END)
		array.gc = unsafe.Pointer(&gc[0])
	}

	array.kind &^= kindDirectIface

	array.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
		ret := seed
		for i := 0; i < count; i++ {
			ret = typ.hashfn(p, ret)
			p = unsafe.Pointer(uintptr(p) + typ.size)
		}
		return ret
	}

	array.equalfn = func(p1, p2 unsafe.Pointer) bool {
		for i := 0; i < count; i++ {
			if !typ.equalfn(p1, p2) {
				return false
			}
			p1 = unsafe.Pointer(uintptr(p1) + typ.size)
			p2 = unsafe.Pointer(uintptr(p2) + typ.size)
		}
		return true
	}

	return cachePut(ckey, &array.rtype)
}
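
// Example (illustrative, not part of this file):
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		t := reflect.ArrayOf(5, reflect.TypeOf(byte(0))) // the type [5]uint8
//		v := reflect.New(t).Elem()
//		v.Index(0).SetUint('g')
//		fmt.Println(t, v.Len()) // [5]uint8 5
//	}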

func appendVarint(x []byte, v uintptr) []byte {
	for ; v >= 0x80; v >>= 7 {
		x = append(x, byte(v|0x80))
	}
	x = append(x, byte(v))
	return x
}
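
// Worked example (illustrative, not in the original source): appendVarint
// emits 7 bits per byte, least-significant group first, setting the high bit
// on every byte except the last:
//
//	appendVarint(nil, 300) // []byte{0xAC, 0x02}, since 300 = (0x02 << 7) | 0x2C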

// toType converts from a *rtype to a Type that can be returned
// to the client of package reflect. In gc, the only concern is that
// a nil *rtype must be replaced by a nil Type, but in gccgo this
// function takes care of ensuring that multiple *rtype for the same
// type are coalesced into a single Type.
var canonicalType = make(map[string]Type)

var canonicalTypeLock sync.RWMutex

func canonicalize(t Type) Type {
	if t == nil {
		return nil
	}
	s := t.rawString()
	canonicalTypeLock.RLock()
	if r, ok := canonicalType[s]; ok {
		canonicalTypeLock.RUnlock()
		return r
	}
	canonicalTypeLock.RUnlock()
	canonicalTypeLock.Lock()
	if r, ok := canonicalType[s]; ok {
		canonicalTypeLock.Unlock()
		return r
	}
	canonicalType[s] = t
	canonicalTypeLock.Unlock()
	return t
}

func toType(p *rtype) Type {
	if p == nil {
		return nil
	}
	return canonicalize(p)
}
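
// canonicalize follows the usual read-mostly cache pattern: look up under the
// read lock, and on a miss re-check under the write lock before inserting, so
// two goroutines racing on the same raw string still end up sharing one Type.
// A minimal sketch of the same pattern with hypothetical names:
//
//	var (
//		mu    sync.RWMutex
//		cache = make(map[string]int)
//	)
//
//	func lookup(key string, compute func() int) int {
//		mu.RLock()
//		if v, ok := cache[key]; ok {
//			mu.RUnlock()
//			return v
//		}
//		mu.RUnlock()
//
//		mu.Lock()
//		defer mu.Unlock()
//		if v, ok := cache[key]; ok {
//			return v
//		}
//		v := compute()
//		cache[key] = v
//		return v
//	}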

// ifaceIndir reports whether t is stored indirectly in an interface value.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}

// Layout matches runtime.BitVector (well enough).
type bitVector struct {
	n    uint32 // number of bits
	data []byte
}

// append a bit to the bitmap.
func (bv *bitVector) append(bit uint8) {
	if bv.n%8 == 0 {
		bv.data = append(bv.data, 0)
	}
	bv.data[bv.n/8] |= bit << (bv.n % 8)
	bv.n++
}
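
// Worked example (illustrative, not in the original source): bits are packed
// least-significant first within each byte, so
//
//	var bv bitVector
//	bv.append(1)
//	bv.append(0)
//	bv.append(1)
//	// bv.n == 3, bv.data[0] == 5 (binary 101)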

func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
	if t.kind&kindNoPointers != 0 {
		return
	}

	switch Kind(t.kind & kindMask) {
	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
		// 1 pointer at start of representation
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)

	case Interface:
		// 2 pointers
		for bv.n < uint32(offset/uintptr(ptrSize)) {
			bv.append(0)
		}
		bv.append(1)
		bv.append(1)

	case Array:
		// repeat inner type
		tt := (*arrayType)(unsafe.Pointer(t))
		for i := 0; i < int(tt.len); i++ {
			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
		}

	case Struct:
		// apply fields
		tt := (*structType)(unsafe.Pointer(t))
		for i := range tt.fields {
			f := &tt.fields[i]
			addTypeBits(bv, offset+f.offset, f.typ)