// libgo/go/reflect/type.go (gccgo runtime reflect implementation)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package reflect implements run-time reflection, allowing a program to
// manipulate objects with arbitrary types. The typical use is to take a value
// with static type interface{} and extract its dynamic type information by
// calling TypeOf, which returns a Type.
//
// A call to ValueOf returns a Value representing the run-time data.
// Zero takes a Type and returns a Value representing a zero value
// for that type.
//
// See "The Laws of Reflection" for an introduction to reflection in Go:
// https://golang.org/doc/articles/laws_of_reflection.html
package reflect
import (
	"internal/goarch"
	"strconv"
	"sync"
	"unicode"
	"unicode/utf8"
	"unsafe"
)
27 // Type is the representation of a Go type.
29 // Not all methods apply to all kinds of types. Restrictions,
30 // if any, are noted in the documentation for each method.
31 // Use the Kind method to find out the kind of type before
32 // calling kind-specific methods. Calling a method
33 // inappropriate to the kind of type causes a run-time panic.
35 // Type values are comparable, such as with the == operator,
36 // so they can be used as map keys.
37 // Two Type values are equal if they represent identical types.
38 type Type interface {
39 // Methods applicable to all types.
41 // Align returns the alignment in bytes of a value of
42 // this type when allocated in memory.
43 Align() int
45 // FieldAlign returns the alignment in bytes of a value of
46 // this type when used as a field in a struct.
47 FieldAlign() int
49 // Method returns the i'th method in the type's method set.
50 // It panics if i is not in the range [0, NumMethod()).
52 // For a non-interface type T or *T, the returned Method's Type and Func
53 // fields describe a function whose first argument is the receiver,
54 // and only exported methods are accessible.
56 // For an interface type, the returned Method's Type field gives the
57 // method signature, without a receiver, and the Func field is nil.
59 // Methods are sorted in lexicographic order.
60 Method(int) Method
62 // MethodByName returns the method with that name in the type's
63 // method set and a boolean indicating if the method was found.
65 // For a non-interface type T or *T, the returned Method's Type and Func
66 // fields describe a function whose first argument is the receiver.
68 // For an interface type, the returned Method's Type field gives the
69 // method signature, without a receiver, and the Func field is nil.
70 MethodByName(string) (Method, bool)
72 // NumMethod returns the number of methods accessible using Method.
74 // Note that NumMethod counts unexported methods only for interface types.
75 NumMethod() int
77 // Name returns the type's name within its package for a defined type.
78 // For other (non-defined) types it returns the empty string.
79 Name() string
81 // PkgPath returns a defined type's package path, that is, the import path
82 // that uniquely identifies the package, such as "encoding/base64".
83 // If the type was predeclared (string, error) or not defined (*T, struct{},
84 // []int, or A where A is an alias for a non-defined type), the package path
85 // will be the empty string.
86 PkgPath() string
88 // Size returns the number of bytes needed to store
89 // a value of the given type; it is analogous to unsafe.Sizeof.
90 Size() uintptr
92 // String returns a string representation of the type.
93 // The string representation may use shortened package names
94 // (e.g., base64 instead of "encoding/base64") and is not
95 // guaranteed to be unique among types. To test for type identity,
96 // compare the Types directly.
97 String() string
99 // Used internally by gccgo--the string retaining quoting.
100 rawString() string
102 // Kind returns the specific kind of this type.
103 Kind() Kind
105 // Implements reports whether the type implements the interface type u.
106 Implements(u Type) bool
108 // AssignableTo reports whether a value of the type is assignable to type u.
109 AssignableTo(u Type) bool
111 // ConvertibleTo reports whether a value of the type is convertible to type u.
112 // Even if ConvertibleTo returns true, the conversion may still panic.
113 // For example, a slice of type []T is convertible to *[N]T,
114 // but the conversion will panic if its length is less than N.
115 ConvertibleTo(u Type) bool
117 // Comparable reports whether values of this type are comparable.
118 // Even if Comparable returns true, the comparison may still panic.
119 // For example, values of interface type are comparable,
120 // but the comparison will panic if their dynamic type is not comparable.
121 Comparable() bool
123 // Methods applicable only to some types, depending on Kind.
124 // The methods allowed for each kind are:
126 // Int*, Uint*, Float*, Complex*: Bits
127 // Array: Elem, Len
128 // Chan: ChanDir, Elem
129 // Func: In, NumIn, Out, NumOut, IsVariadic.
130 // Map: Key, Elem
131 // Pointer: Elem
132 // Slice: Elem
133 // Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
135 // Bits returns the size of the type in bits.
136 // It panics if the type's Kind is not one of the
137 // sized or unsized Int, Uint, Float, or Complex kinds.
138 Bits() int
140 // ChanDir returns a channel type's direction.
141 // It panics if the type's Kind is not Chan.
142 ChanDir() ChanDir
144 // IsVariadic reports whether a function type's final input parameter
145 // is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
146 // implicit actual type []T.
148 // For concreteness, if t represents func(x int, y ... float64), then
150 // t.NumIn() == 2
151 // t.In(0) is the reflect.Type for "int"
152 // t.In(1) is the reflect.Type for "[]float64"
153 // t.IsVariadic() == true
155 // IsVariadic panics if the type's Kind is not Func.
156 IsVariadic() bool
158 // Elem returns a type's element type.
159 // It panics if the type's Kind is not Array, Chan, Map, Pointer, or Slice.
160 Elem() Type
162 // Field returns a struct type's i'th field.
163 // It panics if the type's Kind is not Struct.
164 // It panics if i is not in the range [0, NumField()).
165 Field(i int) StructField
167 // FieldByIndex returns the nested field corresponding
168 // to the index sequence. It is equivalent to calling Field
169 // successively for each index i.
170 // It panics if the type's Kind is not Struct.
171 FieldByIndex(index []int) StructField
173 // FieldByName returns the struct field with the given name
174 // and a boolean indicating if the field was found.
175 FieldByName(name string) (StructField, bool)
177 // FieldByNameFunc returns the struct field with a name
178 // that satisfies the match function and a boolean indicating if
179 // the field was found.
181 // FieldByNameFunc considers the fields in the struct itself
182 // and then the fields in any embedded structs, in breadth first order,
183 // stopping at the shallowest nesting depth containing one or more
184 // fields satisfying the match function. If multiple fields at that depth
185 // satisfy the match function, they cancel each other
186 // and FieldByNameFunc returns no match.
187 // This behavior mirrors Go's handling of name lookup in
188 // structs containing embedded fields.
189 FieldByNameFunc(match func(string) bool) (StructField, bool)
191 // In returns the type of a function type's i'th input parameter.
192 // It panics if the type's Kind is not Func.
193 // It panics if i is not in the range [0, NumIn()).
194 In(i int) Type
196 // Key returns a map type's key type.
197 // It panics if the type's Kind is not Map.
198 Key() Type
200 // Len returns an array type's length.
201 // It panics if the type's Kind is not Array.
202 Len() int
204 // NumField returns a struct type's field count.
205 // It panics if the type's Kind is not Struct.
206 NumField() int
208 // NumIn returns a function type's input parameter count.
209 // It panics if the type's Kind is not Func.
210 NumIn() int
212 // NumOut returns a function type's output parameter count.
213 // It panics if the type's Kind is not Func.
214 NumOut() int
216 // Out returns the type of a function type's i'th output parameter.
217 // It panics if the type's Kind is not Func.
218 // It panics if i is not in the range [0, NumOut()).
219 Out(i int) Type
221 common() *rtype
222 uncommon() *uncommonType
// BUG(rsc): FieldByName and related functions consider struct field names to be equal
// if the names are equal, even if they are unexported names originating
// in different packages. The practical effect of this is that the result of
// t.FieldByName("x") is not well defined if the struct type t contains
// multiple fields named x (embedded from different packages).
// FieldByName may return one of the fields named x or may report that there are none.
// See https://golang.org/issue/4876 for more details.
/*
 * These data structures are known to the compiler (../cmd/compile/internal/reflectdata/reflect.go).
 * A few are known to ../runtime/type.go to convey to debuggers.
 * They are also known to ../runtime/type.go.
 */
// A Kind represents the specific kind of type that a Type represents.
// The zero Kind is not a valid kind.
type Kind uint

// The ordering of these constants matches the gc toolchain and the
// kindNames table below; Int (after Bool) and Map (after Interface)
// are required by kindNames and must not be omitted.
const (
	Invalid Kind = iota
	Bool
	Int
	Int8
	Int16
	Int32
	Int64
	Uint
	Uint8
	Uint16
	Uint32
	Uint64
	Uintptr
	Float32
	Float64
	Complex64
	Complex128
	Array
	Chan
	Func
	Interface
	Map
	Pointer
	Slice
	String
	Struct
	UnsafePointer
)

// Ptr is the old name for the Pointer kind.
const Ptr = Pointer
// tflag is used by an rtype to signal what extra type information is
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
//	go/types.cc
//	runtime/type.go
type tflag uint8

const (
	// tflagRegularMemory means that equal and hash functions can treat
	// this type as a single region of t.size bytes.
	tflagRegularMemory tflag = 1 << 3
)
290 // rtype is the common implementation of most values.
291 // It is embedded in other struct types.
293 // rtype must be kept in sync with ../runtime/type.go:/^type._type.
294 type rtype struct {
295 size uintptr
296 ptrdata uintptr // size of memory prefix holding all pointers
297 hash uint32 // hash of type; avoids computation in hash tables
298 tflag tflag // extra type information flags
299 align uint8 // alignment of variable with this type
300 fieldAlign uint8 // alignment of struct field with this type
301 kind uint8 // enumeration for C
302 // function for comparing objects of this type
303 // (ptr to object A, ptr to object B) -> ==?
304 equal func(unsafe.Pointer, unsafe.Pointer) bool
305 gcdata *byte // garbage collection data
306 string *string // string form; unnecessary but undeniably useful
307 *uncommonType // (relatively) uncommon fields
308 ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
311 // Method on non-interface type
312 type method struct {
313 name *string // name of method
314 pkgPath *string // nil for exported Names; otherwise import path
315 mtyp *rtype // method type (without receiver)
316 typ *rtype // .(*FuncType) underneath (with receiver)
317 tfn unsafe.Pointer // fn used for normal method call
320 // uncommonType is present only for defined types or types with methods
321 // (if T is a defined type, the uncommonTypes for T and *T have methods).
322 // Using a pointer to this struct reduces the overall size required
323 // to describe a non-defined type with no methods.
324 type uncommonType struct {
325 name *string // name of type
326 pkgPath *string // import path; nil for built-in types like int, string
327 methods []method // methods associated with type
// ChanDir represents a channel type's direction.
type ChanDir int

const (
	RecvDir ChanDir             = 1 << iota // <-chan
	SendDir                                 // chan<-
	BothDir = RecvDir | SendDir             // chan
)
339 // arrayType represents a fixed array type.
340 type arrayType struct {
341 rtype
342 elem *rtype // array element type
343 slice *rtype // slice type
344 len uintptr
347 // chanType represents a channel type.
348 type chanType struct {
349 rtype
350 elem *rtype // channel element type
351 dir uintptr // channel direction (ChanDir)
354 // funcType represents a function type.
355 type funcType struct {
356 rtype
357 dotdotdot bool // last input parameter is ...
358 in []*rtype // input parameter types
359 out []*rtype // output parameter types
362 // imethod represents a method on an interface type
363 type imethod struct {
364 name *string // name of method
365 pkgPath *string // nil for exported Names; otherwise import path
366 typ *rtype // .(*FuncType) underneath
369 // interfaceType represents an interface type.
370 type interfaceType struct {
371 rtype
372 methods []imethod // sorted by hash
375 // mapType represents a map type.
376 type mapType struct {
377 rtype
378 key *rtype // map key type
379 elem *rtype // map element (value) type
380 bucket *rtype // internal bucket structure
381 // function for hashing keys (ptr to key, seed) -> hash
382 hasher func(unsafe.Pointer, uintptr) uintptr
383 keysize uint8 // size of key slot
384 valuesize uint8 // size of value slot
385 bucketsize uint16 // size of bucket
386 flags uint32
389 // ptrType represents a pointer type.
390 type ptrType struct {
391 rtype
392 elem *rtype // pointer element (pointed at) type
395 // sliceType represents a slice type.
396 type sliceType struct {
397 rtype
398 elem *rtype // slice element type
401 // Struct field
402 type structField struct {
403 name *string // name is always non-empty
404 pkgPath *string // nil for exported Names; otherwise import path
405 typ *rtype // type of field
406 tag *string // nil if no tag
407 offsetEmbed uintptr // byte offset of field<<1 | isAnonymous
410 func (f *structField) offset() uintptr {
411 return f.offsetEmbed >> 1
414 func (f *structField) embedded() bool {
415 return f.offsetEmbed&1 != 0
418 // structType represents a struct type.
419 type structType struct {
420 rtype
421 fields []structField // sorted by offset
/*
 * The compiler knows the exact layout of all the data structures above.
 * The compiler does not know about the data structures and methods below.
 */
429 // Method represents a single method.
430 type Method struct {
431 // Name is the method name.
432 Name string
434 // PkgPath is the package path that qualifies a lower case (unexported)
435 // method name. It is empty for upper case (exported) method names.
436 // The combination of PkgPath and Name uniquely identifies a method
437 // in a method set.
438 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
439 PkgPath string
441 Type Type // method type
442 Func Value // func with receiver as first argument
443 Index int // index for Type.Method
446 // IsExported reports whether the method is exported.
447 func (m Method) IsExported() bool {
448 return m.PkgPath == ""
// Flags stored in the high bits of rtype.kind; the low five bits hold
// the Kind value itself.
const (
	kindDirectIface = 1 << 5
	kindGCProg      = 1 << 6 // Type.gc points to GC program
	kindMask        = (1 << 5) - 1
)
457 // String returns the name of k.
458 func (k Kind) String() string {
459 if int(k) < len(kindNames) {
460 return kindNames[k]
462 return "kind" + strconv.Itoa(int(k))
465 var kindNames = []string{
466 Invalid: "invalid",
467 Bool: "bool",
468 Int: "int",
469 Int8: "int8",
470 Int16: "int16",
471 Int32: "int32",
472 Int64: "int64",
473 Uint: "uint",
474 Uint8: "uint8",
475 Uint16: "uint16",
476 Uint32: "uint32",
477 Uint64: "uint64",
478 Uintptr: "uintptr",
479 Float32: "float32",
480 Float64: "float64",
481 Complex64: "complex64",
482 Complex128: "complex128",
483 Array: "array",
484 Chan: "chan",
485 Func: "func",
486 Interface: "interface",
487 Map: "map",
488 Pointer: "ptr",
489 Slice: "slice",
490 String: "string",
491 Struct: "struct",
492 UnsafePointer: "unsafe.Pointer",
495 func (t *uncommonType) uncommon() *uncommonType {
496 return t
499 func (t *uncommonType) PkgPath() string {
500 if t == nil || t.pkgPath == nil {
501 return ""
503 return *t.pkgPath
506 func (t *uncommonType) Name() string {
507 if t == nil || t.name == nil {
508 return ""
510 return *t.name
513 var methodCache sync.Map // map[*uncommonType][]method
515 func (t *uncommonType) exportedMethods() []method {
516 methodsi, found := methodCache.Load(t)
517 if found {
518 return methodsi.([]method)
521 allm := t.methods
522 allExported := true
523 for _, m := range allm {
524 if m.pkgPath != nil {
525 allExported = false
526 break
529 var methods []method
530 if allExported {
531 methods = allm
532 } else {
533 methods = make([]method, 0, len(allm))
534 for _, m := range allm {
535 if m.pkgPath == nil {
536 methods = append(methods, m)
539 methods = methods[:len(methods):len(methods)]
542 methodsi, _ = methodCache.LoadOrStore(t, methods)
543 return methodsi.([]method)
546 func (t *rtype) rawString() string { return *t.string }
548 func (t *rtype) String() string {
549 // For gccgo, strip out quoted strings.
550 s := *t.string
551 var q bool
552 r := make([]byte, len(s))
553 j := 0
554 for i := 0; i < len(s); i++ {
555 if s[i] == '\t' {
556 q = !q
557 } else if !q {
558 r[j] = s[i]
562 return string(r[:j])
565 func (t *rtype) Size() uintptr { return t.size }
567 func (t *rtype) Bits() int {
568 if t == nil {
569 panic("reflect: Bits of nil Type")
571 k := t.Kind()
572 if k < Int || k > Complex128 {
573 panic("reflect: Bits of non-arithmetic Type " + t.String())
575 return int(t.size) * 8
578 func (t *rtype) Align() int { return int(t.align) }
580 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
582 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
584 func (t *rtype) pointers() bool { return t.ptrdata != 0 }
586 func (t *rtype) common() *rtype { return t }
588 func (t *rtype) exportedMethods() []method {
589 ut := t.uncommon()
590 if ut == nil {
591 return nil
593 return ut.exportedMethods()
596 func (t *rtype) NumMethod() int {
597 if t.Kind() == Interface {
598 tt := (*interfaceType)(unsafe.Pointer(t))
599 return tt.NumMethod()
601 return len(t.exportedMethods())
604 func (t *rtype) Method(i int) (m Method) {
605 if t.Kind() == Interface {
606 tt := (*interfaceType)(unsafe.Pointer(t))
607 return tt.Method(i)
609 methods := t.exportedMethods()
610 if i < 0 || i >= len(methods) {
611 panic("reflect: Method index out of range")
613 p := methods[i]
614 if p.name != nil {
615 m.Name = *p.name
617 fl := flag(Func)
618 mt := p.typ
619 m.Type = toType(mt)
620 x := new(unsafe.Pointer)
621 *x = unsafe.Pointer(&p.tfn)
622 m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
623 m.Index = i
624 return m
627 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
628 if t.Kind() == Interface {
629 tt := (*interfaceType)(unsafe.Pointer(t))
630 return tt.MethodByName(name)
632 ut := t.uncommon()
633 if ut == nil {
634 return Method{}, false
636 utmethods := ut.methods
637 var eidx int
638 for i := 0; i < len(utmethods); i++ {
639 p := utmethods[i]
640 if p.pkgPath == nil {
641 if p.name != nil && *p.name == name {
642 return t.Method(eidx), true
644 eidx++
647 return Method{}, false
650 func (t *rtype) PkgPath() string {
651 return t.uncommonType.PkgPath()
654 func (t *rtype) hasName() bool {
655 return t.uncommonType != nil && t.uncommonType.name != nil
658 func (t *rtype) Name() string {
659 return t.uncommonType.Name()
662 func (t *rtype) ChanDir() ChanDir {
663 if t.Kind() != Chan {
664 panic("reflect: ChanDir of non-chan type " + t.String())
666 tt := (*chanType)(unsafe.Pointer(t))
667 return ChanDir(tt.dir)
670 func (t *rtype) IsVariadic() bool {
671 if t.Kind() != Func {
672 panic("reflect: IsVariadic of non-func type " + t.String())
674 tt := (*funcType)(unsafe.Pointer(t))
675 return tt.dotdotdot
678 func (t *rtype) Elem() Type {
679 switch t.Kind() {
680 case Array:
681 tt := (*arrayType)(unsafe.Pointer(t))
682 return toType(tt.elem)
683 case Chan:
684 tt := (*chanType)(unsafe.Pointer(t))
685 return toType(tt.elem)
686 case Map:
687 tt := (*mapType)(unsafe.Pointer(t))
688 return toType(tt.elem)
689 case Pointer:
690 tt := (*ptrType)(unsafe.Pointer(t))
691 return toType(tt.elem)
692 case Slice:
693 tt := (*sliceType)(unsafe.Pointer(t))
694 return toType(tt.elem)
696 panic("reflect: Elem of invalid type " + t.String())
699 func (t *rtype) Field(i int) StructField {
700 if t.Kind() != Struct {
701 panic("reflect: Field of non-struct type " + t.String())
703 tt := (*structType)(unsafe.Pointer(t))
704 return tt.Field(i)
707 func (t *rtype) FieldByIndex(index []int) StructField {
708 if t.Kind() != Struct {
709 panic("reflect: FieldByIndex of non-struct type " + t.String())
711 tt := (*structType)(unsafe.Pointer(t))
712 return tt.FieldByIndex(index)
715 func (t *rtype) FieldByName(name string) (StructField, bool) {
716 if t.Kind() != Struct {
717 panic("reflect: FieldByName of non-struct type " + t.String())
719 tt := (*structType)(unsafe.Pointer(t))
720 return tt.FieldByName(name)
723 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
724 if t.Kind() != Struct {
725 panic("reflect: FieldByNameFunc of non-struct type " + t.String())
727 tt := (*structType)(unsafe.Pointer(t))
728 return tt.FieldByNameFunc(match)
731 func (t *rtype) In(i int) Type {
732 if t.Kind() != Func {
733 panic("reflect: In of non-func type " + t.String())
735 tt := (*funcType)(unsafe.Pointer(t))
736 return toType(tt.in[i])
739 func (t *rtype) Key() Type {
740 if t.Kind() != Map {
741 panic("reflect: Key of non-map type " + t.String())
743 tt := (*mapType)(unsafe.Pointer(t))
744 return toType(tt.key)
747 func (t *rtype) Len() int {
748 if t.Kind() != Array {
749 panic("reflect: Len of non-array type " + t.String())
751 tt := (*arrayType)(unsafe.Pointer(t))
752 return int(tt.len)
755 func (t *rtype) NumField() int {
756 if t.Kind() != Struct {
757 panic("reflect: NumField of non-struct type " + t.String())
759 tt := (*structType)(unsafe.Pointer(t))
760 return len(tt.fields)
763 func (t *rtype) NumIn() int {
764 if t.Kind() != Func {
765 panic("reflect: NumIn of non-func type " + t.String())
767 tt := (*funcType)(unsafe.Pointer(t))
768 return len(tt.in)
771 func (t *rtype) NumOut() int {
772 if t.Kind() != Func {
773 panic("reflect: NumOut of non-func type " + t.String())
775 tt := (*funcType)(unsafe.Pointer(t))
776 return len(tt.out)
779 func (t *rtype) Out(i int) Type {
780 if t.Kind() != Func {
781 panic("reflect: Out of non-func type " + t.String())
783 tt := (*funcType)(unsafe.Pointer(t))
784 return toType(tt.out[i])
787 // add returns p+x.
789 // The whySafe string is ignored, so that the function still inlines
790 // as efficiently as p+x, but all call sites should use the string to
791 // record why the addition is safe, which is to say why the addition
792 // does not cause x to advance to the very end of p's allocation
793 // and therefore point incorrectly at the next block in memory.
794 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
795 return unsafe.Pointer(uintptr(p) + x)
798 func (d ChanDir) String() string {
799 switch d {
800 case SendDir:
801 return "chan<-"
802 case RecvDir:
803 return "<-chan"
804 case BothDir:
805 return "chan"
807 return "ChanDir" + strconv.Itoa(int(d))
810 // Method returns the i'th method in the type's method set.
811 func (t *interfaceType) Method(i int) (m Method) {
812 if i < 0 || i >= len(t.methods) {
813 return
815 p := &t.methods[i]
816 m.Name = *p.name
817 if p.pkgPath != nil {
818 m.PkgPath = *p.pkgPath
820 m.Type = toType(p.typ)
821 m.Index = i
822 return
825 // NumMethod returns the number of interface methods in the type's method set.
826 func (t *interfaceType) NumMethod() int { return len(t.methods) }
828 // MethodByName method with the given name in the type's method set.
829 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
830 if t == nil {
831 return
833 var p *imethod
834 for i := range t.methods {
835 p = &t.methods[i]
836 if *p.name == name {
837 return t.Method(i), true
840 return
843 // A StructField describes a single field in a struct.
844 type StructField struct {
845 // Name is the field name.
846 Name string
848 // PkgPath is the package path that qualifies a lower case (unexported)
849 // field name. It is empty for upper case (exported) field names.
850 // See https://golang.org/ref/spec#Uniqueness_of_identifiers
851 PkgPath string
853 Type Type // field type
854 Tag StructTag // field tag string
855 Offset uintptr // offset within struct, in bytes
856 Index []int // index sequence for Type.FieldByIndex
857 Anonymous bool // is an embedded field
860 // IsExported reports whether the field is exported.
861 func (f StructField) IsExported() bool {
862 return f.PkgPath == ""
// A StructTag is the tag string in a struct field.
//
// By convention, tag strings are a concatenation of
// optionally space-separated key:"value" pairs.
// Each key is a non-empty string consisting of non-control
// characters other than space (U+0020 ' '), quote (U+0022 '"'),
// and colon (U+003A ':'). Each value is quoted using U+0022 '"'
// characters and Go string literal syntax.
type StructTag string

// Get returns the value associated with key in the tag string.
// If there is no such key in the tag, Get returns the empty string.
// If the tag does not have the conventional format, the value
// returned by Get is unspecified. To determine whether a tag is
// explicitly set to the empty string, use Lookup.
func (tag StructTag) Get(key string) string {
	v, _ := tag.Lookup(key)
	return v
}

// Lookup returns the value associated with key in the tag string.
// If the key is present in the tag the value (which may be empty)
// is returned. Otherwise the returned value will be the empty string.
// The ok return value reports whether the value was explicitly set in
// the tag string. If the tag does not have the conventional format,
// the value returned by Lookup is unspecified.
//
// The extraction of this file had dropped every i++ that advances the
// scanning index; each loop would otherwise spin forever. Restored here.
func (tag StructTag) Lookup(key string) (value string, ok bool) {
	// When modifying this code, also update the validateStructTag code
	// in cmd/vet/structtag.go.

	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		if tag == "" {
			break
		}

		// Scan to colon. A space, a quote or a control character is a syntax error.
		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
		// as it is simpler to inspect the tag's bytes than the tag's runes.
		i = 0
		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
			i++
		}
		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+1:]

		// Scan quoted string to find value.
		i = 1
		for i < len(tag) && tag[i] != '"' {
			if tag[i] == '\\' {
				i++ // skip the escaped character as well
			}
			i++
		}
		if i >= len(tag) {
			break
		}
		qvalue := string(tag[:i+1])
		tag = tag[i+1:]

		if key == name {
			value, err := strconv.Unquote(qvalue)
			if err != nil {
				break
			}
			return value, true
		}
	}
	return "", false
}
945 // Field returns the i'th struct field.
946 func (t *structType) Field(i int) (f StructField) {
947 if i < 0 || i >= len(t.fields) {
948 panic("reflect: Field index out of bounds")
950 p := &t.fields[i]
951 f.Type = toType(p.typ)
952 f.Name = *p.name
953 f.Anonymous = p.embedded()
954 if p.pkgPath != nil {
955 f.PkgPath = *p.pkgPath
957 if p.tag != nil {
958 f.Tag = StructTag(*p.tag)
960 f.Offset = p.offset()
962 // NOTE(rsc): This is the only allocation in the interface
963 // presented by a reflect.Type. It would be nice to avoid,
964 // at least in the common cases, but we need to make sure
965 // that misbehaving clients of reflect cannot affect other
966 // uses of reflect. One possibility is CL 5371098, but we
967 // postponed that ugliness until there is a demonstrated
968 // need for the performance. This is issue 2320.
969 f.Index = []int{i}
970 return
973 // TODO(gri): Should there be an error/bool indicator if the index
974 // is wrong for FieldByIndex?
976 // FieldByIndex returns the nested field corresponding to index.
977 func (t *structType) FieldByIndex(index []int) (f StructField) {
978 f.Type = toType(&t.rtype)
979 for i, x := range index {
980 if i > 0 {
981 ft := f.Type
982 if ft.Kind() == Pointer && ft.Elem().Kind() == Struct {
983 ft = ft.Elem()
985 f.Type = ft
987 f = f.Type.Field(x)
989 return
992 // A fieldScan represents an item on the fieldByNameFunc scan work list.
993 type fieldScan struct {
994 typ *structType
995 index []int
998 // FieldByNameFunc returns the struct field with a name that satisfies the
999 // match function and a boolean to indicate if the field was found.
1000 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
1001 // This uses the same condition that the Go language does: there must be a unique instance
1002 // of the match at a given depth level. If there are multiple instances of a match at the
1003 // same depth, they annihilate each other and inhibit any possible match at a lower level.
1004 // The algorithm is breadth first search, one depth level at a time.
1006 // The current and next slices are work queues:
1007 // current lists the fields to visit on this depth level,
1008 // and next lists the fields on the next lower level.
1009 current := []fieldScan{}
1010 next := []fieldScan{{typ: t}}
1012 // nextCount records the number of times an embedded type has been
1013 // encountered and considered for queueing in the 'next' slice.
1014 // We only queue the first one, but we increment the count on each.
1015 // If a struct type T can be reached more than once at a given depth level,
1016 // then it annihilates itself and need not be considered at all when we
1017 // process that next depth level.
1018 var nextCount map[*structType]int
1020 // visited records the structs that have been considered already.
1021 // Embedded pointer fields can create cycles in the graph of
1022 // reachable embedded types; visited avoids following those cycles.
1023 // It also avoids duplicated effort: if we didn't find the field in an
1024 // embedded type T at level 2, we won't find it in one at level 4 either.
1025 visited := map[*structType]bool{}
1027 for len(next) > 0 {
1028 current, next = next, current[:0]
1029 count := nextCount
1030 nextCount = nil
1032 // Process all the fields at this depth, now listed in 'current'.
1033 // The loop queues embedded fields found in 'next', for processing during the next
1034 // iteration. The multiplicity of the 'current' field counts is recorded
1035 // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
1036 for _, scan := range current {
1037 t := scan.typ
1038 if visited[t] {
1039 // We've looked through this type before, at a higher level.
1040 // That higher level would shadow the lower level we're now at,
1041 // so this one can't be useful to us. Ignore it.
1042 continue
1044 visited[t] = true
1045 for i := range t.fields {
1046 f := &t.fields[i]
1047 // Find name and (for embedded field) type for field f.
1048 fname := *f.name
1049 var ntyp *rtype
1050 if f.embedded() {
1051 // Embedded field of type T or *T.
1052 ntyp = f.typ
1053 if ntyp.Kind() == Pointer {
1054 ntyp = ntyp.Elem().common()
1058 // Does it match?
1059 if match(fname) {
1060 // Potential match
1061 if count[t] > 1 || ok {
1062 // Name appeared multiple times at this level: annihilate.
1063 return StructField{}, false
1065 result = t.Field(i)
1066 result.Index = nil
1067 result.Index = append(result.Index, scan.index...)
1068 result.Index = append(result.Index, i)
1069 ok = true
1070 continue
1073 // Queue embedded struct fields for processing with next level,
1074 // but only if we haven't seen a match yet at this level and only
1075 // if the embedded types haven't already been queued.
1076 if ok || ntyp == nil || ntyp.Kind() != Struct {
1077 continue
1079 ntyp = toType(ntyp).common()
1080 styp := (*structType)(unsafe.Pointer(ntyp))
1081 if nextCount[styp] > 0 {
1082 nextCount[styp] = 2 // exact multiple doesn't matter
1083 continue
1085 if nextCount == nil {
1086 nextCount = map[*structType]int{}
1088 nextCount[styp] = 1
1089 if count[t] > 1 {
1090 nextCount[styp] = 2 // exact multiple doesn't matter
1092 var index []int
1093 index = append(index, scan.index...)
1094 index = append(index, i)
1095 next = append(next, fieldScan{styp, index})
1098 if ok {
1099 break
1102 return
1105 // FieldByName returns the struct field with the given name
1106 // and a boolean to indicate if the field was found.
1107 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1108 // Quick check for top-level name, or struct without embedded fields.
1109 hasEmbeds := false
1110 if name != "" {
1111 for i := range t.fields {
1112 tf := &t.fields[i]
1113 if *tf.name == name {
1114 return t.Field(i), true
1116 if tf.embedded() {
1117 hasEmbeds = true
1121 if !hasEmbeds {
1122 return
1124 return t.FieldByNameFunc(func(s string) bool { return s == name })
1127 // TypeOf returns the reflection Type that represents the dynamic type of i.
1128 // If i is a nil interface value, TypeOf returns nil.
1129 func TypeOf(i any) Type {
1130 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1131 return toType(eface.typ)
// ptrMap is the cache for PointerTo.
// Keyed by the element's *rtype; values are the synthesized *ptrType.
var ptrMap sync.Map // map[*rtype]*ptrType
1137 // PtrTo returns the pointer type with element t.
1138 // For example, if t represents type Foo, PtrTo(t) represents *Foo.
1140 // PtrTo is the old spelling of PointerTo.
1141 // The two functions behave identically.
1142 func PtrTo(t Type) Type { return PointerTo(t) }
1144 // PointerTo returns the pointer type with element t.
1145 // For example, if t represents type Foo, PointerTo(t) represents *Foo.
1146 func PointerTo(t Type) Type {
1147 return t.(*rtype).ptrTo()
// ptrTo returns the type *t, creating and caching it if necessary.
// Lookup order: t's own ptrToThis link, the ptrMap cache, the table of
// types known to the linker, and finally a freshly synthesized ptrType.
func (t *rtype) ptrTo() *rtype {
	if p := t.ptrToThis; p != nil {
		return p
	}

	// Check the cache.
	if pi, ok := ptrMap.Load(t); ok {
		return &pi.(*ptrType).rtype
	}

	// Look in known types.
	s := "*" + *t.string
	if tt := lookupType(s); tt != nil {
		p := (*ptrType)(unsafe.Pointer(toType(tt).(*rtype)))
		// Only trust the linker's type if it really points at t.
		if p.elem == t {
			pi, _ := ptrMap.LoadOrStore(t, p)
			return &pi.(*ptrType).rtype
		}
	}

	// Create a new ptrType starting with the description
	// of an *unsafe.Pointer.
	var iptr any = (*unsafe.Pointer)(nil)
	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
	pp := *prototype

	pp.string = &s
	pp.ptrToThis = nil

	// For the type structures linked into the binary, the
	// compiler provides a good hash of the string.
	// Create a good hash for the new string by using
	// the FNV-1 hash's mixing function to combine the
	// old hash and the new "*".
	// p.hash = fnv1(t.hash, '*')
	// This is the gccgo version.
	pp.hash = (t.hash << 4) + 9

	pp.uncommonType = nil
	pp.ptrToThis = nil
	pp.elem = t

	// LoadOrStore so a racing goroutine's entry wins consistently.
	q := toType(&pp.rtype).(*rtype)
	p := (*ptrType)(unsafe.Pointer(q))
	pi, _ := ptrMap.LoadOrStore(t, p)
	return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
func fnv1(x uint32, list ...byte) uint32 {
	const prime = 16777619 // the 32-bit FNV prime
	for i := 0; i < len(list); i++ {
		x = x*prime ^ uint32(list[i])
	}
	return x
}
1206 func (t *rtype) Implements(u Type) bool {
1207 if u == nil {
1208 panic("reflect: nil type passed to Type.Implements")
1210 if u.Kind() != Interface {
1211 panic("reflect: non-interface type passed to Type.Implements")
1213 return implements(u.(*rtype), t)
1216 func (t *rtype) AssignableTo(u Type) bool {
1217 if u == nil {
1218 panic("reflect: nil type passed to Type.AssignableTo")
1220 uu := u.(*rtype)
1221 return directlyAssignable(uu, t) || implements(uu, t)
1224 func (t *rtype) ConvertibleTo(u Type) bool {
1225 if u == nil {
1226 panic("reflect: nil type passed to Type.ConvertibleTo")
1228 uu := u.(*rtype)
1229 return convertOp(uu, t) != nil
// Comparable reports whether values of this type are comparable with ==.
// The runtime supplies an equality function only for comparable types,
// so a nil equal function marks a non-comparable type.
func (t *rtype) Comparable() bool {
	return t.equal != nil
}
// implements reports whether the type V implements the interface type T.
func implements(T, V *rtype) bool {
	if T.Kind() != Interface {
		return false
	}
	t := (*interfaceType)(unsafe.Pointer(T))
	// The empty interface is implemented by everything.
	if len(t.methods) == 0 {
		return true
	}

	// The same algorithm applies in both cases, but the
	// method tables for an interface type and a concrete type
	// are different, so the code is duplicated.
	// In both cases the algorithm is a linear scan over the two
	// lists - T's methods and V's methods - simultaneously.
	// Since method tables are stored in a unique sorted order
	// (alphabetical, with no duplicate method names), the scan
	// through V's methods must hit a match for each of T's
	// methods along the way, or else V does not implement T.
	// This lets us run the scan in overall linear time instead of
	// the quadratic time a naive search would require.
	// See also ../runtime/iface.go.
	if V.Kind() == Interface {
		v := (*interfaceType)(unsafe.Pointer(V))
		i := 0
		for j := 0; j < len(v.methods); j++ {
			tm := &t.methods[i]
			vm := &v.methods[j]
			// Methods match when names are equal, package paths are equal
			// (both nil, same pointer, or same string), and signatures match.
			if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.typ).common() == toType(tm.typ).common() {
				if i++; i >= len(t.methods) {
					return true
				}
			}
		}
		return false
	}

	// Concrete type: methods live in the uncommon-type record, if any.
	v := V.uncommon()
	if v == nil {
		return false
	}
	i := 0
	for j := 0; j < len(v.methods); j++ {
		tm := &t.methods[i]
		vm := &v.methods[j]
		// Note: vm.mtyp here, not vm.typ — concrete method records store
		// the method type without the receiver in mtyp.
		if *vm.name == *tm.name && (vm.pkgPath == tm.pkgPath || (vm.pkgPath != nil && tm.pkgPath != nil && *vm.pkgPath == *tm.pkgPath)) && toType(vm.mtyp).common() == toType(tm.typ).common() {
			if i++; i >= len(t.methods) {
				return true
			}
		}
	}
	return false
}
1290 // specialChannelAssignability reports whether a value x of channel type V
1291 // can be directly assigned (using memmove) to another channel type T.
1292 // https://golang.org/doc/go_spec.html#Assignability
1293 // T and V must be both of Chan kind.
1294 func specialChannelAssignability(T, V *rtype) bool {
1295 // Special case:
1296 // x is a bidirectional channel value, T is a channel type,
1297 // x's type V and T have identical element types,
1298 // and at least one of V or T is not a defined type.
1299 return V.ChanDir() == BothDir && (T.Name() == "" || V.Name() == "") && haveIdenticalType(T.Elem(), V.Elem(), true)
1302 // directlyAssignable reports whether a value x of type V can be directly
1303 // assigned (using memmove) to a value of type T.
1304 // https://golang.org/doc/go_spec.html#Assignability
1305 // Ignoring the interface rules (implemented elsewhere)
1306 // and the ideal constant rules (no ideal constants at run time).
1307 func directlyAssignable(T, V *rtype) bool {
1308 // x's type V is identical to T?
1309 if rtypeEqual(T, V) {
1310 return true
1313 // Otherwise at least one of T and V must not be defined
1314 // and they must have the same kind.
1315 if T.hasName() && V.hasName() || T.Kind() != V.Kind() {
1316 return false
1319 if T.Kind() == Chan && specialChannelAssignability(T, V) {
1320 return true
1323 // x's type T and V must have identical underlying types.
1324 return haveIdenticalUnderlyingType(T, V, true)
1327 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1328 if cmpTags {
1329 return T == V
1332 if T.Name() != V.Name() || T.Kind() != V.Kind() || T.PkgPath() != V.PkgPath() {
1333 return false
1336 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
// haveIdenticalUnderlyingType reports whether T and V have identical
// underlying types, per the Go spec's type identity rules. When cmpTags
// is false, struct field tags are ignored in the comparison.
func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
	if rtypeEqual(T, V) {
		return true
	}

	kind := T.Kind()
	if kind != V.Kind() {
		return false
	}

	// Non-composite types of equal kind have same underlying type
	// (the predefined instance of the type).
	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
		return true
	}

	// Composite types.
	switch kind {
	case Array:
		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Chan:
		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Func:
		t := (*funcType)(unsafe.Pointer(T))
		v := (*funcType)(unsafe.Pointer(V))
		// Same variadic-ness and same parameter/result counts...
		if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
			return false
		}
		// ...and pairwise-identical parameter and result types.
		for i, typ := range t.in {
			if !haveIdenticalType(typ, v.in[i], cmpTags) {
				return false
			}
		}
		for i, typ := range t.out {
			if !haveIdenticalType(typ, v.out[i], cmpTags) {
				return false
			}
		}
		return true

	case Interface:
		t := (*interfaceType)(unsafe.Pointer(T))
		v := (*interfaceType)(unsafe.Pointer(V))
		// Only two empty interfaces are identical here.
		if len(t.methods) == 0 && len(v.methods) == 0 {
			return true
		}
		// Might have the same methods but still
		// need a run time conversion.
		return false

	case Map:
		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Pointer, Slice:
		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)

	case Struct:
		t := (*structType)(unsafe.Pointer(T))
		v := (*structType)(unsafe.Pointer(V))
		if len(t.fields) != len(v.fields) {
			return false
		}
		for i := range t.fields {
			tf := &t.fields[i]
			vf := &v.fields[i]
			// Field names, package paths, and tags are *string; compare
			// pointer first, then (if both non-nil) the pointed-to strings.
			if tf.name != vf.name && (tf.name == nil || vf.name == nil || *tf.name != *vf.name) {
				return false
			}
			if tf.pkgPath != vf.pkgPath && (tf.pkgPath == nil || vf.pkgPath == nil || *tf.pkgPath != *vf.pkgPath) {
				return false
			}
			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
				return false
			}
			if cmpTags && tf.tag != vf.tag && (tf.tag == nil || vf.tag == nil || *tf.tag != *vf.tag) {
				return false
			}
			// offsetEmbed packs both the offset and the embedded flag,
			// so this compares layout and embeddedness at once.
			if tf.offsetEmbed != vf.offsetEmbed {
				return false
			}
		}
		return true
	}

	return false
}
// The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
var lookupCache sync.Map // map[cacheKey]*rtype
// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
// type kind, one or two subtypes, and an extra integer.
type cacheKey struct {
	kind  Kind    // kind of composite type being built (Chan, Map, Slice, ...)
	t1    *rtype  // first subtype (element type, or map key type)
	t2    *rtype  // second subtype if any (map element type), else nil
	extra uintptr // extra integer (e.g. channel direction), else 0
}
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
	// Elements of m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
// For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
//
// The gc runtime imposes a limit of 64 kB on channel element types.
// If t's size is equal to or exceeds this limit, ChanOf panics.
// It also panics if dir is not a valid ChanDir.
func ChanOf(dir ChanDir, t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
	if ch, ok := lookupCache.Load(ckey); ok {
		return ch.(*rtype)
	}

	// This restriction is imposed by the gc compiler and the runtime.
	if typ.size >= 1<<16 {
		panic("reflect.ChanOf: element size too large")
	}

	// Look in known types.
	var s string
	switch dir {
	default:
		panic("reflect.ChanOf: invalid dir")
	case SendDir:
		s = "chan<- " + *typ.string
	case RecvDir:
		s = "<-chan " + *typ.string
	case BothDir:
		typeStr := *typ.string
		if typeStr[0] == '<' {
			// typ is recv chan, need parentheses as "<-" associates with leftmost
			// chan possible, see:
			// * https://golang.org/ref/spec#Channel_types
			// * https://github.com/golang/go/issues/39897
			s = "chan (" + typeStr + ")"
		} else {
			s = "chan " + typeStr
		}
	}
	if tt := lookupType(s); tt != nil {
		ch := (*chanType)(unsafe.Pointer(toType(tt).(*rtype)))
		// Only trust the linker's type if element and direction match.
		if ch.elem == typ && ch.dir == uintptr(dir) {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a channel type.
	var ichan any = (chan unsafe.Pointer)(nil)
	prototype := *(**chanType)(unsafe.Pointer(&ichan))
	ch := *prototype
	ch.tflag = tflagRegularMemory
	ch.dir = uintptr(dir)
	ch.string = &s

	// gccgo uses a different hash.
	// ch.hash = fnv1(typ.hash, 'c', byte(dir))
	ch.hash = 0
	if dir&SendDir != 0 {
		ch.hash += 1
	}
	if dir&RecvDir != 0 {
		ch.hash += 2
	}
	ch.hash += typ.hash << 2
	ch.hash <<= 3
	ch.hash += 15

	ch.elem = typ
	ch.uncommonType = nil
	ch.ptrToThis = nil

	// LoadOrStore so a racing goroutine's entry wins consistently.
	ti, _ := lookupCache.LoadOrStore(ckey, toType(&ch.rtype).(*rtype))
	return ti.(Type)
}
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
//
// If the key type is not a valid map key type (that is, if it does
// not implement Go's == operator), MapOf panics.
func MapOf(key, elem Type) Type {
	ktyp := key.(*rtype)
	etyp := elem.(*rtype)

	// A nil equal function means the key type is not comparable.
	if ktyp.equal == nil {
		panic("reflect.MapOf: invalid key type " + ktyp.String())
	}

	// Look in cache.
	ckey := cacheKey{Map, ktyp, etyp, 0}
	if mt, ok := lookupCache.Load(ckey); ok {
		return mt.(Type)
	}

	// Look in known types.
	s := "map[" + *ktyp.string + "]" + *etyp.string
	if tt := lookupType(s); tt != nil {
		mt := (*mapType)(unsafe.Pointer(toType(tt).(*rtype)))
		if mt.key == ktyp && mt.elem == etyp {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a map type.
	// Note: flag values must match those used in the TMAP case
	// in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
	var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
	mt := **(**mapType)(unsafe.Pointer(&imap))
	mt.string = &s

	// gccgo uses a different hash
	// mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
	mt.hash = ktyp.hash + etyp.hash + 2 + 14

	mt.key = ktyp
	mt.elem = etyp
	mt.uncommonType = nil
	mt.ptrToThis = nil

	mt.bucket = bucketOf(ktyp, etyp)
	mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
		return typehash(ktyp, p, seed)
	}
	mt.flags = 0
	// Keys/values larger than the runtime limits are stored indirectly
	// (as pointers) inside buckets; record that in the flag bits.
	if ktyp.size > maxKeySize {
		mt.keysize = uint8(goarch.PtrSize)
		mt.flags |= 1 // indirect key
	} else {
		mt.keysize = uint8(ktyp.size)
	}
	if etyp.size > maxValSize {
		mt.valuesize = uint8(goarch.PtrSize)
		mt.flags |= 2 // indirect value
	} else {
		mt.valuesize = uint8(etyp.size)
	}
	mt.bucketsize = uint16(mt.bucket.size)
	if isReflexive(ktyp) {
		mt.flags |= 4
	}
	if needKeyUpdate(ktyp) {
		mt.flags |= 8
	}
	if hashMightPanic(ktyp) {
		mt.flags |= 16
	}

	ti, _ := lookupCache.LoadOrStore(ckey, toType(&mt.rtype).(*rtype))
	return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
//
// The variadic argument controls whether the function is variadic. FuncOf
// panics if the in[len(in)-1] does not represent a slice and variadic is
// true.
func FuncOf(in, out []Type, variadic bool) Type {
	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
		panic("reflect.FuncOf: last arg of variadic func must be slice")
	}

	// Make a func type.
	var ifunc any = (func())(nil)
	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
	ft := new(funcType)
	*ft = *prototype

	// Build a hash and minimally populate ft.
	// The hash mixes every parameter and result type at a distinct shift,
	// plus the variadic flag; collisions are resolved by the identity
	// check in the cache scans below.
	var hash uint32
	var fin, fout []*rtype
	shift := uint(1)
	for _, in := range in {
		t := in.(*rtype)
		fin = append(fin, t)
		hash += t.hash << shift
		shift++
	}
	shift = 2
	for _, out := range out {
		t := out.(*rtype)
		fout = append(fout, t)
		hash += t.hash << shift
		shift++
	}
	if variadic {
		hash++
	}
	hash <<= 4
	hash += 8
	ft.hash = hash
	ft.in = fin
	ft.out = fout
	ft.dotdotdot = variadic

	// Look in cache (lock-free load).
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// Not in cache, lock and retry.
	funcLookupCache.Lock()
	defer funcLookupCache.Unlock()
	if ts, ok := funcLookupCache.m.Load(hash); ok {
		for _, t := range ts.([]*rtype) {
			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
				return t
			}
		}
	}

	// addToCache appends tt to the hash bucket; safe because we hold
	// funcLookupCache.Mutex and buckets are append-only.
	addToCache := func(tt *rtype) Type {
		var rts []*rtype
		if rti, ok := funcLookupCache.m.Load(hash); ok {
			rts = rti.([]*rtype)
		}
		funcLookupCache.m.Store(hash, append(rts, tt))
		return tt
	}

	// Look in known types for the same string representation.
	str := funcStr(ft)
	if tt := lookupType(str); tt != nil {
		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
			return addToCache(tt)
		}
	}

	// Populate the remaining fields of ft and store in cache.
	ft.string = &str
	ft.uncommonType = nil
	ft.ptrToThis = nil
	return addToCache(toType(&ft.rtype).(*rtype))
}
1695 // funcStr builds a string representation of a funcType.
1696 func funcStr(ft *funcType) string {
1697 repr := make([]byte, 0, 64)
1698 repr = append(repr, "func("...)
1699 for i, t := range ft.in {
1700 if i > 0 {
1701 repr = append(repr, ", "...)
1703 if ft.dotdotdot && i == len(ft.in)-1 {
1704 repr = append(repr, "..."...)
1705 repr = append(repr, *(*sliceType)(unsafe.Pointer(t)).elem.string...)
1706 } else {
1707 repr = append(repr, *t.string...)
1710 repr = append(repr, ')')
1711 if l := len(ft.out); l == 1 {
1712 repr = append(repr, ' ')
1713 } else if l > 1 {
1714 repr = append(repr, " ("...)
1716 for i, t := range ft.out {
1717 if i > 0 {
1718 repr = append(repr, ", "...)
1720 repr = append(repr, *t.string...)
1722 if len(ft.out) > 1 {
1723 repr = append(repr, ')')
1725 return string(repr)
1728 // isReflexive reports whether the == operation on the type is reflexive.
1729 // That is, x == x for all values x of type t.
1730 func isReflexive(t *rtype) bool {
1731 switch t.Kind() {
1732 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, String, UnsafePointer:
1733 return true
1734 case Float32, Float64, Complex64, Complex128, Interface:
1735 return false
1736 case Array:
1737 tt := (*arrayType)(unsafe.Pointer(t))
1738 return isReflexive(tt.elem)
1739 case Struct:
1740 tt := (*structType)(unsafe.Pointer(t))
1741 for _, f := range tt.fields {
1742 if !isReflexive(f.typ) {
1743 return false
1746 return true
1747 default:
1748 // Func, Map, Slice, Invalid
1749 panic("isReflexive called on non-key type " + t.String())
1753 // needKeyUpdate reports whether map overwrites require the key to be copied.
1754 func needKeyUpdate(t *rtype) bool {
1755 switch t.Kind() {
1756 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Pointer, UnsafePointer:
1757 return false
1758 case Float32, Float64, Complex64, Complex128, Interface, String:
1759 // Float keys can be updated from +0 to -0.
1760 // String keys can be updated to use a smaller backing store.
1761 // Interfaces might have floats of strings in them.
1762 return true
1763 case Array:
1764 tt := (*arrayType)(unsafe.Pointer(t))
1765 return needKeyUpdate(tt.elem)
1766 case Struct:
1767 tt := (*structType)(unsafe.Pointer(t))
1768 for _, f := range tt.fields {
1769 if needKeyUpdate(f.typ) {
1770 return true
1773 return false
1774 default:
1775 // Func, Map, Slice, Invalid
1776 panic("needKeyUpdate called on non-key type " + t.String())
1780 // hashMightPanic reports whether the hash of a map key of type t might panic.
1781 func hashMightPanic(t *rtype) bool {
1782 switch t.Kind() {
1783 case Interface:
1784 return true
1785 case Array:
1786 tt := (*arrayType)(unsafe.Pointer(t))
1787 return hashMightPanic(tt.elem)
1788 case Struct:
1789 tt := (*structType)(unsafe.Pointer(t))
1790 for _, f := range tt.fields {
1791 if hashMightPanic(f.typ) {
1792 return true
1795 return false
1796 default:
1797 return false
// Make sure these routines stay in sync with ../../runtime/map.go!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
	bucketSize uintptr = 8   // entries per hash bucket
	maxKeySize uintptr = 128 // keys larger than this are stored indirectly
	maxValSize uintptr = 128 // values larger than this are stored indirectly
)
// bucketOf constructs the GC-only rtype describing one hash bucket of a
// map[ktyp]etyp: the 8-byte tophash array, bucketSize keys, bucketSize
// values, and the trailing overflow pointer. Layout must stay in sync
// with the runtime's bmap.
func bucketOf(ktyp, etyp *rtype) *rtype {
	// Oversized keys/values are stored as pointers in the bucket.
	if ktyp.size > maxKeySize {
		ktyp = PointerTo(ktyp).(*rtype)
	}
	if etyp.size > maxValSize {
		etyp = PointerTo(etyp).(*rtype)
	}

	// Prepare GC data if any.
	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*goarch.PtrSize bytes,
	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
	// Note that since the key and value are known to be <= 128 bytes,
	// they're guaranteed to have bitmaps instead of GC programs.
	var gcdata *byte
	var ptrdata uintptr

	// Compute the bucket size: tophash bytes, keys, then values,
	// each run aligned for its element type.
	size := bucketSize
	size = align(size, uintptr(ktyp.fieldAlign))
	size += bucketSize * ktyp.size
	size = align(size, uintptr(etyp.fieldAlign))
	size += bucketSize * etyp.size

	maxAlign := uintptr(ktyp.fieldAlign)
	if maxAlign < uintptr(etyp.fieldAlign) {
		maxAlign = uintptr(etyp.fieldAlign)
	}
	if maxAlign > goarch.PtrSize {
		size = align(size, maxAlign)
		size += align(goarch.PtrSize, maxAlign) - goarch.PtrSize
	} else if maxAlign < goarch.PtrSize {
		size = align(size, goarch.PtrSize)
		maxAlign = goarch.PtrSize
	}

	// The overflow pointer sits at the very end of the bucket.
	ovoff := size
	size += goarch.PtrSize

	if ktyp.ptrdata != 0 || etyp.ptrdata != 0 {
		// Build a pointer bitmap covering the whole bucket.
		nptr := size / goarch.PtrSize
		mask := make([]byte, (nptr+7)/8)
		psize := bucketSize
		psize = align(psize, uintptr(ktyp.fieldAlign))
		base := psize / goarch.PtrSize

		if ktyp.ptrdata != 0 {
			emitGCMask(mask, base, ktyp, bucketSize)
		}
		psize += bucketSize * ktyp.size
		psize = align(psize, uintptr(etyp.fieldAlign))
		base = psize / goarch.PtrSize

		if etyp.ptrdata != 0 {
			emitGCMask(mask, base, etyp, bucketSize)
		}

		// Mark the overflow pointer word.
		word := ovoff / goarch.PtrSize
		mask[word/8] |= 1 << (word % 8)
		gcdata = &mask[0]
		ptrdata = (word + 1) * goarch.PtrSize

		// overflow word must be last
		if ptrdata != size {
			panic("reflect: bad layout computation in MapOf")
		}
	}

	b := &rtype{
		align:      uint8(maxAlign),
		fieldAlign: uint8(maxAlign),
		size:       size,
		kind:       uint8(Struct),
		ptrdata:    ptrdata,
		gcdata:     gcdata,
	}
	// String is for debugging only; buckets are never user-visible types.
	s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
	b.string = &s
	return b
}
1890 func (t *rtype) gcSlice(begin, end uintptr) []byte {
1891 return (*[1 << 30]byte)(unsafe.Pointer(t.gcdata))[begin:end:end]
// emitGCMask writes the GC mask for [n]typ into out, starting at bit
// offset base. It requires typ to carry a pointer bitmap, not a GC
// program (the caller guarantees this for small types).
func emitGCMask(out []byte, base uintptr, typ *rtype, n uintptr) {
	if typ.kind&kindGCProg != 0 {
		panic("reflect: unexpected GC program")
	}
	ptrs := typ.ptrdata / goarch.PtrSize
	words := typ.size / goarch.PtrSize
	mask := typ.gcSlice(0, (ptrs+7)/8)
	// For each pointer word j of one element, set that bit in each of the
	// n copies, stride `words` apart.
	for j := uintptr(0); j < ptrs; j++ {
		if (mask[j/8]>>(j%8))&1 != 0 {
			for i := uintptr(0); i < n; i++ {
				k := base + i*words + j
				out[k/8] |= 1 << (k % 8)
			}
		}
	}
}
// appendGCProg appends the GC program for the first ptrdata bytes of
// typ to dst and returns the extended slice.
func appendGCProg(dst []byte, typ *rtype) []byte {
	if typ.kind&kindGCProg != 0 {
		// Element has GC program; emit one element.
		// gcdata starts with a 4-byte little-endian program length.
		n := uintptr(*(*uint32)(unsafe.Pointer(typ.gcdata)))
		// Copy the program body without its trailing terminator byte.
		prog := typ.gcSlice(4, 4+n-1)
		return append(dst, prog...)
	}

	// Element is small with pointer mask; use as literal bits.
	ptrs := typ.ptrdata / goarch.PtrSize
	mask := typ.gcSlice(0, (ptrs+7)/8)

	// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
	for ; ptrs > 120; ptrs -= 120 {
		dst = append(dst, 120)
		dst = append(dst, mask[:15]...)
		mask = mask[15:]
	}
	dst = append(dst, byte(ptrs))
	dst = append(dst, mask...)
	return dst
}
// SliceOf returns the slice type with element type t.
// For example, if t represents int, SliceOf(t) represents []int.
func SliceOf(t Type) Type {
	typ := t.(*rtype)

	// Look in cache.
	ckey := cacheKey{Slice, typ, nil, 0}
	if slice, ok := lookupCache.Load(ckey); ok {
		return slice.(Type)
	}

	// Look in known types.
	s := "[]" + *typ.string
	if tt := lookupType(s); tt != nil {
		slice := (*sliceType)(unsafe.Pointer(toType(tt).(*rtype)))
		// Only trust the linker's type if its element really is typ.
		if slice.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make a slice type, starting from the prototype descriptor of
	// []unsafe.Pointer.
	var islice any = ([]unsafe.Pointer)(nil)
	prototype := *(**sliceType)(unsafe.Pointer(&islice))
	slice := *prototype
	slice.string = &s

	// gccgo uses a different hash.
	// slice.hash = fnv1(typ.hash, '[')
	slice.hash = typ.hash + 1 + 13

	slice.elem = typ
	slice.uncommonType = nil
	slice.ptrToThis = nil

	// LoadOrStore so a racing goroutine's entry wins consistently.
	ti, _ := lookupCache.LoadOrStore(ckey, toType(&slice.rtype).(*rtype))
	return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
	sync.Mutex // Guards stores (but not loads) on m.

	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
	// Elements in m are append-only and thus safe for concurrent reading.
	m sync.Map
}
// isLetter reports whether a given 'rune' is classified as a Letter.
// For identifier purposes '_' counts as a letter.
func isLetter(ch rune) bool {
	switch {
	case ch == '_':
		return true
	case 'a' <= ch && ch <= 'z', 'A' <= ch && ch <= 'Z':
		return true
	case ch >= utf8.RuneSelf:
		// Non-ASCII: defer to the Unicode tables.
		return unicode.IsLetter(ch)
	}
	return false
}
1994 // isValidFieldName checks if a string is a valid (struct) field name or not.
1996 // According to the language spec, a field name should be an identifier.
1998 // identifier = letter { letter | unicode_digit } .
1999 // letter = unicode_letter | "_" .
2000 func isValidFieldName(fieldName string) bool {
2001 for i, c := range fieldName {
2002 if i == 0 && !isLetter(c) {
2003 return false
2006 if !(isLetter(c) || unicode.IsDigit(c)) {
2007 return false
2011 return len(fieldName) > 0
2014 // StructOf returns the struct type containing fields.
2015 // The Offset and Index fields are ignored and computed as they would be
2016 // by the compiler.
2018 // StructOf currently does not generate wrapper methods for embedded
2019 // fields and panics if passed unexported StructFields.
2020 // These limitations may be lifted in a future version.
2021 func StructOf(fields []StructField) Type {
2022 var (
2023 hash = uint32(12)
2024 size uintptr
2025 typalign uint8
2026 comparable = true
2028 fs = make([]structField, len(fields))
2029 repr = make([]byte, 0, 64)
2030 fset = map[string]struct{}{} // fields' names
2032 hasGCProg = false // records whether a struct-field type has a GCProg
2035 lastzero := uintptr(0)
2036 repr = append(repr, "struct {"...)
2037 pkgpath := ""
2038 for i, field := range fields {
2039 if field.Name == "" {
2040 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2042 if !isValidFieldName(field.Name) {
2043 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2045 if field.Type == nil {
2046 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2048 f, fpkgpath := runtimeStructField(field)
2049 ft := f.typ
2050 if ft.kind&kindGCProg != 0 {
2051 hasGCProg = true
2053 if fpkgpath != "" {
2054 if pkgpath == "" {
2055 pkgpath = fpkgpath
2056 } else if pkgpath != fpkgpath {
2057 panic("reflect.Struct: fields with different PkgPath " + pkgpath + " and " + fpkgpath)
2061 // Update string and hash
2062 name := *f.name
2063 hash = (hash << 1) + ft.hash
2064 if !f.embedded() {
2065 repr = append(repr, (" " + name)...)
2066 } else {
2067 // Embedded field
2068 repr = append(repr, " ?"...)
2069 if f.typ.Kind() == Pointer {
2070 // Embedded ** and *interface{} are illegal
2071 elem := ft.Elem()
2072 if k := elem.Kind(); k == Pointer || k == Interface {
2073 panic("reflect.StructOf: illegal embedded field type " + ft.String())
2075 name = elem.String()
2076 } else {
2077 name = ft.String()
2080 switch f.typ.Kind() {
2081 case Interface:
2082 ift := (*interfaceType)(unsafe.Pointer(ft))
2083 if len(ift.methods) > 0 {
2084 panic("reflect.StructOf: embedded field with methods not implemented")
2086 case Pointer:
2087 ptr := (*ptrType)(unsafe.Pointer(ft))
2088 if unt := ptr.uncommon(); unt != nil {
2089 if len(unt.methods) > 0 {
2090 panic("reflect.StructOf: embedded field with methods not implemented")
2093 if unt := ptr.elem.uncommon(); unt != nil {
2094 if len(unt.methods) > 0 {
2095 panic("reflect.StructOf: embedded field with methods not implemented")
2098 default:
2099 if unt := ft.uncommon(); unt != nil {
2100 if len(unt.methods) > 0 {
2101 panic("reflect.StructOf: embedded field with methods not implemented")
2106 if _, dup := fset[name]; dup && name != "_" {
2107 panic("reflect.StructOf: duplicate field " + name)
2109 fset[name] = struct{}{}
2111 repr = append(repr, (" " + *ft.string)...)
2112 if f.tag != nil {
2113 repr = append(repr, (" " + strconv.Quote(*f.tag))...)
2115 if i < len(fields)-1 {
2116 repr = append(repr, ';')
2119 comparable = comparable && (ft.equal != nil)
2121 offset := align(size, uintptr(ft.fieldAlign))
2122 if ft.fieldAlign > typalign {
2123 typalign = ft.fieldAlign
2125 size = offset + ft.size
2126 f.offsetEmbed |= offset << 1
2128 if ft.size == 0 {
2129 lastzero = size
2132 fs[i] = f
2135 if size > 0 && lastzero == size {
2136 // This is a non-zero sized struct that ends in a
2137 // zero-sized field. We add an extra byte of padding,
2138 // to ensure that taking the address of the final
2139 // zero-sized field can't manufacture a pointer to the
2140 // next object in the heap. See issue 9401.
2141 size++
2144 if len(fs) > 0 {
2145 repr = append(repr, ' ')
2147 repr = append(repr, '}')
2148 hash <<= 2
2149 str := string(repr)
2151 // Round the size up to be a multiple of the alignment.
2152 size = align(size, uintptr(typalign))
2154 // Make the struct type.
2155 var istruct any = struct{}{}
2156 prototype := *(**structType)(unsafe.Pointer(&istruct))
2157 typ := new(structType)
2158 *typ = *prototype
2159 typ.fields = fs
2161 // Look in cache.
2162 if ts, ok := structLookupCache.m.Load(hash); ok {
2163 for _, st := range ts.([]Type) {
2164 t := st.common()
2165 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2166 return t
2171 // Not in cache, lock and retry.
2172 structLookupCache.Lock()
2173 defer structLookupCache.Unlock()
2174 if ts, ok := structLookupCache.m.Load(hash); ok {
2175 for _, st := range ts.([]Type) {
2176 t := st.common()
2177 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2178 return t
2183 addToCache := func(t Type) Type {
2184 var ts []Type
2185 if ti, ok := structLookupCache.m.Load(hash); ok {
2186 ts = ti.([]Type)
2188 structLookupCache.m.Store(hash, append(ts, t))
2189 return t
2192 // Look in known types.
2193 if tt := lookupType(str); tt != nil {
2194 if haveIdenticalUnderlyingType(&typ.rtype, tt, true) {
2195 return addToCache(tt)
2199 typ.string = &str
2200 typ.tflag = 0 // TODO: set tflagRegularMemory
2201 typ.hash = hash
2202 typ.size = size
2203 typ.ptrdata = typeptrdata(typ.common())
2204 typ.align = typalign
2205 typ.fieldAlign = typalign
2207 if hasGCProg {
2208 lastPtrField := 0
2209 for i, ft := range fs {
2210 if ft.typ.pointers() {
2211 lastPtrField = i
2214 prog := []byte{0, 0, 0, 0} // will be length of prog
2215 var off uintptr
2216 for i, ft := range fs {
2217 if i > lastPtrField {
2218 // gcprog should not include anything for any field after
2219 // the last field that contains pointer data
2220 break
2222 if !ft.typ.pointers() {
2223 // Ignore pointerless fields.
2224 continue
2226 // Pad to start of this field with zeros.
2227 if ft.offset() > off {
2228 n := (ft.offset() - off) / goarch.PtrSize
2229 prog = append(prog, 0x01, 0x00) // emit a 0 bit
2230 if n > 1 {
2231 prog = append(prog, 0x81) // repeat previous bit
2232 prog = appendVarint(prog, n-1) // n-1 times
2234 off = ft.offset()
2237 prog = appendGCProg(prog, ft.typ)
2238 off += ft.typ.ptrdata
2240 prog = append(prog, 0)
2241 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2242 typ.kind |= kindGCProg
2243 typ.gcdata = &prog[0]
2244 } else {
2245 typ.kind &^= kindGCProg
2246 bv := new(bitVector)
2247 addTypeBits(bv, 0, typ.common())
2248 if len(bv.data) > 0 {
2249 typ.gcdata = &bv.data[0]
2252 typ.ptrdata = typeptrdata(typ.common())
2254 typ.equal = nil
2255 if comparable {
2256 typ.equal = func(p, q unsafe.Pointer) bool {
2257 for _, ft := range typ.fields {
2258 pi := add(p, ft.offset(), "&x.field safe")
2259 qi := add(q, ft.offset(), "&x.field safe")
2260 if !ft.typ.equal(pi, qi) {
2261 return false
2264 return true
2268 switch {
2269 case len(fs) == 1 && !ifaceIndir(fs[0].typ):
2270 // structs of 1 direct iface type can be direct
2271 typ.kind |= kindDirectIface
2272 default:
2273 typ.kind &^= kindDirectIface
2276 typ.uncommonType = nil
2277 typ.ptrToThis = nil
2278 return addToCache(toType(&typ.rtype).(*rtype))
2281 // runtimeStructField takes a StructField value passed to StructOf and
2282 // returns both the corresponding internal representation, of type
2283 // structField, and the pkgpath value to use for this field.
2284 func runtimeStructField(field StructField) (structField, string) {
2285 if field.Anonymous && field.PkgPath != "" {
2286 panic("reflect.StructOf: field \"" + field.Name + "\" is anonymous but has PkgPath set")
2289 if field.IsExported() {
2290 // Best-effort check for misuse.
2291 // Since this field will be treated as exported, not much harm done if Unicode lowercase slips through.
2292 c := field.Name[0]
2293 if 'a' <= c && c <= 'z' || c == '_' {
2294 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2298 offsetEmbed := uintptr(0)
2299 if field.Anonymous {
2300 offsetEmbed |= 1
2303 s := field.Name
2304 name := &s
2306 var tag *string
2307 if field.Tag != "" {
2308 st := string(field.Tag)
2309 tag = &st
2312 var pkgPath *string
2313 if field.PkgPath != "" {
2314 s := field.PkgPath
2315 pkgPath = &s
2317 f := structField{
2318 name: name,
2319 pkgPath: pkgPath,
2320 typ: field.Type.common(),
2321 tag: tag,
2322 offsetEmbed: offsetEmbed,
2324 return f, field.PkgPath
2327 // typeptrdata returns the length in bytes of the prefix of t
2328 // containing pointer data. Anything after this offset is scalar data.
2329 // keep in sync with ../cmd/compile/internal/reflectdata/reflect.go
2330 func typeptrdata(t *rtype) uintptr {
2331 switch t.Kind() {
2332 case Struct:
2333 st := (*structType)(unsafe.Pointer(t))
2334 // find the last field that has pointers.
2335 field := -1
2336 for i := range st.fields {
2337 ft := st.fields[i].typ
2338 if ft.pointers() {
2339 field = i
2342 if field == -1 {
2343 return 0
2345 f := st.fields[field]
2346 return f.offset() + f.typ.ptrdata
2348 default:
2349 panic("reflect.typeptrdata: unexpected type, " + t.String())
// maxPtrmaskBytes is the maximum size of a pointer bitmask; larger
// types use a GC program instead (see the size threshold in ArrayOf).
// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant.
const maxPtrmaskBytes = 2048
// ArrayOf returns the array type with the given length and element type.
// For example, if t represents int, ArrayOf(5, t) represents [5]int.
//
// If the resulting type would be larger than the available address space,
// ArrayOf panics.
func ArrayOf(length int, elem Type) Type {
	if length < 0 {
		panic("reflect: negative length passed to ArrayOf")
	}

	typ := elem.(*rtype)

	// Look in cache.
	ckey := cacheKey{Array, typ, nil, uintptr(length)}
	if array, ok := lookupCache.Load(ckey); ok {
		return array.(Type)
	}

	// Look in known types: a matching compiler-generated descriptor
	// is preferred over building a fresh one.
	s := "[" + strconv.Itoa(length) + "]" + *typ.string
	if tt := lookupType(s); tt != nil {
		array := (*arrayType)(unsafe.Pointer(toType(tt).(*rtype)))
		if array.elem == typ {
			ti, _ := lookupCache.LoadOrStore(ckey, tt)
			return ti.(Type)
		}
	}

	// Make an array type, starting from the prototype descriptor of
	// [1]unsafe.Pointer extracted from an interface value.
	var iarray any = [1]unsafe.Pointer{}
	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
	array := *prototype
	array.tflag = typ.tflag & tflagRegularMemory
	array.string = &s

	// gccgo uses a different hash.
	// array.hash = fnv1(typ.hash, '[')
	// for n := uint32(length); n > 0; n >>= 8 {
	// 	array.hash = fnv1(array.hash, byte(n))
	// }
	// array.hash = fnv1(array.hash, ']')
	array.hash = typ.hash + 1 + 13

	array.elem = typ
	array.ptrToThis = nil
	if typ.size > 0 {
		// Guard against size overflow before the multiplication below.
		max := ^uintptr(0) / typ.size
		if uintptr(length) > max {
			panic("reflect.ArrayOf: array size would exceed virtual address space")
		}
	}
	array.size = typ.size * uintptr(length)
	if length > 0 && typ.ptrdata != 0 {
		// Pointer data ends inside the last element, at its ptrdata offset.
		array.ptrdata = typ.size*uintptr(length-1) + typ.ptrdata
	}
	array.align = typ.align
	array.fieldAlign = typ.fieldAlign
	array.uncommonType = nil
	array.len = uintptr(length)
	array.slice = SliceOf(elem).(*rtype)

	// Choose the GC metadata representation: none, the element's own,
	// an expanded pointer mask, or a GC program.
	switch {
	case typ.ptrdata == 0 || array.size == 0:
		// No pointers.
		array.gcdata = nil
		array.ptrdata = 0

	case length == 1:
		// In memory, 1-element array looks just like the element.
		array.kind |= typ.kind & kindGCProg
		array.gcdata = typ.gcdata
		array.ptrdata = typ.ptrdata

	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*goarch.PtrSize:
		// Element is small with pointer mask; array is still small.
		// Create direct pointer mask by turning each 1 bit in elem
		// into length 1 bits in larger mask.
		mask := make([]byte, (array.ptrdata/goarch.PtrSize+7)/8)
		emitGCMask(mask, 0, typ, array.len)
		array.gcdata = &mask[0]

	default:
		// Create program that emits one element
		// and then repeats to make the array.
		prog := []byte{0, 0, 0, 0} // will be length of prog
		prog = appendGCProg(prog, typ)
		// Pad from ptrdata to size.
		elemPtrs := typ.ptrdata / goarch.PtrSize
		elemWords := typ.size / goarch.PtrSize
		if elemPtrs < elemWords {
			// Emit literal 0 bit, then repeat as needed.
			prog = append(prog, 0x01, 0x00)
			if elemPtrs+1 < elemWords {
				prog = append(prog, 0x81)
				prog = appendVarint(prog, elemWords-elemPtrs-1)
			}
		}
		// Repeat length-1 times.
		if elemWords < 0x80 {
			prog = append(prog, byte(elemWords|0x80))
		} else {
			prog = append(prog, 0x80)
			prog = appendVarint(prog, elemWords)
		}
		prog = appendVarint(prog, uintptr(length)-1)
		prog = append(prog, 0)
		// Backpatch the 4-byte length header reserved above.
		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
		array.kind |= kindGCProg
		array.gcdata = &prog[0]
		array.ptrdata = array.size // overestimate but ok; must match program
	}

	etyp := typ.common()
	esize := typ.size

	// Arrays are comparable only if their element type is.
	array.equal = nil
	if eequal := etyp.equal; eequal != nil {
		array.equal = func(p, q unsafe.Pointer) bool {
			for i := 0; i < length; i++ {
				pi := arrayAt(p, i, esize, "i < length")
				qi := arrayAt(q, i, esize, "i < length")
				if !eequal(pi, qi) {
					return false
				}
			}
			return true
		}
	}

	switch {
	case length == 1 && !ifaceIndir(typ):
		// array of 1 direct iface type can be direct
		array.kind |= kindDirectIface
	default:
		array.kind &^= kindDirectIface
	}

	ti, _ := lookupCache.LoadOrStore(ckey, toType(&array.rtype).(*rtype))
	return ti.(Type)
}
// appendVarint appends v to x in the GC program varint encoding:
// seven value bits per byte, least-significant group first, with the
// high bit set on every byte except the last.
func appendVarint(x []byte, v uintptr) []byte {
	for v >= 0x80 {
		x = append(x, byte(v)|0x80)
		v >>= 7
	}
	return append(x, byte(v))
}
// lookupType looks up a compiler-generated type descriptor whose string
// representation is s, returning nil when no such descriptor exists
// (callers check the result against nil before using it).
// Implemented in runtime.
func lookupType(s string) *rtype
// ifaceIndir reports whether t is stored indirectly in an interface value.
// A type without the kindDirectIface bit set is stored as a pointer to
// the value rather than the value itself.
func ifaceIndir(t *rtype) bool {
	return t.kind&kindDirectIface == 0
}
// bitVector is a growable bitmap used to build GC pointer masks;
// bit i is stored at data[i/8], bit position i%8 (see append below).
// Note: this type must agree with runtime.bitvector.
type bitVector struct {
	n    uint32 // number of bits
	data []byte // bit storage, 8 bits per byte, least-significant first
}
2519 // append a bit to the bitmap.
2520 func (bv *bitVector) append(bit uint8) {
2521 if bv.n%8 == 0 {
2522 bv.data = append(bv.data, 0)
2524 bv.data[bv.n/8] |= bit << (bv.n % 8)
2525 bv.n++
2528 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
2529 if t.ptrdata == 0 {
2530 return
2533 switch Kind(t.kind & kindMask) {
2534 case Chan, Func, Map, Pointer, Slice, String, UnsafePointer:
2535 // 1 pointer at start of representation
2536 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
2537 bv.append(0)
2539 bv.append(1)
2541 case Interface:
2542 // 2 pointers
2543 for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
2544 bv.append(0)
2546 bv.append(1)
2547 bv.append(1)
2549 case Array:
2550 // repeat inner type
2551 tt := (*arrayType)(unsafe.Pointer(t))
2552 for i := 0; i < int(tt.len); i++ {
2553 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
2556 case Struct:
2557 // apply fields
2558 tt := (*structType)(unsafe.Pointer(t))
2559 for i := range tt.fields {
2560 f := &tt.fields[i]
2561 addTypeBits(bv, offset+f.offset(), f.typ)