...

Source file src/reflect/type.go

Documentation: reflect

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Package reflect implements run-time reflection, allowing a program to
     6  // manipulate objects with arbitrary types. The typical use is to take a value
     7  // with static type interface{} and extract its dynamic type information by
     8  // calling TypeOf, which returns a Type.
     9  //
    10  // A call to ValueOf returns a Value representing the run-time data.
    11  // Zero takes a Type and returns a Value representing a zero value
    12  // for that type.
    13  //
    14  // See "The Laws of Reflection" for an introduction to reflection in Go:
    15  // https://golang.org/doc/articles/laws_of_reflection.html
    16  package reflect
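// A minimal sketch of the typical use described above, assuming client code
// that imports "fmt" and "reflect":
//
//	var x float64 = 3.4
//	t := reflect.TypeOf(x) // extract x's dynamic type from the interface{} argument
//	fmt.Println(t.Name(), t.Kind(), t.Size()) // float64 float64 8
//	v := reflect.Zero(t)                      // a Value holding float64's zero value
//	fmt.Println(v.Float())                    // 0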
    17  
    18  import (
    19  	"runtime"
    20  	"strconv"
    21  	"sync"
    22  	"unicode"
    23  	"unicode/utf8"
    24  	"unsafe"
    25  )
    26  
    27  // Type is the representation of a Go type.
    28  //
    29  // Not all methods apply to all kinds of types. Restrictions,
    30  // if any, are noted in the documentation for each method.
    31  // Use the Kind method to find out the kind of type before
    32  // calling kind-specific methods. Calling a method
    33  // inappropriate to the kind of type causes a run-time panic.
    34  //
    35  // Type values are comparable, such as with the == operator,
    36  // so they can be used as map keys.
    37  // Two Type values are equal if they represent identical types.
    38  type Type interface {
    39  	// Methods applicable to all types.
    40  
    41  	// Align returns the alignment in bytes of a value of
    42  	// this type when allocated in memory.
    43  	Align() int
    44  
    45  	// FieldAlign returns the alignment in bytes of a value of
    46  	// this type when used as a field in a struct.
    47  	FieldAlign() int
    48  
    49  	// Method returns the i'th method in the type's method set.
    50  	// It panics if i is not in the range [0, NumMethod()).
    51  	//
    52  	// For a non-interface type T or *T, the returned Method's Type and Func
    53  	// fields describe a function whose first argument is the receiver.
    54  	//
    55  	// For an interface type, the returned Method's Type field gives the
    56  	// method signature, without a receiver, and the Func field is nil.
    57  	Method(int) Method
    58  
    59  	// MethodByName returns the method with that name in the type's
    60  	// method set and a boolean indicating if the method was found.
    61  	//
    62  	// For a non-interface type T or *T, the returned Method's Type and Func
    63  	// fields describe a function whose first argument is the receiver.
    64  	//
    65  	// For an interface type, the returned Method's Type field gives the
    66  	// method signature, without a receiver, and the Func field is nil.
    67  	MethodByName(string) (Method, bool)
    68  
    69  	// NumMethod returns the number of exported methods in the type's method set.
    70  	NumMethod() int
    71  
    72  	// Name returns the type's name within its package.
    73  	// It returns an empty string for unnamed types.
    74  	Name() string
    75  
    76  	// PkgPath returns a named type's package path, that is, the import path
    77  	// that uniquely identifies the package, such as "encoding/base64".
    78  	// If the type was predeclared (string, error) or unnamed (*T, struct{}, []int),
    79  	// the package path will be the empty string.
    80  	PkgPath() string
    81  
    82  	// Size returns the number of bytes needed to store
    83  	// a value of the given type; it is analogous to unsafe.Sizeof.
    84  	Size() uintptr
    85  
    86  	// String returns a string representation of the type.
    87  	// The string representation may use shortened package names
    88  	// (e.g., base64 instead of "encoding/base64") and is not
    89  	// guaranteed to be unique among types. To test for type identity,
    90  	// compare the Types directly.
    91  	String() string
    92  
    93  	// Kind returns the specific kind of this type.
    94  	Kind() Kind
    95  
    96  	// Implements reports whether the type implements the interface type u.
    97  	Implements(u Type) bool
    98  
    99  	// AssignableTo reports whether a value of the type is assignable to type u.
   100  	AssignableTo(u Type) bool
   101  
   102  	// ConvertibleTo reports whether a value of the type is convertible to type u.
   103  	ConvertibleTo(u Type) bool
   104  
   105  	// Comparable reports whether values of this type are comparable.
   106  	Comparable() bool
   107  
   108  	// Methods applicable only to some types, depending on Kind.
   109  	// The methods allowed for each kind are:
   110  	//
   111  	//	Int*, Uint*, Float*, Complex*: Bits
   112  	//	Array: Elem, Len
   113  	//	Chan: ChanDir, Elem
    114  	//	Func: In, NumIn, Out, NumOut, IsVariadic
   115  	//	Map: Key, Elem
   116  	//	Ptr: Elem
   117  	//	Slice: Elem
   118  	//	Struct: Field, FieldByIndex, FieldByName, FieldByNameFunc, NumField
   119  
   120  	// Bits returns the size of the type in bits.
   121  	// It panics if the type's Kind is not one of the
   122  	// sized or unsized Int, Uint, Float, or Complex kinds.
   123  	Bits() int
   124  
   125  	// ChanDir returns a channel type's direction.
   126  	// It panics if the type's Kind is not Chan.
   127  	ChanDir() ChanDir
   128  
   129  	// IsVariadic reports whether a function type's final input parameter
   130  	// is a "..." parameter. If so, t.In(t.NumIn() - 1) returns the parameter's
   131  	// implicit actual type []T.
   132  	//
    133  	// For concreteness, if t represents func(x int, y ...float64), then
   134  	//
   135  	//	t.NumIn() == 2
   136  	//	t.In(0) is the reflect.Type for "int"
   137  	//	t.In(1) is the reflect.Type for "[]float64"
   138  	//	t.IsVariadic() == true
   139  	//
   140  	// IsVariadic panics if the type's Kind is not Func.
   141  	IsVariadic() bool
   142  
   143  	// Elem returns a type's element type.
   144  	// It panics if the type's Kind is not Array, Chan, Map, Ptr, or Slice.
   145  	Elem() Type
   146  
   147  	// Field returns a struct type's i'th field.
   148  	// It panics if the type's Kind is not Struct.
   149  	// It panics if i is not in the range [0, NumField()).
   150  	Field(i int) StructField
   151  
   152  	// FieldByIndex returns the nested field corresponding
   153  	// to the index sequence. It is equivalent to calling Field
   154  	// successively for each index i.
   155  	// It panics if the type's Kind is not Struct.
   156  	FieldByIndex(index []int) StructField
   157  
   158  	// FieldByName returns the struct field with the given name
   159  	// and a boolean indicating if the field was found.
   160  	FieldByName(name string) (StructField, bool)
   161  
   162  	// FieldByNameFunc returns the struct field with a name
   163  	// that satisfies the match function and a boolean indicating if
   164  	// the field was found.
   165  	//
   166  	// FieldByNameFunc considers the fields in the struct itself
   167  	// and then the fields in any anonymous structs, in breadth first order,
   168  	// stopping at the shallowest nesting depth containing one or more
   169  	// fields satisfying the match function. If multiple fields at that depth
   170  	// satisfy the match function, they cancel each other
   171  	// and FieldByNameFunc returns no match.
   172  	// This behavior mirrors Go's handling of name lookup in
   173  	// structs containing anonymous fields.
   174  	FieldByNameFunc(match func(string) bool) (StructField, bool)
   175  
   176  	// In returns the type of a function type's i'th input parameter.
   177  	// It panics if the type's Kind is not Func.
   178  	// It panics if i is not in the range [0, NumIn()).
   179  	In(i int) Type
   180  
   181  	// Key returns a map type's key type.
   182  	// It panics if the type's Kind is not Map.
   183  	Key() Type
   184  
   185  	// Len returns an array type's length.
   186  	// It panics if the type's Kind is not Array.
   187  	Len() int
   188  
   189  	// NumField returns a struct type's field count.
   190  	// It panics if the type's Kind is not Struct.
   191  	NumField() int
   192  
   193  	// NumIn returns a function type's input parameter count.
   194  	// It panics if the type's Kind is not Func.
   195  	NumIn() int
   196  
   197  	// NumOut returns a function type's output parameter count.
   198  	// It panics if the type's Kind is not Func.
   199  	NumOut() int
   200  
   201  	// Out returns the type of a function type's i'th output parameter.
   202  	// It panics if the type's Kind is not Func.
   203  	// It panics if i is not in the range [0, NumOut()).
   204  	Out(i int) Type
   205  
   206  	common() *rtype
   207  	uncommon() *uncommonType
   208  }
   209  
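// A minimal sketch of the contract above, assuming client code that imports
// "fmt" and "reflect": check Kind before calling kind-specific methods, and
// use Type values directly as map keys.
//
//	t := reflect.TypeOf([]int(nil))
//	if t.Kind() == reflect.Slice {
//		fmt.Println(t.Elem()) // int
//	}
//	seen := map[reflect.Type]bool{t: true}
//	fmt.Println(seen[reflect.TypeOf([]int{})]) // true: identical types are equal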
   210  // BUG(rsc): FieldByName and related functions consider struct field names to be equal
   211  // if the names are equal, even if they are unexported names originating
   212  // in different packages. The practical effect of this is that the result of
   213  // t.FieldByName("x") is not well defined if the struct type t contains
   214  // multiple fields named x (embedded from different packages).
   215  // FieldByName may return one of the fields named x or may report that there are none.
   216  // See https://golang.org/issue/4876 for more details.
   217  
   218  /*
   219   * These data structures are known to the compiler (../../cmd/internal/gc/reflect.go).
   220   * A few are known to ../runtime/type.go to convey to debuggers.
    221   * They are also known to ../cmd/link/internal/ld/decodesym.go.
   222   */
   223  
   224  // A Kind represents the specific kind of type that a Type represents.
   225  // The zero Kind is not a valid kind.
   226  type Kind uint
   227  
   228  const (
   229  	Invalid Kind = iota
   230  	Bool
   231  	Int
   232  	Int8
   233  	Int16
   234  	Int32
   235  	Int64
   236  	Uint
   237  	Uint8
   238  	Uint16
   239  	Uint32
   240  	Uint64
   241  	Uintptr
   242  	Float32
   243  	Float64
   244  	Complex64
   245  	Complex128
   246  	Array
   247  	Chan
   248  	Func
   249  	Interface
   250  	Map
   251  	Ptr
   252  	Slice
   253  	String
   254  	Struct
   255  	UnsafePointer
   256  )
   257  
   258  // tflag is used by an rtype to signal what extra type information is
   259  // available in the memory directly following the rtype value.
   260  //
   261  // tflag values must be kept in sync with copies in:
   262  //	cmd/compile/internal/gc/reflect.go
   263  //	cmd/link/internal/ld/decodesym.go
   264  //	runtime/type.go
   265  type tflag uint8
   266  
   267  const (
   268  	// tflagUncommon means that there is a pointer, *uncommonType,
   269  	// just beyond the outer type structure.
   270  	//
   271  	// For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
   272  	// then t has uncommonType data and it can be accessed as:
   273  	//
   274  	//	type tUncommon struct {
   275  	//		structType
   276  	//		u uncommonType
   277  	//	}
   278  	//	u := &(*tUncommon)(unsafe.Pointer(t)).u
   279  	tflagUncommon tflag = 1 << 0
   280  
   281  	// tflagExtraStar means the name in the str field has an
   282  	// extraneous '*' prefix. This is because for most types T in
   283  	// a program, the type *T also exists and reusing the str data
   284  	// saves binary size.
   285  	tflagExtraStar tflag = 1 << 1
   286  
   287  	// tflagNamed means the type has a name.
   288  	tflagNamed tflag = 1 << 2
   289  )
   290  
   291  // rtype is the common implementation of most values.
   292  // It is embedded in other, public struct types, but always
   293  // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
   294  // so that code cannot convert from, say, *arrayType to *ptrType.
   295  //
   296  // rtype must be kept in sync with ../runtime/type.go:/^type._type.
   297  type rtype struct {
   298  	size       uintptr
   299  	ptrdata    uintptr  // number of bytes in the type that can contain pointers
   300  	hash       uint32   // hash of type; avoids computation in hash tables
   301  	tflag      tflag    // extra type information flags
   302  	align      uint8    // alignment of variable with this type
   303  	fieldAlign uint8    // alignment of struct field with this type
   304  	kind       uint8    // enumeration for C
   305  	alg        *typeAlg // algorithm table
   306  	gcdata     *byte    // garbage collection data
   307  	str        nameOff  // string form
   308  	ptrToThis  typeOff  // type for pointer to this type, may be zero
   309  }
   310  
   311  // a copy of runtime.typeAlg
   312  type typeAlg struct {
   313  	// function for hashing objects of this type
   314  	// (ptr to object, seed) -> hash
   315  	hash func(unsafe.Pointer, uintptr) uintptr
   316  	// function for comparing objects of this type
   317  	// (ptr to object A, ptr to object B) -> ==?
   318  	equal func(unsafe.Pointer, unsafe.Pointer) bool
   319  }
   320  
   321  // Method on non-interface type
   322  type method struct {
   323  	name nameOff // name of method
   324  	mtyp typeOff // method type (without receiver)
   325  	ifn  textOff // fn used in interface call (one-word receiver)
   326  	tfn  textOff // fn used for normal method call
   327  }
   328  
   329  // uncommonType is present only for types with names or methods
   330  // (if T is a named type, the uncommonTypes for T and *T have methods).
   331  // Using a pointer to this struct reduces the overall size required
   332  // to describe an unnamed type with no methods.
   333  type uncommonType struct {
   334  	pkgPath nameOff // import path; empty for built-in types like int, string
   335  	mcount  uint16  // number of methods
   336  	_       uint16  // unused
   337  	moff    uint32  // offset from this uncommontype to [mcount]method
   338  	_       uint32  // unused
   339  }
   340  
   341  // ChanDir represents a channel type's direction.
   342  type ChanDir int
   343  
   344  const (
   345  	RecvDir ChanDir             = 1 << iota // <-chan
   346  	SendDir                                 // chan<-
   347  	BothDir = RecvDir | SendDir             // chan
   348  )
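// A small sketch of reading a direction, assuming client code that imports
// "fmt" and "reflect":
//
//	var c <-chan int
//	t := reflect.TypeOf(c)
//	fmt.Println(t.Kind(), t.ChanDir()) // chan <-chan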
   349  
   350  // arrayType represents a fixed array type.
   351  type arrayType struct {
   352  	rtype `reflect:"array"`
   353  	elem  *rtype // array element type
   354  	slice *rtype // slice type
   355  	len   uintptr
   356  }
   357  
   358  // chanType represents a channel type.
   359  type chanType struct {
   360  	rtype `reflect:"chan"`
   361  	elem  *rtype  // channel element type
   362  	dir   uintptr // channel direction (ChanDir)
   363  }
   364  
   365  // funcType represents a function type.
   366  //
   367  // A *rtype for each in and out parameter is stored in an array that
   368  // directly follows the funcType (and possibly its uncommonType). So
   369  // a function type with one method, one input, and one output is:
   370  //
   371  //	struct {
   372  //		funcType
   373  //		uncommonType
   374  //		[2]*rtype    // [0] is in, [1] is out
   375  //	}
   376  type funcType struct {
   377  	rtype    `reflect:"func"`
   378  	inCount  uint16
   379  	outCount uint16 // top bit is set if last input parameter is ...
   380  }
   381  
   382  // imethod represents a method on an interface type
   383  type imethod struct {
   384  	name nameOff // name of method
   385  	typ  typeOff // .(*FuncType) underneath
   386  }
   387  
   388  // interfaceType represents an interface type.
   389  type interfaceType struct {
   390  	rtype   `reflect:"interface"`
   391  	pkgPath name      // import path
   392  	methods []imethod // sorted by hash
   393  }
   394  
   395  // mapType represents a map type.
   396  type mapType struct {
   397  	rtype         `reflect:"map"`
   398  	key           *rtype // map key type
   399  	elem          *rtype // map element (value) type
   400  	bucket        *rtype // internal bucket structure
   401  	hmap          *rtype // internal map header
   402  	keysize       uint8  // size of key slot
   403  	indirectkey   uint8  // store ptr to key instead of key itself
   404  	valuesize     uint8  // size of value slot
   405  	indirectvalue uint8  // store ptr to value instead of value itself
   406  	bucketsize    uint16 // size of bucket
   407  	reflexivekey  bool   // true if k==k for all keys
   408  	needkeyupdate bool   // true if we need to update key on an overwrite
   409  }
   410  
   411  // ptrType represents a pointer type.
   412  type ptrType struct {
   413  	rtype `reflect:"ptr"`
   414  	elem  *rtype // pointer element (pointed at) type
   415  }
   416  
   417  // sliceType represents a slice type.
   418  type sliceType struct {
   419  	rtype `reflect:"slice"`
   420  	elem  *rtype // slice element type
   421  }
   422  
   423  // Struct field
   424  type structField struct {
   425  	name       name    // name is always non-empty
   426  	typ        *rtype  // type of field
   427  	offsetAnon uintptr // byte offset of field<<1 | isAnonymous
   428  }
   429  
   430  func (f *structField) offset() uintptr {
   431  	return f.offsetAnon >> 1
   432  }
   433  
   434  func (f *structField) anon() bool {
   435  	return f.offsetAnon&1 != 0
   436  }
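// For example, an embedded field at byte offset 8 is stored with
// offsetAnon == 8<<1|1 == 17, so offset() reports 8 and anon() reports true.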
   437  
   438  // structType represents a struct type.
   439  type structType struct {
   440  	rtype   `reflect:"struct"`
   441  	pkgPath name
   442  	fields  []structField // sorted by offset
   443  }
   444  
   445  // name is an encoded type name with optional extra data.
   446  //
   447  // The first byte is a bit field containing:
   448  //
   449  //	1<<0 the name is exported
   450  //	1<<1 tag data follows the name
   451  //	1<<2 pkgPath nameOff follows the name and tag
   452  //
   453  // The next two bytes are the data length:
   454  //
   455  //	 l := uint16(data[1])<<8 | uint16(data[2])
   456  //
   457  // Bytes [3:3+l] are the string data.
   458  //
   459  // If tag data follows then bytes 3+l and 3+l+1 are the tag length,
   460  // with the data following.
   461  //
   462  // If the import path follows, then 4 bytes at the end of
   463  // the data form a nameOff. The import path is only set for concrete
   464  // methods that are defined in a different package than their type.
   465  //
   466  // If a name starts with "*", then the exported bit represents
   467  // whether the pointed to type is exported.
   468  type name struct {
   469  	bytes *byte
   470  }
   471  
   472  func (n name) data(off int, whySafe string) *byte {
   473  	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
   474  }
   475  
   476  func (n name) isExported() bool {
   477  	return (*n.bytes)&(1<<0) != 0
   478  }
   479  
   480  func (n name) nameLen() int {
   481  	return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
   482  }
   483  
   484  func (n name) tagLen() int {
   485  	if *n.data(0, "name flag field")&(1<<1) == 0 {
   486  		return 0
   487  	}
   488  	off := 3 + n.nameLen()
   489  	return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
   490  }
   491  
   492  func (n name) name() (s string) {
   493  	if n.bytes == nil {
   494  		return
   495  	}
   496  	b := (*[4]byte)(unsafe.Pointer(n.bytes))
   497  
   498  	hdr := (*stringHeader)(unsafe.Pointer(&s))
   499  	hdr.Data = unsafe.Pointer(&b[3])
   500  	hdr.Len = int(b[1])<<8 | int(b[2])
   501  	return s
   502  }
   503  
   504  func (n name) tag() (s string) {
   505  	tl := n.tagLen()
   506  	if tl == 0 {
   507  		return ""
   508  	}
   509  	nl := n.nameLen()
   510  	hdr := (*stringHeader)(unsafe.Pointer(&s))
   511  	hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
   512  	hdr.Len = tl
   513  	return s
   514  }
   515  
   516  func (n name) pkgPath() string {
   517  	if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
   518  		return ""
   519  	}
   520  	off := 3 + n.nameLen()
   521  	if tl := n.tagLen(); tl > 0 {
   522  		off += 2 + tl
   523  	}
   524  	var nameOff int32
   525  	// Note that this field may not be aligned in memory,
   526  	// so we cannot use a direct int32 assignment here.
   527  	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
   528  	pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
   529  	return pkgPathName.name()
   530  }
   531  
   532  // round n up to a multiple of a.  a must be a power of 2.
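// For example, round(10, 8) == 16 and round(16, 8) == 16.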
   533  func round(n, a uintptr) uintptr {
   534  	return (n + a - 1) &^ (a - 1)
   535  }
   536  
   537  func newName(n, tag string, exported bool) name {
   538  	if len(n) > 1<<16-1 {
    539  		panic("reflect.newName: name too long: " + n)
   540  	}
   541  	if len(tag) > 1<<16-1 {
    542  		panic("reflect.newName: tag too long: " + tag)
   543  	}
   544  
   545  	var bits byte
   546  	l := 1 + 2 + len(n)
   547  	if exported {
   548  		bits |= 1 << 0
   549  	}
   550  	if len(tag) > 0 {
   551  		l += 2 + len(tag)
   552  		bits |= 1 << 1
   553  	}
   554  
   555  	b := make([]byte, l)
   556  	b[0] = bits
   557  	b[1] = uint8(len(n) >> 8)
   558  	b[2] = uint8(len(n))
   559  	copy(b[3:], n)
   560  	if len(tag) > 0 {
   561  		tb := b[3+len(n):]
   562  		tb[0] = uint8(len(tag) >> 8)
   563  		tb[1] = uint8(len(tag))
   564  		copy(tb[2:], tag)
   565  	}
   566  
   567  	return name{bytes: &b[0]}
   568  }
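// A small sketch of the round trip, assuming code inside this package (for
// example a test), since newName and the name accessors are unexported:
//
//	n := newName("Foo", `json:"foo"`, true)
//	fmt.Println(n.name(), n.tag(), n.isExported()) // Foo json:"foo" true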
   569  
   570  /*
   571   * The compiler knows the exact layout of all the data structures above.
   572   * The compiler does not know about the data structures and methods below.
   573   */
   574  
   575  // Method represents a single method.
   576  type Method struct {
   577  	// Name is the method name.
   578  	// PkgPath is the package path that qualifies a lower case (unexported)
   579  	// method name. It is empty for upper case (exported) method names.
   580  	// The combination of PkgPath and Name uniquely identifies a method
   581  	// in a method set.
   582  	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
   583  	Name    string
   584  	PkgPath string
   585  
   586  	Type  Type  // method type
   587  	Func  Value // func with receiver as first argument
   588  	Index int   // index for Type.Method
   589  }
   590  
   591  const (
   592  	kindDirectIface = 1 << 5
   593  	kindGCProg      = 1 << 6 // Type.gc points to GC program
   594  	kindNoPointers  = 1 << 7
   595  	kindMask        = (1 << 5) - 1
   596  )
   597  
   598  func (k Kind) String() string {
   599  	if int(k) < len(kindNames) {
   600  		return kindNames[k]
   601  	}
   602  	return "kind" + strconv.Itoa(int(k))
   603  }
   604  
   605  var kindNames = []string{
   606  	Invalid:       "invalid",
   607  	Bool:          "bool",
   608  	Int:           "int",
   609  	Int8:          "int8",
   610  	Int16:         "int16",
   611  	Int32:         "int32",
   612  	Int64:         "int64",
   613  	Uint:          "uint",
   614  	Uint8:         "uint8",
   615  	Uint16:        "uint16",
   616  	Uint32:        "uint32",
   617  	Uint64:        "uint64",
   618  	Uintptr:       "uintptr",
   619  	Float32:       "float32",
   620  	Float64:       "float64",
   621  	Complex64:     "complex64",
   622  	Complex128:    "complex128",
   623  	Array:         "array",
   624  	Chan:          "chan",
   625  	Func:          "func",
   626  	Interface:     "interface",
   627  	Map:           "map",
   628  	Ptr:           "ptr",
   629  	Slice:         "slice",
   630  	String:        "string",
   631  	Struct:        "struct",
   632  	UnsafePointer: "unsafe.Pointer",
   633  }
   634  
   635  func (t *uncommonType) methods() []method {
   636  	if t.mcount == 0 {
   637  		return nil
   638  	}
   639  	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
   640  }
   641  
   642  // resolveNameOff resolves a name offset from a base pointer.
   643  // The (*rtype).nameOff method is a convenience wrapper for this function.
   644  // Implemented in the runtime package.
   645  func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
   646  
   647  // resolveTypeOff resolves an *rtype offset from a base type.
   648  // The (*rtype).typeOff method is a convenience wrapper for this function.
   649  // Implemented in the runtime package.
   650  func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
   651  
    652  // resolveTextOff resolves a function pointer offset from a base type.
   653  // The (*rtype).textOff method is a convenience wrapper for this function.
   654  // Implemented in the runtime package.
   655  func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
   656  
   657  // addReflectOff adds a pointer to the reflection lookup map in the runtime.
   658  // It returns a new ID that can be used as a typeOff or textOff, and will
   659  // be resolved correctly. Implemented in the runtime package.
   660  func addReflectOff(ptr unsafe.Pointer) int32
   661  
    662  // resolveReflectName adds a name to the reflection lookup map in the runtime.
   663  // It returns a new nameOff that can be used to refer to the pointer.
   664  func resolveReflectName(n name) nameOff {
   665  	return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
   666  }
   667  
   668  // resolveReflectType adds a *rtype to the reflection lookup map in the runtime.
   669  // It returns a new typeOff that can be used to refer to the pointer.
   670  func resolveReflectType(t *rtype) typeOff {
   671  	return typeOff(addReflectOff(unsafe.Pointer(t)))
   672  }
   673  
   674  // resolveReflectText adds a function pointer to the reflection lookup map in
   675  // the runtime. It returns a new textOff that can be used to refer to the
   676  // pointer.
   677  func resolveReflectText(ptr unsafe.Pointer) textOff {
   678  	return textOff(addReflectOff(ptr))
   679  }
   680  
   681  type nameOff int32 // offset to a name
   682  type typeOff int32 // offset to an *rtype
   683  type textOff int32 // offset from top of text section
   684  
   685  func (t *rtype) nameOff(off nameOff) name {
   686  	return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
   687  }
   688  
   689  func (t *rtype) typeOff(off typeOff) *rtype {
   690  	return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
   691  }
   692  
   693  func (t *rtype) textOff(off textOff) unsafe.Pointer {
   694  	return resolveTextOff(unsafe.Pointer(t), int32(off))
   695  }
   696  
   697  func (t *rtype) uncommon() *uncommonType {
   698  	if t.tflag&tflagUncommon == 0 {
   699  		return nil
   700  	}
   701  	switch t.Kind() {
   702  	case Struct:
   703  		return &(*structTypeUncommon)(unsafe.Pointer(t)).u
   704  	case Ptr:
   705  		type u struct {
   706  			ptrType
   707  			u uncommonType
   708  		}
   709  		return &(*u)(unsafe.Pointer(t)).u
   710  	case Func:
   711  		type u struct {
   712  			funcType
   713  			u uncommonType
   714  		}
   715  		return &(*u)(unsafe.Pointer(t)).u
   716  	case Slice:
   717  		type u struct {
   718  			sliceType
   719  			u uncommonType
   720  		}
   721  		return &(*u)(unsafe.Pointer(t)).u
   722  	case Array:
   723  		type u struct {
   724  			arrayType
   725  			u uncommonType
   726  		}
   727  		return &(*u)(unsafe.Pointer(t)).u
   728  	case Chan:
   729  		type u struct {
   730  			chanType
   731  			u uncommonType
   732  		}
   733  		return &(*u)(unsafe.Pointer(t)).u
   734  	case Map:
   735  		type u struct {
   736  			mapType
   737  			u uncommonType
   738  		}
   739  		return &(*u)(unsafe.Pointer(t)).u
   740  	case Interface:
   741  		type u struct {
   742  			interfaceType
   743  			u uncommonType
   744  		}
   745  		return &(*u)(unsafe.Pointer(t)).u
   746  	default:
   747  		type u struct {
   748  			rtype
   749  			u uncommonType
   750  		}
   751  		return &(*u)(unsafe.Pointer(t)).u
   752  	}
   753  }
   754  
   755  func (t *rtype) String() string {
   756  	s := t.nameOff(t.str).name()
   757  	if t.tflag&tflagExtraStar != 0 {
   758  		return s[1:]
   759  	}
   760  	return s
   761  }
   762  
   763  func (t *rtype) Size() uintptr { return t.size }
   764  
   765  func (t *rtype) Bits() int {
   766  	if t == nil {
   767  		panic("reflect: Bits of nil Type")
   768  	}
   769  	k := t.Kind()
   770  	if k < Int || k > Complex128 {
   771  		panic("reflect: Bits of non-arithmetic Type " + t.String())
   772  	}
   773  	return int(t.size) * 8
   774  }
   775  
   776  func (t *rtype) Align() int { return int(t.align) }
   777  
   778  func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
   779  
   780  func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
   781  
   782  func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
   783  
   784  func (t *rtype) common() *rtype { return t }
   785  
   786  var methodCache sync.Map // map[*rtype][]method
   787  
   788  func (t *rtype) exportedMethods() []method {
   789  	methodsi, found := methodCache.Load(t)
   790  	if found {
   791  		return methodsi.([]method)
   792  	}
   793  
   794  	ut := t.uncommon()
   795  	if ut == nil {
   796  		return nil
   797  	}
   798  	allm := ut.methods()
   799  	allExported := true
   800  	for _, m := range allm {
   801  		name := t.nameOff(m.name)
   802  		if !name.isExported() {
   803  			allExported = false
   804  			break
   805  		}
   806  	}
   807  	var methods []method
   808  	if allExported {
   809  		methods = allm
   810  	} else {
   811  		methods = make([]method, 0, len(allm))
   812  		for _, m := range allm {
   813  			name := t.nameOff(m.name)
   814  			if name.isExported() {
   815  				methods = append(methods, m)
   816  			}
   817  		}
   818  		methods = methods[:len(methods):len(methods)]
   819  	}
   820  
   821  	methodsi, _ = methodCache.LoadOrStore(t, methods)
   822  	return methodsi.([]method)
   823  }
   824  
   825  func (t *rtype) NumMethod() int {
   826  	if t.Kind() == Interface {
   827  		tt := (*interfaceType)(unsafe.Pointer(t))
   828  		return tt.NumMethod()
   829  	}
   830  	if t.tflag&tflagUncommon == 0 {
   831  		return 0 // avoid methodCache synchronization
   832  	}
   833  	return len(t.exportedMethods())
   834  }
   835  
   836  func (t *rtype) Method(i int) (m Method) {
   837  	if t.Kind() == Interface {
   838  		tt := (*interfaceType)(unsafe.Pointer(t))
   839  		return tt.Method(i)
   840  	}
   841  	methods := t.exportedMethods()
   842  	if i < 0 || i >= len(methods) {
   843  		panic("reflect: Method index out of range")
   844  	}
   845  	p := methods[i]
   846  	pname := t.nameOff(p.name)
   847  	m.Name = pname.name()
   848  	fl := flag(Func)
   849  	mtyp := t.typeOff(p.mtyp)
   850  	ft := (*funcType)(unsafe.Pointer(mtyp))
   851  	in := make([]Type, 0, 1+len(ft.in()))
   852  	in = append(in, t)
   853  	for _, arg := range ft.in() {
   854  		in = append(in, arg)
   855  	}
   856  	out := make([]Type, 0, len(ft.out()))
   857  	for _, ret := range ft.out() {
   858  		out = append(out, ret)
   859  	}
   860  	mt := FuncOf(in, out, ft.IsVariadic())
   861  	m.Type = mt
   862  	tfn := t.textOff(p.tfn)
   863  	fn := unsafe.Pointer(&tfn)
   864  	m.Func = Value{mt.(*rtype), fn, fl}
   865  
   866  	m.Index = i
   867  	return m
   868  }
   869  
   870  func (t *rtype) MethodByName(name string) (m Method, ok bool) {
   871  	if t.Kind() == Interface {
   872  		tt := (*interfaceType)(unsafe.Pointer(t))
   873  		return tt.MethodByName(name)
   874  	}
   875  	ut := t.uncommon()
   876  	if ut == nil {
   877  		return Method{}, false
   878  	}
   879  	utmethods := ut.methods()
   880  	var eidx int
   881  	for i := 0; i < int(ut.mcount); i++ {
   882  		p := utmethods[i]
   883  		pname := t.nameOff(p.name)
   884  		if pname.isExported() {
   885  			if pname.name() == name {
   886  				return t.Method(eidx), true
   887  			}
   888  			eidx++
   889  		}
   890  	}
   891  	return Method{}, false
   892  }
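// A minimal sketch of looking up and calling an exported method, assuming a
// made-up Greeter type in client code that imports "fmt" and "reflect":
//
//	type Greeter struct{}
//
//	func (Greeter) Hello(name string) string { return "hello " + name }
//
//	t := reflect.TypeOf(Greeter{})
//	if m, ok := t.MethodByName("Hello"); ok {
//		// m.Func takes the receiver as the first argument.
//		args := []reflect.Value{reflect.ValueOf(Greeter{}), reflect.ValueOf("go")}
//		fmt.Println(m.Func.Call(args)[0].String()) // hello go
//	}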
   893  
   894  func (t *rtype) PkgPath() string {
   895  	if t.tflag&tflagNamed == 0 {
   896  		return ""
   897  	}
   898  	ut := t.uncommon()
   899  	if ut == nil {
   900  		return ""
   901  	}
   902  	return t.nameOff(ut.pkgPath).name()
   903  }
   904  
   905  func hasPrefix(s, prefix string) bool {
   906  	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
   907  }
   908  
   909  func (t *rtype) Name() string {
   910  	if t.tflag&tflagNamed == 0 {
   911  		return ""
   912  	}
   913  	s := t.String()
   914  	i := len(s) - 1
   915  	for i >= 0 {
   916  		if s[i] == '.' {
   917  			break
   918  		}
   919  		i--
   920  	}
   921  	return s[i+1:]
   922  }
   923  
   924  func (t *rtype) ChanDir() ChanDir {
   925  	if t.Kind() != Chan {
   926  		panic("reflect: ChanDir of non-chan type")
   927  	}
   928  	tt := (*chanType)(unsafe.Pointer(t))
   929  	return ChanDir(tt.dir)
   930  }
   931  
   932  func (t *rtype) IsVariadic() bool {
   933  	if t.Kind() != Func {
   934  		panic("reflect: IsVariadic of non-func type")
   935  	}
   936  	tt := (*funcType)(unsafe.Pointer(t))
   937  	return tt.outCount&(1<<15) != 0
   938  }
   939  
   940  func (t *rtype) Elem() Type {
   941  	switch t.Kind() {
   942  	case Array:
   943  		tt := (*arrayType)(unsafe.Pointer(t))
   944  		return toType(tt.elem)
   945  	case Chan:
   946  		tt := (*chanType)(unsafe.Pointer(t))
   947  		return toType(tt.elem)
   948  	case Map:
   949  		tt := (*mapType)(unsafe.Pointer(t))
   950  		return toType(tt.elem)
   951  	case Ptr:
   952  		tt := (*ptrType)(unsafe.Pointer(t))
   953  		return toType(tt.elem)
   954  	case Slice:
   955  		tt := (*sliceType)(unsafe.Pointer(t))
   956  		return toType(tt.elem)
   957  	}
   958  	panic("reflect: Elem of invalid type")
   959  }
   960  
   961  func (t *rtype) Field(i int) StructField {
   962  	if t.Kind() != Struct {
   963  		panic("reflect: Field of non-struct type")
   964  	}
   965  	tt := (*structType)(unsafe.Pointer(t))
   966  	return tt.Field(i)
   967  }
   968  
   969  func (t *rtype) FieldByIndex(index []int) StructField {
   970  	if t.Kind() != Struct {
   971  		panic("reflect: FieldByIndex of non-struct type")
   972  	}
   973  	tt := (*structType)(unsafe.Pointer(t))
   974  	return tt.FieldByIndex(index)
   975  }
   976  
   977  func (t *rtype) FieldByName(name string) (StructField, bool) {
   978  	if t.Kind() != Struct {
   979  		panic("reflect: FieldByName of non-struct type")
   980  	}
   981  	tt := (*structType)(unsafe.Pointer(t))
   982  	return tt.FieldByName(name)
   983  }
   984  
   985  func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
   986  	if t.Kind() != Struct {
   987  		panic("reflect: FieldByNameFunc of non-struct type")
   988  	}
   989  	tt := (*structType)(unsafe.Pointer(t))
   990  	return tt.FieldByNameFunc(match)
   991  }
   992  
   993  func (t *rtype) In(i int) Type {
   994  	if t.Kind() != Func {
   995  		panic("reflect: In of non-func type")
   996  	}
   997  	tt := (*funcType)(unsafe.Pointer(t))
   998  	return toType(tt.in()[i])
   999  }
  1000  
  1001  func (t *rtype) Key() Type {
  1002  	if t.Kind() != Map {
  1003  		panic("reflect: Key of non-map type")
  1004  	}
  1005  	tt := (*mapType)(unsafe.Pointer(t))
  1006  	return toType(tt.key)
  1007  }
  1008  
  1009  func (t *rtype) Len() int {
  1010  	if t.Kind() != Array {
  1011  		panic("reflect: Len of non-array type")
  1012  	}
  1013  	tt := (*arrayType)(unsafe.Pointer(t))
  1014  	return int(tt.len)
  1015  }
  1016  
  1017  func (t *rtype) NumField() int {
  1018  	if t.Kind() != Struct {
  1019  		panic("reflect: NumField of non-struct type")
  1020  	}
  1021  	tt := (*structType)(unsafe.Pointer(t))
  1022  	return len(tt.fields)
  1023  }
  1024  
  1025  func (t *rtype) NumIn() int {
  1026  	if t.Kind() != Func {
  1027  		panic("reflect: NumIn of non-func type")
  1028  	}
  1029  	tt := (*funcType)(unsafe.Pointer(t))
  1030  	return int(tt.inCount)
  1031  }
  1032  
  1033  func (t *rtype) NumOut() int {
  1034  	if t.Kind() != Func {
  1035  		panic("reflect: NumOut of non-func type")
  1036  	}
  1037  	tt := (*funcType)(unsafe.Pointer(t))
  1038  	return len(tt.out())
  1039  }
  1040  
  1041  func (t *rtype) Out(i int) Type {
  1042  	if t.Kind() != Func {
  1043  		panic("reflect: Out of non-func type")
  1044  	}
  1045  	tt := (*funcType)(unsafe.Pointer(t))
  1046  	return toType(tt.out()[i])
  1047  }
  1048  
  1049  func (t *funcType) in() []*rtype {
  1050  	uadd := unsafe.Sizeof(*t)
  1051  	if t.tflag&tflagUncommon != 0 {
  1052  		uadd += unsafe.Sizeof(uncommonType{})
  1053  	}
  1054  	if t.inCount == 0 {
  1055  		return nil
  1056  	}
  1057  	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount]
  1058  }
  1059  
  1060  func (t *funcType) out() []*rtype {
  1061  	uadd := unsafe.Sizeof(*t)
  1062  	if t.tflag&tflagUncommon != 0 {
  1063  		uadd += unsafe.Sizeof(uncommonType{})
  1064  	}
  1065  	outCount := t.outCount & (1<<15 - 1)
  1066  	if outCount == 0 {
  1067  		return nil
  1068  	}
  1069  	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount]
  1070  }
  1071  
  1072  // add returns p+x.
  1073  //
  1074  // The whySafe string is ignored, so that the function still inlines
  1075  // as efficiently as p+x, but all call sites should use the string to
  1076  // record why the addition is safe, which is to say why the addition
  1077  // does not cause x to advance to the very end of p's allocation
  1078  // and therefore point incorrectly at the next block in memory.
  1079  func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
  1080  	return unsafe.Pointer(uintptr(p) + x)
  1081  }
  1082  
  1083  func (d ChanDir) String() string {
  1084  	switch d {
  1085  	case SendDir:
  1086  		return "chan<-"
  1087  	case RecvDir:
  1088  		return "<-chan"
  1089  	case BothDir:
  1090  		return "chan"
  1091  	}
  1092  	return "ChanDir" + strconv.Itoa(int(d))
  1093  }
  1094  
  1095  // Method returns the i'th method in the type's method set.
  1096  func (t *interfaceType) Method(i int) (m Method) {
  1097  	if i < 0 || i >= len(t.methods) {
  1098  		return
  1099  	}
  1100  	p := &t.methods[i]
  1101  	pname := t.nameOff(p.name)
  1102  	m.Name = pname.name()
  1103  	if !pname.isExported() {
  1104  		m.PkgPath = pname.pkgPath()
  1105  		if m.PkgPath == "" {
  1106  			m.PkgPath = t.pkgPath.name()
  1107  		}
  1108  	}
  1109  	m.Type = toType(t.typeOff(p.typ))
  1110  	m.Index = i
  1111  	return
  1112  }
  1113  
  1114  // NumMethod returns the number of interface methods in the type's method set.
  1115  func (t *interfaceType) NumMethod() int { return len(t.methods) }
  1116  
   1117  // MethodByName returns the method with the given name in the type's method set.
  1118  func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
  1119  	if t == nil {
  1120  		return
  1121  	}
  1122  	var p *imethod
  1123  	for i := range t.methods {
  1124  		p = &t.methods[i]
  1125  		if t.nameOff(p.name).name() == name {
  1126  			return t.Method(i), true
  1127  		}
  1128  	}
  1129  	return
  1130  }
  1131  
  1132  // A StructField describes a single field in a struct.
  1133  type StructField struct {
  1134  	// Name is the field name.
  1135  	Name string
  1136  	// PkgPath is the package path that qualifies a lower case (unexported)
  1137  	// field name. It is empty for upper case (exported) field names.
  1138  	// See https://golang.org/ref/spec#Uniqueness_of_identifiers
  1139  	PkgPath string
  1140  
  1141  	Type      Type      // field type
  1142  	Tag       StructTag // field tag string
  1143  	Offset    uintptr   // offset within struct, in bytes
  1144  	Index     []int     // index sequence for Type.FieldByIndex
  1145  	Anonymous bool      // is an embedded field
  1146  }
  1147  
  1148  // A StructTag is the tag string in a struct field.
  1149  //
  1150  // By convention, tag strings are a concatenation of
  1151  // optionally space-separated key:"value" pairs.
  1152  // Each key is a non-empty string consisting of non-control
  1153  // characters other than space (U+0020 ' '), quote (U+0022 '"'),
  1154  // and colon (U+003A ':').  Each value is quoted using U+0022 '"'
  1155  // characters and Go string literal syntax.
  1156  type StructTag string
  1157  
  1158  // Get returns the value associated with key in the tag string.
  1159  // If there is no such key in the tag, Get returns the empty string.
  1160  // If the tag does not have the conventional format, the value
  1161  // returned by Get is unspecified. To determine whether a tag is
  1162  // explicitly set to the empty string, use Lookup.
  1163  func (tag StructTag) Get(key string) string {
  1164  	v, _ := tag.Lookup(key)
  1165  	return v
  1166  }
  1167  
  1168  // Lookup returns the value associated with key in the tag string.
  1169  // If the key is present in the tag the value (which may be empty)
  1170  // is returned. Otherwise the returned value will be the empty string.
  1171  // The ok return value reports whether the value was explicitly set in
  1172  // the tag string. If the tag does not have the conventional format,
  1173  // the value returned by Lookup is unspecified.
  1174  func (tag StructTag) Lookup(key string) (value string, ok bool) {
  1175  	// When modifying this code, also update the validateStructTag code
  1176  	// in cmd/vet/structtag.go.
  1177  
  1178  	for tag != "" {
  1179  		// Skip leading space.
  1180  		i := 0
  1181  		for i < len(tag) && tag[i] == ' ' {
  1182  			i++
  1183  		}
  1184  		tag = tag[i:]
  1185  		if tag == "" {
  1186  			break
  1187  		}
  1188  
  1189  		// Scan to colon. A space, a quote or a control character is a syntax error.
  1190  		// Strictly speaking, control chars include the range [0x7f, 0x9f], not just
  1191  		// [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
  1192  		// as it is simpler to inspect the tag's bytes than the tag's runes.
  1193  		i = 0
  1194  		for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
  1195  			i++
  1196  		}
  1197  		if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
  1198  			break
  1199  		}
  1200  		name := string(tag[:i])
  1201  		tag = tag[i+1:]
  1202  
  1203  		// Scan quoted string to find value.
  1204  		i = 1
  1205  		for i < len(tag) && tag[i] != '"' {
  1206  			if tag[i] == '\\' {
  1207  				i++
  1208  			}
  1209  			i++
  1210  		}
  1211  		if i >= len(tag) {
  1212  			break
  1213  		}
  1214  		qvalue := string(tag[:i+1])
  1215  		tag = tag[i+1:]
  1216  
  1217  		if key == name {
  1218  			value, err := strconv.Unquote(qvalue)
  1219  			if err != nil {
  1220  				break
  1221  			}
  1222  			return value, true
  1223  		}
  1224  	}
  1225  	return "", false
  1226  }
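// A small usage sketch, assuming client code that imports "fmt" and "reflect":
//
//	tag := reflect.StructTag(`json:"name,omitempty" xml:"name"`)
//	fmt.Println(tag.Get("json")) // name,omitempty
//	v, ok := tag.Lookup("yaml")
//	fmt.Printf("%q %v\n", v, ok) // "" false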
  1227  
  1228  // Field returns the i'th struct field.
  1229  func (t *structType) Field(i int) (f StructField) {
  1230  	if i < 0 || i >= len(t.fields) {
  1231  		panic("reflect: Field index out of bounds")
  1232  	}
  1233  	p := &t.fields[i]
  1234  	f.Type = toType(p.typ)
  1235  	f.Name = p.name.name()
  1236  	f.Anonymous = p.anon()
  1237  	if !p.name.isExported() {
  1238  		f.PkgPath = t.pkgPath.name()
  1239  	}
  1240  	if tag := p.name.tag(); tag != "" {
  1241  		f.Tag = StructTag(tag)
  1242  	}
  1243  	f.Offset = p.offset()
  1244  
  1245  	// NOTE(rsc): This is the only allocation in the interface
  1246  	// presented by a reflect.Type. It would be nice to avoid,
  1247  	// at least in the common cases, but we need to make sure
  1248  	// that misbehaving clients of reflect cannot affect other
  1249  	// uses of reflect. One possibility is CL 5371098, but we
  1250  	// postponed that ugliness until there is a demonstrated
  1251  	// need for the performance. This is issue 2320.
  1252  	f.Index = []int{i}
  1253  	return
  1254  }
  1255  
  1256  // TODO(gri): Should there be an error/bool indicator if the index
  1257  //            is wrong for FieldByIndex?
  1258  
  1259  // FieldByIndex returns the nested field corresponding to index.
  1260  func (t *structType) FieldByIndex(index []int) (f StructField) {
  1261  	f.Type = toType(&t.rtype)
  1262  	for i, x := range index {
  1263  		if i > 0 {
  1264  			ft := f.Type
  1265  			if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
  1266  				ft = ft.Elem()
  1267  			}
  1268  			f.Type = ft
  1269  		}
  1270  		f = f.Type.Field(x)
  1271  	}
  1272  	return
  1273  }
  1274  
  1275  // A fieldScan represents an item on the fieldByNameFunc scan work list.
  1276  type fieldScan struct {
  1277  	typ   *structType
  1278  	index []int
  1279  }
  1280  
  1281  // FieldByNameFunc returns the struct field with a name that satisfies the
  1282  // match function and a boolean to indicate if the field was found.
  1283  func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
  1284  	// This uses the same condition that the Go language does: there must be a unique instance
  1285  	// of the match at a given depth level. If there are multiple instances of a match at the
  1286  	// same depth, they annihilate each other and inhibit any possible match at a lower level.
  1287  	// The algorithm is breadth first search, one depth level at a time.
  1288  
  1289  	// The current and next slices are work queues:
  1290  	// current lists the fields to visit on this depth level,
  1291  	// and next lists the fields on the next lower level.
  1292  	current := []fieldScan{}
  1293  	next := []fieldScan{{typ: t}}
  1294  
  1295  	// nextCount records the number of times an embedded type has been
  1296  	// encountered and considered for queueing in the 'next' slice.
  1297  	// We only queue the first one, but we increment the count on each.
  1298  	// If a struct type T can be reached more than once at a given depth level,
  1299  	// then it annihilates itself and need not be considered at all when we
  1300  	// process that next depth level.
  1301  	var nextCount map[*structType]int
  1302  
  1303  	// visited records the structs that have been considered already.
  1304  	// Embedded pointer fields can create cycles in the graph of
  1305  	// reachable embedded types; visited avoids following those cycles.
  1306  	// It also avoids duplicated effort: if we didn't find the field in an
  1307  	// embedded type T at level 2, we won't find it in one at level 4 either.
  1308  	visited := map[*structType]bool{}
  1309  
  1310  	for len(next) > 0 {
  1311  		current, next = next, current[:0]
  1312  		count := nextCount
  1313  		nextCount = nil
  1314  
  1315  		// Process all the fields at this depth, now listed in 'current'.
  1316  		// The loop queues embedded fields found in 'next', for processing during the next
  1317  		// iteration. The multiplicity of the 'current' field counts is recorded
  1318  		// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
  1319  		for _, scan := range current {
  1320  			t := scan.typ
  1321  			if visited[t] {
  1322  				// We've looked through this type before, at a higher level.
  1323  				// That higher level would shadow the lower level we're now at,
  1324  				// so this one can't be useful to us. Ignore it.
  1325  				continue
  1326  			}
  1327  			visited[t] = true
  1328  			for i := range t.fields {
  1329  				f := &t.fields[i]
  1330  				// Find name and (for anonymous field) type for field f.
  1331  				fname := f.name.name()
  1332  				var ntyp *rtype
  1333  				if f.anon() {
  1334  					// Anonymous field of type T or *T.
  1335  					ntyp = f.typ
  1336  					if ntyp.Kind() == Ptr {
  1337  						ntyp = ntyp.Elem().common()
  1338  					}
  1339  				}
  1340  
  1341  				// Does it match?
  1342  				if match(fname) {
  1343  					// Potential match
  1344  					if count[t] > 1 || ok {
  1345  						// Name appeared multiple times at this level: annihilate.
  1346  						return StructField{}, false
  1347  					}
  1348  					result = t.Field(i)
  1349  					result.Index = nil
  1350  					result.Index = append(result.Index, scan.index...)
  1351  					result.Index = append(result.Index, i)
  1352  					ok = true
  1353  					continue
  1354  				}
  1355  
  1356  				// Queue embedded struct fields for processing with next level,
  1357  				// but only if we haven't seen a match yet at this level and only
  1358  				// if the embedded types haven't already been queued.
  1359  				if ok || ntyp == nil || ntyp.Kind() != Struct {
  1360  					continue
  1361  				}
  1362  				styp := (*structType)(unsafe.Pointer(ntyp))
  1363  				if nextCount[styp] > 0 {
  1364  					nextCount[styp] = 2 // exact multiple doesn't matter
  1365  					continue
  1366  				}
  1367  				if nextCount == nil {
  1368  					nextCount = map[*structType]int{}
  1369  				}
  1370  				nextCount[styp] = 1
  1371  				if count[t] > 1 {
  1372  					nextCount[styp] = 2 // exact multiple doesn't matter
  1373  				}
  1374  				var index []int
  1375  				index = append(index, scan.index...)
  1376  				index = append(index, i)
  1377  				next = append(next, fieldScan{styp, index})
  1378  			}
  1379  		}
  1380  		if ok {
  1381  			break
  1382  		}
  1383  	}
  1384  	return
  1385  }
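// A minimal sketch of the annihilation rule above, with made-up types A, B,
// and S in client code that imports "fmt" and "reflect": two embedded structs
// at the same depth that both declare a matching field cancel each other.
//
//	type A struct{ X int }
//	type B struct{ X int }
//	type S struct {
//		A
//		B
//	}
//
//	_, ok := reflect.TypeOf(S{}).FieldByName("X")
//	fmt.Println(ok) // false: X is ambiguous at depth 1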
  1386  
  1387  // FieldByName returns the struct field with the given name
  1388  // and a boolean to indicate if the field was found.
  1389  func (t *structType) FieldByName(name string) (f StructField, present bool) {
  1390  	// Quick check for top-level name, or struct without anonymous fields.
  1391  	hasAnon := false
  1392  	if name != "" {
  1393  		for i := range t.fields {
  1394  			tf := &t.fields[i]
  1395  			if tf.name.name() == name {
  1396  				return t.Field(i), true
  1397  			}
  1398  			if tf.anon() {
  1399  				hasAnon = true
  1400  			}
  1401  		}
  1402  	}
  1403  	if !hasAnon {
  1404  		return
  1405  	}
  1406  	return t.FieldByNameFunc(func(s string) bool { return s == name })
  1407  }
  1408  
  1409  // TypeOf returns the reflection Type that represents the dynamic type of i.
  1410  // If i is a nil interface value, TypeOf returns nil.
  1411  func TypeOf(i interface{}) Type {
  1412  	eface := *(*emptyInterface)(unsafe.Pointer(&i))
  1413  	return toType(eface.typ)
  1414  }
  1415  
  1416  // ptrMap is the cache for PtrTo.
  1417  var ptrMap sync.Map // map[*rtype]*ptrType
  1418  
  1419  // PtrTo returns the pointer type with element t.
  1420  // For example, if t represents type Foo, PtrTo(t) represents *Foo.
  1421  func PtrTo(t Type) Type {
  1422  	return t.(*rtype).ptrTo()
  1423  }
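// A small usage sketch, assuming client code that imports "fmt" and "reflect":
//
//	t := reflect.TypeOf(0) // int
//	pt := reflect.PtrTo(t) // *int
//	fmt.Println(pt.Kind(), pt.Elem() == t) // ptr true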
  1424  
  1425  func (t *rtype) ptrTo() *rtype {
  1426  	if t.ptrToThis != 0 {
  1427  		return t.typeOff(t.ptrToThis)
  1428  	}
  1429  
  1430  	// Check the cache.
  1431  	if pi, ok := ptrMap.Load(t); ok {
  1432  		return &pi.(*ptrType).rtype
  1433  	}
  1434  
  1435  	// Look in known types.
  1436  	s := "*" + t.String()
  1437  	for _, tt := range typesByString(s) {
  1438  		p := (*ptrType)(unsafe.Pointer(tt))
  1439  		if p.elem != t {
  1440  			continue
  1441  		}
  1442  		pi, _ := ptrMap.LoadOrStore(t, p)
  1443  		return &pi.(*ptrType).rtype
  1444  	}
  1445  
  1446  	// Create a new ptrType starting with the description
  1447  	// of an *unsafe.Pointer.
  1448  	var iptr interface{} = (*unsafe.Pointer)(nil)
  1449  	prototype := *(**ptrType)(unsafe.Pointer(&iptr))
  1450  	pp := *prototype
  1451  
  1452  	pp.str = resolveReflectName(newName(s, "", false))
  1453  	pp.ptrToThis = 0
  1454  
  1455  	// For the type structures linked into the binary, the
  1456  	// compiler provides a good hash of the string.
  1457  	// Create a good hash for the new string by using
  1458  	// the FNV-1 hash's mixing function to combine the
  1459  	// old hash and the new "*".
  1460  	pp.hash = fnv1(t.hash, '*')
  1461  
  1462  	pp.elem = t
  1463  
  1464  	pi, _ := ptrMap.LoadOrStore(t, &pp)
  1465  	return &pi.(*ptrType).rtype
  1466  }
  1467  
  1468  // fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
  1469  func fnv1(x uint32, list ...byte) uint32 {
  1470  	for _, b := range list {
  1471  		x = x*16777619 ^ uint32(b)
  1472  	}
  1473  	return x
  1474  }
  1475  
  1476  func (t *rtype) Implements(u Type) bool {
  1477  	if u == nil {
  1478  		panic("reflect: nil type passed to Type.Implements")
  1479  	}
  1480  	if u.Kind() != Interface {
  1481  		panic("reflect: non-interface type passed to Type.Implements")
  1482  	}
  1483  	return implements(u.(*rtype), t)
  1484  }
  1485  
  1486  func (t *rtype) AssignableTo(u Type) bool {
  1487  	if u == nil {
  1488  		panic("reflect: nil type passed to Type.AssignableTo")
  1489  	}
  1490  	uu := u.(*rtype)
  1491  	return directlyAssignable(uu, t) || implements(uu, t)
  1492  }
  1493  
  1494  func (t *rtype) ConvertibleTo(u Type) bool {
  1495  	if u == nil {
  1496  		panic("reflect: nil type passed to Type.ConvertibleTo")
  1497  	}
  1498  	uu := u.(*rtype)
  1499  	return convertOp(uu, t) != nil
  1500  }
  1501  
  1502  func (t *rtype) Comparable() bool {
  1503  	return t.alg != nil && t.alg.equal != nil
  1504  }
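// A minimal sketch of these predicates, assuming client code that imports
// "fmt", "reflect", and "time". The interface type is obtained through a
// pointer-to-interface, because passing an interface value to TypeOf would
// report its dynamic type instead.
//
//	stringer := reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
//	fmt.Println(reflect.TypeOf(time.Second).Implements(stringer))         // true
//	fmt.Println(reflect.TypeOf(0).ConvertibleTo(reflect.TypeOf("")))      // true
//	fmt.Println(reflect.TypeOf(0).AssignableTo(reflect.TypeOf(int64(0)))) // false
//	fmt.Println(reflect.TypeOf([]int(nil)).Comparable())                  // false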
  1505  
  1506  // implements reports whether the type V implements the interface type T.
  1507  func implements(T, V *rtype) bool {
  1508  	if T.Kind() != Interface {
  1509  		return false
  1510  	}
  1511  	t := (*interfaceType)(unsafe.Pointer(T))
  1512  	if len(t.methods) == 0 {
  1513  		return true
  1514  	}
  1515  
  1516  	// The same algorithm applies in both cases, but the
  1517  	// method tables for an interface type and a concrete type
  1518  	// are different, so the code is duplicated.
  1519  	// In both cases the algorithm is a linear scan over the two
  1520  	// lists - T's methods and V's methods - simultaneously.
  1521  	// Since method tables are stored in a unique sorted order
  1522  	// (alphabetical, with no duplicate method names), the scan
  1523  	// through V's methods must hit a match for each of T's
  1524  	// methods along the way, or else V does not implement T.
  1525  	// This lets us run the scan in overall linear time instead of
   1526  	// the quadratic time a naive search would require.
  1527  	// See also ../runtime/iface.go.
  1528  	if V.Kind() == Interface {
  1529  		v := (*interfaceType)(unsafe.Pointer(V))
  1530  		i := 0
  1531  		for j := 0; j < len(v.methods); j++ {
  1532  			tm := &t.methods[i]
  1533  			tmName := t.nameOff(tm.name)
  1534  			vm := &v.methods[j]
  1535  			vmName := V.nameOff(vm.name)
  1536  			if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
  1537  				if !tmName.isExported() {
  1538  					tmPkgPath := tmName.pkgPath()
  1539  					if tmPkgPath == "" {
  1540  						tmPkgPath = t.pkgPath.name()
  1541  					}
  1542  					vmPkgPath := vmName.pkgPath()
  1543  					if vmPkgPath == "" {
  1544  						vmPkgPath = v.pkgPath.name()
  1545  					}
  1546  					if tmPkgPath != vmPkgPath {
  1547  						continue
  1548  					}
  1549  				}
  1550  				if i++; i >= len(t.methods) {
  1551  					return true
  1552  				}
  1553  			}
  1554  		}
  1555  		return false
  1556  	}
  1557  
  1558  	v := V.uncommon()
  1559  	if v == nil {
  1560  		return false
  1561  	}
  1562  	i := 0
  1563  	vmethods := v.methods()
  1564  	for j := 0; j < int(v.mcount); j++ {
  1565  		tm := &t.methods[i]
  1566  		tmName := t.nameOff(tm.name)
  1567  		vm := vmethods[j]
  1568  		vmName := V.nameOff(vm.name)
  1569  		if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
  1570  			if !tmName.isExported() {
  1571  				tmPkgPath := tmName.pkgPath()
  1572  				if tmPkgPath == "" {
  1573  					tmPkgPath = t.pkgPath.name()
  1574  				}
  1575  				vmPkgPath := vmName.pkgPath()
  1576  				if vmPkgPath == "" {
  1577  					vmPkgPath = V.nameOff(v.pkgPath).name()
  1578  				}
  1579  				if tmPkgPath != vmPkgPath {
  1580  					continue
  1581  				}
  1582  			}
  1583  			if i++; i >= len(t.methods) {
  1584  				return true
  1585  			}
  1586  		}
  1587  	}
  1588  	return false
  1589  }
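
// The comment above describes a merge-style containment check over two
// sorted method tables. A minimal sketch of the same idea over plain string
// slices (assuming both inputs are sorted and free of duplicates):
//
//	// containsAllSorted reports whether every element of want also
//	// appears in have, scanning both slices once.
//	func containsAllSorted(want, have []string) bool {
//		i := 0
//		for j := 0; j < len(have) && i < len(want); j++ {
//			if have[j] == want[i] {
//				i++
//			}
//		}
//		return i == len(want)
//	}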
  1590  
  1591  // directlyAssignable reports whether a value x of type V can be directly
  1592  // assigned (using memmove) to a value of type T.
  1593  // https://golang.org/doc/go_spec.html#Assignability
  1594  // Ignoring the interface rules (implemented elsewhere)
  1595  // and the ideal constant rules (no ideal constants at run time).
  1596  func directlyAssignable(T, V *rtype) bool {
  1597  	// x's type V is identical to T?
  1598  	if T == V {
  1599  		return true
  1600  	}
  1601  
  1602  	// Otherwise at least one of T and V must be unnamed
  1603  	// and they must have the same kind.
  1604  	if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
  1605  		return false
  1606  	}
  1607  
  1608  	// x's type T and V must have identical underlying types.
  1609  	return haveIdenticalUnderlyingType(T, V, true)
  1610  }
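
// A sketch of how the "at least one unnamed" rule above surfaces through the
// exported API; the Celsius, Fahrenheit and Row types are made up for the
// example (assumes the standard fmt and reflect packages):
//
//	type Celsius float64
//	type Fahrenheit float64
//	type Row []float64
//
//	c := reflect.TypeOf(Celsius(0))
//	f := reflect.TypeOf(Fahrenheit(0))
//	r := reflect.TypeOf(Row(nil))
//	s := reflect.TypeOf([]float64(nil))
//
//	fmt.Println(c.AssignableTo(f))  // false: both types are named
//	fmt.Println(c.ConvertibleTo(f)) // true: identical underlying types
//	fmt.Println(r.AssignableTo(s))  // true: []float64 is unnamed
//	fmt.Println(s.AssignableTo(r))  // true: same rule, other direction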
  1611  
  1612  func haveIdenticalType(T, V Type, cmpTags bool) bool {
  1613  	if cmpTags {
  1614  		return T == V
  1615  	}
  1616  
  1617  	if T.Name() != V.Name() || T.Kind() != V.Kind() {
  1618  		return false
  1619  	}
  1620  
  1621  	return haveIdenticalUnderlyingType(T.common(), V.common(), false)
  1622  }
  1623  
  1624  func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
  1625  	if T == V {
  1626  		return true
  1627  	}
  1628  
  1629  	kind := T.Kind()
  1630  	if kind != V.Kind() {
  1631  		return false
  1632  	}
  1633  
  1634  	// Non-composite types of equal kind have same underlying type
  1635  	// (the predefined instance of the type).
  1636  	if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
  1637  		return true
  1638  	}
  1639  
  1640  	// Composite types.
  1641  	switch kind {
  1642  	case Array:
  1643  		return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
  1644  
  1645  	case Chan:
  1646  		// Special case:
  1647  		// x is a bidirectional channel value, T is a channel type,
  1648  		// and x's type V and T have identical element types.
  1649  		if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
  1650  			return true
  1651  		}
  1652  
  1653  		// Otherwise continue test for identical underlying type.
  1654  		return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
  1655  
  1656  	case Func:
  1657  		t := (*funcType)(unsafe.Pointer(T))
  1658  		v := (*funcType)(unsafe.Pointer(V))
  1659  		if t.outCount != v.outCount || t.inCount != v.inCount {
  1660  			return false
  1661  		}
  1662  		for i := 0; i < t.NumIn(); i++ {
  1663  			if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
  1664  				return false
  1665  			}
  1666  		}
  1667  		for i := 0; i < t.NumOut(); i++ {
  1668  			if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
  1669  				return false
  1670  			}
  1671  		}
  1672  		return true
  1673  
  1674  	case Interface:
  1675  		t := (*interfaceType)(unsafe.Pointer(T))
  1676  		v := (*interfaceType)(unsafe.Pointer(V))
  1677  		if len(t.methods) == 0 && len(v.methods) == 0 {
  1678  			return true
  1679  		}
  1680  		// Might have the same methods but still
  1681  		// need a run time conversion.
  1682  		return false
  1683  
  1684  	case Map:
  1685  		return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
  1686  
  1687  	case Ptr, Slice:
  1688  		return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
  1689  
  1690  	case Struct:
  1691  		t := (*structType)(unsafe.Pointer(T))
  1692  		v := (*structType)(unsafe.Pointer(V))
  1693  		if len(t.fields) != len(v.fields) {
  1694  			return false
  1695  		}
  1696  		if t.pkgPath.name() != v.pkgPath.name() {
  1697  			return false
  1698  		}
  1699  		for i := range t.fields {
  1700  			tf := &t.fields[i]
  1701  			vf := &v.fields[i]
  1702  			if tf.name.name() != vf.name.name() {
  1703  				return false
  1704  			}
  1705  			if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
  1706  				return false
  1707  			}
  1708  			if cmpTags && tf.name.tag() != vf.name.tag() {
  1709  				return false
  1710  			}
  1711  			if tf.offsetAnon != vf.offsetAnon {
  1712  				return false
  1713  			}
  1714  		}
  1715  		return true
  1716  	}
  1717  
  1718  	return false
  1719  }
  1720  
  1721  // typelinks is implemented in package runtime.
  1722  // It returns a slice of the sections in each module,
  1723  // and a slice of *rtype offsets in each module.
  1724  //
  1725  // The types in each module are sorted by string. That is, the first
  1726  // two linked types of the first module are:
  1727  //
  1728  //	d0 := sections[0]
  1729  //	t1 := (*rtype)(add(d0, offset[0][0]))
  1730  //	t2 := (*rtype)(add(d0, offset[0][1]))
  1731  //
  1732  // and
  1733  //
  1734  //	t1.String() < t2.String()
  1735  //
  1736  // Note that strings are not unique identifiers for types:
  1737  // there can be more than one with a given string.
  1738  // Only types we might want to look up are included:
  1739  // pointers, channels, maps, slices, and arrays.
  1740  func typelinks() (sections []unsafe.Pointer, offset [][]int32)
  1741  
  1742  func rtypeOff(section unsafe.Pointer, off int32) *rtype {
  1743  	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
  1744  }
  1745  
  1746  // typesByString returns the subslice of typelinks() whose elements have
  1747  // the given string representation.
  1748  // It may be empty (no known types with that string) or may have
  1749  // multiple elements (multiple types with that string).
  1750  func typesByString(s string) []*rtype {
  1751  	sections, offset := typelinks()
  1752  	var ret []*rtype
  1753  
  1754  	for offsI, offs := range offset {
  1755  		section := sections[offsI]
  1756  
  1757  		// We are looking for the first index i where the string becomes >= s.
  1758  		// This is a copy of sort.Search, with f(h) replaced by (*typ[h].String() >= s).
  1759  		i, j := 0, len(offs)
  1760  		for i < j {
  1761  			h := i + (j-i)/2 // avoid overflow when computing h
  1762  			// i ≤ h < j
  1763  			if !(rtypeOff(section, offs[h]).String() >= s) {
  1764  				i = h + 1 // preserves f(i-1) == false
  1765  			} else {
  1766  				j = h // preserves f(j) == true
  1767  			}
  1768  		}
  1769  		// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.
  1770  
  1771  		// Having found the first, linear scan forward to find the last.
  1772  		// We could do a second binary search, but the caller is going
  1773  		// to do a linear scan anyway.
  1774  		for j := i; j < len(offs); j++ {
  1775  			typ := rtypeOff(section, offs[j])
  1776  			if typ.String() != s {
  1777  				break
  1778  			}
  1779  			ret = append(ret, typ)
  1780  		}
  1781  	}
  1782  	return ret
  1783  }
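
// The loop above hand-rolls the binary search so that this package does not
// depend on package sort. A sketch of the equivalent call, assuming a sorted
// []string of type strings:
//
//	// firstAtLeast returns the index of the first element of names
//	// (sorted in increasing order) that is >= s.
//	func firstAtLeast(names []string, s string) int {
//		return sort.Search(len(names), func(h int) bool { return names[h] >= s })
//	}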
  1784  
  1785  // The lookupCache caches ArrayOf, ChanOf, MapOf and SliceOf lookups.
  1786  var lookupCache sync.Map // map[cacheKey]*rtype
  1787  
  1788  // A cacheKey is the key for use in the lookupCache.
  1789  // Four values describe any of the types we are looking for:
  1790  // type kind, one or two subtypes, and an extra integer.
  1791  type cacheKey struct {
  1792  	kind  Kind
  1793  	t1    *rtype
  1794  	t2    *rtype
  1795  	extra uintptr
  1796  }
  1797  
  1798  // The funcLookupCache caches FuncOf lookups.
  1799  // FuncOf does not share the common lookupCache since cacheKey is not
  1800  // sufficient to represent functions unambiguously.
  1801  var funcLookupCache struct {
  1802  	sync.Mutex // Guards stores (but not loads) on m.
  1803  
  1804  	// m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
  1805  	// Elements of m are append-only and thus safe for concurrent reading.
  1806  	m sync.Map
  1807  }
  1808  
  1809  // ChanOf returns the channel type with the given direction and element type.
  1810  // For example, if t represents int, ChanOf(RecvDir, t) represents <-chan int.
  1811  //
  1812  // The gc runtime imposes a limit of 64 kB on channel element types.
  1813  // If t's size is equal to or exceeds this limit, ChanOf panics.
  1814  func ChanOf(dir ChanDir, t Type) Type {
  1815  	typ := t.(*rtype)
  1816  
  1817  	// Look in cache.
  1818  	ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
  1819  	if ch, ok := lookupCache.Load(ckey); ok {
  1820  		return ch.(*rtype)
  1821  	}
  1822  
  1823  	// This restriction is imposed by the gc compiler and the runtime.
  1824  	if typ.size >= 1<<16 {
  1825  		panic("reflect.ChanOf: element size too large")
  1826  	}
  1827  
  1828  	// Look in known types.
  1829  	// TODO: Precedence when constructing string.
  1830  	var s string
  1831  	switch dir {
  1832  	default:
  1833  		panic("reflect.ChanOf: invalid dir")
  1834  	case SendDir:
  1835  		s = "chan<- " + typ.String()
  1836  	case RecvDir:
  1837  		s = "<-chan " + typ.String()
  1838  	case BothDir:
  1839  		s = "chan " + typ.String()
  1840  	}
  1841  	for _, tt := range typesByString(s) {
  1842  		ch := (*chanType)(unsafe.Pointer(tt))
  1843  		if ch.elem == typ && ch.dir == uintptr(dir) {
  1844  			ti, _ := lookupCache.LoadOrStore(ckey, tt)
  1845  			return ti.(Type)
  1846  		}
  1847  	}
  1848  
  1849  	// Make a channel type.
  1850  	var ichan interface{} = (chan unsafe.Pointer)(nil)
  1851  	prototype := *(**chanType)(unsafe.Pointer(&ichan))
  1852  	ch := *prototype
  1853  	ch.tflag = 0
  1854  	ch.dir = uintptr(dir)
  1855  	ch.str = resolveReflectName(newName(s, "", false))
  1856  	ch.hash = fnv1(typ.hash, 'c', byte(dir))
  1857  	ch.elem = typ
  1858  
  1859  	ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
  1860  	return ti.(Type)
  1861  }
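
// An illustrative use of ChanOf together with MakeChan (assumes the standard
// fmt and reflect packages):
//
//	ct := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(""))
//	fmt.Println(ct) // chan string
//
//	ch := reflect.MakeChan(ct, 1) // buffered so Send does not block
//	ch.Send(reflect.ValueOf("hello"))
//	v, _ := ch.Recv()
//	fmt.Println(v) // hello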
  1862  
  1863  func ismapkey(*rtype) bool // implemented in runtime
  1864  
  1865  // MapOf returns the map type with the given key and element types.
  1866  // For example, if k represents int and e represents string,
  1867  // MapOf(k, e) represents map[int]string.
  1868  //
  1869  // If the key type is not a valid map key type (that is, if it does
  1870  // not implement Go's == operator), MapOf panics.
  1871  func MapOf(key, elem Type) Type {
  1872  	ktyp := key.(*rtype)
  1873  	etyp := elem.(*rtype)
  1874  
  1875  	if !ismapkey(ktyp) {
  1876  		panic("reflect.MapOf: invalid key type " + ktyp.String())
  1877  	}
  1878  
  1879  	// Look in cache.
  1880  	ckey := cacheKey{Map, ktyp, etyp, 0}
  1881  	if mt, ok := lookupCache.Load(ckey); ok {
  1882  		return mt.(Type)
  1883  	}
  1884  
  1885  	// Look in known types.
  1886  	s := "map[" + ktyp.String() + "]" + etyp.String()
  1887  	for _, tt := range typesByString(s) {
  1888  		mt := (*mapType)(unsafe.Pointer(tt))
  1889  		if mt.key == ktyp && mt.elem == etyp {
  1890  			ti, _ := lookupCache.LoadOrStore(ckey, tt)
  1891  			return ti.(Type)
  1892  		}
  1893  	}
  1894  
  1895  	// Make a map type.
  1896  	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
  1897  	mt := **(**mapType)(unsafe.Pointer(&imap))
  1898  	mt.str = resolveReflectName(newName(s, "", false))
  1899  	mt.tflag = 0
  1900  	mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
  1901  	mt.key = ktyp
  1902  	mt.elem = etyp
  1903  	mt.bucket = bucketOf(ktyp, etyp)
  1904  	if ktyp.size > maxKeySize {
  1905  		mt.keysize = uint8(ptrSize)
  1906  		mt.indirectkey = 1
  1907  	} else {
  1908  		mt.keysize = uint8(ktyp.size)
  1909  		mt.indirectkey = 0
  1910  	}
  1911  	if etyp.size > maxValSize {
  1912  		mt.valuesize = uint8(ptrSize)
  1913  		mt.indirectvalue = 1
  1914  	} else {
  1915  		mt.valuesize = uint8(etyp.size)
  1916  		mt.indirectvalue = 0
  1917  	}
  1918  	mt.bucketsize = uint16(mt.bucket.size)
  1919  	mt.reflexivekey = isReflexive(ktyp)
  1920  	mt.needkeyupdate = needKeyUpdate(ktyp)
  1921  	mt.ptrToThis = 0
  1922  
  1923  	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
  1924  	return ti.(Type)
  1925  }
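
// An illustrative use of MapOf together with MakeMap (assumes the standard
// fmt and reflect packages):
//
//	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
//	fmt.Println(mt) // map[string]int
//
//	m := reflect.MakeMap(mt)
//	m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
//	fmt.Println(m.Interface()) // map[answer:42]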
  1926  
  1927  type funcTypeFixed4 struct {
  1928  	funcType
  1929  	args [4]*rtype
  1930  }
  1931  type funcTypeFixed8 struct {
  1932  	funcType
  1933  	args [8]*rtype
  1934  }
  1935  type funcTypeFixed16 struct {
  1936  	funcType
  1937  	args [16]*rtype
  1938  }
  1939  type funcTypeFixed32 struct {
  1940  	funcType
  1941  	args [32]*rtype
  1942  }
  1943  type funcTypeFixed64 struct {
  1944  	funcType
  1945  	args [64]*rtype
  1946  }
  1947  type funcTypeFixed128 struct {
  1948  	funcType
  1949  	args [128]*rtype
  1950  }
  1951  
  1952  // FuncOf returns the function type with the given argument and result types.
  1953  // For example, if k represents int and e represents string,
  1954  // FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
  1955  //
  1956  // The variadic argument controls whether the function is variadic. FuncOf
  1957  // panics if variadic is true and in[len(in)-1] does not represent
  1958  // a slice.
  1959  func FuncOf(in, out []Type, variadic bool) Type {
  1960  	if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
  1961  		panic("reflect.FuncOf: last arg of variadic func must be slice")
  1962  	}
  1963  
  1964  	// Make a func type.
  1965  	var ifunc interface{} = (func())(nil)
  1966  	prototype := *(**funcType)(unsafe.Pointer(&ifunc))
  1967  	n := len(in) + len(out)
  1968  
  1969  	var ft *funcType
  1970  	var args []*rtype
  1971  	switch {
  1972  	case n <= 4:
  1973  		fixed := new(funcTypeFixed4)
  1974  		args = fixed.args[:0:len(fixed.args)]
  1975  		ft = &fixed.funcType
  1976  	case n <= 8:
  1977  		fixed := new(funcTypeFixed8)
  1978  		args = fixed.args[:0:len(fixed.args)]
  1979  		ft = &fixed.funcType
  1980  	case n <= 16:
  1981  		fixed := new(funcTypeFixed16)
  1982  		args = fixed.args[:0:len(fixed.args)]
  1983  		ft = &fixed.funcType
  1984  	case n <= 32:
  1985  		fixed := new(funcTypeFixed32)
  1986  		args = fixed.args[:0:len(fixed.args)]
  1987  		ft = &fixed.funcType
  1988  	case n <= 64:
  1989  		fixed := new(funcTypeFixed64)
  1990  		args = fixed.args[:0:len(fixed.args)]
  1991  		ft = &fixed.funcType
  1992  	case n <= 128:
  1993  		fixed := new(funcTypeFixed128)
  1994  		args = fixed.args[:0:len(fixed.args)]
  1995  		ft = &fixed.funcType
  1996  	default:
  1997  		panic("reflect.FuncOf: too many arguments")
  1998  	}
  1999  	*ft = *prototype
  2000  
  2001  	// Build a hash and minimally populate ft.
  2002  	var hash uint32
  2003  	for _, in := range in {
  2004  		t := in.(*rtype)
  2005  		args = append(args, t)
  2006  		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
  2007  	}
  2008  	if variadic {
  2009  		hash = fnv1(hash, 'v')
  2010  	}
  2011  	hash = fnv1(hash, '.')
  2012  	for _, out := range out {
  2013  		t := out.(*rtype)
  2014  		args = append(args, t)
  2015  		hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
  2016  	}
  2017  	if len(args) > 50 {
  2018  		panic("reflect.FuncOf does not support more than 50 arguments")
  2019  	}
  2020  	ft.tflag = 0
  2021  	ft.hash = hash
  2022  	ft.inCount = uint16(len(in))
  2023  	ft.outCount = uint16(len(out))
  2024  	if variadic {
  2025  		ft.outCount |= 1 << 15
  2026  	}
  2027  
  2028  	// Look in cache.
  2029  	if ts, ok := funcLookupCache.m.Load(hash); ok {
  2030  		for _, t := range ts.([]*rtype) {
  2031  			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
  2032  				return t
  2033  			}
  2034  		}
  2035  	}
  2036  
  2037  	// Not in cache, lock and retry.
  2038  	funcLookupCache.Lock()
  2039  	defer funcLookupCache.Unlock()
  2040  	if ts, ok := funcLookupCache.m.Load(hash); ok {
  2041  		for _, t := range ts.([]*rtype) {
  2042  			if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
  2043  				return t
  2044  			}
  2045  		}
  2046  	}
  2047  
  2048  	addToCache := func(tt *rtype) Type {
  2049  		var rts []*rtype
  2050  		if rti, ok := funcLookupCache.m.Load(hash); ok {
  2051  			rts = rti.([]*rtype)
  2052  		}
  2053  		funcLookupCache.m.Store(hash, append(rts, tt))
  2054  		return tt
  2055  	}
  2056  
  2057  	// Look in known types for the same string representation.
  2058  	str := funcStr(ft)
  2059  	for _, tt := range typesByString(str) {
  2060  		if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
  2061  			return addToCache(tt)
  2062  		}
  2063  	}
  2064  
  2065  	// Populate the remaining fields of ft and store in cache.
  2066  	ft.str = resolveReflectName(newName(str, "", false))
  2067  	ft.ptrToThis = 0
  2068  	return addToCache(&ft.rtype)
  2069  }
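
// An illustrative use of FuncOf together with MakeFunc and Call (assumes the
// standard fmt and reflect packages):
//
//	ft := reflect.FuncOf(
//		[]reflect.Type{reflect.TypeOf(0)},
//		[]reflect.Type{reflect.TypeOf("")},
//		false,
//	)
//	fmt.Println(ft) // func(int) string
//
//	f := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
//		return []reflect.Value{reflect.ValueOf(fmt.Sprint(args[0].Int()))}
//	})
//	out := f.Call([]reflect.Value{reflect.ValueOf(7)})
//	fmt.Println(out[0]) // 7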
  2070  
  2071  // funcStr builds a string representation of a funcType.
  2072  func funcStr(ft *funcType) string {
  2073  	repr := make([]byte, 0, 64)
  2074  	repr = append(repr, "func("...)
  2075  	for i, t := range ft.in() {
  2076  		if i > 0 {
  2077  			repr = append(repr, ", "...)
  2078  		}
  2079  		if ft.IsVariadic() && i == int(ft.inCount)-1 {
  2080  			repr = append(repr, "..."...)
  2081  			repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
  2082  		} else {
  2083  			repr = append(repr, t.String()...)
  2084  		}
  2085  	}
  2086  	repr = append(repr, ')')
  2087  	out := ft.out()
  2088  	if len(out) == 1 {
  2089  		repr = append(repr, ' ')
  2090  	} else if len(out) > 1 {
  2091  		repr = append(repr, " ("...)
  2092  	}
  2093  	for i, t := range out {
  2094  		if i > 0 {
  2095  			repr = append(repr, ", "...)
  2096  		}
  2097  		repr = append(repr, t.String()...)
  2098  	}
  2099  	if len(out) > 1 {
  2100  		repr = append(repr, ')')
  2101  	}
  2102  	return string(repr)
  2103  }
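
// The string built above is what Type.String reports for constructed
// function types; a variadic, multi-result example (assumes the standard
// fmt and reflect packages):
//
//	ft := reflect.FuncOf(
//		[]reflect.Type{reflect.TypeOf(""), reflect.TypeOf([]int(nil))},
//		[]reflect.Type{reflect.TypeOf(false), reflect.TypeOf(0)},
//		true,
//	)
//	fmt.Println(ft) // func(string, ...int) (bool, int)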
  2104  
  2105  // isReflexive reports whether the == operation on the type is reflexive.
  2106  // That is, x == x for all values x of type t.
  2107  func isReflexive(t *rtype) bool {
  2108  	switch t.Kind() {
  2109  	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
  2110  		return true
  2111  	case Float32, Float64, Complex64, Complex128, Interface:
  2112  		return false
  2113  	case Array:
  2114  		tt := (*arrayType)(unsafe.Pointer(t))
  2115  		return isReflexive(tt.elem)
  2116  	case Struct:
  2117  		tt := (*structType)(unsafe.Pointer(t))
  2118  		for _, f := range tt.fields {
  2119  			if !isReflexive(f.typ) {
  2120  				return false
  2121  			}
  2122  		}
  2123  		return true
  2124  	default:
  2125  		// Func, Map, Slice, Invalid
  2126  		panic("isReflexive called on non-key type " + t.String())
  2127  	}
  2128  }
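
// Non-reflexive keys matter because NaN != NaN; a quick illustration of the
// behavior the map runtime has to cope with (assumes the standard fmt and
// math packages):
//
//	fmt.Println(math.NaN() == math.NaN()) // false
//
//	m := map[float64]int{}
//	m[math.NaN()] = 1
//	m[math.NaN()] = 2
//	fmt.Println(len(m)) // 2: each NaN key is a distinct entry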
  2129  
  2130  // needKeyUpdate reports whether map overwrites require the key to be copied.
  2131  func needKeyUpdate(t *rtype) bool {
  2132  	switch t.Kind() {
  2133  	case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
  2134  		return false
  2135  	case Float32, Float64, Complex64, Complex128, Interface, String:
  2136  		// Float keys can be updated from +0 to -0.
  2137  		// String keys can be updated to use a smaller backing store.
  2138  		// Interfaces might have floats or strings in them.
  2139  		return true
  2140  	case Array:
  2141  		tt := (*arrayType)(unsafe.Pointer(t))
  2142  		return needKeyUpdate(tt.elem)
  2143  	case Struct:
  2144  		tt := (*structType)(unsafe.Pointer(t))
  2145  		for _, f := range tt.fields {
  2146  			if needKeyUpdate(f.typ) {
  2147  				return true
  2148  			}
  2149  		}
  2150  		return false
  2151  	default:
  2152  		// Func, Map, Slice, Invalid
  2153  		panic("needKeyUpdate called on non-key type " + t.String())
  2154  	}
  2155  }
  2156  
  2157  // Make sure these routines stay in sync with ../../runtime/hashmap.go!
  2158  // These types exist only for GC, so we only fill out GC relevant info.
  2159  // Currently, that's just size and the GC program. We also fill in string
  2160  // for possible debugging use.
  2161  const (
  2162  	bucketSize uintptr = 8
  2163  	maxKeySize uintptr = 128
  2164  	maxValSize uintptr = 128
  2165  )
  2166  
  2167  func bucketOf(ktyp, etyp *rtype) *rtype {
  2168  	// See comment on hmap.overflow in ../runtime/hashmap.go.
  2169  	var kind uint8
  2170  	if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
  2171  		ktyp.size <= maxKeySize && etyp.size <= maxValSize {
  2172  		kind = kindNoPointers
  2173  	}
  2174  
  2175  	if ktyp.size > maxKeySize {
  2176  		ktyp = PtrTo(ktyp).(*rtype)
  2177  	}
  2178  	if etyp.size > maxValSize {
  2179  		etyp = PtrTo(etyp).(*rtype)
  2180  	}
  2181  
  2182  	// Prepare GC data if any.
  2183  	// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
  2184  	// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
  2185  	// Note that since the key and value are known to be <= 128 bytes,
  2186  	// they're guaranteed to have bitmaps instead of GC programs.
  2187  	var gcdata *byte
  2188  	var ptrdata uintptr
  2189  	var overflowPad uintptr
  2190  
  2191  	// On NaCl, pad if needed to make overflow end at the proper struct alignment.
  2192  	// On other systems, align > ptrSize is not possible.
  2193  	if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
  2194  		overflowPad = ptrSize
  2195  	}
  2196  	size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
  2197  	if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
  2198  		panic("reflect: bad size computation in MapOf")
  2199  	}
  2200  
  2201  	if kind != kindNoPointers {
  2202  		nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
  2203  		mask := make([]byte, (nptr+7)/8)
  2204  		base := bucketSize / ptrSize
  2205  
  2206  		if ktyp.kind&kindNoPointers == 0 {
  2207  			if ktyp.kind&kindGCProg != 0 {
  2208  				panic("reflect: unexpected GC program in MapOf")
  2209  			}
  2210  			kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
  2211  			for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
  2212  				if (kmask[i/8]>>(i%8))&1 != 0 {
  2213  					for j := uintptr(0); j < bucketSize; j++ {
  2214  						word := base + j*ktyp.size/ptrSize + i
  2215  						mask[word/8] |= 1 << (word % 8)
  2216  					}
  2217  				}
  2218  			}
  2219  		}
  2220  		base += bucketSize * ktyp.size / ptrSize
  2221  
  2222  		if etyp.kind&kindNoPointers == 0 {
  2223  			if etyp.kind&kindGCProg != 0 {
  2224  				panic("reflect: unexpected GC program in MapOf")
  2225  			}
  2226  			emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
  2227  			for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
  2228  				if (emask[i/8]>>(i%8))&1 != 0 {
  2229  					for j := uintptr(0); j < bucketSize; j++ {
  2230  						word := base + j*etyp.size/ptrSize + i
  2231  						mask[word/8] |= 1 << (word % 8)
  2232  					}
  2233  				}
  2234  			}
  2235  		}
  2236  		base += bucketSize * etyp.size / ptrSize
  2237  		base += overflowPad / ptrSize
  2238  
  2239  		word := base
  2240  		mask[word/8] |= 1 << (word % 8)
  2241  		gcdata = &mask[0]
  2242  		ptrdata = (word + 1) * ptrSize
  2243  
  2244  		// overflow word must be last
  2245  		if ptrdata != size {
  2246  			panic("reflect: bad layout computation in MapOf")
  2247  		}
  2248  	}
  2249  
  2250  	b := &rtype{
  2251  		align:   ptrSize,
  2252  		size:    size,
  2253  		kind:    kind,
  2254  		ptrdata: ptrdata,
  2255  		gcdata:  gcdata,
  2256  	}
  2257  	if overflowPad > 0 {
  2258  		b.align = 8
  2259  	}
  2260  	s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
  2261  	b.str = resolveReflectName(newName(s, "", false))
  2262  	return b
  2263  }
  2264  
  2265  // SliceOf returns the slice type with element type t.
  2266  // For example, if t represents int, SliceOf(t) represents []int.
  2267  func SliceOf(t Type) Type {
  2268  	typ := t.(*rtype)
  2269  
  2270  	// Look in cache.
  2271  	ckey := cacheKey{Slice, typ, nil, 0}
  2272  	if slice, ok := lookupCache.Load(ckey); ok {
  2273  		return slice.(Type)
  2274  	}
  2275  
  2276  	// Look in known types.
  2277  	s := "[]" + typ.String()
  2278  	for _, tt := range typesByString(s) {
  2279  		slice := (*sliceType)(unsafe.Pointer(tt))
  2280  		if slice.elem == typ {
  2281  			ti, _ := lookupCache.LoadOrStore(ckey, tt)
  2282  			return ti.(Type)
  2283  		}
  2284  	}
  2285  
  2286  	// Make a slice type.
  2287  	var islice interface{} = ([]unsafe.Pointer)(nil)
  2288  	prototype := *(**sliceType)(unsafe.Pointer(&islice))
  2289  	slice := *prototype
  2290  	slice.tflag = 0
  2291  	slice.str = resolveReflectName(newName(s, "", false))
  2292  	slice.hash = fnv1(typ.hash, '[')
  2293  	slice.elem = typ
  2294  	slice.ptrToThis = 0
  2295  
  2296  	ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
  2297  	return ti.(Type)
  2298  }
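
// An illustrative use of SliceOf together with MakeSlice and Append (assumes
// the standard fmt and reflect packages):
//
//	st := reflect.SliceOf(reflect.TypeOf(0))
//	fmt.Println(st) // []int
//
//	s := reflect.MakeSlice(st, 0, 4)
//	s = reflect.Append(s, reflect.ValueOf(1), reflect.ValueOf(2))
//	fmt.Println(s.Interface()) // [1 2]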
  2299  
  2300  // The structLookupCache caches StructOf lookups.
  2301  // StructOf does not share the common lookupCache since we need to pin
  2302  // the memory associated with *structTypeFixedN.
  2303  var structLookupCache struct {
  2304  	sync.Mutex // Guards stores (but not loads) on m.
  2305  
  2306  	// m is a map[uint32][]Type keyed by the hash calculated in StructOf.
  2307  	// Elements in m are append-only and thus safe for concurrent reading.
  2308  	m sync.Map
  2309  }
  2310  
  2311  type structTypeUncommon struct {
  2312  	structType
  2313  	u uncommonType
  2314  }
  2315  
  2316  // A *rtype representing a struct is followed directly in memory by an
  2317  // array of method objects representing the methods attached to the
  2318  // struct. To get the same layout for a run time generated type, we
  2319  // need an array directly following the uncommonType memory. The types
  2320  // structTypeFixed4, ...structTypeFixedN are used to do this.
  2321  //
  2322  // A similar strategy is used for funcTypeFixed4, ...funcTypeFixedN.
  2323  
  2324  // TODO(crawshaw): as these structTypeFixedN and funcTypeFixedN structs
  2325  // have no methods, they could be defined at runtime using the StructOf
  2326  // function.
  2327  
  2328  type structTypeFixed4 struct {
  2329  	structType
  2330  	u uncommonType
  2331  	m [4]method
  2332  }
  2333  
  2334  type structTypeFixed8 struct {
  2335  	structType
  2336  	u uncommonType
  2337  	m [8]method
  2338  }
  2339  
  2340  type structTypeFixed16 struct {
  2341  	structType
  2342  	u uncommonType
  2343  	m [16]method
  2344  }
  2345  
  2346  type structTypeFixed32 struct {
  2347  	structType
  2348  	u uncommonType
  2349  	m [32]method
  2350  }
  2351  
  2352  // isLetter reports whether a given rune is classified as a Letter.
  2353  func isLetter(ch rune) bool {
  2354  	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
  2355  }
  2356  
  2357  // isValidFieldName reports whether a string is a valid (struct) field name.
  2358  //
  2359  // According to the language spec, a field name should be an identifier.
  2360  //
  2361  // identifier = letter { letter | unicode_digit } .
  2362  // letter = unicode_letter | "_" .
  2363  func isValidFieldName(fieldName string) bool {
  2364  	for i, c := range fieldName {
  2365  		if i == 0 && !isLetter(c) {
  2366  			return false
  2367  		}
  2368  
  2369  		if !(isLetter(c) || unicode.IsDigit(c)) {
  2370  			return false
  2371  		}
  2372  	}
  2373  
  2374  	return len(fieldName) > 0
  2375  }
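
// A few cases the identifier check above accepts and rejects (calls to the
// unexported helper are shown only for illustration):
//
//	isValidFieldName("Count") // true
//	isValidFieldName("_tmp")  // true: '_' counts as a letter
//	isValidFieldName("名前")   // true: Unicode letters are allowed
//	isValidFieldName("1st")   // false: cannot start with a digit
//	isValidFieldName("")      // false: empty string is not an identifier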
  2376  
  2377  // StructOf returns the struct type containing fields.
  2378  // The Offset and Index fields are ignored and computed as they would be
  2379  // by the compiler.
  2380  //
  2381  // StructOf currently does not generate wrapper methods for embedded fields.
  2382  // This limitation may be lifted in a future version.
  2383  func StructOf(fields []StructField) Type {
  2384  	var (
  2385  		hash       = fnv1(0, []byte("struct {")...)
  2386  		size       uintptr
  2387  		typalign   uint8
  2388  		comparable = true
  2389  		hashable   = true
  2390  		methods    []method
  2391  
  2392  		fs   = make([]structField, len(fields))
  2393  		repr = make([]byte, 0, 64)
  2394  		fset = map[string]struct{}{} // fields' names
  2395  
  2396  		hasPtr    = false // records whether at least one struct-field is a pointer
  2397  		hasGCProg = false // records whether a struct-field type has a GCProg
  2398  	)
  2399  
  2400  	lastzero := uintptr(0)
  2401  	repr = append(repr, "struct {"...)
  2402  	for i, field := range fields {
  2403  		if field.Name == "" {
  2404  			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
  2405  		}
  2406  		if !isValidFieldName(field.Name) {
  2407  			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
  2408  		}
  2409  		if field.Type == nil {
  2410  			panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
  2411  		}
  2412  		f := runtimeStructField(field)
  2413  		ft := f.typ
  2414  		if ft.kind&kindGCProg != 0 {
  2415  			hasGCProg = true
  2416  		}
  2417  		if ft.pointers() {
  2418  			hasPtr = true
  2419  		}
  2420  
  2421  		// Update string and hash
  2422  		name := f.name.name()
  2423  		hash = fnv1(hash, []byte(name)...)
  2424  		repr = append(repr, (" " + name)...)
  2425  		if f.anon() {
  2426  			// Embedded field
  2427  			if f.typ.Kind() == Ptr {
  2428  				// Embedded ** and *interface{} are illegal
  2429  				elem := ft.Elem()
  2430  				if k := elem.Kind(); k == Ptr || k == Interface {
  2431  					panic("reflect.StructOf: illegal anonymous field type " + ft.String())
  2432  				}
  2433  			}
  2434  
  2435  			switch f.typ.Kind() {
  2436  			case Interface:
  2437  				ift := (*interfaceType)(unsafe.Pointer(ft))
  2438  				for im, m := range ift.methods {
  2439  					if ift.nameOff(m.name).pkgPath() != "" {
  2440  						// TODO(sbinet).  Issue 15924.
  2441  						panic("reflect: embedded interface with unexported method(s) not implemented")
  2442  					}
  2443  
  2444  					var (
  2445  						mtyp    = ift.typeOff(m.typ)
  2446  						ifield  = i
  2447  						imethod = im
  2448  						ifn     Value
  2449  						tfn     Value
  2450  					)
  2451  
  2452  					if ft.kind&kindDirectIface != 0 {
  2453  						tfn = MakeFunc(mtyp, func(in []Value) []Value {
  2454  							var args []Value
  2455  							var recv = in[0]
  2456  							if len(in) > 1 {
  2457  								args = in[1:]
  2458  							}
  2459  							return recv.Field(ifield).Method(imethod).Call(args)
  2460  						})
  2461  						ifn = MakeFunc(mtyp, func(in []Value) []Value {
  2462  							var args []Value
  2463  							var recv = in[0]
  2464  							if len(in) > 1 {
  2465  								args = in[1:]
  2466  							}
  2467  							return recv.Field(ifield).Method(imethod).Call(args)
  2468  						})
  2469  					} else {
  2470  						tfn = MakeFunc(mtyp, func(in []Value) []Value {
  2471  							var args []Value
  2472  							var recv = in[0]
  2473  							if len(in) > 1 {
  2474  								args = in[1:]
  2475  							}
  2476  							return recv.Field(ifield).Method(imethod).Call(args)
  2477  						})
  2478  						ifn = MakeFunc(mtyp, func(in []Value) []Value {
  2479  							var args []Value
  2480  							var recv = Indirect(in[0])
  2481  							if len(in) > 1 {
  2482  								args = in[1:]
  2483  							}
  2484  							return recv.Field(ifield).Method(imethod).Call(args)
  2485  						})
  2486  					}
  2487  
  2488  					methods = append(methods, method{
  2489  						name: resolveReflectName(ift.nameOff(m.name)),
  2490  						mtyp: resolveReflectType(mtyp),
  2491  						ifn:  resolveReflectText(unsafe.Pointer(&ifn)),
  2492  						tfn:  resolveReflectText(unsafe.Pointer(&tfn)),
  2493  					})
  2494  				}
  2495  			case Ptr:
  2496  				ptr := (*ptrType)(unsafe.Pointer(ft))
  2497  				if unt := ptr.uncommon(); unt != nil {
  2498  					if i > 0 && unt.mcount > 0 {
  2499  						// Issue 15924.
  2500  						panic("reflect: embedded type with methods not implemented if type is not first field")
  2501  					}
  2502  					for _, m := range unt.methods() {
  2503  						mname := ptr.nameOff(m.name)
  2504  						if mname.pkgPath() != "" {
  2505  							// TODO(sbinet).
  2506  							// Issue 15924.
  2507  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2508  						}
  2509  						methods = append(methods, method{
  2510  							name: resolveReflectName(mname),
  2511  							mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
  2512  							ifn:  resolveReflectText(ptr.textOff(m.ifn)),
  2513  							tfn:  resolveReflectText(ptr.textOff(m.tfn)),
  2514  						})
  2515  					}
  2516  				}
  2517  				if unt := ptr.elem.uncommon(); unt != nil {
  2518  					for _, m := range unt.methods() {
  2519  						mname := ptr.nameOff(m.name)
  2520  						if mname.pkgPath() != "" {
  2521  							// TODO(sbinet)
  2522  							// Issue 15924.
  2523  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2524  						}
  2525  						methods = append(methods, method{
  2526  							name: resolveReflectName(mname),
  2527  							mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
  2528  							ifn:  resolveReflectText(ptr.elem.textOff(m.ifn)),
  2529  							tfn:  resolveReflectText(ptr.elem.textOff(m.tfn)),
  2530  						})
  2531  					}
  2532  				}
  2533  			default:
  2534  				if unt := ft.uncommon(); unt != nil {
  2535  					if i > 0 && unt.mcount > 0 {
  2536  						// Issue 15924.
  2537  						panic("reflect: embedded type with methods not implemented if type is not first field")
  2538  					}
  2539  					for _, m := range unt.methods() {
  2540  						mname := ft.nameOff(m.name)
  2541  						if mname.pkgPath() != "" {
  2542  							// TODO(sbinet)
  2543  							// Issue 15924.
  2544  							panic("reflect: embedded interface with unexported method(s) not implemented")
  2545  						}
  2546  						methods = append(methods, method{
  2547  							name: resolveReflectName(mname),
  2548  							mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
  2549  							ifn:  resolveReflectText(ft.textOff(m.ifn)),
  2550  							tfn:  resolveReflectText(ft.textOff(m.tfn)),
  2551  						})
  2552  
  2553  					}
  2554  				}
  2555  			}
  2556  		}
  2557  		if _, dup := fset[name]; dup {
  2558  			panic("reflect.StructOf: duplicate field " + name)
  2559  		}
  2560  		fset[name] = struct{}{}
  2561  
  2562  		hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
  2563  
  2564  		repr = append(repr, (" " + ft.String())...)
  2565  		if f.name.tagLen() > 0 {
  2566  			hash = fnv1(hash, []byte(f.name.tag())...)
  2567  			repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
  2568  		}
  2569  		if i < len(fields)-1 {
  2570  			repr = append(repr, ';')
  2571  		}
  2572  
  2573  		comparable = comparable && (ft.alg.equal != nil)
  2574  		hashable = hashable && (ft.alg.hash != nil)
  2575  
  2576  		offset := align(size, uintptr(ft.align))
  2577  		if ft.align > typalign {
  2578  			typalign = ft.align
  2579  		}
  2580  		size = offset + ft.size
  2581  		f.offsetAnon |= offset << 1
  2582  
  2583  		if ft.size == 0 {
  2584  			lastzero = size
  2585  		}
  2586  
  2587  		fs[i] = f
  2588  	}
  2589  
  2590  	if size > 0 && lastzero == size {
  2591  		// This is a non-zero sized struct that ends in a
  2592  		// zero-sized field. We add an extra byte of padding,
  2593  		// to ensure that taking the address of the final
  2594  		// zero-sized field can't manufacture a pointer to the
  2595  		// next object in the heap. See issue 9401.
  2596  		size++
  2597  	}
  2598  
  2599  	var typ *structType
  2600  	var ut *uncommonType
  2601  
  2602  	switch {
  2603  	case len(methods) == 0:
  2604  		t := new(structTypeUncommon)
  2605  		typ = &t.structType
  2606  		ut = &t.u
  2607  	case len(methods) <= 4:
  2608  		t := new(structTypeFixed4)
  2609  		typ = &t.structType
  2610  		ut = &t.u
  2611  		copy(t.m[:], methods)
  2612  	case len(methods) <= 8:
  2613  		t := new(structTypeFixed8)
  2614  		typ = &t.structType
  2615  		ut = &t.u
  2616  		copy(t.m[:], methods)
  2617  	case len(methods) <= 16:
  2618  		t := new(structTypeFixed16)
  2619  		typ = &t.structType
  2620  		ut = &t.u
  2621  		copy(t.m[:], methods)
  2622  	case len(methods) <= 32:
  2623  		t := new(structTypeFixed32)
  2624  		typ = &t.structType
  2625  		ut = &t.u
  2626  		copy(t.m[:], methods)
  2627  	default:
  2628  		panic("reflect.StructOf: too many methods")
  2629  	}
  2630  	ut.mcount = uint16(len(methods))
  2631  	ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
  2632  
  2633  	if len(fs) > 0 {
  2634  		repr = append(repr, ' ')
  2635  	}
  2636  	repr = append(repr, '}')
  2637  	hash = fnv1(hash, '}')
  2638  	str := string(repr)
  2639  
  2640  	// Round the size up to be a multiple of the alignment.
  2641  	size = align(size, uintptr(typalign))
  2642  
  2643  	// Make the struct type.
  2644  	var istruct interface{} = struct{}{}
  2645  	prototype := *(**structType)(unsafe.Pointer(&istruct))
  2646  	*typ = *prototype
  2647  	typ.fields = fs
  2648  
  2649  	// Look in cache.
  2650  	if ts, ok := structLookupCache.m.Load(hash); ok {
  2651  		for _, st := range ts.([]Type) {
  2652  			t := st.common()
  2653  			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
  2654  				return t
  2655  			}
  2656  		}
  2657  	}
  2658  
  2659  	// Not in cache, lock and retry.
  2660  	structLookupCache.Lock()
  2661  	defer structLookupCache.Unlock()
  2662  	if ts, ok := structLookupCache.m.Load(hash); ok {
  2663  		for _, st := range ts.([]Type) {
  2664  			t := st.common()
  2665  			if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
  2666  				return t
  2667  			}
  2668  		}
  2669  	}
  2670  
  2671  	addToCache := func(t Type) Type {
  2672  		var ts []Type
  2673  		if ti, ok := structLookupCache.m.Load(hash); ok {
  2674  			ts = ti.([]Type)
  2675  		}
  2676  		structLookupCache.m.Store(hash, append(ts, t))
  2677  		return t
  2678  	}
  2679  
  2680  	// Look in known types.
  2681  	for _, t := range typesByString(str) {
  2682  		if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
  2683  			// even if 't' wasn't a structType with methods, we should be ok
  2684  			// as the 'u uncommonType' field won't be accessed except when
  2685  			// tflag&tflagUncommon is set.
  2686  			return addToCache(t)
  2687  		}
  2688  	}
  2689  
  2690  	typ.str = resolveReflectName(newName(str, "", false))
  2691  	typ.tflag = 0
  2692  	typ.hash = hash
  2693  	typ.size = size
  2694  	typ.align = typalign
  2695  	typ.fieldAlign = typalign
  2696  	typ.ptrToThis = 0
  2697  	if len(methods) > 0 {
  2698  		typ.tflag |= tflagUncommon
  2699  	}
  2700  	if !hasPtr {
  2701  		typ.kind |= kindNoPointers
  2702  	} else {
  2703  		typ.kind &^= kindNoPointers
  2704  	}
  2705  
  2706  	if hasGCProg {
  2707  		lastPtrField := 0
  2708  		for i, ft := range fs {
  2709  			if ft.typ.pointers() {
  2710  				lastPtrField = i
  2711  			}
  2712  		}
  2713  		prog := []byte{0, 0, 0, 0} // will be length of prog
  2714  		for i, ft := range fs {
  2715  			if i > lastPtrField {
  2716  				// gcprog should not include anything for any field after
  2717  				// the last field that contains pointer data
  2718  				break
  2719  			}
  2720  			// FIXME(sbinet) handle padding, fields smaller than a word
  2721  			elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
  2722  			elemPtrs := ft.typ.ptrdata / ptrSize
  2723  			switch {
  2724  			case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
  2725  				// Element is small with pointer mask; use as literal bits.
  2726  				mask := elemGC
  2727  				// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
  2728  				var n uintptr
  2729  				for n = elemPtrs; n > 120; n -= 120 {
  2730  					prog = append(prog, 120)
  2731  					prog = append(prog, mask[:15]...)
  2732  					mask = mask[15:]
  2733  				}
  2734  				prog = append(prog, byte(n))
  2735  				prog = append(prog, mask[:(n+7)/8]...)
  2736  			case ft.typ.kind&kindGCProg != 0:
  2737  				// Element has GC program; emit one element.
  2738  				elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
  2739  				prog = append(prog, elemProg...)
  2740  			}
  2741  			// Pad from ptrdata to size.
  2742  			elemWords := ft.typ.size / ptrSize
  2743  			if elemPtrs < elemWords {
  2744  				// Emit literal 0 bit, then repeat as needed.
  2745  				prog = append(prog, 0x01, 0x00)
  2746  				if elemPtrs+1 < elemWords {
  2747  					prog = append(prog, 0x81)
  2748  					prog = appendVarint(prog, elemWords-elemPtrs-1)
  2749  				}
  2750  			}
  2751  		}
  2752  		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
  2753  		typ.kind |= kindGCProg
  2754  		typ.gcdata = &prog[0]
  2755  	} else {
  2756  		typ.kind &^= kindGCProg
  2757  		bv := new(bitVector)
  2758  		addTypeBits(bv, 0, typ.common())
  2759  		if len(bv.data) > 0 {
  2760  			typ.gcdata = &bv.data[0]
  2761  		}
  2762  	}
  2763  	typ.ptrdata = typeptrdata(typ.common())
  2764  	typ.alg = new(typeAlg)
  2765  	if hashable {
  2766  		typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
  2767  			o := seed
  2768  			for _, ft := range typ.fields {
  2769  				pi := add(p, ft.offset(), "&x.field safe")
  2770  				o = ft.typ.alg.hash(pi, o)
  2771  			}
  2772  			return o
  2773  		}
  2774  	}
  2775  
  2776  	if comparable {
  2777  		typ.alg.equal = func(p, q unsafe.Pointer) bool {
  2778  			for _, ft := range typ.fields {
  2779  				pi := add(p, ft.offset(), "&x.field safe")
  2780  				qi := add(q, ft.offset(), "&x.field safe")
  2781  				if !ft.typ.alg.equal(pi, qi) {
  2782  					return false
  2783  				}
  2784  			}
  2785  			return true
  2786  		}
  2787  	}
  2788  
  2789  	switch {
  2790  	case len(fs) == 1 && !ifaceIndir(fs[0].typ):
  2791  		// structs of 1 direct iface type can be direct
  2792  		typ.kind |= kindDirectIface
  2793  	default:
  2794  		typ.kind &^= kindDirectIface
  2795  	}
  2796  
  2797  	return addToCache(&typ.rtype)
  2798  }
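
// An illustrative use of StructOf (assumes the standard fmt and reflect
// packages; the field names and tag are made up for the example):
//
//	t := reflect.StructOf([]reflect.StructField{
//		{Name: "Name", Type: reflect.TypeOf("")},
//		{Name: "Age", Type: reflect.TypeOf(0), Tag: `json:"age"`},
//	})
//	v := reflect.New(t).Elem()
//	v.Field(0).SetString("Gopher")
//	v.Field(1).SetInt(12)
//	fmt.Println(v.Interface()) // {Gopher 12}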
  2799  
  2800  func runtimeStructField(field StructField) structField {
  2801  	if field.PkgPath != "" {
  2802  		panic("reflect.StructOf: StructOf does not allow unexported fields")
  2803  	}
  2804  
  2805  	// Best-effort check for misuse.
  2806  	// Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
  2807  	c := field.Name[0]
  2808  	if 'a' <= c && c <= 'z' || c == '_' {
  2809  		panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
  2810  	}
  2811  
  2812  	offsetAnon := uintptr(0)
  2813  	if field.Anonymous {
  2814  		offsetAnon |= 1
  2815  	}
  2816  
  2817  	resolveReflectType(field.Type.common()) // install in runtime
  2818  	return structField{
  2819  		name:       newName(field.Name, string(field.Tag), true),
  2820  		typ:        field.Type.common(),
  2821  		offsetAnon: offsetAnon,
  2822  	}
  2823  }
  2824  
  2825  // typeptrdata returns the length in bytes of the prefix of t
  2826  // containing pointer data. Anything after this offset is scalar data.
  2827  // keep in sync with ../cmd/compile/internal/gc/reflect.go
  2828  func typeptrdata(t *rtype) uintptr {
  2829  	if !t.pointers() {
  2830  		return 0
  2831  	}
  2832  	switch t.Kind() {
  2833  	case Struct:
  2834  		st := (*structType)(unsafe.Pointer(t))
  2835  		// find the last field that has pointers.
  2836  		field := 0
  2837  		for i := range st.fields {
  2838  			ft := st.fields[i].typ
  2839  			if ft.pointers() {
  2840  				field = i
  2841  			}
  2842  		}
  2843  		f := st.fields[field]
  2844  		return f.offset() + f.typ.ptrdata
  2845  
  2846  	default:
  2847  		panic("reflect.typeptrdata: unexpected type, " + t.String())
  2848  	}
  2849  }
  2850  
  2851  // See cmd/compile/internal/gc/reflect.go for derivation of constant.
  2852  const maxPtrmaskBytes = 2048
  2853  
  2854  // ArrayOf returns the array type with the given count and element type.
  2855  // For example, if t represents int, ArrayOf(5, t) represents [5]int.
  2856  //
  2857  // If the resulting type would be larger than the available address space,
  2858  // ArrayOf panics.
  2859  func ArrayOf(count int, elem Type) Type {
  2860  	typ := elem.(*rtype)
  2861  
  2862  	// Look in cache.
  2863  	ckey := cacheKey{Array, typ, nil, uintptr(count)}
  2864  	if array, ok := lookupCache.Load(ckey); ok {
  2865  		return array.(Type)
  2866  	}
  2867  
  2868  	// Look in known types.
  2869  	s := "[" + strconv.Itoa(count) + "]" + typ.String()
  2870  	for _, tt := range typesByString(s) {
  2871  		array := (*arrayType)(unsafe.Pointer(tt))
  2872  		if array.elem == typ {
  2873  			ti, _ := lookupCache.LoadOrStore(ckey, tt)
  2874  			return ti.(Type)
  2875  		}
  2876  	}
  2877  
  2878  	// Make an array type.
  2879  	var iarray interface{} = [1]unsafe.Pointer{}
  2880  	prototype := *(**arrayType)(unsafe.Pointer(&iarray))
  2881  	array := *prototype
  2882  	array.tflag = 0
  2883  	array.str = resolveReflectName(newName(s, "", false))
  2884  	array.hash = fnv1(typ.hash, '[')
  2885  	for n := uint32(count); n > 0; n >>= 8 {
  2886  		array.hash = fnv1(array.hash, byte(n))
  2887  	}
  2888  	array.hash = fnv1(array.hash, ']')
  2889  	array.elem = typ
  2890  	array.ptrToThis = 0
  2891  	if typ.size > 0 {
  2892  		max := ^uintptr(0) / typ.size
  2893  		if uintptr(count) > max {
  2894  			panic("reflect.ArrayOf: array size would exceed virtual address space")
  2895  		}
  2896  	}
  2897  	array.size = typ.size * uintptr(count)
  2898  	if count > 0 && typ.ptrdata != 0 {
  2899  		array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
  2900  	}
  2901  	array.align = typ.align
  2902  	array.fieldAlign = typ.fieldAlign
  2903  	array.len = uintptr(count)
  2904  	array.slice = SliceOf(elem).(*rtype)
  2905  
  2906  	array.kind &^= kindNoPointers
  2907  	switch {
  2908  	case typ.kind&kindNoPointers != 0 || array.size == 0:
  2909  		// No pointers.
  2910  		array.kind |= kindNoPointers
  2911  		array.gcdata = nil
  2912  		array.ptrdata = 0
  2913  
  2914  	case count == 1:
  2915  		// In memory, 1-element array looks just like the element.
  2916  		array.kind |= typ.kind & kindGCProg
  2917  		array.gcdata = typ.gcdata
  2918  		array.ptrdata = typ.ptrdata
  2919  
  2920  	case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
  2921  		// Element is small with pointer mask; array is still small.
  2922  		// Create direct pointer mask by turning each 1 bit in elem
  2923  		// into count 1 bits in larger mask.
  2924  		mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
  2925  		elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
  2926  		elemWords := typ.size / ptrSize
  2927  		for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
  2928  			if (elemMask[j/8]>>(j%8))&1 != 0 {
  2929  				for i := uintptr(0); i < array.len; i++ {
  2930  					k := i*elemWords + j
  2931  					mask[k/8] |= 1 << (k % 8)
  2932  				}
  2933  			}
  2934  		}
  2935  		array.gcdata = &mask[0]
  2936  
  2937  	default:
  2938  		// Create program that emits one element
  2939  		// and then repeats to make the array.
  2940  		prog := []byte{0, 0, 0, 0} // will be length of prog
  2941  		elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
  2942  		elemPtrs := typ.ptrdata / ptrSize
  2943  		if typ.kind&kindGCProg == 0 {
  2944  			// Element is small with pointer mask; use as literal bits.
  2945  			mask := elemGC
  2946  			// Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
  2947  			var n uintptr
  2948  			for n = elemPtrs; n > 120; n -= 120 {
  2949  				prog = append(prog, 120)
  2950  				prog = append(prog, mask[:15]...)
  2951  				mask = mask[15:]
  2952  			}
  2953  			prog = append(prog, byte(n))
  2954  			prog = append(prog, mask[:(n+7)/8]...)
  2955  		} else {
  2956  			// Element has GC program; emit one element.
  2957  			elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
  2958  			prog = append(prog, elemProg...)
  2959  		}
  2960  		// Pad from ptrdata to size.
  2961  		elemWords := typ.size / ptrSize
  2962  		if elemPtrs < elemWords {
  2963  			// Emit literal 0 bit, then repeat as needed.
  2964  			prog = append(prog, 0x01, 0x00)
  2965  			if elemPtrs+1 < elemWords {
  2966  				prog = append(prog, 0x81)
  2967  				prog = appendVarint(prog, elemWords-elemPtrs-1)
  2968  			}
  2969  		}
  2970  		// Repeat count-1 times.
  2971  		if elemWords < 0x80 {
  2972  			prog = append(prog, byte(elemWords|0x80))
  2973  		} else {
  2974  			prog = append(prog, 0x80)
  2975  			prog = appendVarint(prog, elemWords)
  2976  		}
  2977  		prog = appendVarint(prog, uintptr(count)-1)
  2978  		prog = append(prog, 0)
  2979  		*(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
  2980  		array.kind |= kindGCProg
  2981  		array.gcdata = &prog[0]
  2982  		array.ptrdata = array.size // overestimate but ok; must match program
  2983  	}
  2984  
  2985  	etyp := typ.common()
  2986  	esize := etyp.Size()
  2987  	ealg := etyp.alg
  2988  
  2989  	array.alg = new(typeAlg)
  2990  	if ealg.equal != nil {
  2991  		eequal := ealg.equal
  2992  		array.alg.equal = func(p, q unsafe.Pointer) bool {
  2993  			for i := 0; i < count; i++ {
  2994  				pi := arrayAt(p, i, esize, "i < count")
  2995  				qi := arrayAt(q, i, esize, "i < count")
  2996  				if !eequal(pi, qi) {
  2997  					return false
  2998  				}
  2999  
  3000  			}
  3001  			return true
  3002  		}
  3003  	}
  3004  	if ealg.hash != nil {
  3005  		ehash := ealg.hash
  3006  		array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
  3007  			o := seed
  3008  			for i := 0; i < count; i++ {
  3009  				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
  3010  			}
  3011  			return o
  3012  		}
  3013  	}
  3014  
  3015  	switch {
  3016  	case count == 1 && !ifaceIndir(typ):
  3017  		// array of 1 direct iface type can be direct
  3018  		array.kind |= kindDirectIface
  3019  	default:
  3020  		array.kind &^= kindDirectIface
  3021  	}
  3022  
  3023  	ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
  3024  	return ti.(Type)
  3025  }
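
// An illustrative use of ArrayOf (assumes the standard fmt and reflect
// packages):
//
//	at := reflect.ArrayOf(3, reflect.TypeOf(0))
//	fmt.Println(at) // [3]int
//
//	a := reflect.New(at).Elem()
//	a.Index(1).SetInt(7)
//	fmt.Println(a.Interface()) // [0 7 0]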
  3026  
  3027  func appendVarint(x []byte, v uintptr) []byte {
  3028  	for ; v >= 0x80; v >>= 7 {
  3029  		x = append(x, byte(v|0x80))
  3030  	}
  3031  	x = append(x, byte(v))
  3032  	return x
  3033  }
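
// appendVarint uses the common little-endian base-128 encoding: seven bits
// per byte, least significant group first, with the high bit set on every
// byte except the last. For example:
//
//	appendVarint(nil, 300) // []byte{0xac, 0x02}, since 300 = 0b10_0101100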
  3034  
  3035  // toType converts from a *rtype to a Type that can be returned
  3036  // to the client of package reflect. In gc, the only concern is that
  3037  // a nil *rtype must be replaced by a nil Type, but in gccgo this
  3038  // function takes care of ensuring that multiple *rtype for the same
  3039  // type are coalesced into a single Type.
  3040  func toType(t *rtype) Type {
  3041  	if t == nil {
  3042  		return nil
  3043  	}
  3044  	return t
  3045  }
  3046  
  3047  type layoutKey struct {
  3048  	t    *rtype // function signature
  3049  	rcvr *rtype // receiver type, or nil if none
  3050  }
  3051  
  3052  type layoutType struct {
  3053  	t         *rtype
  3054  	argSize   uintptr // size of arguments
  3055  	retOffset uintptr // offset of return values.
  3056  	stack     *bitVector
  3057  	framePool *sync.Pool
  3058  }
  3059  
  3060  var layoutCache sync.Map // map[layoutKey]layoutType
  3061  
  3062  // funcLayout computes a struct type representing the layout of the
  3063  // function arguments and return values for the function type t.
  3064  // If rcvr != nil, rcvr specifies the type of the receiver.
  3065  // The returned type exists only for GC, so we only fill out GC relevant info.
  3066  // Currently, that's just size and the GC program. We also fill in
  3067  // the name for possible debugging use.
  3068  func funcLayout(t *rtype, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
  3069  	if t.Kind() != Func {
  3070  		panic("reflect: funcLayout of non-func type")
  3071  	}
  3072  	if rcvr != nil && rcvr.Kind() == Interface {
  3073  		panic("reflect: funcLayout with interface receiver " + rcvr.String())
  3074  	}
  3075  	k := layoutKey{t, rcvr}
  3076  	if lti, ok := layoutCache.Load(k); ok {
  3077  		lt := lti.(layoutType)
  3078  		return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
  3079  	}
  3080  
  3081  	tt := (*funcType)(unsafe.Pointer(t))
  3082  
  3083  	// compute gc program & stack bitmap for arguments
  3084  	ptrmap := new(bitVector)
  3085  	var offset uintptr
  3086  	if rcvr != nil {
  3087  		// Reflect uses the "interface" calling convention for
  3088  		// methods, where receivers take one word of argument
  3089  		// space no matter how big they actually are.
  3090  		if ifaceIndir(rcvr) || rcvr.pointers() {
  3091  			ptrmap.append(1)
  3092  		}
  3093  		offset += ptrSize
  3094  	}
  3095  	for _, arg := range tt.in() {
  3096  		offset += -offset & uintptr(arg.align-1)
  3097  		addTypeBits(ptrmap, offset, arg)
  3098  		offset += arg.size
  3099  	}
  3100  	argN := ptrmap.n
  3101  	argSize = offset
  3102  	if runtime.GOARCH == "amd64p32" {
  3103  		offset += -offset & (8 - 1)
  3104  	}
  3105  	offset += -offset & (ptrSize - 1)
  3106  	retOffset = offset
  3107  	for _, res := range tt.out() {
  3108  		offset += -offset & uintptr(res.align-1)
  3109  		addTypeBits(ptrmap, offset, res)
  3110  		offset += res.size
  3111  	}
  3112  	offset += -offset & (ptrSize - 1)
  3113  
  3114  	// build dummy rtype holding gc program
  3115  	x := &rtype{
  3116  		align:   ptrSize,
  3117  		size:    offset,
  3118  		ptrdata: uintptr(ptrmap.n) * ptrSize,
  3119  	}
  3120  	if runtime.GOARCH == "amd64p32" {
  3121  		x.align = 8
  3122  	}
  3123  	if ptrmap.n > 0 {
  3124  		x.gcdata = &ptrmap.data[0]
  3125  	} else {
  3126  		x.kind |= kindNoPointers
  3127  	}
  3128  	ptrmap.n = argN
  3129  
  3130  	var s string
  3131  	if rcvr != nil {
  3132  		s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
  3133  	} else {
  3134  		s = "funcargs(" + t.String() + ")"
  3135  	}
  3136  	x.str = resolveReflectName(newName(s, "", false))
  3137  
  3138  	// cache result for future callers
  3139  	framePool = &sync.Pool{New: func() interface{} {
  3140  		return unsafe_New(x)
  3141  	}}
  3142  	lti, _ := layoutCache.LoadOrStore(k, layoutType{
  3143  		t:         x,
  3144  		argSize:   argSize,
  3145  		retOffset: retOffset,
  3146  		stack:     ptrmap,
  3147  		framePool: framePool,
  3148  	})
  3149  	lt := lti.(layoutType)
  3150  	return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
  3151  }
  3152  
  3153  // ifaceIndir reports whether t is stored indirectly in an interface value.
  3154  func ifaceIndir(t *rtype) bool {
  3155  	return t.kind&kindDirectIface == 0
  3156  }
  3157  
  3158  // Layout matches runtime.gobitvector (well enough).
  3159  type bitVector struct {
  3160  	n    uint32 // number of bits
  3161  	data []byte
  3162  }
  3163  
  3164  // append a bit to the bitmap.
  3165  func (bv *bitVector) append(bit uint8) {
  3166  	if bv.n%8 == 0 {
  3167  		bv.data = append(bv.data, 0)
  3168  	}
  3169  	bv.data[bv.n/8] |= bit << (bv.n % 8)
  3170  	bv.n++
  3171  }
  3172  
  3173  func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
  3174  	if t.kind&kindNoPointers != 0 {
  3175  		return
  3176  	}
  3177  
  3178  	switch Kind(t.kind & kindMask) {
  3179  	case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
  3180  		// 1 pointer at start of representation
  3181  		for bv.n < uint32(offset/uintptr(ptrSize)) {
  3182  			bv.append(0)
  3183  		}
  3184  		bv.append(1)
  3185  
  3186  	case Interface:
  3187  		// 2 pointers
  3188  		for bv.n < uint32(offset/uintptr(ptrSize)) {
  3189  			bv.append(0)
  3190  		}
  3191  		bv.append(1)
  3192  		bv.append(1)
  3193  
  3194  	case Array:
  3195  		// repeat inner type
  3196  		tt := (*arrayType)(unsafe.Pointer(t))
  3197  		for i := 0; i < int(tt.len); i++ {
  3198  			addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
  3199  		}
  3200  
  3201  	case Struct:
  3202  		// apply fields
  3203  		tt := (*structType)(unsafe.Pointer(t))
  3204  		for i := range tt.fields {
  3205  			f := &tt.fields[i]
  3206  			addTypeBits(bv, offset+f.offset(), f.typ)
  3207  		}
  3208  	}
  3209  }
  3210  
