gtsocial-umbx


helper.go (87831B)


      1 // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
      2 // Use of this source code is governed by a MIT license found in the LICENSE file.
      3 
      4 package codec
      5 
      6 // Contains code shared by both encode and decode.
      7 
      8 // Some shared ideas around encoding/decoding
      9 // ------------------------------------------
     10 //
     11 // If an interface{} is passed, we first do a type assertion to see if it is
     12 // a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
     13 //
     14 // If we start with a reflect.Value, we are already in reflect.Value land and
     15 // will try to grab the function for the underlying Type and directly call that function.
     16 // This is more performant than calling reflect.Value.Interface().
     17 //
     18 // This still helps us bypass many layers of reflection, and gives the best performance.
     19 //
     20 // Containers
     21 // ------------
     22 // Containers in the stream are either associative arrays (key-value pairs) or
     23 // regular arrays (indexed by incrementing integers).
     24 //
     25 // Some streams support indefinite-length containers, and use a breaking
     26 // byte-sequence to denote that the container has come to an end.
     27 //
     28 // Some streams also are text-based, and use explicit separators to denote the
     29 // end/beginning of different values.
     30 //
     31 // Philosophy
     32 // ------------
     33 // On decode, this codec will update containers appropriately:
     34 //    - If struct, update fields from stream into fields of struct.
     35 //      If field in stream not found in struct, handle appropriately (based on option).
     36 //      If a struct field has no corresponding value in the stream, leave it AS IS.
     37 //      If nil in stream, set value to nil/zero value.
     38 //    - If map, update map from stream.
     39 //      If the stream value is NIL, set the map to nil.
     40 //    - If slice, try to update up to the length of the array in the stream.
     41 //      If the container length is less than the stream array length,
     42 //      and the container cannot be expanded, handle appropriately (based on option).
     43 //      This means you can decode a 4-element stream array into a 1-element array.
     44 //
     45 // ------------------------------------
     46 // On encode, the user can specify omitEmpty. This means that the value will be omitted
     47 // if it is the zero value. A problem may occur during decode, where omitted values do not affect
     48 // the value being decoded into. This means that if decoding into a struct with an
     49 // int field with current value=5, and the field is omitted in the stream, then after
     50 // decoding, the value will still be 5 (not 0).
     51 // omitEmpty only works if you guarantee that you always decode into zero-values.
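        //
        // For example, a minimal sketch of the pitfall (assuming a JsonHandle h, and
        // omitEmpty set via struct tag):
        //
        //	type T struct {
        //		A int `codec:"a,omitempty"`
        //	}
        //	var b []byte
        //	_ = NewEncoderBytes(&b, h).Encode(T{}) // A is the zero value, so b == `{}`
        //	v := T{A: 5}
        //	_ = NewDecoderBytes(b, h).Decode(&v)   // stream has nothing for A
        //	// v.A is still 5 (not 0)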
     52 //
     53 // ------------------------------------
     54 // We could have truncated a map to remove keys not available in the stream,
     55 // or set values in the struct which are not in the stream to their zero values.
     56 // We decided against it because there is no efficient way to do it.
     57 // We may introduce it as an option later.
     58 // However, that will require enabling it for both runtime and code generation modes.
     59 //
     60 // To support truncate, we would need to do 2 passes over the container (a sketch of the map variant follows this list):
     61 //   map
     62 //   - first collect all keys (e.g. in k1)
     63 //   - for each key in stream, mark k1 that the key should not be removed
     64 //   - after updating map, do second pass and call delete for all keys in k1 which are not marked
     65 //   struct:
     66 //   - for each field, track the *typeInfo s1
     67 //   - iterate through all s1, and for each one not marked, set value to zero
     68 //   - this involves checking the possible anonymous fields which are nil ptrs.
     69 //     too much work.
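        //
        // A sketch of the map variant (hypothetical; never implemented):
        //
        //	k1 := make(map[interface{}]bool, len(m))
        //	for k := range m { // pass 1: collect all keys
        //		k1[k] = false
        //	}
        //	// ... while decoding, mark k1[k] = true for each key seen in the stream ...
        //	for k, seen := range k1 { // pass 2: delete unmarked keys
        //		if !seen {
        //			delete(m, k)
        //		}
        //	}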
     70 //
     71 // ------------------------------------------
     72 // Error Handling is done within the library using panic.
     73 //
     74 // This way, the code doesn't have to keep checking if an error has happened,
     75 // and we don't have to keep sending the error value along with each call
     76 // or storing it in the En|Decoder and checking it constantly along the way.
     77 //
     78 // We considered storing the error in the En|Decoder:
     79 //   - once it has its err field set, it cannot be used again.
     80 //   - panicking will be optional, controlled by a const flag.
     81 //   - code should always check error first and return early.
     82 //
     83 // We eventually decided against it as it makes the code clumsier to always
     84 // check for these error conditions.
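        //
        // A sketch of the resulting top-level pattern (simplified; the real code also
        // wraps the error with format name and position info):
        //
        //	func (d *Decoder) Decode(v interface{}) (err error) {
        //		defer func() {
        //			if x := recover(); x != nil {
        //				err = fmt.Errorf("codec: %v", x)
        //			}
        //		}()
        //		d.decode(v) // may panic deep inside; no err plumbing needed (name assumed)
        //		return
        //	}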
     85 //
     86 // ------------------------------------------
     87 // We use sync.Pool only to manage long-lived objects shared across multiple goroutines.
     88 // Encoder, Decoder, enc|decDriver, reader|writer, etc do not fall into this bucket.
     89 //
     90 // Also, GC is much better now, eliminating some of the reasons to use a shared pool structure.
     91 // Instead, the short-lived objects use free-lists that live as long as the object exists.
     92 //
     93 // ------------------------------------------
     94 // Performance is affected by the following:
     95 //    - Bounds Checking
     96 //    - Inlining
     97 //    - Pointer chasing
     98 // This package tries hard to manage the performance impact of these.
     99 //
    100 // ------------------------------------------
    101 // To alleviate the performance cost of pointer-chasing:
    102 //    - Prefer non-pointer values in a struct field
    103 //    - Refer to these directly within helper classes
    104 //      e.g. json.go refers directly to d.d.decRd
    105 //
    106 // We made the changes to embed En/Decoder in en/decDriver,
    107 // but we had to reference the fields explicitly (as opposed to going through a function)
    108 // to get the better performance that we were looking for.
    109 // For example, we explicitly call d.d.decRd.fn() instead of d.d.r().fn().
    110 //
    111 // ------------------------------------------
    112 // Bounds Checking
    113 //    - Allow bytesDecReader to incur "bounds check error", and
    114 //      recover that as an io.EOF.
    115 //      This allows the bounds check branch to always be taken by the branch predictor,
    116 //      giving better performance (in theory), while ensuring that the code is shorter.
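        //
        // A sketch of the idea (field names assumed; recovery happens once at the top
        // level, not per call, so the hot path stays guard-free):
        //
        //	func (z *bytesDecReader) readn1() uint8 {
        //		z.c++
        //		return z.b[z.c-1] // no "if z.c > len(z.b)" guard; an over-read panics,
        //		                  // and the top-level recover turns that into io.EOF
        //	}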
    117 //
    118 // ------------------------------------------
    119 // Escape Analysis
    120 //    - Prefer to return non-pointers if the value is used right away.
    121 //      Newly allocated values returned as pointers will be heap-allocated as they escape.
    122 //
    123 // Prefer functions and methods that
    124 //    - take no parameters and
    125 //    - return no results and
    126 //    - do not allocate.
    127 // These are optimized by the runtime.
    128 // For example, in json, we have dedicated functions for ReadMapElemKey, etc
    129 // which do not delegate to readDelim, as readDelim takes a parameter.
    130 // The difference in runtime was as much as 5%.
    131 //
    132 // ------------------------------------------
    133 // Handling Nil
    134 //   - In dynamic (reflection) mode, decodeValue and encodeValue handle nil at the top
    135 //   - Consequently, methods used with them as a parent in the chain e.g. kXXX
    136 //     methods do not handle nil.
    137 //   - Fastpath methods also do not handle nil.
    138 //     The switch called in (en|de)code(...) handles it so the dependent calls don't have to.
    139 //   - codecgen will handle nil before calling into the library for further work also.
    140 //
    141 // ------------------------------------------
    142 // Passing reflect.Kind to functions that take a reflect.Value
    143 //   - Note that reflect.Value.Kind() is very cheap, as it's fundamentally a bitwise AND of 2 numbers
    144 //
    145 // ------------------------------------------
    146 // Transient values during decoding
    147 //
    148 // With reflection, the stack is not used. Consequently, values which may be stack-allocated in
    149 // normal use will cause a heap allocation when using reflection.
    150 //
    151 // There are cases where we know that a value is transient, and we just need to decode into it
    152 // temporarily so we can use its value right away for something else.
    153 //
    154 // In these situations, we can elide the heap allocation by being deliberate with use of a pre-cached
    155 // scratch memory or scratch value.
    156 //
    157 // We use this for situations:
    158 // - decode into a temp value x, and then set x into an interface
    159 // - decode into a temp value, for use as a map key, to look up a map value
    160 // - decode into a temp value, for use as a map value, to set into a map
    161 // - decode into a temp value, for sending into a channel
    162 //
    163 // By definition, Transient values are NEVER pointer-shaped values,
    164 // like pointer, func, map, chan. Using transient for pointer-shaped values
    165 // can lead to data corruption when GC tries to follow what it saw as a pointer at one point.
    166 //
    167 // In general, transient values are values which can be decoded as an atomic value
    168 // using a single call to the decDriver. This naturally includes bool or numeric types.
    169 //
    170 // Note that some values which "contain" pointers, specifically string and slice,
    171 // can also be transient. In the case of string, it is decoded as an atomic value.
    172 // In the case of a slice, decoding into its elements always uses an addressable
    173 // value in memory, i.e. we grow the slice, and then decode directly into the memory
    174 // address corresponding to that index in the slice.
    175 //
    176 // To handle these string and slice values, we have to use a scratch value
    177 // which has the same shape of a string or slice.
    178 //
    179 // Consequently, the full range of types which can be transient is:
    180 // - numbers
    181 // - bool
    182 // - string
    183 // - slice
    184 //
    185 // but for string and slice, we MUST use a scratch space with that element
    186 // being defined as an unsafe.Pointer to start with.
    187 //
    188 // We have to be careful with maps. Because we iterate map keys and values during a range,
    189 // we must have 2 variants of the scratch space/value for maps and keys separately.
    190 //
    191 // These are the TransientAddrK and TransientAddr2K methods of decPerType.
    192 
    193 import (
    194 	"encoding"
    195 	"encoding/binary"
    196 	"errors"
    197 	"fmt"
    198 	"io"
    199 	"math"
    200 	"reflect"
    201 	"runtime"
    202 	"sort"
    203 	"strconv"
    204 	"strings"
    205 	"sync"
    206 	"sync/atomic"
    207 	"time"
    208 	"unicode/utf8"
    209 )
    210 
    211 // if debugging is true, then
    212 //   - within Encode/Decode, do not recover from panics
    213 //   - etc
    214 //
    215 // Note: Negative tests that check for errors will fail, so only use this
    216 // when debugging, and run only one test at a time preferably.
    217 //
    218 // Note: RPC tests depend on getting the error from an Encode/Decode call.
    219 // Consequently, they will always fail if debugging = true.
    220 const debugging = false
    221 
    222 const (
    223 	// containerLenUnknown is the length returned from Read(Map|Array)Len
    224 	// when a format doesn't know the length a priori.
    225 	// For example, json doesn't pre-determine the length of a container (sequence/map).
    226 	containerLenUnknown = -1
    227 
    228 	// containerLenNil is the length returned from Read(Map|Array)Len
    229 	// when a 'nil' was encountered in the stream.
    230 	containerLenNil = math.MinInt32
    231 
    232 	// [N]byte is handled by converting to []byte first,
    233 	// and sending to the dedicated fast-path function for []byte.
    234 	//
    235 	// Code exists in case our understanding is wrong.
    236 	// Keep the defensive code behind this flag, so we can remove/hide it if needed.
    237 	// For now, we enable the defensive code (ie set it to true).
    238 	handleBytesWithinKArray = true
    239 
    240 	// Support encoding.(Binary|Text)(Unm|M)arshaler.
    241 	// This constant flag will enable or disable it.
    242 	supportMarshalInterfaces = true
    243 
    244 	// bytesFreeListNoCache is used for debugging, when we want to skip using a cache of []byte.
    245 	bytesFreeListNoCache = false
    246 
    247 	// size of the cache line: defaulting to the value for archs amd64, arm64, 386.
    248 	// should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
    249 	cacheLineSize = 64
    250 
    251 	wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
    252 	wordSize     = wordSizeBits / 8
    253 
    254 	// MARKER: determines whether to skip calling fastpath(En|De)codeTypeSwitch.
    255 	// Calling the fastpath switch in encode() or decode() could be redundant,
    256 	// as we still have to introspect it again within fnLoad
    257 	// to determine the function to use for values of that type.
    258 	skipFastpathTypeSwitchInDirectCall = false
    259 )
    260 
    261 const cpu32Bit = ^uint(0)>>32 == 0
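        // A worked check of the bit-tricks above: on a 64-bit arch, ^uint(0) is 64 one-bits,
        // so ^uint(0)>>63 == 1 and wordSizeBits = 32<<1 = 64; on a 32-bit arch the shift
        // discards every bit (^uint(0)>>63 == 0), so wordSizeBits = 32<<0 = 32.
        // Likewise, ^uint(0)>>32 == 0 only when uint is 32 bits wide, making cpu32Bit true.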
    262 
    263 type rkind byte
    264 
    265 const (
    266 	rkindPtr    = rkind(reflect.Ptr)
    267 	rkindString = rkind(reflect.String)
    268 	rkindChan   = rkind(reflect.Chan)
    269 )
    270 
    271 type mapKeyFastKind uint8
    272 
    273 const (
    274 	mapKeyFastKind32 = iota + 1
    275 	mapKeyFastKind32ptr
    276 	mapKeyFastKind64
    277 	mapKeyFastKind64ptr
    278 	mapKeyFastKindStr
    279 )
    280 
    281 var (
    282 	// use a global mutex to ensure each Handle is initialized.
    283 	// We do this so we don't have to store the mutex directly in BasicHandle,
    284 	// allowing it to be shallow-copied.
    285 	handleInitMu sync.Mutex
    286 
    287 	must mustHdl
    288 	halt panicHdl
    289 
    290 	digitCharBitset      bitset256
    291 	numCharBitset        bitset256
    292 	whitespaceCharBitset bitset256
    293 	asciiAlphaNumBitset  bitset256
    294 
    295 	// numCharWithExpBitset64 bitset64
    296 	// numCharNoExpBitset64   bitset64
    297 	// whitespaceCharBitset64 bitset64
    298 	//
    299 	// // hasptrBitset sets bit for all kinds which always have internal pointers
    300 	// hasptrBitset bitset32
    301 
    302 	// refBitset sets bit for all kinds which are direct internal references
    303 	refBitset bitset32
    304 
    305 	// isnilBitset sets bit for all kinds which can be compared to nil
    306 	isnilBitset bitset32
    307 
    308 	// numBoolBitset sets bit for all number and bool kinds
    309 	numBoolBitset bitset32
    310 
    311 	// numBoolStrSliceBitset sets bits for all kinds which are numbers, bool, strings and slices
    312 	numBoolStrSliceBitset bitset32
    313 
    314 	// scalarBitset sets bit for all kinds which are scalars/primitives and thus immutable
    315 	scalarBitset bitset32
    316 
    317 	mapKeyFastKindVals [32]mapKeyFastKind
    318 
    319 	// codecgen is set to true by codecgen, so that tests, etc can use this information as needed.
    320 	codecgen bool
    321 
    322 	oneByteArr    [1]byte
    323 	zeroByteSlice = oneByteArr[:0:0]
    324 
    325 	eofReader devNullReader
    326 )
    327 
    328 var (
    329 	errMapTypeNotMapKind     = errors.New("MapType MUST be of Map Kind")
    330 	errSliceTypeNotSliceKind = errors.New("SliceType MUST be of Slice Kind")
    331 
    332 	errExtFnWriteExtUnsupported   = errors.New("BytesExt.WriteExt is not supported")
    333 	errExtFnReadExtUnsupported    = errors.New("BytesExt.ReadExt is not supported")
    334 	errExtFnConvertExtUnsupported = errors.New("InterfaceExt.ConvertExt is not supported")
    335 	errExtFnUpdateExtUnsupported  = errors.New("InterfaceExt.UpdateExt is not supported")
    336 
    337 	errPanicUndefined = errors.New("panic: undefined error")
    338 
    339 	errHandleInited = errors.New("cannot modify initialized Handle")
    340 
    341 	errNoFormatHandle = errors.New("no handle (cannot identify format)")
    342 )
    343 
    344 var pool4tiload = sync.Pool{
    345 	New: func() interface{} {
    346 		return &typeInfoLoad{
    347 			etypes:   make([]uintptr, 0, 4),
    348 			sfis:     make([]structFieldInfo, 0, 4),
    349 			sfiNames: make(map[string]uint16, 4),
    350 		}
    351 	},
    352 }
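        // A minimal usage sketch for the pool above (standard sync.Pool pattern, assumed
        // to mirror how the typeInfo load path borrows and returns the transient object):
        //
        //	tl := pool4tiload.Get().(*typeInfoLoad)
        //	tl.reset()
        //	// ... populate tl.etypes, tl.sfis, tl.sfiNames while loading a typeInfo ...
        //	pool4tiload.Put(tl)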
    353 
    354 func init() {
    355 	xx := func(f mapKeyFastKind, k ...reflect.Kind) {
    356 		for _, v := range k {
    357 			mapKeyFastKindVals[byte(v)&31] = f // 'v & 31' is equivalent to 'v % 32'
    358 		}
    359 	}
    360 
    361 	var f mapKeyFastKind
    362 
    363 	f = mapKeyFastKind64
    364 	if wordSizeBits == 32 {
    365 		f = mapKeyFastKind32
    366 	}
    367 	xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
    368 
    369 	f = mapKeyFastKind64ptr
    370 	if wordSizeBits == 32 {
    371 		f = mapKeyFastKind32ptr
    372 	}
    373 	xx(f, reflect.Ptr)
    374 
    375 	xx(mapKeyFastKindStr, reflect.String)
    376 	xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
    377 	xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
    378 
    379 	numBoolBitset.
    380 		set(byte(reflect.Bool)).
    381 		set(byte(reflect.Int)).
    382 		set(byte(reflect.Int8)).
    383 		set(byte(reflect.Int16)).
    384 		set(byte(reflect.Int32)).
    385 		set(byte(reflect.Int64)).
    386 		set(byte(reflect.Uint)).
    387 		set(byte(reflect.Uint8)).
    388 		set(byte(reflect.Uint16)).
    389 		set(byte(reflect.Uint32)).
    390 		set(byte(reflect.Uint64)).
    391 		set(byte(reflect.Uintptr)).
    392 		set(byte(reflect.Float32)).
    393 		set(byte(reflect.Float64)).
    394 		set(byte(reflect.Complex64)).
    395 		set(byte(reflect.Complex128))
    396 
    397 	numBoolStrSliceBitset = numBoolBitset
    398 
    399 	numBoolStrSliceBitset.
    400 		set(byte(reflect.String)).
    401 		set(byte(reflect.Slice))
    402 
    403 	scalarBitset = numBoolBitset
    404 
    405 	scalarBitset.
    406 		set(byte(reflect.String))
    407 
    408 	// MARKER: reflect.Array is not a scalar, as its contents can be modified.
    409 
    410 	refBitset.
    411 		set(byte(reflect.Map)).
    412 		set(byte(reflect.Ptr)).
    413 		set(byte(reflect.Func)).
    414 		set(byte(reflect.Chan)).
    415 		set(byte(reflect.UnsafePointer))
    416 
    417 	isnilBitset = refBitset
    418 
    419 	isnilBitset.
    420 		set(byte(reflect.Interface)).
    421 		set(byte(reflect.Slice))
    422 
    423 	// hasptrBitset = isnilBitset
    424 	//
    425 	// hasptrBitset.
    426 	// 	set(byte(reflect.String))
    427 
    428 	for i := byte(0); i <= utf8.RuneSelf; i++ {
    429 		if (i >= '0' && i <= '9') || (i >= 'a' && i <= 'z') || (i >= 'A' && i <= 'Z') {
    430 			asciiAlphaNumBitset.set(i)
    431 		}
    432 		switch i {
    433 		case ' ', '\t', '\r', '\n':
    434 			whitespaceCharBitset.set(i)
    435 		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
    436 			digitCharBitset.set(i)
    437 			numCharBitset.set(i)
    438 		case '.', '+', '-':
    439 			numCharBitset.set(i)
    440 		case 'e', 'E':
    441 			numCharBitset.set(i)
    442 		}
    443 	}
    444 }
    445 
    446 // driverStateManager supports the runtime state of an (enc|dec)Driver.
    447 //
    448 // During a side(En|De)code call, we can capture the state, reset it,
    449 // and then restore it later to continue the primary encoding/decoding.
    450 type driverStateManager interface {
    451 	resetState()
    452 	captureState() interface{}
    453 	restoreState(state interface{})
    454 }
    455 
    456 type bdAndBdread struct {
    457 	bdRead bool
    458 	bd     byte
    459 }
    460 
    461 func (x bdAndBdread) captureState() interface{}   { return x }
    462 func (x *bdAndBdread) resetState()                { x.bd, x.bdRead = 0, false }
    463 func (x *bdAndBdread) reset()                     { x.resetState() }
    464 func (x *bdAndBdread) restoreState(v interface{}) { *x = v.(bdAndBdread) }
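        // A sketch of how the interface above supports a side (En|De)code call (driver
        // variable name assumed): park the primary state, run the nested work, then resume.
        //
        //	saved := drv.captureState()
        //	drv.resetState()
        //	// ... perform the side encode/decode ...
        //	drv.restoreState(saved)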
    465 
    466 type clsErr struct {
    467 	err    error // error on closing
    468 	closed bool  // is it closed?
    469 }
    470 
    471 type charEncoding uint8
    472 
    473 const (
    474 	_ charEncoding = iota // make 0 unset
    475 	cUTF8
    476 	cUTF16LE
    477 	cUTF16BE
    478 	cUTF32LE
    479 	cUTF32BE
    480 	// Deprecated: not a true char encoding value
    481 	cRAW charEncoding = 255
    482 )
    483 
    484 // valueType is the stream type
    485 type valueType uint8
    486 
    487 const (
    488 	valueTypeUnset valueType = iota
    489 	valueTypeNil
    490 	valueTypeInt
    491 	valueTypeUint
    492 	valueTypeFloat
    493 	valueTypeBool
    494 	valueTypeString
    495 	valueTypeSymbol
    496 	valueTypeBytes
    497 	valueTypeMap
    498 	valueTypeArray
    499 	valueTypeTime
    500 	valueTypeExt
    501 
    502 	// valueTypeInvalid = 0xff
    503 )
    504 
    505 var valueTypeStrings = [...]string{
    506 	"Unset",
    507 	"Nil",
    508 	"Int",
    509 	"Uint",
    510 	"Float",
    511 	"Bool",
    512 	"String",
    513 	"Symbol",
    514 	"Bytes",
    515 	"Map",
    516 	"Array",
    517 	"Timestamp",
    518 	"Ext",
    519 }
    520 
    521 func (x valueType) String() string {
    522 	if int(x) < len(valueTypeStrings) {
    523 		return valueTypeStrings[x]
    524 	}
    525 	return strconv.FormatInt(int64(x), 10)
    526 }
    527 
    528 // note that containerMapStart and containerArrayStart are not sent.
    529 // This is because the ReadXXXStart and EncodeXXXStart calls already handle these.
    530 type containerState uint8
    531 
    532 const (
    533 	_ containerState = iota
    534 
    535 	containerMapStart
    536 	containerMapKey
    537 	containerMapValue
    538 	containerMapEnd
    539 	containerArrayStart
    540 	containerArrayElem
    541 	containerArrayEnd
    542 )
    543 
    544 // do not recurse if a containing type refers to an embedded type
    545 // which refers back to its containing type (via a pointer).
    546 // The second time this back-reference happens, break out,
    547 // so as not to cause an infinite loop.
    548 const rgetMaxRecursion = 2
    549 
    550 // fauxUnion is used to keep track of the primitives decoded.
    551 //
    552 // Without it, we would have to decode each primitive and wrap it
    553 // in an interface{}, causing an allocation.
    554 // In this model, the primitives are decoded in a "pseudo-atomic" fashion,
    555 // so we can rest assured that no other decoding happens while these
    556 // primitives are being decoded.
    557 //
    558 // maps and arrays are not handled by this mechanism.
    559 type fauxUnion struct {
    560 	// r RawExt // used for RawExt, uint, []byte.
    561 
    562 	// primitives below
    563 	u uint64
    564 	i int64
    565 	f float64
    566 	l []byte
    567 	s string
    568 
    569 	// ---- cpu cache line boundary?
    570 	t time.Time
    571 	b bool
    572 
    573 	// state
    574 	v valueType
    575 }
    576 
    577 // typeInfoLoad is a transient object used while loading up a typeInfo.
    578 type typeInfoLoad struct {
    579 	etypes   []uintptr
    580 	sfis     []structFieldInfo
    581 	sfiNames map[string]uint16
    582 }
    583 
    584 func (x *typeInfoLoad) reset() {
    585 	x.etypes = x.etypes[:0]
    586 	x.sfis = x.sfis[:0]
    587 	for k := range x.sfiNames { // optimized to zero the map
    588 		delete(x.sfiNames, k)
    589 	}
    590 }
    591 
    592 // mirror json.Marshaler and json.Unmarshaler here,
    593 // so we don't import the encoding/json package
    594 
    595 type jsonMarshaler interface {
    596 	MarshalJSON() ([]byte, error)
    597 }
    598 type jsonUnmarshaler interface {
    599 	UnmarshalJSON([]byte) error
    600 }
    601 
    602 type isZeroer interface {
    603 	IsZero() bool
    604 }
    605 
    606 type isCodecEmptyer interface {
    607 	IsCodecEmpty() bool
    608 }
    609 
    610 type codecError struct {
    611 	err    error
    612 	name   string
    613 	pos    int
    614 	encode bool
    615 }
    616 
    617 func (e *codecError) Cause() error {
    618 	return e.err
    619 }
    620 
    621 func (e *codecError) Unwrap() error {
    622 	return e.err
    623 }
    624 
    625 func (e *codecError) Error() string {
    626 	if e.encode {
    627 		return fmt.Sprintf("%s encode error: %v", e.name, e.err)
    628 	}
    629 	return fmt.Sprintf("%s decode error [pos %d]: %v", e.name, e.pos, e.err)
    630 }
    631 
    632 func wrapCodecErr(in error, name string, numbytesread int, encode bool) (out error) {
    633 	x, ok := in.(*codecError)
    634 	if ok && x.pos == numbytesread && x.name == name && x.encode == encode {
    635 		return in
    636 	}
    637 	return &codecError{in, name, numbytesread, encode}
    638 }
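        // Since codecError implements Unwrap (and Cause), callers can inspect the wrapped
        // error with the standard errors helpers, e.g. this illustrative sketch:
        //
        //	err := NewDecoderBytes(b, h).Decode(&v)
        //	if errors.Is(err, io.ErrUnexpectedEOF) {
        //		// the underlying stream was truncated
        //	}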
    639 
    640 var (
    641 	bigen bigenHelper
    642 
    643 	bigenstd = binary.BigEndian
    644 
    645 	structInfoFieldName = "_struct"
    646 
    647 	mapStrIntfTyp  = reflect.TypeOf(map[string]interface{}(nil))
    648 	mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
    649 	intfSliceTyp   = reflect.TypeOf([]interface{}(nil))
    650 	intfTyp        = intfSliceTyp.Elem()
    651 
    652 	reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
    653 
    654 	stringTyp     = reflect.TypeOf("")
    655 	timeTyp       = reflect.TypeOf(time.Time{})
    656 	rawExtTyp     = reflect.TypeOf(RawExt{})
    657 	rawTyp        = reflect.TypeOf(Raw{})
    658 	uintptrTyp    = reflect.TypeOf(uintptr(0))
    659 	uint8Typ      = reflect.TypeOf(uint8(0))
    660 	uint8SliceTyp = reflect.TypeOf([]uint8(nil))
    661 	uintTyp       = reflect.TypeOf(uint(0))
    662 	intTyp        = reflect.TypeOf(int(0))
    663 
    664 	mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
    665 
    666 	binaryMarshalerTyp   = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
    667 	binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
    668 
    669 	textMarshalerTyp   = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
    670 	textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
    671 
    672 	jsonMarshalerTyp   = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
    673 	jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
    674 
    675 	selferTyp                = reflect.TypeOf((*Selfer)(nil)).Elem()
    676 	missingFielderTyp        = reflect.TypeOf((*MissingFielder)(nil)).Elem()
    677 	iszeroTyp                = reflect.TypeOf((*isZeroer)(nil)).Elem()
    678 	isCodecEmptyerTyp        = reflect.TypeOf((*isCodecEmptyer)(nil)).Elem()
    679 	isSelferViaCodecgenerTyp = reflect.TypeOf((*isSelferViaCodecgener)(nil)).Elem()
    680 
    681 	uint8TypId      = rt2id(uint8Typ)
    682 	uint8SliceTypId = rt2id(uint8SliceTyp)
    683 	rawExtTypId     = rt2id(rawExtTyp)
    684 	rawTypId        = rt2id(rawTyp)
    685 	intfTypId       = rt2id(intfTyp)
    686 	timeTypId       = rt2id(timeTyp)
    687 	stringTypId     = rt2id(stringTyp)
    688 
    689 	mapStrIntfTypId  = rt2id(mapStrIntfTyp)
    690 	mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
    691 	intfSliceTypId   = rt2id(intfSliceTyp)
    692 	// mapBySliceTypId  = rt2id(mapBySliceTyp)
    693 
    694 	intBitsize  = uint8(intTyp.Bits())
    695 	uintBitsize = uint8(uintTyp.Bits())
    696 
    697 	// bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
    698 	bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
    699 
    700 	chkOvf checkOverflow
    701 )
    702 
    703 var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
    704 
    705 // SelfExt is a sentinel extension signifying that types
    706 // registered with it SHOULD be encoded and decoded
    707 // based on the native mode of the format.
    708 //
    709 // This allows users to define a tag for an extension,
    710 // but signify that the types should be encoded/decoded as the native encoding.
    711 // This way, users need not also define how to encode or decode the extension.
    712 var SelfExt = &extFailWrapper{}
    713 
    714 // Selfer defines methods by which a value can encode or decode itself.
    715 //
    716 // Any type which implements Selfer will be able to encode or decode itself.
    717 // Consequently, during (en|de)code, this takes precedence over
    718 // (text|binary)(M|Unm)arshal or extension support.
    719 //
    720 // By definition, it is not allowed for a Selfer to directly call Encode or Decode on itself.
    721 // If that is done, Encode/Decode will rightfully fail with a Stack Overflow style error.
    722 // For example, the snippet below will cause such an error.
    723 //
    724 //	type testSelferRecur struct{}
    725 //	func (s *testSelferRecur) CodecEncodeSelf(e *Encoder) { e.MustEncode(s) }
    726 //	func (s *testSelferRecur) CodecDecodeSelf(d *Decoder) { d.MustDecode(s) }
    727 //
    728 // Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
    729 // This is because, during each decode, we first check whether the next set of bytes
    730 // represent nil, and if so, we just set the value to nil.
    731 type Selfer interface {
    732 	CodecEncodeSelf(*Encoder)
    733 	CodecDecodeSelf(*Decoder)
    734 }
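        // A valid Selfer, by contrast, encodes/decodes its fields rather than the receiver
        // itself, as in this sketch (type and field names assumed):
        //
        //	type wrapped struct{ N int }
        //	func (w *wrapped) CodecEncodeSelf(e *Encoder) { e.MustEncode(w.N) }
        //	func (w *wrapped) CodecDecodeSelf(d *Decoder) { d.MustDecode(&w.N) }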
    735 
    736 type isSelferViaCodecgener interface {
    737 	codecSelferViaCodecgen()
    738 }
    739 
    740 // MissingFielder defines the interface allowing structs to internally decode or encode
    741 // values which do not map to struct fields.
    742 //
    743 // We expect that this interface is bound to a pointer type (so the mutation function works).
    744 //
    745 // A use-case is if a version of a type unexports a field, but you want compatibility between
    746 // both versions during encoding and decoding.
    747 //
    748 // Note that the interface is completely ignored during codecgen.
    749 type MissingFielder interface {
    750 	// CodecMissingField is called to set a missing field and value pair.
    751 	//
    752 	// It returns true if the missing field was set on the struct.
    753 	CodecMissingField(field []byte, value interface{}) bool
    754 
    755 	// CodecMissingFields returns the set of fields which are not struct fields.
    756 	//
    757 	// Note that the returned map may be mutated by the caller.
    758 	CodecMissingFields() map[string]interface{}
    759 }
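        // A sketch of an implementation (names assumed): unknown stream fields are parked
        // in an auxiliary map on decode, and handed back to the encoder via CodecMissingFields.
        //
        //	type T struct {
        //		Name  string
        //		extra map[string]interface{}
        //	}
        //	func (t *T) CodecMissingField(field []byte, value interface{}) bool {
        //		if t.extra == nil {
        //			t.extra = make(map[string]interface{})
        //		}
        //		t.extra[string(field)] = value
        //		return true
        //	}
        //	func (t *T) CodecMissingFields() map[string]interface{} { return t.extra }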
    760 
    761 // MapBySlice is a tag interface that denotes the slice or array value should encode as a map
    762 // in the stream, and can be decoded from a map in the stream.
    763 //
    764 // The slice or array must contain a sequence of key-value pairs.
    765 // The length of the slice or array must be even (fully divisible by 2).
    766 //
    767 // This affords storing a map in a specific sequence in the stream.
    768 //
    769 // Example usage:
    770 //
    771 //	type T1 []string         // or []int or []Point or any other "slice" type
    772 //	func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
    773 //	type T2 struct { KeyValues T1 }
    774 //
    775 //	var kvs = []string{"one", "1", "two", "2", "three", "3"}
    776 //	var v2 = T2{ KeyValues: T1(kvs) }
    777 //	// v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
    778 //
    779 // The support of MapBySlice affords the following:
    780 //   - A slice or array type which implements MapBySlice will be encoded as a map
    781 //   - A slice can be decoded from a map in the stream
    782 type MapBySlice interface {
    783 	MapBySlice()
    784 }
    785 
    786 // basicHandleRuntimeState holds onto all BasicHandle runtime and cached config information.
    787 //
    788 // Storing this outside BasicHandle allows us to create shallow copies of a Handle,
    789 // which can be used e.g. when we need to modify config fields temporarily.
    790 // Shallow copies are used within tests, so we can modify some config fields for a test
    791 // temporarily when running tests in parallel, without running the risk that a test
    792 // executing in parallel with other tests sees transient modified values not meant for it.
    793 type basicHandleRuntimeState struct {
    794 	// these are used during runtime.
    795 	// At init time, they should have nothing in them.
    796 	rtidFns      atomicRtidFnSlice
    797 	rtidFnsNoExt atomicRtidFnSlice
    798 
    799 	// Note: basicHandleRuntimeState is not comparable, due to these slices here (extHandle, intf2impls).
    800 	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
    801 	// These slices are used all the time, so keep as slices (not pointers).
    802 
    803 	extHandle
    804 
    805 	intf2impls
    806 
    807 	mu sync.Mutex
    808 
    809 	jsonHandle   bool
    810 	binaryHandle bool
    811 
    812 	// timeBuiltin is initialized from TimeNotBuiltin, and used internally.
    813 	// once initialized, it cannot be changed, as the function for encoding/decoding time.Time
    814 	// will have been cached and the TimeNotBuiltin value will not be consulted thereafter.
    815 	timeBuiltin bool
    816 	_           bool // padding
    817 }
    818 
    819 // BasicHandle encapsulates the common options and extension functions.
    820 //
    821 // Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
    822 type BasicHandle struct {
    823 	// BasicHandle is always a part of a different type.
    824 	// It doesn't have to fit into its own cache lines.
    825 
    826 	// TypeInfos is used to get the type info for any type.
    827 	//
    828 	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
    829 	TypeInfos *TypeInfos
    830 
    831 	*basicHandleRuntimeState
    832 
    833 	// ---- cache line
    834 
    835 	DecodeOptions
    836 
    837 	// ---- cache line
    838 
    839 	EncodeOptions
    840 
    841 	RPCOptions
    842 
    843 	// TimeNotBuiltin configures whether time.Time should be treated as a builtin type.
    844 	//
    845 	// All Handlers should know how to encode/decode time.Time as part of the core
    846 	// format specification, or as a standard extension defined by the format.
    847 	//
    848 	// However, users can elect to handle time.Time as a custom extension, or via the
    849 	// standard library's encoding.Binary(M|Unm)arshaler or Text(M|Unm)arshaler interface.
    850 	// To elect this behavior, users can set TimeNotBuiltin=true.
    851 	//
    852 	// Note: Setting TimeNotBuiltin=true can be used to enable the legacy behavior
    853 	// (for Cbor and Msgpack), where time.Time was not a builtin supported type.
    854 	//
    855 	// Note: DO NOT CHANGE AFTER FIRST USE.
    856 	//
    857 	// Once a Handle has been initialized (used), do not modify this option. It will be ignored.
    858 	TimeNotBuiltin bool
    859 
    860 	// ExplicitRelease configures whether Release() is implicitly called after an encode or
    861 	// decode call.
    862 	//
    863 	// If you will hold onto an Encoder or Decoder for re-use, by calling Reset(...)
    864 	// on it or calling (Must)Encode repeatedly into a given []byte or io.Writer,
    865 	// then you do not want it to be implicitly closed after each Encode/Decode call.
    866 	// Doing so will unnecessarily return resources to the shared pool, only for you to
    867 	// grab them right after again to do another Encode/Decode call.
    868 	//
    869 	// Instead, you configure ExplicitRelease=true, and you explicitly call Release() when
    870 	// you are truly done.
    871 	//
    872 	// As an alternative, you can explicitly set a finalizer - so its resources
    873 	// are returned to the shared pool before it is garbage-collected. Do it as below:
    874 	//    runtime.SetFinalizer(e, (*Encoder).Release)
    875 	//    runtime.SetFinalizer(d, (*Decoder).Release)
    876 	//
    877 	// Deprecated: This is no longer used, as pools are only used for long-lived objects
    878 	// which are shared across goroutines.
    879 	// Setting this value has no effect. It is maintained for backward compatibility.
    880 	ExplicitRelease bool
    881 
    882 	// ---- cache line
    883 	inited uint32 // holds if inited, and also handle flags (binary encoding, json handler, etc)
    884 
    885 }
    886 
    887 // initHandle does a one-time initialization of the handle.
    888 // After this is run, do not modify the Handle, as some modifications are ignored
    889 // e.g. extensions, registered interfaces, TimeNotBuiltIn, etc
    890 func initHandle(hh Handle) {
    891 	x := hh.getBasicHandle()
    892 
    893 	// MARKER: We need to simulate once.Do, to ensure no data race within the block.
    894 	// Consequently, the code below would not work.
    895 	//
    896 	// if atomic.CompareAndSwapUint32(&x.inited, 0, 1) {
    897 	// 	x.be = hh.isBinary()
    898 	// 	x.js = hh.isJson
    899 	// 	x.n = hh.Name()[0]
    900 	// }
    901 
    902 	// simulate once.Do using our own stored flag and mutex as a CompareAndSwap
    903 	// is not sufficient, since a race condition can occur within the init(Handle) function.
    904 	// init is made noinline, so that this function can be inlined by its caller.
    905 	if atomic.LoadUint32(&x.inited) == 0 {
    906 		x.initHandle(hh)
    907 	}
    908 }
    909 
    910 func (x *BasicHandle) basicInit() {
    911 	x.rtidFns.store(nil)
    912 	x.rtidFnsNoExt.store(nil)
    913 	x.timeBuiltin = !x.TimeNotBuiltin
    914 }
    915 
    916 func (x *BasicHandle) init() {}
    917 
    918 func (x *BasicHandle) isInited() bool {
    919 	return atomic.LoadUint32(&x.inited) != 0
    920 }
    921 
    922 // clearInited: DANGEROUS - only use in testing, etc
    923 func (x *BasicHandle) clearInited() {
    924 	atomic.StoreUint32(&x.inited, 0)
    925 }
    926 
    927 // TimeBuiltin returns whether the out-of-the-box (builtin) time.Time support is used,
    928 // based on the initial configuration of TimeNotBuiltin.
    929 func (x *basicHandleRuntimeState) TimeBuiltin() bool {
    930 	return x.timeBuiltin
    931 }
    932 
    933 func (x *basicHandleRuntimeState) isJs() bool {
    934 	return x.jsonHandle
    935 }
    936 
    937 func (x *basicHandleRuntimeState) isBe() bool {
    938 	return x.binaryHandle
    939 }
    940 
    941 func (x *basicHandleRuntimeState) setExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
    942 	rk := rt.Kind()
    943 	for rk == reflect.Ptr {
    944 		rt = rt.Elem()
    945 		rk = rt.Kind()
    946 	}
    947 
    948 	if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
    949 		return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
    950 	}
    951 
    952 	rtid := rt2id(rt)
    953 	// handle all natively supported types appropriately, so they cannot have an extension.
    954 	// However, we do not return an error for these, as we do not document that.
    955 	// Instead, we silently treat as a no-op, and return.
    956 	switch rtid {
    957 	case rawTypId, rawExtTypId:
    958 		return
    959 	case timeTypId:
    960 		if x.timeBuiltin {
    961 			return
    962 		}
    963 	}
    964 
    965 	for i := range x.extHandle {
    966 		v := &x.extHandle[i]
    967 		if v.rtid == rtid {
    968 			v.tag, v.ext = tag, ext
    969 			return
    970 		}
    971 	}
    972 	rtidptr := rt2id(reflect.PtrTo(rt))
    973 	x.extHandle = append(x.extHandle, extTypeTagFn{rtid, rtidptr, rt, tag, ext})
    974 	return
    975 }
    976 
    977 // initHandle should be called only from codec.initHandle global function.
    978 // Make it uninlineable, as it is called at most once for each handle.
    979 //
    980 //go:noinline
    981 func (x *BasicHandle) initHandle(hh Handle) {
    982 	handleInitMu.Lock()
    983 	defer handleInitMu.Unlock() // use defer, as halt may panic below
    984 	if x.inited == 0 {
    985 		if x.basicHandleRuntimeState == nil {
    986 			x.basicHandleRuntimeState = new(basicHandleRuntimeState)
    987 		}
    988 		x.jsonHandle = hh.isJson()
    989 		x.binaryHandle = hh.isBinary()
    990 		// ensure MapType and SliceType are of correct type
    991 		if x.MapType != nil && x.MapType.Kind() != reflect.Map {
    992 			halt.onerror(errMapTypeNotMapKind)
    993 		}
    994 		if x.SliceType != nil && x.SliceType.Kind() != reflect.Slice {
    995 			halt.onerror(errSliceTypeNotSliceKind)
    996 		}
    997 		x.basicInit()
    998 		hh.init()
    999 		atomic.StoreUint32(&x.inited, 1)
   1000 	}
   1001 }
   1002 
   1003 func (x *BasicHandle) getBasicHandle() *BasicHandle {
   1004 	return x
   1005 }
   1006 
   1007 func (x *BasicHandle) typeInfos() *TypeInfos {
   1008 	if x.TypeInfos != nil {
   1009 		return x.TypeInfos
   1010 	}
   1011 	return defTypeInfos
   1012 }
   1013 
   1014 func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
   1015 	return x.typeInfos().get(rtid, rt)
   1016 }
   1017 
   1018 func findRtidFn(s []codecRtidFn, rtid uintptr) (i uint, fn *codecFn) {
   1019 	// binary search. adapted from sort/search.go.
   1020 	// Note: we use goto (instead of a for loop) so this can be inlined.
   1021 
   1022 	// h, i, j := 0, 0, len(s)
   1023 	var h uint // var h, i uint
   1024 	var j = uint(len(s))
   1025 LOOP:
   1026 	if i < j {
   1027 		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
   1028 		if s[h].rtid < rtid {
   1029 			i = h + 1
   1030 		} else {
   1031 			j = h
   1032 		}
   1033 		goto LOOP
   1034 	}
   1035 	if i < uint(len(s)) && s[i].rtid == rtid {
   1036 		fn = s[i].fn
   1037 	}
   1038 	return
   1039 }
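        // For reference, the goto-based loop above computes the same lower bound as this
        // (non-inlinable) sort.Search formulation:
        //
        //	i := uint(sort.Search(len(s), func(h int) bool { return s[h].rtid >= rtid }))
        //	if i < uint(len(s)) && s[i].rtid == rtid {
        //		fn = s[i].fn
        //	}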
   1040 
   1041 func (x *BasicHandle) fn(rt reflect.Type) (fn *codecFn) {
   1042 	return x.fnVia(rt, x.typeInfos(), &x.rtidFns, x.CheckCircularRef, true)
   1043 }
   1044 
   1045 func (x *BasicHandle) fnNoExt(rt reflect.Type) (fn *codecFn) {
   1046 	return x.fnVia(rt, x.typeInfos(), &x.rtidFnsNoExt, x.CheckCircularRef, false)
   1047 }
   1048 
   1049 func (x *basicHandleRuntimeState) fnVia(rt reflect.Type, tinfos *TypeInfos, fs *atomicRtidFnSlice, checkCircularRef, checkExt bool) (fn *codecFn) {
   1050 	rtid := rt2id(rt)
   1051 	sp := fs.load()
   1052 	if sp != nil {
   1053 		if _, fn = findRtidFn(sp, rtid); fn != nil {
   1054 			return
   1055 		}
   1056 	}
   1057 
   1058 	fn = x.fnLoad(rt, rtid, tinfos, checkCircularRef, checkExt)
   1059 	x.mu.Lock()
   1060 	sp = fs.load()
   1061 	// since this is an atomic load/store, we MUST use a different array each time,
   1062 	// else we have a data race when a store is happening simultaneously with a findRtidFn call.
   1063 	if sp == nil {
   1064 		sp = []codecRtidFn{{rtid, fn}}
   1065 		fs.store(sp)
   1066 	} else {
   1067 		idx, fn2 := findRtidFn(sp, rtid)
   1068 		if fn2 == nil {
   1069 			sp2 := make([]codecRtidFn, len(sp)+1)
   1070 			copy(sp2[idx+1:], sp[idx:])
   1071 			copy(sp2, sp[:idx])
   1072 			sp2[idx] = codecRtidFn{rtid, fn}
   1073 			fs.store(sp2)
   1074 		}
   1075 	}
   1076 	x.mu.Unlock()
   1077 	return
   1078 }
   1079 
   1080 func fnloadFastpathUnderlying(ti *typeInfo) (f *fastpathE, u reflect.Type) {
   1081 	var rtid uintptr
   1082 	var idx int
   1083 	rtid = rt2id(ti.fastpathUnderlying)
   1084 	idx = fastpathAvIndex(rtid)
   1085 	if idx == -1 {
   1086 		return
   1087 	}
   1088 	f = &fastpathAv[idx]
   1089 	if uint8(reflect.Array) == ti.kind {
   1090 		u = reflectArrayOf(ti.rt.Len(), ti.elem)
   1091 	} else {
   1092 		u = f.rt
   1093 	}
   1094 	return
   1095 }
   1096 
   1097 func (x *basicHandleRuntimeState) fnLoad(rt reflect.Type, rtid uintptr, tinfos *TypeInfos, checkCircularRef, checkExt bool) (fn *codecFn) {
   1098 	fn = new(codecFn)
   1099 	fi := &(fn.i)
   1100 	ti := tinfos.get(rtid, rt)
   1101 	fi.ti = ti
   1102 	rk := reflect.Kind(ti.kind)
   1103 
   1104 	// anything can be an extension except the built-in ones: time, raw and rawext.
   1105 	// ensure we check for these types first, then for an extension, before checking
   1106 	// whether it implements one of the pre-declared interfaces.
   1107 
   1108 	fi.addrDf = true
   1109 	// fi.addrEf = true
   1110 
   1111 	if rtid == timeTypId && x.timeBuiltin {
   1112 		fn.fe = (*Encoder).kTime
   1113 		fn.fd = (*Decoder).kTime
   1114 	} else if rtid == rawTypId {
   1115 		fn.fe = (*Encoder).raw
   1116 		fn.fd = (*Decoder).raw
   1117 	} else if rtid == rawExtTypId {
   1118 		fn.fe = (*Encoder).rawExt
   1119 		fn.fd = (*Decoder).rawExt
   1120 		fi.addrD = true
   1121 		fi.addrE = true
   1122 	} else if xfFn := x.getExt(rtid, checkExt); xfFn != nil {
   1123 		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
   1124 		fn.fe = (*Encoder).ext
   1125 		fn.fd = (*Decoder).ext
   1126 		fi.addrD = true
   1127 		if rk == reflect.Struct || rk == reflect.Array {
   1128 			fi.addrE = true
   1129 		}
   1130 	} else if (ti.flagSelfer || ti.flagSelferPtr) &&
   1131 		!(checkCircularRef && ti.flagSelferViaCodecgen && ti.kind == byte(reflect.Struct)) {
   1132 		// do not use Selfer generated by codecgen if it is a struct and CheckCircularRef=true
   1133 		fn.fe = (*Encoder).selferMarshal
   1134 		fn.fd = (*Decoder).selferUnmarshal
   1135 		fi.addrD = ti.flagSelferPtr
   1136 		fi.addrE = ti.flagSelferPtr
   1137 	} else if supportMarshalInterfaces && x.isBe() &&
   1138 		(ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr) &&
   1139 		(ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr) {
   1140 		fn.fe = (*Encoder).binaryMarshal
   1141 		fn.fd = (*Decoder).binaryUnmarshal
   1142 		fi.addrD = ti.flagBinaryUnmarshalerPtr
   1143 		fi.addrE = ti.flagBinaryMarshalerPtr
   1144 	} else if supportMarshalInterfaces && !x.isBe() && x.isJs() &&
   1145 		(ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr) &&
   1146 		(ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr) {
   1147 		// If JSON, we should check jsonMarshaler before textMarshaler
   1148 		fn.fe = (*Encoder).jsonMarshal
   1149 		fn.fd = (*Decoder).jsonUnmarshal
   1150 		fi.addrD = ti.flagJsonUnmarshalerPtr
   1151 		fi.addrE = ti.flagJsonMarshalerPtr
   1152 	} else if supportMarshalInterfaces && !x.isBe() &&
   1153 		(ti.flagTextMarshaler || ti.flagTextMarshalerPtr) &&
   1154 		(ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr) {
   1155 		fn.fe = (*Encoder).textMarshal
   1156 		fn.fd = (*Decoder).textUnmarshal
   1157 		fi.addrD = ti.flagTextUnmarshalerPtr
   1158 		fi.addrE = ti.flagTextMarshalerPtr
   1159 	} else {
   1160 		if fastpathEnabled && (rk == reflect.Map || rk == reflect.Slice || rk == reflect.Array) {
   1161 			// by default (without using unsafe),
   1162 			// if an array is not addressable, converting from an array to a slice
   1163 			// requires an allocation (see helper_not_unsafe.go: func rvGetSlice4Array).
   1164 			//
   1165 			// (Non-addressable arrays mostly occur as keys/values from a map).
   1166 			//
   1167 			// However, fastpath functions are mostly for slices of numbers or strings,
   1168 			// which are small by definition and thus allocation should be fast/cheap in time.
   1169 			//
   1170 			// Consequently, the value of doing this quick allocation to elide the overhead cost of
   1171 			// non-optimized (not-unsafe) reflection is a fair price.
   1172 			var rtid2 uintptr
   1173 			if !ti.flagHasPkgPath { // un-named type (slice or map or array)
   1174 				rtid2 = rtid
   1175 				if rk == reflect.Array {
   1176 					rtid2 = rt2id(ti.key) // ti.key for arrays = reflect.SliceOf(ti.elem)
   1177 				}
   1178 				if idx := fastpathAvIndex(rtid2); idx != -1 {
   1179 					fn.fe = fastpathAv[idx].encfn
   1180 					fn.fd = fastpathAv[idx].decfn
   1181 					fi.addrD = true
   1182 					fi.addrDf = false
   1183 					if rk == reflect.Array {
   1184 						fi.addrD = false // decode directly into array value (slice made from it)
   1185 					}
   1186 				}
   1187 			} else { // named type (with underlying type of map or slice or array)
   1188 				// try to use mapping for underlying type
   1189 				xfe, xrt := fnloadFastpathUnderlying(ti)
   1190 				if xfe != nil {
   1191 					xfnf := xfe.encfn
   1192 					xfnf2 := xfe.decfn
   1193 					if rk == reflect.Array {
   1194 						fi.addrD = false // decode directly into array value (slice made from it)
   1195 						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
   1196 							xfnf2(d, xf, rvConvert(xrv, xrt))
   1197 						}
   1198 					} else {
   1199 						fi.addrD = true
   1200 						fi.addrDf = false // meaning it can be an address(ptr) or a value
   1201 						xptr2rt := reflect.PtrTo(xrt)
   1202 						fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
   1203 							if xrv.Kind() == reflect.Ptr {
   1204 								xfnf2(d, xf, rvConvert(xrv, xptr2rt))
   1205 							} else {
   1206 								xfnf2(d, xf, rvConvert(xrv, xrt))
   1207 							}
   1208 						}
   1209 					}
   1210 					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
   1211 						xfnf(e, xf, rvConvert(xrv, xrt))
   1212 					}
   1213 				}
   1214 			}
   1215 		}
   1216 		if fn.fe == nil && fn.fd == nil {
   1217 			switch rk {
   1218 			case reflect.Bool:
   1219 				fn.fe = (*Encoder).kBool
   1220 				fn.fd = (*Decoder).kBool
   1221 			case reflect.String:
   1222 				// Do not use different functions based on StringToRaw option, as that will statically
   1223 				// set the function for a string type, and if the Handle is modified thereafter,
   1224 				// behaviour is non-deterministic
   1225 				// i.e. DO NOT DO:
   1226 				//   if x.StringToRaw {
   1227 				//   	fn.fe = (*Encoder).kStringToRaw
   1228 				//   } else {
   1229 				//   	fn.fe = (*Encoder).kStringEnc
   1230 				//   }
   1231 
   1232 				fn.fe = (*Encoder).kString
   1233 				fn.fd = (*Decoder).kString
   1234 			case reflect.Int:
   1235 				fn.fd = (*Decoder).kInt
   1236 				fn.fe = (*Encoder).kInt
   1237 			case reflect.Int8:
   1238 				fn.fe = (*Encoder).kInt8
   1239 				fn.fd = (*Decoder).kInt8
   1240 			case reflect.Int16:
   1241 				fn.fe = (*Encoder).kInt16
   1242 				fn.fd = (*Decoder).kInt16
   1243 			case reflect.Int32:
   1244 				fn.fe = (*Encoder).kInt32
   1245 				fn.fd = (*Decoder).kInt32
   1246 			case reflect.Int64:
   1247 				fn.fe = (*Encoder).kInt64
   1248 				fn.fd = (*Decoder).kInt64
   1249 			case reflect.Uint:
   1250 				fn.fd = (*Decoder).kUint
   1251 				fn.fe = (*Encoder).kUint
   1252 			case reflect.Uint8:
   1253 				fn.fe = (*Encoder).kUint8
   1254 				fn.fd = (*Decoder).kUint8
   1255 			case reflect.Uint16:
   1256 				fn.fe = (*Encoder).kUint16
   1257 				fn.fd = (*Decoder).kUint16
   1258 			case reflect.Uint32:
   1259 				fn.fe = (*Encoder).kUint32
   1260 				fn.fd = (*Decoder).kUint32
   1261 			case reflect.Uint64:
   1262 				fn.fe = (*Encoder).kUint64
   1263 				fn.fd = (*Decoder).kUint64
   1264 			case reflect.Uintptr:
   1265 				fn.fe = (*Encoder).kUintptr
   1266 				fn.fd = (*Decoder).kUintptr
   1267 			case reflect.Float32:
   1268 				fn.fe = (*Encoder).kFloat32
   1269 				fn.fd = (*Decoder).kFloat32
   1270 			case reflect.Float64:
   1271 				fn.fe = (*Encoder).kFloat64
   1272 				fn.fd = (*Decoder).kFloat64
   1273 			case reflect.Complex64:
   1274 				fn.fe = (*Encoder).kComplex64
   1275 				fn.fd = (*Decoder).kComplex64
   1276 			case reflect.Complex128:
   1277 				fn.fe = (*Encoder).kComplex128
   1278 				fn.fd = (*Decoder).kComplex128
   1279 			case reflect.Chan:
   1280 				fn.fe = (*Encoder).kChan
   1281 				fn.fd = (*Decoder).kChan
   1282 			case reflect.Slice:
   1283 				fn.fe = (*Encoder).kSlice
   1284 				fn.fd = (*Decoder).kSlice
   1285 			case reflect.Array:
   1286 				fi.addrD = false // decode directly into array value (slice made from it)
   1287 				fn.fe = (*Encoder).kArray
   1288 				fn.fd = (*Decoder).kArray
   1289 			case reflect.Struct:
   1290 				if ti.anyOmitEmpty ||
   1291 					ti.flagMissingFielder ||
   1292 					ti.flagMissingFielderPtr {
   1293 					fn.fe = (*Encoder).kStruct
   1294 				} else {
   1295 					fn.fe = (*Encoder).kStructNoOmitempty
   1296 				}
   1297 				fn.fd = (*Decoder).kStruct
   1298 			case reflect.Map:
   1299 				fn.fe = (*Encoder).kMap
   1300 				fn.fd = (*Decoder).kMap
   1301 			case reflect.Interface:
   1302 			// encode: reflect.Interface is handled already by preEncodeValue
   1303 				fn.fd = (*Decoder).kInterface
   1304 				fn.fe = (*Encoder).kErr
   1305 			default:
   1306 				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
   1307 				fn.fe = (*Encoder).kErr
   1308 				fn.fd = (*Decoder).kErr
   1309 			}
   1310 		}
   1311 	}
   1312 	return
   1313 }
   1314 
   1315 // Handle defines a specific encoding format. It also stores any runtime state
   1316 // used during an Encoding or Decoding session e.g. stored state about Types, etc.
   1317 //
   1318 // Once a handle is configured, it can be shared across multiple Encoders and Decoders.
   1319 //
   1320 // Note that a Handle is NOT safe for concurrent modification.
   1321 //
   1322 // A Handle also should not be modified after it is configured and has
   1323 // been used at least once. This is because stored state may be out of sync with the
   1324 // new configuration, and a data race can occur when multiple goroutines access it.
   1325 // i.e. multiple Encoders or Decoders in different goroutines.
   1326 //
   1327 // Consequently, the typical usage model is that a Handle is pre-configured
   1328 // before first time use, and not modified while in use.
   1329 // Such a pre-configured Handle is safe for concurrent access.
   1330 type Handle interface {
   1331 	Name() string
   1332 	getBasicHandle() *BasicHandle
   1333 	newEncDriver() encDriver
   1334 	newDecDriver() decDriver
   1335 	isBinary() bool
   1336 	isJson() bool // json is special for now, so track it
   1337 	// desc describes the current byte descriptor, or returns "unknown[XXX]" if not understood.
   1338 	desc(bd byte) string
   1339 	// init initializes the handle based on handle-specific info (beyond what is in BasicHandle)
   1340 	init()
   1341 }
   1342 
   1343 // Raw represents raw formatted bytes.
   1344 // We "blindly" store it during encode and retrieve the raw bytes during decode.
   1345 // Note: it is dangerous during encode, so we may gate the behaviour
   1346 // behind an Encode flag which must be explicitly set.
   1347 type Raw []byte
   1348 
   1349 // RawExt represents raw unprocessed extension data.
   1350 // Some codecs will decode extension data as a *RawExt
   1351 // if there is no registered extension for the tag.
   1352 //
   1353 // Only one of Data or Value is nil.
   1354 // If Data is nil, then the content of the RawExt is in the Value.
   1355 type RawExt struct {
   1356 	Tag uint64
   1357 	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
   1358 	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
   1359 	Data []byte
   1360 	// Value represents the extension, if Data is nil.
   1361 	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
   1362 	// custom serialization of the types.
   1363 	Value interface{}
   1364 }
   1365 
   1366 func (re *RawExt) setData(xbs []byte, zerocopy bool) {
   1367 	if zerocopy {
   1368 		re.Data = xbs
   1369 	} else {
   1370 		re.Data = append(re.Data[:0], xbs...)
   1371 	}
   1372 }
   1373 
   1374 // BytesExt handles custom (de)serialization of types to/from []byte.
   1375 // It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
   1376 type BytesExt interface {
   1377 	// WriteExt converts a value to a []byte.
   1378 	//
   1379 	// Note: v is a pointer iff the registered extension type is a struct or array kind.
   1380 	WriteExt(v interface{}) []byte
   1381 
   1382 	// ReadExt updates a value from a []byte.
   1383 	//
   1384 	// Note: dst is always a pointer kind to the registered extension type.
   1385 	ReadExt(dst interface{}, src []byte)
   1386 }
   1387 
   1388 // InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
   1389 // The Encoder or Decoder will then handle the further (de)serialization of that known type.
   1390 //
   1391 // It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
   1392 type InterfaceExt interface {
   1393 	// ConvertExt converts a value into a simpler interface for easy encoding
   1394 	// e.g. convert time.Time to int64.
   1395 	//
   1396 	// Note: v is a pointer iff the registered extension type is a struct or array kind.
   1397 	ConvertExt(v interface{}) interface{}
   1398 
   1399 	// UpdateExt updates a value from a simpler interface for easy decoding
   1400 	// e.g. convert int64 to time.Time.
   1401 	//
   1402 	// Note: dst is always a pointer kind to the registered extension type.
   1403 	UpdateExt(dst interface{}, src interface{})
   1404 }
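
        // A minimal sketch of an InterfaceExt, along the lines of the time.Time
        // example above. The timeExt type is purely illustrative (not part of this
        // package), and the decode side assumes the stream yields an int64:
        //
        //	type timeExt struct{}
        //
        //	func (timeExt) ConvertExt(v interface{}) interface{} {
        //		return v.(*time.Time).UnixNano() // v is *time.Time: time.Time is a struct kind
        //	}
        //
        //	func (timeExt) UpdateExt(dst interface{}, src interface{}) {
        //		*dst.(*time.Time) = time.Unix(0, src.(int64))
        //	}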
   1405 
   1406 // Ext handles custom (de)serialization of custom types / extensions.
   1407 type Ext interface {
   1408 	BytesExt
   1409 	InterfaceExt
   1410 }
   1411 
   1412 // addExtWrapper is a wrapper implementation to support the former AddExt exported method.
   1413 type addExtWrapper struct {
   1414 	encFn func(reflect.Value) ([]byte, error)
   1415 	decFn func(reflect.Value, []byte) error
   1416 }
   1417 
   1418 func (x addExtWrapper) WriteExt(v interface{}) []byte {
   1419 	bs, err := x.encFn(reflect.ValueOf(v))
   1420 	halt.onerror(err)
   1421 	return bs
   1422 }
   1423 
   1424 func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
   1425 	halt.onerror(x.decFn(reflect.ValueOf(v), bs))
   1426 }
   1427 
   1428 func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
   1429 	return x.WriteExt(v)
   1430 }
   1431 
   1432 func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
   1433 	x.ReadExt(dest, v.([]byte))
   1434 }
   1435 
   1436 type bytesExtFailer struct{}
   1437 
   1438 func (bytesExtFailer) WriteExt(v interface{}) []byte {
   1439 	halt.onerror(errExtFnWriteExtUnsupported)
   1440 	return nil
   1441 }
   1442 func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
   1443 	halt.onerror(errExtFnReadExtUnsupported)
   1444 }
   1445 
   1446 type interfaceExtFailer struct{}
   1447 
   1448 func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
   1449 	halt.onerror(errExtFnConvertExtUnsupported)
   1450 	return nil
   1451 }
   1452 func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
   1453 	halt.onerror(errExtFnUpdateExtUnsupported)
   1454 }
   1455 
   1456 type bytesExtWrapper struct {
   1457 	interfaceExtFailer
   1458 	BytesExt
   1459 }
   1460 
   1461 type interfaceExtWrapper struct {
   1462 	bytesExtFailer
   1463 	InterfaceExt
   1464 }
   1465 
   1466 type extFailWrapper struct {
   1467 	bytesExtFailer
   1468 	interfaceExtFailer
   1469 }
   1470 
   1471 type binaryEncodingType struct{}
   1472 
   1473 func (binaryEncodingType) isBinary() bool { return true }
   1474 func (binaryEncodingType) isJson() bool   { return false }
   1475 
   1476 type textEncodingType struct{}
   1477 
   1478 func (textEncodingType) isBinary() bool { return false }
   1479 func (textEncodingType) isJson() bool   { return false }
   1480 
   1481 type notJsonType struct{}
   1482 
   1483 func (notJsonType) isJson() bool { return false }
   1484 
   1485 // noBuiltInTypes is embedded into many types which do not support builtins
   1486 // e.g. msgpack, simple, cbor.
   1487 
   1488 type noBuiltInTypes struct{}
   1489 
   1490 func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
   1491 func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
   1492 
   1493 // bigenHelper handles ByteOrder operations directly using
   1494 // arrays of bytes (not slices of bytes).
   1495 //
   1496 // Since byteorder operations are very common for encoding and decoding
   1497 // numbers, lengths, etc - it is imperative that this operation is as
   1498 // fast as possible. Removing indirection (pointer chasing) to look
   1499 // at up to 8 bytes helps a lot here.
   1500 //
   1501 // For times where it is expedient to use a slice, we delegate to
   1502 // bigenstd (equal to the binary.BigEndian value).
   1503 //
   1504 // retrofitted from stdlib: encoding/binary/BigEndian (ByteOrder)
   1505 type bigenHelper struct{}
   1506 
   1507 func (z bigenHelper) PutUint16(v uint16) (b [2]byte) {
   1508 	return [...]byte{
   1509 		byte(v >> 8),
   1510 		byte(v),
   1511 	}
   1512 }
   1513 
   1514 func (z bigenHelper) PutUint32(v uint32) (b [4]byte) {
   1515 	return [...]byte{
   1516 		byte(v >> 24),
   1517 		byte(v >> 16),
   1518 		byte(v >> 8),
   1519 		byte(v),
   1520 	}
   1521 }
   1522 
   1523 func (z bigenHelper) PutUint64(v uint64) (b [8]byte) {
   1524 	return [...]byte{
   1525 		byte(v >> 56),
   1526 		byte(v >> 48),
   1527 		byte(v >> 40),
   1528 		byte(v >> 32),
   1529 		byte(v >> 24),
   1530 		byte(v >> 16),
   1531 		byte(v >> 8),
   1532 		byte(v),
   1533 	}
   1534 }
   1535 
   1536 func (z bigenHelper) Uint16(b [2]byte) (v uint16) {
   1537 	return uint16(b[1]) |
   1538 		uint16(b[0])<<8
   1539 }
   1540 
   1541 func (z bigenHelper) Uint32(b [4]byte) (v uint32) {
   1542 	return uint32(b[3]) |
   1543 		uint32(b[2])<<8 |
   1544 		uint32(b[1])<<16 |
   1545 		uint32(b[0])<<24
   1546 }
   1547 
   1548 func (z bigenHelper) Uint64(b [8]byte) (v uint64) {
   1549 	return uint64(b[7]) |
   1550 		uint64(b[6])<<8 |
   1551 		uint64(b[5])<<16 |
   1552 		uint64(b[4])<<24 |
   1553 		uint64(b[3])<<32 |
   1554 		uint64(b[2])<<40 |
   1555 		uint64(b[1])<<48 |
   1556 		uint64(b[0])<<56
   1557 }
   1558 
   1559 func (z bigenHelper) writeUint16(w *encWr, v uint16) {
   1560 	x := z.PutUint16(v)
   1561 	w.writen2(x[0], x[1])
   1562 }
   1563 
   1564 func (z bigenHelper) writeUint32(w *encWr, v uint32) {
   1565 	// w.writeb((z.PutUint32(v))[:])
   1566 	// x := z.PutUint32(v)
   1567 	// w.writeb(x[:])
   1568 	// w.writen4(x[0], x[1], x[2], x[3])
   1569 	w.writen4(z.PutUint32(v))
   1570 }
   1571 
   1572 func (z bigenHelper) writeUint64(w *encWr, v uint64) {
   1573 	w.writen8(z.PutUint64(v))
   1574 }
   1575 
   1576 type extTypeTagFn struct {
   1577 	rtid    uintptr
   1578 	rtidptr uintptr
   1579 	rt      reflect.Type
   1580 	tag     uint64
   1581 	ext     Ext
   1582 }
   1583 
   1584 type extHandle []extTypeTagFn
   1585 
   1586 // AddExt registers an encode and decode function for a reflect.Type.
   1587 // To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
   1588 //
   1589 // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
   1590 func (x *BasicHandle) AddExt(rt reflect.Type, tag byte,
   1591 	encfn func(reflect.Value) ([]byte, error),
   1592 	decfn func(reflect.Value, []byte) error) (err error) {
   1593 	if encfn == nil || decfn == nil {
   1594 		return x.SetExt(rt, uint64(tag), nil)
   1595 	}
   1596 	return x.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
   1597 }
   1598 
   1599 // SetExt will set the extension for a tag and reflect.Type.
   1600 // Note that the type must be a named type, and specifically not a pointer or Interface.
   1601 // An error is returned if that is not honored.
   1602 // To Deregister an ext, call SetExt with nil Ext.
   1603 //
   1604 // Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
   1605 func (x *BasicHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
   1606 	if x.isInited() {
   1607 		return errHandleInited
   1608 	}
   1609 	if x.basicHandleRuntimeState == nil {
   1610 		x.basicHandleRuntimeState = new(basicHandleRuntimeState)
   1611 	}
   1612 	return x.basicHandleRuntimeState.setExt(rt, tag, ext)
   1613 }
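
        // Sketch of the preferred registration path named above. SetInterfaceExt is
        // defined on the concrete handles elsewhere in this package; the tag value
        // and the timeExt sketch (see InterfaceExt above) are illustrative:
        //
        //	var ch CborHandle
        //	err := ch.SetInterfaceExt(reflect.TypeOf(time.Time{}), 1, timeExt{})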
   1614 
   1615 func (o extHandle) getExtForI(x interface{}) (v *extTypeTagFn) {
   1616 	if len(o) > 0 {
   1617 		v = o.getExt(i2rtid(x), true)
   1618 	}
   1619 	return
   1620 }
   1621 
   1622 func (o extHandle) getExt(rtid uintptr, check bool) (v *extTypeTagFn) {
   1623 	if !check {
   1624 		return
   1625 	}
   1626 	for i := range o {
   1627 		v = &o[i]
   1628 		if v.rtid == rtid || v.rtidptr == rtid {
   1629 			return
   1630 		}
   1631 	}
   1632 	return nil
   1633 }
   1634 
   1635 func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
   1636 	for i := range o {
   1637 		v = &o[i]
   1638 		if v.tag == tag {
   1639 			return
   1640 		}
   1641 	}
   1642 	return nil
   1643 }
   1644 
   1645 type intf2impl struct {
   1646 	rtid uintptr // for intf
   1647 	impl reflect.Type
   1648 }
   1649 
   1650 type intf2impls []intf2impl
   1651 
   1652 // Intf2Impl maps an interface to an implementing type.
   1653 // This allows us to support inferring the concrete type
   1654 // and populating it when passed an interface.
   1655 // e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
   1656 //
   1657 // Passing a nil impl will clear the mapping.
   1658 func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
   1659 	if impl != nil && !impl.Implements(intf) {
   1660 		return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
   1661 	}
   1662 	rtid := rt2id(intf)
   1663 	o2 := *o
   1664 	for i := range o2 {
   1665 		v := &o2[i]
   1666 		if v.rtid == rtid {
   1667 			v.impl = impl
   1668 			return
   1669 		}
   1670 	}
   1671 	*o = append(o2, intf2impl{rtid, impl})
   1672 	return
   1673 }
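
        // For example, an illustrative sketch using standard library types:
        //
        //	var m intf2impls
        //	err := m.Intf2Impl(reflect.TypeOf((*io.Reader)(nil)).Elem(), reflect.TypeOf((*bytes.Buffer)(nil)))
        //	// thereafter, decoding into a nil io.Reader can populate a *bytes.Buffer.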
   1674 
   1675 func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
   1676 	for i := range o {
   1677 		v := &o[i]
   1678 		if v.rtid == rtid {
   1679 			if v.impl == nil {
   1680 				return
   1681 			}
   1682 			vkind := v.impl.Kind()
   1683 			if vkind == reflect.Ptr {
   1684 				return reflect.New(v.impl.Elem())
   1685 			}
   1686 			return rvZeroAddrK(v.impl, vkind)
   1687 		}
   1688 	}
   1689 	return
   1690 }
   1691 
   1692 // structFieldInfoPathNode is a node in a tree, which allows us to easily
   1693 // walk the anonymous path.
   1694 //
   1695 // In the typical case, the node is not embedded/anonymous, and thus the parent
   1696 // will be nil and this information becomes a value (not needing any indirection).
   1697 type structFieldInfoPathNode struct {
   1698 	parent *structFieldInfoPathNode
   1699 
   1700 	offset   uint16
   1701 	index    uint16
   1702 	kind     uint8
   1703 	numderef uint8
   1704 
   1705 	// encNameAsciiAlphaNum and omitEmpty should be in structFieldInfo,
   1706 	// but are kept here for tighter packaging.
   1707 
   1708 	encNameAsciiAlphaNum bool // the encName only contains ascii alphabet and numbers
   1709 	omitEmpty            bool
   1710 
   1711 	typ reflect.Type
   1712 }
   1713 
   1714 // depth returns the number of valid nodes in the hierarchy
   1715 func (path *structFieldInfoPathNode) depth() (d int) {
   1716 TOP:
   1717 	if path != nil {
   1718 		d++
   1719 		path = path.parent
   1720 		goto TOP
   1721 	}
   1722 	return
   1723 }
   1724 
   1725 // field returns the field of the struct.
   1726 func (path *structFieldInfoPathNode) field(v reflect.Value) (rv2 reflect.Value) {
   1727 	if parent := path.parent; parent != nil {
   1728 		v = parent.field(v)
   1729 		for j, k := uint8(0), parent.numderef; j < k; j++ {
   1730 			if rvIsNil(v) {
   1731 				return
   1732 			}
   1733 			v = v.Elem()
   1734 		}
   1735 	}
   1736 	return path.rvField(v)
   1737 }
   1738 
   1739 // fieldAlloc returns the field of the struct.
   1740 // It allocates if a nil value was seen while searching.
   1741 func (path *structFieldInfoPathNode) fieldAlloc(v reflect.Value) (rv2 reflect.Value) {
   1742 	if parent := path.parent; parent != nil {
   1743 		v = parent.fieldAlloc(v)
   1744 		for j, k := uint8(0), parent.numderef; j < k; j++ {
   1745 			if rvIsNil(v) {
   1746 				rvSetDirect(v, reflect.New(v.Type().Elem()))
   1747 			}
   1748 			v = v.Elem()
   1749 		}
   1750 	}
   1751 	return path.rvField(v)
   1752 }
   1753 
   1754 type structFieldInfo struct {
   1755 	encName string // encode name
   1756 
   1757 	// encNameHash uintptr
   1758 
   1759 	// fieldName string // currently unused
   1760 
   1761 	// encNameAsciiAlphaNum and omitEmpty should be here,
   1762 	// but are stored in structFieldInfoPathNode for tighter packaging.
   1763 
   1764 	path structFieldInfoPathNode
   1765 }
   1766 
   1767 func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
   1768 	keytype = valueTypeString // default
   1769 	if stag == "" {
   1770 		return
   1771 	}
   1772 	ss := strings.Split(stag, ",")
   1773 	if len(ss) < 2 {
   1774 		return
   1775 	}
   1776 	for _, s := range ss[1:] {
   1777 		switch s {
   1778 		case "omitempty":
   1779 			omitEmpty = true
   1780 		case "toarray":
   1781 			toArray = true
   1782 		case "int":
   1783 			keytype = valueTypeInt
   1784 		case "uint":
   1785 			keytype = valueTypeUint
   1786 		case "float":
   1787 			keytype = valueTypeFloat
   1788 			// case "bool":
   1789 			// 	keytype = valueTypeBool
   1790 		case "string":
   1791 			keytype = valueTypeString
   1792 		}
   1793 	}
   1794 	return
   1795 }
   1796 
   1797 func (si *structFieldInfo) parseTag(stag string) {
   1798 	if stag == "" {
   1799 		return
   1800 	}
   1801 	for i, s := range strings.Split(stag, ",") {
   1802 		if i == 0 {
   1803 			if s != "" {
   1804 				si.encName = s
   1805 			}
   1806 		} else {
   1807 			switch s {
   1808 			case "omitempty":
   1809 				si.path.omitEmpty = true
   1810 			}
   1811 		}
   1812 	}
   1813 }
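
        // Illustrative struct showing the tag shapes parsed above, assuming a
        // configured tag key of "codec" (and structInfoFieldName == "_struct"):
        //
        //	type T struct {
        //		_struct bool   `codec:",toarray"`       // struct-level options (parseStructInfo)
        //		Name    string `codec:"name,omitempty"` // rename + omitempty (parseTag)
        //		Skip    int    `codec:"-"`              // skipped entirely
        //	}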
   1814 
   1815 type sfiSortedByEncName []*structFieldInfo
   1816 
   1817 func (p sfiSortedByEncName) Len() int           { return len(p) }
   1818 func (p sfiSortedByEncName) Swap(i, j int)      { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
   1819 func (p sfiSortedByEncName) Less(i, j int) bool { return p[uint(i)].encName < p[uint(j)].encName }
   1820 
   1821 // typeInfo4Container holds information that is only available for
   1822 // containers like map, array, chan, slice.
   1823 type typeInfo4Container struct {
   1824 	elem reflect.Type
   1825 	// key is:
   1826 	//   - if map kind: map key
   1827 	//   - if array kind: sliceOf(elem)
   1828 	//   - if chan kind: sliceof(elem)
   1829 	key reflect.Type
   1830 
   1831 	// fastpathUnderlying is the underlying type of a named slice/map/array, as defined by the go spec.
   1832 	// It is used by fastpath, where we define fastpath functions for the underlying type.
   1833 	//
   1834 	// for a map, it's a map; for a slice or array, it's a slice; else it's nil.
   1835 	fastpathUnderlying reflect.Type
   1836 
   1837 	tikey  *typeInfo
   1838 	tielem *typeInfo
   1839 }
   1840 
   1841 // typeInfo keeps static (non-changing, read-only) information
   1842 // about each (non-ptr) type referenced in the encode/decode sequence.
   1843 //
   1844 // During an encode/decode sequence, we work as below:
   1845 //   - If base is a built in type, en/decode base value
   1846 //   - If base is registered as an extension, en/decode base value
   1847 //   - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
   1848 //   - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
   1849 //   - Else decode appropriately based on the reflect.Kind
   1850 type typeInfo struct {
   1851 	rt  reflect.Type
   1852 	ptr reflect.Type
   1853 
   1854 	// pkgpath string
   1855 
   1856 	rtid uintptr
   1857 
   1858 	numMeth uint16 // number of methods
   1859 	kind    uint8
   1860 	chandir uint8
   1861 
   1862 	anyOmitEmpty bool      // true if a struct, and any of the fields are tagged "omitempty"
   1863 	toArray      bool      // whether this (struct) type should be encoded as an array
   1864 	keyType      valueType // if struct, how is the field name stored in a stream? default is string
   1865 	mbs          bool      // base type (T or *T) is a MapBySlice
   1866 
   1867 	sfi4Name map[string]*structFieldInfo // map. used for finding sfi given a name
   1868 
   1869 	*typeInfo4Container
   1870 
   1871 	// ---- cpu cache line boundary?
   1872 
   1873 	size, keysize, elemsize uint32
   1874 
   1875 	keykind, elemkind uint8
   1876 
   1877 	flagHasPkgPath   bool // Type.PackagePath != ""
   1878 	flagComparable   bool
   1879 	flagCanTransient bool
   1880 
   1881 	flagMarshalInterface  bool // does this have custom (un)marshal implementation?
   1882 	flagSelferViaCodecgen bool
   1883 
   1884 	// custom implementation flags
   1885 	flagIsZeroer    bool
   1886 	flagIsZeroerPtr bool
   1887 
   1888 	flagIsCodecEmptyer    bool
   1889 	flagIsCodecEmptyerPtr bool
   1890 
   1891 	flagBinaryMarshaler    bool
   1892 	flagBinaryMarshalerPtr bool
   1893 
   1894 	flagBinaryUnmarshaler    bool
   1895 	flagBinaryUnmarshalerPtr bool
   1896 
   1897 	flagTextMarshaler    bool
   1898 	flagTextMarshalerPtr bool
   1899 
   1900 	flagTextUnmarshaler    bool
   1901 	flagTextUnmarshalerPtr bool
   1902 
   1903 	flagJsonMarshaler    bool
   1904 	flagJsonMarshalerPtr bool
   1905 
   1906 	flagJsonUnmarshaler    bool
   1907 	flagJsonUnmarshalerPtr bool
   1908 
   1909 	flagSelfer    bool
   1910 	flagSelferPtr bool
   1911 
   1912 	flagMissingFielder    bool
   1913 	flagMissingFielderPtr bool
   1914 
   1915 	infoFieldOmitempty bool
   1916 
   1917 	sfi structFieldInfos
   1918 }
   1919 
   1920 func (ti *typeInfo) siForEncName(name []byte) (si *structFieldInfo) {
   1921 	return ti.sfi4Name[string(name)]
   1922 }
   1923 
   1924 func (ti *typeInfo) resolve(x []structFieldInfo, ss map[string]uint16) (n int) {
   1925 	n = len(x)
   1926 
   1927 	for i := range x {
   1928 		ui := uint16(i)
   1929 		xn := x[i].encName
   1930 		j, ok := ss[xn]
   1931 		if ok {
   1932 			i2clear := ui                              // index to be cleared
   1933 			if x[i].path.depth() < x[j].path.depth() { // this one is shallower
   1934 				ss[xn] = ui
   1935 				i2clear = j
   1936 			}
   1937 			if x[i2clear].encName != "" {
   1938 				x[i2clear].encName = ""
   1939 				n--
   1940 			}
   1941 		} else {
   1942 			ss[xn] = ui
   1943 		}
   1944 	}
   1945 
   1946 	return
   1947 }
   1948 
   1949 func (ti *typeInfo) init(x []structFieldInfo, n int) {
   1950 	var anyOmitEmpty bool
   1951 
   1952 	// remove all the nils (non-ready)
   1953 	m := make(map[string]*structFieldInfo, n)
   1954 	w := make([]structFieldInfo, n)
   1955 	y := make([]*structFieldInfo, n+n)
   1956 	z := y[n:]
   1957 	y = y[:n]
   1958 	n = 0
   1959 	for i := range x {
   1960 		if x[i].encName == "" {
   1961 			continue
   1962 		}
   1963 		if !anyOmitEmpty && x[i].path.omitEmpty {
   1964 			anyOmitEmpty = true
   1965 		}
   1966 		w[n] = x[i]
   1967 		y[n] = &w[n]
   1968 		m[x[i].encName] = &w[n]
   1969 		n++
   1970 	}
   1971 	if n != len(y) {
   1972 		halt.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", ti.rt, len(y), len(x), n)
   1973 	}
   1974 
   1975 	copy(z, y)
   1976 	sort.Sort(sfiSortedByEncName(z))
   1977 
   1978 	ti.anyOmitEmpty = anyOmitEmpty
   1979 	ti.sfi.load(y, z)
   1980 	ti.sfi4Name = m
   1981 }
   1982 
   1983 // Handling flagCanTransient
   1984 //
   1985 // We support transient optimization if the kind of the type is
   1986 // a number, bool, string, or slice (of number/bool).
   1987 // In addition, we also support the case where the kind is struct or array,
   1988 // and the type does not contain any pointers (checked recursively).
   1989 //
   1990 // Note that all reference types (string, slice, func, map, ptr, interface, etc) contain pointers.
   1991 //
   1992 // If using transient for a type with a pointer, there is the potential for data corruption
   1993 // when GC tries to follow a "transient" pointer which may become a non-pointer soon after.
   1994 //
   1995 
   1996 func transientBitsetFlags() *bitset32 {
   1997 	if transientValueHasStringSlice {
   1998 		return &numBoolStrSliceBitset
   1999 	}
   2000 	return &numBoolBitset
   2001 }
   2002 
   2003 func isCanTransient(t reflect.Type, k reflect.Kind) (v bool) {
   2004 	var bs = transientBitsetFlags()
   2005 	if bs.isset(byte(k)) {
   2006 		v = true
   2007 	} else if k == reflect.Slice {
   2008 		elem := t.Elem()
   2009 		v = numBoolBitset.isset(byte(elem.Kind()))
   2010 	} else if k == reflect.Array {
   2011 		elem := t.Elem()
   2012 		v = isCanTransient(elem, elem.Kind())
   2013 	} else if k == reflect.Struct {
   2014 		v = true
   2015 		for j, jlen := 0, t.NumField(); j < jlen; j++ {
   2016 			f := t.Field(j)
   2017 			if !isCanTransient(f.Type, f.Type.Kind()) {
   2018 				v = false
   2019 				return
   2020 			}
   2021 		}
   2022 	} else {
   2023 		v = false
   2024 	}
   2025 	return
   2026 }
   2027 
   2028 func (ti *typeInfo) doSetFlagCanTransient() {
   2029 	if transientSizeMax > 0 {
   2030 		ti.flagCanTransient = ti.size <= transientSizeMax
   2031 	} else {
   2032 		ti.flagCanTransient = true
   2033 	}
   2034 	if ti.flagCanTransient {
   2035 		if !transientBitsetFlags().isset(ti.kind) {
   2036 			ti.flagCanTransient = isCanTransient(ti.rt, reflect.Kind(ti.kind))
   2037 		}
   2038 	}
   2039 }
   2040 
   2041 type rtid2ti struct {
   2042 	rtid uintptr
   2043 	ti   *typeInfo
   2044 }
   2045 
   2046 // TypeInfos caches typeInfo for each type on first inspection.
   2047 //
   2048 // It is configured with a set of tag keys, which are used to get
   2049 // configuration for the type.
   2050 type TypeInfos struct {
   2051 	infos atomicTypeInfoSlice
   2052 	mu    sync.Mutex
   2053 	_     uint64 // padding (cache-aligned)
   2054 	tags  []string
   2055 	_     uint64 // padding (cache-aligned)
   2056 }
   2057 
   2058 // NewTypeInfos creates a TypeInfos given a set of struct tags keys.
   2059 //
   2060 // This allows users to customize the struct tag keys which contain configuration
   2061 // of their types.
   2062 func NewTypeInfos(tags []string) *TypeInfos {
   2063 	return &TypeInfos{tags: tags}
   2064 }
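
        // e.g. a sketch of the conventional configuration, checking the
        // `codec` tag key and then `json`:
        //
        //	tis := NewTypeInfos([]string{"codec", "json"})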
   2065 
   2066 func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
   2067 	// check for tags: codec, json, in that order.
   2068 	// this allows seamless support for many configured structs.
   2069 	for _, x := range x.tags {
   2070 		s = t.Get(x)
   2071 		if s != "" {
   2072 			return s
   2073 		}
   2074 	}
   2075 	return
   2076 }
   2077 
   2078 func findTypeInfo(s []rtid2ti, rtid uintptr) (i uint, ti *typeInfo) {
   2079 	// binary search. adapted from sort/search.go.
   2080 	// Note: we use goto (instead of for loop) so this can be inlined.
   2081 
   2082 	var h uint
   2083 	var j = uint(len(s))
   2084 LOOP:
   2085 	if i < j {
   2086 		h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
   2087 		if s[h].rtid < rtid {
   2088 			i = h + 1
   2089 		} else {
   2090 			j = h
   2091 		}
   2092 		goto LOOP
   2093 	}
   2094 	if i < uint(len(s)) && s[i].rtid == rtid {
   2095 		ti = s[i].ti
   2096 	}
   2097 	return
   2098 }
   2099 
   2100 func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
   2101 	if pti = x.find(rtid); pti == nil {
   2102 		pti = x.load(rt)
   2103 	}
   2104 	return
   2105 }
   2106 
   2107 func (x *TypeInfos) find(rtid uintptr) (pti *typeInfo) {
   2108 	sp := x.infos.load()
   2109 	if sp != nil {
   2110 		_, pti = findTypeInfo(sp, rtid)
   2111 	}
   2112 	return
   2113 }
   2114 
   2115 func (x *TypeInfos) load(rt reflect.Type) (pti *typeInfo) {
   2116 	rk := rt.Kind()
   2117 
   2118 	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
   2119 		halt.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
   2120 	}
   2121 
   2122 	rtid := rt2id(rt)
   2123 
   2124 	// do not hold lock while computing this.
   2125 	// it may lead to duplication, but that's ok.
   2126 	ti := typeInfo{
   2127 		rt:      rt,
   2128 		ptr:     reflect.PtrTo(rt),
   2129 		rtid:    rtid,
   2130 		kind:    uint8(rk),
   2131 		size:    uint32(rt.Size()),
   2132 		numMeth: uint16(rt.NumMethod()),
   2133 		keyType: valueTypeString, // default it - so it's never 0
   2134 
   2135 		// pkgpath: rt.PkgPath(),
   2136 		flagHasPkgPath: rt.PkgPath() != "",
   2137 	}
   2138 
   2139 	// bset sets custom implementation flags
   2140 	bset := func(when bool, b *bool) {
   2141 		if when {
   2142 			*b = true
   2143 		}
   2144 	}
   2145 
   2146 	var b1, b2 bool
   2147 
   2148 	b1, b2 = implIntf(rt, binaryMarshalerTyp)
   2149 	bset(b1, &ti.flagBinaryMarshaler)
   2150 	bset(b2, &ti.flagBinaryMarshalerPtr)
   2151 	b1, b2 = implIntf(rt, binaryUnmarshalerTyp)
   2152 	bset(b1, &ti.flagBinaryUnmarshaler)
   2153 	bset(b2, &ti.flagBinaryUnmarshalerPtr)
   2154 	b1, b2 = implIntf(rt, textMarshalerTyp)
   2155 	bset(b1, &ti.flagTextMarshaler)
   2156 	bset(b2, &ti.flagTextMarshalerPtr)
   2157 	b1, b2 = implIntf(rt, textUnmarshalerTyp)
   2158 	bset(b1, &ti.flagTextUnmarshaler)
   2159 	bset(b2, &ti.flagTextUnmarshalerPtr)
   2160 	b1, b2 = implIntf(rt, jsonMarshalerTyp)
   2161 	bset(b1, &ti.flagJsonMarshaler)
   2162 	bset(b2, &ti.flagJsonMarshalerPtr)
   2163 	b1, b2 = implIntf(rt, jsonUnmarshalerTyp)
   2164 	bset(b1, &ti.flagJsonUnmarshaler)
   2165 	bset(b2, &ti.flagJsonUnmarshalerPtr)
   2166 	b1, b2 = implIntf(rt, selferTyp)
   2167 	bset(b1, &ti.flagSelfer)
   2168 	bset(b2, &ti.flagSelferPtr)
   2169 	b1, b2 = implIntf(rt, missingFielderTyp)
   2170 	bset(b1, &ti.flagMissingFielder)
   2171 	bset(b2, &ti.flagMissingFielderPtr)
   2172 	b1, b2 = implIntf(rt, iszeroTyp)
   2173 	bset(b1, &ti.flagIsZeroer)
   2174 	bset(b2, &ti.flagIsZeroerPtr)
   2175 	b1, b2 = implIntf(rt, isCodecEmptyerTyp)
   2176 	bset(b1, &ti.flagIsCodecEmptyer)
   2177 	bset(b2, &ti.flagIsCodecEmptyerPtr)
   2178 
   2179 	b1, b2 = implIntf(rt, isSelferViaCodecgenerTyp)
   2180 	ti.flagSelferViaCodecgen = b1 || b2
   2181 
   2182 	ti.flagMarshalInterface = ti.flagSelfer || ti.flagSelferPtr ||
   2183 		ti.flagSelferViaCodecgen ||
   2184 		ti.flagBinaryMarshaler || ti.flagBinaryMarshalerPtr ||
   2185 		ti.flagBinaryUnmarshaler || ti.flagBinaryUnmarshalerPtr ||
   2186 		ti.flagTextMarshaler || ti.flagTextMarshalerPtr ||
   2187 		ti.flagTextUnmarshaler || ti.flagTextUnmarshalerPtr ||
   2188 		ti.flagJsonMarshaler || ti.flagJsonMarshalerPtr ||
   2189 		ti.flagJsonUnmarshaler || ti.flagJsonUnmarshalerPtr
   2190 
   2191 	b1 = rt.Comparable()
   2192 	// bset(b1, &ti.flagComparable)
   2193 	ti.flagComparable = b1
   2194 
   2195 	ti.doSetFlagCanTransient()
   2196 
   2197 	var tt reflect.Type
   2198 	switch rk {
   2199 	case reflect.Struct:
   2200 		var omitEmpty bool
   2201 		if f, ok := rt.FieldByName(structInfoFieldName); ok {
   2202 			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
   2203 			ti.infoFieldOmitempty = omitEmpty
   2204 		} else {
   2205 			ti.keyType = valueTypeString
   2206 		}
   2207 		pp, pi := &pool4tiload, pool4tiload.Get()
   2208 		pv := pi.(*typeInfoLoad)
   2209 		pv.reset()
   2210 		pv.etypes = append(pv.etypes, ti.rtid)
   2211 		x.rget(rt, rtid, nil, pv, omitEmpty)
   2212 		n := ti.resolve(pv.sfis, pv.sfiNames)
   2213 		ti.init(pv.sfis, n)
   2214 		pp.Put(pi)
   2215 	case reflect.Map:
   2216 		ti.typeInfo4Container = new(typeInfo4Container)
   2217 		ti.elem = rt.Elem()
   2218 		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
   2219 		}
   2220 		ti.tielem = x.get(rt2id(tt), tt)
   2221 		ti.elemkind = uint8(ti.elem.Kind())
   2222 		ti.elemsize = uint32(ti.elem.Size())
   2223 		ti.key = rt.Key()
   2224 		for tt = ti.key; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
   2225 		}
   2226 		ti.tikey = x.get(rt2id(tt), tt)
   2227 		ti.keykind = uint8(ti.key.Kind())
   2228 		ti.keysize = uint32(ti.key.Size())
   2229 		if ti.flagHasPkgPath {
   2230 			ti.fastpathUnderlying = reflect.MapOf(ti.key, ti.elem)
   2231 		}
   2232 	case reflect.Slice:
   2233 		ti.typeInfo4Container = new(typeInfo4Container)
   2234 		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
   2235 		if !ti.mbs && b2 {
   2236 			ti.mbs = b2
   2237 		}
   2238 		ti.elem = rt.Elem()
   2239 		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
   2240 		}
   2241 		ti.tielem = x.get(rt2id(tt), tt)
   2242 		ti.elemkind = uint8(ti.elem.Kind())
   2243 		ti.elemsize = uint32(ti.elem.Size())
   2244 		if ti.flagHasPkgPath {
   2245 			ti.fastpathUnderlying = reflect.SliceOf(ti.elem)
   2246 		}
   2247 	case reflect.Chan:
   2248 		ti.typeInfo4Container = new(typeInfo4Container)
   2249 		ti.elem = rt.Elem()
   2250 		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
   2251 		}
   2252 		ti.tielem = x.get(rt2id(tt), tt)
   2253 		ti.elemkind = uint8(ti.elem.Kind())
   2254 		ti.elemsize = uint32(ti.elem.Size())
   2255 		ti.chandir = uint8(rt.ChanDir())
   2256 		ti.key = reflect.SliceOf(ti.elem)
   2257 		ti.keykind = uint8(reflect.Slice)
   2258 	case reflect.Array:
   2259 		ti.typeInfo4Container = new(typeInfo4Container)
   2260 		ti.mbs, b2 = implIntf(rt, mapBySliceTyp)
   2261 		if !ti.mbs && b2 {
   2262 			ti.mbs = b2
   2263 		}
   2264 		ti.elem = rt.Elem()
   2265 		ti.elemkind = uint8(ti.elem.Kind())
   2266 		ti.elemsize = uint32(ti.elem.Size())
   2267 		for tt = ti.elem; tt.Kind() == reflect.Ptr; tt = tt.Elem() {
   2268 		}
   2269 		ti.tielem = x.get(rt2id(tt), tt)
   2270 		ti.key = reflect.SliceOf(ti.elem)
   2271 		ti.keykind = uint8(reflect.Slice)
   2272 		ti.keysize = uint32(ti.key.Size())
   2273 		if ti.flagHasPkgPath {
   2274 			ti.fastpathUnderlying = ti.key
   2275 		}
   2276 
   2277 		// MARKER: reflect.Ptr cannot happen here, as we halt early if reflect.Ptr passed in
   2278 		// case reflect.Ptr:
   2279 		// 	ti.elem = rt.Elem()
   2280 		// 	ti.elemkind = uint8(ti.elem.Kind())
   2281 		// 	ti.elemsize = uint32(ti.elem.Size())
   2282 	}
   2283 
   2284 	x.mu.Lock()
   2285 	sp := x.infos.load()
   2286 	// since this is an atomic load/store, we MUST use a different array each time,
   2287 	// else we have a data race when a store is happening simultaneously with a findTypeInfo call.
   2288 	if sp == nil {
   2289 		pti = &ti
   2290 		sp = []rtid2ti{{rtid, pti}}
   2291 		x.infos.store(sp)
   2292 	} else {
   2293 		var idx uint
   2294 		idx, pti = findTypeInfo(sp, rtid)
   2295 		if pti == nil {
   2296 			pti = &ti
   2297 			sp2 := make([]rtid2ti, len(sp)+1)
   2298 			copy(sp2[idx+1:], sp[idx:])
   2299 			copy(sp2, sp[:idx])
   2300 			sp2[idx] = rtid2ti{rtid, pti}
   2301 			x.infos.store(sp2)
   2302 		}
   2303 	}
   2304 	x.mu.Unlock()
   2305 	return
   2306 }
   2307 
   2308 func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr,
   2309 	path *structFieldInfoPathNode, pv *typeInfoLoad, omitEmpty bool) {
   2310 	// Read up fields and store how to access the value.
   2311 	//
   2312 	// It uses go's rules for field selectors,
   2313 	// which say that the field with the shallowest depth is selected.
   2314 	//
   2315 	// Note: we consciously use slices, not a map, to simulate a set.
   2316 	//       Typically, types have < 16 fields,
   2317 	//       and linear iteration with equality checks is faster than a map at that size
   2318 	flen := rt.NumField()
   2319 LOOP:
   2320 	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
   2321 		f := rt.Field(int(j))
   2322 		fkind := f.Type.Kind()
   2323 
   2324 		// skip if a func type, or is unexported, or structTag value == "-"
   2325 		switch fkind {
   2326 		case reflect.Func, reflect.UnsafePointer:
   2327 			continue LOOP
   2328 		}
   2329 
   2330 		isUnexported := f.PkgPath != ""
   2331 		if isUnexported && !f.Anonymous {
   2332 			continue
   2333 		}
   2334 		stag := x.structTag(f.Tag)
   2335 		if stag == "-" {
   2336 			continue
   2337 		}
   2338 		var si structFieldInfo
   2339 
   2340 		var numderef uint8 = 0
   2341 		for xft := f.Type; xft.Kind() == reflect.Ptr; xft = xft.Elem() {
   2342 			numderef++
   2343 		}
   2344 
   2345 		var parsed bool
   2346 		// if anonymous and no struct tag (or it's blank),
   2347 		// and a struct (or pointer to struct), inline it.
   2348 		if f.Anonymous && fkind != reflect.Interface {
   2349 			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
   2350 			ft := f.Type
   2351 			isPtr := ft.Kind() == reflect.Ptr
   2352 			for ft.Kind() == reflect.Ptr {
   2353 				ft = ft.Elem()
   2354 			}
   2355 			isStruct := ft.Kind() == reflect.Struct
   2356 
   2357 			// Ignore embedded fields of unexported non-struct types.
   2358 			// Also, from go1.10, ignore pointers to unexported struct types
   2359 			// because unmarshal cannot assign a new struct to an unexported field.
   2360 			// See https://golang.org/issue/21357
   2361 			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
   2362 				continue
   2363 			}
   2364 			doInline := stag == ""
   2365 			if !doInline {
   2366 				si.parseTag(stag)
   2367 				parsed = true
   2368 				doInline = si.encName == "" // si.isZero()
   2369 			}
   2370 			if doInline && isStruct {
   2371 				// if etypes contains this, don't call rget again (as fields are already seen here)
   2372 				ftid := rt2id(ft)
   2373 				// We cannot recurse forever, but we need to track other field depths.
   2374 				// So - we break if we see a type twice (not the first time).
   2375 				// This should be sufficient to handle an embedded type that refers to its
   2376 				// owning type, which then refers to its embedded type.
   2377 				processIt := true
   2378 				numk := 0
   2379 				for _, k := range pv.etypes {
   2380 					if k == ftid {
   2381 						numk++
   2382 						if numk == rgetMaxRecursion {
   2383 							processIt = false
   2384 							break
   2385 						}
   2386 					}
   2387 				}
   2388 				if processIt {
   2389 					pv.etypes = append(pv.etypes, ftid)
   2390 					path2 := &structFieldInfoPathNode{
   2391 						parent:   path,
   2392 						typ:      f.Type,
   2393 						offset:   uint16(f.Offset),
   2394 						index:    j,
   2395 						kind:     uint8(fkind),
   2396 						numderef: numderef,
   2397 					}
   2398 					x.rget(ft, ftid, path2, pv, omitEmpty)
   2399 				}
   2400 				continue
   2401 			}
   2402 		}
   2403 
   2404 		// after the anonymous dance: if an unexported field, skip
   2405 		if isUnexported || f.Name == "" { // f.Name cannot be "", but defensively handle it
   2406 			continue
   2407 		}
   2408 
   2409 		si.path = structFieldInfoPathNode{
   2410 			parent:   path,
   2411 			typ:      f.Type,
   2412 			offset:   uint16(f.Offset),
   2413 			index:    j,
   2414 			kind:     uint8(fkind),
   2415 			numderef: numderef,
   2416 			// set asciiAlphaNum to true (default); checked and may be set to false below
   2417 			encNameAsciiAlphaNum: true,
   2418 			// note: omitEmpty might have been set in an earlier parseTag call, etc - so carry it forward
   2419 			omitEmpty: si.path.omitEmpty,
   2420 		}
   2421 
   2422 		if !parsed {
   2423 			si.encName = f.Name
   2424 			si.parseTag(stag)
   2425 			parsed = true
   2426 		} else if si.encName == "" {
   2427 			si.encName = f.Name
   2428 		}
   2429 
   2430 		// si.encNameHash = maxUintptr() // hashShortString(bytesView(si.encName))
   2431 
   2432 		if omitEmpty {
   2433 			si.path.omitEmpty = true
   2434 		}
   2435 
   2436 		for i := len(si.encName) - 1; i >= 0; i-- { // bounds-check elimination
   2437 			if !asciiAlphaNumBitset.isset(si.encName[i]) {
   2438 				si.path.encNameAsciiAlphaNum = false
   2439 				break
   2440 			}
   2441 		}
   2442 
   2443 		pv.sfis = append(pv.sfis, si)
   2444 	}
   2445 }
   2446 
   2447 func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
   2448 	// return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
   2449 
   2450 	// if I's method is defined on T (ie T implements I), then *T implements I.
   2451 	// The converse is not true.
   2452 
   2453 	// Type.Implements can be expensive, as it does a simultaneous linear search across 2 lists
   2454 	// with alphanumeric string comparisons.
   2455 	// If we can avoid running one of these 2 calls, we should.
   2456 
   2457 	base = rt.Implements(iTyp)
   2458 	if base {
   2459 		indir = true
   2460 	} else {
   2461 		indir = reflect.PtrTo(rt).Implements(iTyp)
   2462 	}
   2463 	return
   2464 }
   2465 
   2466 func bool2int(b bool) (v uint8) {
   2467 	// MARKER: optimized to be a single instruction
   2468 	if b {
   2469 		v = 1
   2470 	}
   2471 	return
   2472 }
   2473 
   2474 func isSliceBoundsError(s string) bool {
   2475 	return strings.Contains(s, "index out of range") ||
   2476 		strings.Contains(s, "slice bounds out of range")
   2477 }
   2478 
   2479 func sprintf(format string, v ...interface{}) string {
   2480 	return fmt.Sprintf(format, v...)
   2481 }
   2482 
   2483 func panicValToErr(h errDecorator, v interface{}, err *error) {
   2484 	if v == *err {
   2485 		return
   2486 	}
   2487 	switch xerr := v.(type) {
   2488 	case nil:
   2489 	case runtime.Error:
   2490 		d, dok := h.(*Decoder)
   2491 		if dok && d.bytes && isSliceBoundsError(xerr.Error()) {
   2492 			*err = io.EOF
   2493 		} else {
   2494 			h.wrapErr(xerr, err)
   2495 		}
   2496 	case error:
   2497 		switch xerr {
   2498 		case nil:
   2499 		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
   2500 			// treat as special (bubble up)
   2501 			*err = xerr
   2502 		default:
   2503 			h.wrapErr(xerr, err)
   2504 		}
   2505 	default:
   2506 		// we don't expect this to happen (as this library always panics with an error)
   2507 		h.wrapErr(fmt.Errorf("%v", v), err)
   2508 	}
   2509 }
   2510 
   2511 func usableByteSlice(bs []byte, slen int) (out []byte, changed bool) {
   2512 	const maxCap = 1024 * 1024 * 64 // 64MB
   2513 	const skipMaxCap = false        // allow to test
   2514 	if slen <= 0 {
   2515 		return []byte{}, true
   2516 	}
   2517 	if slen <= cap(bs) {
   2518 		return bs[:slen], false
   2519 	}
   2520 	// slen > cap(bs) ... handle memory overload appropriately
   2521 	if skipMaxCap || slen <= maxCap {
   2522 		return make([]byte, slen), true
   2523 	}
   2524 	return make([]byte, maxCap), true
   2525 }
   2526 
   2527 func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
   2528 	return mapKeyFastKindVals[k&31]
   2529 }
   2530 
   2531 // ----
   2532 
   2533 type codecFnInfo struct {
   2534 	ti     *typeInfo
   2535 	xfFn   Ext
   2536 	xfTag  uint64
   2537 	addrD  bool
   2538 	addrDf bool // force: if addrD, then decode function MUST take a ptr
   2539 	addrE  bool
   2540 	// addrEf bool // force: if addrE, then encode function MUST take a ptr
   2541 }
   2542 
   2543 // codecFn encapsulates the captured variables and the encode function.
   2544 // This way, we only do some calculations once, and pass to the
   2545 // code block that should be called (encapsulated in a function)
   2546 // instead of executing the checks every time.
   2547 type codecFn struct {
   2548 	i  codecFnInfo
   2549 	fe func(*Encoder, *codecFnInfo, reflect.Value)
   2550 	fd func(*Decoder, *codecFnInfo, reflect.Value)
   2551 	// _  [1]uint64 // padding (cache-aligned)
   2552 }
   2553 
   2554 type codecRtidFn struct {
   2555 	rtid uintptr
   2556 	fn   *codecFn
   2557 }
   2558 
   2559 func makeExt(ext interface{}) Ext {
   2560 	switch t := ext.(type) {
   2561 	case Ext:
   2562 		return t
   2563 	case BytesExt:
   2564 		return &bytesExtWrapper{BytesExt: t}
   2565 	case InterfaceExt:
   2566 		return &interfaceExtWrapper{InterfaceExt: t}
   2567 	}
   2568 	return &extFailWrapper{}
   2569 }
   2570 
   2571 func baseRV(v interface{}) (rv reflect.Value) {
   2572 	// use reflect.ValueOf, not rv4i, as of go 1.16beta, rv4i was not inlineable
   2573 	for rv = reflect.ValueOf(v); rv.Kind() == reflect.Ptr; rv = rv.Elem() {
   2574 	}
   2575 	return
   2576 }
   2577 
   2578 // ----
   2579 
   2580 // these "checkOverflow" functions must be inlinable, and not call anybody.
   2581 // Overflow means that the value cannot be represented without wrapping/overflow.
   2582 // Overflow=false does not mean that the value can be represented without losing precision
   2583 // (especially for floating point).
   2584 
   2585 type checkOverflow struct{}
   2586 
   2587 func (checkOverflow) Float32(v float64) (overflow bool) {
   2588 	if v < 0 {
   2589 		v = -v
   2590 	}
   2591 	return math.MaxFloat32 < v && v <= math.MaxFloat64
   2592 }
   2593 func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
   2594 	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
   2595 		overflow = true
   2596 	}
   2597 	return
   2598 }
   2599 func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
   2600 	if v != 0 && v != (v<<(64-bitsize))>>(64-bitsize) {
   2601 		overflow = true
   2602 	}
   2603 	return
   2604 }
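
        // For concreteness: Uint(300, 8) computes (300<<56)>>56 == 44 != 300,
        // so 300 overflows an 8-bit unsigned integer.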
   2605 
   2606 func (checkOverflow) Uint2Int(v uint64, neg bool) (overflow bool) {
   2607 	return (neg && v > 1<<63) || (!neg && v >= 1<<63)
   2608 }
   2609 
   2610 func (checkOverflow) SignedInt(v uint64) (overflow bool) {
   2611 	// e.g. the range is -128 to 127 for int8
   2612 	// pos := (v >> 63) == 0
   2613 	// ui2 := v & 0x7fffffffffffffff
   2614 	// if pos {
   2615 	// 	if ui2 > math.MaxInt64 {
   2616 	// 		overflow = true
   2617 	// 	}
   2618 	// } else {
   2619 	// 	if ui2 > math.MaxInt64-1 {
   2620 	// 		overflow = true
   2621 	// 	}
   2622 	// }
   2623 
   2624 	// a signed integer overflows if the sign (first) bit is 1 (negative)
   2625 	// and the remaining bits represent a number > math.MaxInt64-1
   2626 	overflow = (v>>63) != 0 && v&0x7fffffffffffffff > math.MaxInt64-1
   2627 
   2628 	return
   2629 }
   2630 
   2631 func (x checkOverflow) Float32V(v float64) float64 {
   2632 	if x.Float32(v) {
   2633 		halt.errorf("float32 overflow: %v", v)
   2634 	}
   2635 	return v
   2636 }
   2637 func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
   2638 	if x.Uint(v, bitsize) {
   2639 		halt.errorf("uint64 overflow: %v", v)
   2640 	}
   2641 	return v
   2642 }
   2643 func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
   2644 	if x.Int(v, bitsize) {
   2645 		halt.errorf("int64 overflow: %v", v)
   2646 	}
   2647 	return v
   2648 }
   2649 func (x checkOverflow) SignedIntV(v uint64) int64 {
   2650 	if x.SignedInt(v) {
   2651 		halt.errorf("uint64 to int64 overflow: %v", v)
   2652 	}
   2653 	return int64(v)
   2654 }
   2655 
   2656 // ------------------ FLOATING POINT -----------------
   2657 
   2658 func isNaN64(f float64) bool { return f != f }
   2659 
   2660 func isWhitespaceChar(v byte) bool {
   2661 	// these are in order of speed below ...
   2662 
   2663 	return v < 33
   2664 	// return v < 33 && whitespaceCharBitset64.isset(v)
   2665 	// return v < 33 && (v == ' ' || v == '\n' || v == '\t' || v == '\r')
   2666 	// return v == ' ' || v == '\n' || v == '\t' || v == '\r'
   2667 	// return whitespaceCharBitset.isset(v)
   2668 }
   2669 
   2670 func isNumberChar(v byte) bool {
   2671 	// these are in order of speed below ...
   2672 
   2673 	return numCharBitset.isset(v)
   2674 	// return v < 64 && numCharNoExpBitset64.isset(v) || v == 'e' || v == 'E'
   2675 	// return v > 42 && v < 102 && numCharWithExpBitset64.isset(v-42)
   2676 }
   2677 
   2678 // -----------------------
   2679 
   2680 type ioFlusher interface {
   2681 	Flush() error
   2682 }
   2683 
   2684 type ioBuffered interface {
   2685 	Buffered() int
   2686 }
   2687 
   2688 // -----------------------
   2689 
   2690 type sfiRv struct {
   2691 	v *structFieldInfo
   2692 	r reflect.Value
   2693 }
   2694 
   2695 // ------
   2696 
   2697 // bitset types are better than [256]bool, because they permit the whole
   2698 // bitset array to sit on a single cache line and use less memory.
   2699 //
   2700 // Also, since pos is a byte (0-255), there are no bounds checks on indexing (cheap).
   2701 //
   2702 // We previously had bitset128 [16]byte, and bitset32 [4]byte, but those introduced
   2703 // bounds checking, so we discarded them, and everyone uses bitset256.
   2704 //
   2705 // given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
   2706 // consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
   2707 //
   2708 // Note that using >> or & is faster than using / or %, as division is quite expensive if not optimized.
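        //
        // For concreteness: with pos=45 and x=32 (n=5): 45/32 == 45>>5 == 1, and 45%32 == 45&31 == 13.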
   2709 
   2710 // MARKER:
   2711 // We noticed a little performance degradation when using bitset256 as [32]byte (or bitset32 as uint32).
   2712 // For example, json encoding went from 188K ns/op to 168K ns/op (~ 10% reduction).
   2713 // Consequently, we are using a [NNN]bool for bitsetNNN.
   2714 // To eliminate bounds-checking, we use x % v as that is guaranteed to be within bounds.
   2715 
   2716 // ----
   2717 type bitset32 [32]bool
   2718 
   2719 func (x *bitset32) set(pos byte) *bitset32 {
   2720 	x[pos&31] = true // x[pos%32] = true
   2721 	return x
   2722 }
   2723 func (x *bitset32) isset(pos byte) bool {
   2724 	return x[pos&31] // x[pos%32]
   2725 }
   2726 
   2727 type bitset256 [256]bool
   2728 
   2729 func (x *bitset256) set(pos byte) *bitset256 {
   2730 	x[pos] = true
   2731 	return x
   2732 }
   2733 func (x *bitset256) isset(pos byte) bool {
   2734 	return x[pos]
   2735 }
   2736 
   2737 // ------------
   2738 
   2739 type panicHdl struct{}
   2740 
   2741 // onerror will panic if err is defined (not nil)
   2742 func (panicHdl) onerror(err error) {
   2743 	if err != nil {
   2744 		panic(err)
   2745 	}
   2746 }
   2747 
   2748 // errorf will always panic, using the parameters passed.
   2749 //
   2750 // Note: it is ok to pass in a stringView, as it will just pass it directly
   2751 // to a fmt.Sprintf call and not hold onto it.
   2752 //
   2753 //go:noinline
   2754 func (panicHdl) errorf(format string, params ...interface{}) {
   2755 	if format == "" {
   2756 		panic(errPanicUndefined)
   2757 	}
   2758 	if len(params) == 0 {
   2759 		panic(errors.New(format))
   2760 	}
   2761 	panic(fmt.Errorf(format, params...))
   2762 }
   2763 
   2764 // ----------------------------------------------------
   2765 
   2766 type errDecorator interface {
   2767 	wrapErr(in error, out *error)
   2768 }
   2769 
   2770 type errDecoratorDef struct{}
   2771 
   2772 func (errDecoratorDef) wrapErr(v error, e *error) { *e = v }
   2773 
   2774 // ----------------------------------------------------
   2775 
   2776 type mustHdl struct{}
   2777 
   2778 func (mustHdl) String(s string, err error) string {
   2779 	halt.onerror(err)
   2780 	return s
   2781 }
   2782 func (mustHdl) Int(s int64, err error) int64 {
   2783 	halt.onerror(err)
   2784 	return s
   2785 }
   2786 func (mustHdl) Uint(s uint64, err error) uint64 {
   2787 	halt.onerror(err)
   2788 	return s
   2789 }
   2790 func (mustHdl) Float(s float64, err error) float64 {
   2791 	halt.onerror(err)
   2792 	return s
   2793 }
   2794 
   2795 // -------------------
   2796 
   2797 func freelistCapacity(length int) (capacity int) {
   2798 	for capacity = 8; capacity <= length; capacity *= 2 {
   2799 	}
   2800 	return
   2801 }
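
        // For concreteness: freelistCapacity(0) == 8, freelistCapacity(8) == 16,
        // and freelistCapacity(100) == 128: the smallest power of two strictly
        // greater than length, with a floor of 8.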
   2802 
   2803 // bytesFreelist is a list of byte buffers, sorted by cap.
   2804 //
   2805 // In anecdotal testing (running go test -tsd 1..6), we couldn't get
   2806 // the length of the list > 4 at any time. So we believe a linear search
   2807 // without bounds checking is sufficient.
   2808 //
   2809 // Typical usage model:
   2810 //
   2811 //	peek may go together with put, iff pop=true. peek gets largest byte slice temporarily.
   2812 //	check is used to switch a []byte if necessary
   2813 //	get/put go together
   2814 //
   2815 // Given that folks may get a []byte, and then append to it a lot which may re-allocate
   2816 // a new []byte, we should try to return both (one received from blist and new one allocated).
   2817 //
   2818 // Typical usage model for get/put, when we don't know whether we may need more than requested
   2819 //
   2820 //	v0 := blist.get()
   2821 //	v1 := v0
   2822 //	... use v1 ...
   2823 //	blist.put(v1)
   2824 //	if byteSliceAddr(v0) != byteSliceAddr(v1) {
   2825 //	  blist.put(v0)
   2826 //	}
   2827 type bytesFreelist [][]byte
   2828 
   2829 // peek returns a slice of possibly non-zero'ed bytes, with len=0,
   2830 // and with the largest capacity from the list.
   2831 func (x *bytesFreelist) peek(length int, pop bool) (out []byte) {
   2832 	if bytesFreeListNoCache {
   2833 		return make([]byte, 0, freelistCapacity(length))
   2834 	}
   2835 	y := *x
   2836 	if len(y) > 0 {
   2837 		out = y[len(y)-1]
   2838 	}
   2839 	// start buf with a minimum of 64 bytes
   2840 	const minLenBytes = 64
   2841 	if length < minLenBytes {
   2842 		length = minLenBytes
   2843 	}
   2844 	if cap(out) < length {
   2845 		out = make([]byte, 0, freelistCapacity(length))
   2846 		y = append(y, out)
   2847 		*x = y
   2848 	}
   2849 	if pop && len(y) > 0 {
   2850 		y = y[:len(y)-1]
   2851 		*x = y
   2852 	}
   2853 	return
   2854 }
   2855 
   2856 // get returns a slice of possibly non-zero'ed bytes, with len=0,
   2857 // and with cap >= length requested.
   2858 func (x *bytesFreelist) get(length int) (out []byte) {
   2859 	if bytesFreeListNoCache {
   2860 		return make([]byte, 0, freelistCapacity(length))
   2861 	}
   2862 	y := *x
   2863 	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
   2864 	// for i, v := range y {
   2865 	for i := 0; i < len(y); i++ {
   2866 		v := y[i]
   2867 		if cap(v) >= length {
   2868 			// *x = append(y[:i], y[i+1:]...)
   2869 			copy(y[i:], y[i+1:])
   2870 			*x = y[:len(y)-1]
   2871 			return v
   2872 		}
   2873 	}
   2874 	return make([]byte, 0, freelistCapacity(length))
   2875 }
   2876 
   2877 func (x *bytesFreelist) put(v []byte) {
   2878 	if bytesFreeListNoCache || cap(v) == 0 {
   2879 		return
   2880 	}
   2881 	if len(v) != 0 {
   2882 		v = v[:0]
   2883 	}
   2884 	// append the new value, then try to put it in a better position
   2885 	y := append(*x, v)
   2886 	*x = y
   2887 	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
   2888 	// for i, z := range y[:len(y)-1] {
   2889 	for i := 0; i < len(y)-1; i++ {
   2890 		z := y[i]
   2891 		if cap(z) > cap(v) {
   2892 			copy(y[i+1:], y[i:])
   2893 			y[i] = v
   2894 			return
   2895 		}
   2896 	}
   2897 }
   2898 
   2899 func (x *bytesFreelist) check(v []byte, length int) (out []byte) {
   2900 	// ensure inlineable, by moving slow-path out to its own function
   2901 	if cap(v) >= length {
   2902 		return v[:0]
   2903 	}
   2904 	return x.checkPutGet(v, length)
   2905 }
   2906 
   2907 func (x *bytesFreelist) checkPutGet(v []byte, length int) []byte {
   2908 	// checkPutGet broken out into its own function, so check is inlineable in general case
   2909 	const useSeparateCalls = false
   2910 
   2911 	if useSeparateCalls {
   2912 		x.put(v)
   2913 		return x.get(length)
   2914 	}
   2915 
   2916 	if bytesFreeListNoCache {
   2917 		return make([]byte, 0, freelistCapacity(length))
   2918 	}
   2919 
   2920 	// assume cap(v) < length, so put must happen before get
   2921 	y := *x
   2922 	var put = cap(v) == 0 // if empty, consider it already put
   2923 	if !put {
   2924 		y = append(y, v)
   2925 		*x = y
   2926 	}
   2927 	for i := 0; i < len(y); i++ {
   2928 		z := y[i]
   2929 		if put {
   2930 			if cap(z) >= length {
   2931 				copy(y[i:], y[i+1:])
   2932 				y = y[:len(y)-1]
   2933 				*x = y
   2934 				return z
   2935 			}
   2936 		} else {
   2937 			if cap(z) > cap(v) {
   2938 				copy(y[i+1:], y[i:])
   2939 				y[i] = v
   2940 				put = true
   2941 			}
   2942 		}
   2943 	}
   2944 	return make([]byte, 0, freelistCapacity(length))
   2945 }
   2946 
   2947 // -------------------------
   2948 
   2949 // sfiRvFreelist is used by Encoder for encoding structs,
   2950 // where we have to gather the fields first and then
   2951 // analyze them for omitEmpty, before knowing the length of the array/map to encode.
   2952 //
   2953 // Typically, the length here will depend on the number of cycles e.g.
   2954 // if type T1 has reference to T1, or T1 has reference to type T2 which has reference to T1.
   2955 //
   2956 // In the general case, the length of this list is 1 most of the time,
   2957 // so linear search is fine.
   2958 type sfiRvFreelist [][]sfiRv
   2959 
   2960 func (x *sfiRvFreelist) get(length int) (out []sfiRv) {
   2961 	y := *x
   2962 
   2963 	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
   2964 	// for i, v := range y {
   2965 	for i := 0; i < len(y); i++ {
   2966 		v := y[i]
   2967 		if cap(v) >= length {
   2968 			// *x = append(y[:i], y[i+1:]...)
   2969 			copy(y[i:], y[i+1:])
   2970 			*x = y[:len(y)-1]
   2971 			return v
   2972 		}
   2973 	}
   2974 	return make([]sfiRv, 0, freelistCapacity(length))
   2975 }
   2976 
   2977 func (x *sfiRvFreelist) put(v []sfiRv) {
   2978 	if len(v) != 0 {
   2979 		v = v[:0]
   2980 	}
   2981 	// append the new value, then try to put it in a better position
   2982 	y := append(*x, v)
   2983 	*x = y
   2984 	// MARKER: do not use range, as range is not currently inlineable as of go 1.16-beta
   2985 	// for i, z := range y[:len(y)-1] {
   2986 	for i := 0; i < len(y)-1; i++ {
   2987 		z := y[i]
   2988 		if cap(z) > cap(v) {
   2989 			copy(y[i+1:], y[i:])
   2990 			y[i] = v
   2991 			return
   2992 		}
   2993 	}
   2994 }
   2995 
   2996 // ---- multiple interner implementations ----
   2997 
   2998 // Hard to tell which is most performant:
   2999 //   - use a map[string]string - worst perf, no collisions, and unlimited entries
   3000 //   - use a linear search with move to front heuristics - no collisions, and maxed at 64 entries
   3001 //   - use a computationally-intensive hash - best performance, some collisions, maxed at 64 entries
   3002 
   3003 const (
   3004 	internMaxStrLen = 16     // if more than 16 bytes, faster to copy than compare bytes
   3005 	internCap       = 64 * 2 // 64 uses 1K bytes RAM, so 128 (anecdotal sweet spot) uses 2K bytes
   3006 )
   3007 
   3008 type internerMap map[string]string
   3009 
   3010 func (x *internerMap) init() {
   3011 	*x = make(map[string]string, internCap)
   3012 }
   3013 
   3014 func (x internerMap) string(v []byte) (s string) {
   3015 	s, ok := x[string(v)] // no allocation here, per go implementation
   3016 	if !ok {
   3017 		s = string(v) // new allocation here
   3018 		x[s] = s
   3019 	}
   3020 	return
   3021 }