gtsocial-umbx

Unnamed repository; edit this file 'description' to name the repository.
Log | Files | Refs | README | LICENSE

helper_unsafe.go (41589B)


      1 // Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
      2 // Use of this source code is governed by a MIT license found in the LICENSE file.
      3 
      4 //go:build !safe && !codec.safe && !appengine && go1.9
      5 // +build !safe,!codec.safe,!appengine,go1.9
      6 
      7 // minimum of go 1.9 is needed, as that is the minimum for all features and linked functions we need
      8 // - typedmemclr was introduced in go 1.8
      9 // - mapassign_fastXXX was introduced in go 1.9
     10 // etc
     11 
     12 package codec
     13 
     14 import (
     15 	"reflect"
     16 	_ "runtime" // needed for go linkname(s)
     17 	"sync/atomic"
     18 	"time"
     19 	"unsafe"
     20 )
     21 
     22 // This file has unsafe variants of some helper functions.
     23 // MARKER: See helper_unsafe.go for the usage documentation.
     24 
     25 // There are a number of helper_*unsafe*.go files.
     26 //
     27 // - helper_unsafe
     28 //   unsafe variants of dependent functions
     29 // - helper_unsafe_compiler_gc (gc)
     30 //   unsafe variants of dependent functions which cannot be shared with gollvm or gccgo
     31 // - helper_not_unsafe_not_gc (gccgo/gollvm or safe)
     32 //   safe variants of functions in helper_unsafe_compiler_gc
     33 // - helper_not_unsafe (safe)
     34 //   safe variants of functions in helper_unsafe
     35 // - helper_unsafe_compiler_not_gc (gccgo, gollvm)
     36 //   unsafe variants of functions/variables which non-standard compilers need
     37 //
     38 // This way, we can judiciously use build tags to include the right set of files
     39 // for any compiler, and make it run optimally in unsafe mode.
     40 //
     41 // As of March 2021, we cannot differentiate whether running with gccgo or gollvm
     42 // using a build constraint, as both satisfy 'gccgo' build tag.
     43 // Consequently, we must use the lowest common denominator to support both.
     44 
     45 // For reflect.Value code, we decided to do the following:
     46 //    - if we know the kind, we can elide conditional checks for
     47 //      - SetXXX (Int, Uint, String, Bool, etc)
     48 //      - SetLen
     49 //
     50 // We can also optimize
     51 //      - IsNil
     52 
     53 // MARKER: Some functions here will not be hit during code coverage runs due to optimizations, e.g.
     54 //   - rvCopySlice:      called by decode if rvGrowSlice did not set new slice into pointer to orig slice.
     55 //                       however, helper_unsafe sets it, so no need to call rvCopySlice later
     56 //   - rvSlice:          same as above
     57 
// safeMode is false in this build: the unsafe helpers in this file are active.
// (The safe variants are compiled instead when the safe/codec.safe/appengine tags are set.)
const safeMode = false

// helperUnsafeDirectAssignMapEntry says that we should not copy the pointer in the map
// to another value during mapRange/iteration and mapGet calls, but directly assign it.
//
// The only callers of mapRange/iteration is encode.
// Here, we just walk through the values and encode them
//
// The only caller of mapGet is decode.
// Here, it does a Get if the underlying value is a pointer, and decodes into that.
//
// For both users, we are very careful NOT to modify or keep the pointers around.
// Consequently, it is ok to take advantage of the fact that the map is not modified
// during an iteration, and we can just "peek" at the internal value in the map and use it.
const helperUnsafeDirectAssignMapEntry = true
     73 
// MARKER: keep in sync with GO_ROOT/src/reflect/value.go
//
// These constants mirror the private flag bits of reflect.Value.
const (
	unsafeFlagStickyRO = 1 << 5
	unsafeFlagEmbedRO  = 1 << 6
	unsafeFlagIndir    = 1 << 7
	unsafeFlagAddr     = 1 << 8
	unsafeFlagRO       = unsafeFlagStickyRO | unsafeFlagEmbedRO
	// unsafeFlagKindMask = (1 << 5) - 1 // 5 bits for 27 kinds (up to 31)
	// unsafeTypeKindDirectIface = 1 << 5
)

// transientSizeMax below is used in TransientAddr as the backing storage.
//
// Must be >= 16 as the maximum size is a complex128 (or string on 64-bit machines).
const transientSizeMax = 64

// transientValueHasStringSlice: should struct/array support internal strings and slices?
const transientValueHasStringSlice = false
     92 
// unsafeString mirrors the runtime header of a string (data pointer + length).
type unsafeString struct {
	Data unsafe.Pointer
	Len  int
}

// unsafeSlice mirrors the runtime header of a slice (data pointer + length + capacity).
type unsafeSlice struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

// unsafeIntf mirrors the runtime layout of an interface value:
// a type word followed by a data word.
type unsafeIntf struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

// unsafeReflectValue mirrors reflect.Value: an interface-shaped pair plus flags.
type unsafeReflectValue struct {
	unsafeIntf
	flag uintptr
}

// unsafeRuntimeType mirrors the leading field(s) of the runtime type descriptor.
// keep in sync with stdlib runtime/type.go
type unsafeRuntimeType struct {
	size uintptr
	// ... many other fields here
}
    119 
// unsafeZeroAddr and unsafeZeroSlice point to a read-only block of memory
// used for setting a zero value for most types or creating a read-only
// zero value for a given type.
var (
	unsafeZeroAddr  = unsafe.Pointer(&unsafeZeroArr[0])
	unsafeZeroSlice = unsafeSlice{unsafeZeroAddr, 0, 0}
)

// We use a scratch memory and an unsafeSlice for transient values:
//
// unsafeSlice is used for standalone strings and slices (outside an array or struct).
// scratch memory is used for other kinds, based on contract below:
// - numbers, bool are always transient
// - structs and arrays are transient iff they have no pointers i.e.
//   no string, slice, chan, func, interface, map, etc only numbers and bools.
// - slices and strings are transient (using the unsafeSlice)

// unsafePerTypeElem is one scratch slot backing a transient value.
type unsafePerTypeElem struct {
	arr   [transientSizeMax]byte // for bool, number, struct, array kinds
	slice unsafeSlice            // for string and slice kinds
}
    141 
    142 func (x *unsafePerTypeElem) addrFor(k reflect.Kind) unsafe.Pointer {
    143 	if k == reflect.String || k == reflect.Slice {
    144 		x.slice = unsafeSlice{} // memclr
    145 		return unsafe.Pointer(&x.slice)
    146 	}
    147 	x.arr = [transientSizeMax]byte{} // memclr
    148 	return unsafe.Pointer(&x.arr)
    149 }
    150 
// perType holds two scratch slots, so that two transient addresses
// (TransientAddrK and TransientAddr2K) can be live at the same time.
type perType struct {
	elems [2]unsafePerTypeElem
}

// decPerType is the decoder's per-type scratch storage.
type decPerType struct {
	perType
}

// encPerType is a stateless marker type giving the encoder access to AddressableRO.
type encPerType struct{}
    160 
// TransientAddrK is used for getting a *transient* value to be decoded into,
// which will right away be used for something else.
//
// See notes in helper.go about "Transient values during decoding"

func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[0].addrFor(k))
}

// TransientAddr2K is like TransientAddrK but uses the second scratch slot,
// so it can be used while a TransientAddrK value is still in use.
func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) reflect.Value {
	return rvZeroAddrTransientAnyK(t, k, x.elems[1].addrFor(k))
}

// AddressableRO returns a read-only addressable view of v (see rvAddressableReadonly).
func (encPerType) AddressableRO(v reflect.Value) reflect.Value {
	return rvAddressableReadonly(v)
}
    177 
// byteAt returns the byte given an index which is guaranteed
// to be within the bounds of the slice i.e. we defensively
// already verified that the index is less than the length of the slice.
//
// It elides the bounds check that b[index] would incur.
func byteAt(b []byte, index uint) byte {
	// return b[index]
	return *(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index)))
}
    185 
// byteSliceOf returns b[start:end], eliding the bounds checks.
// Callers must have already verified that 0 <= start <= end <= len(b).
func byteSliceOf(b []byte, start, end uint) []byte {
	s := (*unsafeSlice)(unsafe.Pointer(&b))
	s.Data = unsafe.Pointer(uintptr(s.Data) + uintptr(start))
	s.Len = int(end - start)
	s.Cap -= int(start)
	return b
}

// func byteSliceWithLen(b []byte, length uint) []byte {
// 	(*unsafeSlice)(unsafe.Pointer(&b)).Len = int(length)
// 	return b
// }
    198 
// setByteAt writes val at the given index, eliding the bounds check.
// Callers must have already verified that index < len(b).
func setByteAt(b []byte, index uint, val byte) {
	// b[index] = val
	*(*byte)(unsafe.Pointer(uintptr((*unsafeSlice)(unsafe.Pointer(&b)).Data) + uintptr(index))) = val
}
    203 
    204 // stringView returns a view of the []byte as a string.
    205 // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
    206 // In regular safe mode, it is an allocation and copy.
    207 func stringView(v []byte) string {
    208 	return *(*string)(unsafe.Pointer(&v))
    209 }
    210 
    211 // bytesView returns a view of the string as a []byte.
    212 // In unsafe mode, it doesn't incur allocation and copying caused by conversion.
    213 // In regular safe mode, it is an allocation and copy.
    214 func bytesView(v string) (b []byte) {
    215 	sx := (*unsafeString)(unsafe.Pointer(&v))
    216 	bx := (*unsafeSlice)(unsafe.Pointer(&b))
    217 	bx.Data, bx.Len, bx.Cap = sx.Data, sx.Len, sx.Len
    218 	return
    219 }
    220 
    221 func byteSliceSameData(v1 []byte, v2 []byte) bool {
    222 	return (*unsafeSlice)(unsafe.Pointer(&v1)).Data == (*unsafeSlice)(unsafe.Pointer(&v2)).Data
    223 }
    224 
// MARKER: okBytesN functions will copy N bytes into the top slots of the return array.
// These functions expect that the bounds check already occurred and the first N bytes are valid.
// copy(...) does a number of checks which are unnecessary in this situation when in bounds.

func okBytes2(b []byte) [2]byte {
	return *((*[2]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}

func okBytes3(b []byte) [3]byte {
	return *((*[3]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}

func okBytes4(b []byte) [4]byte {
	return *((*[4]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}

func okBytes8(b []byte) [8]byte {
	return *((*[8]byte)(((*unsafeSlice)(unsafe.Pointer(&b))).Data))
}
    244 
// isNil says whether the value v is nil.
// This applies to references like map/ptr/unsafepointer/chan/func,
// and non-reference values like interface/slice.
//
// Fast path: if the interface data word is nil, v is nil. Otherwise, fall
// back to reflection for interface/slice kinds, whose data word points at
// the actual header and needs one more dereference.
func isNil(v interface{}) (rv reflect.Value, isnil bool) {
	var ui = (*unsafeIntf)(unsafe.Pointer(&v))
	isnil = ui.ptr == nil
	if !isnil {
		rv, isnil = unsafeIsNilIntfOrSlice(ui, v)
	}
	return
}

// unsafeIsNilIntfOrSlice handles the slow path of isNil: for interface and
// slice kinds, dereference the data word once more and test that for nil.
func unsafeIsNilIntfOrSlice(ui *unsafeIntf, v interface{}) (rv reflect.Value, isnil bool) {
	rv = reflect.ValueOf(v) // reflect.ValueOf is currently not inline'able - so call it directly
	tk := rv.Kind()
	isnil = (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.ptr) == nil
	return
}
    263 
    264 // return the pointer for a reference (map/chan/func/pointer/unsafe.Pointer).
    265 // true references (map, func, chan, ptr - NOT slice) may be double-referenced? as flagIndir
    266 //
    267 // Assumes that v is a reference (map/func/chan/ptr/func)
    268 func rvRefPtr(v *unsafeReflectValue) unsafe.Pointer {
    269 	if v.flag&unsafeFlagIndir != 0 {
    270 		return *(*unsafe.Pointer)(v.ptr)
    271 	}
    272 	return v.ptr
    273 }
    274 
    275 func eq4i(i0, i1 interface{}) bool {
    276 	v0 := (*unsafeIntf)(unsafe.Pointer(&i0))
    277 	v1 := (*unsafeIntf)(unsafe.Pointer(&i1))
    278 	return v0.typ == v1.typ && v0.ptr == v1.ptr
    279 }
    280 
// rv4iptr synthesizes a reflect.Value of kind Ptr from an interface known
// to hold a non-nil pointer, by copying the interface words and setting the flag.
//
// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
func rv4iptr(i interface{}) (v reflect.Value) {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindPtr)
	return
}

// rv4istr synthesizes a reflect.Value of kind String from an interface known
// to hold a string (stored indirectly, hence the flagIndir bit).
//
// Main advantage here is that it is inlined, nothing escapes to heap, i is never nil
func rv4istr(i interface{}) (v reflect.Value) {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.unsafeIntf = *(*unsafeIntf)(unsafe.Pointer(&i))
	uv.flag = uintptr(rkindString) | unsafeFlagIndir
	return
}
    296 
// rv2i returns the interface{} representation of rv, without going through
// reflect.Value.Interface() and its attendant checks/allocations.
//
// We tap into implementation details from
// the source go stdlib reflect/value.go, and trims the implementation.
//
// e.g.
// - a map/ptr is a reference,        thus flagIndir is not set on it
// - an int/slice is not a reference, thus flagIndir is set on it
func rv2i(rv reflect.Value) (i interface{}) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// for reference kinds stored indirectly, unwrap one level so that the
	// interface data word holds the reference itself
	if refBitset.isset(byte(rv.Kind())) && urv.flag&unsafeFlagIndir != 0 {
		urv.ptr = *(*unsafe.Pointer)(urv.ptr)
	}
	return *(*interface{})(unsafe.Pointer(&urv.unsafeIntf))
}
    311 
// rvAddr returns a reflect.Value of type ptrType holding the address of rv,
// i.e. rv.Addr() without the addressability check.
// The read-only bits are preserved; indir/addr bits are dropped.
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.flag = (urv.flag & unsafeFlagRO) | uintptr(reflect.Ptr)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&ptrType))).ptr
	return rv
}
    318 
    319 func rvIsNil(rv reflect.Value) bool {
    320 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    321 	if urv.flag&unsafeFlagIndir != 0 {
    322 		return *(*unsafe.Pointer)(urv.ptr) == nil
    323 	}
    324 	return urv.ptr == nil
    325 }
    326 
// rvSetSliceLen sets the length of the slice in-place, without the
// writability/kind checks of rv.SetLen.
// (the unsafeString view suffices here, as Data and Len are the
// overlapping leading fields of string and slice headers.)
func rvSetSliceLen(rv reflect.Value, length int) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	(*unsafeString)(urv.ptr).Len = length
}
    331 
// rvZeroAddrK returns an addressable zero reflect.Value of type t and kind k,
// backed by freshly allocated memory.
func rvZeroAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = unsafeNew(urv.typ)
	return
}

// rvZeroAddrTransientAnyK is like rvZeroAddrK, but backed by the
// caller-supplied scratch address instead of a new allocation.
func rvZeroAddrTransientAnyK(t reflect.Type, k reflect.Kind, addr unsafe.Pointer) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.ptr = addr
	return
}
    347 
// rvZeroK returns a non-addressable zero reflect.Value of type t and kind k.
// Reference kinds need no backing memory; values that fit share the global
// read-only zero block; larger structs/arrays get a fresh allocation.
func rvZeroK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	if refBitset.isset(byte(k)) {
		urv.flag = uintptr(k)
	} else if rtsize2(urv.typ) <= uintptr(len(unsafeZeroArr)) {
		urv.flag = uintptr(k) | unsafeFlagIndir
		urv.ptr = unsafeZeroAddr
	} else { // meaning struct or array
		urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
    362 
// rvConvert will convert a value to a different type directly,
// ensuring that they still point to the same underlying value.
// Only the type word is swapped; data pointer and flags are untouched.
func rvConvert(v reflect.Value, t reflect.Type) reflect.Value {
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	return v
}
    370 
// rvAddressableReadonly returns an addressable reflect.Value.
//
// Use it within encode calls, when you just want to "read" the underlying ptr
// without modifying the value.
//
// Note that it cannot be used for r/w use, as those non-addressable values
// may have been stored in read-only memory, and trying to write the pointer
// may cause a segfault.
func rvAddressableReadonly(v reflect.Value) reflect.Value {
	// hack to make an addressable value out of a non-addressable one.
	// Assume folks calling it are passing a value that can be addressable, but isn't.
	// This assumes that the flagIndir is already set on it.
	// so we just set the flagAddr bit on the flag (and do not set the flagIndir).

	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uv.flag | unsafeFlagAddr // | unsafeFlagIndir

	return v
}
    390 
// rtsize2 returns the size in bytes of the runtime type whose descriptor is at rt.
func rtsize2(rt unsafe.Pointer) uintptr {
	return ((*unsafeRuntimeType)(rt)).size
}

// rt2id returns a unique id for a reflect.Type: the address of its
// runtime type descriptor (the interface data word).
func rt2id(rt reflect.Type) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).ptr)
}

// i2rtid returns the type id of the value stored in the interface:
// the address of its runtime type descriptor (the interface type word).
func i2rtid(i interface{}) uintptr {
	return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ)
}
    402 
// --------------------------

// unsafeCmpZero reports whether the size bytes at ptr are all zero.
// It does a memcmp (via string comparison) against a known zero block,
// allocating a fresh zero buffer only when size exceeds the global one.
func unsafeCmpZero(ptr unsafe.Pointer, size int) bool {
	// verified that size is always within right range, so no chance of OOM
	var s1 = unsafeString{ptr, size}
	var s2 = unsafeString{unsafeZeroAddr, size}
	if size > len(unsafeZeroArr) {
		arr := make([]byte, size)
		s2.Data = unsafe.Pointer(&arr[0])
	}
	return *(*string)(unsafe.Pointer(&s1)) == *(*string)(unsafe.Pointer(&s2)) // memcmp
}
    415 
// isEmptyValue reports whether v is the zero/empty value of its type.
// An invalid value (flag == 0) is empty. In non-recursive mode, the value's
// bytes are simply memcmp'd against zero; in recursive mode, the kind-aware
// fallback is used (which descends into pointers/interfaces).
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if urv.flag == 0 {
		return true
	}
	if recursive {
		return isEmptyValueFallbackRecur(urv, v, tinfos)
	}
	return unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
}
    426 
// isEmptyValueFallbackRecur is the kind-by-kind emptiness check backing
// isEmptyValue, recursing through pointers and interfaces.
// Scalars are read directly through urv.ptr; strings/slices check Len;
// chans/maps check length via the runtime; structs memcmp their full size.
func isEmptyValueFallbackRecur(urv *unsafeReflectValue, v reflect.Value, tinfos *TypeInfos) bool {
	const recursive = true

	switch v.Kind() {
	case reflect.Invalid:
		return true
	case reflect.String:
		return (*unsafeString)(urv.ptr).Len == 0
	case reflect.Slice:
		return (*unsafeSlice)(urv.ptr).Len == 0
	case reflect.Bool:
		return !*(*bool)(urv.ptr)
	case reflect.Int:
		return *(*int)(urv.ptr) == 0
	case reflect.Int8:
		return *(*int8)(urv.ptr) == 0
	case reflect.Int16:
		return *(*int16)(urv.ptr) == 0
	case reflect.Int32:
		return *(*int32)(urv.ptr) == 0
	case reflect.Int64:
		return *(*int64)(urv.ptr) == 0
	case reflect.Uint:
		return *(*uint)(urv.ptr) == 0
	case reflect.Uint8:
		return *(*uint8)(urv.ptr) == 0
	case reflect.Uint16:
		return *(*uint16)(urv.ptr) == 0
	case reflect.Uint32:
		return *(*uint32)(urv.ptr) == 0
	case reflect.Uint64:
		return *(*uint64)(urv.ptr) == 0
	case reflect.Uintptr:
		return *(*uintptr)(urv.ptr) == 0
	case reflect.Float32:
		return *(*float32)(urv.ptr) == 0
	case reflect.Float64:
		return *(*float64)(urv.ptr) == 0
	case reflect.Complex64:
		return unsafeCmpZero(urv.ptr, 8)
	case reflect.Complex128:
		return unsafeCmpZero(urv.ptr, 16)
	case reflect.Struct:
		// memcmp the whole struct against zero, using the cached size from typeInfo
		if tinfos == nil {
			tinfos = defTypeInfos
		}
		ti := tinfos.find(uintptr(urv.typ))
		if ti == nil {
			ti = tinfos.load(v.Type())
		}
		return unsafeCmpZero(urv.ptr, int(ti.size))
	case reflect.Interface, reflect.Ptr:
		// isnil := urv.ptr == nil // (not sufficient, as a pointer value encodes the type)
		isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
		if recursive && !isnil {
			return isEmptyValue(v.Elem(), tinfos, recursive)
		}
		return isnil
	case reflect.UnsafePointer:
		return urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil
	case reflect.Chan:
		return urv.ptr == nil || len_chan(rvRefPtr(urv)) == 0
	case reflect.Map:
		return urv.ptr == nil || len_map(rvRefPtr(urv)) == 0
	case reflect.Array:
		return v.Len() == 0 ||
			urv.ptr == nil ||
			urv.typ == nil ||
			rtsize2(urv.typ) == 0 ||
			unsafeCmpZero(urv.ptr, int(rtsize2(urv.typ)))
	}
	return false
}
    501 
// --------------------------

// structFieldInfos stores the source and sorted []*structFieldInfo slices
// compactly: two data pointers plus one shared length (they are the same length).
type structFieldInfos struct {
	c      unsafe.Pointer // source
	s      unsafe.Pointer // sorted
	length int
}
    509 
    510 func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
    511 	s := (*unsafeSlice)(unsafe.Pointer(&sorted))
    512 	x.s = s.Data
    513 	x.length = s.Len
    514 	s = (*unsafeSlice)(unsafe.Pointer(&source))
    515 	x.c = s.Data
    516 }
    517 
    518 func (x *structFieldInfos) sorted() (v []*structFieldInfo) {
    519 	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.s, x.length, x.length}
    520 	// s := (*unsafeSlice)(unsafe.Pointer(&v))
    521 	// s.Data = x.sorted0
    522 	// s.Len = x.length
    523 	// s.Cap = s.Len
    524 	return
    525 }
    526 
    527 func (x *structFieldInfos) source() (v []*structFieldInfo) {
    528 	*(*unsafeSlice)(unsafe.Pointer(&v)) = unsafeSlice{x.c, x.length, x.length}
    529 	return
    530 }
    531 
// atomicXXX is expected to be 2 words (for symmetry with atomic.Value)
//
// Note that we do not atomically load/store length and data pointer separately,
// as this could lead to some races. Instead, we atomically load/store cappedSlice.
//
// Note: with atomic.(Load|Store)Pointer, we MUST work with an unsafe.Pointer directly.

// ----------------------

// atomicTypeInfoSlice atomically publishes a []rtid2ti via a single pointer word.
type atomicTypeInfoSlice struct {
	v unsafe.Pointer // *[]rtid2ti
}

// load returns the last stored slice (nil if nothing was ever stored).
func (x *atomicTypeInfoSlice) load() (s []rtid2ti) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]rtid2ti)(x2)
	}
	return
}

// store atomically publishes a pointer to (a copy of) the slice header p.
func (x *atomicTypeInfoSlice) store(p []rtid2ti) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
    555 
// MARKER: in safe mode, atomicXXX are atomic.Value, which contains an interface{}.
// This is 2 words.
// consider padding atomicXXX here with a uintptr, so they fit into 2 words also.

// --------------------------

// atomicRtidFnSlice atomically publishes a []codecRtidFn via a single pointer word.
type atomicRtidFnSlice struct {
	v unsafe.Pointer // *[]codecRtidFn
}

// load returns the last stored slice (nil if nothing was ever stored).
func (x *atomicRtidFnSlice) load() (s []codecRtidFn) {
	x2 := atomic.LoadPointer(&x.v)
	if x2 != nil {
		s = *(*[]codecRtidFn)(x2)
	}
	return
}

// store atomically publishes a pointer to (a copy of) the slice header p.
func (x *atomicRtidFnSlice) store(p []codecRtidFn) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
    576 
// --------------------------

// atomicClsErr atomically publishes a clsErr via a single pointer word.
type atomicClsErr struct {
	v unsafe.Pointer // *clsErr
}

// load returns the last stored clsErr (the zero value if nothing was ever stored).
func (x *atomicClsErr) load() (e clsErr) {
	x2 := (*clsErr)(atomic.LoadPointer(&x.v))
	if x2 != nil {
		e = *x2
	}
	return
}

// store atomically publishes a pointer to (a copy of) p.
func (x *atomicClsErr) store(p clsErr) {
	atomic.StorePointer(&x.v, unsafe.Pointer(&p))
}
    593 
// --------------------------

// to create a reflect.Value for each member field of fauxUnion,
// we first create a global fauxUnion, and create reflect.Value
// for them all.
// This way, we have the flags and type in the reflect.Value.
// Then, when a reflect.Value is called, we just copy it,
// update the ptr to the fauxUnion's, and return it.

type unsafeDecNakedWrapper struct {
	fauxUnion
	ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above
}

// init builds the template reflect.Values, one per fauxUnion field.
func (n *unsafeDecNakedWrapper) init() {
	n.ru = rv4iptr(&n.u).Elem()
	n.ri = rv4iptr(&n.i).Elem()
	n.rf = rv4iptr(&n.f).Elem()
	n.rl = rv4iptr(&n.l).Elem()
	n.rs = rv4iptr(&n.s).Elem()
	n.rt = rv4iptr(&n.t).Elem()
	n.rb = rv4iptr(&n.b).Elem()
	// n.rr[] = reflect.ValueOf(&n.)
}

// defUnsafeDecNakedWrapper holds the global template values that the
// fauxUnion rXX methods below copy and repoint.
var defUnsafeDecNakedWrapper unsafeDecNakedWrapper

func init() {
	defUnsafeDecNakedWrapper.init()
}
    624 
// Each fauxUnion rXX method below copies the pre-built template
// reflect.Value and repoints its data word at this fauxUnion's field.

func (n *fauxUnion) ru() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ru
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.u)
	return
}
func (n *fauxUnion) ri() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.ri
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.i)
	return
}
func (n *fauxUnion) rf() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rf
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.f)
	return
}
func (n *fauxUnion) rl() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rl
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.l)
	return
}
func (n *fauxUnion) rs() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rs
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.s)
	return
}
func (n *fauxUnion) rt() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rt
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.t)
	return
}
func (n *fauxUnion) rb() (v reflect.Value) {
	v = defUnsafeDecNakedWrapper.rb
	((*unsafeReflectValue)(unsafe.Pointer(&v))).ptr = unsafe.Pointer(&n.b)
	return
}
    660 
    661 // --------------------------
    662 func rvSetBytes(rv reflect.Value, v []byte) {
    663 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    664 	*(*[]byte)(urv.ptr) = v
    665 }
    666 
    667 func rvSetString(rv reflect.Value, v string) {
    668 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    669 	*(*string)(urv.ptr) = v
    670 }
    671 
    672 func rvSetBool(rv reflect.Value, v bool) {
    673 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    674 	*(*bool)(urv.ptr) = v
    675 }
    676 
    677 func rvSetTime(rv reflect.Value, v time.Time) {
    678 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    679 	*(*time.Time)(urv.ptr) = v
    680 }
    681 
    682 func rvSetFloat32(rv reflect.Value, v float32) {
    683 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    684 	*(*float32)(urv.ptr) = v
    685 }
    686 
    687 func rvSetFloat64(rv reflect.Value, v float64) {
    688 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    689 	*(*float64)(urv.ptr) = v
    690 }
    691 
    692 func rvSetComplex64(rv reflect.Value, v complex64) {
    693 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    694 	*(*complex64)(urv.ptr) = v
    695 }
    696 
    697 func rvSetComplex128(rv reflect.Value, v complex128) {
    698 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    699 	*(*complex128)(urv.ptr) = v
    700 }
    701 
    702 func rvSetInt(rv reflect.Value, v int) {
    703 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    704 	*(*int)(urv.ptr) = v
    705 }
    706 
    707 func rvSetInt8(rv reflect.Value, v int8) {
    708 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    709 	*(*int8)(urv.ptr) = v
    710 }
    711 
    712 func rvSetInt16(rv reflect.Value, v int16) {
    713 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    714 	*(*int16)(urv.ptr) = v
    715 }
    716 
    717 func rvSetInt32(rv reflect.Value, v int32) {
    718 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    719 	*(*int32)(urv.ptr) = v
    720 }
    721 
    722 func rvSetInt64(rv reflect.Value, v int64) {
    723 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    724 	*(*int64)(urv.ptr) = v
    725 }
    726 
    727 func rvSetUint(rv reflect.Value, v uint) {
    728 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    729 	*(*uint)(urv.ptr) = v
    730 }
    731 
    732 func rvSetUintptr(rv reflect.Value, v uintptr) {
    733 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    734 	*(*uintptr)(urv.ptr) = v
    735 }
    736 
    737 func rvSetUint8(rv reflect.Value, v uint8) {
    738 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    739 	*(*uint8)(urv.ptr) = v
    740 }
    741 
    742 func rvSetUint16(rv reflect.Value, v uint16) {
    743 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    744 	*(*uint16)(urv.ptr) = v
    745 }
    746 
    747 func rvSetUint32(rv reflect.Value, v uint32) {
    748 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    749 	*(*uint32)(urv.ptr) = v
    750 }
    751 
    752 func rvSetUint64(rv reflect.Value, v uint64) {
    753 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    754 	*(*uint64)(urv.ptr) = v
    755 }
    756 
// ----------------

// rvSetZero is rv.Set(reflect.Zero(rv.Type()) for all kinds (including reflect.Interface).
func rvSetZero(rv reflect.Value) {
	rvSetDirectZero(rv)
}

// rvSetIntf is rv.Set(v), delegating to the reflect package
// (interface kinds need its full book-keeping).
func rvSetIntf(rv reflect.Value, v reflect.Value) {
	rv.Set(v)
}
    767 
// rvSetDirect is rv.Set for all kinds except reflect.Interface.
//
// Callers MUST not pass a value of kind reflect.Interface, as it may cause unexpected segfaults.
func rvSetDirect(rv reflect.Value, v reflect.Value) {
	// MARKER: rv.Set for kind reflect.Interface may do a separate allocation if a scalar value.
	// The book-keeping is onerous, so we just do the simple ones where a memmove is sufficient.
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	if uv.flag&unsafeFlagIndir == 0 {
		// v is stored directly in its data word: copy the pointer word itself
		*(*unsafe.Pointer)(urv.ptr) = uv.ptr
	} else if uv.ptr == unsafeZeroAddr {
		// v is the shared read-only zero value: zero the destination instead of copying from it
		if urv.ptr != unsafeZeroAddr {
			typedmemclr(urv.typ, urv.ptr)
		}
	} else {
		typedmemmove(urv.typ, urv.ptr, uv.ptr)
	}
}

// rvSetDirectZero is rv.Set(reflect.Zero(rv.Type()) for all kinds except reflect.Interface.
func rvSetDirectZero(rv reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	if urv.ptr != unsafeZeroAddr {
		typedmemclr(urv.typ, urv.ptr)
	}
}
    794 
// rvMakeSlice updates the slice to point to a new array.
// It copies data from old slice to new slice.
// It returns set=true iff it updates it, else it just returns a new slice pointing to a newly made array.
// (this unsafe variant always updates the slice header in place, so set is always true.)
func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (_ reflect.Value, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr)
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	s := unsafeSlice{newarray(t, xcap), xlen, xcap}
	if ux.Len > 0 {
		typedslicecopy(t, s, *ux)
	}
	*ux = s
	return rv, true
}
    809 
// rvSlice returns a sub-slice of the slice given new length,
// without modifying passed in value.
// It is typically called when we know that SetLen(...) cannot be done.
func rvSlice(rv reflect.Value, length int) reflect.Value {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	var x []struct{}
	ux := (*unsafeSlice)(unsafe.Pointer(&x)) // scratch header, so the original slice is untouched
	*ux = *(*unsafeSlice)(urv.ptr)
	ux.Len = length
	urv.ptr = unsafe.Pointer(ux)
	return rv
}
    822 
// rvGrowSlice updates the slice to point to a new array with the cap incremented, and len set to the new cap value.
// It copies data from old slice to new slice.
// NOTE(review): this unsafe variant always updates the header in place, so set is always true.
func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	ux := (*unsafeSlice)(urv.ptr) // slice header held by rv; updated in place
	t := ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr // runtime type of the element
	*ux = unsafeGrowslice(t, *ux, cap, incr)
	ux.Len = ux.Cap // expose the full new capacity as the length
	return rv, ux.Cap, true
}
    834 
    835 // ------------
    836 
// rvSliceIndex returns the i'th element of the slice in rv as an addressable
// reflect.Value, built by hand (without allocation) from the slice's data
// pointer plus i*elemsize. No bounds check is done - assumes the caller
// keeps i within the slice's length.
func rvSliceIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.ptr = unsafe.Pointer(uintptr(((*unsafeSlice)(urv.ptr)).Data) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	// element kind, marked indirect and addressable so callers can Set into it
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}
    845 
// rvSliceZeroCap returns an empty (len=0, cap=0) slice value of type t,
// backed by the shared zero slice header, avoiding an allocation.
// Note: it is indirect but NOT addressable, so it cannot be Set into.
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	urv.flag = uintptr(reflect.Slice) | unsafeFlagIndir
	urv.ptr = unsafe.Pointer(&unsafeZeroSlice)
	return
}
    853 
    854 func rvLenSlice(rv reflect.Value) int {
    855 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    856 	return (*unsafeSlice)(urv.ptr).Len
    857 }
    858 
    859 func rvCapSlice(rv reflect.Value) int {
    860 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    861 	return (*unsafeSlice)(urv.ptr).Cap
    862 }
    863 
// rvArrayIndex returns the i'th element of the array in rv as an addressable
// reflect.Value: ptr = array base + i*elemsize. No bounds check is done -
// assumes the caller keeps i within the array's length.
func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) (v reflect.Value) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.ptr = unsafe.Pointer(uintptr(urv.ptr) + uintptr(int(ti.elemsize)*i))
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&ti.elem))).ptr
	// element kind, marked indirect and addressable so callers can Set into it
	uv.flag = uintptr(ti.elemkind) | unsafeFlagIndir | unsafeFlagAddr
	return
}
    872 
// rvGetArrayBytes returns a []byte sharing storage with the byte array in rv
// (len == cap == rv.Len(); no copy is made).
// if scratch is nil, then return a writable view (assuming canAddr=true)
// NOTE(review): scratch is unused in this unsafe variant; the parameter
// presumably exists to mirror the safe variant's signature - confirm
// against the helper_not_unsafe file.
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	bx := (*unsafeSlice)(unsafe.Pointer(&bs))
	bx.Data = urv.ptr // point directly at the array's storage
	bx.Len = rv.Len()
	bx.Cap = bx.Len
	return
}
    882 
// rvGetArray4Slice returns an addressable array-typed reflect.Value that
// shares storage with the slice in rv (no copying); the array length is
// the slice's length.
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
	// It is possible that this slice is based off an array with a larger
	// len that we want (where array len == slice cap).
	// However, it is ok to create an array type that is a subset of the full
	// e.g. full slice is based off a *[16]byte, but we can create a *[4]byte
	// off of it. That is ok.
	//
	// Consequently, we use rvLenSlice, not rvCapSlice.

	t := reflectArrayOf(rvLenSlice(rv), rv.Type().Elem())
	// v = rvZeroAddrK(t, reflect.Array)

	// hand-build the array value: addressable, indirect, of the computed array type
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	uv.flag = uintptr(reflect.Array) | unsafeFlagIndir | unsafeFlagAddr
	uv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr

	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	uv.ptr = *(*unsafe.Pointer)(urv.ptr) // slice rv has a ptr to the slice.

	return
}
    904 
// rvGetSlice4Array populates the slice that v points to (v is a *[]T passed
// as interface{}) with a view over the array held in rv: Data points at the
// array's storage and len == cap == rv.Len(). No copying is done.
func rvGetSlice4Array(rv reflect.Value, v interface{}) {
	// v is a pointer to a slice to be populated
	uv := (*unsafeIntf)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))

	s := (*unsafeSlice)(uv.ptr)
	s.Data = urv.ptr // share the array's storage directly
	s.Len = rv.Len()
	s.Cap = s.Len
}
    915 
// rvCopySlice copies elements of elemType from the src slice into dest,
// delegating to reflect.typedslicecopy (which copies min(len(dest), len(src))
// elements and applies write barriers as needed).
func rvCopySlice(dest, src reflect.Value, elemType reflect.Type) {
	typedslicecopy((*unsafeIntf)(unsafe.Pointer(&elemType)).ptr,
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&dest)).ptr),
		*(*unsafeSlice)((*unsafeReflectValue)(unsafe.Pointer(&src)).ptr))
}
    921 
    922 // ------------
    923 
    924 func rvGetBool(rv reflect.Value) bool {
    925 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    926 	return *(*bool)(v.ptr)
    927 }
    928 
    929 func rvGetBytes(rv reflect.Value) []byte {
    930 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    931 	return *(*[]byte)(v.ptr)
    932 }
    933 
    934 func rvGetTime(rv reflect.Value) time.Time {
    935 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    936 	return *(*time.Time)(v.ptr)
    937 }
    938 
    939 func rvGetString(rv reflect.Value) string {
    940 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    941 	return *(*string)(v.ptr)
    942 }
    943 
    944 func rvGetFloat64(rv reflect.Value) float64 {
    945 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    946 	return *(*float64)(v.ptr)
    947 }
    948 
    949 func rvGetFloat32(rv reflect.Value) float32 {
    950 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    951 	return *(*float32)(v.ptr)
    952 }
    953 
    954 func rvGetComplex64(rv reflect.Value) complex64 {
    955 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    956 	return *(*complex64)(v.ptr)
    957 }
    958 
    959 func rvGetComplex128(rv reflect.Value) complex128 {
    960 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    961 	return *(*complex128)(v.ptr)
    962 }
    963 
    964 func rvGetInt(rv reflect.Value) int {
    965 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    966 	return *(*int)(v.ptr)
    967 }
    968 
    969 func rvGetInt8(rv reflect.Value) int8 {
    970 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    971 	return *(*int8)(v.ptr)
    972 }
    973 
    974 func rvGetInt16(rv reflect.Value) int16 {
    975 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    976 	return *(*int16)(v.ptr)
    977 }
    978 
    979 func rvGetInt32(rv reflect.Value) int32 {
    980 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    981 	return *(*int32)(v.ptr)
    982 }
    983 
    984 func rvGetInt64(rv reflect.Value) int64 {
    985 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    986 	return *(*int64)(v.ptr)
    987 }
    988 
    989 func rvGetUint(rv reflect.Value) uint {
    990 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    991 	return *(*uint)(v.ptr)
    992 }
    993 
    994 func rvGetUint8(rv reflect.Value) uint8 {
    995 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
    996 	return *(*uint8)(v.ptr)
    997 }
    998 
    999 func rvGetUint16(rv reflect.Value) uint16 {
   1000 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   1001 	return *(*uint16)(v.ptr)
   1002 }
   1003 
   1004 func rvGetUint32(rv reflect.Value) uint32 {
   1005 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   1006 	return *(*uint32)(v.ptr)
   1007 }
   1008 
   1009 func rvGetUint64(rv reflect.Value) uint64 {
   1010 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   1011 	return *(*uint64)(v.ptr)
   1012 }
   1013 
   1014 func rvGetUintptr(rv reflect.Value) uintptr {
   1015 	v := (*unsafeReflectValue)(unsafe.Pointer(&rv))
   1016 	return *(*uintptr)(v.ptr)
   1017 }
   1018 
// rvLenMap returns the number of entries in the map held by rv,
// by reading the count word at the start of the runtime map header.
func rvLenMap(rv reflect.Value) int {
	// maplen is not inlined, because as of go1.16beta, go:linkname's are not inlined.
	// thus, faster to call rv.Len() directly.
	//
	// MARKER: review after https://github.com/golang/go/issues/20019 fixed.

	// return rv.Len()

	return len_map(rvRefPtr((*unsafeReflectValue)(unsafe.Pointer(&rv))))
}
   1029 
   1030 // copy is an intrinsic, which may use asm if length is small,
   1031 // or make a runtime call to runtime.memmove if length is large.
   1032 // Performance suffers when you always call runtime.memmove function.
   1033 //
   1034 // Consequently, there's no value in a copybytes call - just call copy() directly
   1035 
   1036 // func copybytes(to, from []byte) (n int) {
   1037 // 	n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
   1038 // 	memmove(
   1039 // 		(*unsafeSlice)(unsafe.Pointer(&to)).Data,
   1040 // 		(*unsafeSlice)(unsafe.Pointer(&from)).Data,
   1041 // 		uintptr(n),
   1042 // 	)
   1043 // 	return
   1044 // }
   1045 
   1046 // func copybytestr(to []byte, from string) (n int) {
   1047 // 	n = (*unsafeSlice)(unsafe.Pointer(&from)).Len
   1048 // 	memmove(
   1049 // 		(*unsafeSlice)(unsafe.Pointer(&to)).Data,
   1050 // 		(*unsafeSlice)(unsafe.Pointer(&from)).Data,
   1051 // 		uintptr(n),
   1052 // 	)
   1053 // 	return
   1054 // }
   1055 
   1056 // Note: it is hard to find len(...) of an array type,
   1057 // as that is a field in the arrayType representing the array, and hard to introspect.
   1058 //
   1059 // func rvLenArray(rv reflect.Value) int {	return rv.Len() }
   1060 
   1061 // ------------ map range and map indexing ----------
   1062 
   1063 // regular calls to map via reflection: MapKeys, MapIndex, MapRange/MapIter etc
   1064 // will always allocate for each map key or value.
   1065 //
   1066 // It is more performant to provide a value that the map entry is set into,
   1067 // and that elides the allocation.
   1068 
   1069 // go 1.4+ has runtime/hashmap.go or runtime/map.go which has a
   1070 // hIter struct with the first 2 values being key and value
   1071 // of the current iteration.
   1072 //
   1073 // This *hIter is passed to mapiterinit, mapiternext, mapiterkey, mapiterelem.
   1074 // We bypass the reflect wrapper functions and just use the *hIter directly.
   1075 //
   1076 // Though *hIter has many fields, we only care about the first 2.
   1077 //
   1078 // We directly embed this in unsafeMapIter below
   1079 //
// hiter is typically about 12 words, but we just fill up unsafeMapIter to 32 words,
// so it fills multiple cache lines and can give some extra space to accommodate small growth.
   1082 
// unsafeMapIter iterates over a map by driving the runtime's map iterator
// directly, avoiding the per-entry allocations of reflect's MapRange/MapKeys.
type unsafeMapIter struct {
	mtyp, mptr unsafe.Pointer // runtime type of, and pointer to, the map
	k, v       reflect.Value  // addressable values each Next() sets the current key/value into
	kisref     bool           // key kind is a reference kind (its map storage can be shared)
	visref     bool           // value kind is a reference kind
	mapvalues  bool           // track values too, not just keys
	done       bool           // iteration exhausted (or map was nil)
	started    bool           // first Next() already consumed the entry positioned by mapiterinit
	_          [3]byte // padding
	// it mirrors the leading fields of the runtime's hiter struct;
	// only key and value are read here - the rest is opaque runtime state.
	it         struct {
		key   unsafe.Pointer
		value unsafe.Pointer
		_     [20]uintptr // padding for other fields (to make up 32 words for enclosing struct)
	}
}
   1098 
// Next advances the iterator and loads the current map key (and value, if
// mapvalues) into t.k (and t.v). It returns true iff an entry was found;
// once it returns false, the iteration is done.
func (t *unsafeMapIter) Next() (r bool) {
	if t == nil || t.done {
		return
	}
	if t.started {
		mapiternext((unsafe.Pointer)(&t.it))
	} else {
		// mapiterinit (called from mapRange) already positioned us at the first entry
		t.started = true
	}

	// the runtime signals exhaustion with a nil key pointer
	t.done = t.it.key == nil
	if t.done {
		return
	}

	if helperUnsafeDirectAssignMapEntry || t.kisref {
		// point t.k directly at the entry's storage (ok for reference kinds)
		(*unsafeReflectValue)(unsafe.Pointer(&t.k)).ptr = t.it.key
	} else {
		// copy the key out of the map (entry storage may move, e.g. on map growth)
		k := (*unsafeReflectValue)(unsafe.Pointer(&t.k))
		typedmemmove(k.typ, k.ptr, t.it.key)
	}

	if t.mapvalues {
		if helperUnsafeDirectAssignMapEntry || t.visref {
			(*unsafeReflectValue)(unsafe.Pointer(&t.v)).ptr = t.it.value
		} else {
			v := (*unsafeReflectValue)(unsafe.Pointer(&t.v))
			typedmemmove(v.typ, v.ptr, t.it.value)
		}
	}

	return true
}
   1132 
   1133 func (t *unsafeMapIter) Key() (r reflect.Value) {
   1134 	return t.k
   1135 }
   1136 
   1137 func (t *unsafeMapIter) Value() (r reflect.Value) {
   1138 	return t.v
   1139 }
   1140 
   1141 func (t *unsafeMapIter) Done() {}
   1142 
// mapIter is the concrete iterator type initialized by mapRange;
// it simply embeds (and exposes the methods of) unsafeMapIter.
type mapIter struct {
	unsafeMapIter
}
   1146 
// mapRange initializes t to iterate over map m. k and v must be addressable
// reflect.Values (see mapAddrLoopvarRV); each Next() call stores the current
// key (and, if mapvalues, the value) into them.
func mapRange(t *mapIter, m, k, v reflect.Value, mapvalues bool) {
	if rvIsNil(m) {
		t.done = true // nil map: iteration is immediately exhausted
		return
	}
	t.done = false
	t.started = false
	t.mapvalues = mapvalues

	// var urv *unsafeReflectValue

	urv := (*unsafeReflectValue)(unsafe.Pointer(&m))
	t.mtyp = urv.typ
	t.mptr = rvRefPtr(urv) // the runtime map pointer (deref'd if flagIndir)

	// t.it = (*unsafeMapHashIter)(reflect_mapiterinit(t.mtyp, t.mptr))
	// position the runtime iterator at the first entry
	mapiterinit(t.mtyp, t.mptr, unsafe.Pointer(&t.it))

	t.k = k
	// reference kinds can share the map's entry storage directly (see Next)
	t.kisref = refBitset.isset(byte(k.Kind()))

	if mapvalues {
		t.v = v
		t.visref = refBitset.isset(byte(v.Kind()))
	} else {
		t.v = reflect.Value{}
	}
}
   1175 
   1176 // unsafeMapKVPtr returns the pointer if flagIndir, else it returns a pointer to the pointer.
   1177 // It is needed as maps always keep a reference to the underlying value.
   1178 func unsafeMapKVPtr(urv *unsafeReflectValue) unsafe.Pointer {
   1179 	if urv.flag&unsafeFlagIndir == 0 {
   1180 		return unsafe.Pointer(&urv.ptr)
   1181 	}
   1182 	return urv.ptr
   1183 }
   1184 
   1185 // func mapDelete(m, k reflect.Value) {
   1186 // 	var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
   1187 // 	var kptr = unsafeMapKVPtr(urv)
   1188 // 	urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
   1189 // 	mapdelete(urv.typ, rv2ptr(urv), kptr)
   1190 // }
   1191 
// return an addressable reflect value that can be used in mapRange and mapGet operations.
//
// all calls to mapGet or mapRange will call here to get an addressable reflect.Value.
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
	// return rvZeroAddrK(t, k)
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// mark indirect + addressable so map entries can be copied into it
	urv.flag = uintptr(k) | unsafeFlagIndir | unsafeFlagAddr
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&t))).ptr
	// since we always set the ptr when helperUnsafeDirectAssignMapEntry=true,
	// we should only allocate if it is not true
	if !helperUnsafeDirectAssignMapEntry {
		urv.ptr = unsafeNew(urv.typ)
	}
	return
}
   1207 
   1208 // ---------- ENCODER optimized ---------------
   1209 
   1210 func (e *Encoder) jsondriver() *jsonEncDriver {
   1211 	return (*jsonEncDriver)((*unsafeIntf)(unsafe.Pointer(&e.e)).ptr)
   1212 }
   1213 
   1214 func (d *Decoder) zerocopystate() bool {
   1215 	return d.decByteState == decByteStateZerocopy && d.h.ZeroCopy
   1216 }
   1217 
// stringZC returns a string for the given bytes: a zero-copy view (stringView)
// when the decoder is in zerocopy mode, else it delegates to d.string.
func (d *Decoder) stringZC(v []byte) (s string) {
	// MARKER: inline zerocopystate directly so genHelper forwarding function fits within inlining cost

	// if d.zerocopystate() {
	if d.decByteState == decByteStateZerocopy && d.h.ZeroCopy {
		return stringView(v)
	}
	return d.string(v)
}
   1227 
// mapKeyString returns a string view over *kstr2bs for use as a map key.
// When not in zero-copy mode, it sets *callFnRvk, and - if the decoder is
// reusing its internal buffer - first copies the key bytes into *kstrbs
// (updating *kstr2bs to match) so they survive subsequent buffer reuse.
func (d *Decoder) mapKeyString(callFnRvk *bool, kstrbs, kstr2bs *[]byte) string {
	if !d.zerocopystate() {
		*callFnRvk = true
		if d.decByteState == decByteStateReuseBuf {
			*kstrbs = append((*kstrbs)[:0], (*kstr2bs)...)
			*kstr2bs = *kstrbs
		}
	}
	return stringView(*kstr2bs)
}
   1238 
   1239 // ---------- DECODER optimized ---------------
   1240 
   1241 func (d *Decoder) jsondriver() *jsonDecDriver {
   1242 	return (*jsonDecDriver)((*unsafeIntf)(unsafe.Pointer(&d.d)).ptr)
   1243 }
   1244 
   1245 // ---------- structFieldInfo optimized ---------------
   1246 
// rvField returns the reflect.Value for the struct field described by n
// within struct value v, computed directly as v's pointer + n.offset
// (avoiding reflect.Value.Field and its checks).
func (n *structFieldInfoPathNode) rvField(v reflect.Value) (rv reflect.Value) {
	// we already know this is exported, and maybe embedded (based on what si says)
	uv := (*unsafeReflectValue)(unsafe.Pointer(&v))
	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
	// clear flagEmbedRO if necessary, and inherit permission bits from v
	urv.flag = uv.flag&(unsafeFlagStickyRO|unsafeFlagIndir|unsafeFlagAddr) | uintptr(n.kind)
	urv.typ = ((*unsafeIntf)(unsafe.Pointer(&n.typ))).ptr
	urv.ptr = unsafe.Pointer(uintptr(uv.ptr) + uintptr(n.offset))
	return
}
   1257 
   1258 // runtime chan and map are designed such that the first field is the count.
   1259 // len builtin uses this to get the length of a chan/map easily.
   1260 // leverage this knowledge, since maplen and chanlen functions from runtime package
   1261 // are go:linkname'd here, and thus not inlined as of go1.16beta
   1262 
   1263 func len_map_chan(m unsafe.Pointer) int {
   1264 	if m == nil {
   1265 		return 0
   1266 	}
   1267 	return *((*int)(m))
   1268 }
   1269 
// len_map returns the number of entries in the map whose runtime header m points to.
func len_map(m unsafe.Pointer) int {
	// return maplen(m)
	return len_map_chan(m)
}
// len_chan returns the number of elements queued in the channel whose runtime header m points to.
func len_chan(m unsafe.Pointer) int {
	// return chanlen(m)
	return len_map_chan(m)
}
   1278 
// unsafeNew allocates a new zeroed value of runtime type typ and returns a
// pointer to it. It uses runtime.mallocgc (rather than reflect.unsafe_New)
// because mallocgc is also available under gollvm/gccgo.
func unsafeNew(typ unsafe.Pointer) unsafe.Pointer {
	return mallocgc(rtsize2(typ), typ, true)
}
   1282 
   1283 // ---------- go linknames (LINKED to runtime/reflect) ---------------
   1284 
   1285 // MARKER: always check that these linknames match subsequent versions of go
   1286 //
   1287 // Note that as of Jan 2021 (go 1.16 release), go:linkname(s) are not inlined
   1288 // outside of the standard library use (e.g. within sync, reflect, etc).
   1289 // If these link'ed functions were normally inlined, calling them here would
   1290 // not necessarily give a performance boost, due to function overhead.
   1291 //
   1292 // However, it seems most of these functions are not inlined anyway,
   1293 // as only maplen, chanlen and mapaccess are small enough to get inlined.
   1294 //
   1295 //   We checked this by going into $GOROOT/src/runtime and running:
   1296 //   $ go build -tags codec.notfastpath -gcflags "-m=2"
   1297 
   1298 // reflect.{unsafe_New, unsafe_NewArray} are not supported in gollvm,
   1299 // failing with "error: undefined reference" error.
   1300 // however, runtime.{mallocgc, newarray} are supported, so use that instead.
   1301 
// memmove copies n bytes from 'from' to 'to' (raw bytes; no write barriers).
//
//go:linkname memmove runtime.memmove
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

// mallocgc allocates a size-byte object of runtime type typ, zeroed if needzero.
//
//go:linkname mallocgc runtime.mallocgc
//go:noescape
func mallocgc(size uintptr, typ unsafe.Pointer, needzero bool) unsafe.Pointer

// newarray allocates a zeroed array of n elements of runtime type typ.
//
//go:linkname newarray runtime.newarray
//go:noescape
func newarray(typ unsafe.Pointer, n int) unsafe.Pointer

// mapiterinit initializes the iterator at it for map m, positioning it at the first entry.
//
//go:linkname mapiterinit runtime.mapiterinit
//go:noescape
func mapiterinit(typ unsafe.Pointer, m unsafe.Pointer, it unsafe.Pointer)

// mapiternext advances the map iterator at it; its key field becomes nil when exhausted.
//
//go:linkname mapiternext runtime.mapiternext
//go:noescape
func mapiternext(it unsafe.Pointer) (key unsafe.Pointer)

// mapdelete removes the entry for key from map m.
//
//go:linkname mapdelete runtime.mapdelete
//go:noescape
func mapdelete(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer)

// mapassign returns a pointer to the (possibly newly created) value slot for key in m.
//
//go:linkname mapassign runtime.mapassign
//go:noescape
func mapassign(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer

// mapaccess2 returns a pointer to the value slot for key in m, and whether the key exists.
//
//go:linkname mapaccess2 runtime.mapaccess2
//go:noescape
func mapaccess2(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer, ok bool)
   1333 
   1334 // reflect.typed{memmove, memclr, slicecopy} will handle checking if the type has pointers or not,
   1335 // and if a writeBarrier is needed, before delegating to the right method in the runtime.
   1336 //
   1337 // This is why we use the functions in reflect, and not the ones in runtime directly.
   1338 // Calling runtime.XXX here will lead to memory issues.
   1339 
// typedslicecopy copies min(dst.Len, src.Len) elements of elemType from src to dst,
// applying write barriers as needed; it returns the number of elements copied.
//
//go:linkname typedslicecopy reflect.typedslicecopy
//go:noescape
func typedslicecopy(elemType unsafe.Pointer, dst, src unsafeSlice) int

// typedmemmove copies a single value of type typ from src to dst,
// applying write barriers as needed.
//
//go:linkname typedmemmove reflect.typedmemmove
//go:noescape
func typedmemmove(typ unsafe.Pointer, dst, src unsafe.Pointer)

// typedmemclr zeroes the value of type typ at dst, applying write barriers as needed.
//
//go:linkname typedmemclr reflect.typedmemclr
//go:noescape
func typedmemclr(typ unsafe.Pointer, dst unsafe.Pointer)