gtsocial-umbx


abi.go (27621B)


      1 // Copyright 2019 The CC Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package cc // import "modernc.org/cc/v3"
      6 
      7 import (
      8 	"encoding/binary"
      9 	"fmt"
     10 	"math"
     11 	"os"
     12 	"runtime"
     13 
     14 	"lukechampine.com/uint128"
     15 	"modernc.org/mathutil"
     16 )
     17 
     18 var (
     19 	idAligned   = String("aligned")
     20 	idGCCStruct = String("gcc_struct")
     21 	idMSStruct  = String("ms_struct")
     22 	idPacked    = String("packed")
     23 
     24 	complexTypedefs = map[StringID]Kind{
     25 		dict.sid("__COMPLEX_CHAR_TYPE__"):               ComplexChar,
     26 		dict.sid("__COMPLEX_DOUBLE_TYPE__"):             ComplexDouble,
     27 		dict.sid("__COMPLEX_FLOAT_TYPE__"):              ComplexFloat,
     28 		dict.sid("__COMPLEX_INT_TYPE__"):                ComplexInt,
     29 		dict.sid("__COMPLEX_LONG_TYPE__"):               ComplexLong,
     30 		dict.sid("__COMPLEX_LONG_DOUBLE_TYPE__"):        ComplexLongDouble,
     31 		dict.sid("__COMPLEX_LONG_LONG_TYPE__"):          ComplexLongLong,
     32 		dict.sid("__COMPLEX_SHORT_TYPE__"):              ComplexShort,
     33 		dict.sid("__COMPLEX_UNSIGNED_TYPE__"):           ComplexUInt,
     34 		dict.sid("__COMPLEX_LONG_UNSIGNED_TYPE__"):      ComplexULong,
     35 		dict.sid("__COMPLEX_LONG_LONG_UNSIGNED_TYPE__"): ComplexULongLong,
     36 		dict.sid("__COMPLEX_SHORT_UNSIGNED_TYPE__"):     ComplexUShort,
     37 	}
     38 )
     39 
      40 // NewABI creates an ABI for a given OS and architecture. The OS and architecture values are the same as those
      41 // used by Go (GOOS/GOARCH). The ABI type map may lack advanced types such as complex numbers. If the os/arch
      42 // pair is not recognized, an error is returned.
     43 func NewABI(os, arch string) (ABI, error) {
     44 	order, ok := abiByteOrders[arch]
     45 	if !ok {
     46 		return ABI{}, fmt.Errorf("unsupported arch: %s", arch)
     47 	}
     48 	types, ok := abiTypes[[2]string{os, arch}]
     49 	if !ok {
     50 		return ABI{}, fmt.Errorf("unsupported os/arch pair: %s-%s", os, arch)
     51 	}
     52 	abi := ABI{
     53 		ByteOrder:  order,
     54 		Types:      make(map[Kind]ABIType, len(types)),
     55 		SignedChar: abiSignedChar[[2]string{os, arch}],
     56 		os:         os,
     57 		arch:       arch,
     58 	}
      59 	// Copy the map so the caller can modify it without mutating the shared defaults.
     60 	for k, v := range types {
     61 		abi.Types[k] = v
     62 	}
     63 	return abi, nil
     64 }
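
// Illustrative usage (a sketch, not part of the original file): build an ABI
// for linux/amd64 and inspect the C "int" entry, which on that target is
// expected to be ABIType{Size: 4, Align: 4, FieldAlign: 4}.
//
//	abi, err := NewABI("linux", "amd64")
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(abi.Types[Int].Size, abi.ByteOrder) // 4 LittleEndian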
     65 
      66 // NewABIFromEnv uses the GOOS and GOARCH environment variables to create a corresponding ABI.
      67 // If those variables are not set, the OS and architecture of the Go runtime are used.
      68 // It returns an error if the OS/arch pair is not supported.
     69 func NewABIFromEnv() (ABI, error) {
     70 	osv := os.Getenv("GOOS")
     71 	if osv == "" {
     72 		osv = runtime.GOOS
     73 	}
     74 	arch := os.Getenv("GOARCH")
     75 	if arch == "" {
     76 		arch = runtime.GOARCH
     77 	}
     78 	return NewABI(osv, arch)
     79 }
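
// A sketch of environment-driven selection: with GOOS/GOARCH unset this
// mirrors the host, so cross-configuring is just a matter of setting the
// variables before the call.
//
//	os.Setenv("GOOS", "windows")
//	os.Setenv("GOARCH", "386")
//	abi, err := NewABIFromEnv() // windows/386 ABI, if the pair is supported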
     80 
     81 // ABIType describes properties of a non-aggregate type.
     82 type ABIType struct {
     83 	Size       uintptr
     84 	Align      int
     85 	FieldAlign int
     86 }
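
// A hedged example of a single entry: on LP64 targets (most 64-bit Unix-like
// systems) the C "long" maps to
//
//	ABIType{Size: 8, Align: 8, FieldAlign: 8}
//
// while 64-bit Windows is LLP64, where long stays at 4 bytes.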
     87 
     88 // ABI describes selected parts of the Application Binary Interface.
     89 type ABI struct {
     90 	ByteOrder binary.ByteOrder
     91 	Types     map[Kind]ABIType
     92 	arch      string
     93 	os        string
     94 	types     map[Kind]Type
     95 
     96 	SignedChar bool
     97 }
     98 
     99 func (a *ABI) sanityCheck(ctx *context, intMaxWidth int, s Scope) error {
    100 	if intMaxWidth == 0 {
    101 		intMaxWidth = 64
    102 	}
    103 
    104 	a.types = map[Kind]Type{}
    105 	for _, k := range []Kind{
    106 		Bool,
    107 		Char,
    108 		Double,
    109 		Enum,
    110 		Float,
    111 		Int,
    112 		Long,
    113 		LongDouble,
    114 		LongLong,
    115 		Ptr,
    116 		SChar,
    117 		Short,
    118 		UChar,
    119 		UInt,
    120 		ULong,
    121 		ULongLong,
    122 		UShort,
    123 		Void,
    124 	} {
    125 		v, ok := a.Types[k]
    126 		if !ok {
    127 			if ctx.err(noPos, "ABI is missing %s", k) {
    128 				return ctx.Err()
    129 			}
    130 
    131 			continue
    132 		}
    133 
    134 		if (k != Void && v.Size == 0) || v.Align == 0 || v.FieldAlign == 0 ||
    135 			v.Align > math.MaxUint8 || v.FieldAlign > math.MaxUint8 {
    136 			if ctx.err(noPos, "invalid ABI type %s: %+v", k, v) {
    137 				return ctx.Err()
    138 			}
    139 		}
    140 
    141 		if integerTypes[k] && v.Size > 8 {
    142 			if ctx.err(noPos, "invalid ABI type %s size: %v, must be <= 8", k, v.Size) {
    143 				return ctx.Err()
    144 			}
    145 		}
    146 		var f flag
    147 		if integerTypes[k] && a.isSignedInteger(k) {
    148 			f = fSigned
    149 		}
    150 		t := &typeBase{
    151 			align:      byte(a.align(k)),
    152 			fieldAlign: byte(a.fieldAlign(k)),
    153 			flags:      f,
    154 			kind:       byte(k),
    155 			size:       uintptr(a.size(k)),
    156 		}
    157 		a.types[k] = t
    158 	}
    159 	if _, ok := a.Types[Int128]; ok {
    160 		t := &typeBase{
    161 			align:      byte(a.align(Int128)),
    162 			fieldAlign: byte(a.fieldAlign(Int128)),
    163 			flags:      fSigned,
    164 			kind:       byte(Int128),
    165 			size:       uintptr(a.size(Int128)),
    166 		}
    167 		a.types[Int128] = t
    168 	}
    169 	if _, ok := a.Types[UInt128]; ok {
    170 		t := &typeBase{
    171 			align:      byte(a.align(UInt128)),
    172 			fieldAlign: byte(a.fieldAlign(UInt128)),
    173 			kind:       byte(UInt128),
    174 			size:       uintptr(a.size(UInt128)),
    175 		}
    176 		a.types[UInt128] = t
    177 	}
    178 	return ctx.Err()
    179 }
    180 
    181 func (a *ABI) Type(k Kind) Type { return a.types[k] }
    182 
    183 func (a *ABI) align(k Kind) int      { return a.Types[k].Align }
    184 func (a *ABI) fieldAlign(k Kind) int { return a.Types[k].FieldAlign }
    185 func (a *ABI) size(k Kind) int       { return int(a.Types[k].Size) }
    186 
    187 func (a *ABI) isSignedInteger(k Kind) bool {
    188 	if !integerTypes[k] {
    189 		internalError()
    190 	}
    191 
    192 	switch k {
    193 	case Bool, UChar, UInt, ULong, ULongLong, UShort:
    194 		return false
    195 	case Char:
    196 		return a.SignedChar
    197 	default:
    198 		return true
    199 	}
    200 }
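
// Note the Char case above: whether plain C "char" is signed is an ABI
// property, not a language constant. A minimal sketch:
//
//	abi, _ := NewABI("linux", "amd64")
//	abi.isSignedInteger(Char) // true on amd64; on linux/arm64 it would be false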
    201 
    202 func roundup(n, to int64) int64 {
    203 	if r := n % to; r != 0 {
    204 		return n + to - r
    205 	}
    206 
    207 	return n
    208 }
    209 
    210 func roundup128(n uint128.Uint128, to uint64) uint128.Uint128 {
    211 	if r := n.Mod(uint128.From64(to)); !r.IsZero() {
    212 		return n.Add64(to).Sub(r)
    213 	}
    214 
    215 	return n
    216 }
    217 
     218 func rounddown(n, to int64) int64 { // to must be a power of two
    219 	return n &^ (to - 1)
    220 }
    221 
     222 func rounddown128(n uint128.Uint128, to uint64) uint128.Uint128 { // to must be a power of two
    223 	return n.And(uint128.Uint128{Hi: ^uint64(0), Lo: ^(to - 1)})
    224 }
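
// Worked values for the helpers above (units are the caller's, bits in this
// file):
//
//	roundup(23, 32)   // 32: next multiple of 32
//	roundup(32, 32)   // 32: already aligned
//	rounddown(23, 32) // 0: previous multiple; "to" must be a power of two
//	rounddown(13, 8)  // 8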
    225 
    226 func normalizeBitFieldWidth(n byte) byte {
    227 	switch {
    228 	case n <= 8:
    229 		return 8
    230 	case n <= 16:
    231 		return 16
    232 	case n <= 32:
    233 		return 32
    234 	case n <= 64:
    235 		return 64
    236 	default:
    237 		panic(todo("internal error: %v", n))
    238 	}
    239 }
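
// The normalization above snaps a bit-field block to the width of the machine
// type that backs it, for example:
//
//	normalizeBitFieldWidth(3)  // 8
//	normalizeBitFieldWidth(17) // 32
//	normalizeBitFieldWidth(64) // 64; anything wider panics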
    240 
    241 func (a *ABI) layout(ctx *context, n Node, t *structType) *structType {
    242 	if t == nil {
    243 		return nil
    244 	}
    245 
    246 	if t.typeBase.align < 1 {
    247 		t.typeBase.align = 1
    248 	}
    249 	for _, v := range t.attr {
    250 		if _, ok := v.Has(idGCCStruct); ok {
    251 			return a.gccLayout(ctx, n, t)
    252 		}
    253 
    254 		//TODO if _, ok := v.Has(idMSStruct); ok {
    255 		//TODO 	return a.msLayout(ctx, n, t)
    256 		//TODO }
    257 	}
    258 
    259 	switch {
    260 	case ctx.cfg.Config3.GCCStructs:
    261 		return a.gccLayout(ctx, n, t)
    262 		//TODO case ctx.cfg.Config3.MSStructs:
    263 		//TODO 	return a.msLayout(ctx, n, t)
    264 	}
    265 
    266 	var hasBitfields bool
    267 
    268 	defer func() {
    269 		if !hasBitfields {
    270 			return
    271 		}
    272 
    273 		m := make(map[uintptr][]*field, len(t.fields))
    274 		for _, f := range t.fields {
    275 			off := f.offset
    276 			m[off] = append(m[off], f)
    277 		}
    278 		for _, s := range m {
    279 			var first *field
    280 			var w byte
    281 			for _, f := range s {
    282 				if first == nil {
    283 					first = f
    284 				}
    285 				if f.isBitField {
    286 					n := f.bitFieldOffset + f.bitFieldWidth
    287 					if n > w {
    288 						w = n
    289 					}
    290 				}
    291 			}
    292 			w = normalizeBitFieldWidth(w)
    293 			for _, f := range s {
    294 				if f.isBitField {
    295 					f.blockStart = first
    296 					f.blockWidth = w
    297 				}
    298 				if a.ByteOrder == binary.BigEndian {
    299 					f.bitFieldOffset = w - f.bitFieldWidth - f.bitFieldOffset
    300 					f.bitFieldMask = (uint64(1)<<f.bitFieldWidth - 1) << f.bitFieldOffset
    301 				}
    302 			}
    303 		}
    304 	}()
    305 
    306 	var off int64 // bit offset
    307 	align := int(t.typeBase.align)
    308 
    309 	switch {
    310 	case t.Kind() == Union:
    311 		for _, f := range t.fields {
    312 			ft := f.Type()
    313 			sz := ft.Size()
    314 			if n := int64(8 * sz); n > off {
    315 				off = n
    316 			}
    317 			al := ft.FieldAlign()
    318 			if al == 0 {
    319 				al = 1
    320 			}
    321 			if al > align {
    322 				align = al
    323 			}
    324 
    325 			if f.isBitField {
    326 				hasBitfields = true
    327 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    328 			}
    329 			f.promote = integerPromotion(a, ft)
    330 		}
    331 		t.align = byte(align)
    332 		t.fieldAlign = byte(align)
    333 		off = roundup(off, 8*int64(align))
    334 		t.size = uintptr(off >> 3)
    335 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    336 	default:
    337 		var i int
    338 		var group byte
    339 		var f, lf *field
    340 		for i, f = range t.fields {
    341 			ft := f.Type()
    342 			var sz uintptr
    343 			switch {
    344 			case ft.Kind() == Array && i == len(t.fields)-1:
    345 				if ft.IsIncomplete() || ft.Len() == 0 {
    346 					t.hasFlexibleMember = true
    347 					f.isFlexible = true
    348 					break
    349 				}
    350 
    351 				fallthrough
    352 			default:
    353 				sz = ft.Size()
    354 			}
    355 
    356 			bitSize := 8 * int(sz)
    357 			al := ft.FieldAlign()
    358 			if al == 0 {
    359 				al = 1
    360 			}
    361 			if al > align {
    362 				align = al
    363 			}
    364 
    365 			switch {
    366 			case f.isBitField:
    367 				hasBitfields = true
    368 				eal := 8 * al
    369 				if eal < bitSize {
    370 					eal = bitSize
    371 				}
    372 				down := off &^ (int64(eal) - 1)
    373 				bitoff := off - down
    374 				downMax := off &^ (int64(bitSize) - 1)
    375 				skip := lf != nil && lf.isBitField && lf.bitFieldWidth == 0 ||
    376 					lf != nil && lf.bitFieldWidth == 0 && ctx.cfg.NoFieldAndBitfieldOverlap
    377 				switch {
    378 				case skip || int(off-downMax)+int(f.bitFieldWidth) > bitSize:
    379 					group = 0
    380 					off = roundup(off, 8*int64(al))
    381 					f.offset = uintptr(off >> 3)
    382 					f.bitFieldOffset = 0
    383 					f.bitFieldMask = 1<<f.bitFieldWidth - 1
    384 					off += int64(f.bitFieldWidth)
    385 					if f.bitFieldWidth == 0 {
    386 						lf = f
    387 						continue
    388 					}
    389 				default:
    390 					f.offset = uintptr(down >> 3)
    391 					f.bitFieldOffset = byte(bitoff)
    392 					f.bitFieldMask = (1<<f.bitFieldWidth - 1) << byte(bitoff)
    393 					off += int64(f.bitFieldWidth)
    394 				}
    395 				group += f.bitFieldWidth
    396 			default:
    397 				if n := group % 64; n != 0 {
    398 					if ctx.cfg.FixBitfieldPadding {
    399 						off += int64(normalizeBitFieldWidth(group-n) - group)
    400 					} else {
    401 						group -= n
    402 						off += int64(normalizeBitFieldWidth(group) - group)
    403 					}
    404 				}
    405 				off0 := off
    406 				off = roundup(off, 8*int64(al))
    407 				f.pad = byte(off-off0) >> 3
    408 				f.offset = uintptr(off) >> 3
    409 				off += 8 * int64(sz)
    410 				group = 0
    411 			}
    412 			f.promote = integerPromotion(a, ft)
    413 			lf = f
    414 		}
    415 		t.align = byte(align)
    416 		t.fieldAlign = byte(align)
    417 		off0 := off
    418 		off = roundup(off, 8*int64(align))
    419 		if f != nil && !f.IsBitField() {
    420 			f.pad = byte(off-off0) >> 3
    421 		}
    422 		t.size = uintptr(off >> 3)
    423 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    424 	}
    425 	return t
    426 }
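
// A worked example of the bookkeeping above (offsets as this algorithm
// computes them; a sketch, not a normative C statement). Given
//
//	struct { unsigned a : 3; unsigned b : 20; }
//
// both fields land at byte offset 0 and the struct occupies one int-aligned
// word (sizeof 4). The deferred pass sees w = 3+20 = 23, normalized to a
// 32-bit block, so both fields get blockWidth 32. On a big-endian target the
// bit offsets are then flipped: a moves from 0 to 32-3-0 = 29 and b from 3 to
// 32-20-3 = 9, with the masks rebuilt to match.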
    427 
    428 func (a *ABI) Ptr(n Node, t Type) Type {
    429 	base := t.base()
    430 	base.align = byte(a.align(Ptr))
    431 	base.fieldAlign = byte(a.fieldAlign(Ptr))
    432 	base.kind = byte(Ptr)
    433 	base.size = uintptr(a.size(Ptr))
    434 	base.flags &^= fIncomplete
    435 	return &pointerType{
    436 		elem:     t,
    437 		typeBase: base,
    438 	}
    439 }
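
// Sketch of deriving a pointer type (the nil Node is an assumption here; the
// argument appears to be carried for API symmetry and is unused in the body):
//
//	intType := abi.Type(Int)
//	ptrType := abi.Ptr(nil, intType) // Kind() == Ptr, Size() == abi.Types[Ptr].Size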
    440 
    441 func (a *ABI) gccLayout(ctx *context, n Node, t *structType) (r *structType) {
    442 	if t.IsPacked() {
    443 		return a.gccPackedLayout(ctx, n, t)
    444 	}
    445 
    446 	if t.Kind() == Union {
    447 		var off uint128.Uint128 // In bits.
    448 		align := int(t.typeBase.align)
    449 		for _, f := range t.fields {
    450 			switch {
    451 			case f.isBitField:
    452 				f.offset = 0
    453 				f.bitFieldOffset = 0
    454 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    455 				if uint64(f.bitFieldWidth) > off.Lo {
    456 					off.Lo = uint64(f.bitFieldWidth)
    457 				}
    458 			default:
    459 				al := f.Type().Align()
    460 				if al > align {
    461 					align = al
    462 				}
    463 				f.offset = 0
    464 				off2 := uint128.From64(uint64(f.Type().Size())).Mul64(8)
    465 				if off2.Cmp(off) > 0 {
    466 					off = off2
    467 				}
    468 			}
    469 			f.promote = integerPromotion(a, f.Type())
    470 		}
    471 		t.align = byte(align)
    472 		t.fieldAlign = byte(align)
    473 		off = roundup128(off, 8*uint64(align))
    474 		t.size = uintptr(off.Rsh(3).Lo)
    475 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    476 		return t
    477 	}
    478 
    479 	var off uint128.Uint128 // In bits.
    480 	align := int(t.typeBase.align)
    481 	for i, f := range t.fields {
    482 		switch {
    483 		case f.isBitField:
    484 			al := f.Type().Align()
    485 
    486 			// http://jkz.wtf/bit-field-packing-in-gcc-and-clang
    487 
    488 			// 1. Jump backwards to nearest address that would support this type. For
    489 			// example if we have an int jump to the closest address where an int could be
    490 			// stored according to the platform alignment rules.
    491 			down := rounddown128(off, 8*uint64(al))
    492 
    493 			// 2. Get sizeof(current field) bytes from that address.
    494 			alloc := int64(f.Type().Size()) * 8
    495 			need := int64(f.bitFieldWidth)
    496 			if need == 0 && i != 0 {
    497 				off = roundup128(off, 8*uint64(al))
    498 				continue
    499 			}
    500 
    501 			if al > align {
    502 				align = al
    503 			}
    504 			used := int64(off.Sub(down).Lo)
    505 			switch {
    506 			case alloc-used >= need:
    507 				// 3. If the number of bits that we need to store can be stored in these bits,
    508 				// put the bits in the lowest possible bits of this block.
    509 				off = down.Add64(uint64(used))
    510 				f.offset = uintptr(down.Rsh(3).Lo)
    511 				f.bitFieldOffset = byte(used)
    512 				f.bitFieldMask = (1<<f.bitFieldWidth - 1) << used
    513 				off = off.Add64(uint64(f.bitFieldWidth))
    514 				f.promote = integerPromotion(a, f.Type())
    515 			default:
    516 				// 4. Otherwise, pad the rest of this block with zeros, and store the bits that
    517 				// make up this bit-field in the lowest bits of the next block.
    518 				off = roundup128(off, 8*uint64(al))
    519 				f.offset = uintptr(off.Rsh(3).Lo)
    520 				f.bitFieldOffset = 0
    521 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    522 				off = off.Add64(uint64(f.bitFieldWidth))
    523 				f.promote = integerPromotion(a, f.Type())
    524 			}
    525 		default:
    526 			al := f.Type().Align()
    527 			if al > align {
    528 				align = al
    529 			}
    530 			off = roundup128(off, 8*uint64(al))
    531 			f.offset = uintptr(off.Rsh(3).Lo)
    532 			sz := uint128.From64(uint64(f.Type().Size()))
    533 			off = off.Add(sz.Mul64(8))
    534 			f.promote = integerPromotion(a, f.Type())
    535 		}
    536 	}
    537 	var lf *field
    538 	for _, f := range t.fields {
    539 		if lf != nil && !lf.isBitField && !f.isBitField {
    540 			lf.pad = byte(f.offset - lf.offset - lf.Type().Size())
    541 		}
    542 		lf = f
    543 	}
    544 	t.align = byte(align)
    545 	t.fieldAlign = byte(align)
    546 	off0 := off
    547 	off = roundup128(off, 8*uint64(align))
    548 	if lf != nil && !lf.IsBitField() {
    549 		lf.pad = byte(off.Sub(off0).Rsh(3).Lo)
    550 	}
    551 	t.size = uintptr(off.Rsh(3).Lo)
    552 	ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    553 	return t
    554 }
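
// Following the four numbered steps above for
//
//	struct { char a : 3; int b : 20; }
//
// on a typical 64-bit GCC target (a sketch): a is stored at bit 0 of byte 0.
// For b, step 1 jumps down to bit 0 (the nearest int-aligned address), step 2
// takes a 32-bit window, and since 32-3 >= 20 (step 3) b lands at
// bitFieldOffset 3 of the same word. The struct is int-aligned, so sizeof is
// 4 rather than 8.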
    555 
    556 func (a *ABI) gccPackedLayout(ctx *context, n Node, t *structType) (r *structType) {
    557 	switch a.arch {
    558 	case "arm", "arm64":
    559 		return a.gccPackedLayoutARM(ctx, n, t)
    560 	}
    561 
    562 	if t.typeBase.flags&fAligned == 0 {
    563 		t.align = 1
    564 	}
    565 	t.fieldAlign = t.align
    566 	if t.Kind() == Union {
    567 		var off int64 // In bits.
    568 		for _, f := range t.fields {
    569 			switch {
    570 			case f.isBitField:
    571 				panic(todo("%v: ", n.Position()))
    572 			default:
    573 				f.offset = 0
    574 				if off2 := 8 * int64(f.Type().Size()); off2 > off {
    575 					off = off2
    576 				}
    577 				f.promote = integerPromotion(a, f.Type())
    578 			}
    579 		}
    580 		off = roundup(off, 8)
    581 		t.size = uintptr(off >> 3)
    582 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    583 		return t
    584 	}
    585 
    586 	var off int64 // In bits.
    587 	for i, f := range t.fields {
    588 		switch {
    589 		case f.isBitField:
    590 			if f.bitFieldWidth == 0 {
    591 				if i != 0 {
    592 					off = roundup(off, 8*int64(f.Type().Align()))
    593 				}
    594 				continue
    595 			}
    596 
    597 			if b := f.Type().base(); b.flags&fAligned != 0 {
    598 				off = roundup(off, 8*int64(a.Types[f.Type().Kind()].Align))
    599 			}
    600 			f.offset = uintptr(off >> 3)
    601 			f.bitFieldOffset = byte(off & 7)
    602 			f.bitFieldMask = (1<<f.bitFieldWidth - 1) << f.bitFieldOffset
    603 			off += int64(f.bitFieldWidth)
    604 			f.promote = integerPromotion(a, f.Type())
    605 		default:
    606 			al := f.Type().Align()
    607 			off = roundup(off, 8*int64(al))
    608 			f.offset = uintptr(off) >> 3
    609 			off += 8 * int64(f.Type().Size())
    610 			f.promote = integerPromotion(a, f.Type())
    611 		}
    612 	}
    613 	var lf *field
    614 	for _, f := range t.fields {
    615 		if lf != nil && !lf.isBitField && !f.isBitField {
    616 			lf.pad = byte(f.offset - lf.offset - lf.Type().Size())
    617 		}
    618 		lf = f
    619 	}
    620 	off0 := off
    621 	off = roundup(off, 8*int64(t.Align()))
    622 	if lf != nil && !lf.IsBitField() {
    623 		lf.pad = byte(off-off0) >> 3
    624 	}
    625 	t.size = uintptr(off >> 3)
    626 	ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    627 	return t
    628 }
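
// For comparison, the C-level behavior this packed layout models (hedged;
// standard GCC semantics):
//
//	struct __attribute__((packed)) { char a; int b; }
//
// drops b's alignment requirement to 1, so b sits at byte offset 1 and
// sizeof is 5 instead of 8.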
    629 
    630 func (a *ABI) gccPackedLayoutARM(ctx *context, n Node, t *structType) (r *structType) {
    631 	align := 1
    632 	if t.typeBase.flags&fAligned == 0 {
    633 		t.align = 1
    634 	}
    635 	t.fieldAlign = t.align
    636 	if t.Kind() == Union {
    637 		var off int64 // In bits.
    638 		for _, f := range t.fields {
    639 			switch {
    640 			case f.isBitField:
    641 				panic(todo("%v: ", n.Position()))
    642 			default:
    643 				f.offset = 0
    644 				if off2 := 8 * int64(f.Type().Size()); off2 > off {
    645 					off = off2
    646 				}
    647 				f.promote = integerPromotion(a, f.Type())
    648 			}
    649 		}
    650 		off = roundup(off, 8)
    651 		t.size = uintptr(off >> 3)
    652 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    653 		return t
    654 	}
    655 
    656 	var off int64 // In bits.
    657 	for i, f := range t.fields {
    658 		switch {
    659 		case f.isBitField:
    660 			if f.bitFieldWidth == 0 {
    661 				al := f.Type().Align()
    662 				if al > align {
    663 					align = al
    664 				}
    665 				if i != 0 {
    666 					off = roundup(off, 8*int64(f.Type().Align()))
    667 				}
    668 				continue
    669 			}
    670 
    671 			if b := f.Type().base(); b.flags&fAligned != 0 {
    672 				off = roundup(off, 8*int64(a.Types[f.Type().Kind()].Align))
    673 			}
    674 			f.offset = uintptr(off >> 3)
    675 			f.bitFieldOffset = byte(off & 7)
    676 			f.bitFieldMask = (1<<f.bitFieldWidth - 1) << f.bitFieldOffset
    677 			off += int64(f.bitFieldWidth)
    678 			f.promote = integerPromotion(a, f.Type())
    679 		default:
    680 			al := f.Type().Align()
    681 			off = roundup(off, 8*int64(al))
    682 			f.offset = uintptr(off) >> 3
    683 			off += 8 * int64(f.Type().Size())
    684 			f.promote = integerPromotion(a, f.Type())
    685 		}
    686 	}
    687 	var lf *field
    688 	for _, f := range t.fields {
    689 		if lf != nil && !lf.isBitField && !f.isBitField {
    690 			lf.pad = byte(f.offset - lf.offset - lf.Type().Size())
    691 		}
    692 		lf = f
    693 	}
    694 	if b := t.base(); b.flags&fAligned == 0 {
    695 		t.align = byte(align)
    696 		t.fieldAlign = byte(align)
    697 	}
    698 	off0 := off
    699 	off = roundup(off, 8*int64(t.Align()))
    700 	if lf != nil && !lf.IsBitField() {
    701 		lf.pad = byte(off-off0) >> 3
    702 	}
    703 	t.size = uintptr(off >> 3)
    704 	ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    705 	return t
    706 }
    707 
    708 // https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#x86-Options
    709 //
    710 //	-mno-ms-bitfields
    711 //
    712 // Enable/disable bit-field layout compatible with the native Microsoft Windows
    713 // compiler.
    714 //
    715 // If packed is used on a structure, or if bit-fields are used, it may be that
    716 // the Microsoft ABI lays out the structure differently than the way GCC
    717 // normally does. Particularly when moving packed data between functions
    718 // compiled with GCC and the native Microsoft compiler (either via function
    719 // call or as data in a file), it may be necessary to access either format.
    720 //
    721 // This option is enabled by default for Microsoft Windows targets. This
    722 // behavior can also be controlled locally by use of variable or type
    723 // attributes. For more information, see x86 Variable Attributes and x86 Type
    724 // Attributes.
    725 //
    726 // The Microsoft structure layout algorithm is fairly simple with the exception
    727 // of the bit-field packing. The padding and alignment of members of structures
     728 // and whether a bit-field can straddle a storage-unit boundary are determined
     729 // by these rules:
    730 //
    731 // Structure members are stored sequentially in the order in which they are
    732 // declared: the first member has the lowest memory address and the last member
    733 // the highest.  Every data object has an alignment requirement. The alignment
    734 // requirement for all data except structures, unions, and arrays is either the
    735 // size of the object or the current packing size (specified with either the
    736 // aligned attribute or the pack pragma), whichever is less. For structures,
    737 // unions, and arrays, the alignment requirement is the largest alignment
     738 // requirement of its members. Every object is allocated an offset so that
     739 // offset % alignment_requirement == 0. Adjacent bit-fields are packed into the
    740 // same 1-, 2-, or 4-byte allocation unit if the integral types are the same
    741 // size and if the next bit-field fits into the current allocation unit without
    742 // crossing the boundary imposed by the common alignment requirements of the
    743 // bit-fields.  MSVC interprets zero-length bit-fields in the following ways:
    744 //
    745 // If a zero-length bit-field is inserted between two bit-fields that are
    746 // normally coalesced, the bit-fields are not coalesced.  For example:
    747 //
    748 // 	struct
    749 // 	 {
    750 // 	   unsigned long bf_1 : 12;
    751 // 	   unsigned long : 0;
    752 // 	   unsigned long bf_2 : 12;
    753 // 	 } t1;
    754 //
    755 // The size of t1 is 8 bytes with the zero-length bit-field. If the zero-length
    756 // bit-field were removed, t1’s size would be 4 bytes.
    757 //
    758 // If a zero-length bit-field is inserted after a bit-field, foo, and the
    759 // alignment of the zero-length bit-field is greater than the member that
    760 // follows it, bar, bar is aligned as the type of the zero-length bit-field.
    761 // For example:
    762 //
    763 // 	struct
    764 // 	 {
    765 // 	   char foo : 4;
    766 // 	   short : 0;
    767 // 	   char bar;
    768 // 	 } t2;
    769 //
    770 // 	struct
    771 // 	 {
    772 // 	   char foo : 4;
    773 // 	   short : 0;
    774 // 	   double bar;
    775 // 	 } t3;
    776 //
    777 // For t2, bar is placed at offset 2, rather than offset 1. Accordingly, the
    778 // size of t2 is 4. For t3, the zero-length bit-field does not affect the
    779 // alignment of bar or, as a result, the size of the structure.
    780 //
    781 // Taking this into account, it is important to note the following:
    782 //
    783 // If a zero-length bit-field follows a normal bit-field, the type of the
     784 // zero-length bit-field may affect the alignment of the structure as a whole.
    785 // For example, t2 has a size of 4 bytes, since the zero-length bit-field
    786 // follows a normal bit-field, and is of type short.  Even if a zero-length
    787 // bit-field is not followed by a normal bit-field, it may still affect the
    788 // alignment of the structure:
    789 //
    790 // 	struct
    791 // 	 {
    792 // 	   char foo : 6;
    793 // 	   long : 0;
    794 // 	 } t4;
    795 //
    796 // Here, t4 takes up 4 bytes.
    797 //
    798 // Zero-length bit-fields following non-bit-field members are ignored:
    799 //
    800 // 	struct
    801 // 	 {
    802 // 	   char foo;
    803 // 	   long : 0;
    804 // 	   char bar;
    805 // 	 } t5;
    806 //
    807 // Here, t5 takes up 2 bytes.
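//
// A compact contrast of the coalescing rule above (usual GCC vs. MSVC
// results; a sketch, not verified against every target):
//
// 	struct { char a : 4; int b : 4; } t6;
//
// GCC shares one storage unit where the widths fit, giving sizeof 4, while
// the Microsoft algorithm never coalesces bit-fields of differently sized
// types: a gets a char unit and b a fresh int-aligned unit, giving sizeof 8.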
    808 
    809 func (a *ABI) msLayout(ctx *context, n Node, t *structType) (r *structType) {
    810 	if t.IsPacked() {
    811 		return a.msPackedLayout(ctx, n, t)
    812 	}
    813 
    814 	if t.Kind() == Union {
    815 		panic(todo(""))
    816 	}
    817 
    818 	var off int64 // In bits.
    819 	align := int(t.typeBase.align)
    820 	var prev *field
    821 	for i, f := range t.fields {
    822 		switch {
    823 		case f.isBitField:
    824 			al := f.Type().Align()
    825 			if prev != nil {
    826 				switch {
    827 				case prev.isBitField && prev.Type().Size() != f.Type().Size():
    828 					off = roundup(off, 8*int64(prev.Type().Align()))
    829 					off = roundup(off, 8*int64(al))
    830 				case !prev.isBitField:
    831 					off = roundup(off, 8*int64(al))
    832 				default:
    833 					// Adjacent bit-fields are packed into the same 1-, 2-, or 4-byte allocation
    834 					// unit if the integral types are the same size and if the next bit-field fits
    835 					// into the current allocation unit without crossing the boundary imposed by
    836 					// the common alignment requirements of the bit-fields.
    837 				}
    838 			}
    839 
    840 			// http://jkz.wtf/bit-field-packing-in-gcc-and-clang
    841 
    842 			// 1. Jump backwards to nearest address that would support this type. For
    843 			// example if we have an int jump to the closest address where an int could be
    844 			// stored according to the platform alignment rules.
    845 			down := rounddown(off, 8*int64(al))
    846 
    847 			// 2. Get sizeof(current field) bytes from that address.
    848 			alloc := int64(f.Type().Size()) * 8
    849 			need := int64(f.bitFieldWidth)
    850 			if need == 0 && i != 0 {
    851 				off = roundup(off, 8*int64(al))
    852 				continue
    853 			}
    854 
    855 			if al > align {
    856 				align = al
    857 			}
    858 			used := off - down
    859 			switch {
    860 			case alloc-used >= need:
    861 				// 3. If the number of bits that we need to store can be stored in these bits,
    862 				// put the bits in the lowest possible bits of this block.
    863 				off = down + used
    864 				f.offset = uintptr(down >> 3)
    865 				f.bitFieldOffset = byte(used)
    866 				f.bitFieldMask = (1<<f.bitFieldWidth - 1) << used
    867 				off += int64(f.bitFieldWidth)
    868 				f.promote = integerPromotion(a, f.Type())
    869 			default:
    870 				// 4. Otherwise, pad the rest of this block with zeros, and store the bits that
    871 				// make up this bit-field in the lowest bits of the next block.
    872 				off = roundup(off, 8*int64(al))
    873 				f.offset = uintptr(off >> 3)
    874 				f.bitFieldOffset = 0
    875 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    876 				off += int64(f.bitFieldWidth)
    877 				f.promote = integerPromotion(a, f.Type())
    878 			}
    879 		default:
    880 			if prev != nil && prev.isBitField {
    881 				off = roundup(off, 8*int64(prev.Type().Align()))
    882 			}
    883 			al := f.Type().Align()
    884 			if al > align {
    885 				align = al
    886 			}
    887 			off = roundup(off, 8*int64(al))
    888 			f.offset = uintptr(off) >> 3
    889 			off += 8 * int64(f.Type().Size())
    890 			f.promote = integerPromotion(a, f.Type())
    891 		}
    892 		prev = f
    893 	}
    894 	var lf *field
    895 	for _, f := range t.fields {
    896 		if lf != nil && !lf.isBitField && !f.isBitField {
    897 			lf.pad = byte(f.offset - lf.offset - lf.Type().Size())
    898 		}
    899 		lf = f
    900 	}
    901 	t.align = byte(align)
    902 	t.fieldAlign = byte(align)
    903 	off0 := off
    904 	off = roundup(off, 8*int64(align))
    905 	if lf != nil && !lf.IsBitField() {
    906 		lf.pad = byte(off-off0) >> 3
    907 	}
    908 	t.size = uintptr(off >> 3)
    909 	ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    910 	return t
    911 }
    912 
    913 func (a *ABI) msPackedLayout(ctx *context, n Node, t *structType) (r *structType) {
    914 	if t.typeBase.flags&fAligned == 0 {
    915 		t.align = 1
    916 	}
    917 	t.fieldAlign = t.align
    918 	if t.Kind() == Union {
     919 		panic(todo("")) // TODO: the union layout code below is currently unreachable
    920 		var off int64 // In bits.
    921 		for _, f := range t.fields {
    922 			switch {
    923 			case f.isBitField:
    924 				panic(todo("%v: ", n.Position()))
    925 			default:
    926 				f.offset = 0
    927 				if off2 := 8 * int64(f.Type().Size()); off2 > off {
    928 					off = off2
    929 				}
    930 				f.promote = integerPromotion(a, f.Type())
    931 			}
    932 		}
    933 		off = roundup(off, 8)
    934 		t.size = uintptr(off >> 3)
    935 		ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
    936 		return t
    937 	}
    938 
    939 	var off int64 // In bits.
    940 	var prev *field
    941 	align := int(t.typeBase.align)
    942 	for i, f := range t.fields {
    943 	out:
    944 		switch {
    945 		case f.isBitField:
    946 			al := f.Type().Align()
    947 			switch {
    948 			case prev != nil && prev.IsBitField() && prev.Type().Size() != f.Type().Size():
    949 				off = mathutil.MaxInt64(off, int64(prev.Offset()*8)+int64(prev.BitFieldOffset()+8*prev.Type().Align()))
    950 				off = roundup(off, 8*int64(align))
    951 				f.offset = uintptr(off >> 3)
    952 				f.bitFieldOffset = 0
    953 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    954 				off += int64(f.bitFieldWidth)
    955 				f.promote = integerPromotion(a, f.Type())
    956 				break out
    957 			}
    958 
    959 			// http://jkz.wtf/bit-field-packing-in-gcc-and-clang
    960 
    961 			// 1. Jump backwards to nearest address that would support this type. For
    962 			// example if we have an int jump to the closest address where an int could be
    963 			// stored according to the platform alignment rules.
    964 			down := rounddown(off, 8*int64(al))
    965 
    966 			// 2. Get sizeof(current field) bytes from that address.
    967 			alloc := int64(f.Type().Size()) * 8
    968 			need := int64(f.bitFieldWidth)
    969 			if need == 0 && i != 0 {
    970 				off = roundup(off, 8*int64(al))
    971 				continue
    972 			}
    973 
    974 			used := off - down
    975 			switch {
    976 			case alloc-used >= need:
    977 				// 3. If the number of bits that we need to store can be stored in these bits,
    978 				// put the bits in the lowest possible bits of this block.
    979 				off = down + used
    980 				f.offset = uintptr(down >> 3)
    981 				f.bitFieldOffset = byte(used)
    982 				f.bitFieldMask = (1<<f.bitFieldWidth - 1) << used
    983 				off += int64(f.bitFieldWidth)
    984 				f.promote = integerPromotion(a, f.Type())
    985 			default:
    986 				// 4. Otherwise, pad the rest of this block with zeros, and store the bits that
    987 				// make up this bit-field in the lowest bits of the next block.
    988 				off = roundup(off, 8*int64(al))
    989 				f.offset = uintptr(off >> 3)
    990 				f.bitFieldOffset = 0
    991 				f.bitFieldMask = 1<<f.bitFieldWidth - 1
    992 				off += int64(f.bitFieldWidth)
    993 				f.promote = integerPromotion(a, f.Type())
    994 			}
    995 		default:
    996 			off = roundup(off, 8)
    997 			f.offset = uintptr(off) >> 3
    998 			off += 8 * int64(f.Type().Size())
    999 			f.promote = integerPromotion(a, f.Type())
   1000 		}
   1001 		prev = f
   1002 	}
   1003 	var lf *field
   1004 	for _, f := range t.fields {
   1005 		if lf != nil && !lf.isBitField && !f.isBitField {
   1006 			lf.pad = byte(f.offset - lf.offset - lf.Type().Size())
   1007 		}
   1008 		lf = f
   1009 	}
   1010 	t.align = byte(align)
   1011 	t.fieldAlign = byte(align)
   1012 	switch {
   1013 	case lf != nil && lf.IsBitField():
   1014 		off = mathutil.MaxInt64(off, int64(lf.Offset()*8)+int64(lf.BitFieldOffset()+8*lf.Type().Align()))
   1015 		off = roundup(off, 8*int64(align))
   1016 	default:
   1017 		off0 := off
   1018 		off = roundup(off, 8*int64(align))
   1019 		if lf != nil && !lf.IsBitField() {
   1020 			lf.pad = byte(off-off0) >> 3
   1021 		}
   1022 	}
   1023 	t.size = uintptr(off >> 3)
   1024 	ctx.structs[StructInfo{Size: t.size, Align: t.Align()}] = struct{}{}
   1025 	return t
   1026 }