obj6.go (34240B)
1 // Inferno utils/6l/pass.c 2 // https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6l/pass.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package x86

import (
	"github.com/twitchyliquid64/golang-asm/obj"
	"github.com/twitchyliquid64/golang-asm/objabi"
	"github.com/twitchyliquid64/golang-asm/src"
	"github.com/twitchyliquid64/golang-asm/sys"
	"math"
	"strings"
)

// CanUse1InsnTLS reports whether TLS references on this OS/arch can use
// the direct 1-instruction form (e.g. MOVQ 0(TLS), CX) instead of the
// 2-instruction load-base-then-index form.
func CanUse1InsnTLS(ctxt *obj.Link) bool {
	if isAndroid {
		// Android uses a global variable for the tls offset.
		return false
	}

	if ctxt.Arch.Family == sys.I386 {
		switch ctxt.Headtype {
		case objabi.Hlinux,
			objabi.Hplan9,
			objabi.Hwindows:
			return false
		}

		return true
	}

	switch ctxt.Headtype {
	case objabi.Hplan9, objabi.Hwindows:
		return false
	case objabi.Hlinux, objabi.Hfreebsd:
		return !ctxt.Flag_shared
	}

	return true
}

// progedit rewrites a single Prog in place early in assembly: TLS access
// forms, Android TLS indirection, CALL/JMP/RET targets to TYPE_BRANCH,
// MOV-of-address to LEA, float constants to memory symbols, and (under
// -dynlink or 386 -shared) GOT/PC-relative rewrites.
func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	// Thread-local storage references use the TLS pseudo-register.
	// As a register, TLS refers to the thread-local storage base, and it
	// can only be loaded into another register:
	//
	//	MOVQ TLS, AX
	//
	// An offset from the thread-local storage base is written off(reg)(TLS*1).
	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
	// indexing from the loaded TLS base. This emits a relocation so that
	// if the linker needs to adjust the offset, it can. For example:
	//
	//	MOVQ TLS, AX
	//	MOVQ 0(AX)(TLS*1), CX // load g into CX
	//
	// On systems that support direct access to the TLS memory, this
	// pair of instructions can be reduced to a direct TLS memory reference:
	//
	//	MOVQ 0(TLS), CX // load g into CX
	//
	// The 2-instruction and 1-instruction forms correspond to the two code
	// sequences for loading a TLS variable in the local exec model given in "ELF
	// Handling For Thread-Local Storage".
	//
	// We apply this rewrite on systems that support the 1-instruction form.
	// The decision is made using only the operating system and the -shared flag,
	// not the link mode. If some link modes on a particular operating system
	// require the 2-instruction form, then all builds for that operating system
	// will use the 2-instruction form, so that the link mode decision can be
	// delayed to link time.
	//
	// In this way, all supported systems use identical instructions to
	// access TLS, and they are rewritten appropriately first here in
	// liblink and then finally using relocations in the linker.
	//
	// When -shared is passed, we leave the code in the 2-instruction form but
	// assemble (and relocate) them in different ways to generate the initial
	// exec code sequence. It's a bit of a fluke that this is possible without
	// rewriting the instructions more comprehensively, and it only does because
	// we only support a single TLS variable (g).

	if CanUse1InsnTLS(ctxt) {
		// Reduce 2-instruction sequence to 1-instruction sequence.
		// Sequences like
		//	MOVQ TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		//
		// TODO(rsc): Remove the Hsolaris special case. It exists only to
		// guarantee we are producing byte-identical binaries as before this code.
		// But it should be unnecessary.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != objabi.Hsolaris {
			obj.Nopout(p)
		}
		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
			p.From.Reg = REG_TLS
			p.From.Scale = 0
			p.From.Index = REG_NONE
		}

		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			p.To.Reg = REG_TLS
			p.To.Scale = 0
			p.To.Index = REG_NONE
		}
	} else {
		// load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
		// as the 2-instruction sequence if necessary.
		//	MOVQ 0(TLS), BX
		// becomes
		//	MOVQ TLS, BX
		//	MOVQ 0(BX)(TLS*1), BX
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			q := obj.Appendp(p, newprog)
			q.As = p.As
			q.From = p.From
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Index = REG_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_TLS
			p.From.Index = REG_NONE
			p.From.Offset = 0
		}
	}

	// Android uses a tls offset determined at runtime. Rewrite
	//	MOVQ TLS, BX
	// to
	//	MOVQ runtime.tls_g(SB), BX
	if isAndroid && (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_EXTERN
		p.From.Reg = REG_NONE
		p.From.Sym = ctxt.Lookup("runtime.tls_g")
		p.From.Index = REG_NONE
	}

	// TODO: Remove.
	if ctxt.Headtype == objabi.Hwindows && ctxt.Arch.Family == sys.AMD64 || ctxt.Headtype == objabi.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == REG_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == REG_TLS {
			p.To.Scale = 2
		}
	}

	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
	// That's what the tables expect.
	switch p.As {
	case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
		if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
			p.To.Type = obj.TYPE_CONST
		}
	}

	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
	switch p.As {
	case obj.ACALL, obj.AJMP, obj.ARET:
		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
	if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Family == sys.AMD64 || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
		switch p.As {
		case AMOVL:
			p.As = ALEAL
			p.From.Type = obj.TYPE_MEM
		case AMOVQ:
			p.As = ALEAQ
			p.From.Type = obj.TYPE_MEM
		}
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
	case AMOVSS:
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVF,
		AFADDF,
		AFSUBF,
		AFSUBRF,
		AFMULF,
		AFDIVF,
		AFDIVRF,
		AFCOMF,
		AFCOMFP,
		AADDSS,
		ASUBSS,
		AMULSS,
		ADIVSS,
		ACOMISS,
		AUCOMISS:
		// Materialize the 32-bit float constant as a memory symbol.
		if p.From.Type == obj.TYPE_FCONST {
			f32 := float32(p.From.Val.(float64))
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = ctxt.Float32Sym(f32)
			p.From.Offset = 0
		}

	case AMOVSD:
		// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVD,
		AFADDD,
		AFSUBD,
		AFSUBRD,
		AFMULD,
		AFDIVD,
		AFDIVRD,
		AFCOMD,
		AFCOMDP,
		AADDSD,
		ASUBSD,
		AMULSD,
		ADIVSD,
		ACOMISD,
		AUCOMISD:
		// Materialize the 64-bit float constant as a memory symbol.
		if p.From.Type == obj.TYPE_FCONST {
			f64 := p.From.Val.(float64)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = ctxt.Float64Sym(f64)
			p.From.Offset = 0
		}
	}

	if ctxt.Flag_dynlink {
		rewriteToUseGot(ctxt, p, newprog)
	}

	if ctxt.Flag_shared && ctxt.Arch.Family == sys.I386 {
		rewriteToPcrel(ctxt, p, newprog)
	}
}

// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	var lea, mov obj.As
	var reg int16
	if ctxt.Arch.Family == sys.AMD64 {
		lea = ALEAQ
		mov = AMOVQ
		reg = REG_R15
	} else {
		lea = ALEAL
		mov = AMOVL
		reg = REG_CX
		if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
			// Special case: clobber the destination register with
			// the PC so we don't have to clobber CX.
			// The SSA backend depends on CX not being clobbered across LEAL.
			// See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared).
			reg = p.To.Reg
		}
	}

	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
		//     ADUFFxxx $offset
		// becomes
		//     $MOV runtime.duffxxx@GOT, $reg
		//     $LEA $offset($reg), $reg
		//     CALL $reg
		// (we use LEAx rather than ADDx because ADDx clobbers
		// flags and duffzero on 386 does not otherwise do so).
		var sym *obj.LSym
		if p.As == obj.ADUFFZERO {
			sym = ctxt.Lookup("runtime.duffzero")
		} else {
			sym = ctxt.Lookup("runtime.duffcopy")
		}
		offset := p.To.Offset
		p.As = mov
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		p.From.Sym = sym
		p.To.Type = obj.TYPE_REG
		p.To.Reg = reg
		p.To.Offset = 0
		p.To.Sym = nil
		p1 := obj.Appendp(p, newprog)
		p1.As = lea
		p1.From.Type = obj.TYPE_MEM
		p1.From.Offset = offset
		p1.From.Reg = reg
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = reg
		p2 := obj.Appendp(p1, newprog)
		p2.As = obj.ACALL
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = reg
	}

	// We only care about global data: NAME_EXTERN means a global
	// symbol in the Go sense, and p.Sym.Local is true for a few
	// internally defined symbols.
	if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below
		p.As = mov
		p.From.Type = obj.TYPE_ADDR
	}
	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $MOV $sym, Rx becomes $MOV sym@GOT, Rx
		// $MOV $sym+<off>, Rx becomes $MOV sym@GOT, Rx; $LEA <off>(Rx), Rx
		// On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX
		cmplxdest := false
		pAs := p.As
		var dest obj.Addr
		if p.To.Type != obj.TYPE_REG || pAs != mov {
			if ctxt.Arch.Family == sys.AMD64 {
				ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
			}
			cmplxdest = true
			dest = p.To
			p.As = mov
			p.To.Type = obj.TYPE_REG
			p.To.Reg = reg
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		q := p
		if p.From.Offset != 0 {
			// Non-zero offset: add a trailing LEA to apply it after the GOT load.
			q = obj.Appendp(p, newprog)
			q.As = lea
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Offset = p.From.Offset
			q.To = p.To
			p.From.Offset = 0
		}
		if cmplxdest {
			// Forward the loaded value to the original (non-register) destination.
			q = obj.Appendp(q, newprog)
			q.As = pAs
			q.To = dest
			q.From.Type = obj.TYPE_REG
			q.From.Reg = reg
		}
	}
	if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	var source *obj.Addr
	// MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry
	// MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15)
	// An addition may be inserted between the two MOVs if there is an offset.
	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
		}
		source = &p.From
	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
		source = &p.To
	} else {
		return
	}
	if p.As == obj.ACALL {
		// When dynlinking on 386, almost any call might end up being a call
		// to a PLT, so make sure the GOT pointer is loaded into BX.
		// RegTo2 is set on the replacement call insn to stop it being
		// processed when it is in turn passed to progedit.
		//
		// We disable open-coded defers in buildssa() on 386 ONLY with shared
		// libraries because of this extra code added before deferreturn calls.
		if ctxt.Arch.Family == sys.AMD64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
			return
		}
		p1 := obj.Appendp(p, newprog)
		p2 := obj.Appendp(p1, newprog)

		p1.As = ALEAL
		p1.From.Type = obj.TYPE_MEM
		p1.From.Name = obj.NAME_STATIC
		p1.From.Sym = ctxt.Lookup("_GLOBAL_OFFSET_TABLE_")
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = REG_BX

		p2.As = p.As
		p2.Scond = p.Scond
		p2.From = p.From
		if p.RestArgs != nil {
			p2.RestArgs = append(p2.RestArgs, p.RestArgs...)
		}
		p2.Reg = p.Reg
		p2.To = p.To
		// p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr
		// in ../pass.go complain, so set it back to TYPE_MEM here, until p2
		// itself gets passed to progedit.
		p2.To.Type = obj.TYPE_MEM
		p2.RegTo2 = 1

		obj.Nopout(p)
		return

	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	if source.Type != obj.TYPE_MEM {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	p1 := obj.Appendp(p, newprog)
	p2 := obj.Appendp(p1, newprog)

	p1.As = mov
	p1.From.Type = obj.TYPE_MEM
	p1.From.Sym = source.Sym
	p1.From.Name = obj.NAME_GOTREF
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = reg

	p2.As = p.As
	p2.From = p.From
	p2.To = p.To
	if p.From.Name == obj.NAME_EXTERN {
		p2.From.Reg = reg
		p2.From.Name = obj.NAME_NONE
		p2.From.Sym = nil
	} else if p.To.Name == obj.NAME_EXTERN {
		p2.To.Reg = reg
		p2.To.Name = obj.NAME_NONE
		p2.To.Sym = nil
	} else {
		return
	}
	obj.Nopout(p)
}

// rewriteToPcrel rewrites p (386 with -shared only) so that references to
// NAME_EXTERN/NAME_STATIC/NAME_GOTREF symbols go through a PC-loading
// thunk call (__x86.get_pc_thunk.REG) per 386 PIC conventions.
func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
	// RegTo2 is set on the instructions we insert here so they don't get
	// processed twice.
	if p.RegTo2 != 0 {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	// Any Prog (aside from the above special cases) with an Addr with Name ==
	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX
	// inserted before it.
	isName := func(a *obj.Addr) bool {
		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
			return false
		}
		if a.Sym.Type == objabi.STLSBSS {
			return false
		}
		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
	}

	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
		// respectively.
		if p.To.Type != obj.TYPE_REG {
			q := obj.Appendp(p, newprog)
			q.As = p.As
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_CX
			q.To = p.To
			p.As = AMOVL
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_CX
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
	}

	if !isName(&p.From) && !isName(&p.To) && (p.GetFrom3() == nil || !isName(p.GetFrom3())) {
		return
	}
	var dst int16 = REG_CX
	if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
		dst = p.To.Reg
		// Why? See the comment near the top of rewriteToUseGot above.
		// AMOVLs might be introduced by the GOT rewrites.
	}
	q := obj.Appendp(p, newprog)
	q.RegTo2 = 1
	r := obj.Appendp(q, newprog)
	r.RegTo2 = 1
	q.As = obj.ACALL
	thunkname := "__x86.get_pc_thunk." + strings.ToLower(rconv(int(dst)))
	q.To.Sym = ctxt.LookupInit(thunkname, func(s *obj.LSym) { s.Set(obj.AttrLocal, true) })
	q.To.Type = obj.TYPE_MEM
	q.To.Name = obj.NAME_EXTERN
	r.As = p.As
	r.Scond = p.Scond
	r.From = p.From
	r.RestArgs = p.RestArgs
	r.Reg = p.Reg
	r.To = p.To
	if isName(&p.From) {
		r.From.Reg = dst
	}
	if isName(&p.To) {
		r.To.Reg = dst
	}
	if p.GetFrom3() != nil && isName(p.GetFrom3()) {
		r.GetFrom3().Reg = dst
	}
	obj.Nopout(p)
}

// preprocess generates the prologue and epilogue for cursym: frame/BP
// bookkeeping, g load, stack-split check, wrapper panic.argp adjustment,
// SP-delta (Spadj) tracking, and RET/retjmp expansion.
func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	if cursym.Func.Text == nil || cursym.Func.Text.Link == nil {
		return
	}

	p := cursym.Func.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	hasCall := false
	for q := p; q != nil; q = q.Link {
		if q.As == obj.ACALL || q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO {
			hasCall = true
			break
		}
	}

	var bpsize int
	if ctxt.Arch.Family == sys.AMD64 &&
		!p.From.Sym.NoFrame() && // (1) below
		!(autoffset == 0 && p.From.Sym.NoSplit()) && // (2) below
		!(autoffset == 0 && !hasCall) { // (3) below
		// Make room to save a base pointer.
		// There are 2 cases we must avoid:
		// 1) If noframe is set (which we do for functions which tail call).
		// 2) Scary runtime internals which would be all messed up by frame pointers.
		//    We detect these using a heuristic: frameless nosplit functions.
		// TODO: Maybe someday we label them all with NOFRAME and get rid of this heuristic.
		// For performance, we also want to avoid:
		// 3) Frameless leaf functions
		bpsize = ctxt.Arch.PtrSize
		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Func.Args = int32(textarg)
	cursym.Func.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if ctxt.Arch.Family == sys.I386 && cursym.Func.Locals < 0 {
		cursym.Func.Locals = 0
	}

	// TODO(rsc): Remove 'ctxt.Arch.Family == sys.AMD64 &&'.
	if ctxt.Arch.Family == sys.AMD64 && autoffset < objabi.StackSmall && !p.From.Sym.NoSplit() {
		// Mark small leaf-like functions nosplit so they skip the stack check.
		leaf := true
	LeafSearch:
		for q := p; q != nil; q = q.Link {
			switch q.As {
			case obj.ACALL:
				// Treat common runtime calls that take no arguments
				// the same as duffcopy and duffzero.
				if !isZeroArgRuntimeCall(q.To.Sym) {
					leaf = false
					break LeafSearch
				}
				fallthrough
			case obj.ADUFFCOPY, obj.ADUFFZERO:
				if autoffset >= objabi.StackSmall-8 {
					leaf = false
					break LeafSearch
				}
			}
		}

		if leaf {
			p.From.Sym.Set(obj.AttrNoSplit, true)
		}
	}

	if !p.From.Sym.NoSplit() || p.From.Sym.Wrapper() {
		p = obj.Appendp(p, newprog)
		p = load_g_cx(ctxt, p, newprog) // load g into CX
	}

	if !cursym.Func.Text.From.Sym.NoSplit() {
		p = stacksplit(ctxt, cursym, p, newprog, autoffset, int32(textarg)) // emit split check
	}

	// Delve debugger would like the next instruction to be noted as the end of the function prologue.
	// TODO: are there other cases (e.g., wrapper functions) that need marking?
	markedPrologue := false

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.RegSize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(p, newprog)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
		p.Pos = p.Pos.WithXlogue(src.PosPrologueEnd)
		markedPrologue = true
	}

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(p, newprog)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)
		if !markedPrologue {
			p.Pos = p.Pos.WithXlogue(src.PosPrologueEnd)
		}

		// Move current frame to BP
		p = obj.Appendp(p, newprog)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Func.Text.From.Sym.Wrapper() {
		// if g._panic != nil && g._panic.argp == FP {
		//   g._panic.argp = bottom-of-frame
		// }
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JNE checkargp
		// end:
		//	NOP
		//	... rest of function ...
		// checkargp:
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		//	JMP end
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
		//
		// The layout is chosen to help static branch prediction:
		// Both conditional jumps are unlikely, so they are arranged to be forward jumps.

		// MOVQ g_panic(CX), BX
		p = obj.Appendp(p, newprog)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // g_panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Arch.Family == sys.I386 {
			p.As = AMOVL
		}

		// TESTQ BX, BX
		p = obj.Appendp(p, newprog)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Arch.Family == sys.I386 {
			p.As = ATESTL
		}

		// JNE checkargp (checkargp to be resolved later)
		jne := obj.Appendp(p, newprog)
		jne.As = AJNE
		jne.To.Type = obj.TYPE_BRANCH

		// end:
		//	NOP
		end := obj.Appendp(jne, newprog)
		end.As = obj.ANOP

		// Fast forward to end of function.
		var last *obj.Prog
		for last = end; last.Link != nil; last = last.Link {
		}

		// LEAQ (autoffset+8)(SP), DI
		p = obj.Appendp(last, newprog)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Arch.Family == sys.I386 {
			p.As = ALEAL
		}

		// Set jne branch target.
		jne.To.SetTarget(p)

		// CMPQ panic_argp(BX), DI
		p = obj.Appendp(p, newprog)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Arch.Family == sys.I386 {
			p.As = ACMPL
		}

		// JNE end
		p = obj.Appendp(p, newprog)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p.To.SetTarget(end)

		// MOVQ SP, panic_argp(BX)
		p = obj.Appendp(p, newprog)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Arch.Family == sys.I386 {
			p.As = AMOVL
		}

		// JMP end
		p = obj.Appendp(p, newprog)
		p.As = obj.AJMP
		p.To.Type = obj.TYPE_BRANCH
		p.To.SetTarget(end)

		// Reset p for following code.
		p = end
	}

	// Second pass: track the SP delta through PUSH/POP/ADJSP, fix up
	// NAME_AUTO/NAME_PARAM offsets, and expand RET into frame teardown.
	var deltasp int32
	for p = cursym.Func.Text; p != nil; p = p.Link {
		pcsize := ctxt.Arch.RegSize
		switch p.From.Name {
		case obj.NAME_AUTO:
			p.From.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.GetFrom3() != nil {
			switch p.GetFrom3().Name {
			case obj.NAME_AUTO:
				p.GetFrom3().Offset += int64(deltasp) - int64(bpsize)
			case obj.NAME_PARAM:
				p.GetFrom3().Offset += int64(deltasp) + int64(pcsize)
			}
		}
		switch p.To.Name {
		case obj.NAME_AUTO:
			p.To.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case AADJSP:
			p.Spadj = int32(p.From.Offset)
			deltasp += int32(p.From.Offset)
			continue

		case obj.ARET:
			// do nothing
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			to := p.To // Keep To attached to RET for retjmp below
			p.To = obj.Addr{}
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(p, newprog)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(p, newprog)
			p.As = obj.ARET
			p.To = to

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

// isZeroArgRuntimeCall reports whether s is a runtime call that uses no
// stack before its own stack check, so callers may treat it as leaf-safe.
func isZeroArgRuntimeCall(s *obj.LSym) bool {
	if s == nil {
		return false
	}
	switch s.Name {
	case "runtime.panicdivide", "runtime.panicwrap", "runtime.panicshift":
		return true
	}
	if strings.HasPrefix(s.Name, "runtime.panicIndex") || strings.HasPrefix(s.Name, "runtime.panicSlice") {
		// These functions do take arguments (in registers),
		// but use no stack before they do a stack check. We
		// should include them. See issue 31219.
		return true
	}
	return false
}

// indir_cx rewrites a as an indirect memory reference through CX
// (which holds g at the points where this is called).
func indir_cx(ctxt *obj.Link, a *obj.Addr) {
	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
942 // Overwriting p is unusual but it lets use this in both the 943 // prologue (caller must call appendp first) and in the epilogue. 944 // Returns last new instruction. 945 func load_g_cx(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) *obj.Prog { 946 p.As = AMOVQ 947 if ctxt.Arch.PtrSize == 4 { 948 p.As = AMOVL 949 } 950 p.From.Type = obj.TYPE_MEM 951 p.From.Reg = REG_TLS 952 p.From.Offset = 0 953 p.To.Type = obj.TYPE_REG 954 p.To.Reg = REG_CX 955 956 next := p.Link 957 progedit(ctxt, p, newprog) 958 for p.Link != next { 959 p = p.Link 960 progedit(ctxt, p, newprog) 961 } 962 963 if p.From.Index == REG_TLS { 964 p.From.Scale = 2 965 } 966 967 return p 968 } 969 970 // Append code to p to check for stack split. 971 // Appends to (does not overwrite) p. 972 // Assumes g is in CX. 973 // Returns last new instruction. 974 func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgAlloc, framesize int32, textarg int32) *obj.Prog { 975 cmp := ACMPQ 976 lea := ALEAQ 977 mov := AMOVQ 978 sub := ASUBQ 979 980 if ctxt.Arch.Family == sys.I386 { 981 cmp = ACMPL 982 lea = ALEAL 983 mov = AMOVL 984 sub = ASUBL 985 } 986 987 var q1 *obj.Prog 988 if framesize <= objabi.StackSmall { 989 // small stack: SP <= stackguard 990 // CMPQ SP, stackguard 991 p = obj.Appendp(p, newprog) 992 993 p.As = cmp 994 p.From.Type = obj.TYPE_REG 995 p.From.Reg = REG_SP 996 indir_cx(ctxt, &p.To) 997 p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0 998 if cursym.CFunc() { 999 p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1 1000 } 1001 1002 // Mark the stack bound check and morestack call async nonpreemptible. 1003 // If we get preempted here, when resumed the preemption request is 1004 // cleared, but we'll still call morestack, which will double the stack 1005 // unnecessarily. See issue #35470. 
		p = ctxt.StartUnsafePoint(p, newprog)
	} else if framesize <= objabi.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(p, newprog)

		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - objabi.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(p, newprog)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}

		p = ctxt.StartUnsafePoint(p, newprog) // see the comment above
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ	stackguard, SI
		//	CMPQ	SI, $StackPreempt
		//	JEQ	label-of-call-to-morestack
		//	LEAQ	StackGuard(SP), AX
		//	SUBQ	SI, AX
		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))

		p = obj.Appendp(p, newprog)

		p.As = mov
		indir_cx(ctxt, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if cursym.CFunc() {
			p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = ctxt.StartUnsafePoint(p, newprog) // see the comment above

		p = obj.Appendp(p, newprog)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = objabi.StackPreempt
		if ctxt.Arch.Family == sys.I386 {
			// On 386, truncate the StackPreempt sentinel to its low
			// 32 bits so the comparison uses the register's width.
			p.To.Offset = int64(uint32(objabi.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(p, newprog)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		// Remember the JEQ so its target can be patched to the
		// morestack call once that call has been emitted.
		q1 = p

		p = obj.Appendp(p, newprog)
		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(objabi.StackGuard)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(p, newprog)
		p.As = sub
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(p, newprog)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (int64(objabi.StackGuard) - objabi.StackSmall)
	}

	// common: all three cases above fall through to a single
	// JLS (jump if lower-or-same) into the morestack call sequence.
	jls := obj.Appendp(p, newprog)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	end := ctxt.EndUnsafePoint(jls, newprog, -1)

	// Walk to the last Prog of the function; the morestack call
	// sequence is appended after the function body.
	var last *obj.Prog
	for last = cursym.Func.Text; last.Link != nil; last = last.Link {
	}

	// Now we are at the end of the function, but logically
	// we are still in function prologue. We need to fix the
	// SP data and PCDATA.
	spfix := obj.Appendp(last, newprog)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	pcdata := ctxt.EmitEntryStackMap(cursym, spfix, newprog)
	pcdata = ctxt.StartUnsafePoint(pcdata, newprog)

	call := obj.Appendp(pcdata, newprog)
	call.Pos = cursym.Func.Text.Pos
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	call.To.Name = obj.NAME_EXTERN
	// Pick the morestack variant: C-stack functions use morestackc,
	// functions that do not need a context register use the
	// _noctxt variant, everything else uses plain morestack.
	morestack := "runtime.morestack"
	switch {
	case cursym.CFunc():
		morestack = "runtime.morestackc"
	case !cursym.Func.Text.From.Sym.NeedCtxt():
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = ctxt.Lookup(morestack)
	// When compiling 386 code for dynamic linking, the call needs to be adjusted
	// to follow PIC rules. This in turn can insert more instructions, so we need
	// to keep track of the start of the call (where the jump will be to) and the
	// end (which following instructions are appended to).
	// Run the freshly emitted CALL (and anything progedit inserts for
	// PIC) through progedit itself, tracking the last Prog so the
	// following instructions are appended after the whole sequence.
	callend := call
	progedit(ctxt, callend, newprog)
	for ; callend.Link != nil; callend = callend.Link {
		progedit(ctxt, callend.Link, newprog)
	}

	pcdata = ctxt.EndUnsafePoint(callend, newprog, -1)

	// After morestack returns, jump back to the instruction following
	// the TEXT Prog to re-run the prologue on the new, larger stack.
	jmp := obj.Appendp(pcdata, newprog)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.To.SetTarget(cursym.Func.Text.Link)
	jmp.Spadj = +framesize // undo the spfix -framesize adjustment on this path

	// Patch the prologue's branch(es) to land on the morestack call.
	jls.To.SetTarget(call)
	if q1 != nil {
		q1.To.SetTarget(call)
	}

	return end
}

// unaryDst marks the instructions whose single operand is a
// destination (written to) rather than a source; it is installed as
// the UnaryDst table of the LinkArch values below.
var unaryDst = map[obj.As]bool{
	ABSWAPL:     true,
	ABSWAPQ:     true,
	ACLDEMOTE:   true,
	ACLFLUSH:    true,
	ACLFLUSHOPT: true,
	ACLWB:       true,
	ACMPXCHG16B: true,
	ACMPXCHG8B:  true,
	ADECB:       true,
	ADECL:       true,
	ADECQ:       true,
	ADECW:       true,
	AFBSTP:      true,
	AFFREE:      true,
	AFLDENV:     true,
	AFSAVE:      true,
	AFSTCW:      true,
	AFSTENV:     true,
	AFSTSW:      true,
	AFXSAVE64:   true,
	AFXSAVE:     true,
	AINCB:       true,
	AINCL:       true,
	AINCQ:       true,
	AINCW:       true,
	ANEGB:       true,
	ANEGL:       true,
	ANEGQ:       true,
	ANEGW:       true,
	ANOTB:       true,
	ANOTL:       true,
	ANOTQ:       true,
	ANOTW:       true,
	APOPL:       true,
	APOPQ:       true,
	APOPW:       true,
	ARDFSBASEL:  true,
	ARDFSBASEQ:  true,
	ARDGSBASEL:  true,
	ARDGSBASEQ:  true,
	ARDRANDL:    true,
	ARDRANDQ:    true,
	ARDRANDW:    true,
	ARDSEEDL:    true,
	ARDSEEDQ:    true,
	ARDSEEDW:    true,
	ASETCC:      true,
	ASETCS:      true,
	ASETEQ:      true,
	ASETGE:      true,
	ASETGT:      true,
	ASETHI:      true,
	ASETLE:      true,
	ASETLS:      true,
	ASETLT:      true,
	ASETMI:      true,
	ASETNE:      true,
	ASETOC:      true,
	ASETOS:      true,
	ASETPC:      true,
	ASETPL:      true,
	ASETPS:      true,
	ASGDT:       true,
	ASIDT:       true,
	ASLDTL:      true,
	ASLDTQ:      true,
	ASLDTW:      true,
	ASMSWL:      true,
	ASMSWQ:      true,
	ASMSWW:      true,
	ASTMXCSR:    true,
	ASTRL:       true,
	ASTRQ:       true,
	ASTRW:       true,
	AXSAVE64:    true,
	AXSAVE:      true,
	AXSAVEC64:   true,
	AXSAVEC:     true,
	AXSAVEOPT64: true,
	AXSAVEOPT:   true,
	AXSAVES64:   true,
	AXSAVES:     true,
}

// Linkamd64 describes the amd64 back end. It shares this file's
// instinit/preprocess/span6/progedit implementation with Link386;
// only the Arch and the DWARF register mapping differ.
var Linkamd64 = obj.LinkArch{
	Arch:           sys.ArchAMD64,
	Init:           instinit,
	Preprocess:     preprocess,
	Assemble:       span6,
	Progedit:       progedit,
	UnaryDst:       unaryDst,
	DWARFRegisters: AMD64DWARFRegisters,
}

// Link386 describes the 386 back end, using the same shared x86
// implementation as Linkamd64 with the 386 arch and DWARF registers.
var Link386 = obj.LinkArch{
	Arch:           sys.Arch386,
	Init:           instinit,
	Preprocess:     preprocess,
	Assemble:       span6,
	Progedit:       progedit,
	UnaryDst:       unaryDst,
	DWARFRegisters: X86DWARFRegisters,
}