@@ -401,6 +401,7 @@ RelExpr X86_64::getRelExpr(RelType type, const Symbol &s,
   case R_X86_64_CODE_4_GOTPCRELX:
   case R_X86_64_GOTTPOFF:
   case R_X86_64_CODE_4_GOTTPOFF:
+  case R_X86_64_CODE_6_GOTTPOFF:
     return R_GOT_PC;
   case R_X86_64_GOTOFF64:
     return R_GOTPLTREL;
@@ -562,8 +563,9 @@ void X86_64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
   }
 }
 
-// In some conditions, R_X86_64_GOTTPOFF/R_X86_64_CODE_4_GOTTPOFF relocation can
-// be optimized to R_X86_64_TPOFF32 so that it does not use GOT.
+// In some conditions,
+// R_X86_64_GOTTPOFF/R_X86_64_CODE_4_GOTTPOFF/R_X86_64_CODE_6_GOTTPOFF
+// relocation can be optimized to R_X86_64_TPOFF32 so that it does not use GOT.
 void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {
   uint8_t *inst = loc - 3;
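
For context, the rewrite this comment describes is the standard IE-to-LE transition from the x86-64 TLS ABI: a GOT load such as "movq foo@gottpoff(%rip), %reg" (REX + 0x8b + RIP-relative ModRM) becomes "movq $foo@tpoff, %reg" (REX + 0xc7 + register-direct ModRM). Below is a minimal standalone sketch of that byte rewrite, not lld code; the helper name is hypothetical and loc points at the 4-byte displacement, as in the function above.

#include <cstdint>
#include <cstring>

// Sketch only: classic IE->LE rewrite for a plain REX-prefixed GOT load.
static void relaxIeMovToImm(uint8_t *loc, int32_t tpoff) {
  uint8_t rex = loc[-3];                          // REX of "movq ...(%rip), %reg"
  uint8_t reg = (loc[-1] >> 3) & 0x7;             // destination from ModRM.reg
  loc[-3] = (rex & ~0x04) | ((rex & 0x04) >> 2);  // move REX.R into REX.B
  loc[-2] = 0xc7;                                 // opcode: mov $imm32, r/m64
  loc[-1] = 0xc0 | reg;                           // ModRM: register-direct dest
  std::memcpy(loc, &tpoff, sizeof(tpoff));        // write the TP-relative offset
}

int main() {
  // "movq foo@gottpoff(%rip), %r15": 4c 8b 3d <disp32>
  uint8_t buf[] = {0x4c, 0x8b, 0x3d, 0x00, 0x00, 0x00, 0x00};
  relaxIeMovToImm(buf + 3, -8);  // rewritten to 49 c7 c7 f8 ff ff ff
  return 0;
}
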
@@ -605,7 +607,7 @@ void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
   } else if (rel.type == R_X86_64_CODE_4_GOTTPOFF) {
     if (loc[-4] != 0xd5) {
       Err(ctx) << getErrorLoc(ctx, loc - 4)
-               << "Invalid prefix with R_X86_64_CODE_4_GOTTPOFF!";
+               << "invalid prefix with R_X86_64_CODE_4_GOTTPOFF!";
       return;
     }
     const uint8_t rex = loc[-3];
@@ -623,6 +625,41 @@ void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                << "R_X86_64_CODE_4_GOTTPOFF must be used in MOVQ or ADDQ "
                   "instructions only";
     }
+  } else if (rel.type == R_X86_64_CODE_6_GOTTPOFF) {
+    if (loc[-6] != 0x62) {
+      Err(ctx) << getErrorLoc(ctx, loc - 6)
+               << "invalid prefix with R_X86_64_CODE_6_GOTTPOFF!";
+      return;
+    }
+    // Check bits are satisfied:
+    //   loc[-5]: X==1 (inverted polarity), (loc[-5] & 0x7) == 0x4
+    //   loc[-4]: W==1, X2==1 (inverted polarity), pp==0b00(NP)
+    //   loc[-3]: NF==1 or ND==1
+    //   loc[-2]: opcode==0x1 or opcode==0x3
+    //   loc[-1]: Mod==0b00, RM==0b101
+    if (((loc[-5] & 0x47) == 0x44) && ((loc[-4] & 0x87) == 0x84) &&
+        ((loc[-3] & 0x14) != 0) && (loc[-2] == 0x1 || loc[-2] == 0x3) &&
+        ((loc[-1] & 0xc7) == 0x5)) {
+      // "addq %reg1, foo@GOTTPOFF(%rip), %reg2" -> "addq $foo, %reg1, %reg2"
+      // "addq foo@GOTTPOFF(%rip), %reg1, %reg2" -> "addq $foo, %reg1, %reg2"
+      // "{nf} addq %reg1, foo@GOTTPOFF(%rip), %reg2"
+      //   -> "{nf} addq $foo, %reg1, %reg2"
+      // "{nf} addq name@GOTTPOFF(%rip), %reg1, %reg2"
+      //   -> "{nf} addq $foo, %reg1, %reg2"
+      // "{nf} addq name@GOTTPOFF(%rip), %reg" -> "{nf} addq $foo, %reg"
+      loc[-2] = 0x81;
+      // Move R bits to B bits in EVEX payloads and ModRM byte.
+      const uint8_t evexPayload0 = loc[-5];
+      if ((evexPayload0 & (1 << 7)) == 0)
+        loc[-5] = (evexPayload0 | (1 << 7)) & ~(1 << 5);
+      if ((evexPayload0 & (1 << 4)) == 0)
+        loc[-5] = evexPayload0 | (1 << 4) | (1 << 3);
+      *regSlot = 0xc0 | reg;
+    } else {
+      Err(ctx) << getErrorLoc(ctx, loc - 6)
+               << "R_X86_64_CODE_6_GOTTPOFF must be used in ADDQ instructions "
+                  "with NDD/NF/NDD+NF only";
+    }
   } else {
     llvm_unreachable("Unsupported relocation type!");
   }
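
As a quick sanity check on the EVEX mask tests added above, the following standalone program (not part of the patch; the function name and example byte sequence are illustrative only) applies the same predicates to a hand-built instruction image whose last four bytes stand in for the displacement the relocation would patch.

#include <cstdint>
#include <cstdio>

// Same predicates as the patch, with loc pointing at the 4-byte displacement.
static bool isRelaxableEvexAdd(const uint8_t *loc) {
  return loc[-6] == 0x62 &&                       // EVEX escape byte
         (loc[-5] & 0x47) == 0x44 &&              // X==1 (inverted), map 4
         (loc[-4] & 0x87) == 0x84 &&              // W==1, X2==1 (inverted), pp==NP
         (loc[-3] & 0x14) != 0 &&                 // NF==1 or ND==1
         (loc[-2] == 0x01 || loc[-2] == 0x03) &&  // add r/m,r or add r,r/m
         (loc[-1] & 0xc7) == 0x05;                // ModRM: Mod==00, RM==101 (RIP)
}

int main() {
  // Illustrative bytes only: EVEX prefix, opcode 0x03, RIP-relative ModRM,
  // then a 4-byte displacement that R_X86_64_CODE_6_GOTTPOFF would patch.
  uint8_t insn[] = {0x62, 0xf4, 0xfc, 0x18, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00};
  std::printf("relaxable: %s\n", isRelaxableEvexAdd(insn + 6) ? "yes" : "no");
  return 0;
}
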
@@ -782,6 +819,7 @@ int64_t X86_64::getImplicitAddend(const uint8_t *buf, RelType type) const {
   case R_X86_64_PC32:
   case R_X86_64_GOTTPOFF:
   case R_X86_64_CODE_4_GOTTPOFF:
+  case R_X86_64_CODE_6_GOTTPOFF:
   case R_X86_64_PLT32:
   case R_X86_64_TLSGD:
   case R_X86_64_TLSLD:
@@ -893,6 +931,7 @@ void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
     break;
   case R_X86_64_GOTTPOFF:
   case R_X86_64_CODE_4_GOTTPOFF:
+  case R_X86_64_CODE_6_GOTTPOFF:
     if (rel.expr == R_RELAX_TLS_IE_TO_LE) {
       relaxTlsIeToLe(loc, rel, val);
     } else {