Skip to content

Commit 702aae3

Browse files
committed
[X86] Move 's{hr,ar,hl} $1, <op>' to 'shift <op>' optimization in the assembler into processInstruction.
Instead of detecting the mnemonic and hacking the operands before parsing, just fix it up after parsing.
1 parent 9339ab3 commit 702aae3

File tree

1 file changed

+112
-21
lines changed

1 file changed

+112
-21
lines changed

llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp

Lines changed: 112 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -3305,27 +3305,6 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
33053305
return HadVerifyError;
33063306
}
33073307

3308-
// FIXME: Hack to handle recognize s{hr,ar,hl} $1, <op>. Canonicalize to
3309-
// "shift <op>".
3310-
if ((Name.startswith("shr") || Name.startswith("sar") ||
3311-
Name.startswith("shl") || Name.startswith("sal") ||
3312-
Name.startswith("rcl") || Name.startswith("rcr") ||
3313-
Name.startswith("rol") || Name.startswith("ror")) &&
3314-
Operands.size() == 3) {
3315-
if (isParsingIntelSyntax()) {
3316-
// Intel syntax
3317-
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[2]);
3318-
if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
3319-
cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
3320-
Operands.pop_back();
3321-
} else {
3322-
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
3323-
if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
3324-
cast<MCConstantExpr>(Op1.getImm())->getValue() == 1)
3325-
Operands.erase(Operands.begin() + 1);
3326-
}
3327-
}
3328-
33293308
// Transforms "int $3" into "int3" as a size optimization. We can't write an
33303309
// instalias with an immediate operand yet.
33313310
if (Name == "int" && Operands.size() == 2) {
@@ -3437,6 +3416,111 @@ bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
34373416
Inst.setOpcode(NewOpc);
34383417
return true;
34393418
}
3419+
case X86::RCR8ri: case X86::RCR16ri: case X86::RCR32ri: case X86::RCR64ri:
3420+
case X86::RCL8ri: case X86::RCL16ri: case X86::RCL32ri: case X86::RCL64ri:
3421+
case X86::ROR8ri: case X86::ROR16ri: case X86::ROR32ri: case X86::ROR64ri:
3422+
case X86::ROL8ri: case X86::ROL16ri: case X86::ROL32ri: case X86::ROL64ri:
3423+
case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri: case X86::SAR64ri:
3424+
case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri: case X86::SHR64ri:
3425+
case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri: case X86::SHL64ri: {
3426+
// Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
3427+
// FIXME: It would be great if we could just do this with an InstAlias.
3428+
if (!Inst.getOperand(2).isImm() || Inst.getOperand(2).getImm() != 1)
3429+
return false;
3430+
3431+
unsigned NewOpc;
3432+
switch (Inst.getOpcode()) {
3433+
default: llvm_unreachable("Invalid opcode");
3434+
case X86::RCR8ri: NewOpc = X86::RCR8r1; break;
3435+
case X86::RCR16ri: NewOpc = X86::RCR16r1; break;
3436+
case X86::RCR32ri: NewOpc = X86::RCR32r1; break;
3437+
case X86::RCR64ri: NewOpc = X86::RCR64r1; break;
3438+
case X86::RCL8ri: NewOpc = X86::RCL8r1; break;
3439+
case X86::RCL16ri: NewOpc = X86::RCL16r1; break;
3440+
case X86::RCL32ri: NewOpc = X86::RCL32r1; break;
3441+
case X86::RCL64ri: NewOpc = X86::RCL64r1; break;
3442+
case X86::ROR8ri: NewOpc = X86::ROR8r1; break;
3443+
case X86::ROR16ri: NewOpc = X86::ROR16r1; break;
3444+
case X86::ROR32ri: NewOpc = X86::ROR32r1; break;
3445+
case X86::ROR64ri: NewOpc = X86::ROR64r1; break;
3446+
case X86::ROL8ri: NewOpc = X86::ROL8r1; break;
3447+
case X86::ROL16ri: NewOpc = X86::ROL16r1; break;
3448+
case X86::ROL32ri: NewOpc = X86::ROL32r1; break;
3449+
case X86::ROL64ri: NewOpc = X86::ROL64r1; break;
3450+
case X86::SAR8ri: NewOpc = X86::SAR8r1; break;
3451+
case X86::SAR16ri: NewOpc = X86::SAR16r1; break;
3452+
case X86::SAR32ri: NewOpc = X86::SAR32r1; break;
3453+
case X86::SAR64ri: NewOpc = X86::SAR64r1; break;
3454+
case X86::SHR8ri: NewOpc = X86::SHR8r1; break;
3455+
case X86::SHR16ri: NewOpc = X86::SHR16r1; break;
3456+
case X86::SHR32ri: NewOpc = X86::SHR32r1; break;
3457+
case X86::SHR64ri: NewOpc = X86::SHR64r1; break;
3458+
case X86::SHL8ri: NewOpc = X86::SHL8r1; break;
3459+
case X86::SHL16ri: NewOpc = X86::SHL16r1; break;
3460+
case X86::SHL32ri: NewOpc = X86::SHL32r1; break;
3461+
case X86::SHL64ri: NewOpc = X86::SHL64r1; break;
3462+
}
3463+
3464+
MCInst TmpInst;
3465+
TmpInst.setOpcode(NewOpc);
3466+
TmpInst.addOperand(Inst.getOperand(0));
3467+
TmpInst.addOperand(Inst.getOperand(1));
3468+
Inst = TmpInst;
3469+
return true;
3470+
}
3471+
case X86::RCR8mi: case X86::RCR16mi: case X86::RCR32mi: case X86::RCR64mi:
3472+
case X86::RCL8mi: case X86::RCL16mi: case X86::RCL32mi: case X86::RCL64mi:
3473+
case X86::ROR8mi: case X86::ROR16mi: case X86::ROR32mi: case X86::ROR64mi:
3474+
case X86::ROL8mi: case X86::ROL16mi: case X86::ROL32mi: case X86::ROL64mi:
3475+
case X86::SAR8mi: case X86::SAR16mi: case X86::SAR32mi: case X86::SAR64mi:
3476+
case X86::SHR8mi: case X86::SHR16mi: case X86::SHR32mi: case X86::SHR64mi:
3477+
case X86::SHL8mi: case X86::SHL16mi: case X86::SHL32mi: case X86::SHL64mi: {
3478+
// Optimize s{hr,ar,hl} $1, <op> to "shift <op>". Similar for rotate.
3479+
// FIXME: It would be great if we could just do this with an InstAlias.
3480+
if (!Inst.getOperand(X86::AddrNumOperands).isImm() ||
3481+
Inst.getOperand(X86::AddrNumOperands).getImm() != 1)
3482+
return false;
3483+
3484+
unsigned NewOpc;
3485+
switch (Inst.getOpcode()) {
3486+
default: llvm_unreachable("Invalid opcode");
3487+
case X86::RCR8mi: NewOpc = X86::RCR8m1; break;
3488+
case X86::RCR16mi: NewOpc = X86::RCR16m1; break;
3489+
case X86::RCR32mi: NewOpc = X86::RCR32m1; break;
3490+
case X86::RCR64mi: NewOpc = X86::RCR64m1; break;
3491+
case X86::RCL8mi: NewOpc = X86::RCL8m1; break;
3492+
case X86::RCL16mi: NewOpc = X86::RCL16m1; break;
3493+
case X86::RCL32mi: NewOpc = X86::RCL32m1; break;
3494+
case X86::RCL64mi: NewOpc = X86::RCL64m1; break;
3495+
case X86::ROR8mi: NewOpc = X86::ROR8m1; break;
3496+
case X86::ROR16mi: NewOpc = X86::ROR16m1; break;
3497+
case X86::ROR32mi: NewOpc = X86::ROR32m1; break;
3498+
case X86::ROR64mi: NewOpc = X86::ROR64m1; break;
3499+
case X86::ROL8mi: NewOpc = X86::ROL8m1; break;
3500+
case X86::ROL16mi: NewOpc = X86::ROL16m1; break;
3501+
case X86::ROL32mi: NewOpc = X86::ROL32m1; break;
3502+
case X86::ROL64mi: NewOpc = X86::ROL64m1; break;
3503+
case X86::SAR8mi: NewOpc = X86::SAR8m1; break;
3504+
case X86::SAR16mi: NewOpc = X86::SAR16m1; break;
3505+
case X86::SAR32mi: NewOpc = X86::SAR32m1; break;
3506+
case X86::SAR64mi: NewOpc = X86::SAR64m1; break;
3507+
case X86::SHR8mi: NewOpc = X86::SHR8m1; break;
3508+
case X86::SHR16mi: NewOpc = X86::SHR16m1; break;
3509+
case X86::SHR32mi: NewOpc = X86::SHR32m1; break;
3510+
case X86::SHR64mi: NewOpc = X86::SHR64m1; break;
3511+
case X86::SHL8mi: NewOpc = X86::SHL8m1; break;
3512+
case X86::SHL16mi: NewOpc = X86::SHL16m1; break;
3513+
case X86::SHL32mi: NewOpc = X86::SHL32m1; break;
3514+
case X86::SHL64mi: NewOpc = X86::SHL64m1; break;
3515+
}
3516+
3517+
MCInst TmpInst;
3518+
TmpInst.setOpcode(NewOpc);
3519+
for (int i = 0; i != X86::AddrNumOperands; ++i)
3520+
TmpInst.addOperand(Inst.getOperand(i));
3521+
Inst = TmpInst;
3522+
return true;
3523+
}
34403524
}
34413525
}
34423526

@@ -3906,6 +3990,13 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
39063990
unsigned NumSuccessfulMatches =
39073991
std::count(std::begin(Match), std::end(Match), Match_Success);
39083992
if (NumSuccessfulMatches == 1) {
3993+
// Some instructions need post-processing to, for example, tweak which
3994+
// encoding is selected. Loop on it while changes happen so the
3995+
// individual transformations can chain off each other.
3996+
if (!MatchingInlineAsm)
3997+
while (processInstruction(Inst, Operands))
3998+
;
3999+
39094000
Inst.setLoc(IDLoc);
39104001
if (!MatchingInlineAsm)
39114002
emitInstruction(Inst, Operands, Out);

0 commit comments

Comments
 (0)