diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 4bf660b5e234a..9e4e5547c642c 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -3544,7 +3544,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
 
     if (NeedLoad)
-      MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
+      MIB.addReg(Is64Bit ? X86::RIP : X86::NoRegister).addImm(1).addReg(0);
     if (Symbol)
       MIB.addSym(Symbol, OpFlags);
     else
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index db633d10edc49..a469a5a554354 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29986,7 +29986,7 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
         MVT::getVectorVT(NarrowScalarVT, WideNumElts), dl, AmtWideElts);
     AmtWide = DAG.getZExtOrTrunc(AmtWide, dl, WideVT);
     // Perform the actual shift.
-    unsigned LogicalOpc = Opc == ISD::SRA ? ISD::SRL : Opc;
+    unsigned LogicalOpc = Opc == ISD::SRA ? (unsigned)ISD::SRL : Opc;
     SDValue ShiftedR = DAG.getNode(LogicalOpc, dl, WideVT, RWide, AmtWide);
     // Now we need to construct a mask which will "drop" bits that get
    // shifted past the LSB/MSB. For a logical shift left, it will look
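
For context (not stated in the patch itself): both hunks replace a mixed-type operand in a conditional expression with one of matching type. A plausible motivation, assumed here, is the GCC -Wextra diagnostic about combining an enumerated and a non-enumerated type in a conditional expression. A minimal standalone sketch of that pattern and the cast-based fix, using illustrative names rather than LLVM's:

// Standalone C++ sketch; Opcode/pickLogicalOpc are hypothetical names.
enum Opcode : unsigned { SRA = 1, SRL = 2 };

unsigned pickLogicalOpc(unsigned Opc) {
  // With GCC -Wextra, mixing an enumerator and a plain unsigned as the two
  // result operands of ?: can warn ("enumerated and non-enumerated type in
  // conditional expression"):
  //   unsigned LogicalOpc = Opc == SRA ? SRL : Opc;
  // Casting the enumerator gives both operands the same type, mirroring the
  // (unsigned)ISD::SRL change above.
  unsigned LogicalOpc = Opc == SRA ? (unsigned)SRL : Opc;
  return LogicalOpc;
}

The X86FastISel.cpp hunk follows the same idea from the other direction: instead of a cast, it swaps the literal 0 for the enumerator X86::NoRegister so both arms of the conditional are drawn from the same register enum, which also reads more clearly.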