diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index d75c7a178b4a8..8f9c0530e09c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4328,7 +4328,8 @@ SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
     Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, LHSSL, TargetType, SplitLHS, One);
   }
 
-  SDValue NewShift = DAG.getNode(ISD::SRL, SL, TargetType, Hi, ShiftAmt);
+  SDValue NewShift =
+      DAG.getNode(ISD::SRL, SL, TargetType, Hi, ShiftAmt, N->getFlags());
 
   SDValue Vec;
   if (VT.isVector()) {
diff --git a/llvm/test/CodeGen/AMDGPU/srl64_reduce_flags.ll b/llvm/test/CodeGen/AMDGPU/srl64_reduce_flags.ll
index c79a94b837a31..ca4b72872a03b 100644
--- a/llvm/test/CodeGen/AMDGPU/srl64_reduce_flags.ll
+++ b/llvm/test/CodeGen/AMDGPU/srl64_reduce_flags.ll
@@ -30,7 +30,7 @@ define i64 @srl_exact(i64 %arg0, i64 %shift_amt) {
 ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[DEF3]]
 ; CHECK-NEXT:   [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, killed [[COPY4]], %subreg.sub1
 ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[REG_SEQUENCE1]].sub0
-; CHECK-NEXT:   [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 killed [[COPY5]], killed [[COPY3]], implicit $exec
+; CHECK-NEXT:   [[V_LSHRREV_B32_e64_:%[0-9]+]]:vgpr_32 = exact V_LSHRREV_B32_e64 killed [[COPY5]], killed [[COPY3]], implicit $exec
 ; CHECK-NEXT:   [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 ; CHECK-NEXT:   $vgpr0 = COPY [[V_LSHRREV_B32_e64_]]
 ; CHECK-NEXT:   $vgpr1 = COPY [[V_MOV_B32_e32_]]