diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 33007e5b285d8..3be17f9538d0f 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -1333,5 +1333,668 @@ define i48 @mad_i48_i48(i48 %arg0, i48 %arg1, i48 %arg2) #0 {
   ret i48 %a
 }
 
+define i64 @lshr_mad_i64_1(i64 %arg0, i64 %arg1) #0 {
+; CI-LABEL: lshr_mad_i64_1:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: s_movk_i32 s4, 0xfc19
+; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1
+; CI-NEXT: v_mov_b32_e32 v0, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_movk_i32 s4, 0xfc19
+; SI-NEXT: v_mul_hi_u32 v2, v1, s4
+; SI-NEXT: v_mul_lo_u32 v3, v1, s4
+; SI-NEXT: v_sub_i32_e32 v2, vcc, v2, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v3, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0xfc19
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xfffffc19, v1, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, s0xfffffffffffffc19
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_2(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_2:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: s_movk_i32 s4, 0xd1
+; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1
+; CI-NEXT: v_mov_b32_e32 v0, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_movk_i32 s4, 0xd1
+; SI-NEXT: v_mul_hi_u32 v2, v1, s4
+; SI-NEXT: v_mul_lo_u32 v3, v1, s4
+; SI-NEXT: v_sub_i32_e32 v2, vcc, v2, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v3, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0xd1
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, s0xffffffff000000d1
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_3(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_3:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: s_movk_i32 s4, 0xfc88
+; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v1
+; CI-NEXT: v_mov_b32_e32 v0, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_3:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_movk_i32 s4, 0xfc88
+; SI-NEXT: v_mul_hi_u32 v2, v1, s4
+; SI-NEXT: v_mul_lo_u32 v3, v1, s4
+; SI-NEXT: v_sub_i32_e32 v2, vcc, v2, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v3, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0xfc88
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT: v_sub_u32_e32 v1, v3, v1
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_3:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_3:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xfffffc88, v1, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 s0xfffffffffffffc88, %lsh
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_4(i32 %arg0, i64 %arg1) #0 {
+; CI-LABEL: lshr_mad_i64_4:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_mul_lo_u32 v3, v2, v0
+; CI-NEXT: v_mad_u64_u32 v[1:2], s[4:5], v1, v0, 0
+; CI-NEXT: s_movk_i32 s4, 0xfc88
+; CI-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; CI-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[1:2]
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_4:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mul_lo_u32 v2, v2, v0
+; SI-NEXT: v_mul_hi_u32 v3, v1, v0
+; SI-NEXT: s_movk_i32 s4, 0xfc88
+; SI-NEXT: v_mul_lo_u32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v2, vcc, v3, v2
+; SI-NEXT: v_mul_hi_u32 v3, v2, s4
+; SI-NEXT: v_mul_lo_u32 v1, v2, s4
+; SI-NEXT: v_sub_i32_e32 v3, vcc, v3, v2
+; SI-NEXT: v_add_i32_e32 v0, vcc, v1, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v2, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v0, 0
+; GFX9-NEXT: v_mov_b32_e32 v6, v5
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v2, v0, v[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v5, v2
+; GFX9-NEXT: s_movk_i32 s4, 0xfc88
+; GFX9-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, s4, v[4:5]
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_4:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[3:4], null, v1, v0, 0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v1, v4
+; GFX11-NEXT: v_mad_u64_u32 v[5:6], null, v2, v0, v[1:2]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v4, v5
+; GFX11-NEXT: v_mad_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v1, v5
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_4:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v0, 0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v1, v4
+; GFX12-NEXT: v_mad_co_u64_u32 v[5:6], null, v2, v0, v[1:2]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v4, v5
+; GFX12-NEXT: v_mad_co_u64_u32 v[0:1], null, 0xfffffc88, v5, v[3:4]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v1, v5
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %ext = zext i32 %arg0 to i64
+ %mul1 = mul i64 %arg1, %ext
+ %lsh = lshr i64 %mul1, 32
+ %mul2 = mul i64 %lsh, s0xfffffffffffffc88
+ %mad = add i64 %mul2, %mul1
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_negative_1(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_negative_1:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_lshrrev_b32_e32 v2, 4, v1
+; CI-NEXT: s_movk_i32 s4, 0xfc19
+; CI-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v2, s4, v[0:1]
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_negative_1:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v2, 4, v1
+; SI-NEXT: s_movk_i32 s4, 0xfc19
+; SI-NEXT: v_mul_lo_u32 v3, v2, s4
+; SI-NEXT: v_mul_hi_i32 v2, v2, s4
+; SI-NEXT: v_add_i32_e32 v0, vcc, v3, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_negative_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 4, v1
+; GFX9-NEXT: s_movk_i32 s4, 0xfc19
+; GFX9-NEXT: v_mad_i64_i32 v[0:1], s[4:5], v2, s4, v[0:1]
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1100-LABEL: lshr_mad_i64_negative_1:
+; GFX1100: ; %bb.0:
+; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT: v_lshrrev_b32_e32 v4, 4, v1
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT: v_mad_i64_i32 v[2:3], null, 0xfffffc19, v4, v[0:1]
+; GFX1100-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
+; GFX1100-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_negative_1:
+; GFX1150: ; %bb.0:
+; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT: v_lshrrev_b32_e32 v2, 4, v1
+; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1150-NEXT: v_mad_i64_i32 v[0:1], null, 0xfffffc19, v2, v[0:1]
+; GFX1150-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_negative_1:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b32_e32 v2, 4, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_mad_co_i64_i32 v[0:1], null, 0xfffffc19, v2, v[0:1]
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 36
+ %mul = mul i64 %lsh, s0xfffffffffffffc19
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_negative_2(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_negative_2:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: s_movk_i32 s4, 0xd1
+; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; CI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v3, v0
+; CI-NEXT: v_mov_b32_e32 v0, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_negative_2:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_movk_i32 s4, 0xd1
+; SI-NEXT: v_mul_hi_u32 v2, v1, s4
+; SI-NEXT: v_mul_lo_u32 v4, v1, s4
+; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v1
+; SI-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
+; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_negative_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0xd1
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: v_sub_u32_e32 v1, v3, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_negative_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
+; GFX11-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v3, v0
+; GFX11-NEXT: v_mov_b32_e32 v0, v2
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_negative_2:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[2:3], null, 0xd1, v1, v[0:1]
+; GFX12-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v3, v0
+; GFX12-NEXT: v_mov_b32_e32 v0, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, s0xffffff00000000d1
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_negative_3(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_negative_3:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_lshr_b64 v[2:3], v[0:1], 22
+; CI-NEXT: v_and_b32_e32 v2, 0xfffffc00, v2
+; CI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; CI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; CI-NEXT: v_add_i32_e32 v0, vcc, 1, v0
+; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_negative_3:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_lshr_b64 v[2:3], v[0:1], 22
+; SI-NEXT: v_and_b32_e32 v2, 0xfffffc00, v2
+; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: v_add_i32_e32 v0, vcc, 1, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_negative_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b64 v[2:3], 22, v[0:1]
+; GFX9-NEXT: v_and_b32_e32 v2, 0xfffffc00, v2
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_negative_3:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_lshrrev_b64 v[2:3], 22, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_and_b32_e32 v2, 0xfffffc00, v2
+; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_negative_3:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_lshrrev_b64 v[2:3], 22, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_and_b32_e32 v2, 0xfffffc00, v2
+; GFX12-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX12-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX12-NEXT: v_add_co_u32 v0, vcc_lo, v0, 1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX12-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %op = add i64 %arg0, 1
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, s0xfffffffffffffc00
+ %mad = add i64 %mul, %op
+
+ ret i64 %mad
+}
+
+define i64 @lshr_mad_i64_negative_4(i64 %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_negative_4:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v0, v[0:1]
+; CI-NEXT: v_mul_lo_u32 v0, v1, v1
+; CI-NEXT: v_add_i32_e32 v1, vcc, v0, v3
+; CI-NEXT: v_mov_b32_e32 v0, v2
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_negative_4:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mul_hi_u32 v2, v1, v0
+; SI-NEXT: v_mul_lo_u32 v3, v1, v1
+; SI-NEXT: v_mul_lo_u32 v4, v1, v0
+; SI-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; SI-NEXT: v_add_i32_e32 v0, vcc, v4, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_negative_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mad_u64_u32 v[2:3], s[4:5], v1, v0, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v3
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, v1, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v1, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1100-LABEL: lshr_mad_i64_negative_4:
+; GFX1100: ; %bb.0:
+; GFX1100-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1100-NEXT: v_mad_u64_u32 v[2:3], null, v1, v0, v[0:1]
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1100-NEXT: v_mov_b32_e32 v0, v3
+; GFX1100-NEXT: v_mad_u64_u32 v[3:4], null, v1, v1, v[0:1]
+; GFX1100-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1100-NEXT: v_dual_mov_b32 v0, v2 :: v_dual_mov_b32 v1, v3
+; GFX1100-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1150-LABEL: lshr_mad_i64_negative_4:
+; GFX1150: ; %bb.0:
+; GFX1150-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1150-NEXT: v_mad_u64_u32 v[3:4], null, v1, v0, v[0:1]
+; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1150-NEXT: v_mov_b32_e32 v0, v4
+; GFX1150-NEXT: v_mad_u64_u32 v[1:2], null, v1, v1, v[0:1]
+; GFX1150-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX1150-NEXT: v_mov_b32_e32 v0, v3
+; GFX1150-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_negative_4:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[3:4], null, v1, v0, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: v_mad_co_u64_u32 v[1:2], null, v1, v1, v[0:1]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-NEXT: v_mov_b32_e32 v0, v3
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, %arg0
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define amdgpu_ps i64 @lshr_mad_i64_sgpr(i64 inreg %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_sgpr:
+; CI: ; %bb.0:
+; CI-NEXT: v_mov_b32_e32 v0, s0
+; CI-NEXT: v_mov_b32_e32 v2, 0xffff1c18
+; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mad_u64_u32 v[0:1], s[2:3], s1, v2, v[0:1]
+; CI-NEXT: v_subrev_i32_e32 v1, vcc, s1, v1
+; CI-NEXT: v_readfirstlane_b32 s0, v0
+; CI-NEXT: v_readfirstlane_b32 s1, v1
+; CI-NEXT: ; return to shader part epilog
+;
+; SI-LABEL: lshr_mad_i64_sgpr:
+; SI: ; %bb.0:
+; SI-NEXT: v_mov_b32_e32 v0, 0xffff1c18
+; SI-NEXT: v_mul_hi_u32 v0, s1, v0
+; SI-NEXT: s_mul_i32 s2, s1, 0xffff1c18
+; SI-NEXT: v_readfirstlane_b32 s3, v0
+; SI-NEXT: s_sub_i32 s3, s3, s1
+; SI-NEXT: s_add_u32 s0, s2, s0
+; SI-NEXT: s_addc_u32 s1, s3, s1
+; SI-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: lshr_mad_i64_sgpr:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mul_hi_u32 s2, s1, 0xffff1c18
+; GFX9-NEXT: s_sub_i32 s2, s2, s1
+; GFX9-NEXT: s_mul_i32 s3, s1, 0xffff1c18
+; GFX9-NEXT: s_add_u32 s0, s3, s0
+; GFX9-NEXT: s_addc_u32 s1, s2, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX11-LABEL: lshr_mad_i64_sgpr:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_mul_hi_u32 s2, s1, 0xffff1c18
+; GFX11-NEXT: s_mul_i32 s3, s1, 0xffff1c18
+; GFX11-NEXT: s_sub_i32 s2, s2, s1
+; GFX11-NEXT: s_add_u32 s0, s3, s0
+; GFX11-NEXT: s_addc_u32 s1, s2, s1
+; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: lshr_mad_i64_sgpr:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_mov_b32 s4, 0xffff1c18
+; GFX12-NEXT: s_mov_b32 s3, 0
+; GFX12-NEXT: s_mov_b32 s2, s1
+; GFX12-NEXT: s_mov_b32 s5, -1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_mul_u64 s[2:3], s[2:3], s[4:5]
+; GFX12-NEXT: s_add_nc_u64 s[0:1], s[2:3], s[0:1]
+; GFX12-NEXT: ; return to shader part epilog
+ %lsh = lshr i64 %arg0, 32
+ %mul = mul i64 %lsh, s0xffffffffffff1c18
+ %mad = add i64 %mul, %arg0
+
+ ret i64 %mad
+}
+
+define <2 x i64> @lshr_mad_i64_vec(<2 x i64> %arg0) #0 {
+; CI-LABEL: lshr_mad_i64_vec:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: s_mov_b32 s4, 0xffff1c18
+; CI-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
+; CI-NEXT: s_mov_b32 s4, 0xffff1118
+; CI-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
+; CI-NEXT: v_sub_i32_e32 v1, vcc, v5, v1
+; CI-NEXT: v_sub_i32_e32 v3, vcc, v7, v3
+; CI-NEXT: v_mov_b32_e32 v0, v4
+; CI-NEXT: v_mov_b32_e32 v2, v6
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: lshr_mad_i64_vec:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_mov_b32 s4, 0xffff1118
+; SI-NEXT: v_mul_lo_u32 v4, v3, s4
+; SI-NEXT: v_mul_hi_u32 v5, v3, s4
+; SI-NEXT: s_mov_b32 s4, 0xffff1c18
+; SI-NEXT: v_mul_hi_u32 v6, v1, s4
+; SI-NEXT: v_mul_lo_u32 v7, v1, s4
+; SI-NEXT: v_sub_i32_e32 v5, vcc, v5, v3
+; SI-NEXT: v_sub_i32_e32 v6, vcc, v6, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, v7, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v6, v1, vcc
+; SI-NEXT: v_add_i32_e32 v2, vcc, v4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v5, v3, vcc
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: lshr_mad_i64_vec:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xffff1c18
+; GFX9-NEXT: v_mad_u64_u32 v[4:5], s[4:5], v1, s4, v[0:1]
+; GFX9-NEXT: s_mov_b32 s4, 0xffff1118
+; GFX9-NEXT: v_mad_u64_u32 v[6:7], s[4:5], v3, s4, v[2:3]
+; GFX9-NEXT: v_sub_u32_e32 v1, v5, v1
+; GFX9-NEXT: v_sub_u32_e32 v3, v7, v3
+; GFX9-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v2, v6
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: lshr_mad_i64_vec:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_mad_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
+; GFX11-NEXT: v_mad_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_sub_nc_u32_e32 v1, v5, v1
+; GFX11-NEXT: v_mov_b32_e32 v0, v4
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_sub_nc_u32_e32 v3, v7, v3
+; GFX11-NEXT: v_mov_b32_e32 v2, v6
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: lshr_mad_i64_vec:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mad_co_u64_u32 v[4:5], null, 0xffff1c18, v1, v[0:1]
+; GFX12-NEXT: v_mad_co_u64_u32 v[6:7], null, 0xffff1118, v3, v[2:3]
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX12-NEXT: v_sub_nc_u32_e32 v1, v5, v1
+; GFX12-NEXT: v_mov_b32_e32 v0, v4
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX12-NEXT: v_sub_nc_u32_e32 v3, v7, v3
+; GFX12-NEXT: v_mov_b32_e32 v2, v6
+; GFX12-NEXT: s_setpc_b64 s[30:31]
+ %lsh = lshr <2 x i64> %arg0, <i64 32, i64 32>
+ %mul = mul <2 x i64> %lsh, <i64 s0xffffffffffff1c18, i64 s0xffffffffffff1118>
+ %mad = add <2 x i64> %mul, %arg0
+
+ ret <2 x i64> %mad
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone speculatable }