Skip to content

Commit cb906e3

Browse files
committed
[AMDGPU] Rework getDivNumBits API
The rework involves three things: return an unsigned value; change the hint parameter from AtLeast(SignBits) to MaxDivBits; and use the MaxDivBits hint for the unsigned case as well.
1 parent 463e93b commit cb906e3

File tree

3 files changed

+88
-214
lines changed

3 files changed

+88
-214
lines changed

llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp

Lines changed: 30 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -254,9 +254,8 @@ class AMDGPUCodeGenPrepareImpl
254254

255255
bool divHasSpecialOptimization(BinaryOperator &I,
256256
Value *Num, Value *Den) const;
257-
int getDivNumBits(BinaryOperator &I,
258-
Value *Num, Value *Den,
259-
unsigned AtLeast, bool Signed) const;
257+
unsigned getDivNumBits(BinaryOperator &I, Value *Num, Value *Den,
258+
unsigned MaxDivBits, bool IsSigned) const;
260259

261260
/// Expands 24 bit div or rem.
262261
Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
@@ -1189,27 +1188,32 @@ static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
11891188
return getMul64(Builder, LHS, RHS).second;
11901189
}
11911190

1192-
/// Figure out how many bits are really needed for this division. \p AtLeast is
1193-
/// an optimization hint to bypass the second ComputeNumSignBits call if the
1194-
/// first one is insufficient. Returns -1 on failure.
1195-
int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
1196-
Value *Den, unsigned AtLeast,
1197-
bool IsSigned) const {
1191+
/// Figure out how many bits are really needed for this division.
1192+
/// \p MaxDivBits is an optimization hint to bypass the second
1193+
/// ComputeNumSignBits/computeKnownBits call if the first one is
1194+
/// insufficient.
1195+
unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
1196+
Value *Den,
1197+
unsigned MaxDivBits,
1198+
bool IsSigned) const {
11981199
assert(Num->getType()->getScalarSizeInBits() ==
11991200
Den->getType()->getScalarSizeInBits());
12001201
unsigned SSBits = Num->getType()->getScalarSizeInBits();
12011202
if (IsSigned) {
12021203
unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
1203-
if (RHSSignBits < AtLeast)
1204-
return -1;
1204+
// A sign bit needs to be reserved for shrinking.
1205+
unsigned DivBits = SSBits - RHSSignBits + 1;
1206+
if (DivBits > MaxDivBits)
1207+
return DivBits;
12051208

12061209
unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
1207-
if (LHSSignBits < AtLeast)
1208-
return -1;
1210+
DivBits = SSBits - LHSSignBits + 1;
1211+
if (DivBits > MaxDivBits)
1212+
return DivBits;
12091213

12101214
unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1211-
unsigned DivBits = SSBits - SignBits + 1;
1212-
return DivBits; // a SignBit needs to be reserved for shrinking
1215+
DivBits = SSBits - SignBits + 1;
1216+
return DivBits;
12131217
}
12141218

12151219
// All bits are used for unsigned division for Num or Den in range
@@ -1218,14 +1222,20 @@ int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
12181222
if (Known.isNegative() || !Known.isNonNegative())
12191223
return SSBits;
12201224
unsigned RHSSignBits = Known.countMinLeadingZeros();
1225+
unsigned DivBits = SSBits - RHSSignBits;
1226+
if (DivBits > MaxDivBits)
1227+
return DivBits;
12211228

12221229
Known = computeKnownBits(Num, DL, 0, AC, &I);
12231230
if (Known.isNegative() || !Known.isNonNegative())
12241231
return SSBits;
12251232
unsigned LHSSignBits = Known.countMinLeadingZeros();
1233+
DivBits = SSBits - LHSSignBits;
1234+
if (DivBits > MaxDivBits)
1235+
return DivBits;
12261236

12271237
unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1228-
unsigned DivBits = SSBits - SignBits;
1238+
DivBits = SSBits - SignBits;
12291239
return DivBits;
12301240
}
12311241

@@ -1235,11 +1245,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
12351245
BinaryOperator &I, Value *Num,
12361246
Value *Den, bool IsDiv,
12371247
bool IsSigned) const {
1238-
unsigned SSBits = Num->getType()->getScalarSizeInBits();
1239-
// If Num bits <= 24, assume 0 signbits.
1240-
unsigned AtLeast = (SSBits <= 24) ? 0 : (SSBits - 24 + IsSigned);
1241-
int DivBits = getDivNumBits(I, Num, Den, AtLeast, IsSigned);
1242-
if (DivBits == -1 || DivBits > 24)
1248+
unsigned DivBits = getDivNumBits(I, Num, Den, 24, IsSigned);
1249+
if (DivBits > 24)
12431250
return nullptr;
12441251
return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
12451252
}
@@ -1523,8 +1530,8 @@ Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
15231530
bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
15241531
bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
15251532

1526-
int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1527-
if (NumDivBits == -1)
1533+
unsigned NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
1534+
if (NumDivBits > 32)
15281535
return nullptr;
15291536

15301537
Value *Narrowed = nullptr;

llvm/test/CodeGen/AMDGPU/sdiv64.ll

Lines changed: 27 additions & 90 deletions
Original file line numberDiff line numberDiff line change
@@ -1065,100 +1065,37 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
10651065
; GCN-NEXT: s_endpgm
10661066
;
10671067
; GCN-IR-LABEL: s_test_sdiv24_48:
1068-
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
1069-
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
1070-
; GCN-IR-NEXT: s_mov_b32 s15, 0
1071-
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
1072-
; GCN-IR-NEXT: s_sext_i32_i16 s1, s1
1073-
; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[0:1], 24
1074-
; GCN-IR-NEXT: s_sext_i32_i16 s3, s3
1075-
; GCN-IR-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
1076-
; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 24
1077-
; GCN-IR-NEXT: s_ashr_i64 s[6:7], s[0:1], 16
1078-
; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
1079-
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 16
1080-
; GCN-IR-NEXT: s_mov_b32 s1, s0
1081-
; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[2:3], 16
1082-
; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31
1083-
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[6:7], s[0:1]
1084-
; GCN-IR-NEXT: s_mov_b32 s3, s2
1085-
; GCN-IR-NEXT: s_sub_u32 s12, s6, s0
1086-
; GCN-IR-NEXT: s_subb_u32 s13, s7, s0
1087-
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[2:3]
1088-
; GCN-IR-NEXT: s_sub_u32 s6, s6, s2
1089-
; GCN-IR-NEXT: s_subb_u32 s7, s7, s2
1090-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
1091-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[12:13], 0
1092-
; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[6:7]
1093-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
1094-
; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[12:13]
1095-
; GCN-IR-NEXT: s_sub_u32 s16, s14, s20
1096-
; GCN-IR-NEXT: s_subb_u32 s17, 0, 0
1097-
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
1098-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
1099-
; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19]
1100-
; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec
1101-
; GCN-IR-NEXT: s_cselect_b32 s11, 0, s13
1102-
; GCN-IR-NEXT: s_cselect_b32 s10, 0, s12
1103-
; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
1104-
; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
1105-
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19]
1106-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5
1107-
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
1108-
; GCN-IR-NEXT: s_add_u32 s18, s16, 1
1109-
; GCN-IR-NEXT: s_addc_u32 s19, s17, 0
1110-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
1111-
; GCN-IR-NEXT: s_sub_i32 s16, 63, s16
1112-
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
1113-
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16
1114-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4
1115-
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
1116-
; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s18
1117-
; GCN-IR-NEXT: s_add_u32 s18, s6, -1
1118-
; GCN-IR-NEXT: s_addc_u32 s19, s7, -1
1119-
; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15]
1120-
; GCN-IR-NEXT: s_add_u32 s12, s8, s20
1121-
; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
1122-
; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
1123-
; GCN-IR-NEXT: s_mov_b32 s9, 0
1124-
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
1125-
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
1126-
; GCN-IR-NEXT: s_lshl_b64 s[16:17], s[16:17], 1
1127-
; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
1128-
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
1129-
; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9]
1130-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11]
1131-
; GCN-IR-NEXT: s_sub_u32 s8, s18, s16
1132-
; GCN-IR-NEXT: s_subb_u32 s8, s19, s17
1133-
; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31
1134-
; GCN-IR-NEXT: s_mov_b32 s15, s14
1135-
; GCN-IR-NEXT: s_and_b32 s8, s14, 1
1136-
; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
1137-
; GCN-IR-NEXT: s_sub_u32 s16, s16, s14
1138-
; GCN-IR-NEXT: s_subb_u32 s17, s17, s15
1139-
; GCN-IR-NEXT: s_add_u32 s12, s12, 1
1140-
; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
1141-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
1142-
; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9]
1143-
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
1144-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_3
1145-
; GCN-IR-NEXT: .LBB9_4: ; %Flow4
1146-
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1
1147-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[6:7]
1148-
; GCN-IR-NEXT: .LBB9_5: ; %udiv-end
1149-
; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
1150-
; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1]
1151-
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[10:11], s[0:1]
1152-
; GCN-IR-NEXT: s_sub_u32 s0, s2, s0
1153-
; GCN-IR-NEXT: s_subb_u32 s1, s3, s1
1068+
; GCN-IR: ; %bb.0:
1069+
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
1070+
; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
11541071
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
11551072
; GCN-IR-NEXT: s_mov_b32 s6, -1
1156-
; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
11571073
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
1158-
; GCN-IR-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
1159-
; GCN-IR-NEXT: s_waitcnt expcnt(0)
1160-
; GCN-IR-NEXT: v_mov_b32_e32 v0, s0
1074+
; GCN-IR-NEXT: s_mov_b32 s5, s1
1075+
; GCN-IR-NEXT: s_sext_i32_i16 s1, s9
1076+
; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
1077+
; GCN-IR-NEXT: v_alignbit_b32 v0, s1, v0, 24
1078+
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0
1079+
; GCN-IR-NEXT: s_mov_b32 s4, s0
1080+
; GCN-IR-NEXT: s_sext_i32_i16 s0, s3
1081+
; GCN-IR-NEXT: v_mov_b32_e32 v2, s2
1082+
; GCN-IR-NEXT: v_alignbit_b32 v2, s0, v2, 24
1083+
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2
1084+
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1
1085+
; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0
1086+
; GCN-IR-NEXT: v_ashrrev_i32_e32 v0, 30, v0
1087+
; GCN-IR-NEXT: v_or_b32_e32 v0, 1, v0
1088+
; GCN-IR-NEXT: v_mul_f32_e32 v2, v3, v4
1089+
; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2
1090+
; GCN-IR-NEXT: v_mad_f32 v3, -v2, v1, v3
1091+
; GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2
1092+
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
1093+
; GCN-IR-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
1094+
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v2
1095+
; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24
1096+
; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0
11611097
; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
1098+
; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4
11621099
; GCN-IR-NEXT: s_endpgm
11631100
%1 = ashr i48 %x, 24
11641101
%2 = ashr i48 %y, 24

llvm/test/CodeGen/AMDGPU/srem64.ll

Lines changed: 31 additions & 101 deletions
Original file line numberDiff line numberDiff line change
@@ -1188,109 +1188,39 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %
11881188
; GCN-NEXT: s_endpgm
11891189
;
11901190
; GCN-IR-LABEL: s_test_srem24_48:
1191-
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
1192-
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
1193-
; GCN-IR-NEXT: s_mov_b32 s13, 0
1191+
; GCN-IR: ; %bb.0:
1192+
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
1193+
; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
1194+
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
1195+
; GCN-IR-NEXT: s_mov_b32 s6, -1
11941196
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
1195-
; GCN-IR-NEXT: s_sext_i32_i16 s1, s1
11961197
; GCN-IR-NEXT: s_sext_i32_i16 s3, s3
1197-
; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[0:1], 24
1198-
; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 24
1199-
; GCN-IR-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
1200-
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], 16
1201-
; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[0:1], 16
1202-
; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
1203-
; GCN-IR-NEXT: s_mov_b32 s1, s0
1204-
; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 16
1205-
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
1206-
; GCN-IR-NEXT: s_sub_u32 s2, s2, s0
1207-
; GCN-IR-NEXT: s_subb_u32 s3, s3, s0
1208-
; GCN-IR-NEXT: s_ashr_i32 s10, s7, 31
1209-
; GCN-IR-NEXT: s_mov_b32 s11, s10
1210-
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[10:11]
1211-
; GCN-IR-NEXT: s_sub_u32 s6, s6, s10
1212-
; GCN-IR-NEXT: s_subb_u32 s7, s7, s10
1213-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
1214-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
1215-
; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[6:7]
1216-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
1217-
; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[2:3]
1218-
; GCN-IR-NEXT: s_sub_u32 s14, s12, s20
1219-
; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
1220-
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
1221-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
1222-
; GCN-IR-NEXT: s_or_b64 s[16:17], s[10:11], s[16:17]
1223-
; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec
1224-
; GCN-IR-NEXT: s_cselect_b32 s11, 0, s3
1225-
; GCN-IR-NEXT: s_cselect_b32 s10, 0, s2
1226-
; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
1227-
; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
1228-
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
1229-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5
1230-
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
1231-
; GCN-IR-NEXT: s_add_u32 s16, s14, 1
1232-
; GCN-IR-NEXT: s_addc_u32 s17, s15, 0
1233-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
1234-
; GCN-IR-NEXT: s_sub_i32 s14, 63, s14
1235-
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
1236-
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[2:3], s14
1237-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4
1238-
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
1239-
; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[2:3], s16
1240-
; GCN-IR-NEXT: s_add_u32 s18, s6, -1
1241-
; GCN-IR-NEXT: s_addc_u32 s19, s7, -1
1242-
; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13]
1243-
; GCN-IR-NEXT: s_add_u32 s12, s8, s20
1244-
; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
1245-
; GCN-IR-NEXT: s_mov_b64 s[16:17], 0
1246-
; GCN-IR-NEXT: s_mov_b32 s9, 0
1247-
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
1248-
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
1249-
; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1
1250-
; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
1251-
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
1252-
; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[8:9]
1253-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[16:17], s[10:11]
1254-
; GCN-IR-NEXT: s_sub_u32 s8, s18, s14
1255-
; GCN-IR-NEXT: s_subb_u32 s8, s19, s15
1256-
; GCN-IR-NEXT: s_ashr_i32 s16, s8, 31
1257-
; GCN-IR-NEXT: s_mov_b32 s17, s16
1258-
; GCN-IR-NEXT: s_and_b32 s8, s16, 1
1259-
; GCN-IR-NEXT: s_and_b64 s[16:17], s[16:17], s[6:7]
1260-
; GCN-IR-NEXT: s_sub_u32 s14, s14, s16
1261-
; GCN-IR-NEXT: s_subb_u32 s15, s15, s17
1262-
; GCN-IR-NEXT: s_add_u32 s12, s12, 1
1263-
; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
1264-
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
1265-
; GCN-IR-NEXT: s_mov_b64 s[16:17], s[8:9]
1266-
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
1267-
; GCN-IR-NEXT: s_cbranch_vccz .LBB9_3
1268-
; GCN-IR-NEXT: .LBB9_4: ; %Flow4
1269-
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
1270-
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
1271-
; GCN-IR-NEXT: .LBB9_5: ; %udiv-end
1272-
; GCN-IR-NEXT: v_mov_b32_e32 v0, s10
1273-
; GCN-IR-NEXT: v_mul_hi_u32 v0, s6, v0
1274-
; GCN-IR-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x9
1275-
; GCN-IR-NEXT: s_mul_i32 s4, s6, s11
1276-
; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
1277-
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s4, v0
1278-
; GCN-IR-NEXT: s_mul_i32 s4, s7, s10
1279-
; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s4, v0
1280-
; GCN-IR-NEXT: s_mul_i32 s4, s6, s10
1281-
; GCN-IR-NEXT: v_mov_b32_e32 v1, s4
1282-
; GCN-IR-NEXT: v_sub_i32_e32 v1, vcc, s2, v1
1283-
; GCN-IR-NEXT: v_subb_u32_e32 v0, vcc, v2, v0, vcc
1284-
; GCN-IR-NEXT: v_xor_b32_e32 v1, s0, v1
1285-
; GCN-IR-NEXT: v_xor_b32_e32 v0, s1, v0
1286-
; GCN-IR-NEXT: v_mov_b32_e32 v2, s1
1287-
; GCN-IR-NEXT: v_subrev_i32_e32 v1, vcc, s0, v1
1288-
; GCN-IR-NEXT: s_mov_b32 s15, 0xf000
1289-
; GCN-IR-NEXT: s_mov_b32 s14, -1
1290-
; GCN-IR-NEXT: v_subb_u32_e32 v0, vcc, v0, v2, vcc
1291-
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
1292-
; GCN-IR-NEXT: buffer_store_short v0, off, s[12:15], 0 offset:4
1293-
; GCN-IR-NEXT: buffer_store_dword v1, off, s[12:15], 0
1198+
; GCN-IR-NEXT: s_sext_i32_i16 s5, s5
1199+
; GCN-IR-NEXT: v_mov_b32_e32 v0, s4
1200+
; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24
1201+
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0
1202+
; GCN-IR-NEXT: v_mov_b32_e32 v2, s2
1203+
; GCN-IR-NEXT: v_alignbit_b32 v2, s3, v2, 24
1204+
; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2
1205+
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1
1206+
; GCN-IR-NEXT: v_xor_b32_e32 v5, v2, v0
1207+
; GCN-IR-NEXT: v_ashrrev_i32_e32 v5, 30, v5
1208+
; GCN-IR-NEXT: v_or_b32_e32 v5, 1, v5
1209+
; GCN-IR-NEXT: v_mul_f32_e32 v4, v3, v4
1210+
; GCN-IR-NEXT: v_trunc_f32_e32 v4, v4
1211+
; GCN-IR-NEXT: v_mad_f32 v3, -v4, v1, v3
1212+
; GCN-IR-NEXT: v_cvt_i32_f32_e32 v4, v4
1213+
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
1214+
; GCN-IR-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc
1215+
; GCN-IR-NEXT: s_mov_b32 s4, s0
1216+
; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v4
1217+
; GCN-IR-NEXT: v_mul_lo_u32 v0, v1, v0
1218+
; GCN-IR-NEXT: s_mov_b32 s5, s1
1219+
; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2
1220+
; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24
1221+
; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0
1222+
; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
1223+
; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4
12941224
; GCN-IR-NEXT: s_endpgm
12951225
%1 = ashr i48 %x, 24
12961226
%2 = ashr i48 %y, 24

0 commit comments

Comments
 (0)