 define i32 @fold_srem_positive_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 95
@@ -22,13 +22,12 @@ define i32 @fold_srem_positive_odd(i32 %x) {
 define i32 @fold_srem_positive_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #36849
+; CHECK-NEXT:    mov w8, #36849 // =0x8ff1
+; CHECK-NEXT:    mov w9, #1060 // =0x424
 ; CHECK-NEXT:    movk w8, #15827, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #1060
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 1060
@@ -39,13 +38,12 @@ define i32 @fold_srem_positive_even(i32 %x) {
 define i32 @fold_srem_negative_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65445
+; CHECK-NEXT:    mov w8, #65445 // =0xffa5
+; CHECK-NEXT:    mov w9, #-723 // =0xfffffd2d
 ; CHECK-NEXT:    movk w8, #42330, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-723
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -56,13 +54,12 @@ define i32 @fold_srem_negative_odd(i32 %x) {
 define i32 @fold_srem_negative_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #62439
+; CHECK-NEXT:    mov w8, #62439 // =0xf3e7
+; CHECK-NEXT:    mov w9, #-22981 // =0xffffa63b
 ; CHECK-NEXT:    movk w8, #64805, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-22981
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -74,14 +71,14 @@ define i32 @fold_srem_negative_even(i32 %x) {
 define i32 @combine_srem_sdiv(i32 %x) {
 ; CHECK-LABEL: combine_srem_sdiv:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w9, w8, w9, w0
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
@@ -95,14 +92,14 @@ define i32 @combine_srem_sdiv(i32 %x) {
 define i64 @dont_fold_srem_i64(i64 %x) {
 ; CHECK-LABEL: dont_fold_srem_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #58849
+; CHECK-NEXT:    mov x8, #58849 // =0xe5e1
 ; CHECK-NEXT:    movk x8, #48148, lsl #16
 ; CHECK-NEXT:    movk x8, #33436, lsl #32
 ; CHECK-NEXT:    movk x8, #21399, lsl #48
 ; CHECK-NEXT:    smulh x8, x0, x8
 ; CHECK-NEXT:    asr x9, x8, #5
 ; CHECK-NEXT:    add x8, x9, x8, lsr #63
-; CHECK-NEXT:    mov w9, #98
+; CHECK-NEXT:    mov w9, #98 // =0x62
 ; CHECK-NEXT:    msub x0, x8, x9, x0
 ; CHECK-NEXT:    ret
   %1 = srem i64 %x, 98
0 commit comments