 define i32 @fold_srem_positive_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 95
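
These CHECK lines encode the standard srem-by-constant strength reduction: a multiply by a magic constant, shifts, and an msub in place of a division. As a hedged illustration only (the function name, the driver loop, and the 0xAC769185 rendering of the mov/movk pair are mine, not part of the test), the equivalent scalar computation in C is:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the asm above for srem by 95: the mov/movk pair assembles the magic
   constant 0xAC769185 (negative as i32), smull + lsr #32 take the high word of
   the product, the add of w0 compensates for the negative magic, asr #6 plus
   the lsr #31 sign bit give the quotient, and msub forms the remainder. */
int32_t srem95_magic(int32_t x) {
    int64_t prod = (int64_t)x * (int32_t)0xAC769185;        /* smull x8, w0, w8        */
    int32_t t = (int32_t)(prod >> 32) + x;                  /* lsr #32; add w8, w8, w0 */
    int32_t q = (t >> 6) + (int32_t)((uint32_t)t >> 31);    /* asr #6; add ..., lsr #31 */
    return x - q * 95;                                      /* msub w0, w8, w9, w0     */
}

int main(void) {
    for (int32_t x = -100000; x <= 100000; ++x)
        if (srem95_magic(x) != x % 95) {
            printf("mismatch at %d\n", x);
            return 1;
        }
    return 0;
}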
@@ -22,13 +22,13 @@ define i32 @fold_srem_positive_odd(i32 %x) {
 define i32 @fold_srem_positive_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #36849
+; CHECK-NEXT:    mov w8, #36849 // =0x8ff1
 ; CHECK-NEXT:    movk w8, #15827, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #1060
+; CHECK-NEXT:    mov w9, #1060 // =0x424
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 1060
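
Here the divisor (1060) gets a positive magic constant (0x3DD38FF1 from the mov/movk pair), so no add-back of the dividend is needed; the quotient shift (#8) is folded into a single asr by 40 on the 64-bit product, and lsr #63 supplies the usual +1 rounding for negative dividends. The two negative-divisor tests below follow the same shape. A minimal C sketch, with an illustrative function name:

#include <stdint.h>

int32_t srem1060_magic(int32_t x) {
    int64_t prod = (int64_t)x * 0x3DD38FF1LL;               /* smull x8, w0, w8                  */
    int32_t q = (int32_t)(prod >> 40)                       /* asr x8, x8, #40 (high word >> 8)  */
              + (int32_t)((uint64_t)prod >> 63);            /* lsr x9, x8, #63; add w8, w8, w9   */
    return x - q * 1060;                                    /* msub w0, w8, w9, w0               */
}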
@@ -39,13 +39,13 @@ define i32 @fold_srem_positive_even(i32 %x) {
 define i32 @fold_srem_negative_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65445
+; CHECK-NEXT:    mov w8, #65445 // =0xffa5
 ; CHECK-NEXT:    movk w8, #42330, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-723
+; CHECK-NEXT:    mov w9, #-723 // =0xfffffd2d
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -56,13 +56,13 @@ define i32 @fold_srem_negative_odd(i32 %x) {
 define i32 @fold_srem_negative_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #62439
+; CHECK-NEXT:    mov w8, #62439 // =0xf3e7
 ; CHECK-NEXT:    movk w8, #64805, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
 ; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-22981
+; CHECK-NEXT:    mov w9, #-22981 // =0xffffa63b
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -74,14 +74,14 @@ define i32 @fold_srem_negative_even(i32 %x) {
 define i32 @combine_srem_sdiv(i32 %x) {
 ; CHECK-LABEL: combine_srem_sdiv:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w9, w8, w9, w0
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
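
This test checks that srem and sdiv by the same constant share a single magic-multiply sequence: w8 ends up holding the quotient, msub turns it into the remainder, and the final add returns srem(x, 95) + sdiv(x, 95) with no second multiply. A C sketch of that reuse (names are illustrative):

#include <stdint.h>

int32_t combine_srem_sdiv_95(int32_t x) {
    int64_t prod = (int64_t)x * (int32_t)0xAC769185;        /* same magic as fold_srem_positive_odd */
    int32_t t = (int32_t)(prod >> 32) + x;
    int32_t q = (t >> 6) + (int32_t)((uint32_t)t >> 31);    /* sdiv result, kept in w8  */
    int32_t r = x - q * 95;                                 /* srem result via msub     */
    return r + q;                                           /* add w0, w9, w8           */
}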
@@ -95,14 +95,14 @@ define i32 @combine_srem_sdiv(i32 %x) {
 define i64 @dont_fold_srem_i64(i64 %x) {
 ; CHECK-LABEL: dont_fold_srem_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #58849
+; CHECK-NEXT:    mov x8, #58849 // =0xe5e1
 ; CHECK-NEXT:    movk x8, #48148, lsl #16
 ; CHECK-NEXT:    movk x8, #33436, lsl #32
 ; CHECK-NEXT:    movk x8, #21399, lsl #48
 ; CHECK-NEXT:    smulh x8, x0, x8
 ; CHECK-NEXT:    asr x9, x8, #5
 ; CHECK-NEXT:    add x8, x9, x8, lsr #63
-; CHECK-NEXT:    mov w9, #98
+; CHECK-NEXT:    mov w9, #98 // =0x62
 ; CHECK-NEXT:    msub x0, x8, x9, x0
 ; CHECK-NEXT:    ret
   %1 = srem i64 %x, 98
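
For i64 the same idea needs the high half of a 128-bit product, so the magic constant (0x5397829CBC14E5E1, assembled by the mov/movk chain) feeds an smulh, followed by asr #5 and the lsr #63 rounding fixup. A C sketch, assuming a compiler with __int128 (GCC/Clang); the function name is illustrative:

#include <stdint.h>

int64_t srem98_magic(int64_t x) {
    __int128 prod = (__int128)x * 0x5397829CBC14E5E1LL;     /* full 128-bit product        */
    int64_t hi = (int64_t)(prod >> 64);                     /* smulh x8, x0, x8            */
    int64_t q = (hi >> 5) + (int64_t)((uint64_t)hi >> 63);  /* asr #5; add ..., lsr #63    */
    return x - q * 98;                                      /* msub x0, x8, x9, x0         */
}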