 define i32 @fold_srem_positive_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 95
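The sequence checked above is the usual signed magic-number lowering of srem by 95: multiply by the magic constant 0xAC769185 (built by the mov/movk pair, 37253 + 44150<<16), keep the high 32 bits of the 64-bit product, add the dividend back because the magic constant is negative as an i32, arithmetic-shift right by 6, and add the quotient's sign bit so the quotient rounds toward zero; msub then recovers x - q*95. A minimal C model of the new output, assuming two's-complement arithmetic right shifts on signed types (the function name is ours, not part of the test):

#include <stdint.h>

int32_t srem95(int32_t x) {
    int64_t prod = (int64_t)x * (int32_t)0xAC769185u;       /* smull x8, w0, w8 */
    int32_t hi = (int32_t)(prod >> 32);                     /* lsr x8, x8, #32: high half */
    hi += x;                                                /* add w8, w8, w0: add-back, magic < 0 */
    int32_t q = (hi >> 6) + (int32_t)((uint32_t)hi >> 31);  /* asr #6 plus sign-bit fixup */
    return x - q * 95;                                      /* msub w0, w8, w9, w0 */
}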
@@ -22,13 +22,12 @@ define i32 @fold_srem_positive_odd(i32 %x) {
 define i32 @fold_srem_positive_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_positive_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #36849
+; CHECK-NEXT:    mov w8, #36849 // =0x8ff1
+; CHECK-NEXT:    mov w9, #1060 // =0x424
 ; CHECK-NEXT:    movk w8, #15827, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #1060
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, 1060
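Besides annotating each immediate with its hex value, the regenerated checks for the even and negative divisors fold the two-instruction sign fixup (a separate lsr x9, x8, #63 of the product plus an add) into a single add w8, w8, w8, lsr #31 on the already-shifted quotient, and hoist the divisor mov above the multiply. This is sound because asr #40 sign-extends well past bit 31, so bit 31 of w8 after the shift still equals the product's sign bit 63; the result is unchanged with one instruction fewer. A sketch of the 1060 case, under the same assumptions and includes as the model above:

int32_t srem1060(int32_t x) {
    int64_t prod = (int64_t)x * 0x3DD38FF1;   /* smull: magic = 15827<<16 | 36849 */
    int32_t q = (int32_t)(prod >> 40);        /* asr x8, x8, #40 (magic > 0: no add-back) */
    q += (int32_t)((uint32_t)q >> 31);        /* fused sign fixup: add w8, w8, w8, lsr #31 */
    return x - q * 1060;                      /* msub w0, w8, w9, w0 */
}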
@@ -39,13 +38,12 @@ define i32 @fold_srem_positive_even(i32 %x) {
 define i32 @fold_srem_negative_odd(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_odd:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #65445
+; CHECK-NEXT:    mov w8, #65445 // =0xffa5
+; CHECK-NEXT:    mov w9, #-723 // =0xfffffd2d
 ; CHECK-NEXT:    movk w8, #42330, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-723
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -723
@@ -56,13 +54,12 @@ define i32 @fold_srem_negative_odd(i32 %x) {
 define i32 @fold_srem_negative_even(i32 %x) {
 ; CHECK-LABEL: fold_srem_negative_even:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #62439
+; CHECK-NEXT:    mov w8, #62439 // =0xf3e7
+; CHECK-NEXT:    mov w9, #-22981 // =0xffffa63b
 ; CHECK-NEXT:    movk w8, #64805, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
-; CHECK-NEXT:    lsr x9, x8, #63
 ; CHECK-NEXT:    asr x8, x8, #40
-; CHECK-NEXT:    add w8, w8, w9
-; CHECK-NEXT:    mov w9, #-22981
+; CHECK-NEXT:    add w8, w8, w8, lsr #31
 ; CHECK-NEXT:    msub w0, w8, w9, w0
 ; CHECK-NEXT:    ret
   %1 = srem i32 %x, -22981
@@ -74,14 +71,14 @@ define i32 @fold_srem_negative_even(i32 %x) {
 define i32 @combine_srem_sdiv(i32 %x) {
 ; CHECK-LABEL: combine_srem_sdiv:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #37253
+; CHECK-NEXT:    mov w8, #37253 // =0x9185
 ; CHECK-NEXT:    movk w8, #44150, lsl #16
 ; CHECK-NEXT:    smull x8, w0, w8
 ; CHECK-NEXT:    lsr x8, x8, #32
 ; CHECK-NEXT:    add w8, w8, w0
 ; CHECK-NEXT:    asr w9, w8, #6
 ; CHECK-NEXT:    add w8, w9, w8, lsr #31
-; CHECK-NEXT:    mov w9, #95
+; CHECK-NEXT:    mov w9, #95 // =0x5f
 ; CHECK-NEXT:    msub w9, w8, w9, w0
 ; CHECK-NEXT:    add w0, w9, w8
 ; CHECK-NEXT:    ret
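combine_srem_sdiv shows why computing the quotient explicitly pays off: once w8 holds x / 95, msub derives the remainder from it and the final add returns their sum, so srem and sdiv of the same operands share a single magic-multiply sequence. The test's IR presumably reduces to something like this C (our naming, same includes as above):

/* srem and sdiv by the same constant share one quotient computation. */
int32_t combine_srem_sdiv(int32_t x) {
    return x % 95 + x / 95;
}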
@@ -95,14 +92,14 @@ define i32 @combine_srem_sdiv(i32 %x) {
 define i64 @dont_fold_srem_i64(i64 %x) {
 ; CHECK-LABEL: dont_fold_srem_i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, #58849
+; CHECK-NEXT:    mov x8, #58849 // =0xe5e1
 ; CHECK-NEXT:    movk x8, #48148, lsl #16
 ; CHECK-NEXT:    movk x8, #33436, lsl #32
 ; CHECK-NEXT:    movk x8, #21399, lsl #48
 ; CHECK-NEXT:    smulh x8, x0, x8
 ; CHECK-NEXT:    asr x9, x8, #5
 ; CHECK-NEXT:    add x8, x9, x8, lsr #63
-; CHECK-NEXT:    mov w9, #98
+; CHECK-NEXT:    mov w9, #98 // =0x62
 ; CHECK-NEXT:    msub x0, x8, x9, x0
 ; CHECK-NEXT:    ret
   %1 = srem i64 %x, 98
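The i64 version follows the same recipe at double width: mov plus three movk instructions materialize the 64-bit magic constant 0x5397829CBC14E5E1, smulh supplies the high 64 bits of the 128-bit product in one instruction, and asr #5 plus the lsr #63 sign fixup produce the quotient fed to msub. A C model, assuming the compiler provides a 128-bit integer type such as GCC/Clang's __int128 (function name ours):

#include <stdint.h>

int64_t srem98(int64_t x) {
    __int128 prod = (__int128)x * 0x5397829CBC14E5E1LL;     /* full product; smulh keeps the high half */
    int64_t hi = (int64_t)(prod >> 64);                     /* high 64 bits */
    int64_t q = (hi >> 5) + (int64_t)((uint64_t)hi >> 63);  /* asr #5 plus sign-bit fixup */
    return x - q * 98;                                      /* msub x0, x8, x9, x0 */
}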