 // X64-LABEL: define dso_local double @test_double_post_inc(
 // X64-SAME: ) #[[ATTR0:[0-9]+]] {
 // X64-NEXT: entry:
-// X64-NEXT: [[RETVAL:%.*]] = alloca double, align 8
-// X64-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, float 1.000000e+00 seq_cst, align 8
-// X64-NEXT: store float [[TMP0]], ptr [[RETVAL]], align 8
-// X64-NEXT: [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 8
-// X64-NEXT: ret double [[TMP1]]
+// X64-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, double 1.000000e+00 seq_cst, align 8
+// X64-NEXT: ret double [[TMP0]]
 //
 // X86-LABEL: define dso_local double @test_double_post_inc(
 // X86-SAME: ) #[[ATTR0:[0-9]+]] {
 // X86-NEXT: entry:
-// X86-NEXT: [[RETVAL:%.*]] = alloca double, align 4
-// X86-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, float 1.000000e+00 seq_cst, align 8
-// X86-NEXT: store float [[TMP0]], ptr [[RETVAL]], align 4
-// X86-NEXT: [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 4
-// X86-NEXT: ret double [[TMP1]]
+// X86-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, double 1.000000e+00 seq_cst, align 8
+// X86-NEXT: ret double [[TMP0]]
 //
 double test_double_post_inc()
 {
@@ -30,20 +24,14 @@ double test_double_post_inc()
 // X64-LABEL: define dso_local double @test_double_post_dc(
 // X64-SAME: ) #[[ATTR0]] {
 // X64-NEXT: entry:
-// X64-NEXT: [[RETVAL:%.*]] = alloca double, align 8
-// X64-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, float 1.000000e+00 seq_cst, align 8
-// X64-NEXT: store float [[TMP0]], ptr [[RETVAL]], align 8
-// X64-NEXT: [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 8
-// X64-NEXT: ret double [[TMP1]]
+// X64-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, double 1.000000e+00 seq_cst, align 8
+// X64-NEXT: ret double [[TMP0]]
 //
 // X86-LABEL: define dso_local double @test_double_post_dc(
 // X86-SAME: ) #[[ATTR0]] {
 // X86-NEXT: entry:
-// X86-NEXT: [[RETVAL:%.*]] = alloca double, align 4
-// X86-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, float 1.000000e+00 seq_cst, align 8
-// X86-NEXT: store float [[TMP0]], ptr [[RETVAL]], align 4
-// X86-NEXT: [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 4
-// X86-NEXT: ret double [[TMP1]]
+// X86-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, double 1.000000e+00 seq_cst, align 8
+// X86-NEXT: ret double [[TMP0]]
 //
 double test_double_post_dc()
 {
@@ -54,22 +42,16 @@ double test_double_post_dc()
 // X64-LABEL: define dso_local double @test_double_pre_dc(
 // X64-SAME: ) #[[ATTR0]] {
 // X64-NEXT: entry:
-// X64-NEXT: [[RETVAL:%.*]] = alloca double, align 8
-// X64-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, float 1.000000e+00 seq_cst, align 8
-// X64-NEXT: [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
-// X64-NEXT: store float [[TMP1]], ptr [[RETVAL]], align 8
-// X64-NEXT: [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 8
-// X64-NEXT: ret double [[TMP2]]
+// X64-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, double 1.000000e+00 seq_cst, align 8
+// X64-NEXT: [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
+// X64-NEXT: ret double [[TMP1]]
 //
 // X86-LABEL: define dso_local double @test_double_pre_dc(
 // X86-SAME: ) #[[ATTR0]] {
 // X86-NEXT: entry:
-// X86-NEXT: [[RETVAL:%.*]] = alloca double, align 4
-// X86-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, float 1.000000e+00 seq_cst, align 8
-// X86-NEXT: [[TMP1:%.*]] = fsub float [[TMP0]], 1.000000e+00
-// X86-NEXT: store float [[TMP1]], ptr [[RETVAL]], align 4
-// X86-NEXT: [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 4
-// X86-NEXT: ret double [[TMP2]]
+// X86-NEXT: [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, double 1.000000e+00 seq_cst, align 8
+// X86-NEXT: [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
+// X86-NEXT: ret double [[TMP1]]
 //
 double test_double_pre_dc()
 {
@@ -80,25 +62,43 @@ double test_double_pre_dc()
 // X64-LABEL: define dso_local double @test_double_pre_inc(
 // X64-SAME: ) #[[ATTR0]] {
 // X64-NEXT: entry:
-// X64-NEXT: [[RETVAL:%.*]] = alloca double, align 8
-// X64-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, float 1.000000e+00 seq_cst, align 8
-// X64-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
-// X64-NEXT: store float [[TMP1]], ptr [[RETVAL]], align 8
-// X64-NEXT: [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 8
-// X64-NEXT: ret double [[TMP2]]
+// X64-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, double 1.000000e+00 seq_cst, align 8
+// X64-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
+// X64-NEXT: ret double [[TMP1]]
 //
 // X86-LABEL: define dso_local double @test_double_pre_inc(
 // X86-SAME: ) #[[ATTR0]] {
 // X86-NEXT: entry:
-// X86-NEXT: [[RETVAL:%.*]] = alloca double, align 4
-// X86-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, float 1.000000e+00 seq_cst, align 8
-// X86-NEXT: [[TMP1:%.*]] = fadd float [[TMP0]], 1.000000e+00
-// X86-NEXT: store float [[TMP1]], ptr [[RETVAL]], align 4
-// X86-NEXT: [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 4
-// X86-NEXT: ret double [[TMP2]]
+// X86-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, double 1.000000e+00 seq_cst, align 8
+// X86-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
+// X86-NEXT: ret double [[TMP1]]
 //
 double test_double_pre_inc()
 {
     static _Atomic double n;
     return ++n;
 }
+
+// X64-LABEL: define dso_local i32 @pr107054(
+// X64-SAME: ) #[[ATTR0]] {
+// X64-NEXT: entry:
+// X64-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @pr107054.n, double 1.000000e+00 seq_cst, align 8
+// X64-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
+// X64-NEXT: [[CMP:%.*]] = fcmp oeq double [[TMP1]], 1.000000e+00
+// X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// X64-NEXT: ret i32 [[CONV]]
+//
+// X86-LABEL: define dso_local i32 @pr107054(
+// X86-SAME: ) #[[ATTR0]] {
+// X86-NEXT: entry:
+// X86-NEXT: [[TMP0:%.*]] = atomicrmw fadd ptr @pr107054.n, double 1.000000e+00 seq_cst, align 8
+// X86-NEXT: [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
+// X86-NEXT: [[CMP:%.*]] = fcmp oeq double [[TMP1]], 1.000000e+00
+// X86-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
+// X86-NEXT: ret i32 [[CONV]]
+//
+int pr107054()
+{
+    static _Atomic double n;
+    return (++n) == 1;
+}
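For readers skimming the new checks: `atomicrmw fadd`/`fsub` returns the value the object held *before* the update, which is why the post-increment/decrement tests return `[[TMP0]]` directly while the pre-increment/decrement tests (and `pr107054`) apply one extra `fadd`/`fsub double ... 1.000000e+00` to it. On targets without a native floating-point atomic add, such an operation is typically expanded into a compare-and-swap loop; a minimal C11 sketch of that shape (the helper name `atomic_fadd_double` is purely illustrative, not part of this test or of clang):

```c
#include <stdatomic.h>

/* Illustrative only: roughly what a seq_cst atomicrmw fadd on a double
 * amounts to when expanded as a compare-exchange loop. */
static double atomic_fadd_double(_Atomic double *p, double v) {
    double old = atomic_load_explicit(p, memory_order_seq_cst);
    /* Retry until no other thread updated *p between the load and the CAS;
     * on failure, 'old' is refreshed with the currently stored value. */
    while (!atomic_compare_exchange_weak_explicit(
               p, &old, old + v,
               memory_order_seq_cst, memory_order_seq_cst)) {
    }
    return old; /* value before the add, i.e. the post-increment result */
}
```

A pre-increment would simply return `old + v` instead of `old`.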