@@ -15,7 +15,7 @@ void foo() {
 // CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
 // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] -> !cir.complex<!s32i>
+// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>

 // LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
 // LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
@@ -27,7 +27,7 @@ void foo() {
 // LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
 // LLVM: %[[ADD_REAL:.*]] = add i32 %[[A_REAL]], %[[B_REAL]]
 // LLVM: %[[ADD_IMAG:.*]] = add i32 %[[A_IMAG]], %[[B_IMAG]]
-// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } undef, i32 %[[ADD_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[ADD_IMAG]], 1

 // OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
@@ -58,7 +58,7 @@ void foo2() {
 // CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
 // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] -> !cir.complex<!cir.float>
+// CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>

 // LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
@@ -70,7 +70,7 @@ void foo2() {
 // LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
 // LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
 // LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
-// LLVM: %[[RESULT:.*]] = insertvalue { float, float } undef, float %[[ADD_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[ADD_IMAG]], 1

 // OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
@@ -90,3 +90,71 @@ void foo2() {
 // OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
 // OGCG: store float %[[ADD_REAL]], ptr %[[RESULT_REAL_PTR]], align 4
 // OGCG: store float %[[ADD_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo3() {
+  float _Complex a;
+  float _Complex b;
+  float _Complex c;
+  float _Complex d = (a + b) + c;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[ADD_A_B_C:.*]] = cir.complex.add %[[ADD_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[ADD_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
+// LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[ADD_IMAG_A_B]], 1
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
+// LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
+// LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
+// LLVM: %[[C_IMAG:.*]] = extractvalue { float, float } %[[TMP_C]], 1
+// LLVM: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[A_B_REAL]], %[[C_REAL]]
+// LLVM: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[A_B_IMAG]], %[[C_IMAG]]
+// LLVM: %[[A_B_C:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B_C]], 0
+// LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[ADD_IMAG_A_B_C]], 1
+// LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[ADD_REAL_A_B]], %[[C_REAL]]
+// OGCG: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[ADD_IMAG_A_B]], %[[C_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store float %[[ADD_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4