@@ -158,3 +158,159 @@ void foo3() {
 // OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
 // OGCG: store float %[[ADD_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
 // OGCG: store float %[[ADD_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4
+
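+// Test subtraction of two int _Complex values.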
+void foo4() {
+  int _Complex a;
+  int _Complex b;
+  int _Complex c = a - b;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[COMPLEX_C:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[SUB_REAL:.*]] = sub i32 %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[SUB_REAL]], 0
+// LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[SUB_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT_2]], ptr %[[COMPLEX_C]], align 4
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL:.*]] = sub i32 %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store i32 %[[SUB_REAL]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store i32 %[[SUB_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
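+// Test subtraction of two float _Complex values.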
+void foo5() {
+  float _Complex a;
+  float _Complex b;
+  float _Complex c = a - b;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[SUB_REAL:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[SUB_IMAG:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL]], 0
+// LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[SUB_IMAG]], 1
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[SUB_IMAG:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store float %[[SUB_REAL]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store float %[[SUB_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
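+// Test a chained subtraction, (a - b) - c, of float _Complex values.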
+void foo6() {
+  float _Complex a;
+  float _Complex b;
+  float _Complex c;
+  float _Complex d = (a - b) - c;
+}
+
+// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[SUB_A_B:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[SUB_A_B_C:.*]] = cir.complex.sub %[[SUB_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[SUB_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[SUB_REAL_A_B:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B]], 0
+// LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[SUB_IMAG_A_B]], 1
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
+// LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
+// LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
+// LLVM: %[[C_IMAG:.*]] = extractvalue { float, float } %[[TMP_C]], 1
+// LLVM: %[[SUB_REAL_A_B_C:.*]] = fsub float %[[A_B_REAL]], %[[C_REAL]]
+// LLVM: %[[SUB_IMAG_A_B_C:.*]] = fsub float %[[A_B_IMAG]], %[[C_IMAG]]
+// LLVM: %[[A_B_C:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B_C]], 0
+// LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[SUB_IMAG_A_B_C]], 1
+// LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4
+
+// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL_A_B:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
+// OGCG: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL_A_B_C:.*]] = fsub float %[[SUB_REAL_A_B]], %[[C_REAL]]
+// OGCG: %[[SUB_IMAG_A_B_C:.*]] = fsub float %[[SUB_IMAG_A_B]], %[[C_IMAG]]
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 0
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
+// OGCG: store float %[[SUB_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: store float %[[SUB_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4