@@ -216,6 +216,29 @@ void foo9(double a, double b) {
216216// OGCG: store double %[[TMP_A]], ptr %[[C_REAL_PTR]], align 8
217217// OGCG: store double %[[TMP_B]], ptr %[[C_IMAG_PTR]], align 8
218218
219+ void foo13 () {
220+ double _Complex c; // uninitialized complex; test only checks the load/extract pattern
221+ double imag = __imag__ c; // GNU extension: extract the imaginary component
222+ }
223+
224+ // CIR: %[[COMPLEX:.*]] = cir.alloca !cir.complex<!cir.double>, !cir.ptr<!cir.complex<!cir.double>>, ["c"]
225+ // CIR: %[[INIT:.*]] = cir.alloca !cir.double, !cir.ptr<!cir.double>, ["imag", init]
226+ // CIR: %[[TMP:.*]] = cir.load{{.*}} %[[COMPLEX]] : !cir.ptr<!cir.complex<!cir.double>>, !cir.complex<!cir.double>
227+ // CIR: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.double> -> !cir.double
228+ // CIR: cir.store{{.*}} %[[IMAG]], %[[INIT]] : !cir.double, !cir.ptr<!cir.double>
229+
230+ // LLVM: %[[COMPLEX:.*]] = alloca { double, double }, i64 1, align 8
231+ // LLVM: %[[INIT:.*]] = alloca double, i64 1, align 8
232+ // LLVM: %[[TMP:.*]] = load { double, double }, ptr %[[COMPLEX]], align 8
233+ // LLVM: %[[IMAG:.*]] = extractvalue { double, double } %[[TMP]], 1
234+ // LLVM: store double %[[IMAG]], ptr %[[INIT]], align 8
235+
236+ // OGCG: %[[COMPLEX:.*]] = alloca { double, double }, align 8
237+ // OGCG: %[[INIT:.*]] = alloca double, align 8
238+ // OGCG: %[[IMAG:.*]] = getelementptr inbounds nuw { double, double }, ptr %[[COMPLEX]], i32 0, i32 1
239+ // OGCG: %[[TMP:.*]] = load double, ptr %[[IMAG]], align 8
240+ // OGCG: store double %[[TMP]], ptr %[[INIT]], align 8
241+
219242void foo14 () {
220243 int _Complex c = 2i; // GNU imaginary-constant suffix: c = 0 + 2i
221244}
@@ -256,3 +279,36 @@ void foo15() {
256279// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
257280// OGCG: store i32 %[[A_REAL]], ptr %[[B_REAL_PTR]], align 4
258281// OGCG: store i32 %[[A_IMAG]], ptr %[[B_IMAG_PTR]], align 4
282+
283+ int foo16 (int _Complex a, int _Complex b) {
284+ return __imag__ a + __imag__ b; // sum of the two imaginary components
285+ }
286+
287+ // CIR: %[[RET:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"]
288+ // CIR: %[[COMPLEX_A:.*]] = cir.load{{.*}} {{.*}} : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
289+ // CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[COMPLEX_A]] : !cir.complex<!s32i> -> !s32i
290+ // CIR: %[[COMPLEX_B:.*]] = cir.load{{.*}} {{.*}} : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
291+ // CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[COMPLEX_B]] : !cir.complex<!s32i> -> !s32i
292+ // CIR: %[[ADD:.*]] = cir.binop(add, %[[A_IMAG]], %[[B_IMAG]]) nsw : !s32i
293+ // CIR: cir.store %[[ADD]], %[[RET]] : !s32i, !cir.ptr<!s32i>
294+ // CIR: %[[TMP:.*]] = cir.load %[[RET]] : !cir.ptr<!s32i>, !s32i
295+ // CIR: cir.return %[[TMP]] : !s32i
296+
297+ // LLVM: %[[RET:.*]] = alloca i32, i64 1, align 4
298+ // LLVM: %[[COMPLEX_A:.*]] = load { i32, i32 }, ptr {{.*}}, align 4
299+ // LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[COMPLEX_A]], 1
300+ // LLVM: %[[COMPLEX_B:.*]] = load { i32, i32 }, ptr {{.*}}, align 4
301+ // LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[COMPLEX_B]], 1
302+ // LLVM: %[[ADD:.*]] = add nsw i32 %[[A_IMAG]], %[[B_IMAG]]
303+ // LLVM: store i32 %[[ADD]], ptr %[[RET]], align 4
304+ // LLVM: %[[TMP:.*]] = load i32, ptr %[[RET]], align 4
305+ // LLVM: ret i32 %[[TMP]]
306+
307+ // OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
308+ // OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
309+ // OGCG: %[[A_IMAG:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
310+ // OGCG: %[[TMP_A:.*]] = load i32, ptr %[[A_IMAG]], align 4
311+ // OGCG: %[[B_IMAG:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
312+ // OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_IMAG]], align 4
313+ // OGCG: %[[ADD:.*]] = add nsw i32 %[[TMP_A]], %[[TMP_B]]
314+ // OGCG: ret i32 %[[ADD]]