@@ -284,3 +284,89 @@ void foo6() {
 // OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: store float %[[A_REAL_DEC]], ptr %[[RESULT_REAL_PTR]], align 4
 // OGCG: store float %[[A_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo7() {
+  float _Complex a;
+  float _Complex b = +a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_PLUS:.*]] = cir.unary(plus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_PLUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[REAL_PLUS:.*]] = cir.unary(plus, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[IMAG_PLUS:.*]] = cir.unary(plus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_PLUS]], %[[IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
+// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
+// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL]], 0
+// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG]], 1
+// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[A_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo8() {
+  float _Complex a;
+  float _Complex b = -a;
+}
+
+// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-BEFORE: %[[COMPLEX_MINUS:.*]] = cir.unary(minus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
+// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_MINUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
+// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
+// CIR-AFTER: %[[REAL_MINUS:.*]] = cir.unary(minus, %[[REAL]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
+// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_MINUS]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
+// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
+// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
+// LLVM: %[[REAL_MINUS:.*]] = fneg float %[[REAL]]
+// LLVM: %[[IMAG_MINUS:.*]] = fneg float %[[IMAG]]
+// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL_MINUS]], 0
+// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG_MINUS]], 1
+// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_MINUS:.*]] = fneg float %[[A_REAL]]
+// OGCG: %[[A_IMAG_MINUS:.*]] = fneg float %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4