@@ -286,3 +286,211 @@ void foo4() {
 // CXX_OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[C_ADDR]], i32 0, i32 1
 // CXX_OGCG: store i32 %[[B_REAL]], ptr %[[C_REAL_PTR]], align 4
 // CXX_OGCG: store i32 %[[B_IMAG]], ptr %[[C_IMAG_PTR]], align 4
+
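+// foo5: compound add (+=) with a _Complex float LHS and a scalar float RHS.
+// The checks verify that the scalar operand is promoted to a complex value
+// with a zero imaginary part before the complex add is performed.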
+void foo5() {
+  float _Complex a;
+  float b;
+  a += b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_ZERO]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_A]], %[[COMPLEX_B]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_COMPLEX_B:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_B]], 0
+// LLVM: %[[COMPLEX_B:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_B]], float 0.000000e+00, 1
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 1
+// LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
+// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[ADD_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
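+// foo6: compound multiply (*=) on integer complex values. The checks verify
+// the direct lowering to (br*ar - bi*ai, br*ai + bi*ar) with no NaN handling.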
+void foo6() {
+  int _Complex a;
+  int _Complex b;
+  b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !s32i
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !s32i
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[MUL_A_B]], i32 %[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = mul i32 %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = mul i32 %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[RESULT_REAL:.*]] = sub i32 %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[MUL_BI_AR:.*]] = mul i32 %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[MUL_BR_AI:.*]] = mul i32 %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[RESULT_IMAG:.*]] = add i32 %[[MUL_BI_AR]], %[[MUL_BR_AI]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
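+// foo7: compound multiply (*=) on floating-point complex values. Besides the
+// (br*ar - bi*ai, br*ai + bi*ar) formula, the result is tested for NaN in both
+// components; if both are NaN, the product is recomputed via the __mulsc3
+// library call.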
+void foo7() {
+  float _Complex a;
+  float _Complex b;
+  b *= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[MUL_BR_AR:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_REAL]]) : !cir.float
+// CIR: %[[MUL_BI_AI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BR_AI:.*]] = cir.binop(mul, %[[B_REAL]], %[[A_IMAG]]) : !cir.float
+// CIR: %[[MUL_BI_AR:.*]] = cir.binop(mul, %[[B_IMAG]], %[[A_REAL]]) : !cir.float
+// CIR: %[[C_REAL:.*]] = cir.binop(sub, %[[MUL_BR_AR]], %[[MUL_BI_AI]]) : !cir.float
+// CIR: %[[C_IMAG:.*]] = cir.binop(add, %[[MUL_BR_AI]], %[[MUL_BI_AR]]) : !cir.float
+// CIR: %[[COMPLEX:.*]] = cir.complex.create %[[C_REAL]], %[[C_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: %[[IS_C_REAL_NAN:.*]] = cir.cmp(ne, %[[C_REAL]], %[[C_REAL]]) : !cir.float, !cir.bool
+// CIR: %[[IS_C_IMAG_NAN:.*]] = cir.cmp(ne, %[[C_IMAG]], %[[C_IMAG]]) : !cir.float, !cir.bool
+// CIR: %[[CONST_FALSE:.*]] = cir.const #false
+// CIR: %[[SELECT_CONDITION:.*]] = cir.select if %[[IS_C_REAL_NAN]] then %[[IS_C_IMAG_NAN]] else %[[CONST_FALSE]] : (!cir.bool, !cir.bool, !cir.bool) -> !cir.bool
+// CIR: %[[RESULT:.*]] = cir.ternary(%[[SELECT_CONDITION]], true {
+// CIR: %[[LIBC_COMPLEX:.*]] = cir.call @__mulsc3(%[[B_REAL]], %[[B_IMAG]], %[[A_REAL]], %[[A_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR: cir.yield %[[LIBC_COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }, false {
+// CIR: cir.yield %[[COMPLEX]] : !cir.complex<!cir.float>
+// CIR: }) : (!cir.bool) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// LLVM: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// LLVM: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// LLVM: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// LLVM: %[[MUL_A_B:.*]] = insertvalue { float, float } {{.*}}, float %[[C_REAL]], 0
+// LLVM: %[[COMPLEX:.*]] = insertvalue { float, float } %[[MUL_A_B]], float %[[C_IMAG]], 1
+// LLVM: %[[IS_C_REAL_NAN:.*]] = fcmp une float %[[C_REAL]], %[[C_REAL]]
+// LLVM: %[[IS_C_IMAG_NAN:.*]] = fcmp une float %[[C_IMAG]], %[[C_IMAG]]
+// LLVM: %[[SELECT_CONDITION:.*]] = and i1 %[[IS_C_REAL_NAN]], %[[IS_C_IMAG_NAN]]
+// LLVM: br i1 %[[SELECT_CONDITION]], label %[[THEN_LABEL:.*]], label %[[ELSE_LABEL:.*]]
+// LLVM: [[THEN_LABEL]]:
+// LLVM: %[[LIBC_COMPLEX:.*]] = call { float, float } @__mulsc3(float %[[B_REAL]], float %[[B_IMAG]], float %[[A_REAL]], float %[[A_IMAG]])
+// LLVM: br label %[[PHI_BRANCH:.*]]
+// LLVM: [[ELSE_LABEL]]:
+// LLVM: br label %[[PHI_BRANCH]]
+// LLVM: [[PHI_BRANCH]]:
+// LLVM: %[[RESULT:.*]] = phi { float, float } [ %[[COMPLEX]], %[[ELSE_LABEL]] ], [ %[[LIBC_COMPLEX]], %[[THEN_LABEL]] ]
+// LLVM: br label %[[END_LABEL:.*]]
+// LLVM: [[END_LABEL]]:
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[COMPLEX_CALL_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[MUL_BR_AR:.*]] = fmul float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[MUL_BI_AI:.*]] = fmul float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[MUL_BR_AI:.*]] = fmul float %[[B_REAL]], %[[A_IMAG]]
+// OGCG: %[[MUL_BI_AR:.*]] = fmul float %[[B_IMAG]], %[[A_REAL]]
+// OGCG: %[[C_REAL:.*]] = fsub float %[[MUL_BR_AR]], %[[MUL_BI_AI]]
+// OGCG: %[[C_IMAG:.*]] = fadd float %[[MUL_BR_AI]], %[[MUL_BI_AR]]
+// OGCG: %[[IS_C_REAL_NAN:.*]] = fcmp uno float %[[C_REAL]], %[[C_REAL]]
+// OGCG: br i1 %[[IS_C_REAL_NAN]], label %[[COMPLEX_IS_IMAG_NAN:.*]], label %[[END_LABEL:.*]], !prof !2
+// OGCG: [[COMPLEX_IS_IMAG_NAN]]:
+// OGCG: %[[IS_C_IMAG_NAN:.*]] = fcmp uno float %[[C_IMAG]], %[[C_IMAG]]
+// OGCG: br i1 %[[IS_C_IMAG_NAN]], label %[[COMPLEX_LIB_CALL:.*]], label %[[END_LABEL]], !prof !2
+// OGCG: [[COMPLEX_LIB_CALL]]:
+// OGCG: %[[CALL_RESULT:.*]] = call{{.*}} <2 x float> @__mulsc3(float noundef %[[B_REAL]], float noundef %[[B_IMAG]], float noundef %[[A_REAL]], float noundef %[[A_IMAG]])
+// OGCG: store <2 x float> %[[CALL_RESULT]], ptr %[[COMPLEX_CALL_ADDR]], align 4
+// OGCG: %[[COMPLEX_CALL_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 0
+// OGCG: %[[COMPLEX_CALL_REAL:.*]] = load float, ptr %[[COMPLEX_CALL_REAL_PTR]], align 4
+// OGCG: %[[COMPLEX_CALL_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_CALL_ADDR]], i32 0, i32 1
+// OGCG: %[[COMPLEX_CALL_IMAG:.*]] = load float, ptr %[[COMPLEX_CALL_IMAG_PTR]], align 4
+// OGCG: br label %[[END_LABEL]]
+// OGCG: [[END_LABEL]]:
+// OGCG: %[[FINAL_REAL:.*]] = phi float [ %[[C_REAL]], %[[ENTRY:.*]] ], [ %[[C_REAL]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_REAL]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[FINAL_IMAG:.*]] = phi float [ %[[C_IMAG]], %[[ENTRY]] ], [ %[[C_IMAG]], %[[COMPLEX_IS_IMAG_NAN]] ], [ %[[COMPLEX_CALL_IMAG]], %[[COMPLEX_LIB_CALL]] ]
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
+// OGCG: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4