@@ -534,6 +534,173 @@ void foo8() {
 // OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
 // OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
 
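+// foo10: compound-assignment division of float _Complex by float _Complex.
+// The checks below expect the division to be lowered to a __divsc3 libcall.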
+void foo10 () {
+  float _Complex a;
+  float _Complex b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[TMP_B]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT:.*]] = cir.call @__divsc3(%[[A_REAL]], %[[A_IMAG]], %[[B_REAL]], %[[B_IMAG]]) : (!cir.float, !cir.float, !cir.float, !cir.float) -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[RESULT:.*]] = call { float, float } @__divsc3(float %[[A_REAL]], float %[[A_IMAG]], float %[[B_REAL]], float %[[B_IMAG]])
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[RESULT_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT:.*]] = call{{.*}} <2 x float> @__divsc3(float noundef %[[A_REAL]], float noundef %[[A_IMAG]], float noundef %[[B_REAL]], float noundef %[[B_IMAG]])
+// OGCG: store <2 x float> %[[RESULT]], ptr %[[RESULT_ADDR]], align 4
+// OGCG: %[[RESULT_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 0
+// OGCG: %[[RESULT_REAL:.*]] = load float, ptr %[[RESULT_REAL_PTR]], align 4
+// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT_ADDR]], i32 0, i32 1
+// OGCG: %[[RESULT_IMAG:.*]] = load float, ptr %[[RESULT_IMAG_PTR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
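+// foo11: float _Complex divided by a scalar float. The checks below expect
+// element-wise division of the real and imaginary parts by the scalar.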
+void foo11 () {
+  float _Complex a;
+  float b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[A_REAL]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[RESULT_REAL:.*]] = fdiv float %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[RESULT_IMAG:.*]] = fdiv float %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
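+// foo12: int _Complex divided by a scalar int. The checks below expect the
+// scalar to be promoted to a complex value with a zero imaginary part, then
+// the integer expansion (ac+bd)/(cc+dd) and (bc-ad)/(cc+dd) for (a+bi)/(c+di).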
+void foo12 () {
+  int _Complex a;
+  int b;
+  a /= b;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["b"]
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!s32i>, !s32i
+// CIR: %[[CONST_0:.*]] = cir.const #cir.int<0> : !s32i
+// CIR: %[[B_COMPLEX:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_0]] : !s32i -> !cir.complex<!s32i>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_REAL:.*]] = cir.complex.real %[[B_COMPLEX]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[B_IMAG:.*]] = cir.complex.imag %[[B_COMPLEX]] : !cir.complex<!s32i> -> !s32i
+// CIR: %[[MUL_AR_BR:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AI_BI:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[MUL_BR_BR:.*]] = cir.binop(mul, %[[B_REAL]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_BI_BI:.*]] = cir.binop(mul, %[[B_IMAG]], %[[B_IMAG]]) : !s32i
+// CIR: %[[ADD_ARBR_AIBI:.*]] = cir.binop(add, %[[MUL_AR_BR]], %[[MUL_AI_BI]]) : !s32i
+// CIR: %[[ADD_BRBR_BIBI:.*]] = cir.binop(add, %[[MUL_BR_BR]], %[[MUL_BI_BI]]) : !s32i
+// CIR: %[[RESULT_REAL:.*]] = cir.binop(div, %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[MUL_AI_BR:.*]] = cir.binop(mul, %[[A_IMAG]], %[[B_REAL]]) : !s32i
+// CIR: %[[MUL_AR_BI:.*]] = cir.binop(mul, %[[A_REAL]], %[[B_IMAG]]) : !s32i
+// CIR: %[[SUB_AIBR_ARBI:.*]] = cir.binop(sub, %[[MUL_AI_BR]], %[[MUL_AR_BI]]) : !s32i
+// CIR: %[[RESULT_IMAG:.*]] = cir.binop(div, %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]) : !s32i
+// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !s32i -> !cir.complex<!s32i>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca i32, i64 1, align 4
+// LLVM: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// LLVM: %[[TMP_B_COMPLEX:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[TMP_B]], 0
+// LLVM: %[[B_COMPLEX:.*]] = insertvalue { i32, i32 } %[[TMP_B_COMPLEX]], i32 0, 1
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
+// LLVM: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// LLVM: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// LLVM: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// LLVM: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// LLVM: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// LLVM: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// LLVM: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// LLVM: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// LLVM: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// LLVM: %[[TMP_RESULT:.*]] = insertvalue { i32, i32 } {{.*}}, i32 %[[RESULT_REAL]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } %[[TMP_RESULT]], i32 %[[RESULT_IMAG]], 1
+// LLVM: store { i32, i32 } %[[RESULT]], ptr %[[A_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca i32, align 4
+// OGCG: %[[TMP_B:.*]] = load i32, ptr %[[B_ADDR]], align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[MUL_AR_BR:.*]] = mul i32 %[[A_REAL]], %[[TMP_B]]
+// OGCG: %[[MUL_AI_BI:.*]] = mul i32 %[[A_IMAG]], 0
+// OGCG: %[[ADD_ARBR_AIBI:.*]] = add i32 %[[MUL_AR_BR]], %[[MUL_AI_BI]]
+// OGCG: %[[MUL_BR_BR:.*]] = mul i32 %[[TMP_B]], %[[TMP_B]]
+// OGCG: %[[ADD_BRBR_BIBI:.*]] = add i32 %[[MUL_BR_BR]], 0
+// OGCG: %[[MUL_AI_BR:.*]] = mul i32 %[[A_IMAG]], %[[TMP_B]]
+// OGCG: %[[MUL_AR_BI:.*]] = mul i32 %[[A_REAL]], 0
+// OGCG: %[[SUB_AIBR_ARBI:.*]] = sub i32 %[[MUL_AI_BR]], %[[MUL_AR_BI]]
+// OGCG: %[[RESULT_REAL:.*]] = sdiv i32 %[[ADD_ARBR_AIBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[RESULT_IMAG:.*]] = sdiv i32 %[[SUB_AIBR_ARBI]], %[[ADD_BRBR_BIBI]]
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: store i32 %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
+// OGCG: store i32 %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
+
 #ifndef __cplusplus
 void foo9 () {
   float _Complex a;
@@ -571,4 +738,4 @@ void foo9() {
 // C_OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
 // C_OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
 // C_OGCG: %[[ADD_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
-// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4
+// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4