Skip to content

Commit 1d81a7d

Browse files
AmrDeveloper authored and github-actions[bot] committed
Automerge: [CIR] CompoundAssignment from ComplexType to ScalarType (#152915)
This change adds support for the CompoundAssignment for ComplexType and updates our approach for emitting bin op between Complex & Scalar llvm/llvm-project#141365
2 parents c6d183c + 475aa1b commit 1d81a7d

File tree

5 files changed

+246
-48
lines changed

5 files changed

+246
-48
lines changed

clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp

Lines changed: 94 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -188,12 +188,6 @@ static const ComplexType *getComplexType(QualType type) {
188188
}
189189
#endif // NDEBUG
190190

191-
static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
192-
mlir::Location loc, mlir::Value real) {
193-
mlir::Value imag = builder.getNullValue(real.getType(), loc);
194-
return builder.createComplexCreate(loc, real, imag);
195-
}
196-
197191
LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
198192
mlir::Value &value) {
199193
assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -644,8 +638,12 @@ ComplexExprEmitter::emitPromotedComplexOperand(const Expr *e,
644638
return Visit(const_cast<Expr *>(e));
645639
}
646640

647-
cgf.cgm.errorNYI("emitPromotedComplexOperand non-complex type");
648-
return {};
641+
if (!promotionTy.isNull()) {
642+
QualType complexElementTy =
643+
promotionTy->castAs<ComplexType>()->getElementType();
644+
return cgf.emitPromotedScalarExpr(e, complexElementTy);
645+
}
646+
return cgf.emitScalarExpr(e);
649647
}
650648

651649
ComplexExprEmitter::BinOpInfo
@@ -690,13 +688,10 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
690688
// The RHS should have been converted to the computation type.
691689
if (e->getRHS()->getType()->isRealFloatingType()) {
692690
if (!promotionTypeRHS.isNull()) {
693-
opInfo.rhs = createComplexFromReal(
694-
cgf.getBuilder(), loc,
695-
cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS));
691+
opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
696692
} else {
697693
assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
698-
opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc,
699-
cgf.emitScalarExpr(e->getRHS()));
694+
opInfo.rhs = cgf.emitScalarExpr(e->getRHS());
700695
}
701696
} else {
702697
if (!promotionTypeRHS.isNull()) {
@@ -716,8 +711,27 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
716711
QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
717712
opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
718713
} else {
719-
cgf.cgm.errorNYI("emitCompoundAssignLValue emitLoadOfScalar");
720-
return {};
714+
mlir::Value lhsVal = cgf.emitLoadOfScalar(lhs, exprLoc);
715+
// For floating point real operands we can directly pass the scalar form
716+
// to the binary operator emission and potentially get more efficient code.
717+
if (lhsTy->isRealFloatingType()) {
718+
QualType promotedComplexElementTy;
719+
if (!promotionTypeLHS.isNull()) {
720+
promotedComplexElementTy =
721+
cast<ComplexType>(promotionTypeLHS)->getElementType();
722+
if (!cgf.getContext().hasSameUnqualifiedType(promotedComplexElementTy,
723+
promotionTypeLHS))
724+
lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy,
725+
promotedComplexElementTy, exprLoc);
726+
} else {
727+
if (!cgf.getContext().hasSameUnqualifiedType(complexElementTy, lhsTy))
728+
lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy, complexElementTy,
729+
exprLoc);
730+
}
731+
opInfo.lhs = lhsVal;
732+
} else {
733+
opInfo.lhs = emitScalarToComplexCast(lhsVal, lhsTy, opInfo.ty, exprLoc);
734+
}
721735
}
722736

723737
// Expand the binary operator.
@@ -759,13 +773,45 @@ mlir::Value ComplexExprEmitter::emitCompoundAssign(
759773
mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
760774
assert(!cir::MissingFeatures::fastMathFlags());
761775
assert(!cir::MissingFeatures::cgFPOptionsRAII());
762-
return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
776+
777+
if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
778+
mlir::isa<cir::ComplexType>(op.rhs.getType()))
779+
return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);
780+
781+
if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
782+
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
783+
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
784+
mlir::Value newReal = builder.createAdd(op.loc, real, op.rhs);
785+
return builder.createComplexCreate(op.loc, newReal, imag);
786+
}
787+
788+
assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
789+
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
790+
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
791+
mlir::Value newReal = builder.createAdd(op.loc, op.lhs, real);
792+
return builder.createComplexCreate(op.loc, newReal, imag);
763793
}
764794

765795
mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
766796
assert(!cir::MissingFeatures::fastMathFlags());
767797
assert(!cir::MissingFeatures::cgFPOptionsRAII());
768-
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
798+
799+
if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
800+
mlir::isa<cir::ComplexType>(op.rhs.getType()))
801+
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
802+
803+
if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
804+
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
805+
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
806+
mlir::Value newReal = builder.createSub(op.loc, real, op.rhs);
807+
return builder.createComplexCreate(op.loc, newReal, imag);
808+
}
809+
810+
assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
811+
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
812+
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
813+
mlir::Value newReal = builder.createSub(op.loc, op.lhs, real);
814+
return builder.createComplexCreate(op.loc, newReal, imag);
769815
}
770816

771817
static cir::ComplexRangeKind
@@ -788,9 +834,28 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
788834
mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
789835
assert(!cir::MissingFeatures::fastMathFlags());
790836
assert(!cir::MissingFeatures::cgFPOptionsRAII());
791-
cir::ComplexRangeKind rangeKind =
792-
getComplexRangeAttr(op.fpFeatures.getComplexRange());
793-
return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
837+
838+
if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
839+
mlir::isa<cir::ComplexType>(op.rhs.getType())) {
840+
cir::ComplexRangeKind rangeKind =
841+
getComplexRangeAttr(op.fpFeatures.getComplexRange());
842+
return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
843+
}
844+
845+
if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
846+
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
847+
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
848+
mlir::Value newReal = builder.createMul(op.loc, real, op.rhs);
849+
mlir::Value newImag = builder.createMul(op.loc, imag, op.rhs);
850+
return builder.createComplexCreate(op.loc, newReal, newImag);
851+
}
852+
853+
assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
854+
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
855+
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
856+
mlir::Value newReal = builder.createMul(op.loc, op.lhs, real);
857+
mlir::Value newImag = builder.createMul(op.loc, op.lhs, imag);
858+
return builder.createComplexCreate(op.loc, newReal, newImag);
794859
}
795860

796861
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
@@ -888,3 +953,12 @@ mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
888953
return builder.createCast(cir::CastKind::float_complex, result,
889954
convertType(promotionType));
890955
}
956+
957+
LValue CIRGenFunction::emitScalarCompoundAssignWithComplex(
958+
const CompoundAssignOperator *e, mlir::Value &result) {
959+
CompoundFunc op = getComplexOp(e->getOpcode());
960+
RValue value;
961+
LValue ret = ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, value);
962+
result = value.getValue();
963+
return ret;
964+
}

clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1066,14 +1066,12 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
10661066
const CompoundAssignOperator *e,
10671067
mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
10681068
mlir::Value &result) {
1069+
if (e->getComputationResultType()->isAnyComplexType())
1070+
return cgf.emitScalarCompoundAssignWithComplex(e, result);
1071+
10691072
QualType lhsTy = e->getLHS()->getType();
10701073
BinOpInfo opInfo;
10711074

1072-
if (e->getComputationResultType()->isAnyComplexType()) {
1073-
cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
1074-
return LValue();
1075-
}
1076-
10771075
// Emit the RHS first. __block variables need to have the rhs evaluated
10781076
// first, plus this should improve codegen a little.
10791077

clang/lib/CIR/CodeGen/CIRGenFunction.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1100,6 +1100,8 @@ class CIRGenFunction : public CIRGenTypeCache {
11001100

11011101
LValue emitComplexAssignmentLValue(const BinaryOperator *e);
11021102
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
1103+
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
1104+
mlir::Value &result);
11031105

11041106
void emitCompoundStmt(const clang::CompoundStmt &s);
11051107

clang/test/CIR/CodeGen/complex-compound-assignment.cpp

Lines changed: 89 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -296,26 +296,22 @@ void foo5() {
296296
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
297297
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
298298
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
299-
// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
300-
// CIR: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_ZERO]] : !cir.float -> !cir.complex<!cir.float>
301299
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
302-
// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_A]], %[[COMPLEX_B]] : !cir.complex<!cir.float>
300+
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
301+
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
302+
// CIR: %[[RESULT_REAL:.*]] = cir.binop(add, %[[A_REAL]], %[[TMP_B]]) : !cir.float
303+
// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
303304
// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
304305

305306
// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
306307
// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
307308
// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
308-
// LLVM: %[[TMP_COMPLEX_B:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_B]], 0
309-
// LLVM: %[[COMPLEX_B:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_B]], float 0.000000e+00, 1
310309
// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
311310
// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
312311
// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
313-
// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 0
314-
// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 1
315-
// LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
316-
// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
317-
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
318-
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[ADD_IMAG]], 1
312+
// LLVM: %[[RESULT_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
313+
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
314+
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1
319315
// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
320316

321317
// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
@@ -494,3 +490,85 @@ void foo7() {
494490
// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
495491
// OGCG: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
496492
// OGCG: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4
493+
494+
void foo8() {
495+
float _Complex a;
496+
float b;
497+
a *= b;
498+
}
499+
500+
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
501+
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
502+
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
503+
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
504+
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
505+
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
506+
// CIR: %[[RESULT_REAL:.*]] = cir.binop(mul, %[[A_REAL]], %[[TMP_B]]) : !cir.float
507+
// CIR: %[[RESULT_IMAG:.*]] = cir.binop(mul, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
508+
// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
509+
// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
510+
511+
// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
512+
// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
513+
// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
514+
// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
515+
// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
516+
// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
517+
// LLVM: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
518+
// LLVM: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
519+
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
520+
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
521+
// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4
522+
523+
// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
524+
// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
525+
// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
526+
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
527+
// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
528+
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
529+
// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
530+
// OGCG: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
531+
// OGCG: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
532+
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
533+
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
534+
// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
535+
// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4
536+
537+
#ifndef __cplusplus
538+
void foo9() {
539+
float _Complex a;
540+
float b;
541+
b += a;
542+
}
543+
#endif
544+
545+
// C_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
546+
// C_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
547+
// C_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
548+
// C_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
549+
// C_CIR: %[[A_REAL:.*]] = cir.complex.real %[[A_ADDR]] : !cir.complex<!cir.float> -> !cir.float
550+
// C_CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[A_ADDR]] : !cir.complex<!cir.float> -> !cir.float
551+
// C_CIR: %[[NEW_REAL:.*]] = cir.binop(add, %[[TMP_B]], %[[A_REAL]]) : !cir.float
552+
// C_CIR: %[[RESULT:.*]] = cir.complex.create %[[NEW_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
553+
// C_CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
554+
// C_CIR: cir.store{{.*}} %[[RESULT_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
555+
556+
// C_LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
557+
// C_LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
558+
// C_LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
559+
// C_LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
560+
// C_LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
561+
// C_LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
562+
// C_LLVM: %[[NEW_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
563+
// C_LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[NEW_REAL]], 0
564+
// C_LLVM: store float %[[NEW_REAL]], ptr %[[B_ADDR]], align 4
565+
566+
// C_OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
567+
// C_OGCG: %[[B_ADDR:.*]] = alloca float, align 4
568+
// C_OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
569+
// C_OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
570+
// C_OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
571+
// C_OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
572+
// C_OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
573+
// C_OGCG: %[[ADD_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
574+
// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4

0 commit comments

Comments (0)