[CIR] CompoundAssignment from ComplexType to ScalarType #152915
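This patch extends ClangIR compound-assignment codegen to the mixed case where one operand is a complex type and the other is a real scalar, replacing the previous errorNYI paths in ComplexExprEmitter and ScalarExprEmitter. A minimal sketch of the source patterns now covered (the function name below is illustrative; the concrete cases correspond to the updated foo5 checks and the new foo8/foo9 tests in complex-compound-assignment.cpp):

void mixed_compound_assign(void) {
  float _Complex a = 2.0f;
  float b = 1.0f;
  a += b; // only the real part of 'a' is updated; the imaginary part is carried through
  a *= b; // both the real and imaginary parts of 'a' are scaled by 'b'
  b += a; // exercised by the C-only test (foo9): 'b' receives the real part of the complex sum
}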

Merged
114 changes: 94 additions & 20 deletions clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -188,12 +188,6 @@ static const ComplexType *getComplexType(QualType type) {
}
#endif // NDEBUG

static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
mlir::Location loc, mlir::Value real) {
mlir::Value imag = builder.getNullValue(real.getType(), loc);
return builder.createComplexCreate(loc, real, imag);
}

LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value &value) {
assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -644,8 +638,12 @@ ComplexExprEmitter::emitPromotedComplexOperand(const Expr *e,
return Visit(const_cast<Expr *>(e));
}

cgf.cgm.errorNYI("emitPromotedComplexOperand non-complex type");
return {};
if (!promotionTy.isNull()) {
QualType complexElementTy =
promotionTy->castAs<ComplexType>()->getElementType();
return cgf.emitPromotedScalarExpr(e, complexElementTy);
}
return cgf.emitScalarExpr(e);
}

ComplexExprEmitter::BinOpInfo
@@ -690,13 +688,10 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
// The RHS should have been converted to the computation type.
if (e->getRHS()->getType()->isRealFloatingType()) {
if (!promotionTypeRHS.isNull()) {
opInfo.rhs = createComplexFromReal(
cgf.getBuilder(), loc,
cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS));
opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
} else {
assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc,
cgf.emitScalarExpr(e->getRHS()));
opInfo.rhs = cgf.emitScalarExpr(e->getRHS());
}
} else {
if (!promotionTypeRHS.isNull()) {
@@ -716,8 +711,27 @@ LValue ComplexExprEmitter::emitCompoundAssignLValue(
QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
} else {
cgf.cgm.errorNYI("emitCompoundAssignLValue emitLoadOfScalar");
return {};
mlir::Value lhsVal = cgf.emitLoadOfScalar(lhs, exprLoc);
// For floating point real operands we can directly pass the scalar form
// to the binary operator emission and potentially get more efficient code.
if (lhsTy->isRealFloatingType()) {
QualType promotedComplexElementTy;
if (!promotionTypeLHS.isNull()) {
promotedComplexElementTy =
cast<ComplexType>(promotionTypeLHS)->getElementType();
if (!cgf.getContext().hasSameUnqualifiedType(promotedComplexElementTy,
promotionTypeLHS))
lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy,
promotedComplexElementTy, exprLoc);
} else {
if (!cgf.getContext().hasSameUnqualifiedType(complexElementTy, lhsTy))
lhsVal = cgf.emitScalarConversion(lhsVal, lhsTy, complexElementTy,
exprLoc);
}
opInfo.lhs = lhsVal;
} else {
opInfo.lhs = emitScalarToComplexCast(lhsVal, lhsTy, opInfo.ty, exprLoc);
}
}

// Expand the binary operator.
@@ -759,13 +773,45 @@ mlir::Value ComplexExprEmitter::emitCompoundAssign(
mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);

if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
mlir::isa<cir::ComplexType>(op.rhs.getType()))
return builder.create<cir::ComplexAddOp>(op.loc, op.lhs, op.rhs);

if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
mlir::Value newReal = builder.createAdd(op.loc, real, op.rhs);
return builder.createComplexCreate(op.loc, newReal, imag);
}

assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
mlir::Value newReal = builder.createAdd(op.loc, op.lhs, real);
return builder.createComplexCreate(op.loc, newReal, imag);
}

mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);

if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
mlir::isa<cir::ComplexType>(op.rhs.getType()))
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);

if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
mlir::Value newReal = builder.createSub(op.loc, real, op.rhs);
return builder.createComplexCreate(op.loc, newReal, imag);
}

assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
mlir::Value newReal = builder.createSub(op.loc, op.lhs, real);
return builder.createComplexCreate(op.loc, newReal, imag);
}

static cir::ComplexRangeKind
@@ -788,9 +834,28 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
assert(!cir::MissingFeatures::fastMathFlags());
assert(!cir::MissingFeatures::cgFPOptionsRAII());
cir::ComplexRangeKind rangeKind =
getComplexRangeAttr(op.fpFeatures.getComplexRange());
return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);

if (mlir::isa<cir::ComplexType>(op.lhs.getType()) &&
mlir::isa<cir::ComplexType>(op.rhs.getType())) {
cir::ComplexRangeKind rangeKind =
getComplexRangeAttr(op.fpFeatures.getComplexRange());
return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
}

if (mlir::isa<cir::ComplexType>(op.lhs.getType())) {
mlir::Value real = builder.createComplexReal(op.loc, op.lhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.lhs);
mlir::Value newReal = builder.createMul(op.loc, real, op.rhs);
mlir::Value newImag = builder.createMul(op.loc, imag, op.rhs);
return builder.createComplexCreate(op.loc, newReal, newImag);
}

assert(mlir::isa<cir::ComplexType>(op.rhs.getType()));
mlir::Value real = builder.createComplexReal(op.loc, op.rhs);
mlir::Value imag = builder.createComplexImag(op.loc, op.rhs);
mlir::Value newReal = builder.createMul(op.loc, op.lhs, real);
mlir::Value newImag = builder.createMul(op.loc, op.lhs, imag);
return builder.createComplexCreate(op.loc, newReal, newImag);
}

LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
@@ -888,3 +953,12 @@ mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
return builder.createCast(cir::CastKind::float_complex, result,
convertType(promotionType));
}

LValue CIRGenFunction::emitScalarCompoundAssignWithComplex(
const CompoundAssignOperator *e, mlir::Value &result) {
CompoundFunc op = getComplexOp(e->getOpcode());
RValue value;
LValue ret = ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, value);
result = value.getValue();
return ret;
}
8 changes: 3 additions & 5 deletions clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1066,14 +1066,12 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
const CompoundAssignOperator *e,
mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
mlir::Value &result) {
if (e->getComputationResultType()->isAnyComplexType())
return cgf.emitScalarCompoundAssignWithComplex(e, result);

QualType lhsTy = e->getLHS()->getType();
BinOpInfo opInfo;

if (e->getComputationResultType()->isAnyComplexType()) {
cgf.cgm.errorNYI(result.getLoc(), "complex lvalue assign");
return LValue();
}

// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.

2 changes: 2 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1100,6 +1100,8 @@ class CIRGenFunction : public CIRGenTypeCache {

LValue emitComplexAssignmentLValue(const BinaryOperator *e);
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e,
mlir::Value &result);

void emitCompoundStmt(const clang::CompoundStmt &s);

100 changes: 89 additions & 11 deletions clang/test/CIR/CodeGen/complex-compound-assignment.cpp
@@ -296,26 +296,22 @@ void foo5() {
// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
// CIR: %[[COMPLEX_B:.*]] = cir.complex.create %[[TMP_B]], %[[CONST_ZERO]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_A]], %[[COMPLEX_B]] : !cir.complex<!cir.float>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[RESULT_REAL:.*]] = cir.binop(add, %[[A_REAL]], %[[TMP_B]]) : !cir.float
// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
// LLVM: %[[TMP_COMPLEX_B:.*]] = insertvalue { float, float } {{.*}}, float %[[TMP_B]], 0
// LLVM: %[[COMPLEX_B:.*]] = insertvalue { float, float } %[[TMP_COMPLEX_B]], float 0.000000e+00, 1
// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 0
// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[COMPLEX_B]], 1
// LLVM: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
// LLVM: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[ADD_IMAG]], 1
// LLVM: %[[RESULT_REAL:.*]] = fadd float %[[A_REAL]], %[[TMP_B]]
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[A_IMAG]], 1
// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4

// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
@@ -494,3 +490,85 @@ void foo7() {
// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[FINAL_REAL]], ptr %[[C_REAL_PTR]], align 4
// OGCG: store float %[[FINAL_IMAG]], ptr %[[C_IMAG_PTR]], align 4

void foo8() {
float _Complex a;
float b;
a *= b;
}

// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[RESULT_REAL:.*]] = cir.binop(mul, %[[A_REAL]], %[[TMP_B]]) : !cir.float
// CIR: %[[RESULT_IMAG:.*]] = cir.binop(mul, %[[A_IMAG]], %[[TMP_B]]) : !cir.float
// CIR: %[[RESULT:.*]] = cir.complex.create %[[RESULT_REAL]], %[[RESULT_IMAG]] : !cir.float -> !cir.complex<!cir.float>
// CIR: cir.store{{.*}} %[[RESULT]], %[[A_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
// LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
// LLVM: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
// LLVM: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
// LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[RESULT_REAL]], 0
// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[TMP_RESULT]], float %[[RESULT_IMAG]], 1
// LLVM: store { float, float } %[[RESULT]], ptr %[[A_ADDR]], align 4

// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
// OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
// OGCG: %[[RESULT_REAL:.*]] = fmul float %[[A_REAL]], %[[TMP_B]]
// OGCG: %[[RESULT_IMAG:.*]] = fmul float %[[A_IMAG]], %[[TMP_B]]
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: store float %[[RESULT_REAL]], ptr %[[A_REAL_PTR]], align 4
// OGCG: store float %[[RESULT_IMAG]], ptr %[[A_IMAG_PTR]], align 4

#ifndef __cplusplus
void foo9() {
float _Complex a;
float b;
b += a;
}
#endif

// C_CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// C_CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b"]
// C_CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// C_CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.float>, !cir.float
// C_CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// C_CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.float> -> !cir.float
// C_CIR: %[[NEW_REAL:.*]] = cir.binop(add, %[[TMP_B]], %[[A_REAL]]) : !cir.float
// C_CIR: %[[RESULT:.*]] = cir.complex.create %[[NEW_REAL]], %[[A_IMAG]] : !cir.float -> !cir.complex<!cir.float>
// C_CIR: %[[RESULT_REAL:.*]] = cir.complex.real %[[RESULT]] : !cir.complex<!cir.float> -> !cir.float
// C_CIR: cir.store{{.*}} %[[RESULT_REAL]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>

// C_LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// C_LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
// C_LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
// C_LLVM: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
// C_LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
// C_LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
// C_LLVM: %[[NEW_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
// C_LLVM: %[[TMP_RESULT:.*]] = insertvalue { float, float } {{.*}}, float %[[NEW_REAL]], 0
// C_LLVM: store float %[[NEW_REAL]], ptr %[[B_ADDR]], align 4

// C_OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
// C_OGCG: %[[B_ADDR:.*]] = alloca float, align 4
// C_OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
// C_OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
// C_OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// C_OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
// C_OGCG: %[[TMP_B:.*]] = load float, ptr %[[B_ADDR]], align 4
// C_OGCG: %[[ADD_REAL:.*]] = fadd float %[[TMP_B]], %[[A_REAL]]
// C_OGCG: store float %[[ADD_REAL]], ptr %[[B_ADDR]], align 4