75 changes: 72 additions & 3 deletions clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -82,6 +82,17 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
}

mlir::Value VisitUnaryDeref(const Expr *e);

mlir::Value VisitUnaryPlus(const UnaryOperator *e,
QualType promotionType = QualType());
Contributor:
I don't think VisitUnaryPlus and VisitUnaryMinus are ever called with a promotionType argument. I looked at the commit where these arguments were added in classic codegen (5def954) and it looks like it was possibly an artifact of some previous state. I'd suggest leaving them out for now.

Member Author:
Sure, I will do that.
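Not part of the patch — a minimal sketch of the declarations as the reviewer suggests, assuming the promotion type is computed inside each visitor instead of being passed as a defaulted argument:

// Hypothetical declarations following the review suggestion (assumption,
// not in this patch): no defaulted promotionType parameter, since no
// caller passes one.
mlir::Value VisitUnaryPlus(const UnaryOperator *e);
mlir::Value VisitUnaryMinus(const UnaryOperator *e);

// Each visitor would then derive the promotion type itself, e.g.:
//   QualType promotionTy = getPromotionType(e->getSubExpr()->getType());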


mlir::Value VisitPlus(const UnaryOperator *e, QualType promotionType);

mlir::Value VisitUnaryMinus(const UnaryOperator *e,
QualType promotionType = QualType());

mlir::Value VisitMinus(const UnaryOperator *e, QualType promotionType);

mlir::Value VisitUnaryNot(const UnaryOperator *e);

struct BinOpInfo {
@@ -174,6 +185,58 @@ mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
return {};
}

mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e,
QualType promotionType) {
QualType promotionTy = promotionType.isNull()
? getPromotionType(e->getSubExpr()->getType())
: promotionType;
mlir::Value result = VisitPlus(e, promotionTy);
if (!promotionTy.isNull()) {
cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
return {};
}

return result;
}

mlir::Value ComplexExprEmitter::VisitPlus(const UnaryOperator *e,
Contributor:
Can VisitPlus and VisitMinus be combined into a single emitUnaryPlusOrMinus as they are in ScalarExprEmitter?

Contributor:
I still think these can be combined, and I don't think they are actually called by the Visitor pattern, so it would be better to rename them. If I'm wrong about them not being called by the Visitor they should stay.

Member Author:
I will try to combine them into one function, similar to what we did with VisitPrePostIncDec, and also check the visitor; a sketch of the combined helper follows the VisitMinus definition below.

QualType promotionType) {
mlir::Value op;
if (!promotionType.isNull())
op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
else
op = Visit(e->getSubExpr());

return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()),
cir::UnaryOpKind::Plus, op);
}

mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e,
QualType promotionType) {
QualType promotionTy = promotionType.isNull()
? getPromotionType(e->getSubExpr()->getType())
: promotionType;
mlir::Value result = VisitMinus(e, promotionTy);
if (!promotionTy.isNull()) {
cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
return {};
}

return result;
}

mlir::Value ComplexExprEmitter::VisitMinus(const UnaryOperator *e,
QualType promotionType) {
mlir::Value op;
if (!promotionType.isNull())
op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
else
op = Visit(e->getSubExpr());

return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()),
cir::UnaryOpKind::Minus, op);
}
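
Not part of the patch — a minimal sketch of the combined helper the reviewers suggest (the name emitUnaryPlusOrMinus mirrors ScalarExprEmitter and is assumed here), built from the VisitPlus/VisitMinus bodies above:

// Hypothetical combined helper (assumption, not in this patch): the unary
// kind is passed in, so one function handles both +z and -z.
mlir::Value ComplexExprEmitter::emitUnaryPlusOrMinus(const UnaryOperator *e,
                                                     cir::UnaryOpKind kind,
                                                     QualType promotionType) {
  mlir::Value op;
  if (!promotionType.isNull())
    op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
  else
    op = Visit(e->getSubExpr());

  // Emit a single cir.unary over the whole complex value; LoweringPrepare
  // later splits it into per-element real/imag operations.
  return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
}

VisitUnaryPlus and VisitUnaryMinus would then forward cir::UnaryOpKind::Plus or cir::UnaryOpKind::Minus to this helper instead of calling separate VisitPlus/VisitMinus functions.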

mlir::Value ComplexExprEmitter::emitConstant(
const CIRGenFunction::ConstantEmission &constant, Expr *e) {
assert(constant && "not a constant");
@@ -389,9 +452,15 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
default:
break;
}
} else if (isa<UnaryOperator>(e)) {
cgf.cgm.errorNYI("emitPromoted UnaryOperator");
return {};
} else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
switch (unaryOp->getOpcode()) {
case UO_Minus:
return VisitMinus(unaryOp, promotionTy);
case UO_Plus:
return VisitPlus(unaryOp, promotionTy);
default:
break;
}
}

mlir::Value result = Visit(const_cast<Expr *>(e));
3 changes: 2 additions & 1 deletion clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -56,7 +56,8 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {

case cir::UnaryOpKind::Plus:
case cir::UnaryOpKind::Minus:
llvm_unreachable("Complex unary Plus/Minus NYI");
resultReal = builder.createUnaryOp(loc, opKind, operandReal);
resultImag = builder.createUnaryOp(loc, opKind, operandImag);
break;

case cir::UnaryOpKind::Not:
86 changes: 86 additions & 0 deletions clang/test/CIR/CodeGen/complex-unary.cpp
@@ -284,3 +284,89 @@ void foo6() {
// OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[A_REAL_DEC]], ptr %[[RESULT_REAL_PTR]], align 4
// OGCG: store float %[[A_IMAG]], ptr %[[RESULT_IMAG_PTR]], align 4

void foo7() {
float _Complex a;
float _Complex b = +a;
}

// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR-BEFORE: %[[COMPLEX_PLUS:.*]] = cir.unary(plus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_PLUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[REAL_PLUS:.*]] = cir.unary(plus, %[[REAL]]) : !cir.float, !cir.float
// CIR-AFTER: %[[IMAG_PLUS:.*]] = cir.unary(plus, %[[IMAG]]) : !cir.float, !cir.float
// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_PLUS]], %[[IMAG_PLUS]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL]], 0
// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG]], 1
// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4

// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[A_REAL]], ptr %[[B_REAL_PTR]], align 4
// OGCG: store float %[[A_IMAG]], ptr %[[B_IMAG_PTR]], align 4

void foo8() {
float _Complex a;
float _Complex b = -a;
}

// CIR-BEFORE: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR-BEFORE: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
// CIR-BEFORE: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR-BEFORE: %[[COMPLEX_MINUS:.*]] = cir.unary(minus, %[[TMP]]) : !cir.complex<!cir.float>, !cir.complex<!cir.float>
// CIR-BEFORE: cir.store{{.*}} %[[COMPLEX_MINUS]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// CIR-AFTER: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
// CIR-AFTER: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b", init]
// CIR-AFTER: %[[TMP:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
// CIR-AFTER: %[[REAL:.*]] = cir.complex.real %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[IMAG:.*]] = cir.complex.imag %[[TMP]] : !cir.complex<!cir.float> -> !cir.float
// CIR-AFTER: %[[REAL_MINUS:.*]] = cir.unary(minus, %[[REAL]]) : !cir.float, !cir.float
// CIR-AFTER: %[[IMAG_MINUS:.*]] = cir.unary(minus, %[[IMAG]]) : !cir.float, !cir.float
// CIR-AFTER: %[[NEW_COMPLEX:.*]] = cir.complex.create %[[REAL_MINUS]], %[[IMAG_MINUS]] : !cir.float -> !cir.complex<!cir.float>
// CIR-AFTER: cir.store{{.*}} %[[NEW_COMPLEX]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
// LLVM: %[[TMP:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
// LLVM: %[[REAL:.*]] = extractvalue { float, float } %[[TMP]], 0
// LLVM: %[[IMAG:.*]] = extractvalue { float, float } %[[TMP]], 1
// LLVM: %[[REAL_MINUS:.*]] = fneg float %[[REAL]]
// LLVM: %[[IMAG_MINUS:.*]] = fneg float %[[IMAG]]
// LLVM: %[[RESULT_TMP:.*]] = insertvalue { float, float } {{.*}}, float %[[REAL_MINUS]], 0
// LLVM: %[[RESULT_VAL:.*]] = insertvalue { float, float } %[[RESULT_TMP]], float %[[IMAG_MINUS]], 1
// LLVM: store { float, float } %[[RESULT_VAL]], ptr %[[B_ADDR]], align 4

// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
// OGCG: %[[A_REAL_MINUS:.*]] = fneg float %[[A_REAL]]
// OGCG: %[[A_IMAG_MINUS:.*]] = fneg float %[[A_IMAG]]
// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
// OGCG: store float %[[A_REAL_MINUS]], ptr %[[B_REAL_PTR]], align 4
// OGCG: store float %[[A_IMAG_MINUS]], ptr %[[B_IMAG_PTR]], align 4