diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h index 60e0aa163dc04..c2c6d18806308 100644 --- a/clang/include/clang/CIR/MissingFeatures.h +++ b/clang/include/clang/CIR/MissingFeatures.h @@ -289,7 +289,6 @@ struct MissingFeatures { static bool scalableVectors() { return false; } static bool unsizedTypes() { return false; } static bool vectorType() { return false; } - static bool complexType() { return false; } static bool fixedPointType() { return false; } static bool stringTypeWithDifferentArraySize() { return false; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 2261e24fe44c2..bbe985df7d0b9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -667,8 +667,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { mlir::Value VisitUnaryLNot(const UnaryOperator *e); mlir::Value VisitUnaryReal(const UnaryOperator *e); mlir::Value VisitUnaryImag(const UnaryOperator *e); + mlir::Value VisitRealImag(const UnaryOperator *e, + QualType promotionType = QualType()); mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) { CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die); @@ -864,11 +865,13 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM // codegen. 
QualType getPromotionType(QualType ty) { - if (ty->getAs<ComplexType>()) { - assert(!cir::MissingFeatures::complexType()); - cgf.cgm.errorNYI("promotion to complex type"); - return QualType(); + const clang::ASTContext &ctx = cgf.getContext(); + if (auto *complexTy = ty->getAs<ComplexType>()) { + QualType elementTy = complexTy->getElementType(); + if (elementTy.UseExcessPrecision(ctx)) + return ctx.getComplexType(ctx.FloatTy); } + if (ty.UseExcessPrecision(cgf.getContext())) { if (ty->getAs<VectorType>()) { assert(!cir::MissingFeatures::vectorType()); @@ -877,6 +880,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> { } return cgf.getContext().FloatTy; } + return QualType(); } @@ -2057,28 +2061,27 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) { } mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) { - // TODO(cir): handle scalar promotion. - Expr *op = e->getSubExpr(); - if (op->getType()->isAnyComplexType()) { - // If it's an l-value, load through the appropriate subobject l-value. - // Note that we have to ask `e` because `op` might be an l-value that - // this won't work for, e.g. an Obj-C property. - if (e->isGLValue()) { - mlir::Location loc = cgf.getLoc(e->getExprLoc()); - mlir::Value complex = cgf.emitComplexExpr(op); - return cgf.builder.createComplexReal(loc, complex); - } - - // Otherwise, calculate and project. - cgf.cgm.errorNYI(e->getSourceRange(), - "VisitUnaryReal calculate and project"); - } - - return Visit(op); + QualType promotionTy = getPromotionType(e->getSubExpr()->getType()); + mlir::Value result = VisitRealImag(e, promotionTy); + if (result && !promotionTy.isNull()) + result = emitUnPromotedValue(result, e->getType()); + return result; } mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) { - // TODO(cir): handle scalar promotion. 
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType()); + mlir::Value result = VisitRealImag(e, promotionTy); + if (result && !promotionTy.isNull()) + result = emitUnPromotedValue(result, e->getType()); + return result; +} + +mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e, + QualType promotionTy) { + assert((e->getOpcode() == clang::UO_Real || + e->getOpcode() == clang::UO_Imag) && + "Invalid UnaryOp kind for ComplexType Real or Imag"); + Expr *op = e->getSubExpr(); if (op->getType()->isAnyComplexType()) { // If it's an l-value, load through the appropriate subobject l-value. @@ -2087,15 +2090,26 @@ mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) { if (e->isGLValue()) { mlir::Location loc = cgf.getLoc(e->getExprLoc()); mlir::Value complex = cgf.emitComplexExpr(op); - return cgf.builder.createComplexImag(loc, complex); + if (!promotionTy.isNull()) { + complex = cgf.emitPromotedValue(complex, promotionTy); + } + + return e->getOpcode() == clang::UO_Real + ? builder.createComplexReal(loc, complex) + : builder.createComplexImag(loc, complex); } // Otherwise, calculate and project. cgf.cgm.errorNYI(e->getSourceRange(), - "VisitUnaryImag calculate and project"); + "VisitRealImag calculate and project"); + return {}; } - return Visit(op); + // __imag on a scalar returns zero, and __real is the operand itself. Emit + // the subexpr to ensure side effects are evaluated, but not the actual value. 
+ cgf.cgm.errorNYI(e->getSourceRange(), + "VisitRealImag __real or __imag on a scalar"); + return {}; } /// Return the size or alignment of the type of argument of the sizeof diff --git a/clang/test/CIR/CodeGen/complex.cpp b/clang/test/CIR/CodeGen/complex.cpp index 2e1198b09f010..8335fff414d21 100644 --- a/clang/test/CIR/CodeGen/complex.cpp +++ b/clang/test/CIR/CodeGen/complex.cpp @@ -927,3 +927,77 @@ void foo34() { // OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1 // OGCG: store float 1.000000e+00, ptr %[[A_REAL_PTR]], align 8 // OGCG: store float 2.000000e+00, ptr %[[A_IMAG_PTR]], align 4 + +void foo35() { + _Float16 _Complex a; + _Float16 real = __real__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"] +// CIR: %[[REAL_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["real", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16> +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16 +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16 +// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float> +// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float +// CIR: %[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16 +// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[REAL_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> + +// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2 +// LLVM: %[[REAL_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2 +// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1 +// 
LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0 +// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1 +// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// LLVM: store half %[[A_REAL_F16]], ptr %[[REAL_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2 +// OGCG: %[[REAL_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0 +// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2 +// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// OGCG: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half +// OGCG: store half %[[A_REAL_F16]], ptr %[[REAL_ADDR]], align 2 + +void foo36() { + _Float16 _Complex a; + _Float16 imag = __imag__ a; +} + +// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"] +// CIR: %[[IMAG_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["imag", init] +// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16> +// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16 +// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16 +// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float +// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float +// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float> +// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float +// CIR: %[[A_IMAG_F16:.*]] = cir.cast(floating, %[[A_IMAG_F32]] : !cir.float), !cir.f16 +// CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[IMAG_ADDR]] : !cir.f16, !cir.ptr<!cir.f16> + +// LLVM: %[[A_ADDR:.*]] = alloca { half, 
half }, i64 1, align 2 +// LLVM: %[[IMAG_ADDR:.*]] = alloca half, i64 1, align 2 +// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2 +// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0 +// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1 +// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float +// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0 +// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1 +// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half +// LLVM: store half %[[A_IMAG_F16]], ptr %[[IMAG_ADDR]], align 2 + +// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2 +// OGCG: %[[IMAG_ADDR:.*]] = alloca half, align 2 +// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1 +// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2 +// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float +// OGCG: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half +// OGCG: store half %[[A_IMAG_F16]], ptr %[[IMAG_ADDR]], align 2