1 change: 0 additions & 1 deletion clang/include/clang/CIR/MissingFeatures.h
@@ -289,7 +289,6 @@ struct MissingFeatures {
static bool scalableVectors() { return false; }
static bool unsizedTypes() { return false; }
static bool vectorType() { return false; }
static bool complexType() { return false; }
static bool fixedPointType() { return false; }
static bool stringTypeWithDifferentArraySize() { return false; }

68 changes: 41 additions & 27 deletions clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -667,8 +667,9 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
mlir::Value VisitUnaryLNot(const UnaryOperator *e);

mlir::Value VisitUnaryReal(const UnaryOperator *e);

mlir::Value VisitUnaryImag(const UnaryOperator *e);
mlir::Value VisitRealImag(const UnaryOperator *e,
QualType promotionType = QualType());

mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
@@ -864,11 +865,13 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
// TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
// codegen.
QualType getPromotionType(QualType ty) {
if (ty->getAs<ComplexType>()) {
assert(!cir::MissingFeatures::complexType());
cgf.cgm.errorNYI("promotion to complex type");
return QualType();
const clang::ASTContext &ctx = cgf.getContext();
if (auto *complexTy = ty->getAs<ComplexType>()) {
QualType elementTy = complexTy->getElementType();
if (elementTy.UseExcessPrecision(ctx))
return ctx.getComplexType(ctx.FloatTy);
}

if (ty.UseExcessPrecision(cgf.getContext())) {
if (ty->getAs<VectorType>()) {
assert(!cir::MissingFeatures::vectorType());
@@ -877,6 +880,7 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
}
return cgf.getContext().FloatTy;
}

return QualType();
}

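With this change, getPromotionType also reports an excess-precision promotion for complex element types. A minimal sketch of the resulting contract, assuming a target where _Float16 arithmetic is carried out in float excess precision (for example x86 without native FP16 arithmetic):

//   getPromotionType(_Float16)           -> float
//   getPromotionType(_Float16 _Complex)  -> float _Complex
//   getPromotionType(double _Complex)    -> QualType()   (no promotion needed)
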
@@ -2057,28 +2061,27 @@ mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
}

mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
// TODO(cir): handle scalar promotion.
Expr *op = e->getSubExpr();
if (op->getType()->isAnyComplexType()) {
// If it's an l-value, load through the appropriate subobject l-value.
// Note that we have to ask `e` because `op` might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (e->isGLValue()) {
mlir::Location loc = cgf.getLoc(e->getExprLoc());
mlir::Value complex = cgf.emitComplexExpr(op);
return cgf.builder.createComplexReal(loc, complex);
}

// Otherwise, calculate and project.
cgf.cgm.errorNYI(e->getSourceRange(),
"VisitUnaryReal calculate and project");
}

return Visit(op);
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitRealImag(e, promotionTy);
if (result && !promotionTy.isNull())
result = emitUnPromotedValue(result, e->getType());
return result;
}

mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
// TODO(cir): handle scalar promotion.
QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
mlir::Value result = VisitRealImag(e, promotionTy);
if (result && !promotionTy.isNull())
result = emitUnPromotedValue(result, e->getType());
return result;
}

mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
QualType promotionTy) {
assert((e->getOpcode() == clang::UO_Real ||
        e->getOpcode() == clang::UO_Imag) &&
       "Invalid UnaryOp kind for ComplexType Real or Imag");

Expr *op = e->getSubExpr();
if (op->getType()->isAnyComplexType()) {
// If it's an l-value, load through the appropriate subobject l-value.
@@ -2087,15 +2090,26 @@ mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
if (e->isGLValue()) {
mlir::Location loc = cgf.getLoc(e->getExprLoc());
mlir::Value complex = cgf.emitComplexExpr(op);
return cgf.builder.createComplexImag(loc, complex);
if (!promotionTy.isNull()) {
complex = cgf.emitPromotedValue(complex, promotionTy);
}

return e->getOpcode() == clang::UO_Real
? builder.createComplexReal(loc, complex)
: builder.createComplexImag(loc, complex);
}

// Otherwise, calculate and project.
cgf.cgm.errorNYI(e->getSourceRange(),
"VisitUnaryImag calculate and project");
"VisitRealImag calculate and project");
return {};
}

return Visit(op);
// __real on a scalar yields the operand itself; __imag on a scalar yields
// zero. The subexpression still has to be emitted for its side effects.
cgf.cgm.errorNYI(e->getSourceRange(),
"VisitRealImag __real or __imag on a scalar");
return {};
}
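
The scalar branch above is still NYI; the GNU extension semantics it will eventually need to match, as a minimal illustration (not part of this patch):

  int x = 42;
  int r = __real__ x;  // yields x itself (42)
  int i = __imag__ x;  // yields 0; x is still evaluated for its side effects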

/// Return the size or alignment of the type of argument of the sizeof
74 changes: 74 additions & 0 deletions clang/test/CIR/CodeGen/complex.cpp
@@ -927,3 +927,77 @@ void foo34() {
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: store float 1.000000e+00, ptr %[[A_REAL_PTR]], align 8
// OGCG: store float 2.000000e+00, ptr %[[A_IMAG_PTR]], align 4

void foo35() {
_Float16 _Complex a;
_Float16 real = __real__ a;
}

// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
// CIR: %[[REAL_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["real", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_REAL_F32:.*]] = cir.complex.real %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_REAL_F16:.*]] = cir.cast(floating, %[[A_REAL_F32]] : !cir.float), !cir.f16
// CIR: cir.store{{.*}} %[[A_REAL_F16]], %[[REAL_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>

// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
// LLVM: %[[REAL_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
// LLVM: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
// LLVM: store half %[[A_REAL_F16]], ptr %[[REAL_ADDR]], align 2

// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
// OGCG: %[[REAL_ADDR:.*]] = alloca half, align 2
// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 0
// OGCG: %[[A_REAL:.*]] = load half, ptr %[[A_REAL_PTR]], align 2
// OGCG: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
// OGCG: %[[A_REAL_F16:.*]] = fptrunc float %[[A_REAL_F32]] to half
// OGCG: store half %[[A_REAL_F16]], ptr %[[REAL_ADDR]], align 2

void foo36() {
_Float16 _Complex a;
_Float16 imag = __imag__ a;
}

// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.f16>, !cir.ptr<!cir.complex<!cir.f16>>, ["a"]
// CIR: %[[IMAG_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["imag", init]
// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.f16>>, !cir.complex<!cir.f16>
// CIR: %[[A_REAL:.*]] = cir.complex.real %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_IMAG:.*]] = cir.complex.imag %[[TMP_A]] : !cir.complex<!cir.f16> -> !cir.f16
// CIR: %[[A_REAL_F32:.*]] = cir.cast(floating, %[[A_REAL]] : !cir.f16), !cir.float
// CIR: %[[A_IMAG_F32:.*]] = cir.cast(floating, %[[A_IMAG]] : !cir.f16), !cir.float
// CIR: %[[A_COMPLEX_F32:.*]] = cir.complex.create %[[A_REAL_F32]], %[[A_IMAG_F32]] : !cir.float -> !cir.complex<!cir.float>
// CIR: %[[A_IMAG_F32:.*]] = cir.complex.imag %[[A_COMPLEX_F32]] : !cir.complex<!cir.float> -> !cir.float
// CIR: %[[A_IMAG_F16:.*]] = cir.cast(floating, %[[A_IMAG_F32]] : !cir.float), !cir.f16
// CIR: cir.store{{.*}} %[[A_IMAG_F16]], %[[IMAG_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>

// LLVM: %[[A_ADDR:.*]] = alloca { half, half }, i64 1, align 2
// LLVM: %[[IMAG_ADDR:.*]] = alloca half, i64 1, align 2
// LLVM: %[[TMP_A:.*]] = load { half, half }, ptr %[[A_ADDR]], align 2
// LLVM: %[[A_REAL:.*]] = extractvalue { half, half } %[[TMP_A]], 0
// LLVM: %[[A_IMAG:.*]] = extractvalue { half, half } %[[TMP_A]], 1
// LLVM: %[[A_REAL_F32:.*]] = fpext half %[[A_REAL]] to float
// LLVM: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
// LLVM: %[[TMP_A_COMPLEX_F32:.*]] = insertvalue { float, float } {{.*}}, float %[[A_REAL_F32]], 0
// LLVM: %[[A_COMPLEX_F32:.*]] = insertvalue { float, float } %[[TMP_A_COMPLEX_F32]], float %[[A_IMAG_F32]], 1
// LLVM: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
// LLVM: store half %[[A_IMAG_F16]], ptr %[[IMAG_ADDR]], align 2

// OGCG: %[[A_ADDR:.*]] = alloca { half, half }, align 2
// OGCG: %[[IMAG_ADDR:.*]] = alloca half, align 2
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { half, half }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load half, ptr %[[A_IMAG_PTR]], align 2
// OGCG: %[[A_IMAG_F32:.*]] = fpext half %[[A_IMAG]] to float
// OGCG: %[[A_IMAG_F16:.*]] = fptrunc float %[[A_IMAG_F32]] to half
// OGCG: store half %[[A_IMAG_F16]], ptr %[[IMAG_ADDR]], align 2
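
A complex prvalue operand (for example the result of an addition) is not a glvalue, so it still takes the errorNYI "calculate and project" branch in VisitRealImag; a hypothetical example, not covered by the tests above:

  void foo37() {
    float _Complex a, b;
    float r = __real__ (a + b); // prvalue complex operand: still NYI in CIR
  }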