
Commit ce170d2

[CIR] Implement Unary real & imag on scalar expr (llvm#159916)
This change implements the unary __real__ and __imag__ operators on scalar (non-complex) expressions. Issue: llvm#141365
1 parent 51fa119 commit ce170d2
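
The patch covers the GNU __real__/__imag__ extension applied to non-complex operands. A minimal sketch (not part of the patch) of the semantics being implemented: __real__ of a scalar yields the scalar itself, and __imag__ yields a zero of the same type.

int main() {
  float a = 2.5f;
  float r = __real__ a; // r == 2.5f: __real__ of a scalar is the scalar itself
  float i = __imag__ a; // i == 0.0f: __imag__ of a scalar is zero of that type
  return (r == 2.5f && i == 0.0f) ? 0 : 1;
}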

File tree: 2 files changed (+119, -5 lines)


clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp

Lines changed: 16 additions & 4 deletions
@@ -2140,11 +2140,23 @@ mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
                : builder.createComplexImag(loc, complex);
   }
 
-  // __real or __imag on a scalar returns zero. Emit the subexpr to ensure side
+  if (e->getOpcode() == UO_Real) {
+    return promotionTy.isNull() ? Visit(op)
+                                : cgf.emitPromotedScalarExpr(op, promotionTy);
+  }
+
+  // __imag on a scalar returns zero. Emit the subexpr to ensure side
   // effects are evaluated, but not the actual value.
-  cgf.cgm.errorNYI(e->getSourceRange(),
-                   "VisitRealImag __real or __imag on a scalar");
-  return {};
+  if (op->isGLValue())
+    cgf.emitLValue(op);
+  else if (!promotionTy.isNull())
+    cgf.emitPromotedScalarExpr(op, promotionTy);
+  else
+    cgf.emitScalarExpr(op);
+
+  mlir::Type valueTy =
+      cgf.convertType(promotionTy.isNull() ? e->getType() : promotionTy);
+  return builder.getNullValue(valueTy, loc);
 }
 
 /// Return the size or alignment of the type of argument of the sizeof
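
The branch on op->isGLValue() in the hunk above controls which side effects are emitted for __imag__: a glvalue operand only has its lvalue formed (no load is needed, which is why the imag_on_scalar_glvalue test below emits no load of a), while a non-glvalue operand such as a call must be fully evaluated before the zero is produced. A source-level sketch of the two paths, using hypothetical function names:

float g;            // global scalar
int call_count = 0;
float next() { ++call_count; return g; } // operand with a side effect

float imag_of_glvalue() {
  // glvalue operand: emitLValue(op) forms the lvalue for g but emits no load;
  // the result is simply a zero of type float.
  return __imag__ g;
}

float imag_of_call() {
  // non-glvalue operand: emitScalarExpr(op) emits the call, so the increment
  // of call_count still happens; the call result is discarded and a zero of
  // type float is returned.
  return __imag__ next();
}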

clang/test/CIR/CodeGen/complex.cpp

Lines changed: 103 additions & 1 deletion
@@ -1092,4 +1092,106 @@ void imag_on_non_glvalue() {
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
 // OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4
+// OGCG: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4
+
+void real_on_scalar_glvalue() {
+  float a;
+  float b = __real__ a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+// CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// LLVM: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+// OGCG: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4
+
+void imag_on_scalar_glvalue() {
+  float a;
+  float b = __imag__ a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
+void real_on_scalar_with_type_promotion() {
+  _Float16 a;
+  _Float16 b = __real__ a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
+// CIR: %[[TMP_A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
+// CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A_F32]] : !cir.float), !cir.f16
+// CIR: cir.store{{.*}} %[[TMP_A_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
+
+// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
+// LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
+// LLVM: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float
+// LLVM: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half
+// LLVM: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca half, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca half, align 2
+// OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
+// OGCG: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float
+// OGCG: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half
+// OGCG: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2
+
+void imag_on_scalar_with_type_promotion() {
+  _Float16 a;
+  _Float16 b = __imag__ a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
+// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: %[[CONST_ZERO_F16:.*]] = cir.cast(floating, %[[CONST_ZERO]] : !cir.float), !cir.f16
+// CIR: cir.store{{.*}} %[[CONST_ZERO_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
+
+// LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
+// LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
+// LLVM: store half 0xH0000, ptr %[[B_ADDR]], align 2
+
+// OGCG: %[[A_ADDR:.*]] = alloca half, align 2
+// OGCG: %[[B_ADDR:.*]] = alloca half, align 2
+// OGCG: store half 0xH0000, ptr %[[B_ADDR]], align 2
+
+void imag_on_const_scalar() {
+  float a;
+  float b = __imag__ 1.0f;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+// CIR: %[[CONST_ONE:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+// CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+// LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+// LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+// OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
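
For context, the three check prefixes correspond to three pipelines: CIR is the ClangIR dialect output, LLVM is LLVM IR lowered through ClangIR, and OGCG is LLVM IR from the original (classic) CodeGen. The RUN header below is an assumption based on the usual pattern for clang/test/CIR/CodeGen tests; it is not part of this diff:

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG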
