@@ -1092,4 +1092,111 @@ void imag_on_non_glvalue() {
// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
- // OGCG: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4
+ // OGCG: store float %[[A_IMAG]], ptr %[[B_ADDR]], align 4
+
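+ // __real__ on a real (non-complex) glvalue yields the operand itself, so this lowers to a plain load and store.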
+ void real_on_scalar_glvalue() {
+ float a;
+ float b = __real__ a;
+ }
+
+ // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+ // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+ // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.float>, !cir.float
+ // CIR: cir.store{{.*}} %[[TMP_A]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+ // LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+ // LLVM: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4
+
+ // OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+ // OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+ // OGCG: %[[TMP_A:.*]] = load float, ptr %[[A_ADDR]], align 4
+ // OGCG: store float %[[TMP_A]], ptr %[[B_ADDR]], align 4
+
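+ // __imag__ on a real scalar is identically zero; note that 'a' is never loaded, only the zero store is emitted.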
+ void imag_on_scalar_glvalue() {
+ float a;
+ float b = __imag__ a;
+ }
+
+ // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+ // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+ // CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+ // CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+ // LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
+ // OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+ // OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+ // OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
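+ // _Float16 operands are promoted to float for the operation and truncated back to half, hence the fpext/fptrunc pair.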
+ void real_on_scalar_with_type_promotion() {
+ _Float16 a;
+ _Float16 b = __real__ a;
+ }
+
+ // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
+ // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
+ // CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.f16>, !cir.f16
+ // CIR: %[[TMP_A_F32:.*]] = cir.cast(floating, %[[TMP_A]] : !cir.f16), !cir.float
+ // CIR: %[[TMP_A_F16:.*]] = cir.cast(floating, %[[TMP_A_F32]] : !cir.float), !cir.f16
+ // CIR: cir.store{{.*}} %[[TMP_A_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
+
+ // LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
+ // LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
+ // LLVM: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
+ // LLVM: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float
+ // LLVM: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half
+ // LLVM: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2
+
+ // OGCG: %[[A_ADDR:.*]] = alloca half, align 2
+ // OGCG: %[[B_ADDR:.*]] = alloca half, align 2
+ // OGCG: %[[TMP_A:.*]] = load half, ptr %[[A_ADDR]], align 2
+ // OGCG: %[[TMP_A_F32:.*]] = fpext half %[[TMP_A]] to float
+ // OGCG: %[[TMP_A_F16:.*]] = fptrunc float %[[TMP_A_F32]] to half
+ // OGCG: store half %[[TMP_A_F16]], ptr %[[B_ADDR]], align 2
+
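+ // Here the zero result is materialized as a float constant and then truncated to half.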
+ void imag_on_scalar_with_type_promotion() {
+ _Float16 a;
+ _Float16 b = __imag__ a;
+ }
+
+ // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["a"]
+ // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.f16, !cir.ptr<!cir.f16>, ["b", init]
+ // CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+ // CIR: %[[CONST_ZERO_F16:.*]] = cir.cast(floating, %[[CONST_ZERO]] : !cir.float), !cir.f16
+ // CIR: cir.store{{.*}} %[[CONST_ZERO_F16]], %[[B_ADDR]] : !cir.f16, !cir.ptr<!cir.f16>
+
+ // LLVM: %[[A_ADDR:.*]] = alloca half, i64 1, align 2
+ // LLVM: %[[B_ADDR:.*]] = alloca half, i64 1, align 2
+ // LLVM: store half 0xH0000, ptr %[[B_ADDR]], align 2
+
+ // OGCG: %[[A_ADDR:.*]] = alloca half, align 2
+ // OGCG: %[[B_ADDR:.*]] = alloca half, align 2
+ // OGCG: store half 0xH0000, ptr %[[B_ADDR]], align 2
+
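+ // __imag__ of a real constant folds to zero; CIR still materializes the 1.0f constant, but only the zero is stored.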
+ void imag_on_const_scalar() {
+ float a;
+ float b = __imag__ 1.0f;
+ }
+
+ // CIR: %[[A_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a"]
+ // CIR: %[[B_ADDR:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", init]
+ // CIR: %[[CONST_ONE:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+ // CIR: %[[CONST_ZERO:.*]] = cir.const #cir.fp<0.000000e+00> : !cir.float
+ // CIR: cir.store{{.*}} %[[CONST_ZERO]], %[[B_ADDR]] : !cir.float, !cir.ptr<!cir.float>
+
+ // LLVM: %[[A_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: %[[B_ADDR:.*]] = alloca float, i64 1, align 4
+ // LLVM: store float 0.000000e+00, ptr %[[B_ADDR]], align 4
+
+ // OGCG: %[[A_ADDR:.*]] = alloca float, align 4
+ // OGCG: %[[B_ADDR:.*]] = alloca float, align 4
+ // OGCG: store float 0.000000e+00, ptr %[[B_ADDR]], align 4