@@ -31,21 +31,21 @@ void p(char *str, int x) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
 // CHECK: fpclassify_end:
 // CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
 // CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
 // CHECK: fpclassify_not_zero:
-// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
 // CHECK: fpclassify_not_nan:
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
 // CHECK: fpclassify_not_inf:
-// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
 // CHECK-NEXT: br label [[FPCLASSIFY_END]]
 //
@@ -60,7 +60,7 @@ void test_fpclassify(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -76,7 +76,7 @@ void test_fp16_isinf(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -92,7 +92,7 @@ void test_float_isinf(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -108,7 +108,7 @@ void test_double_isinf(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -124,7 +124,7 @@ void test_fp16_isfinite(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -140,7 +140,7 @@ void test_float_isfinite(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -156,8 +156,8 @@ void test_double_isfinite(double d) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
 // CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
 // CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
@@ -176,7 +176,7 @@ void test_isinf_sign(double d) {
 // CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
 // CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
 // CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -192,7 +192,7 @@ void test_fp16_isnan(_Float16 h) {
 // CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
 // CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -208,7 +208,7 @@ void test_float_isnan(float f) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
@@ -224,7 +224,7 @@ void test_double_isnan(double d) {
 // CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
 // CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR5]]
 // CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
 // CHECK-NEXT: call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
 // CHECK-NEXT: ret void
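
For reference, a minimal sketch of the kind of C source these CHECK lines verify, assuming a strict-FP CodeGen test compiled with -ffp-exception-behavior=strict. The function names come from the hunk headers above; the string arguments and the exact set of RUN lines are illustrative, not taken from the actual test file.

// Sketch only: inferred from the CHECK lines and hunk headers, not the real test source.
#pragma STDC FENV_ACCESS ON

void p(char *str, int x);

void test_fpclassify(double d) {
  // Lowers to the constrained-fcmp chain in the fpclassify_* blocks:
  // zero -> 4, NaN -> 0, infinity -> 1, normal -> 2, subnormal -> 3.
  p("fpclassify", __builtin_fpclassify(0, 1, 2, 3, 4, d));
}

void test_double_isinf(double d) {
  // Lowers to @llvm.is.fpclass.f64(..., i32 516); mask 516 covers +/- infinity.
  p("isinf", __builtin_isinf(d));
}

void test_double_isfinite(double d) {
  // Lowers to @llvm.is.fpclass.f64(..., i32 504); mask 504 covers all finite classes.
  p("isfinite", __builtin_isfinite(d));
}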