Skip to content

Commit 2170e67

Browse files
committed
Clang: emit llvm.minnum and llvm.maxnum with nsz always
See: #112852. We will define llvm.minnum and llvm.maxnum with the semantics +0.0 > -0.0 by default, even though libc does not require it. Changes included: fix test cases for -ffp-exception-behavior=strict; add a missing builtin test; test auto-vectorization; fix test cases using disable-llvm-passes; fix the elementwise builtins; fix some remaining tests; update test cases.
1 parent 0bbf2ea commit 2170e67

File tree

13 files changed

+1265
-119
lines changed

13 files changed

+1265
-119
lines changed

clang/lib/CodeGen/CGBuiltin.cpp

Lines changed: 30 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -602,19 +602,20 @@ Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
602602

603603
// Emit an intrinsic that has 2 operands of the same type as its result.
604604
// Depending on mode, this may be a constrained floating-point intrinsic.
605-
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
606-
const CallExpr *E, unsigned IntrinsicID,
607-
unsigned ConstrainedIntrinsicID) {
605+
static Value *emitBinaryMaybeConstrainedFPBuiltin(
606+
CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID,
607+
unsigned ConstrainedIntrinsicID, llvm::FastMathFlags *FMF = nullptr) {
608608
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
609609
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
610610

611611
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
612612
if (CGF.Builder.getIsFPConstrained()) {
613613
Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
614-
return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
614+
return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1}, "",
615+
std::nullopt, std::nullopt, FMF);
615616
} else {
616617
Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
617-
return CGF.Builder.CreateCall(F, { Src0, Src1 });
618+
return CGF.Builder.CreateCall(F, {Src0, Src1}, "", nullptr, FMF);
618619
}
619620
}
620621

@@ -2890,10 +2891,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
28902891
case Builtin::BI__builtin_fmaxf:
28912892
case Builtin::BI__builtin_fmaxf16:
28922893
case Builtin::BI__builtin_fmaxl:
2893-
case Builtin::BI__builtin_fmaxf128:
2894-
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2895-
Intrinsic::maxnum,
2896-
Intrinsic::experimental_constrained_maxnum));
2894+
case Builtin::BI__builtin_fmaxf128: {
2895+
llvm::FastMathFlags FMF;
2896+
FMF.setNoSignedZeros();
2897+
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2898+
*this, E, Intrinsic::maxnum,
2899+
Intrinsic::experimental_constrained_maxnum, &FMF));
2900+
}
28972901

28982902
case Builtin::BIfmin:
28992903
case Builtin::BIfminf:
@@ -2902,10 +2906,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
29022906
case Builtin::BI__builtin_fminf:
29032907
case Builtin::BI__builtin_fminf16:
29042908
case Builtin::BI__builtin_fminl:
2905-
case Builtin::BI__builtin_fminf128:
2906-
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2907-
Intrinsic::minnum,
2908-
Intrinsic::experimental_constrained_minnum));
2909+
case Builtin::BI__builtin_fminf128: {
2910+
llvm::FastMathFlags FMF;
2911+
FMF.setNoSignedZeros();
2912+
return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2913+
*this, E, Intrinsic::minnum,
2914+
Intrinsic::experimental_constrained_minnum, &FMF));
2915+
}
29092916

29102917
case Builtin::BIfmaximum_num:
29112918
case Builtin::BIfmaximum_numf:
@@ -4093,8 +4100,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
40934100
Result = Builder.CreateBinaryIntrinsic(
40944101
Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
40954102
Op1, nullptr, "elt.max");
4096-
} else
4097-
Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4103+
} else {
4104+
FastMathFlags FMF;
4105+
FMF.setNoSignedZeros(true);
4106+
Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/FMF, "elt.max");
4107+
}
40984108
return RValue::get(Result);
40994109
}
41004110
case Builtin::BI__builtin_elementwise_min: {
@@ -4108,8 +4118,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
41084118
Result = Builder.CreateBinaryIntrinsic(
41094119
Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
41104120
Op1, nullptr, "elt.min");
4111-
} else
4112-
Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4121+
} else {
4122+
FastMathFlags FMF;
4123+
FMF.setNoSignedZeros(true);
4124+
Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/FMF, "elt.min");
4125+
}
41134126
return RValue::get(Result);
41144127
}
41154128

clang/test/CodeGen/RISCV/math-builtins.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -134,22 +134,22 @@ long double truncl(long double);
134134
// RV32-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
135135
// RV32-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
136136
// RV32-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
137-
// RV32-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
137+
// RV32-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
138138
// RV32-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
139139
// RV32-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
140-
// RV32-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
140+
// RV32-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
141141
// RV32-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
142142
// RV32-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
143-
// RV32-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
143+
// RV32-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
144144
// RV32-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
145145
// RV32-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
146-
// RV32-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
146+
// RV32-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
147147
// RV32-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
148148
// RV32-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
149-
// RV32-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
149+
// RV32-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
150150
// RV32-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
151151
// RV32-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
152-
// RV32-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
152+
// RV32-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
153153
// RV32-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
154154
// RV32-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
155155
// RV32-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
310310
// RV64-NEXT: [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
311311
// RV64-NEXT: [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
312312
// RV64-NEXT: [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
313-
// RV64-NEXT: [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
313+
// RV64-NEXT: [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
314314
// RV64-NEXT: [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
315315
// RV64-NEXT: [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
316-
// RV64-NEXT: [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
316+
// RV64-NEXT: [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
317317
// RV64-NEXT: [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
318318
// RV64-NEXT: [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
319-
// RV64-NEXT: [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
319+
// RV64-NEXT: [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
320320
// RV64-NEXT: [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
321321
// RV64-NEXT: [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
322-
// RV64-NEXT: [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
322+
// RV64-NEXT: [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
323323
// RV64-NEXT: [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
324324
// RV64-NEXT: [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
325-
// RV64-NEXT: [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
325+
// RV64-NEXT: [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
326326
// RV64-NEXT: [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
327327
// RV64-NEXT: [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
328-
// RV64-NEXT: [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
328+
// RV64-NEXT: [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
329329
// RV64-NEXT: [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
330330
// RV64-NEXT: [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
331331
// RV64-NEXT: [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]

clang/test/CodeGen/builtins-elementwise-math.c

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -347,21 +347,21 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
347347
// CHECK-LABEL: define void @test_builtin_elementwise_max(
348348
// CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
349349
// CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
350-
// CHECK-NEXT: call float @llvm.maxnum.f32(float [[F1]], float [[F2]])
350+
// CHECK-NEXT: call nsz float @llvm.maxnum.f32(float [[F1]], float [[F2]])
351351
f1 = __builtin_elementwise_max(f1, f2);
352352

353353
// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
354354
// CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
355-
// CHECK-NEXT: call double @llvm.maxnum.f64(double [[D1]], double [[D2]])
355+
// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double [[D1]], double [[D2]])
356356
d1 = __builtin_elementwise_max(d1, d2);
357357

358358
// CHECK: [[D2:%.+]] = load double, ptr %d2.addr, align 8
359-
// CHECK-NEXT: call double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
359+
// CHECK-NEXT: call nsz double @llvm.maxnum.f64(double 2.000000e+01, double [[D2]])
360360
d1 = __builtin_elementwise_max(20.0, d2);
361361

362362
// CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
363363
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
364-
// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
364+
// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
365365
vf1 = __builtin_elementwise_max(vf1, vf2);
366366

367367
// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -404,13 +404,13 @@ void test_builtin_elementwise_max(float f1, float f2, double d1, double d2,
404404

405405
// CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
406406
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
407-
// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
407+
// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
408408
const float4 cvf1 = vf1;
409409
vf1 = __builtin_elementwise_max(cvf1, vf2);
410410

411411
// CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
412412
// CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
413-
// CHECK-NEXT: call <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
413+
// CHECK-NEXT: call nsz <4 x float> @llvm.maxnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
414414
vf1 = __builtin_elementwise_max(vf2, cvf1);
415415

416416
// CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4
@@ -431,21 +431,21 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
431431
// CHECK-LABEL: define void @test_builtin_elementwise_min(
432432
// CHECK: [[F1:%.+]] = load float, ptr %f1.addr, align 4
433433
// CHECK-NEXT: [[F2:%.+]] = load float, ptr %f2.addr, align 4
434-
// CHECK-NEXT: call float @llvm.minnum.f32(float [[F1]], float [[F2]])
434+
// CHECK-NEXT: call nsz float @llvm.minnum.f32(float [[F1]], float [[F2]])
435435
f1 = __builtin_elementwise_min(f1, f2);
436436

437437
// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
438438
// CHECK-NEXT: [[D2:%.+]] = load double, ptr %d2.addr, align 8
439-
// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double [[D2]])
439+
// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double [[D2]])
440440
d1 = __builtin_elementwise_min(d1, d2);
441441

442442
// CHECK: [[D1:%.+]] = load double, ptr %d1.addr, align 8
443-
// CHECK-NEXT: call double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
443+
// CHECK-NEXT: call nsz double @llvm.minnum.f64(double [[D1]], double 2.000000e+00)
444444
d1 = __builtin_elementwise_min(d1, 2.0);
445445

446446
// CHECK: [[VF1:%.+]] = load <4 x float>, ptr %vf1.addr, align 16
447447
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
448-
// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
448+
// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF1]], <4 x float> [[VF2]])
449449
vf1 = __builtin_elementwise_min(vf1, vf2);
450450

451451
// CHECK: [[I1:%.+]] = load i64, ptr %i1.addr, align 8
@@ -495,13 +495,13 @@ void test_builtin_elementwise_min(float f1, float f2, double d1, double d2,
495495

496496
// CHECK: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
497497
// CHECK-NEXT: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
498-
// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
498+
// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[CVF1]], <4 x float> [[VF2]])
499499
const float4 cvf1 = vf1;
500500
vf1 = __builtin_elementwise_min(cvf1, vf2);
501501

502502
// CHECK: [[VF2:%.+]] = load <4 x float>, ptr %vf2.addr, align 16
503503
// CHECK-NEXT: [[CVF1:%.+]] = load <4 x float>, ptr %cvf1, align 16
504-
// CHECK-NEXT: call <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
504+
// CHECK-NEXT: call nsz <4 x float> @llvm.minnum.v4f32(<4 x float> [[VF2]], <4 x float> [[CVF1]])
505505
vf1 = __builtin_elementwise_min(vf2, cvf1);
506506

507507
// CHECK: [[IAS1:%.+]] = load i32, ptr addrspace(1) @int_as_one, align 4

clang/test/CodeGen/builtins.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -344,22 +344,22 @@ void test_float_builtin_ops(float F, double D, long double LD, int I) {
344344
// CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80
345345

346346
resf = __builtin_fminf(F, F);
347-
// CHECK: call float @llvm.minnum.f32
347+
// CHECK: call nsz float @llvm.minnum.f32
348348

349349
resd = __builtin_fmin(D, D);
350-
// CHECK: call double @llvm.minnum.f64
350+
// CHECK: call nsz double @llvm.minnum.f64
351351

352352
resld = __builtin_fminl(LD, LD);
353-
// CHECK: call x86_fp80 @llvm.minnum.f80
353+
// CHECK: call nsz x86_fp80 @llvm.minnum.f80
354354

355355
resf = __builtin_fmaxf(F, F);
356-
// CHECK: call float @llvm.maxnum.f32
356+
// CHECK: call nsz float @llvm.maxnum.f32
357357

358358
resd = __builtin_fmax(D, D);
359-
// CHECK: call double @llvm.maxnum.f64
359+
// CHECK: call nsz double @llvm.maxnum.f64
360360

361361
resld = __builtin_fmaxl(LD, LD);
362-
// CHECK: call x86_fp80 @llvm.maxnum.f80
362+
// CHECK: call nsz x86_fp80 @llvm.maxnum.f80
363363

364364
resf = __builtin_fminimum_numf(F, F);
365365
// CHECK: call float @llvm.minimumnum.f32

clang/test/CodeGen/constrained-math-builtins.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -123,17 +123,17 @@ __builtin_atan2(f,f); __builtin_atan2f(f,f); __builtin_atan2l(f,f);
123123

124124
__builtin_fmax(f,f); __builtin_fmaxf(f,f); __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);
125125

126-
// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
127-
// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
128-
// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
129-
// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
126+
// CHECK: call nsz double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
127+
// CHECK: call nsz float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
128+
// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
129+
// CHECK: call nsz fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
130130

131131
__builtin_fmin(f,f); __builtin_fminf(f,f); __builtin_fminl(f,f); __builtin_fminf128(f,f);
132132

133-
// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
134-
// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
135-
// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
136-
// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
133+
// CHECK: call nsz double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
134+
// CHECK: call nsz float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
135+
// CHECK: call nsz x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
136+
// CHECK: call nsz fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")
137137

138138
__builtin_llrint(f); __builtin_llrintf(f); __builtin_llrintl(f); __builtin_llrintf128(f);
139139

0 commit comments

Comments
 (0)