@@ -134,22 +134,22 @@ long double truncl(long double);
 // RV32-NEXT:    [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV32-NEXT:    [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT:    [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV32-NEXT:    [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV32-NEXT:    [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT:    [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT:    [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV32-NEXT:    [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV32-NEXT:    [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT:    [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT:    [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV32-NEXT:    [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV32-NEXT:    [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV32-NEXT:    [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV32-NEXT:    [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV32-NEXT:    [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV32-NEXT:    [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV32-NEXT:    [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV32-NEXT:    [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV32-NEXT:    [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV32-NEXT:    [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV32-NEXT:    [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV32-NEXT:    [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV32-NEXT:    [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV32-NEXT:    [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
@@ -310,22 +310,22 @@ long double truncl(long double);
 // RV64-NEXT:    [[TMP44:%.*]] = call fp128 @llvm.floor.f128(fp128 [[TMP43]])
 // RV64-NEXT:    [[TMP45:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP46:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT:    [[TMP47:%.*]] = call float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
+// RV64-NEXT:    [[TMP47:%.*]] = call nsz float @llvm.maxnum.f32(float [[TMP45]], float [[TMP46]])
 // RV64-NEXT:    [[TMP48:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT:    [[TMP49:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT:    [[TMP50:%.*]] = call double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
+// RV64-NEXT:    [[TMP50:%.*]] = call nsz double @llvm.maxnum.f64(double [[TMP48]], double [[TMP49]])
 // RV64-NEXT:    [[TMP51:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT:    [[TMP52:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT:    [[TMP53:%.*]] = call fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
+// RV64-NEXT:    [[TMP53:%.*]] = call nsz fp128 @llvm.maxnum.f128(fp128 [[TMP51]], fp128 [[TMP52]])
 // RV64-NEXT:    [[TMP54:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP55:%.*]] = load float, ptr [[FARG_ADDR]], align 4
-// RV64-NEXT:    [[TMP56:%.*]] = call float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
+// RV64-NEXT:    [[TMP56:%.*]] = call nsz float @llvm.minnum.f32(float [[TMP54]], float [[TMP55]])
 // RV64-NEXT:    [[TMP57:%.*]] = load double, ptr [[DARG_ADDR]], align 8
 // RV64-NEXT:    [[TMP58:%.*]] = load double, ptr [[DARG_ADDR]], align 8
-// RV64-NEXT:    [[TMP59:%.*]] = call double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
+// RV64-NEXT:    [[TMP59:%.*]] = call nsz double @llvm.minnum.f64(double [[TMP57]], double [[TMP58]])
 // RV64-NEXT:    [[TMP60:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
 // RV64-NEXT:    [[TMP61:%.*]] = load fp128, ptr [[LDARG_ADDR]], align 16
-// RV64-NEXT:    [[TMP62:%.*]] = call fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
+// RV64-NEXT:    [[TMP62:%.*]] = call nsz fp128 @llvm.minnum.f128(fp128 [[TMP60]], fp128 [[TMP61]])
 // RV64-NEXT:    [[TMP63:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[TMP64:%.*]] = load float, ptr [[FARG_ADDR]], align 4
 // RV64-NEXT:    [[FMOD:%.*]] = frem float [[TMP63]], [[TMP64]]
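
Context for the hunks above: the updated checks show the llvm.maxnum/llvm.minnum intrinsics emitted for the fmax/fmin libm family now carrying the nsz (no signed zeros) fast-math flag, identically for the riscv32 (RV32) and riscv64 (RV64) run lines, while the neighbouring llvm.floor and frem checks are untouched. The following C sketch illustrates the kind of source that produces this IR shape; the function and parameter names (use_minmax, farg, darg, ldarg) are illustrative assumptions rather than the test's actual code, and the reloads from the *_ADDR allocas in the checks suggest unoptimized output.

#include <math.h>

/* Illustrative sketch only: each libm call below is lowered to the matching
 * llvm.maxnum / llvm.minnum intrinsic, which per the updated checks is now
 * tagged nsz. On RISC-V, long double is IEEE quad, hence the f128 variants. */
void use_minmax(float farg, double darg, long double ldarg) {
  fmaxf(farg, farg);   /* call nsz float  @llvm.maxnum.f32  */
  fmax(darg, darg);    /* call nsz double @llvm.maxnum.f64  */
  fmaxl(ldarg, ldarg); /* call nsz fp128  @llvm.maxnum.f128 */
  fminf(farg, farg);   /* call nsz float  @llvm.minnum.f32  */
  fmin(darg, darg);    /* call nsz double @llvm.minnum.f64  */
  fminl(ldarg, ldarg); /* call nsz fp128  @llvm.minnum.f128 */
  fmodf(farg, farg);   /* frem float -- unchanged by this diff */
}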