@@ -64,11 +64,11 @@ void test_builtin_reduce_min(float4 vf1, si8 vi1, u4 vu1) {
 
 void test_builtin_reduce_addf(float4 vf4, double4 vd4) {
   // CHECK: [[VF4:%.+]] = load <4 x float>, ptr %vf4.addr, align 16
-  // CHECK-NEXT: call float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[VF4]])
+  // CHECK-NEXT: call float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[VF4]])
   float r2 = __builtin_reduce_add(vf4);
 
   // CHECK: [[VD4:%.+]] = load <4 x double>, ptr %vd4.addr, align 16
-  // CHECK-NEXT: call double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[VD4]])
+  // CHECK-NEXT: call double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[VD4]])
   double r3 = __builtin_reduce_add(vd4);
 }
 
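The start value passed to @llvm.vector.reduce.fadd is the seed of the reduction, so it has to be the additive identity, which under IEEE-754 is -0.0 rather than +0.0: adding +0.0 to -0.0 produces +0.0, so a +0.0 seed can silently flip the sign of a reduction whose lanes are all -0.0, while a -0.0 seed preserves it for every input. A minimal scalar C sketch of the effect (illustration only, not part of the patch):

```c
#include <math.h>
#include <stdio.h>

/* Illustration only, not part of the patch: why -0.0 is the neutral
 * element for a floating-point add reduction. Adding +0.0 to -0.0
 * yields +0.0, so a +0.0 seed loses the sign; a -0.0 seed keeps it. */
int main(void) {
  double lane = -0.0;              /* a vector lane holding -0.0      */
  double seeded_pos = 0.0 + lane;  /* +0.0: sign of the lane is lost  */
  double seeded_neg = -0.0 + lane; /* -0.0: sign of the lane is kept  */
  printf("%d %d\n", signbit(seeded_pos) != 0, signbit(seeded_neg) != 0);
  /* prints: 0 1 */
  return 0;
}
```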
@@ -96,11 +96,11 @@ void test_builtin_reduce_add(si8 vi1, u4 vu1) {
 
 void test_builtin_reduce_mulf(float4 vf4, double4 vd4) {
   // CHECK: [[VF4:%.+]] = load <4 x float>, ptr %vf4.addr, align 16
-  // CHECK-NEXT: call float @llvm.vector.reduce.fmul.v4f32(float 0.000000e+00, <4 x float> [[VF4]])
+  // CHECK-NEXT: call float @llvm.vector.reduce.fmul.v4f32(float 1.000000e+00, <4 x float> [[VF4]])
   float r2 = __builtin_reduce_mul(vf4);
 
   // CHECK: [[VD4:%.+]] = load <4 x double>, ptr %vd4.addr, align 16
-  // CHECK-NEXT: call double @llvm.vector.reduce.fmul.v4f64(double 0.000000e+00, <4 x double> [[VD4]])
+  // CHECK-NEXT: call double @llvm.vector.reduce.fmul.v4f64(double 1.000000e+00, <4 x double> [[VD4]])
   double r3 = __builtin_reduce_mul(vd4);
 }
 
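Similarly, @llvm.vector.reduce.fmul must be seeded with the multiplicative identity 1.0; the previous 0.0 seed would absorb every lane and force the reduction to return 0.0 regardless of the input. A minimal scalar C sketch (illustration only, not part of the patch):

```c
#include <stdio.h>

/* Illustration only, not part of the patch: why a multiply reduction
 * must be seeded with 1.0. A 0.0 seed absorbs every factor, so the
 * reduction always returns 0.0; a 1.0 seed leaves the product intact. */
int main(void) {
  const double v[4] = {2.0, 3.0, 4.0, 5.0};
  double seeded_zero = 0.0, seeded_one = 1.0;
  for (int i = 0; i < 4; ++i) {
    seeded_zero *= v[i];
    seeded_one *= v[i];
  }
  printf("%g %g\n", seeded_zero, seeded_one); /* prints: 0 120 */
  return 0;
}
```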