@@ -248,6 +248,41 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
   ret <4 x float> %2
 }
 
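+; PR136368: copysign(select(fabs(x) > pi/2, pi/2, 0.0), x) -- the selected
+; constant magnitude must still take its sign from %x.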
+define double @PR136368(double %x) {
+; SSE-LABEL: PR136368:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd {{.*#+}} xmm1 = [NaN,NaN]
+; SSE-NEXT:    movapd %xmm0, %xmm2
+; SSE-NEXT:    andpd %xmm1, %xmm2
+; SSE-NEXT:    movsd {{.*#+}} xmm3 = [1.5707963267948966E+0,0.0E+0]
+; SSE-NEXT:    movapd %xmm3, %xmm4
+; SSE-NEXT:    cmpltsd %xmm2, %xmm4
+; SSE-NEXT:    andpd %xmm3, %xmm4
+; SSE-NEXT:    andpd %xmm1, %xmm4
+; SSE-NEXT:    andnpd %xmm0, %xmm1
+; SSE-NEXT:    orpd %xmm4, %xmm1
+; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: PR136368:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [NaN,NaN]
+; AVX-NEXT:    # xmm1 = mem[0,0]
+; AVX-NEXT:    vandpd %xmm1, %xmm0, %xmm2
+; AVX-NEXT:    vmovsd {{.*#+}} xmm3 = [1.5707963267948966E+0,0.0E+0]
+; AVX-NEXT:    vcmpltsd %xmm2, %xmm3, %xmm2
+; AVX-NEXT:    vandpd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vandnpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    vandpd %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vorpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fabs = tail call double @llvm.fabs.f64(double %x)
+  %cmp = fcmp ogt double %fabs, 0x3FF921FB54442D18
+  %cond = select i1 %cmp, double 0x3FF921FB54442D18, double 0.000000e+00
+  %res = tail call double @llvm.copysign.f64(double %cond, double %x)
+  ret double %res
+}
+
 declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
 declare <4 x float> @llvm.copysign.v4f32(<4 x float> %Mag, <4 x float> %Sgn)
 declare <4 x double> @llvm.copysign.v4f64(<4 x double> %Mag, <4 x double> %Sgn)