@@ -247,13 +247,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2,1,3]
 ; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
 ; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
-; AVX2-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm8
+; AVX2-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[0,1]
-; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0]
-; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm5, %xmm3
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[3,1]
-; AVX2-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
+; AVX2-SLOW-NEXT: vaddps %xmm1, %xmm3, %xmm1
 ; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-SLOW-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -268,13 +268,13 @@ define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x fl
 ; AVX2-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
 ; AVX2-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
 ; AVX2-FAST-NEXT: vhaddps %xmm4, %xmm4, %xmm1
-; AVX2-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm8
+; AVX2-FAST-NEXT: vhaddps %xmm5, %xmm5, %xmm4
 ; AVX2-FAST-NEXT: vhaddps %xmm3, %xmm2, %xmm2
-; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[0,2],xmm1[0,1]
-; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm8[0]
-; AVX2-FAST-NEXT: vhaddps %xmm4, %xmm5, %xmm3
-; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[3,1]
-; AVX2-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm3 = xmm2[0,2],xmm1[0,1]
+; AVX2-FAST-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
+; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
+; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
+; AVX2-FAST-NEXT: vaddps %xmm1, %xmm3, %xmm1
 ; AVX2-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX2-FAST-NEXT: vshufpd {{.*#+}} xmm1 = xmm1[1,0]
 ; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -424,7 +424,7 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
 ; AVX2-SLOW-NEXT: vpbroadcastd %xmm4, %xmm5
 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3]
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
 ; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
 ; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm3, %xmm1
 ; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -447,7 +447,7 @@ define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2,
 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
 ; AVX2-FAST-NEXT: vpbroadcastd %xmm4, %xmm5
 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0,1,2],xmm5[3]
-; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,1]
+; AVX2-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm2[1,3],xmm1[1,3]
 ; AVX2-FAST-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[3]
 ; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm3, %xmm1
 ; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]