@@ -382,7 +382,7 @@ define <16 x i8> @trunc_add_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    paddw %xmm2, %xmm0
 ; SSE-NEXT:    paddw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -798,7 +798,7 @@ define <16 x i8> @trunc_add_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_add_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_add_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1226,7 +1226,7 @@ define <16 x i8> @trunc_sub_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    psubw %xmm2, %xmm0
 ; SSE-NEXT:    psubw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -1610,7 +1610,7 @@ define <16 x i8> @trunc_sub_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_sub_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_sub_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2201,7 +2201,7 @@ define <16 x i8> @trunc_mul_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw %xmm2, %xmm0
 ; SSE-NEXT:    pmullw %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -2706,7 +2706,7 @@ define <16 x i8> @trunc_mul_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3106,7 +3106,7 @@ define <16 x i8> @trunc_and_v16i32_v16i8(<16 x i32> %a0, <16 x i32> %a1) nounwind {
 define <16 x i8> @trunc_and_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE-LABEL: trunc_and_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm4 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm4, %xmm3
 ; SSE-NEXT:    pand %xmm1, %xmm3
 ; SSE-NEXT:    pand %xmm4, %xmm2
@@ -3471,7 +3471,7 @@ define <16 x i8> @trunc_and_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_and_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_and_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -3871,7 +3871,7 @@ define <16 x i8> @trunc_xor_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm0
 ; SSE-NEXT:    pxor %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4234,7 +4234,7 @@ define <16 x i8> @trunc_xor_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_xor_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_xor_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4634,7 +4634,7 @@ define <16 x i8> @trunc_or_v16i16_v16i8(<16 x i16> %a0, <16 x i16> %a1) nounwind {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    por %xmm2, %xmm0
 ; SSE-NEXT:    por %xmm3, %xmm1
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
@@ -4997,7 +4997,7 @@ define <16 x i8> @trunc_or_const_v16i32_v16i8(<16 x i32> %a0) nounwind {
 define <16 x i8> @trunc_or_const_v16i16_v16i8(<16 x i16> %a0) nounwind {
 ; SSE-LABEL: trunc_or_const_v16i16_v16i8:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm0
 ; SSE-NEXT:    packuswb %xmm1, %xmm0
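
For context, the repeated pattern these CHECK lines assert (mask each 16-bit lane down to its low byte, then pack two registers into one) corresponds to the SSE2 intrinsics sketch below. The wrapper function and its name are illustrative only, not part of the test or the commit:

#include <emmintrin.h> /* SSE2 */

/* Illustrative sketch of the pand + packuswb idiom checked above:
 * truncate <16 x i16> (held in two XMM registers) to <16 x i8>.
 * Masking each word with 0x00FF keeps only the low byte, which is the
 * [255,0,255,0,...] byte pattern the updated constant comments print,
 * and makes the saturating pack behave as an exact truncation. */
static __m128i trunc_v16i16_v16i8(__m128i lo, __m128i hi) {
    const __m128i mask = _mm_set1_epi16(0x00FF);
    lo = _mm_and_si128(lo, mask);    /* pand %xmm2, %xmm0 */
    hi = _mm_and_si128(hi, mask);    /* pand %xmm2, %xmm1 */
    return _mm_packus_epi16(lo, hi); /* packuswb %xmm1, %xmm0 */
}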