@@ -7,11 +7,11 @@ define <32 x double> @test_load_32f64(ptr %ptrs, <32 x i1> %mask, <32 x double>
 ; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT: vblendmpd (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT: kshiftrd $8, %k1, %k2
 ; AVX512BW-NEXT: vblendmpd 64(%rdi), %zmm2, %zmm1 {%k2}
-; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
+; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
+; AVX512BW-NEXT: vblendmpd 128(%rdi), %zmm3, %zmm2 {%k2}
+; AVX512BW-NEXT: kshiftrd $24, %k1, %k1
 ; AVX512BW-NEXT: vblendmpd 192(%rdi), %zmm4, %zmm3 {%k1}
 ; AVX512BW-NEXT: retq
   %res = call <32 x double> @llvm.masked.load.v32f64.p0(ptr %ptrs, i32 4, <32 x i1> %mask, <32 x double> %src0)
@@ -24,11 +24,11 @@ define <32 x i64> @test_load_32i64(ptr %ptrs, <32 x i1> %mask, <32 x i64> %src0)
 ; AVX512BW-NEXT: vpsllw $7, %ymm0, %ymm0
 ; AVX512BW-NEXT: vpmovb2m %zmm0, %k1
 ; AVX512BW-NEXT: vpblendmq (%rdi), %zmm1, %zmm0 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k1, %k2
+; AVX512BW-NEXT: kshiftrd $8, %k1, %k2
 ; AVX512BW-NEXT: vpblendmq 64(%rdi), %zmm2, %zmm1 {%k2}
-; AVX512BW-NEXT: kshiftrd $16, %k1, %k1
-; AVX512BW-NEXT: vpblendmq 128(%rdi), %zmm3, %zmm2 {%k1}
-; AVX512BW-NEXT: kshiftrw $8, %k1, %k1
+; AVX512BW-NEXT: kshiftrd $16, %k1, %k2
+; AVX512BW-NEXT: vpblendmq 128(%rdi), %zmm3, %zmm2 {%k2}
+; AVX512BW-NEXT: kshiftrd $24, %k1, %k1
 ; AVX512BW-NEXT: vpblendmq 192(%rdi), %zmm4, %zmm3 {%k1}
 ; AVX512BW-NEXT: retq
   %res = call <32 x i64> @llvm.masked.load.v32i64.p0(ptr %ptrs, i32 4, <32 x i1> %mask, <32 x i64> %src0)