| | |
| --- | --- |
| Bugzilla Link | 21780 |
| Version | trunk |
| OS | All |
| CC | @adibiagio, @rotateright |
Extended Description
Follow-up to [Bug #22084], '[X86][AVX] suboptimal expansion of 256 bit vector loads'.
Merging of consecutive loads into a 256-bit ymm register now works well for simple cases, and the loads also fold nicely into bitwise ops (as well as basic float ops such as fadd, fsub, etc.).
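For example (a minimal sketch, not from the original report, assuming clang targeting AVX), a 256-bit vector built from four consecutive loads and consumed by a basic float op such as fadd should collapse into a single 32-byte load that folds into the vaddpd:

```cpp
#include <immintrin.h>

// Hypothetical illustration of the case that already works: the four scalar
// loads are expected to merge into one 32-byte load, which then folds into
// the generated vaddpd.
__m256d vadd_d4_fold(__m256d x, const double* ptr) {
  __m256d foo = (__m256d){ ptr[0], ptr[1], ptr[2], ptr[3] };
  return _mm256_add_pd(x, foo);
}
```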
Vector shuffle optimizations, however, attempt to selectively load only the individual lanes that are used, and in doing so they prevent the loads from being merged and folded into the shuffle.
e.g.

```cpp
__m256d vsht_d4(__m256d foo) {
  return __builtin_shufflevector( foo, foo, 0, 0, 2, 2 );
}
```

```llvm
define <4 x double> @_Z7vsht_d4Dv4_d(<4 x double> %foo) #1 {
  %1 = shufflevector <4 x double> %foo, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %1
}
```

```asm
vpermilpd $0, %ymm0, %ymm0 # ymm0 = ymm0[0,0,2,2]
retq
```

```cpp
__m256d vsht_d4_fold(const double* ptr) {
  __m256d foo = (__m256d){ ptr[0], ptr[1], ptr[2], ptr[3] };
  return __builtin_shufflevector( foo, foo, 0, 0, 2, 2 );
}
```

```llvm
define <4 x double> @_Z12vsht_d4_foldPKd(double* nocapture readonly %ptr) #0 {
  %1 = load double* %ptr, align 8, !tbaa !1
  %2 = insertelement <4 x double> undef, double %1, i32 0
  %3 = getelementptr inbounds double* %ptr, i64 2
  %4 = load double* %3, align 8, !tbaa !1
  %5 = insertelement <4 x double> %2, double %4, i32 2
  %6 = shufflevector <4 x double> %5, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %6
}
```

```asm
vmovsd (%rdi), %xmm0
vmovsd 16(%rdi), %xmm1
vinsertf128 $1, %xmm1, %ymm0, %ymm0
vpermilpd $0, %ymm0, %ymm0 # ymm0 = ymm0[0,0,2,2]
retq
```

Manually editing the IR (so that all four consecutive elements are loaded, rather than only lanes 0 and 2) does permit the fold to occur:
```llvm
define <4 x double> @_Z12vsht_d4_foldPKd(double* nocapture readonly %ptr) #0 {
  %1 = load double* %ptr, align 8, !tbaa !1
  %2 = insertelement <4 x double> undef, double %1, i32 0
  %3 = getelementptr inbounds double* %ptr, i64 1
  %4 = load double* %3, align 8, !tbaa !1
  %5 = insertelement <4 x double> %2, double %4, i32 1
  %6 = getelementptr inbounds double* %ptr, i64 2
  %7 = load double* %6, align 8, !tbaa !1
  %8 = insertelement <4 x double> %5, double %7, i32 2
  %9 = getelementptr inbounds double* %ptr, i64 3
  %10 = load double* %9, align 8, !tbaa !1
  %11 = insertelement <4 x double> %8, double %10, i32 3
  %12 = shufflevector <4 x double> %11, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
  ret <4 x double> %12
}
```

```asm
vpermilpd $0, (%rdi), %ymm0 # ymm0 = mem[0,0,2,2]
retq
```
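In source terms, the hand-edited IR corresponds to loading the full 256-bit vector up front. A hypothetical workaround sketch (not part of the original report, assuming clang targeting AVX) that should produce the same single folded load:

```cpp
#include <immintrin.h>

// Hypothetical source-level equivalent of the hand-edited IR: load all four
// consecutive doubles with a single 256-bit load, then shuffle. The load is
// then expected to fold into the vpermilpd, as in the edited IR above.
__m256d vsht_d4_fold_full(const double* ptr) {
  __m256d foo = _mm256_loadu_pd(ptr);
  return __builtin_shufflevector(foo, foo, 0, 0, 2, 2);
}
```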