@@ -793,23 +793,21 @@ namespace xsimd
         {
             return vec_sqrt(val);
         }
-#if 0
 
         // slide_left
         template <size_t N, class A, class T>
         XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<altivec>) noexcept
         {
-            return _mm_slli_si128(x, N);
+            return vec_sll(x, vec_splat_u8(N));
         }
 
         // slide_right
         template <size_t N, class A, class T>
         XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<altivec>) noexcept
         {
-            return _mm_srli_si128(x, N);
+            return vec_srl(x, vec_splat_u8(N));
         }
 
-#endif
         // sadd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
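A note on the slide kernels above: _mm_slli_si128 and _mm_srli_si128 shift the 128-bit value by whole bytes, whereas vec_sll and vec_srl shift it by a bit count of 0 to 7 taken from the low three bits of the shift operand. The octet-granular AltiVec shifts are vec_slo and vec_sro, which expect every byte of the shift operand to hold the byte count multiplied by eight. The sketch below is a minimal standalone illustration under those assumptions, written against plain __vector unsigned char rather than xsimd's batch type, and using vec_splats because N * 8 can exceed the five-bit literal range of vec_splat_u8; how a register-level shift maps onto element indices still depends on the target's element order, so a kernel would have to pick the direction to match the SSE reference.

    #include <altivec.h>

    // Shift a 16-byte register left/right by a compile-time byte count,
    // the register-level operation _mm_slli_si128 / _mm_srli_si128 perform.
    template <unsigned N>
    __vector unsigned char byte_slide_left(__vector unsigned char x)
    {
        static_assert(N < 16, "slide must stay within the register");
        // vec_slo reads its octet count from the shift vector, encoded as N * 8.
        const __vector unsigned char shift = vec_splats((unsigned char)(N << 3));
        return vec_slo(x, shift);
    }

    template <unsigned N>
    __vector unsigned char byte_slide_right(__vector unsigned char x)
    {
        static_assert(N < 16, "slide must stay within the register");
        const __vector unsigned char shift = vec_splats((unsigned char)(N << 3));
        return vec_sro(x, shift);
    }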
@@ -862,10 +860,10 @@ namespace xsimd
             // Make a mask for which parts of the vectors to swap out
             auto mask = vec_perm(ox00, oxFF, permuteVector);
             // Right rotate our input data
-            v = vec_perm(self, self, permuteVector);
+            auto v = vec_perm(self, self, permuteVector);
             // Insert our data into the low and high vectors
-            low = vec_sel(self, low, mask);
-            high = vec_sel(high, self, mask);
+            low = vec_sel(v, low, mask);
+            high = vec_sel(high, v, mask);
             // Store the two aligned result vectors
             vec_st(low, 0, mem);
             vec_st(high, 16, mem);
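For context, the hunk above is the tail of the classic AltiVec misaligned-store idiom: read the two aligned quadwords covering the destination, rotate the payload with a vec_lvsr permute pattern, select it into place with a 0x00/0xFF byte mask, and write both quadwords back with aligned stores. The sketch below spells out the whole pattern with matching variable names; the setup lines before this hunk are not shown in the diff, so their exact form here is an assumption, and the helper name store_unaligned_u8 is illustrative rather than xsimd's API.

    #include <altivec.h>

    // Store one 16-byte vector to a possibly misaligned address by
    // read-modify-writing the two aligned quadwords it straddles.
    void store_unaligned_u8(__vector unsigned char self, unsigned char* mem)
    {
        // The two aligned vectors that straddle the destination
        __vector unsigned char low = vec_ld(0, mem);
        __vector unsigned char high = vec_ld(16, mem);
        // Permute pattern encoding the misalignment of mem
        __vector unsigned char permuteVector = vec_lvsr(0, mem);
        __vector unsigned char ox00 = (__vector unsigned char)vec_splat_s8(0);
        __vector unsigned char oxFF = (__vector unsigned char)vec_splat_s8(-1);
        // Make a mask for which parts of the vectors to swap out
        __vector unsigned char mask = vec_perm(ox00, oxFF, permuteVector);
        // Right rotate our input data
        __vector unsigned char v = vec_perm(self, self, permuteVector);
        // Insert our data into the low and high vectors: vec_sel takes its
        // second operand where the mask bit is set, so the rotated payload
        // goes second for the low quadword and first for the high one
        low = vec_sel(low, v, mask);
        high = vec_sel(v, high, mask);
        // Store the two aligned result vectors
        vec_st(low, 0, mem);
        vec_st(high, 16, mem);
    }

Because the idiom rewrites neighbouring bytes outside the 16 that are actually stored, it is not safe when another thread may be writing those bytes concurrently; VSX-capable targets can issue a genuine unaligned store with vec_vsx_st instead.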