@@ -81,42 +81,21 @@ namespace xsimd
             return vec_add(self, other);
         }

-        #if 0
-
         // all
-        template <class A>
-        XSIMD_INLINE bool all(batch_bool<float, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_movemask_ps(self) == 0x0F;
-        }
-        template <class A>
-        XSIMD_INLINE bool all(batch_bool<double, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_movemask_pd(self) == 0x03;
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE bool all(batch_bool<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return _mm_movemask_epi8(self) == 0xFFFF;
+            return vec_all_ne(self, vec_xor(self, self));
         }

         // any
-        template <class A>
-        XSIMD_INLINE bool any(batch_bool<float, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_movemask_ps(self) != 0;
-        }
-        template <class A>
-        XSIMD_INLINE bool any(batch_bool<double, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_movemask_pd(self) != 0;
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE bool any(batch_bool<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return _mm_movemask_epi8(self) != 0;
+            return vec_any_ne(self, vec_xor(self, self));
         }

+        #if 0
         // avgr
         template <class A, class T, class = typename std::enable_if<std::is_unsigned<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
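Side note on the rewritten `all`/`any` predicates above: comparing the mask against an all-zero vector (built cheaply with `vec_xor(self, self)`) via the `vec_all_ne`/`vec_any_ne` predicate intrinsics works for any lane width, which is what lets a single `is_scalar`-constrained overload replace the per-type SSE variants. A minimal standalone sketch, not part of the patch, assuming `<altivec.h>` and a VMX-enabled compiler (`-maltivec`):

```cpp
#include <altivec.h>
#include <cstdio>

int main()
{
    // A mask laid out the way batch_bool stores it: all-ones lanes are true,
    // all-zero lanes are false.
    __vector unsigned int mask = { 0xFFFFFFFFu, 0xFFFFFFFFu, 0u, 0xFFFFFFFFu };
    __vector unsigned int zero = vec_xor(mask, mask); // all-zero vector

    std::printf("all: %d\n", vec_all_ne(mask, zero)); // 0: lane 2 is false
    std::printf("any: %d\n", vec_any_ne(mask, zero)); // 1: at least one lane is true
    return 0;
}
```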
@@ -460,63 +439,19 @@ namespace xsimd
                 return _mm_cvttps_epi32(self);
             }
         }
+        #endif

         // eq
-        template <class A>
-        XSIMD_INLINE batch_bool<float, A> eq(batch<float, A> const& self, batch<float, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_cmpeq_ps(self, other);
-        }
-        template <class A>
-        XSIMD_INLINE batch_bool<float, A> eq(batch_bool<float, A> const& self, batch_bool<float, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_castsi128_ps(_mm_cmpeq_epi32(_mm_castps_si128(self), _mm_castps_si128(other)));
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> eq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
-            {
-                return _mm_cmpeq_epi8(self, other);
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
-            {
-                return _mm_cmpeq_epi16(self, other);
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
-            {
-                return _mm_cmpeq_epi32(self, other);
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
-            {
-                __m128i tmp1 = _mm_cmpeq_epi32(self, other);
-                __m128i tmp2 = _mm_shuffle_epi32(tmp1, 0xB1);
-                __m128i tmp3 = _mm_and_si128(tmp1, tmp2);
-                __m128i tmp4 = _mm_srai_epi32(tmp3, 31);
-                return _mm_shuffle_epi32(tmp4, 0xF5);
-            }
-            else
-            {
-                assert(false && "unsupported arch/op combination");
-                return {};
-            }
+            return vec_cmpeq(self, other);
         }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> eq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return ~(self != other);
+            return vec_cmpeq(self, other);
         }
-        template <class A>
-        XSIMD_INLINE batch_bool<double, A> eq(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_cmpeq_pd(self, other);
-        }
-        template <class A>
-        XSIMD_INLINE batch_bool<double, A> eq(batch_bool<double, A> const& self, batch_bool<double, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_castsi128_pd(_mm_cmpeq_epi32(_mm_castpd_si128(self), _mm_castpd_si128(other)));
-        }
-        #endif

         // first
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
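Similarly for `eq`: `vec_cmpeq` is overloaded per element type and returns the matching `__vector __bool` type, so the hand-rolled `sizeof(T)` dispatch from the SSE version is no longer needed. A hedged sketch of what overload resolution does here, outside of xsimd (function names are illustrative only):

```cpp
#include <altivec.h>

// Same intrinsic, different overloads: the mask type follows the element type.
__vector __bool int eq_f32(__vector float a, __vector float b)
{
    return vec_cmpeq(a, b); // all-ones lane where a[i] == b[i], zero otherwise
}

__vector __bool char eq_i8(__vector signed char a, __vector signed char b)
{
    return vec_cmpeq(a, b);
}
```

One caveat worth double-checking: the 64-bit element overloads of `vec_cmpeq` generally need at least POWER8/VSX, while the `is_scalar` constraint also admits 8-byte element types.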
@@ -987,39 +922,20 @@ namespace xsimd
             return _mm_xor_pd(
                 self, _mm_castsi128_pd(_mm_setr_epi32(0, 0x80000000, 0, 0x80000000)));
         }
+        #endif

         // neq
-        template <class A>
-        XSIMD_INLINE batch_bool<float, A> neq(batch<float, A> const& self, batch<float, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_cmpneq_ps(self, other);
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return ~(self == other);
+            return vec_cmpne(self, other);
         }
-        template <class A>
-        XSIMD_INLINE batch_bool<float, A> neq(batch_bool<float, A> const& self, batch_bool<float, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_xor_ps(self, other);
-        }
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> neq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return _mm_castps_si128(_mm_xor_ps(_mm_castsi128_ps(self.data), _mm_castsi128_ps(other.data)));
-        }
-
-        template <class A>
-        XSIMD_INLINE batch_bool<double, A> neq(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_cmpneq_pd(self, other);
-        }
-        template <class A>
-        XSIMD_INLINE batch_bool<double, A> neq(batch_bool<double, A> const& self, batch_bool<double, A> const& other, requires_arch<altivec>) noexcept
-        {
-            return _mm_xor_pd(self, other);
+            return vec_cmpne(self, other);
         }
+        #if 0

         // reciprocal
         template <class A>
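One caveat on the new `neq`: as far as I can tell, `vec_cmpne` is an ISA 3.0 (POWER9) addition in GCC's AltiVec built-ins, so older targets may not provide it. A hedged fallback sketch (names are illustrative, not part of the patch) that builds the same mask as the complement of `vec_cmpeq`:

```cpp
#include <altivec.h>

// Complement of the equality mask; valid on any VMX target.
__vector __bool int neq_fallback(__vector signed int a, __vector signed int b)
{
    __vector __bool int eq = vec_cmpeq(a, b);
    return vec_nor(eq, eq); // bitwise NOT via NOR with itself
}
```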