@@ -567,53 +567,15 @@ namespace xsimd
         {
             return _mm_castsi128_pd(_mm_cmpeq_epi32(_mm_castpd_si128(self), _mm_castpd_si128(other)));
         }
+#endif
 
         // first
-        template <class A>
-        XSIMD_INLINE float first(batch<float, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_cvtss_f32(self);
-        }
-
-        template <class A>
-        XSIMD_INLINE double first(batch<double, A> const& self, requires_arch<altivec>) noexcept
-        {
-            return _mm_cvtsd_f64(self);
-        }
-
-        template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
+        template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE T first(batch<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
-            {
-                return static_cast<T>(_mm_cvtsi128_si32(self) & 0xFF);
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
-            {
-                return static_cast<T>(_mm_cvtsi128_si32(self) & 0xFFFF);
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
-            {
-                return static_cast<T>(_mm_cvtsi128_si32(self));
-            }
-            else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
-            {
-#if defined(__x86_64__)
-                return static_cast<T>(_mm_cvtsi128_si64(self));
-#else
-                __m128i m;
-                _mm_storel_epi64(&m, self);
-                int64_t i;
-                std::memcpy(&i, &m, sizeof(i));
-                return i;
-#endif
-            }
-            else
-            {
-                assert(false && "unsupported arch/op combination");
-                return {};
-            }
+            return vec_extract(self, 0);
         }
+#if 0
 
         // from_mask
         template <class A>
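
This hunk appears to be part of porting SSE-derived kernels to AltiVec: the three SSE-specific `first` overloads (extraction via `_mm_cvtss_f32`, `_mm_cvtsd_f64`, and a `sizeof(T)`-dispatched `_mm_cvtsi128_*` fallback) collapse into one template constrained on `std::is_scalar`, because the AltiVec `vec_extract` intrinsic is overloaded on the vector type and handles floating-point and integer lanes alike. The not-yet-ported neighbours stay fenced off behind the relocated `#if 0` / `#endif` pair. Below is a minimal standalone sketch of the intrinsic the new kernel relies on; it is hypothetical demo code, not part of xsimd, and assumes a POWER compiler invoked with `-maltivec -mvsx`:

    // demo.cpp -- hypothetical illustration of vec_extract, which returns
    // the lane at the given index, typed after the vector operand.
    #include <altivec.h>
    #include <cstdio>

    int main()
    {
        __vector float vf = { 1.5f, 2.5f, 3.5f, 4.5f };
        __vector signed int vi = { 10, 20, 30, 40 };

        float f = vec_extract(vf, 0); // first lane of the float vector -> 1.5
        int i = vec_extract(vi, 0);   // first lane of the int vector -> 10

        std::printf("%f %d\n", f, i);
    }

Since one overloaded intrinsic covers every element width, the `sizeof(T)` dispatch of the SSE version is no longer needed, which is why the widened `std::is_scalar` constraint can absorb the former `float` and `double` overloads as well.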