@@ -1115,25 +1115,15 @@ namespace xsimd
     {
         return _mm_min_pd(self, other);
     }
+#endif

     // mul
-    template <class A>
-    XSIMD_INLINE batch<float, A> mul(batch<float, A> const& self, batch<float, A> const& other, requires_arch<sse2>) noexcept
-    {
-        return _mm_mul_ps(self, other);
-    }
-    template <class A>
-    XSIMD_INLINE batch<double, A> mul(batch<double, A> const& self, batch<double, A> const& other, requires_arch<sse2>) noexcept
-    {
-        return _mm_mul_pd(self, other);
-    }
-
-    // mul
-    template <class A>
-    XSIMD_INLINE batch<int16_t, A> mul(batch<int16_t, A> const& self, batch<int16_t, A> const& other, requires_arch<sse2>) noexcept
+    template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+    XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
     {
-        return _mm_mullo_epi16(self, other);
+        return vec_mul(self, other);
     }
+#if 0

     // nearbyint_as_int
     template <class A>
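An aside on the constraint used by the new generic kernels: the defaulted template parameter class = typename std::enable_if<std::is_scalar<T>::value, void>::type removes the overload from the candidate set whenever T is not a scalar type, which is why the single vec_mul-based kernel above can replace the separate float, double, and int16_t SSE2 kernels it deletes. The sketch below shows the same pattern in isolation; it is standalone C++, not part of the patch, and the names fake_batch and generic_mul are made up for illustration.

#include <iostream>
#include <type_traits>

// Stand-in for batch<T, A>: a single element instead of a SIMD register.
template <class T>
struct fake_batch
{
    T value;
};

// Same constraint shape as the mul/sub kernels in the diff: the unnamed,
// defaulted template parameter only exists when T is a scalar type, so the
// overload silently drops out of overload resolution otherwise (SFINAE).
template <class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
fake_batch<T> generic_mul(fake_batch<T> const& a, fake_batch<T> const& b)
{
    return { static_cast<T>(a.value * b.value) };
}

int main()
{
    fake_batch<float> x{ 2.0f };
    fake_batch<float> y{ 3.0f };
    std::cout << generic_mul(x, y).value << '\n'; // prints 6
    // generic_mul is not even considered for a non-scalar T, e.g.
    // fake_batch<std::string>; such a call simply fails to compile.
    return 0;
}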
@@ -1479,44 +1469,14 @@ namespace xsimd
         *(typename batch<T, A>::register_type)mem = self;
     }

-#if 0
     // sub
-    template <class A>
-    XSIMD_INLINE batch<float, A> sub(batch<float, A> const& self, batch<float, A> const& other, requires_arch<sse2>) noexcept
-    {
-        return _mm_sub_ps(self, other);
-    }
-    template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
-    XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<sse2>) noexcept
-    {
-        XSIMD_IF_CONSTEXPR(sizeof(T) == 1)
-        {
-            return _mm_sub_epi8(self, other);
-        }
-        else XSIMD_IF_CONSTEXPR(sizeof(T) == 2)
-        {
-            return _mm_sub_epi16(self, other);
-        }
-        else XSIMD_IF_CONSTEXPR(sizeof(T) == 4)
-        {
-            return _mm_sub_epi32(self, other);
-        }
-        else XSIMD_IF_CONSTEXPR(sizeof(T) == 8)
-        {
-            return _mm_sub_epi64(self, other);
-        }
-        else
-        {
-            assert(false && "unsupported arch/op combination");
-            return {};
-        }
-    }
-    template <class A>
-    XSIMD_INLINE batch<double, A> sub(batch<double, A> const& self, batch<double, A> const& other, requires_arch<sse2>) noexcept
+    template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
+    XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
     {
-        return _mm_sub_pd(self, other);
+        return vec_sub(self, other);
     }

+#if 0
     // swizzle

     template <class A, uint32_t V0, uint32_t V1, uint32_t V2, uint32_t V3>
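For completeness, a usage sketch that is not part of the commit: assuming the patch exposes its architecture tag as xsimd::altivec (the name is inferred from requires_arch<altivec> above) and the translation unit is built for a POWER target with AltiVec enabled, the public batch operators should dispatch to the mul and sub kernels added here.

#include <xsimd/xsimd.hpp>
#include <iostream>

int main()
{
    using batch_f = xsimd::batch<float, xsimd::altivec>; // tag name assumed from the diff
    batch_f a(2.0f); // broadcast 2.0f to every lane
    batch_f b(3.0f);
    batch_f p = a * b; // expected to route to the vec_mul-based kernel above
    batch_f d = a - b; // expected to route to the vec_sub-based kernel above
    std::cout << p.get(0) << ' ' << d.get(0) << '\n'; // 6 -1
    return 0;
}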