@@ -85,21 +85,21 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE bool all(batch_bool<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_all_ne(self, vec_xor(self.data, self.data));
+            return vec_all_ne(self.data, vec_xor(self.data, self.data));
         }

         // any
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE bool any(batch_bool<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_any_ne(self, vec_xor(self.data, self.data));
+            return vec_any_ne(self.data, vec_xor(self.data, self.data));
         }

         // avgr
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> avgr(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_avg(self, other);
+            return vec_avg(self.data, other.data);
         }

         // avg
@@ -108,7 +108,7 @@ namespace xsimd
         {
             constexpr auto nbit = 8 * sizeof(T) - 1;
             auto adj = ((self ^ other) << nbit) >> nbit;
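             // adj is the low bit of self ^ other, i.e. the parity of self + other;
             // avgr rounds half up, so subtracting the parity gives the truncated average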
             return avgr(self, other, A {}) - adj;
         }

         // batch_bool_cast
@@ -134,12 +134,12 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_andnot(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_nand(self, other);
+            // bitwise_andnot is self & ~other; vec_nand would compute ~(self & other)
+            return vec_andc(self.data, other.data);
         }
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> bitwise_andnot(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_nand(self, other);
+            return vec_andc(self.data, other.data);
         }

         // bitwise_lshift
@@ -148,31 +148,31 @@ namespace xsimd
         {
             using shift_type = as_unsigned_integer_t<T>;
             batch<shift_type, A> shift(static_cast<shift_type>(other));
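             // vec_sl shifts each lane by the corresponding lane of its second operand,
             // so the scalar count is broadcast into a whole batch first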
-            return vec_sl(self, shift);
+            return vec_sl(self.data, shift.data);
         }

         // bitwise_not
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_not(batch<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_nor(self, self);
+            return vec_nor(self.data, self.data);
         }
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> bitwise_not(batch_bool<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_nor(self, self);
+            return vec_nor(self.data, self.data);
         }

         // bitwise_or
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_or(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_or(self, other);
+            return vec_or(self.data, other.data);
         }
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> bitwise_or(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_or(self, other);
+            return vec_or(self.data, other.data);
         }

         // bitwise_rshift
@@ -181,19 +181,19 @@ namespace xsimd
         {
             using shift_type = as_unsigned_integer_t<T>;
             batch<shift_type, A> shift(static_cast<shift_type>(other));
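             // vec_sr is a logical shift in every lane; if signed lanes are meant to get
             // arithmetic semantics (as other xsimd backends provide), vec_sra would be needed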
-            return vec_sr(self, shift);
+            return vec_sr(self.data, shift.data);
         }

         // bitwise_xor
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> bitwise_xor(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_xor(self, other);
+            return vec_xor(self.data, other.data);
         }
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> bitwise_xor(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_xor(self, other);
+            return vec_xor(self.data, other.data);
         }

         // bitwise_cast
@@ -252,7 +252,7 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> div(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_div(self, other);
+            return vec_div(self.data, other.data);
         }

         // fast_cast
@@ -300,7 +300,7 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE T first(batch<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_extract(self, 0);
+            return vec_extract(self.data, 0);
         }
 #if 0

@@ -404,36 +404,36 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> ge(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmpge(self, other);
+            return vec_cmpge(self.data, other.data);
         }
         template <class A>
         XSIMD_INLINE batch_bool<double, A> ge(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmpge(self, other);
+            return vec_cmpge(self.data, other.data);
         }

         // gt
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> gt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmpgt(self, other);
+            return vec_cmpgt(self.data, other.data);
         }
         template <class A>
         XSIMD_INLINE batch_bool<double, A> gt(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmpgt(self, other);
+            return vec_cmpgt(self.data, other.data);
         }

         // haddp
         template <class A>
         XSIMD_INLINE batch<float, A> haddp(batch<float, A> const* row, requires_arch<altivec>) noexcept
         {
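             // strategy: even/odd merges interleave two rows so that a single vec_add
             // forms all pairwise sums; a final permute gathers the four horizontal totals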
-            auto tmp0 = vec_mergee(row[0], row[1]); // v00 v10 v02 v12
-            auto tmp1 = vec_mergeo(row[0], row[1]); // v01 v11 v03 v13
+            auto tmp0 = vec_mergee(row[0].data, row[1].data); // v00 v10 v02 v12
+            auto tmp1 = vec_mergeo(row[0].data, row[1].data); // v01 v11 v03 v13
             auto tmp4 = vec_add(tmp0, tmp1); // (v00 + v01, v10 + v11, v02 + v03, v12 + v13)

-            auto tmp2 = vec_mergee(row[2], row[3]); // v20 v30 v22 v32
-            auto tmp3 = vec_mergeo(row[2], row[3]); // v21 v31 v23 v33
+            auto tmp2 = vec_mergee(row[2].data, row[3].data); // v20 v30 v22 v32
+            auto tmp3 = vec_mergeo(row[2].data, row[3].data); // v21 v31 v23 v33
             auto tmp5 = vec_add(tmp2, tmp3); // (v20 + v21, v30 + v31, v22 + v23, v32 + v33)

             auto tmp6 = vec_permi(tmp4, tmp5, 0x0); // (v00 + v01, v10 + v11, v20 + v21, v30 + v31)
@@ -453,14 +453,14 @@ namespace xsimd
         template <class A, class T, size_t I, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> insert(batch<T, A> const& self, T val, index<I> pos, requires_arch<altivec>) noexcept
         {
-            return vec_insert(val, self, pos);
+            return vec_insert(val, self.data, pos);
         }

         // isnan
         template <class A>
         XSIMD_INLINE batch_bool<float, A> isnan(batch<float, A> const& self, requires_arch<altivec>) noexcept
         {
-            return ~vec_cmpeq(self, self);
+            return ~vec_cmpeq(self.data, self.data);
         }

         // load_aligned
@@ -501,22 +501,22 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> le(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmple(self, other);
+            return vec_cmple(self.data, other.data);
         }
         template <class A>
         XSIMD_INLINE batch_bool<double, A> le(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmple(self, other);
+            return vec_cmple(self.data, other.data);
         }

         // lt
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> lt(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmplt(self, other);
+            return vec_cmplt(self.data, other.data);
         }
         template <class A>
         XSIMD_INLINE batch_bool<double, A> lt(batch<double, A> const& self, batch<double, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_cmplt(self, other);
+            return vec_cmplt(self.data, other.data);
         }
@@ -601,21 +601,21 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> max(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_max(self, other);
+            return vec_max(self.data, other.data);
         }

         // min
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> min(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_min(self, other);
+            return vec_min(self.data, other.data);
         }

         // mul
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> mul(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_mul(self, other);
+            return vec_mul(self.data, other.data);
         }
 #if 0

@@ -632,27 +632,27 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> neg(batch<T, A> const& self, requires_arch<altivec>) noexcept
         {
-            return vec_neg(self);
+            return vec_neg(self.data);
         }

         // neq
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> neq(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return ~vec_cmpeq(self, other);
+            return ~vec_cmpeq(self.data, other.data);
         }
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch_bool<T, A> neq(batch_bool<T, A> const& self, batch_bool<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return ~vec_cmpeq(self, other);
+            return ~vec_cmpeq(self.data, other.data);
         }

         // reciprocal
         template <class A>
         XSIMD_INLINE batch<float, A> reciprocal(batch<float, A> const& self,
                                                 kernel::requires_arch<altivec>)
         {
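             // vec_re only returns an estimate (roughly 12 bits of precision), in line
             // with xsimd's approximate reciprocal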
-            return vec_re(self);
+            return vec_re(self.data);
         }

         // reduce_add
@@ -733,7 +733,7 @@ namespace xsimd
         template <class A>
         XSIMD_INLINE batch<float, A> rsqrt(batch<float, A> const& val, requires_arch<altivec>) noexcept
         {
-            return vec_rsqrt(val);
+            return vec_rsqrt(val.data);
         }

         // select
@@ -783,28 +783,28 @@ namespace xsimd
         template <class A>
         XSIMD_INLINE batch<float, A> sqrt(batch<float, A> const& val, requires_arch<altivec>) noexcept
         {
-            return vec_sqrt(val);
+            return vec_sqrt(val.data);
         }

         // slide_left
         template <size_t N, class A, class T>
         XSIMD_INLINE batch<T, A> slide_left(batch<T, A> const& x, requires_arch<altivec>) noexcept
         {
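             // NB: vec_sll interprets the splatted value as a bit count, not a byte count;
             // a byte-granular slide would go through vec_slo with N * 8 instead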
-            return vec_sll(x, vec_splat_u8(N));
+            return vec_sll(x.data, vec_splat_u8(N));
         }

         // slide_right
         template <size_t N, class A, class T>
         XSIMD_INLINE batch<T, A> slide_right(batch<T, A> const& x, requires_arch<altivec>) noexcept
         {
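             // same caveat as slide_left: vec_srl takes a bit count rather than a byte count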
-            return vec_srl(x, vec_splat_u8(N));
+            return vec_srl(x.data, vec_splat_u8(N));
         }

         // sadd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> sadd(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
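             // vec_adds saturates and is only defined for integer element types; the
             // is_scalar guard also admits float, which would need a plain vec_add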
-            return vec_adds(self, other);
+            return vec_adds(self.data, other.data);
         }

         // set
@@ -828,7 +828,7 @@ namespace xsimd
         {
             XSIMD_IF_CONSTEXPR (sizeof(T) == 1)
             {
-                return vec_subs(self, other);
+                return vec_subs(self.data, other.data);
             }
             else
             {
@@ -861,7 +861,7 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> sub(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_sub(self, other);
+            return vec_sub(self.data, other.data);
         }

 #if 0
@@ -981,14 +981,14 @@ namespace xsimd
         template <class A, class T, class = typename std::enable_if<std::is_integral<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> zip_hi(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_merge_hi(self, other);
+            return vec_mergeh(self.data, other.data);
         }

         // zip_lo
         template <class A, class T, class = typename std::enable_if<std::is_scalar<T>::value, void>::type>
         XSIMD_INLINE batch<T, A> zip_lo(batch<T, A> const& self, batch<T, A> const& other, requires_arch<altivec>) noexcept
         {
-            return vec_mergel(self, other);
+            return vec_mergel(self.data, other.data);
         }
     }
 }