  * The full license is in the file LICENSE, distributed with this software. *
  ****************************************************************************/

-#ifndef XSIMD_AVX512_DQHPP
-#define XSIMD_AVX512_D_HPP
+#ifndef XSIMD_AVX512DQ_HPP
+#define XSIMD_AVX512DQ_HPP

 #include "../types/xsimd_avx512dq_register.hpp"

@@ -47,12 +47,12 @@ namespace xsimd

         // bitwise_not
         template <class A>
-        XSIMD_INLINE batch<float, A> bitwise_not(batch<float, A> const& self, requires_arch<avx512f>) noexcept
+        XSIMD_INLINE batch<float, A> bitwise_not(batch<float, A> const& self, requires_arch<avx512dq>) noexcept
         {
             return _mm512_xor_ps(self, _mm512_castsi512_ps(_mm512_set1_epi32(-1)));
         }
         template <class A>
-        XSIMD_INLINE batch<double, A> bitwise_not(batch<double, A> const& self, requires_arch<avx512f>) noexcept
+        XSIMD_INLINE batch<double, A> bitwise_not(batch<double, A> const& self, requires_arch<avx512dq>) noexcept
         {
             return _mm512_xor_pd(self, _mm512_castsi512_pd(_mm512_set1_epi32(-1)));
         }
@@ -96,7 +96,7 @@ namespace xsimd
         // tmp1 = [a0..8, b0..8]
         // tmp2 = [a8..f, b8..f]
 #define XSIMD_AVX512_HADDP_STEP1(I, a, b)                                \
-    batch<float, avx512f> res##I;                                        \
+    batch<float, avx512dq> res##I;                                       \
     {                                                                    \
         auto tmp1 = _mm512_shuffle_f32x4(a, b, _MM_SHUFFLE(1, 0, 1, 0)); \
         auto tmp2 = _mm512_shuffle_f32x4(a, b, _MM_SHUFFLE(3, 2, 3, 2)); \
@@ -180,7 +180,7 @@ namespace xsimd

         // reduce_add
         template <class A>
-        XSIMD_INLINE float reduce_add(batch<float, A> const& rhs, requires_arch<avx512f>) noexcept
+        XSIMD_INLINE float reduce_add(batch<float, A> const& rhs, requires_arch<avx512dq>) noexcept
         {
             __m256 tmp1 = _mm512_extractf32x8_ps(rhs, 1);
             __m256 tmp2 = _mm512_extractf32x8_ps(rhs, 0);
@@ -192,7 +192,7 @@ namespace xsimd
         namespace detail
         {
             template <class A>
-            XSIMD_INLINE batch<double, A> fast_cast(batch<int64_t, A> const& x, batch<double, A> const&, requires_arch<avx512dq>) noexcept
+            XSIMD_INLINE batch<double, A> fast_cast(batch<int64_t, A> const& self, batch<double, A> const&, requires_arch<avx512dq>) noexcept
             {
                 return _mm512_cvtepi64_pd(self);
             }
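
For context, a minimal usage sketch (not part of the patch) of how the corrected avx512dq overloads are reached through xsimd's public batch API. The explicit arch tag, the routing of batch_cast through detail::fast_cast, and the main() harness are assumptions for illustration, and the snippet presumes an AVX512DQ-capable build (e.g. -mavx512dq):

    // Hypothetical smoke test; assumes xsimd is on the include path and the
    // target supports AVX512DQ.
    #include <xsimd/xsimd.hpp>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        using arch = xsimd::avx512dq;
        xsimd::batch<int64_t, arch> ints(42); // broadcast 42 into every lane
        // Casting int64_t lanes to double should reach the fast_cast overload
        // fixed above, i.e. _mm512_cvtepi64_pd.
        auto doubles = xsimd::batch_cast<double>(ints);
        // bitwise_not now resolves against requires_arch<avx512dq>.
        auto flipped = xsimd::bitwise_not(doubles);
        (void)flipped;

        alignas(64) double out[xsimd::batch<double, arch>::size];
        doubles.store_aligned(out);
        std::cout << out[0] << '\n'; // expect 42
        return 0;
    }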