|
#include "../types/xsimd_avx512vbmi2_register.hpp"
| 20 | +namespace xsimd |
| 21 | +{ |
| 22 | + |
| 23 | + namespace kernel |
| 24 | + { |
| 25 | + using namespace types; |
| 26 | + |
| 27 | + // compress |
| 28 | + template <class A> |
| 29 | + XSIMD_INLINE batch<int16_t, A> compress(batch<int16_t, A> const& self, batch_bool<int16_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 30 | + { |
| 31 | + return _mm512_maskz_compress_epi16(mask.mask(), self); |
| 32 | + } |
| 33 | + template <class A> |
| 34 | + XSIMD_INLINE batch<uint16_t, A> compress(batch<uint16_t, A> const& self, batch_bool<uint16_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 35 | + { |
| 36 | + return _mm512_maskz_compress_epi16(mask.mask(), self); |
| 37 | + } |
| 38 | + template <class A> |
| 39 | + XSIMD_INLINE batch<int8_t, A> compress(batch<int8_t, A> const& self, batch_bool<int8_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 40 | + { |
| 41 | + return _mm512_maskz_compress_epi8(mask.mask(), self); |
| 42 | + } |
| 43 | + template <class A> |
| 44 | + XSIMD_INLINE batch<uint8_t, A> compress(batch<uint8_t, A> const& self, batch_bool<uint8_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 45 | + { |
| 46 | + return _mm512_maskz_compress_epi8(mask.mask(), self); |
| 47 | + } |
| 48 | + |
| 49 | + // expand |
| 50 | + template <class A> |
| 51 | + XSIMD_INLINE batch<int16_t, A> expand(batch<int16_t, A> const& self, batch_bool<int16_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 52 | + { |
| 53 | + return _mm512_maskz_expand_epi16(mask.mask(), self); |
| 54 | + } |
| 55 | + template <class A> |
| 56 | + XSIMD_INLINE batch<uint16_t, A> expand(batch<uint16_t, A> const& self, batch_bool<uint16_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 57 | + { |
| 58 | + return _mm512_maskz_expand_epi16(mask.mask(), self); |
| 59 | + } |
| 60 | + template <class A> |
| 61 | + XSIMD_INLINE batch<int8_t, A> expand(batch<int8_t, A> const& self, batch_bool<int8_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 62 | + { |
| 63 | + return _mm512_maskz_expand_epi8(mask.mask(), self); |
| 64 | + } |
| 65 | + template <class A> |
| 66 | + XSIMD_INLINE batch<uint8_t, A> expand(batch<uint8_t, A> const& self, batch_bool<uint8_t, A> const& mask, requires_arch<avx512vbmi2>) noexcept |
| 67 | + { |
| 68 | + return _mm512_maskz_expand_epi8(mask.mask(), self); |
| 69 | + } |
| 70 | + } |
| 71 | +} |

#endif