/*
   +----------------------------------------------------------------------+
   | Copyright (c) The PHP Group                                          |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is       |
   | available through the world-wide-web at the following url:          |
   | https://www.php.net/license/3_01.txt                                |
   | If you did not receive a copy of the PHP license and are unable to  |
   | obtain it through the world-wide-web, please send a note to         |
   | [email protected] so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
   | Authors: Saki Takamachi <[email protected]>                              |
   +----------------------------------------------------------------------+
*/
#ifndef _BCMATH_SIMD_H_
#define _BCMATH_SIMD_H_

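/*
 * Thin portability layer over 128-bit SIMD: each bc_simd_*_8x16 macro
 * operates on sixteen 8-bit lanes and maps onto SSE2 on x86 or NEON on
 * AArch64. When neither ISA is available, HAVE_BC_SIMD_128 remains
 * undefined and callers are expected to fall back to scalar code.
 */
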
#ifdef __SSE2__
# include <emmintrin.h>
 typedef __m128i bc_simd_128_t;
# define HAVE_BC_SIMD_128
# define bc_simd_set_8x16(x) _mm_set1_epi8(x)
# define bc_simd_load_8x16(ptr) _mm_loadu_si128((const __m128i *) (ptr))
# define bc_simd_xor_8x16(a, b) _mm_xor_si128(a, b)
# define bc_simd_store_8x16(ptr, val) _mm_storeu_si128((__m128i *) (ptr), val)
# define bc_simd_add_8x16(a, b) _mm_add_epi8(a, b)
# define bc_simd_cmpeq_8x16(a, b) _mm_cmpeq_epi8(a, b)
# define bc_simd_cmplt_8x16(a, b) _mm_cmplt_epi8(a, b)
# define bc_simd_movemask_8x16(a) _mm_movemask_epi8(a)

#elif defined(__aarch64__) || defined(_M_ARM64)
# include <arm_neon.h>
 typedef int8x16_t bc_simd_128_t;
# define HAVE_BC_SIMD_128
# define bc_simd_set_8x16(x) vdupq_n_s8(x)
# define bc_simd_load_8x16(ptr) vld1q_s8((const int8_t *) (ptr))
# define bc_simd_xor_8x16(a, b) veorq_s8(a, b)
# define bc_simd_store_8x16(ptr, val) vst1q_s8((int8_t *) (ptr), val)
# define bc_simd_add_8x16(a, b) vaddq_s8(a, b)
# define bc_simd_cmpeq_8x16(a, b) (vreinterpretq_s8_u8(vceqq_s8(a, b)))
# define bc_simd_cmplt_8x16(a, b) (vreinterpretq_s8_u8(vcltq_s8(a, b)))
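 /*
  * NEON has no direct counterpart to SSE2's _mm_movemask_epi8, so the
  * 16-bit mask (bit i = the most significant bit of lane i) is assembled
  * lane by lane below.
  */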
 static inline int bc_simd_movemask_8x16(int8x16_t vec)
 {
     /* shift each byte right by 7 so only its sign bit remains */
     uint8x16_t uvec = vshrq_n_u8(vreinterpretq_u8_s8(vec), 7);
     return
         (vgetq_lane_u8(uvec, 0) << 0) |
         (vgetq_lane_u8(uvec, 1) << 1) |
         (vgetq_lane_u8(uvec, 2) << 2) |
         (vgetq_lane_u8(uvec, 3) << 3) |
         (vgetq_lane_u8(uvec, 4) << 4) |
         (vgetq_lane_u8(uvec, 5) << 5) |
         (vgetq_lane_u8(uvec, 6) << 6) |
         (vgetq_lane_u8(uvec, 7) << 7) |
         (vgetq_lane_u8(uvec, 8) << 8) |
         (vgetq_lane_u8(uvec, 9) << 9) |
         (vgetq_lane_u8(uvec, 10) << 10) |
         (vgetq_lane_u8(uvec, 11) << 11) |
         (vgetq_lane_u8(uvec, 12) << 12) |
         (vgetq_lane_u8(uvec, 13) << 13) |
         (vgetq_lane_u8(uvec, 14) << 14) |
         (vgetq_lane_u8(uvec, 15) << 15);
 }
#endif

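/*
 * A minimal usage sketch, assuming a caller wants the index of the first
 * byte in ptr[0..len) that differs from `value`. `bc_simd_find_mismatch`
 * is a hypothetical helper named here for illustration; it is not part of
 * bcmath, and it uses only the primitives defined above.
 */
#ifdef HAVE_BC_SIMD_128
# include <stddef.h> /* size_t */
 static inline size_t bc_simd_find_mismatch(const char *ptr, size_t len, char value)
 {
     size_t i = 0;
     bc_simd_128_t needle = bc_simd_set_8x16(value);
     for (; i + sizeof(bc_simd_128_t) <= len; i += sizeof(bc_simd_128_t)) {
         bc_simd_128_t chunk = bc_simd_load_8x16(ptr + i);
         bc_simd_128_t eq = bc_simd_cmpeq_8x16(chunk, needle);
         /* all 16 mask bits set means every lane matched */
         if (bc_simd_movemask_8x16(eq) != 0xFFFF) {
             break;
         }
     }
     /* scalar tail: also pinpoints the lane inside a mismatching chunk */
     for (; i < len; i++) {
         if (ptr[i] != value) {
             return i;
         }
     }
     return len;
 }
#endif
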
#endif /* _BCMATH_SIMD_H_ */