From b8420fff2cb64039e87132da8901cc81b1177636 Mon Sep 17 00:00:00 2001
From: jiepan
Date: Fri, 15 Nov 2024 16:05:32 +0800
Subject: [PATCH 1/6] Add AVX2 support

---
 emcc.py | 5 +-
 site/source/docs/porting/simd.rst | 85 +-
 system/include/compat/avx2intrin.h | 1800 ++++++++++++++++++++++++++++
 system/include/compat/avxintrin.h | 12 +-
 system/include/compat/immintrin.h | 28 +-
 test/sse/test_avx2.cpp | 321 +++++
 test/sse/test_sse.h | 894 +++++++++++++-
 test/test_core.py | 19 +
 8 files changed, 3131 insertions(+), 33 deletions(-)
 create mode 100644 system/include/compat/avx2intrin.h
 create mode 100644 test/sse/test_avx2.cpp

diff --git a/emcc.py b/emcc.py
index a556a6f956149..3557f5ffc23c5 100644
--- a/emcc.py
+++ b/emcc.py
@@ -77,7 +77,7 @@
   'fetchSettings'
 ]

-SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-msse4', '-mavx']
+SIMD_INTEL_FEATURE_TOWER = ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-msse4', '-mavx', '-mavx2']
 SIMD_NEON_FLAGS = ['-mfpu=neon']
 LINK_ONLY_FLAGS = {
     '--bind', '--closure', '--cpuprofiler', '--embed-file',
@@ -487,6 +487,9 @@ def array_contains_any_of(hay, needles):
     if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[7:]):
       cflags += ['-D__AVX__=1']

+    if array_contains_any_of(user_args, SIMD_INTEL_FEATURE_TOWER[8:]):
+      cflags += ['-D__AVX2__=1']
+
     if array_contains_any_of(user_args, SIMD_NEON_FLAGS):
       cflags += ['-D__ARM_NEON__=1']

diff --git a/site/source/docs/porting/simd.rst b/site/source/docs/porting/simd.rst
index 5259830e1059e..e063e4a6ca28a 100644
--- a/site/source/docs/porting/simd.rst
+++ b/site/source/docs/porting/simd.rst
@@ -12,7 +12,7 @@ Emscripten supports the `WebAssembly SIMD
 1. Enable LLVM/Clang SIMD autovectorizer to automatically target WebAssembly SIMD, without requiring changes to C/C++ source code.
 2. Write SIMD code using the GCC/Clang SIMD Vector Extensions (``__attribute__((vector_size(16)))``)
 3. Write SIMD code using the WebAssembly SIMD intrinsics (``#include <wasm_simd128.h>``)
-4. Compile existing SIMD code that uses the x86 SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2 or AVX intrinsics (``#include <*mmintrin.h>``)
+4. Compile existing SIMD code that uses the x86 SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, AVX or AVX2 intrinsics (``#include <*mmintrin.h>``)
 5. Compile existing SIMD code that uses the ARM NEON intrinsics (``#include <arm_neon.h>``)

 These techniques can be freely combined in a single program.
@@ -153,6 +153,7 @@ Emscripten supports compiling existing codebases that use x86 SSE instructions b
 * **SSE4.1**: pass ``-msse4.1`` and ``#include <smmintrin.h>``. Use ``#ifdef __SSE4_1__`` to gate code.
 * **SSE4.2**: pass ``-msse4.2`` and ``#include <nmmintrin.h>``. Use ``#ifdef __SSE4_2__`` to gate code.
 * **AVX**: pass ``-mavx`` and ``#include <immintrin.h>``. Use ``#ifdef __AVX__`` to gate code.
+* **AVX2**: pass ``-mavx2`` and ``#include <immintrin.h>``. Use ``#ifdef __AVX2__`` to gate code.

 Currently only the SSE1, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2, and AVX instruction sets are supported. Each of these instruction sets add on top of the previous ones, so e.g. when targeting SSE3, the instruction sets SSE1 and SSE2 are also available.
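As a quick illustration of the new AVX2 target (a minimal sketch, not part of the diff; it assumes the 256-bit load/store helpers already provided by the existing AVX compat header, and that ``-msimd128`` is passed as with the other SSE/AVX flags), a gated source file built with ``emcc -O2 -msimd128 -mavx2`` could look like::

    #include <immintrin.h>

    // Uses the emulated 256-bit intrinsics when __AVX2__ is defined,
    // otherwise falls back to plain scalar code.
    void add8(const int* a, const int* b, int* out) {
    #ifdef __AVX2__
      __m256i va = _mm256_loadu_si256((const __m256i*)a);
      __m256i vb = _mm256_loadu_si256((const __m256i*)b);
      _mm256_storeu_si256((__m256i*)out, _mm256_add_epi32(va, vb));
    #else
      for (int i = 0; i < 8; i++)
        out[i] = a[i] + b[i];
    #endif
    }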
@@ -1145,6 +1146,88 @@ The following table highlights the availability and expected performance of diff
 Only the 128-bit wide instructions from AVX instruction set are listed. The 256-bit wide AVX instructions are emulated by two 128-bit wide instructions.

+The following table highlights the availability and expected performance of different AVX2 intrinsics. Refer to `Intel Intrinsics Guide on AVX2 `_.
+
+.. list-table:: x86 AVX2 intrinsics available via #include <immintrin.h> and -mavx2
+   :widths: 20 30
+   :header-rows: 1
+
+   * - Intrinsic name
+     - WebAssembly SIMD support
+   * - _mm_broadcastss_ps
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastsd_pd
+     - 💡 emulated with a general shuffle
+   * - _mm_blend_epi32
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastb_epi8
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastw_epi16
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastd_epi32
+     - 💡 emulated with a general shuffle
+   * - _mm_broadcastq_epi64
+     - 💡 emulated with a general shuffle
+   * - _mm256_permutevar8x32_epi32
+     - ❌ scalarized
+   * - _mm256_permute4x64_pd
+     - 💡 emulated with two general shuffles
+   * - _mm256_permutevar8x32_ps
+     - ❌ scalarized
+   * - _mm256_permute4x64_epi64
+     - 💡 emulated with two general shuffles
+   * - _mm_maskload_epi32
+     - ⚠️ emulated with SIMD load+shift+and
+   * - _mm_maskload_epi64
+     - ⚠️ emulated with SIMD load+shift+and
+   * - _mm_maskstore_epi32
+     - ❌ scalarized
+   * - _mm_maskstore_epi64
+     - ❌ scalarized
+   * - _mm_sllv_epi32
+     - ❌ scalarized
+   * - _mm_sllv_epi64
+     - ❌ scalarized
+   * - _mm_srav_epi32
+     - ❌ scalarized
+   * - _mm_srlv_epi32
+     - ❌ scalarized
+   * - _mm_srlv_epi64
+     - ❌ scalarized
+   * - _mm_mask_i32gather_pd
+     - ❌ scalarized
+   * - _mm_mask_i64gather_pd
+     - ❌ scalarized
+   * - _mm_mask_i32gather_ps
+     - ❌ scalarized
+   * - _mm_mask_i64gather_ps
+     - ❌ scalarized
+   * - _mm_mask_i32gather_epi32
+     - ❌ scalarized
+   * - _mm_mask_i64gather_epi32
+     - ❌ scalarized
+   * - _mm_mask_i32gather_epi64
+     - ❌ scalarized
+   * - _mm_mask_i64gather_epi64
+     - ❌ scalarized
+   * - _mm_i32gather_pd
+     - ❌ scalarized
+   * - _mm_i64gather_pd
+     - ❌ scalarized
+   * - _mm_i32gather_ps
+     - ❌ scalarized
+   * - _mm_i64gather_ps
+     - ❌ scalarized
+   * - _mm_i32gather_epi32
+     - ❌ scalarized
+   * - _mm_i64gather_epi32
+     - ❌ scalarized
+   * - _mm_i32gather_epi64
+     - ❌ scalarized
+   * - _mm_i64gather_epi64
+     - ❌ scalarized
+
+All the 128-bit wide instructions from the AVX2 instruction set are listed. Only a small part of the 256-bit wide AVX2 instructions are listed here; most of the 256-bit wide AVX2 instructions are emulated by two 128-bit wide instructions.

 ======================================================
 Compiling SIMD code targeting ARM NEON instruction set
 ======================================================
diff --git a/system/include/compat/avx2intrin.h b/system/include/compat/avx2intrin.h
new file mode 100644
index 0000000000000..80834dbce01de
--- /dev/null
+++ b/system/include/compat/avx2intrin.h
@@ -0,0 +1,1800 @@
+/*
+ * Copyright 2024 The Emscripten Authors. All rights reserved.
+ * Emscripten is available under two separate licenses, the MIT license and the
+ * University of Illinois/NCSA Open Source License. Both these licenses can be
+ * found in the LICENSE file.
+ */
+
+#ifndef __emscripten_immintrin_h__
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif + +#ifndef __emscripten_avx2intrin_h__ +#define __emscripten_avx2intrin_h__ + +#ifndef __AVX2__ +#error "AVX2 instruction set not enabled" +#endif + +#define _mm256_mpsadbw_epu8(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_mpsadbw_epu8(__a.v1, __b.v1, (__imm) >> 3), \ + _mm_mpsadbw_epu8(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_abs_epi8(__m256i __a) { + __m256i ret; + ret.v0 = _mm_abs_epi8(__a.v0); + ret.v1 = _mm_abs_epi8(__a.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_abs_epi16(__m256i __a) { + __m256i ret; + ret.v0 = _mm_abs_epi16(__a.v0); + ret.v1 = _mm_abs_epi16(__a.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_abs_epi32(__m256i __a) { + __m256i ret; + ret.v0 = _mm_abs_epi32(__a.v0); + ret.v1 = _mm_abs_epi32(__a.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_packs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_packs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_packs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_packs_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_packs_epi32(__a.v0, __b.v0); + ret.v1 = _mm_packs_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_packus_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_packus_epi16(__a.v0, __b.v0); + ret.v1 = _mm_packus_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_packus_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_packus_epi32(__a.v0, __b.v0); + ret.v1 = _mm_packus_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi8(__a.v0, __b.v0); + ret.v1 = _mm_add_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi16(__a.v0, __b.v0); + ret.v1 = _mm_add_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi32(__a.v0, __b.v0); + ret.v1 = _mm_add_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_add_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_add_epi64(__a.v0, __b.v0); + ret.v1 = _mm_add_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epi8(__a.v0, __b.v0); + ret.v1 = _mm_adds_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epi16(__a.v0, __b.v0); + ret.v1 = _mm_adds_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epu8(__m256i __a, 
__m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epu8(__a.v0, __b.v0); + ret.v1 = _mm_adds_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_adds_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_adds_epu16(__a.v0, __b.v0); + ret.v1 = _mm_adds_epu16(__a.v1, __b.v1); + return ret; +} + +#define _mm256_alignr_epi8(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_alignr_epi8(__a.v1, __b.v1, (__imm)), \ + _mm_alignr_epi8(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_and_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_and_si128(__a.v0, __b.v0); + ret.v1 = _mm_and_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_andnot_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_andnot_si128(__a.v0, __b.v0); + ret.v1 = _mm_andnot_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_avg_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_avg_epu8(__a.v0, __b.v0); + ret.v1 = _mm_avg_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_avg_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_avg_epu16(__a.v0, __b.v0); + ret.v1 = _mm_avg_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_blendv_epi8(__m256i __a, __m256i __b, __m256i __mask) { + __m256i ret; + ret.v0 = _mm_blendv_epi8(__a.v0, __b.v0, __mask.v0); + ret.v1 = _mm_blendv_epi8(__a.v1, __b.v1, __mask.v1); + return ret; +} + +#define _mm256_blend_epi16(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_blend_epi16(__a.v1, __b.v1, (__imm)), \ + _mm_blend_epi16(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi8(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi16(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi32(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpeq_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpeq_epi64(__a.v0, __b.v0); + ret.v1 = _mm_cmpeq_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi8(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi16(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi16(__a.v1, __b.v1); + return ret; +} + 
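+/*
+ * Note on the emulation pattern used throughout this header: __m256i is a
+ * pair of 128-bit halves, v0 (low) and v1 (high), so each 256-bit integer
+ * operation is implemented by applying the corresponding 128-bit intrinsic
+ * to both halves. For example,
+ *
+ *   __m256i sum = _mm256_add_epi32(a, b);
+ *
+ * expands to _mm_add_epi32(a.v0, b.v0) and _mm_add_epi32(a.v1, b.v1), which
+ * lower to two Wasm i32x4.add instructions.
+ */
+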
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi32(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cmpgt_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_cmpgt_epi64(__a.v0, __b.v0); + ret.v1 = _mm_cmpgt_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadd_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadd_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hadd_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadd_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadd_epi32(__a.v0, __b.v0); + ret.v1 = _mm_hadd_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hadds_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hadds_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hadds_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsub_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsub_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hsub_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsub_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsub_epi32(__a.v0, __b.v0); + ret.v1 = _mm_hsub_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_hsubs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_hsubs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_hsubs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maddubs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_maddubs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_maddubs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_madd_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_madd_epi16(__a.v0, __b.v0); + ret.v1 = _mm_madd_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi8(__a.v0, __b.v0); + ret.v1 = _mm_max_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi16(__a.v0, __b.v0); + ret.v1 = _mm_max_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epi32(__a.v0, __b.v0); + ret.v1 = _mm_max_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epu8(__a.v0, __b.v0); + ret.v1 = _mm_max_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = 
_mm_max_epu16(__a.v0, __b.v0); + ret.v1 = _mm_max_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_max_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_max_epu32(__a.v0, __b.v0); + ret.v1 = _mm_max_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi8(__a.v0, __b.v0); + ret.v1 = _mm_min_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi16(__a.v0, __b.v0); + ret.v1 = _mm_min_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epi32(__a.v0, __b.v0); + ret.v1 = _mm_min_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu8(__a.v0, __b.v0); + ret.v1 = _mm_min_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu16(__a.v0, __b.v0); + ret.v1 = _mm_min_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_min_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_min_epu32(__a.v0, __b.v0); + ret.v1 = _mm_min_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ int __attribute__((__always_inline__, __nodebug__)) +_mm256_movemask_epi8(__m256i __a) { + return (_mm_movemask_epi8(__a.v1) << 16) | _mm_movemask_epi8(__a.v0); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi16(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi16(__a); + ret.v1 = _mm_cvtepi8_epi16(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi32(__a); + ret.v1 = _mm_cvtepi8_epi32(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi8_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi8_epi64(__a); + ret.v1 = _mm_cvtepi8_epi64(_mm_srli_epi32(__a, 16)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi16_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi16_epi32(__a); + ret.v1 = _mm_cvtepi16_epi32(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi16_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi16_epi64(__a); + ret.v1 = _mm_cvtepi16_epi64(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepi32_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepi32_epi64(__a); + ret.v1 = _mm_cvtepi32_epi64(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi16(__m128i __a) { + __m256i ret; + ret.v0 = 
_mm_cvtepu8_epi16(__a); + ret.v1 = _mm_cvtepu8_epi16(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu8_epi32(__a); + ret.v1 = _mm_cvtepu8_epi32(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu8_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu8_epi64(__a); + ret.v1 = _mm_cvtepu8_epi64(_mm_srli_epi32(__a, 16)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu16_epi32(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu16_epi32(__a); + ret.v1 = _mm_cvtepu16_epi32(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu16_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu16_epi64(__a); + ret.v1 = _mm_cvtepu16_epi64(_mm_shuffle_epi32(__a, 0xE1)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_cvtepu32_epi64(__m128i __a) { + __m256i ret; + ret.v0 = _mm_cvtepu32_epi64(__a); + ret.v1 = _mm_cvtepu32_epi64(_mm_shuffle_epi32(__a, 0x4E)); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mul_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mul_epi32(__a.v0, __b.v0); + ret.v1 = _mm_mul_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhrs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhrs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mulhrs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhi_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhi_epu16(__a.v0, __b.v0); + ret.v1 = _mm_mulhi_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mulhi_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mulhi_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mulhi_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mullo_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mullo_epi16(__a.v0, __b.v0); + ret.v1 = _mm_mullo_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mullo_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mullo_epi32(__a.v0, __b.v0); + ret.v1 = _mm_mullo_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_mul_epu32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_mul_epu32(__a.v0, __b.v0); + ret.v1 = _mm_mul_epu32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_or_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_or_si128(__a.v0, __b.v0); + ret.v1 = _mm_or_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sad_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sad_epu8(__a.v0, __b.v0); + ret.v1 = _mm_sad_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, 
__nodebug__)) +_mm256_shuffle_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_shuffle_epi8(__a.v0, __b.v0); + ret.v1 = _mm_shuffle_epi8(__a.v1, __b.v1); + return ret; +} + +#define _mm256_shuffle_epi32(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shuffle_epi32(__a.v1, (__imm)), \ + _mm_shuffle_epi32(__a.v0, (__imm))); \ + }) + +#define _mm256_shufflehi_epi16(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shufflehi_epi16(__a.v1, (__imm)), \ + _mm_shufflehi_epi16(__a.v0, (__imm))); \ + }) + +#define _mm256_shufflelo_epi16(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_shufflelo_epi16(__a.v1, (__imm)), \ + _mm_shufflelo_epi16(__a.v0, (__imm))); \ + }) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi8(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi16(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sign_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sign_epi32(__a.v0, __b.v0); + ret.v1 = _mm_sign_epi32(__a.v1, __b.v1); + return ret; +} + +#define _mm256_slli_si256(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_slli_si128(__a.v1, (__imm)), \ + _mm_slli_si128(__a.v0, (__imm))); \ + }) + +#define _mm256_bslli_epi128(__A, __imm) _mm256_slli_si256(__A, __imm) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi16(__a.v0, __count); + ret.v1 = _mm_slli_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi16(__a.v0, __count); + ret.v1 = _mm_sll_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi32(__a.v0, __count); + ret.v1 = _mm_slli_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi32(__a.v0, __count); + ret.v1 = _mm_sll_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_slli_epi64(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_slli_epi64(__a.v0, __count); + ret.v1 = _mm_slli_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sll_epi64(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sll_epi64(__a.v0, __count); + ret.v1 = _mm_sll_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srai_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srai_epi16(__a.v0, __count); + ret.v1 = _mm_srai_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) 
+_mm256_sra_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sra_epi16(__a.v0, __count); + ret.v1 = _mm_sra_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srai_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srai_epi32(__a.v0, __count); + ret.v1 = _mm_srai_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sra_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_sra_epi32(__a.v0, __count); + ret.v1 = _mm_sra_epi32(__a.v1, __count); + return ret; +} + +#define _mm256_srli_si256(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i(_mm_srli_si128(__a.v1, (__imm)), \ + _mm_srli_si128(__a.v0, (__imm))); \ + }) + +#define _mm256_bsrli_epi128(a, imm) _mm256_srli_si256(a, imm) + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srli_epi16(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi16(__a.v0, __count); + ret.v1 = _mm_srli_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi16(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi16(__a.v0, __count); + ret.v1 = _mm_srl_epi16(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srli_epi32(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi32(__a.v0, __count); + ret.v1 = _mm_srli_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi32(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi32(__a.v0, __count); + ret.v1 = _mm_srl_epi32(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srli_epi64(__m256i __a, int __count) { + __m256i ret; + ret.v0 = _mm_srli_epi64(__a.v0, __count); + ret.v1 = _mm_srli_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_srl_epi64(__m256i __a, __m128i __count) { + __m256i ret; + ret.v0 = _mm_srl_epi64(__a.v0, __count); + ret.v1 = _mm_srl_epi64(__a.v1, __count); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi8(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi16(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi32(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_sub_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_sub_epi64(__a.v0, __b.v0); + ret.v1 = _mm_sub_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epi8(__a.v0, __b.v0); + ret.v1 = _mm_subs_epi8(__a.v1, __b.v1); + 
return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epi16(__a.v0, __b.v0); + ret.v1 = _mm_subs_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epu8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epu8(__a.v0, __b.v0); + ret.v1 = _mm_subs_epu8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_subs_epu16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_subs_epu16(__a.v0, __b.v0); + ret.v1 = _mm_subs_epu16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi8(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi16(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi32(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpackhi_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpackhi_epi64(__a.v0, __b.v0); + ret.v1 = _mm_unpackhi_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi8(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi8(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi8(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi16(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi16(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi16(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi32(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi32(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi32(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_unpacklo_epi64(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_unpacklo_epi64(__a.v0, __b.v0); + ret.v1 = _mm_unpacklo_epi64(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_xor_si256(__m256i __a, __m256i __b) { + __m256i ret; + ret.v0 = _mm_xor_si128(__a.v0, __b.v0); + ret.v1 = _mm_xor_si128(__a.v1, __b.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_stream_load_si256(const void* __V) { + __m256i ret; + ret.v0 = _mm_stream_load_si128((const __m128i*)__V); + ret.v1 = _mm_stream_load_si128((const __m128i*)(((const uint8_t*)__V) + 16)); + return ret; +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastss_ps(__m128 __a) { + return (__m128)wasm_i32x4_shuffle(__a, __a, 0, 0, 0, 0); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) 
+_mm_broadcastsd_pd(__m128d __a) { + return (__m128d)wasm_i64x2_shuffle(__a, __a, 0, 0); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastss_ps(__m128 __a) { + __m256 ret; + ret.v1 = ret.v0 = _mm_broadcastss_ps(__a); + return ret; +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastsd_pd(__m128d __a) { + __m256d ret; + ret.v1 = ret.v0 = _mm_broadcastsd_pd(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastsi128_si256(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = __a; + return ret; +} + +#define _mm_broadcastsi128_si256(X) _mm256_broadcastsi128_si256(X) + +#define _mm_blend_epi32(__a, __b, __imm8) \ + __extension__({ \ + (__m128i) __builtin_shufflevector((__i32x4)(__m128i)(__a), \ + (__i32x4)(__m128i)(__b), \ + (((__imm8) & 0x01) ? 4 : 0), \ + (((__imm8) & 0x02) ? 5 : 1), \ + (((__imm8) & 0x04) ? 6 : 2), \ + (((__imm8) & 0x08) ? 7 : 3)); \ + }) + +#define _mm256_blend_epi32(__A, __B, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + __m256i __b = (__B); \ + _mm256_set_m128i(_mm_blend_epi32(__a.v1, __b.v1, (__imm) >> 4), \ + _mm_blend_epi32(__a.v0, __b.v0, (__imm))); \ + }) + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastb_epi8(__m128i __a) { + return (__m128i)wasm_i8x16_shuffle( + __a, __a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastw_epi16(__m128i __a) { + return (__m128i)wasm_i16x8_shuffle(__a, __a, 0, 0, 0, 0, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastd_epi32(__m128i __a) { + return (__m128i)wasm_i32x4_shuffle(__a, __a, 0, 0, 0, 0); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_broadcastq_epi64(__m128i __a) { + return (__m128i)wasm_i64x2_shuffle(__a, __a, 0, 0); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastb_epi8(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastb_epi8(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastw_epi16(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastw_epi16(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastd_epi32(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastd_epi32(__a); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_broadcastq_epi64(__m128i __a) { + __m256i ret; + ret.v1 = ret.v0 = _mm_broadcastq_epi64(__a); + return ret; +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permutevar8x32_epi32(__m256i __a, __m256i __b) { + __m256i ret; + int index[8]; + int lane[8]; + for (int i = 0; i < 4; i++) { + index[i] = ((__i32x4)__b.v0)[i] & 7; + index[i + 4] = ((__i32x4)__b.v1)[i] & 7; + } + + for (int i = 0; i < 8; i++) { + lane[i] = index[i] < 4 ? 
((__i32x4)(__a.v0))[index[i]] + : ((__i32x4)(__a.v1))[index[i] - 4]; + } + + ret.v0 = (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); + ret.v1 = (__m128i)wasm_i32x4_make(lane[4], lane[5], lane[6], lane[7]); + return ret; +} + +#define _mm256_permute4x64_pd(__A, __imm) \ + __extension__({ \ + __m256d __a = (__A); \ + _mm256_set_m128d( \ + (__m128d)wasm_i64x2_shuffle( \ + __a.v0, __a.v1, (((__imm) >> 4) & 3), (((__imm) >> 6) & 3)), \ + (__m128d)wasm_i64x2_shuffle( \ + __a.v0, __a.v1, ((__imm) & 3), (((__imm) >> 2) & 3))); \ + }) + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permutevar8x32_ps(__m256 __a, __m256i __b) { + __m256 ret; + int index[8]; + float lane[8]; + for (int i = 0; i < 4; i++) { + index[i] = ((__i32x4)__b.v0)[i] & 7; + index[i + 4] = ((__i32x4)__b.v1)[i] & 7; + } + for (int i = 0; i < 8; i++) { + lane[i] = index[i] < 4 ? ((__f32x4)(__a.v0))[index[i]] + : ((__f32x4)(__a.v1))[index[i] - 4]; + } + ret.v0 = (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); + ret.v1 = (__m128)wasm_f32x4_make(lane[4], lane[5], lane[6], lane[7]); + return ret; +} + +#define _mm256_permute4x64_epi64(__A, __imm) \ + __extension__({ \ + __m256i __a = (__A); \ + _mm256_set_m128i( \ + wasm_i64x2_shuffle( \ + __a.v0, __a.v1, (((__imm) >> 4) & 3), (((__imm) >> 6) & 3)), \ + wasm_i64x2_shuffle( \ + __a.v0, __a.v1, ((__imm) & 3), (((__imm) >> 2) & 3))); \ + }) + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_permute2x128_si256(__m256i __a, __m256i __b, const int imm8) { + __m256i ret; + ret.v0 = __avx_select4i(__a, __b, imm8); + ret.v1 = __avx_select4i(__a, __b, imm8 >> 4); + return ret; +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm256_extracti128_si256(__m256i __a, const int imm8) { + if (imm8 & 0x1) { + return __a.v1; + } else { + return __a.v0; + } +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_inserti128_si256(__m256i __a, __m128i __b, const int imm8) { + __m256i ret = __a; + if (imm8 & 0x1) { + ret.v1 = __b; + } else { + ret.v0 = __b; + } + return ret; +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskload_epi32(int32_t const* __p, __m128i __m) { + // This may cause an out-of-bounds memory load since we first load and + // then mask, but since there are no segmentation faults in Wasm memory + // accesses, that is ok (as long as we are within the heap bounds - + // a negligible limitation in practice) + // TODO, loadu or load, 128-bit align? + return _mm_and_si128(_mm_load_si128((const __m128i*)__p), + _mm_srai_epi32(__m, 31)); +} + +static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) +_mm_maskload_epi64(int64_t const* __p, __m128i __m) { + // This may cause an out-of-bounds memory load since we first load and + // then mask, but since there are no segmentation faults in Wasm memory + // accesses, that is ok (as long as we are within the heap bounds - + // a negligible limitation in practice) + // TODO, loadu or load, 128-bit align? 
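+  // wasm_i64x2_shr is an arithmetic shift, so shifting each mask element
+  // right by 63 turns its sign bit into an all-ones or all-zeros lane mask.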
+ return _mm_and_si128(_mm_load_si128((const __m128i*)__p), + wasm_i64x2_shr(__m, 63)); +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskload_epi32(int const* __p, __m256i __m) { + __m256i ret; + ret.v0 = _mm_maskload_epi32(__p, __m.v0); + ret.v1 = _mm_maskload_epi32(((int32_t*)__p) + 4, __m.v1); + return ret; +} + +static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) +_mm256_maskload_epi64(long long const* __p, __m256i __m) { + __m256i ret; + ret.v0 = _mm_maskload_epi64(__p, __m.v0); + ret.v1 = _mm_maskload_epi64(((int64_t*)__p) + 2, __m.v1); + return ret; +} + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_maskstore_epi32(int* __p, __m128i __m, __m128i __a) { + if ((wasm_i32x4_extract_lane(__m, 0) & 0x80000000ull) != 0) + __p[0] = wasm_i32x4_extract_lane((v128_t)__a, 0); + if ((wasm_i32x4_extract_lane(__m, 1) & 0x80000000ull) != 0) + __p[1] = wasm_i32x4_extract_lane((v128_t)__a, 1); + if ((wasm_i32x4_extract_lane(__m, 2) & 0x80000000ull) != 0) + __p[2] = wasm_i32x4_extract_lane((v128_t)__a, 2); + if ((wasm_i32x4_extract_lane(__m, 3) & 0x80000000ull) != 0) + __p[3] = wasm_i32x4_extract_lane((v128_t)__a, 3); +} + +static __inline__ void + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_maskstore_epi64(long long* __p, __m128i __m, __m128i __a) { + if ((wasm_i64x2_extract_lane(__m, 0) & 0x8000000000000000ull) != 0) + __p[0] = wasm_i64x2_extract_lane((v128_t)__a, 0); + if ((wasm_i64x2_extract_lane(__m, 1) & 0x8000000000000000ull) != 0) + __p[1] = wasm_i64x2_extract_lane((v128_t)__a, 1); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm256_maskstore_epi32(int* __p, __m256i __m, __m256i __a) { + _mm_maskstore_epi32(__p, __m.v0, __a.v0); + _mm_maskstore_epi32(((int32_t*)__p) + 4, __m.v1, __a.v1); +} + +static __inline__ void __attribute__((__always_inline__, __nodebug__)) +_mm256_maskstore_epi64(long long* __p, __m256i __m, __m256i __a) { + _mm_maskstore_epi64(__p, __m.v0, __a.v0); + _mm_maskstore_epi64(((int64_t*)__p) + 2, __m.v1, __a.v1); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_sllv_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + lane[i] = shift < 32 ? ((__u32x4)__a)[i] << shift : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_sllv_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_sllv_epi32(__a.v0, __count.v0); + ret.v1 = _mm_sllv_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_sllv_epi64(__m128i __a, __m128i __count) { + + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t shift = (uint64_t)((__u64x2)__count)[i]; + lane[i] = shift < 64 ? 
((__u64x2)__a)[i] << shift : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_sllv_epi64(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_sllv_epi64(__a.v0, __count.v0); + ret.v1 = _mm_sllv_epi64(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srav_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + shift = shift < 31 ? shift : 31; + lane[i] = ((__i32x4)__a)[i] >> shift; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srav_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srav_epi32(__a.v0, __count.v0); + ret.v1 = _mm_srav_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srlv_epi32(__m128i __a, __m128i __count) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t shift = ((__u32x4)__count)[i]; + lane[i] = shift < 32 ? ((__u32x4)__a)[i] >> shift : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srlv_epi32(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srlv_epi32(__a.v0, __count.v0); + ret.v1 = _mm_srlv_epi32(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_srlv_epi64(__m128i __a, __m128i __count) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t shift = ((__u64x2)__count)[i]; + lane[i] = shift < 64 ? 
((__u64x2)__a)[i] >> shift : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_srlv_epi64(__m256i __a, __m256i __count) { + __m256i ret; + ret.v0 = _mm_srlv_epi64(__a.v0, __count.v0); + ret.v1 = _mm_srlv_epi64(__a.v1, __count.v1); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_pd(__m128d src, + const double* base_addr, + __m128i vindex, + __m128d mask, + const int scale) { + double lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + double* addr = + (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f64x2)src)[i]; + } + } + return (__m128d)wasm_f64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_pd(__m256d src, + const double* base_addr, + __m128i vindex, + __m256d mask, + const int scale) { + __m256d ret; + ret.v0 = _mm_mask_i32gather_pd(src.v0, base_addr, vindex, mask.v0, scale); + __m128i vindex1 = (__m128i)wasm_i32x4_shuffle(vindex, vindex, 2, 3, 0, 1); + ret.v1 = _mm_mask_i32gather_pd(src.v1, base_addr, vindex1, mask.v1, scale); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_pd(__m128d src, + const double* base_addr, + __m128i vindex, + __m128d mask, + const int scale) { + double lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + double* addr = + (double*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f64x2)src)[i]; + } + } + return (__m128d)wasm_f64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_pd(__m256d src, + const double* base_addr, + __m256i vindex, + __m256d mask, + const int scale) { + __m256d ret; + ret.v0 = _mm_mask_i64gather_pd(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = _mm_mask_i64gather_pd(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_ps(__m128 src, + const float* base_addr, + __m128i vindex, + __m128 mask, + const int scale) { + float lane[4]; + for (size_t i = 0; i < 4; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + + (int64_t)(((__i32x4)vindex)[i]) * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_ps(__m256 src, + const float* base_addr, + __m256i vindex, + __m256 mask, + const int scale) { + __m256 ret; + ret.v0 = _mm_mask_i32gather_ps(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = _mm_mask_i32gather_ps(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_ps(__m128 src, + const float* base_addr, + __m128i vindex, + __m128 mask, + const int scale) { + float lane[2]; + for (size_t i = 0; i < 
2; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], 0, 0); +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_ps(__m128 src, + const float* base_addr, + __m256i vindex, + __m128 mask, + const int scale) { + float lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? vindex.v0 : vindex.v1; + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + float* addr = + (float*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__f32x4)src)[i]; + } + } + return (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_epi32(__m128i src, + const int* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_epi32(__m256i src, + const int* base_addr, + __m256i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = + _mm_mask_i32gather_epi32(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = + _mm_mask_i32gather_epi32(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_epi32(__m128i src, + const int* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int32_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], 0, 0); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_epi32(__m128i src, + const int* base_addr, + __m256i vindex, + __m128i mask, + const int scale) { + int32_t lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? 
vindex.v0 : vindex.v1; + if ((((__i32x4)mask)[i] >> 31) & 0x1) { + int32_t* addr = + (int32_t*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i32x4)src)[i]; + } + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i32gather_epi64(__m128i src, + const long long* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + int64_t* addr = + (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i64x2)src)[i]; + } + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i32gather_epi64(__m256i src, + const long long* base_addr, + __m128i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = _mm_mask_i32gather_epi64(src.v0, base_addr, vindex, mask.v0, scale); + __m128i vindex1 = (__m128i)wasm_i32x4_shuffle(vindex, vindex, 2, 3, 0, 1); + ret.v1 = _mm_mask_i32gather_epi64(src.v1, base_addr, vindex1, mask.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_mask_i64gather_epi64(__m128i src, + const long long* base_addr, + __m128i vindex, + __m128i mask, + const int scale) { + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + if ((((__i64x2)mask)[i] >> 63) & 0x1) { + int64_t* addr = + (int64_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + lane[i] = *addr; + } else { + lane[i] = ((__i64x2)src)[i]; + } + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_mask_i64gather_epi64(__m256i src, + const long long* base_addr, + __m256i vindex, + __m256i mask, + const int scale) { + __m256i ret; + ret.v0 = + _mm_mask_i64gather_epi64(src.v0, base_addr, vindex.v0, mask.v0, scale); + ret.v1 = + _mm_mask_i64gather_epi64(src.v1, base_addr, vindex.v1, mask.v1, scale); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_pd(const double* base_addr, __m128i vindex, const int scale) { + double* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_pd(const double* base_addr, + __m128i vindex, + const int scale) { + __m256d ret; + double* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (double*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + ret.v0 = (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); + ret.v1 = (__m128d)wasm_f64x2_make(*lane[2], *lane[3]); + return ret; +} + +static __inline__ __m128d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_pd(const double* base_addr, __m128i vindex, const int scale) { + double* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (double*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * 
(uint64_t)((uint32_t)scale)); + } + return (__m128d)wasm_f64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256d + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_pd(const double* base_addr, + __m256i vindex, + const int scale) { + __m256d ret; + ret.v0 = _mm_i64gather_pd(base_addr, vindex.v0, scale); + ret.v1 = _mm_i64gather_pd(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_ps(const float* base_addr, __m128i vindex, const int scale) { + float* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (float*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m256 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_ps(const float* base_addr, __m256i vindex, const int scale) { + __m256 ret; + ret.v0 = _mm_i32gather_ps(base_addr, vindex.v0, scale); + ret.v1 = _mm_i32gather_ps(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_ps(const float* base_addr, __m128i vindex, const int scale) { + float* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (float*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], 0, 0); +} + +static __inline__ __m128 + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_ps(const float* base_addr, __m256i vindex, const int scale) { + float* lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? vindex.v0 : vindex.v1; + lane[i] = (float*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + } + return (__m128)wasm_f32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_epi32(const int* base_addr, __m128i vindex, const int scale) { + int32_t* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (int32_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_epi32(const int* base_addr, + __m256i vindex, + const int scale) { + __m256i ret; + ret.v0 = _mm_i32gather_epi32(base_addr, vindex.v0, scale); + ret.v1 = _mm_i32gather_epi32(base_addr, vindex.v1, scale); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_epi32(const int* base_addr, __m128i vindex, const int scale) { + int32_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int32_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], 0, 0); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_epi32(const int* base_addr, + __m256i vindex, + const int scale) { + int32_t* lane[4]; + __m128i current_vindex; + for (size_t i = 0; i < 4; i++) { + current_vindex = i < 2 ? 
vindex.v0 : vindex.v1; + lane[i] = + (int32_t*)((uint8_t*)base_addr + ((__i64x2)current_vindex)[i & 1] * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i32x4_make(*lane[0], *lane[1], *lane[2], *lane[3]); +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i32gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + int64_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i32gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + + __m256i ret; + int64_t* lane[4]; + for (size_t i = 0; i < 4; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + (int64_t)(((__i32x4)vindex)[i]) * + (uint64_t)((uint32_t)scale)); + } + ret.v0 = (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); + ret.v1 = (__m128i)wasm_i64x2_make(*lane[2], *lane[3]); + return ret; +} + +static __inline__ __m128i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm_i64gather_epi64(const long long* base_addr, + __m128i vindex, + const int scale) { + int64_t* lane[2]; + for (size_t i = 0; i < 2; i++) { + lane[i] = (int64_t*)((uint8_t*)base_addr + + ((__i64x2)vindex)[i] * (uint64_t)((uint32_t)scale)); + } + return (__m128i)wasm_i64x2_make(*lane[0], *lane[1]); +} + +static __inline__ __m256i + __attribute__((__always_inline__, __nodebug__, DIAGNOSE_SLOW)) + _mm256_i64gather_epi64(const long long* base_addr, + __m256i vindex, + const int scale) { + __m256i ret; + ret.v0 = _mm_i64gather_epi64(base_addr, vindex.v0, scale); + ret.v1 = _mm_i64gather_epi64(base_addr, vindex.v1, scale); + return ret; +} + +#endif /* __emscripten_avx2intrin_h__ */ diff --git a/system/include/compat/avxintrin.h b/system/include/compat/avxintrin.h index 45171ebbe5e6e..1dace2dfbdc84 100644 --- a/system/include/compat/avxintrin.h +++ b/system/include/compat/avxintrin.h @@ -4,6 +4,11 @@ * University of Illinois/NCSA Open Source License. Both these licenses can be * found in the LICENSE file. */ + +#ifndef __emscripten_immintrin_h__ +#error "Never use directly; include instead." 
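+/* Rationale (descriptive note, inferred from this patch's immintrin.h change):
+ * <immintrin.h> includes each x86 sub-header behind its feature macro
+ * (__SSE__ ... __AVX2__), so routing all inclusion through it keeps the
+ * feature gating and include order consistent; including this header
+ * directly would bypass those checks. */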
+#endif + #ifndef __emscripten_avxintrin_h__ #define __emscripten_avxintrin_h__ @@ -11,13 +16,6 @@ #error "AVX instruction set not enabled" #endif -#include -#include -#include -#include -#include -#include - typedef struct { __m128d v0; __m128d v1; diff --git a/system/include/compat/immintrin.h b/system/include/compat/immintrin.h index e78b8d1e846ca..c0ef3e73e528a 100644 --- a/system/include/compat/immintrin.h +++ b/system/include/compat/immintrin.h @@ -7,32 +7,36 @@ #ifndef __emscripten_immintrin_h__ #define __emscripten_immintrin_h__ -#ifdef __AVX__ -#include +#ifdef __SSE__ +#include #endif -#ifdef __SSE4_2__ -#include +#ifdef __SSE2__ +#include #endif -#ifdef __SSE4_1__ -#include +#ifdef __SSE3__ +#include #endif #ifdef __SSSE3__ #include #endif -#ifdef __SSE3__ -#include +#ifdef __SSE4_1__ +#include #endif -#ifdef __SSE2__ -#include +#ifdef __SSE4_2__ +#include #endif -#ifdef __SSE__ -#include +#ifdef __AVX__ +#include +#endif + +#ifdef __AVX2__ +#include #endif #endif /* __emscripten_immintrin_h__ */ diff --git a/test/sse/test_avx2.cpp b/test/sse/test_avx2.cpp new file mode 100644 index 0000000000000..586c1dcd55f76 --- /dev/null +++ b/test/sse/test_avx2.cpp @@ -0,0 +1,321 @@ +/* + * Copyright 2024 The Emscripten Authors. All rights reserved. + * Emscripten is available under two separate licenses, the MIT license and the + * University of Illinois/NCSA Open Source License. Both these licenses can be + * found in the LICENSE file. + */ +// This file uses AVX2 by calling different functions with different interesting +// inputs and prints the results. Use a diff tool to compare the results between +// platforms. + +// immintrin.h must be included before test_sse.h +// clang-format off +#include +#include "test_sse.h" +// clang-format on + +bool testNaNBits = true; + +float* interesting_floats = get_interesting_floats(); +int numInterestingFloats = + sizeof(interesting_floats_) / sizeof(interesting_floats_[0]); +uint32_t* interesting_ints = get_interesting_ints(); +int numInterestingInts = + sizeof(interesting_ints_) / sizeof(interesting_ints_[0]); +double* interesting_doubles = get_interesting_doubles(); +int numInterestingDoubles = + sizeof(interesting_doubles_) / sizeof(interesting_doubles_[0]); + +void test_arithmetic(void) { + Ret_M256i_M256i(__m256i, _mm256_add_epi8); + Ret_M256i_M256i(__m256i, _mm256_add_epi16); + Ret_M256i_M256i(__m256i, _mm256_add_epi32); + Ret_M256i_M256i(__m256i, _mm256_add_epi64); + + Ret_M256i_M256i(__m256i, _mm256_adds_epi8); + Ret_M256i_M256i(__m256i, _mm256_adds_epi16); + Ret_M256i_M256i(__m256i, _mm256_adds_epu8); + Ret_M256i_M256i(__m256i, _mm256_adds_epu16); + + Ret_M256i_M256i(__m256i, _mm256_hadd_epi16); + Ret_M256i_M256i(__m256i, _mm256_hadd_epi32); + Ret_M256i_M256i(__m256i, _mm256_hadds_epi16); + + Ret_M256i_M256i(__m256i, _mm256_hsub_epi16); + Ret_M256i_M256i(__m256i, _mm256_hsub_epi32); + Ret_M256i_M256i(__m256i, _mm256_hsubs_epi16); + + Ret_M256i_M256i(__m256i, _mm256_maddubs_epi16); + Ret_M256i_M256i(__m256i, _mm256_madd_epi16); + + Ret_M256i_M256i(__m256i, _mm256_mul_epi32); + Ret_M256i_M256i(__m256i, _mm256_mulhrs_epi16); + Ret_M256i_M256i(__m256i, _mm256_mulhi_epu16); + Ret_M256i_M256i(__m256i, _mm256_mulhi_epi16); + Ret_M256i_M256i(__m256i, _mm256_mullo_epi16); + Ret_M256i_M256i(__m256i, _mm256_mullo_epi32); + Ret_M256i_M256i(__m256i, _mm256_mul_epu32); + + Ret_M256i_M256i(__m256i, _mm256_sad_epu8); + + Ret_M256i_M256i(__m256i, _mm256_sign_epi8); + Ret_M256i_M256i(__m256i, _mm256_sign_epi16); + Ret_M256i_M256i(__m256i, 
_mm256_sign_epi32); + + Ret_M256i_M256i(__m256i, _mm256_sub_epi8); + Ret_M256i_M256i(__m256i, _mm256_sub_epi16); + Ret_M256i_M256i(__m256i, _mm256_sub_epi32); + Ret_M256i_M256i(__m256i, _mm256_sub_epi64); + + Ret_M256i_M256i(__m256i, _mm256_subs_epi8); + Ret_M256i_M256i(__m256i, _mm256_subs_epi16); + Ret_M256i_M256i(__m256i, _mm256_subs_epu8); + Ret_M256i_M256i(__m256i, _mm256_subs_epu16); +} + +void test_special_math(void) { + Ret_M256i(__m256i, _mm256_abs_epi8); + Ret_M256i(__m256i, _mm256_abs_epi16); + Ret_M256i(__m256i, _mm256_abs_epi32); + + Ret_M256i_M256i(__m256i, _mm256_max_epi8); + Ret_M256i_M256i(__m256i, _mm256_max_epi16); + Ret_M256i_M256i(__m256i, _mm256_max_epi32); + + Ret_M256i_M256i(__m256i, _mm256_max_epu8); + Ret_M256i_M256i(__m256i, _mm256_max_epu16); + Ret_M256i_M256i(__m256i, _mm256_max_epu32); + + Ret_M256i_M256i(__m256i, _mm256_min_epi8); + Ret_M256i_M256i(__m256i, _mm256_min_epi16); + Ret_M256i_M256i(__m256i, _mm256_min_epi32); + + Ret_M256i_M256i(__m256i, _mm256_min_epu8); + Ret_M256i_M256i(__m256i, _mm256_min_epu16); + Ret_M256i_M256i(__m256i, _mm256_min_epu32); +} + +void test_logical(void) { + Ret_M256i_M256i(__m256i, _mm256_and_si256); + Ret_M256i_M256i(__m256i, _mm256_andnot_si256); + Ret_M256i_M256i(__m256i, _mm256_or_si256); + Ret_M256i_M256i(__m256i, _mm256_xor_si256); +} + +void test_swizzle(void) { + Ret_M256i_M256i_M256i(__m256i, _mm256_blendv_epi8); + Ret_M256i_M256i_Tint(__m256i, _mm256_blend_epi16); + + Ret_M256i_M256i(__m256i, _mm256_shuffle_epi8); + Ret_M256i_Tint(__m256i, _mm256_shuffle_epi32); + Ret_M256i_Tint(__m256i, _mm256_shufflehi_epi16); + Ret_M256i_Tint(__m256i, _mm256_shufflelo_epi16); + + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi8); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi16); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi32); + Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi64); + + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi8); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi16); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi32); + Ret_M256i_M256i(__m256i, _mm256_unpacklo_epi64); + + Ret_M128(__m128, _mm_broadcastss_ps); + Ret_M128d(__m128d, _mm_broadcastsd_pd); + Ret_M128(__m256, _mm256_broadcastss_ps); + Ret_M128d(__m256d, _mm256_broadcastsd_pd); + Ret_M128i(__m256i, _mm256_broadcastsi128_si256); + Ret_M128i(__m256i, _mm_broadcastsi128_si256); + + Ret_M128i_M128i_Tint(__m128i, _mm_blend_epi32); + Ret_M256i_M256i_Tint(__m256i, _mm256_blend_epi32); + + Ret_M128i(__m128i, _mm_broadcastb_epi8); + Ret_M128i(__m128i, _mm_broadcastw_epi16); + Ret_M128i(__m128i, _mm_broadcastd_epi32); + Ret_M128i(__m128i, _mm_broadcastq_epi64); + Ret_M128i(__m256i, _mm256_broadcastb_epi8); + Ret_M128i(__m256i, _mm256_broadcastw_epi16); + Ret_M128i(__m256i, _mm256_broadcastd_epi32); + Ret_M128i(__m256i, _mm256_broadcastq_epi64); + + Ret_M256i_M256i(__m256i, _mm256_permutevar8x32_epi32); + Ret_M256_M256i(__m256, _mm256_permutevar8x32_ps); + Ret_M256i_Tint(__m256i, _mm256_permute4x64_epi64); + Ret_M256d_Tint(__m256d, _mm256_permute4x64_pd); + Ret_M256i_M256i_Tint(__m256i, _mm256_permute2x128_si256); + + Ret_M256i_Tint(__m128i, _mm256_extracti128_si256); + Ret_M256i_M128i_Tint(__m256i, _mm256_inserti128_si256); +} + +void test_convert(void) { + Ret_M128i(__m256i, _mm256_cvtepi8_epi16); + Ret_M128i(__m256i, _mm256_cvtepi8_epi32); + Ret_M128i(__m256i, _mm256_cvtepi8_epi64); + Ret_M128i(__m256i, _mm256_cvtepi16_epi32); + Ret_M128i(__m256i, _mm256_cvtepi16_epi64); + Ret_M128i(__m256i, _mm256_cvtepi32_epi64); + + Ret_M128i(__m256i, _mm256_cvtepu8_epi16); + 
Ret_M128i(__m256i, _mm256_cvtepu8_epi32); + Ret_M128i(__m256i, _mm256_cvtepu8_epi64); + Ret_M128i(__m256i, _mm256_cvtepu16_epi32); + Ret_M128i(__m256i, _mm256_cvtepu16_epi64); + Ret_M128i(__m256i, _mm256_cvtepu32_epi64); +} + +void test_compare(void) { + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi8); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi16); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi32); + Ret_M256i_M256i(__m256i, _mm256_cmpeq_epi64); + + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi8); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi16); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi32); + Ret_M256i_M256i(__m256i, _mm256_cmpgt_epi64); +} + +void test_misc(void) { + Ret_M256i_M256i_Tint(__m256i, _mm256_mpsadbw_epu8); + + Ret_M256i_M256i(__m256i, _mm256_packs_epi16); + Ret_M256i_M256i(__m256i, _mm256_packs_epi32); + Ret_M256i_M256i(__m256i, _mm256_packus_epi16); + Ret_M256i_M256i(__m256i, _mm256_packus_epi32); + + Ret_M256i_M256i_Tint(__m256i, _mm256_alignr_epi8); + + Ret_M256i(int, _mm256_movemask_epi8); +} + +void test_load(void) { + Ret_IntPtr(__m256i, _mm256_stream_load_si256, __m256i*, 8, 8); + + Ret_IntPtr_M128i(__m128i, _mm_maskload_epi32, int32_t*, 4, 4); + Ret_IntPtr_M256i(__m256i, _mm256_maskload_epi32, int32_t*, 8, 8); + Ret_IntPtr_M128i(__m128i, _mm_maskload_epi64, long long const*, 4, 4); + Ret_IntPtr_M256i(__m256i, _mm256_maskload_epi64, long long const*, 8, 8); + + Ret_M128d_DoublePtr_I32x4_M128d_Tint_body(__m128d, _mm_mask_i32gather_pd, 8); + Ret_M256d_DoublePtr_I32x4_M256d_Tint_body( + __m256d, _mm256_mask_i32gather_pd, 8); + Ret_M128d_DoublePtr_I64x2_M128d_Tint_body(__m128d, _mm_mask_i64gather_pd, 8); + Ret_M256d_DoublePtr_I64x4_M256d_Tint_body( + __m256d, _mm256_mask_i64gather_pd, 8); + + Ret_M128_FloatPtr_I32x4_M128_Tint_body(__m128, _mm_mask_i32gather_ps, 4); + Ret_M256_FloatPtr_I32x8_M256_Tint_body(__m256, _mm256_mask_i32gather_ps, 4); + Ret_M128_FloatPtr_I64x2_M128_Tint_body(__m128, _mm_mask_i64gather_ps, 4); + Ret_M128_FloatPtr_I64x4_M128_Tint_body(__m128, _mm256_mask_i64gather_ps, 4); + + Ret_M128i_Int32Ptr_I32x4_M128i_Tint_body( + __m128i, _mm_mask_i32gather_epi32, 4); + Ret_M256i_Int32Ptr_I32x8_M256i_Tint_body( + __m256i, _mm256_mask_i32gather_epi32, 4); + Ret_M128i_Int32Ptr_I64x2_M128i_Tint_body( + __m128i, _mm_mask_i64gather_epi32, 4); + Ret_M128i_Int32Ptr_I64x4_M128i_Tint_body( + __m128i, _mm256_mask_i64gather_epi32, 4); + + Ret_M128i_Int64Ptr_I32x4_M128i_Tint_body( + __m128i, _mm_mask_i32gather_epi64, 8); + Ret_M256i_Int64Ptr_I32x4_M256i_Tint_body( + __m256i, _mm256_mask_i32gather_epi64, 8); + Ret_M128i_Int64Ptr_I64x2_M128i_Tint_body( + __m128i, _mm_mask_i64gather_epi64, 8); + Ret_M256i_Int64Ptr_I64x4_M256i_Tint_body( + __m256i, _mm256_mask_i64gather_epi64, 8); + + Ret_DoublePtr_I32x4_Tint_body(__m128d, _mm_i32gather_pd, 8); + Ret_DoublePtr_I32x4_Tint_body(__m256d, _mm256_i32gather_pd, 8); + Ret_DoublePtr_I64x2_Tint_body(__m128d, _mm_i64gather_pd, 8); + Ret_DoublePtr_I64x4_Tint_body(__m256d, _mm256_i64gather_pd, 8); + + Ret_FloatPtr_I32x4_Tint_body(__m128, _mm_i32gather_ps, 4); + Ret_FloatPtr_I32x8_Tint_body(__m256, _mm256_i32gather_ps, 4); + Ret_FloatPtr_I64x2_Tint_body(__m128, _mm_i64gather_ps, 4); + Ret_FloatPtr_I64x4_Tint_body(__m128, _mm256_i64gather_ps, 4); + + Ret_IntPtr_I32x4_Tint_body(__m128i, _mm_i32gather_epi32, 4); + Ret_IntPtr_I32x8_Tint_body(__m256i, _mm256_i32gather_epi32, 4); + Ret_IntPtr_I64x2_Tint_body(__m128i, _mm_i64gather_epi32, 4); + Ret_IntPtr_I64x4_Tint_body(__m128i, _mm256_i64gather_epi32, 4); + + Ret_Int64Ptr_I32x4_Tint_body(__m128i, 
_mm_i32gather_epi64, 8); + Ret_Int64Ptr_I32x4_Tint_body(__m256i, _mm256_i32gather_epi64, 8); + Ret_Int64Ptr_I64x2_Tint_body(__m128i, _mm_i64gather_epi64, 8); + Ret_Int64Ptr_I64x4_Tint_body(__m256i, _mm256_i64gather_epi64, 8); +} + +void test_store(void) { + void_OutIntPtr_M128i_M128i(_mm_maskstore_epi32, int*, 16, 4); + void_OutIntPtr_M256i_M256i(_mm256_maskstore_epi32, int*, 32, 4); + void_OutIntPtr_M128i_M128i(_mm_maskstore_epi64, long long*, 16, 8); + void_OutIntPtr_M256i_M256i(_mm256_maskstore_epi64, long long*, 16, 8); +} + +void test_statisticsa(void) { + Ret_M256i_M256i(__m256i, _mm256_avg_epu16); + Ret_M256i_M256i(__m256i, _mm256_avg_epu8); +} + +void test_shift(void) { + Ret_M256i_Tint(__m256i, _mm256_slli_si256); + Ret_M256i_Tint(__m256i, _mm256_bslli_epi128); + + Ret_M256i_Tint(__m256i, _mm256_slli_epi16); + Ret_M256i_Tint(__m256i, _mm256_slli_epi32); + Ret_M256i_Tint(__m256i, _mm256_slli_epi64); + Ret_M256i_M128i(__m256i, _mm256_sll_epi16); + Ret_M256i_M128i(__m256i, _mm256_sll_epi32); + Ret_M256i_M128i(__m256i, _mm256_sll_epi64); + + Ret_M256i_Tint(__m256i, _mm256_srai_epi16); + Ret_M256i_Tint(__m256i, _mm256_srai_epi32); + Ret_M256i_M128i(__m256i, _mm256_sra_epi16); + Ret_M256i_M128i(__m256i, _mm256_sra_epi32); + + Ret_M256i_Tint(__m256i, _mm256_srli_si256); + Ret_M256i_Tint(__m256i, _mm256_bsrli_epi128); + + Ret_M256i_Tint(__m256i, _mm256_srli_epi16); + Ret_M256i_Tint(__m256i, _mm256_srli_epi32); + Ret_M256i_Tint(__m256i, _mm256_srli_epi64); + Ret_M256i_M128i(__m256i, _mm256_srl_epi16); + Ret_M256i_M128i(__m256i, _mm256_srl_epi32); + Ret_M256i_M128i(__m256i, _mm256_srl_epi64); + + Ret_M128i_M128i(__m128i, _mm_sllv_epi32); + Ret_M256i_M256i(__m256i, _mm256_sllv_epi32); + Ret_M128i_M128i(__m128i, _mm_sllv_epi64); + Ret_M256i_M256i(__m256i, _mm256_sllv_epi64); + + Ret_M128i_M128i(__m128i, _mm_srav_epi32); + Ret_M256i_M256i(__m256i, _mm256_srav_epi32); + + Ret_M128i_M128i(__m128i, _mm_srlv_epi32); + Ret_M256i_M256i(__m256i, _mm256_srlv_epi32); + Ret_M128i_M128i(__m128i, _mm_srlv_epi64); + Ret_M256i_M256i(__m256i, _mm256_srlv_epi64); +} + +int main() { + assert(numInterestingFloats % 8 == 0); + assert(numInterestingInts % 8 == 0); + assert(numInterestingDoubles % 4 == 0); + + test_arithmetic(); + test_special_math(); + test_logical(); + test_swizzle(); + test_convert(); + test_compare(); + test_misc(); + test_load(); + test_store(); + test_statisticsa(); + test_shift(); +} diff --git a/test/sse/test_sse.h b/test/sse/test_sse.h index f1660862a4fd1..e8a1ff608d199 100644 --- a/test/sse/test_sse.h +++ b/test/sse/test_sse.h @@ -967,16 +967,20 @@ double *getTempOutDoubleStore(int alignmentBytes) { return (double*)getTempOutFl printf("%s(%s) = %s\n", #func, str, str2); \ } -#define Ret_FloatPtr_M128i(Ret_type, func, numElemsAccessed, inc) \ - for(int i = 0; i+numElemsAccessed <= numInterestingFloats; i += inc) \ - for(int j = 0; j < numInterestingInts / 4; ++j) \ - { \ - float *ptr = interesting_floats + i; \ - __m128i m1 = (__m128i)E1_Int(interesting_ints, j*4, numInterestingInts); \ - Ret_type ret = func(ptr, m1); \ - char str[256]; tostr(ptr, numElemsAccessed, str); \ - char str2[256]; tostr(&ret, str2); \ - printf("%s(%s) = %s\n", #func, str, str2); \ +#define Ret_FloatPtr_M128i(Ret_type, func, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingFloats; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + float* ptr = interesting_floats + i; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type 
ret = func(ptr, m1); \ + char str[256]; \ + tostr(ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ } #define Ret_Float4(Ret_type, func, inc) \ @@ -1559,8 +1563,10 @@ void tostr_approx(__m256* m, char* outstr, bool approximate) { char str[256]; \ tostr(ptr, numElemsAccessed, str); \ char str2[256]; \ - tostr(&ret, str2); \ - printf("%s(%s) = %s\n", #func, str, str2); \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ } #define Ret_FloatPtr_M256i(Ret_type, func, numElemsAccessed, inc) \ @@ -2157,3 +2163,867 @@ void tostr_approx(__m256* m, char* outstr, bool approximate) { } #endif + +#ifdef __AVX2__ + +#define Ret_M256i_M256i_M256i(Ret_type, func) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + tmp = (__m128i)E2_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + tmp = (__m128i)E1_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(m1, m2, m3); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(&m3, str3); \ + char str4[256]; \ + tostr(&ret, str4); \ + printf("%s(%s, %s, %s) = %s\n", #func, str, str2, str3, str4); \ + } + +#define Ret_M256i_M128i(Ret_type, func) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + __m128i m2 = \ + (__m128i)E2_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type ret = func(m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define Ret_IntPtr_M128i(Ret_type, func, Ptr_type, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingInts; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + uint32_t* ptr = interesting_ints + i; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + Ret_type ret = func((Ptr_type)ptr, m1); \ + char str[256]; \ + tostr((int*)ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define Ret_IntPtr_M256i(Ret_type, func, Ptr_type, numElemsAccessed, inc) \ + for (int i = 0; i + numElemsAccessed <= numInterestingInts; i += inc) \ + for (int j = 0; j < numInterestingInts / 4; ++j) { \ + uint32_t* ptr = interesting_ints + i; \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func((Ptr_type)ptr, m1); \ + char str[256]; \ + tostr((int*)ptr, numElemsAccessed, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s) = %s\n", #func, str, str2, str3); \ + } + +#define 
void_OutIntPtr_M128i_M128i( \ + func, Ptr_type, numBytesWritten, alignmentBytes) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int offset = 0; offset < numBytesWritten; offset += alignmentBytes) \ + for (int k = 0; k < 4; ++k) { \ + uintptr_t base = (uintptr_t)getTempOutIntStore(16); \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m128i m2 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + align1_int* out = (align1_int*)(base + offset); \ + func((Ptr_type)out, m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(out, (numBytesWritten + sizeof(int) - 1) / sizeof(int), str3); \ + printf( \ + "%s(p:align=%d, %s, %s) = %s\n", #func, offset, str, str2, str3); \ + } + +#define void_OutIntPtr_M256i_M256i( \ + func, Ptr_type, numBytesWritten, alignmentBytes) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int j = 0; j < numInterestingInts / 4; ++j) \ + for (int offset = 0; offset < numBytesWritten; offset += alignmentBytes) \ + for (int k = 0; k < 4; ++k) { \ + uintptr_t base = (uintptr_t)getTempOutIntStore(16); \ + __m128i tmp = \ + (__m128i)E1_Int(interesting_ints, j * 4, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + tmp = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + align1_int* out = (align1_int*)(base + offset); \ + func((Ptr_type)out, m1, m2); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(&m2, str2); \ + char str3[256]; \ + tostr(out, (numBytesWritten + sizeof(int) - 1) / sizeof(int), str3); \ + printf( \ + "%s(p:align=%d, %s, %s) = %s\n", #func, offset, str, str2, str3); \ + } + +// Generate random 32x4 index +__m128i GenRandom32BitIndex(int i, int n, int prime) { + return _mm_set_epi32((i * prime) % n, + ((i + 1) * prime) % n, + ((i + 2) * prime) % n, + ((i + 3) * prime) % n); +} + +// Generate random 64x2 index +__m128i GenRandom64BitIndex(int i, int n, int prime) { + return _mm_set_epi64x((i * prime) % n, ((i + 3) * prime) % n); +} + +#define Ret_DoublePtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_DoublePtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_DoublePtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + double* ptr = interesting_doubles; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) 
= %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I32x8_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i tmp = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_FloatPtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + float* ptr = interesting_floats; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I32x8_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i tmp = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_IntPtr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int32_t* ptr = (int*)interesting_ints; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m256i m1 = 
_mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I32x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I64x2_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_Int64Ptr_I64x4_Tint_body(Ret_type, func, Tint) \ + for (int j = 0; j < 4096; ++j) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m256i m1 = _mm256_set_m128i(tmp, tmp); \ + Ret_type ret = func(ptr, m1, Tint); \ + char str[256]; \ + tostr(ptr, 4, str); \ + char str2[256]; \ + tostr(&m1, str2); \ + char str3[256]; \ + tostr(&ret, str3); \ + printf("%s(%s, %s, %d) = %s\n", #func, str, str2, Tint, str3); \ + } + +#define Ret_M128d_DoublePtr_I32x4_M128d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d m1 = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + __m128d m3 = \ + E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256d_DoublePtr_I32x4_M256d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d tmp = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m256d m1 = _mm256_set_m128d(tmp, tmp); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingDoubles, 1787); \ + tmp = E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + __m256d m3 = _mm256_set_m128d(tmp, tmp); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + 
str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128d_DoublePtr_I64x2_M128d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d m1 = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m128d m3 = \ + E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256d_DoublePtr_I64x4_M256d_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingDoubles / 2; ++i) \ + for (int k = 0; k < 2; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingDoubles / 2; ++l) { \ + double* ptr = interesting_doubles; \ + __m128d tmp = \ + E1_Double(interesting_doubles, i * 2 + k, numInterestingDoubles); \ + __m256d m1 = _mm256_set_m128d(tmp, tmp); \ + __m128i tmp2 = GenRandom64BitIndex(j, numInterestingDoubles, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + tmp = E2_Double(interesting_doubles, l * 2, numInterestingDoubles); \ + __m256d m3 = _mm256_set_m128d(tmp, tmp); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I32x4_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256_FloatPtr_I32x8_M256_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 tmp = \ + E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m256 m1 = _mm256_set_m128(tmp, tmp); \ + __m128i tmp2 = GenRandom32BitIndex(j, numInterestingFloats, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128 tmp3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + __m256 m3 = _mm256_set_m128(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, 
m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I64x2_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128_FloatPtr_I64x4_M128_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingFloats / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingFloats / 4; ++l) { \ + float* ptr = interesting_floats; \ + __m128 m1 = E1(interesting_floats, i * 4 + k, numInterestingFloats); \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingFloats, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + __m128 m3 = E2(interesting_floats, l * 4, numInterestingFloats); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I32x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int32Ptr_I32x8_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i tmp2 = GenRandom32BitIndex(j, numInterestingInts, 
1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I64x2_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int32Ptr_I64x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int32_t* ptr = (int32_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i tmp = GenRandom64BitIndex(j, numInterestingInts, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp, tmp); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int64Ptr_I32x4_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int64Ptr_I32x4_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for 
(int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i m2 = GenRandom32BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M128i_Int64Ptr_I64x2_M128i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i m1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m128i m2 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m128i m3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#define Ret_M256i_Int64Ptr_I64x4_M256i_Tint_body(Ret_type, func, Tint) \ + for (int i = 0; i < numInterestingInts / 4; ++i) \ + for (int k = 0; k < 4; ++k) \ + for (int j = 0; j < 100; ++j) \ + for (int l = 0; l < numInterestingInts / 4; ++l) { \ + int64_t* ptr = (int64_t*)interesting_ints; \ + __m128i tmp1 = \ + (__m128i)E1_Int(interesting_ints, i * 4 + k, numInterestingInts); \ + __m256i m1 = _mm256_set_m128i(tmp1, tmp1); \ + __m128i tmp2 = GenRandom64BitIndex(j, numInterestingInts / 2, 1787); \ + __m256i m2 = _mm256_set_m128i(tmp2, tmp2); \ + __m128i tmp3 = \ + (__m128i)E2_Int(interesting_ints, l * 4, numInterestingInts); \ + __m256i m3 = _mm256_set_m128i(tmp3, tmp3); \ + Ret_type ret = func(m1, ptr, m2, m3, Tint); \ + char str[256]; \ + tostr(&m1, str); \ + char str2[256]; \ + tostr(ptr, 4, str2); \ + char str3[256]; \ + tostr(&m2, str3); \ + char str4[256]; \ + tostr(&m3, str4); \ + char str5[256]; \ + tostr(&ret, str5); \ + printf("%s(%s, %s, %s, %s %d) = %s\n", \ + #func, \ + str, \ + str2, \ + str3, \ + str4, \ + Tint, \ + str5); \ + } + +#endif diff --git a/test/test_core.py b/test/test_core.py index f22252f32ce66..e50543d4ea3ef 100644 --- a/test/test_core.py +++ b/test/test_core.py @@ -6573,6 +6573,25 @@ def test_avx(self, args): self.maybe_closure() self.do_runf(src, native_result) + # Tests invoking the SIMD API via x86 AVX2 avx2intrin.h header (_mm_x()/_mm256_x() functions) + @wasm_simd + @requires_native_clang + @is_slow_test + @no_asan('local count too large') + @no_ubsan('local count too large') + @parameterized({ + '': ([],), + 'nontrapping': (['-mnontrapping-fptoint'],) + }) + def test_avx2(self, args): + src = test_file('sse/test_avx2.cpp') + self.run_process([shared.CLANG_CXX, src, '-mavx2', '-Wno-argument-outside-range', '-Wpedantic', '-o', 'test_avx2', 
'-D_CRT_SECURE_NO_WARNINGS=1'] + clang_native.get_clang_native_args(), stdout=PIPE) + native_result = self.run_process('./test_avx2', stdout=PIPE).stdout + + self.emcc_args += ['-I' + test_file('sse'), '-mavx2', '-Wno-argument-outside-range', '-sSTACK_SIZE=1MB'] + args + self.maybe_closure() + self.do_runf(src, native_result) + @wasm_simd def test_sse_diagnostics(self): self.emcc_args.remove('-Werror') From be686e61e04359479d3984b559cf953ad907a3b1 Mon Sep 17 00:00:00 2001 From: jiepan Date: Mon, 9 Dec 2024 14:52:54 +0800 Subject: [PATCH 2/6] Address review and fix test --- site/source/docs/porting/simd.rst | 4 +++- test/sse/test_avx2.cpp | 10 +++++++++- test/test_other.py | 7 +++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/site/source/docs/porting/simd.rst b/site/source/docs/porting/simd.rst index e063e4a6ca28a..5257a94d1dea8 100644 --- a/site/source/docs/porting/simd.rst +++ b/site/source/docs/porting/simd.rst @@ -1227,7 +1227,9 @@ The following table highlights the availability and expected performance of diff * - _mm_i64gather_epi64 - ❌ scalarized -All the 128-bit wide instructions from AVX2 instruction set are listed. Only a small part of the 256-bit AVX2 instruction set are listed, most of the 256-bit wide AVX2 instructions are emulated by two 128-bit wide instructions. +All the 128-bit wide instructions from AVX2 instruction set are listed. +Only a small part of the 256-bit AVX2 instruction set are listed, most of the +256-bit wide AVX2 instructions are emulated by two 128-bit wide instructions. ====================================================== Compiling SIMD code targeting ARM NEON instruction set diff --git a/test/sse/test_avx2.cpp b/test/sse/test_avx2.cpp index 586c1dcd55f76..3fd4682336730 100644 --- a/test/sse/test_avx2.cpp +++ b/test/sse/test_avx2.cpp @@ -262,7 +262,8 @@ void test_statisticsa(void) { Ret_M256i_M256i(__m256i, _mm256_avg_epu8); } -void test_shift(void) { +// Split test_shift into two functions to reduce memory consumption +void test_shift1(void) { Ret_M256i_Tint(__m256i, _mm256_slli_si256); Ret_M256i_Tint(__m256i, _mm256_bslli_epi128); @@ -277,7 +278,9 @@ void test_shift(void) { Ret_M256i_Tint(__m256i, _mm256_srai_epi32); Ret_M256i_M128i(__m256i, _mm256_sra_epi16); Ret_M256i_M128i(__m256i, _mm256_sra_epi32); +} +void test_shift2(void) { Ret_M256i_Tint(__m256i, _mm256_srli_si256); Ret_M256i_Tint(__m256i, _mm256_bsrli_epi128); @@ -302,6 +305,11 @@ void test_shift(void) { Ret_M256i_M256i(__m256i, _mm256_srlv_epi64); } +void test_shift(void) { + test_shift1(); + test_shift2(); +} + int main() { assert(numInterestingFloats % 8 == 0); assert(numInterestingInts % 8 == 0); diff --git a/test/test_other.py b/test/test_other.py index 680cdaf4c1b5a..8c3da1144fc65 100644 --- a/test/test_other.py +++ b/test/test_other.py @@ -9527,7 +9527,8 @@ def test_standalone_system_headers(self): print('header: ' + header) # These headers cannot be included in isolation. # e.g: error: unknown type name 'EGLDisplay' - if header in ['eglext.h', 'SDL_config_macosx.h', 'glext.h', 'gl2ext.h']: + # Don't include avxintrin.h and avx2inrin.h directly, include immintrin.h instead + if header in ['eglext.h', 'SDL_config_macosx.h', 'glext.h', 'gl2ext.h', 'avxintrin.h', 'avx2intrin.h']: continue # These headers are C++ only and cannot be included from C code. 
# But we still want to check they can be included on there own without @@ -9541,7 +9542,9 @@ def test_standalone_system_headers(self): if directory and directory != 'compat': header = f'{directory}/{header}' inc = f'#include <{header}>\n__attribute__((weak)) int foo;\n' - cflags = ['-Werror', '-Wall', '-pedantic', '-mavx', '-msimd128', '-msse3'] + cflags = ['-Werror', '-Wall', '-pedantic', '-msimd128', '-msse4'] + if header == 'immintrin.h': + cflags.append('-mavx2') if cxx_only: create_file('a.cxx', inc) create_file('b.cxx', inc) From 50a767e05a8be65c8e5e06a4d693fb528ce88ffb Mon Sep 17 00:00:00 2001 From: jiepan Date: Mon, 9 Dec 2024 16:51:58 +0800 Subject: [PATCH 3/6] Split test case to reduce memory consumption --- test/sse/test_avx2.cpp | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/test/sse/test_avx2.cpp b/test/sse/test_avx2.cpp index 3fd4682336730..c84161e31270d 100644 --- a/test/sse/test_avx2.cpp +++ b/test/sse/test_avx2.cpp @@ -102,7 +102,8 @@ void test_logical(void) { Ret_M256i_M256i(__m256i, _mm256_xor_si256); } -void test_swizzle(void) { +// Split test_swizzle to reduce memory consumption +void test_swizzle1(void) { Ret_M256i_M256i_M256i(__m256i, _mm256_blendv_epi8); Ret_M256i_M256i_Tint(__m256i, _mm256_blend_epi16); @@ -110,7 +111,9 @@ void test_swizzle(void) { Ret_M256i_Tint(__m256i, _mm256_shuffle_epi32); Ret_M256i_Tint(__m256i, _mm256_shufflehi_epi16); Ret_M256i_Tint(__m256i, _mm256_shufflelo_epi16); +} +void test_swizzle2(void) { Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi8); Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi16); Ret_M256i_M256i(__m256i, _mm256_unpackhi_epi32); @@ -143,6 +146,9 @@ void test_swizzle(void) { Ret_M256i_M256i(__m256i, _mm256_permutevar8x32_epi32); Ret_M256_M256i(__m256, _mm256_permutevar8x32_ps); Ret_M256i_Tint(__m256i, _mm256_permute4x64_epi64); +} + +void test_swizzle3(void) { Ret_M256d_Tint(__m256d, _mm256_permute4x64_pd); Ret_M256i_M256i_Tint(__m256i, _mm256_permute2x128_si256); @@ -150,6 +156,12 @@ void test_swizzle(void) { Ret_M256i_M128i_Tint(__m256i, _mm256_inserti128_si256); } +void test_swizzle(void) { + test_swizzle1(); + test_swizzle2(); + test_swizzle3(); +} + void test_convert(void) { Ret_M128i(__m256i, _mm256_cvtepi8_epi16); Ret_M128i(__m256i, _mm256_cvtepi8_epi32); @@ -191,7 +203,8 @@ void test_misc(void) { Ret_M256i(int, _mm256_movemask_epi8); } -void test_load(void) { +// Split test_load to reduce memory consumption +void test_load1(void) { Ret_IntPtr(__m256i, _mm256_stream_load_si256, __m256i*, 8, 8); Ret_IntPtr_M128i(__m128i, _mm_maskload_epi32, int32_t*, 4, 4); @@ -228,7 +241,9 @@ void test_load(void) { __m128i, _mm_mask_i64gather_epi64, 8); Ret_M256i_Int64Ptr_I64x4_M256i_Tint_body( __m256i, _mm256_mask_i64gather_epi64, 8); +} +void test_load2(void) { Ret_DoublePtr_I32x4_Tint_body(__m128d, _mm_i32gather_pd, 8); Ret_DoublePtr_I32x4_Tint_body(__m256d, _mm256_i32gather_pd, 8); Ret_DoublePtr_I64x2_Tint_body(__m128d, _mm_i64gather_pd, 8); @@ -250,6 +265,11 @@ void test_load(void) { Ret_Int64Ptr_I64x4_Tint_body(__m256i, _mm256_i64gather_epi64, 8); } +void test_load(void) { + test_load1(); + test_load2(); +} + void test_store(void) { void_OutIntPtr_M128i_M128i(_mm_maskstore_epi32, int*, 16, 4); void_OutIntPtr_M256i_M256i(_mm256_maskstore_epi32, int*, 32, 4); @@ -262,13 +282,16 @@ void test_statisticsa(void) { Ret_M256i_M256i(__m256i, _mm256_avg_epu8); } -// Split test_shift into two functions to reduce memory consumption +// Split test_shift to reduce memory consumption void 
test_shift1(void) { Ret_M256i_Tint(__m256i, _mm256_slli_si256); Ret_M256i_Tint(__m256i, _mm256_bslli_epi128); Ret_M256i_Tint(__m256i, _mm256_slli_epi16); Ret_M256i_Tint(__m256i, _mm256_slli_epi32); +} + +void test_shift2(void) { Ret_M256i_Tint(__m256i, _mm256_slli_epi64); Ret_M256i_M128i(__m256i, _mm256_sll_epi16); Ret_M256i_M128i(__m256i, _mm256_sll_epi32); @@ -278,10 +301,11 @@ void test_shift1(void) { Ret_M256i_Tint(__m256i, _mm256_srai_epi32); Ret_M256i_M128i(__m256i, _mm256_sra_epi16); Ret_M256i_M128i(__m256i, _mm256_sra_epi32); -} -void test_shift2(void) { Ret_M256i_Tint(__m256i, _mm256_srli_si256); +} + +void test_shift3(void) { Ret_M256i_Tint(__m256i, _mm256_bsrli_epi128); Ret_M256i_Tint(__m256i, _mm256_srli_epi16); @@ -308,6 +332,7 @@ void test_shift2(void) { void test_shift(void) { test_shift1(); test_shift2(); + test_shift3(); } int main() { From a6d1270f5526274e7d4274997b1181e8e2b3066a Mon Sep 17 00:00:00 2001 From: jiepan Date: Mon, 9 Dec 2024 21:35:02 +0800 Subject: [PATCH 4/6] Update ChangeLog --- ChangeLog.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ChangeLog.md b/ChangeLog.md index 67160174211a6..66c3ec511fe82 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -20,6 +20,9 @@ See docs/process.md for more on how version tagging works. 4.0.0 (in development) ---------------------- +- Added support for compiling AVX2 intrinsics, 256-bit wide intrinsic is emulated + on top of 128-bit Wasm SIMD instruction set. (#23035). Pass `-msimd128 -mavx2` + to enable targeting AVX2. - compiler-rt, libcxx, libcxxabi, and libunwind were updated to LLVM 19.1.6. (#22937, #22994, and #23294) - The default Safari version targeted by Emscripten has been raised from 14.1 From b12f35bff521ed28952d11515845937d1762c9ab Mon Sep 17 00:00:00 2001 From: jiepan Date: Thu, 9 Jan 2025 17:20:28 +0800 Subject: [PATCH 5/6] Fix build error --- system/include/compat/avx2intrin.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/system/include/compat/avx2intrin.h b/system/include/compat/avx2intrin.h index 80834dbce01de..e4bdde6c05e9e 100644 --- a/system/include/compat/avx2intrin.h +++ b/system/include/compat/avx2intrin.h @@ -1051,9 +1051,9 @@ static __inline__ __m256i index[i + 4] = ((__i32x4)__b.v1)[i] & 7; } - for (int i = 0; i < 8; i++) { - lane[i] = index[i] < 4 ? ((__i32x4)(__a.v0))[index[i]] - : ((__i32x4)(__a.v1))[index[i] - 4]; + for (int j = 0; j < 8; j++) { + lane[j] = index[j] < 4 ? ((__i32x4)(__a.v0))[index[j]] + : ((__i32x4)(__a.v1))[index[j] - 4]; } ret.v0 = (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); @@ -1081,9 +1081,9 @@ static __inline__ __m256 index[i] = ((__i32x4)__b.v0)[i] & 7; index[i + 4] = ((__i32x4)__b.v1)[i] & 7; } - for (int i = 0; i < 8; i++) { - lane[i] = index[i] < 4 ? ((__f32x4)(__a.v0))[index[i]] - : ((__f32x4)(__a.v1))[index[i] - 4]; + for (int j = 0; j < 8; j++) { + lane[j] = index[j] < 4 ? 
((__f32x4)(__a.v0))[index[j]] + : ((__f32x4)(__a.v1))[index[j] - 4]; } ret.v0 = (__m128)wasm_f32x4_make(lane[0], lane[1], lane[2], lane[3]); ret.v1 = (__m128)wasm_f32x4_make(lane[4], lane[5], lane[6], lane[7]); From 50f888531697f61c5969d6b8f76a20e35ae66033 Mon Sep 17 00:00:00 2001 From: jiepan Date: Fri, 10 Jan 2025 10:41:13 +0800 Subject: [PATCH 6/6] Change _mm_maskload_epi32/_mm_maskload_epi64 implementation --- site/source/docs/porting/simd.rst | 4 ++-- system/include/compat/avx2intrin.h | 26 ++++++++++++-------------- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/site/source/docs/porting/simd.rst b/site/source/docs/porting/simd.rst index 5257a94d1dea8..6409c0811b024 100644 --- a/site/source/docs/porting/simd.rst +++ b/site/source/docs/porting/simd.rst @@ -1177,9 +1177,9 @@ The following table highlights the availability and expected performance of diff * - _mm256_permute4x64_epi64 - 💡 emulated with two general shuffle * - _mm_maskload_epi32 - - ⚠️ emulated with SIMD load+shift+and + - ❌ scalarized * - _mm_maskload_epi64 - - ⚠️ emulated with SIMD load+shift+and + - ❌ scalarized * - _mm_maskstore_epi32 - ❌ scalarized * - _mm_maskstore_epi64 diff --git a/system/include/compat/avx2intrin.h b/system/include/compat/avx2intrin.h index e4bdde6c05e9e..072a5f74c902c 100644 --- a/system/include/compat/avx2intrin.h +++ b/system/include/compat/avx2intrin.h @@ -1131,24 +1131,22 @@ _mm256_inserti128_si256(__m256i __a, __m128i __b, const int imm8) { static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) _mm_maskload_epi32(int32_t const* __p, __m128i __m) { - // This may cause an out-of-bounds memory load since we first load and - // then mask, but since there are no segmentation faults in Wasm memory - // accesses, that is ok (as long as we are within the heap bounds - - // a negligible limitation in practice) - // TODO, loadu or load, 128-bit align? - return _mm_and_si128(_mm_load_si128((const __m128i*)__p), - _mm_srai_epi32(__m, 31)); + int32_t lane[4]; + for (size_t i = 0; i < 4; i++) { + uint32_t mask = ((__i32x4)__m)[i]; + lane[i] = ((mask >> 31) & 0x1) ? __p[i] : 0; + } + return (__m128i)wasm_i32x4_make(lane[0], lane[1], lane[2], lane[3]); } static __inline__ __m128i __attribute__((__always_inline__, __nodebug__)) _mm_maskload_epi64(int64_t const* __p, __m128i __m) { - // This may cause an out-of-bounds memory load since we first load and - // then mask, but since there are no segmentation faults in Wasm memory - // accesses, that is ok (as long as we are within the heap bounds - - // a negligible limitation in practice) - // TODO, loadu or load, 128-bit align? - return _mm_and_si128(_mm_load_si128((const __m128i*)__p), - wasm_i64x2_shr(__m, 63)); + int64_t lane[2]; + for (size_t i = 0; i < 2; i++) { + uint64_t mask = ((__i64x2)__m)[i]; + lane[i] = ((mask >> 63) & 0x1) ? __p[i] : 0; + } + return (__m128i)wasm_i64x2_make(lane[0], lane[1]); } static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))