@@ -408,10 +408,10 @@ inline float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i)
     _Analysis_assume_( i < 4 );
 #if defined(_XM_NO_INTRINSICS_)
     return V.vector4_f32[i];
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    return V.n128_f32[i];
-#elif defined(_XM_SSE_INTRINSICS_)
-    return V.m128_f32[i];
+#else
+    XMVECTORF32 U;
+    U.v = V;
+    return U.f[i];
 #endif
 }
 
@@ -478,10 +478,10 @@ inline void XM_CALLCONV XMVectorGetByIndexPtr(float *f, FXMVECTOR V, size_t i)
     _Analysis_assume_( i < 4 );
 #if defined(_XM_NO_INTRINSICS_)
     *f = V.vector4_f32[i];
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    *f = V.n128_f32[i];
-#elif defined(_XM_SSE_INTRINSICS_)
-    *f = V.m128_f32[i];
+#else
+    XMVECTORF32 U;
+    U.v = V;
+    *f = U.f[i];
 #endif
 }
 
@@ -562,10 +562,10 @@ inline uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i)
     _Analysis_assume_( i < 4 );
 #if defined(_XM_NO_INTRINSICS_)
     return V.vector4_u32[i];
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    return V.n128_u32[i];
-#elif defined(_XM_SSE_INTRINSICS_)
-    return V.m128_u32[i];
+#else
+    XMVECTORU32 U;
+    U.v = V;
+    return U.u[i];
 #endif
 }
 
@@ -642,10 +642,10 @@ inline void XM_CALLCONV XMVectorGetIntByIndexPtr(uint32_t *x, FXMVECTOR V, size_
     _Analysis_assume_( i < 4 );
 #if defined(_XM_NO_INTRINSICS_)
     *x = V.vector4_u32[i];
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    *x = V.n128_u32[i];
-#elif defined(_XM_SSE_INTRINSICS_)
-    *x = V.m128_u32[i];
+#else
+    XMVECTORU32 U;
+    U.v = V;
+    *x = U.u[i];
 #endif
 }
 
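Note (illustrative only, not part of the patch): the four getters above now funnel every intrinsics build through the XMVECTORF32 / XMVECTORU32 unions instead of touching the platform-specific __m128 / float32x4_t members directly. A minimal sketch of that read path, assuming the DirectXMath header is available:

#include <DirectXMath.h>
#include <cassert>

using namespace DirectX;

int main()
{
    XMVECTOR V = XMVectorSet(1.0f, 2.0f, 3.0f, 4.0f);

    // XMVECTORF32 is a union of a float[4] (.f) and an XMVECTOR (.v),
    // so copying V into it gives portable per-element access on both
    // the SSE and ARM-NEON builds.
    XMVECTORF32 U;
    U.v = V;
    assert(U.f[2] == XMVectorGetByIndex(V, 2));   // both read 3.0f
    return 0;
}
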
@@ -731,14 +731,11 @@ inline XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V, float f, size_t i)
     U = V;
     U.vector4_f32[i] = f;
     return U;
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    XMVECTOR U = V;
-    U.n128_f32[i] = f;
-    return U;
-#elif defined(_XM_SSE_INTRINSICS_)
-    XMVECTOR U = V;
-    U.m128_f32[i] = f;
-    return U;
+#else
+    XMVECTORF32 U;
+    U.v = V;
+    U.f[i] = f;
+    return U.v;
 #endif
 }
 
@@ -863,14 +860,11 @@ inline XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(FXMVECTOR V, const float *f, s
     U = V;
     U.vector4_f32[i] = *f;
     return U;
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    XMVECTOR U = V;
-    U.n128_f32[i] = *f;
-    return U;
-#elif defined(_XM_SSE_INTRINSICS_)
-    XMVECTOR U = V;
-    U.m128_f32[i] = *f;
-    return U;
+#else
+    XMVECTORF32 U;
+    U.v = V;
+    U.f[i] = *f;
+    return U.v;
 #endif
 }
 
@@ -990,12 +984,7 @@ inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_
     U = V;
     U.vector4_u32[i] = x;
     return U;
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    XMVECTORU32 tmp;
-    tmp.v = V;
-    tmp.u[i] = x;
-    return tmp;
-#elif defined(_XM_SSE_INTRINSICS_)
+#else
     XMVECTORU32 tmp;
     tmp.v = V;
     tmp.u[i] = x;
@@ -1125,12 +1114,7 @@ inline XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(FXMVECTOR V, const uint32_t
     U = V;
     U.vector4_u32[i] = *x;
     return U;
-#elif defined(_XM_ARM_NEON_INTRINSICS_)
-    XMVECTORU32 tmp;
-    tmp.v = V;
-    tmp.u[i] = *x;
-    return tmp;
-#elif defined(_XM_SSE_INTRINSICS_)
+#else
     XMVECTORU32 tmp;
     tmp.v = V;
     tmp.u[i] = *x;
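
Note (illustrative only, not part of the patch): the setters use the same union as a scratch copy — the input vector is loaded into .v, one lane is overwritten through .f or .u, and .v is returned. A hedged usage sketch of that pattern; the helper name below is hypothetical:

#include <DirectXMath.h>
#include <cassert>

using namespace DirectX;

// Hypothetical helper mirroring the new #else branch of XMVectorSetByIndex.
XMVECTOR SetLaneExample(FXMVECTOR V, float value, size_t i)
{
    assert(i < 4);
    XMVECTORF32 U;      // float[4] / XMVECTOR union used as a scratch copy
    U.v = V;            // copy the input vector into the union
    U.f[i] = value;     // overwrite one lane through the array view
    return U.v;         // return the vector view, as the patched code does
}

int main()
{
    XMVECTOR R = SetLaneExample(XMVectorZero(), 5.0f, 1);
    assert(XMVectorGetY(R) == 5.0f);
    return 0;
}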