@@ -141,16 +141,6 @@ unsafe extern "unadjusted" {
     fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
 }
 
-#[repr(C, packed)]
-#[derive(Copy)]
-struct Unaligned<T>(T);
-
-impl<T: Copy> Clone for Unaligned<T> {
-    fn clone(&self) -> Unaligned<T> {
-        *self
-    }
-}
-
 /// Loads a `v128` vector from the given heap address.
 ///
 /// This intrinsic will emit a load with an alignment of 1. While this is
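For context, the removed `Unaligned<T>` wrapper worked because `repr(packed)` lowers the type's alignment to 1, so dereferencing a `*const Unaligned<T>` emits an alignment-1 load; `ptr::read_unaligned` expresses the same thing directly. A minimal standalone sketch of the equivalence (not part of this patch; buffer and names are illustrative):

```rust
// Illustrative only: `repr(packed)` makes the wrapper's alignment 1,
// so a deref through it is exactly an unaligned read.
#[repr(C, packed)]
struct Unaligned<T>(T);

fn main() {
    let bytes = [0u8, 1, 2, 3, 4];
    // Offset 1 is misaligned for u32 whenever the array happens to be 4-aligned.
    let p = unsafe { bytes.as_ptr().add(1) }.cast::<u32>();

    // Old approach: dereference through the packed wrapper.
    let via_wrapper = unsafe { (*p.cast::<Unaligned<u32>>()).0 };
    // New approach: the standard-library primitive for the same load.
    let via_read = unsafe { p.read_unaligned() };

    assert_eq!(via_wrapper, via_read);
}
```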
@@ -179,7 +169,7 @@ impl<T: Copy> Clone for Unaligned<T> {
 #[doc(alias("v128.load"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_load(m: *const v128) -> v128 {
-    (*(m as *const Unaligned<v128>)).0
+    m.read_unaligned()
 }
 
 /// Load eight 8-bit integers and sign extend each one to a 16-bit lane
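A hypothetical usage sketch for the rewritten `v128_load` (the helper name and bounds-check policy are ours; assumes a wasm32 target with the `simd128` feature enabled):

```rust
use core::arch::wasm32::*;

// Hypothetical helper: load a v128 from an arbitrary, possibly
// unaligned, byte offset.
fn v128_load_at(bytes: &[u8], offset: usize) -> v128 {
    assert!(bytes.len() >= offset + 16);
    // SAFETY: the bounds check guarantees 16 readable bytes, and
    // `v128.load` has an alignment requirement of 1.
    unsafe { v128_load(bytes.as_ptr().add(offset) as *const v128) }
}
```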
@@ -196,8 +186,8 @@ pub unsafe fn v128_load(m: *const v128) -> v128 {
 #[doc(alias("v128.load8x8_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
-    let m = *(m as *const Unaligned<simd::i8x8>);
-    simd_cast::<_, simd::i16x8>(m.0).v128()
+    let m = m.cast::<simd::i8x8>().read_unaligned();
+    simd_cast::<_, simd::i16x8>(m).v128()
 }
 
 /// Load eight 8-bit integers and zero extend each one to a 16-bit lane
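A small usage sketch showing the sign-extending behavior the docs describe (illustrative; same wasm32/`simd128` assumptions as above):

```rust
use core::arch::wasm32::*;

fn demo_sign_extend() {
    let bytes: [i8; 8] = [-1, 0, 1, 2, 3, 4, 5, 6];
    // SAFETY: the array reference guarantees 8 readable bytes;
    // alignment is irrelevant for this intrinsic.
    let v = unsafe { i16x8_load_extend_i8x8(bytes.as_ptr()) };
    // -1i8 sign-extends to -1i16, not 255.
    assert_eq!(i16x8_extract_lane::<0>(v), -1);
    assert_eq!(i16x8_extract_lane::<7>(v), 6);
}
```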
@@ -214,8 +204,8 @@ pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
 #[doc(alias("v128.load8x8_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
-    let m = *(m as *const Unaligned<simd::u8x8>);
-    simd_cast::<_, simd::u16x8>(m.0).v128()
+    let m = m.cast::<simd::u8x8>().read_unaligned();
+    simd_cast::<_, simd::u16x8>(m).v128()
 }
 
 #[stable(feature = "wasm_simd", since = "1.54.0")]
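And the zero-extending counterpart for contrast (same assumptions):

```rust
use core::arch::wasm32::*;

fn demo_zero_extend() {
    let bytes: [u8; 8] = [0xFF, 1, 2, 3, 4, 5, 6, 7];
    // SAFETY: the array reference guarantees 8 readable bytes.
    let v = unsafe { i16x8_load_extend_u8x8(bytes.as_ptr()) };
    // 0xFF zero-extends to 255 rather than sign-extending to -1.
    assert_eq!(u16x8_extract_lane::<0>(v), 255);
}
```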
@@ -235,8 +225,8 @@ pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
 #[doc(alias("v128.load16x4_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
-    let m = *(m as *const Unaligned<simd::i16x4>);
-    simd_cast::<_, simd::i32x4>(m.0).v128()
+    let m = m.cast::<simd::i16x4>().read_unaligned();
+    simd_cast::<_, simd::i32x4>(m).v128()
 }
 
 /// Load four 16-bit integers and zero extend each one to a 32-bit lane
@@ -253,8 +243,8 @@ pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
 #[doc(alias("v128.load16x4_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
-    let m = *(m as *const Unaligned<simd::u16x4>);
-    simd_cast::<_, simd::u32x4>(m.0).v128()
+    let m = m.cast::<simd::u16x4>().read_unaligned();
+    simd_cast::<_, simd::u32x4>(m).v128()
 }
 
 #[stable(feature = "wasm_simd", since = "1.54.0")]
@@ -274,8 +264,8 @@ pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
 #[doc(alias("v128.load32x2_s"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
-    let m = *(m as *const Unaligned<simd::i32x2>);
-    simd_cast::<_, simd::i64x2>(m.0).v128()
+    let m = m.cast::<simd::i32x2>().read_unaligned();
+    simd_cast::<_, simd::i64x2>(m).v128()
 }
 
 /// Load two 32-bit integers and zero extend each one to a 64-bit lane
@@ -292,8 +282,8 @@ pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
 #[doc(alias("v128.load32x2_u"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
-    let m = *(m as *const Unaligned<simd::u32x2>);
-    simd_cast::<_, simd::u64x2>(m.0).v128()
+    let m = m.cast::<simd::u32x2>().read_unaligned();
+    simd_cast::<_, simd::u64x2>(m).v128()
 }
 
 #[stable(feature = "wasm_simd", since = "1.54.0")]
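All six load-extend intrinsics now share one shape: cast the pointer to the narrow vector type, `read_unaligned`, then widen each lane. A plain-Rust rendering of that pattern under illustrative types (the real code uses stdarch's internal `simd::*` vectors and `simd_cast`):

```rust
// Illustrative only: arrays stand in for stdarch's internal vector types.
unsafe fn load_extend_i32x2(m: *const i32) -> [i64; 2] {
    // Caller contract mirrors the intrinsic: `m` points at 8 readable
    // bytes; no alignment requirement.
    let narrow = m.cast::<[i32; 2]>().read_unaligned();
    // Widen each lane, preserving sign.
    [i64::from(narrow[0]), i64::from(narrow[1])]
}
```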
@@ -453,7 +443,7 @@ pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
 #[doc(alias("v128.store"))]
 #[stable(feature = "wasm_simd", since = "1.54.0")]
 pub unsafe fn v128_store(m: *mut v128, a: v128) {
-    *(m as *mut Unaligned<v128>) = Unaligned(a);
+    m.write_unaligned(a)
 }
 
 /// Loads an 8-bit value from `m` and sets lane `L` of `v` to that value.
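A hypothetical usage sketch for the rewritten `v128_store`, mirroring the load helper above (same wasm32/`simd128` assumptions; the helper name is ours):

```rust
use core::arch::wasm32::*;

// Hypothetical helper: store a v128 at an arbitrary, possibly
// unaligned, byte offset.
fn v128_store_at(bytes: &mut [u8], offset: usize, a: v128) {
    assert!(bytes.len() >= offset + 16);
    // SAFETY: the bounds check guarantees 16 writable bytes, and
    // `v128.store` has an alignment requirement of 1.
    unsafe { v128_store(bytes.as_mut_ptr().add(offset) as *mut v128, a) }
}
```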