@@ -56,7 +56,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -69,9 +69,9 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
     out
 }
 
-/// Load `load_sz` many bytes from `src.byte_add(WORD_SIZE - load_sz)`. `src` must be `usize`-aligned.
-/// The bytes are returned as the *last* bytes of the return value, i.e., this acts as if we had done
-/// a `usize` read from `src`, with the out-of-bounds part filled with 0s.
+/// Load `load_sz` many bytes from `src.wrapping_byte_add(WORD_SIZE - load_sz)`. `src` must be
+/// `usize`-aligned. The bytes are returned as the *last* bytes of the return value, i.e., this acts
+/// as if we had done a `usize` read from `src`, with the out-of-bounds part filled with 0s.
 /// `load_sz` must be strictly less than `WORD_SIZE`.
 #[cfg(not(feature = "mem-unaligned"))]
 #[inline(always)]
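
The contract in that doc comment can be written out as a byte-wise reference model (a sketch for exposition only, not the implementation, which does chunked aligned reads as in the macros above):

    const WORD_SIZE: usize = core::mem::size_of::<usize>();

    // Model of `load_aligned_end_partial`: the last `load_sz` bytes of the
    // word at `src` land in the last `load_sz` bytes of the result, and the
    // leading bytes stay zero, as if the out-of-bounds prefix read as 0s.
    unsafe fn load_aligned_end_partial_model(src: *const usize, load_sz: usize) -> usize {
        debug_assert!(load_sz < WORD_SIZE);
        let start = WORD_SIZE - load_sz;
        let mut out = 0usize;
        core::ptr::copy_nonoverlapping(
            src.cast::<u8>().wrapping_add(start),
            (&raw mut out).cast::<u8>().wrapping_add(start),
            load_sz,
        );
        out
    }
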
@@ -87,7 +87,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -142,7 +142,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
 
         while dest_usize.wrapping_add(1) < dest_end {
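
For context (a sketch of the surrounding loop body, reconstructed from the names in this hunk rather than quoted from the file): the misaligned forward copy reads whole aligned words and splices each adjacent pair together using `shift`, so `prev_word` always carries the bytes of the previous aligned word that still belong to the current destination word:

    // Sketch of one iteration of the word loop. On little-endian targets the
    // `offset` low bytes of `prev_word` were already consumed, so the rest is
    // shifted down and the destination word is topped up from `cur_word`.
    let cur_word = *src_aligned;
    #[cfg(target_endian = "little")]
    let reassembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
    #[cfg(target_endian = "big")]
    let reassembled = prev_word << shift | cur_word >> (WORD_SIZE * 8 - shift);
    prev_word = cur_word;
    *dest_usize = reassembled;
    src_aligned = src_aligned.wrapping_add(1);
    dest_usize = dest_usize.wrapping_add(1);
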
@@ -255,7 +255,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_partial(src_aligned, offset);
 
         while dest_start.wrapping_add(1) < dest_usize {
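
Taken together, these hunks replace `byte_add`/`byte_sub` with their `wrapping_*` counterparts on the realignment paths. The distinction that presumably motivates the change (a sketch, not part of the diff): the non-wrapping methods inherit `ptr::offset`'s requirement that both the starting and resulting pointer stay within the same allocation, so rounding `src` down to word alignment with `byte_sub(offset)` is undefined behavior whenever the aligned address falls before the start of the buffer, even if nothing out of bounds is ever dereferenced. The wrapping variants make the pointer arithmetic itself always defined:

    fn main() {
        let buf = [0u8; 16];
        // A misaligned source pointer into the buffer.
        let src = buf.as_ptr().wrapping_add(1);
        let offset = src as usize % core::mem::size_of::<usize>();

        // `src.byte_sub(offset)` would be UB if the word-aligned address lay
        // before the allocation; the copy routines cannot rule that out.
        // The wrapping form is always defined; only reading through an
        // out-of-bounds address would be UB:
        let src_aligned = src.wrapping_byte_sub(offset) as *const usize;
        assert_eq!(src_aligned as usize % core::mem::align_of::<usize>(), 0);
    }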