@@ -38,6 +38,8 @@ use std::usize;
 
 use crate::{ByteValued, Bytes};
 
+use copy_slice_impl::copy_slice;
+
 /// `VolatileMemory` related errors.
 #[allow(missing_docs)]
 #[derive(Debug)]
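The new import pulls `copy_slice` out of the private `copy_slice_impl` module introduced later in this diff, so existing call sites in this file keep calling it unqualified. A minimal sketch of the pattern, with hypothetical names that are not part of the patch:

    mod math_impl {
        // Only visible to the parent module and its descendants.
        pub(super) fn twice(x: u32) -> u32 {
            x * 2
        }
    }

    // Re-import at module scope so callers can keep writing `twice(..)`.
    use self::math_impl::twice;

    fn main() {
        assert_eq!(twice(21), 42);
    }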
@@ -463,57 +465,6 @@ impl<'a> VolatileSlice<'a> {
     }
 }
 
-// Return the largest value that `addr` is aligned to. Forcing this function to return 1 will
-// cause test_non_atomic_access to fail.
-fn alignment(addr: usize) -> usize {
-    // Rust is silly and does not let me write addr & -addr.
-    addr & (!addr + 1)
-}
-
-// Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
-// - `src_addr` and `dst_addr` must be valid for reads/writes.
-// - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
-// - `src_addr` must point to a properly initialized value, which is true here because
-//   we're only using integer primitives.
-unsafe fn copy_single(align: usize, src_addr: usize, dst_addr: usize) {
-    match align {
-        8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
-        4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
-        2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
-        1 => write_volatile(dst_addr as *mut u8, read_volatile(src_addr as *const u8)),
-        _ => unreachable!(),
-    }
-}
-
-fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize {
-    let total = min(src.len(), dst.len());
-    let mut left = total;
-
-    let mut src_addr = src.as_ptr() as usize;
-    let mut dst_addr = dst.as_ptr() as usize;
-    let align = min(alignment(src_addr), alignment(dst_addr));
-
-    let mut copy_aligned_slice = |min_align| {
-        while align >= min_align && left >= min_align {
-            // Safe because we check alignment beforehand, the memory areas are valid for
-            // reads/writes, and the source always contains a valid value.
-            unsafe { copy_single(min_align, src_addr, dst_addr) };
-            src_addr += min_align;
-            dst_addr += min_align;
-            left -= min_align;
-        }
-    };
-
-    if size_of::<usize>() > 4 {
-        copy_aligned_slice(8);
-    }
-    copy_aligned_slice(4);
-    copy_aligned_slice(2);
-    copy_aligned_slice(1);
-
-    total
-}
-
 impl Bytes<usize> for VolatileSlice<'_> {
     type E = Error;
 
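As the comment in the removed block notes, `alignment` returns the largest value `addr` is aligned to: `!addr + 1` is the two's complement of `addr`, so `addr & (!addr + 1)` isolates the lowest set bit, i.e. the largest power of two dividing the address. A standalone sanity-check sketch, not part of the patch (note the expression would overflow for `addr == 0`, which cannot happen for the non-null slice pointers used here):

    fn alignment(addr: usize) -> usize {
        addr & (!addr + 1)
    }

    fn main() {
        assert_eq!(alignment(0x1000), 0x1000); // page-aligned address
        assert_eq!(alignment(0x1004), 4);      // only 4-byte aligned
        assert_eq!(alignment(0x1001), 1);      // odd address
        // Equivalent formulation using the standard library:
        assert_eq!(alignment(0x1004), 0x1004usize & 0x1004usize.wrapping_neg());
    }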
@@ -1081,6 +1032,61 @@ impl<'a> From<VolatileSlice<'a>> for VolatileArrayRef<'a, u8> {
     }
 }
 
+// Return the largest value that `addr` is aligned to. Forcing this function to return 1 will
+// cause test_non_atomic_access to fail.
+fn alignment(addr: usize) -> usize {
+    // Rust is silly and does not let me write addr & -addr.
+    addr & (!addr + 1)
+}
+
+mod copy_slice_impl {
+    use super::*;
+
+    // Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
+    // - `src_addr` and `dst_addr` must be valid for reads/writes.
+    // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
+    // - `src_addr` must point to a properly initialized value, which is true here because
+    //   we're only using integer primitives.
+    unsafe fn copy_single(align: usize, src_addr: usize, dst_addr: usize) {
+        match align {
+            8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
+            4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
+            2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
+            1 => write_volatile(dst_addr as *mut u8, read_volatile(src_addr as *const u8)),
+            _ => unreachable!(),
+        }
+    }
+
+    pub(super) fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize {
+        let total = min(src.len(), dst.len());
+        let mut left = total;
+
+        let mut src_addr = src.as_ptr() as usize;
+        let mut dst_addr = dst.as_ptr() as usize;
+        let align = min(alignment(src_addr), alignment(dst_addr));
+
+        let mut copy_aligned_slice = |min_align| {
+            while align >= min_align && left >= min_align {
+                // Safe because we check alignment beforehand, the memory areas are valid for
+                // reads/writes, and the source always contains a valid value.
+                unsafe { copy_single(min_align, src_addr, dst_addr) };
+                src_addr += min_align;
+                dst_addr += min_align;
+                left -= min_align;
+            }
+        };
+
+        if size_of::<usize>() > 4 {
+            copy_aligned_slice(8);
+        }
+        copy_aligned_slice(4);
+        copy_aligned_slice(2);
+        copy_aligned_slice(1);
+
+        total
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
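For context on how the moved code is used: `copy_slice` copies `min(src.len(), dst.len())` bytes, walking the buffers with the widest volatile access the shared alignment allows (8, then 4, 2, and 1 bytes) and returning the number of bytes copied. A hypothetical test, not part of this commit, that could sit next to the existing `tests` module in this file (it relies on the `use copy_slice_impl::copy_slice;` import added at the top of the diff):

    #[cfg(test)]
    mod copy_slice_usage {
        use super::copy_slice;

        #[test]
        fn copies_up_to_the_shorter_buffer() {
            let src = [1u8, 2, 3, 4, 5, 6, 7, 8, 9];
            let mut dst = [0u8; 6];
            // Only min(9, 6) = 6 bytes are copied; the return value reports the count.
            let copied = copy_slice(&mut dst, &src);
            assert_eq!(copied, 6);
            assert_eq!(dst, [1, 2, 3, 4, 5, 6]);
        }
    }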