+use core::alloc::Layout;
+use core::mem::ManuallyDrop;
+use core::ptr::{addr_of, addr_of_mut};
+use core::slice;
+
 use super::{BlockDevice, BLOCK_SZ};
+use alloc::boxed::Box;
 use alloc::collections::VecDeque;
 use alloc::sync::Arc;
-use alloc::vec;
-use alloc::vec::Vec;
 use lazy_static::*;
 use spin::Mutex;
 
+/// Use `ManuallyDrop` to ensure the data is deallocated with an alignment of `BLOCK_SZ`.
+struct CacheData(ManuallyDrop<Box<[u8; BLOCK_SZ]>>);
+
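+// Note: a `vec![0u8; BLOCK_SZ]` buffer (the previous representation) is only
+// guaranteed byte alignment; allocating manually with an explicit `Layout`
+// gives block-device backends that expect block-aligned buffers (e.g. for
+// DMA) a guarantee they can rely on.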
+impl CacheData {
+    pub fn new() -> Self {
+        let data = unsafe {
+            // Zero the buffer (as `vec![0u8; BLOCK_SZ]` did before) so no
+            // uninitialized bytes are ever exposed through `as_ref`.
+            let raw = alloc::alloc::alloc_zeroed(Self::layout());
+            // A null pointer signals allocation failure; bail out rather
+            // than building a `Box` from null.
+            if raw.is_null() {
+                alloc::alloc::handle_alloc_error(Self::layout());
+            }
+            Box::from_raw(raw as *mut [u8; BLOCK_SZ])
+        };
+        Self(ManuallyDrop::new(data))
+    }
+
+    fn layout() -> Layout {
+        Layout::from_size_align(BLOCK_SZ, BLOCK_SZ).unwrap()
+    }
+}
+
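+// Deallocate with the same layout used by `new`: letting the inner `Box` drop
+// itself would free with the array type's natural alignment (1), which would
+// not match the `BLOCK_SZ`-aligned allocation.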
+impl Drop for CacheData {
+    fn drop(&mut self) {
+        let ptr = self.0.as_mut_ptr();
+        unsafe { alloc::alloc::dealloc(ptr, Self::layout()) };
+    }
+}
+
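+// Byte-slice views of the buffer; these are what `read_block`/`write_block`
+// consume. The explicit layout guarantees, for example, that
+// `cache.as_ref().as_ptr() as usize % BLOCK_SZ == 0`.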
+impl AsRef<[u8]> for CacheData {
+    fn as_ref(&self) -> &[u8] {
+        let ptr = self.0.as_ptr() as *const u8;
+        unsafe { slice::from_raw_parts(ptr, BLOCK_SZ) }
+    }
+}
+
+impl AsMut<[u8]> for CacheData {
+    fn as_mut(&mut self) -> &mut [u8] {
+        let ptr = self.0.as_mut_ptr() as *mut u8;
+        unsafe { slice::from_raw_parts_mut(ptr, BLOCK_SZ) }
+    }
+}
+
 pub struct BlockCache {
-    cache: Vec<u8>,
+    cache: CacheData,
     block_id: usize,
     block_device: Arc<dyn BlockDevice>,
     modified: bool,
@@ -17,8 +59,8 @@ impl BlockCache {
     /// Load a new BlockCache from disk.
     pub fn new(block_id: usize, block_device: Arc<dyn BlockDevice>) -> Self {
         // for alignment and move efficiency (the data lives behind a
         // pointer, so moving `Self` does not copy the buffer)
-        let mut cache = vec![0u8; BLOCK_SZ];
-        block_device.read_block(block_id, &mut cache);
+        let mut cache = CacheData::new();
+        block_device.read_block(block_id, cache.as_mut());
         Self {
             cache,
             block_id,
@@ -27,8 +69,12 @@ impl BlockCache {
         }
     }
 
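+    // Returning typed raw pointers (instead of a `usize` address, as before)
+    // avoids an integer round-trip and keeps pointer provenance intact;
+    // `addr_of!`/`addr_of_mut!` take the element's address in place.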
-    fn addr_of_offset(&self, offset: usize) -> usize {
-        &self.cache[offset] as *const _ as usize
+    fn addr_of_offset(&self, offset: usize) -> *const u8 {
+        addr_of!(self.cache.as_ref()[offset])
+    }
+
+    fn addr_of_offset_mut(&mut self, offset: usize) -> *mut u8 {
+        addr_of_mut!(self.cache.as_mut()[offset])
     }
 
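+    // Typed views into the cached block: the caller asserts that a `T` lives
+    // at `offset`, e.g. (hypothetically) `let sb: &SuperBlock = cache.get_ref(0);`.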
     pub fn get_ref<T>(&self, offset: usize) -> &T
@@ -37,8 +83,8 @@ impl BlockCache {
     {
         let type_size = core::mem::size_of::<T>();
         assert!(offset + type_size <= BLOCK_SZ);
-        let addr = self.addr_of_offset(offset);
-        unsafe { &*(addr as *const T) }
+        let addr = self.addr_of_offset(offset) as *const T;
+        unsafe { &*addr }
     }
 
     pub fn get_mut<T>(&mut self, offset: usize) -> &mut T
@@ -48,8 +94,8 @@ impl BlockCache {
         let type_size = core::mem::size_of::<T>();
         assert!(offset + type_size <= BLOCK_SZ);
         self.modified = true;
-        let addr = self.addr_of_offset_mut(offset) as *mut T;
+        let addr = self.addr_of_offset_mut(offset) as *mut T;
         unsafe { &mut *addr }
     }
 
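+    // `read` hands the `get_ref` view to a caller-supplied closure, so the
+    // borrow of the cached data cannot outlive the call.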
     pub fn read<T, V>(&self, offset: usize, f: impl FnOnce(&T) -> V) -> V {
@@ -63,7 +109,8 @@ impl BlockCache {
     pub fn sync(&mut self) {
         if self.modified {
             self.modified = false;
-            self.block_device.write_block(self.block_id, &self.cache);
+            self.block_device
+                .write_block(self.block_id, self.cache.as_ref());
         }
     }
 }