@@ -22,6 +22,8 @@ mod gpt;
22
22
use gpt:: Gpt ;
23
23
24
24
use core:: mem:: MaybeUninit ;
25
+ use core:: ops:: { Deref , DerefMut } ;
26
+ use core:: sync:: atomic:: { AtomicBool , Ordering } ;
25
27
26
28
use alloc:: collections:: BTreeMap ;
27
29
use alloc:: sync:: { Arc , Weak } ;
@@ -45,6 +47,7 @@ struct CachedPage {
45
47
device : Weak < dyn CachedAccess > ,
46
48
offset : usize ,
47
49
page : PhysFrame ,
50
+ dirty : AtomicBool ,
48
51
}
49
52
50
53
impl CachedPage {
@@ -55,6 +58,7 @@ impl CachedPage {
55
58
page : FRAME_ALLOCATOR
56
59
. allocate_frame ( )
57
60
. expect ( "page_cache: out of memory" ) ,
61
+ dirty : AtomicBool :: new ( false ) ,
58
62
}
59
63
}
60
64
@@ -77,6 +81,39 @@ impl CachedPage {
77
81
/// Builds the cache key for a page: the device's pointer identity paired
/// with the page-granular offset within that device.
fn make_key(device: Weak<dyn CachedAccess>, offset: usize) -> PageCacheKey {
    // The raw data pointer of the `Weak` is a stable identity for the
    // device, even without upgrading it.
    let device_id = device.as_ptr() as *const u8 as usize;
    (device_id, offset)
}
84
+
85
+ /// Returns whether the page has been marked dirty.
86
+ fn is_dirty ( & self ) -> bool {
87
+ self . dirty . load ( Ordering :: SeqCst )
88
+ }
89
+
90
+ fn mark_dirty ( & self ) {
91
+ self . dirty . store ( true , Ordering :: SeqCst ) ;
92
+ }
93
+
94
+ fn device ( & self ) -> Arc < dyn CachedAccess > {
95
+ self . device . upgrade ( ) . unwrap ( )
96
+ }
97
+
98
+ fn sync ( & self ) {
99
+ if !self . is_dirty ( ) {
100
+ return ;
101
+ }
102
+
103
+ // Commit the changes made to the cache to the disk.
104
+ let disk = self . device ( ) ;
105
+
106
+ let offset_bytes = self . offset * Size4KiB :: SIZE as usize ;
107
+ let sector = offset_bytes / disk. block_size ( ) ;
108
+
109
+ disk. write_dma ( sector, self . data_addr ( ) , Size4KiB :: SIZE as usize ) ;
110
+ }
111
+ }
112
+
113
+ impl Drop for CachedPage {
114
+ fn drop ( & mut self ) {
115
+ self . sync ( )
116
+ }
80
117
}
81
118
82
119
impl Cacheable < PageCacheKey > for CachedPage {
@@ -96,9 +133,8 @@ impl Cache<PageCacheKey, CachedPage> {
96
133
/// ## Arguments
97
134
///
98
135
/// * `device` - The device to get the page from.
99
- ///
100
- /// * `offset` - The offset in bytes to the data. This will be rounded down to
101
- /// the nearest page boundary.
136
+ /// * `offset` - The offset in bytes to the data. This will be rounded down to the nearest page
137
+ /// boundary.
102
138
pub fn get_page ( & self , device : Weak < dyn CachedAccess > , offset : usize ) -> PageCacheItem {
103
139
let cache_offset = offset / Size4KiB :: SIZE as usize ;
104
140
let cache_key = CachedPage :: make_key ( device. clone ( ) , cache_offset) ;
@@ -121,10 +157,48 @@ impl Cache<PageCacheKey, CachedPage> {
121
157
}
122
158
}
123
159
160
+ pub struct DirtyRef < T : Sized > {
161
+ cache : PageCacheItem ,
162
+ ptr : * mut T ,
163
+ }
164
+
165
+ impl < T > DirtyRef < T > {
166
+ pub fn new ( device : Weak < dyn CachedAccess > , offset : usize ) -> Self {
167
+ let cache = PAGE_CACHE . get_page ( device, offset) ;
168
+
169
+ let ptr_offset = offset % Size4KiB :: SIZE as usize ;
170
+ let ptr = & cache. data_mut ( ) [ ptr_offset..ptr_offset + core:: mem:: size_of :: < T > ( ) ] ;
171
+
172
+ Self {
173
+ ptr : ptr. as_ptr ( ) as * mut T ,
174
+ cache,
175
+ }
176
+ }
177
+ }
178
+
179
+ impl < T > Deref for DirtyRef < T > {
180
+ type Target = T ;
181
+
182
+ fn deref ( & self ) -> & Self :: Target {
183
+ unsafe { & * self . ptr }
184
+ }
185
+ }
186
+
187
+ impl < T > DerefMut for DirtyRef < T > {
188
+ fn deref_mut ( & mut self ) -> & mut Self :: Target {
189
+ self . cache . mark_dirty ( ) ;
190
+ unsafe { & mut * self . ptr }
191
+ }
192
+ }
193
+
194
+ unsafe impl < T > Sync for DirtyRef < T > { }
195
+ unsafe impl < T > Send for DirtyRef < T > { }
196
+
124
197
pub trait BlockDeviceInterface : Send + Sync {
125
198
fn block_size ( & self ) -> usize ;
126
199
127
200
fn read_dma ( & self , sector : usize , start : PhysAddr , size : usize ) -> Option < usize > ;
201
+ fn write_dma ( & self , sector : usize , start : PhysAddr , size : usize ) -> Option < usize > ;
128
202
129
203
fn read_block ( & self , sector : usize , dest : & mut [ MaybeUninit < u8 > ] ) -> Option < usize > ;
130
204
fn write_block ( & self , sector : usize , buf : & [ u8 ] ) -> Option < usize > ;
@@ -228,6 +302,10 @@ impl BlockDeviceInterface for BlockDevice {
228
302
self . dev . read_dma ( sector, start, size)
229
303
}
230
304
305
+ fn write_dma ( & self , sector : usize , start : PhysAddr , size : usize ) -> Option < usize > {
306
+ self . dev . write_dma ( sector, start, size)
307
+ }
308
+
231
309
fn read_block ( & self , sector : usize , dest : & mut [ MaybeUninit < u8 > ] ) -> Option < usize > {
232
310
self . dev . read_block ( sector, dest)
233
311
}
@@ -292,6 +370,14 @@ impl BlockDeviceInterface for PartitionBlockDevice {
292
370
self . device . write_block ( self . offset + sector, buf)
293
371
}
294
372
373
+ fn write_dma ( & self , sector : usize , start : PhysAddr , size : usize ) -> Option < usize > {
374
+ if sector >= self . size {
375
+ return None ;
376
+ }
377
+
378
+ self . write_dma ( self . offset + sector, start, size)
379
+ }
380
+
295
381
fn read_dma ( & self , sector : usize , start : PhysAddr , size : usize ) -> Option < usize > {
296
382
if sector >= self . size {
297
383
return None ;
0 commit comments