@@ -109,3 +109,311 @@ pub type BS<'a, B> = <B as WithBitmapSlice<'a>>::S;
 /// Helper type alias for referring to the `BitmapSlice` concrete type associated with
 /// the memory regions of an object `M: GuestMemory`.
 pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+
+    use std::fmt::Debug;
+    use std::io::Cursor;
+    use std::marker::PhantomData;
+    use std::mem::size_of_val;
+    use std::result::Result;
+    use std::sync::atomic::Ordering;
+
+    use crate::{Bytes, VolatileMemory};
+    #[cfg(feature = "backend-mmap")]
+    use crate::{GuestAddress, MemoryRegionAddress};
+
+    // Helper function to check whether a specified range is clean.
+    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+        (start..start + len).all(|offset| !b.dirty_at(offset))
+    }
+
+    // Helper function to check whether a specified range is dirty.
+    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
+        (start..start + len).all(|offset| b.dirty_at(offset))
+    }
+
+    pub fn check_range<B: Bitmap>(b: &B, start: usize, len: usize, clean: bool) -> bool {
+        if clean {
+            range_is_clean(b, start, len)
+        } else {
+            range_is_dirty(b, start, len)
+        }
+    }
+
+    // Helper function that tests a generic `B: Bitmap` implementation. It assumes `b` covers
+    // an area of length at least 0x2000.
+    pub fn test_bitmap<B: Bitmap>(b: &B) {
+        let len = 0x2000;
+        let dirty_offset = 0x1000;
+        let dirty_len = 0x100;
+
+        // Some basic checks.
+        let s = b.slice_at(dirty_offset);
+
+        assert!(range_is_clean(b, 0, len));
+        assert!(range_is_clean(&s, 0, dirty_len));
+
+        b.mark_dirty(dirty_offset, dirty_len);
+        assert!(range_is_dirty(b, dirty_offset, dirty_len));
+        assert!(range_is_dirty(&s, 0, dirty_len));
+    }
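+
+    // For illustration only (hypothetical usage, not exercised by this module): a concrete
+    // backend test is expected to construct its bitmap and hand it to `test_bitmap`. Assuming
+    // a page-granular bitmap type `SomeBitmap` with a `new(byte_count, page_size)` constructor,
+    // such a test might look roughly like:
+    //
+    //     #[test]
+    //     fn test_some_bitmap() {
+    //         let b = SomeBitmap::new(0x2000, 0x1000);
+    //         test_bitmap(&b);
+    //     }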
+
+    #[derive(Debug)]
+    pub enum TestAccessError {
+        RangeCleanCheck,
+        RangeDirtyCheck,
+    }
+
+    // A helper object that implements auxiliary operations for testing `Bytes` implementations
+    // in the context of dirty bitmap tracking.
+    struct BytesHelper<F, G, M> {
+        check_range_fn: F,
+        address_fn: G,
+        phantom: PhantomData<*const M>,
+    }
+
+    // `F` represents a closure that checks whether a specified range associated with the `Bytes`
+    // object that's being tested is marked as dirty or not (depending on the value of the last
+    // parameter). It has the following parameters:
+    // - A reference to the `Bytes` implementation that's subject to testing.
+    // - The offset of the range.
+    // - The length of the range.
+    // - Whether we are checking if the range is clean (when `true`) or marked as dirty.
+    //
+    // `G` represents a closure that translates an offset into an address value that's
+    // relevant for the `Bytes` implementation being tested.
+    impl<F, G, M, A> BytesHelper<F, G, M>
+    where
+        F: Fn(&M, usize, usize, bool) -> bool,
+        G: Fn(usize) -> A,
+        M: Bytes<A>,
+    {
+        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
+            (self.check_range_fn)(m, start, len, clean)
+        }
+
+        fn address(&self, offset: usize) -> A {
+            (self.address_fn)(offset)
+        }
+
+        fn test_access<Op>(
+            &self,
+            bytes: &M,
+            dirty_offset: usize,
+            dirty_len: usize,
+            op: Op,
+        ) -> Result<(), TestAccessError>
+        where
+            Op: Fn(&M, A),
+        {
+            if !self.check_range(bytes, dirty_offset, dirty_len, true) {
+                return Err(TestAccessError::RangeCleanCheck);
+            }
+
+            op(bytes, self.address(dirty_offset));
+
+            if !self.check_range(bytes, dirty_offset, dirty_len, false) {
+                return Err(TestAccessError::RangeDirtyCheck);
+            }
+
+            Ok(())
+        }
+    }
+
+    // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
+    // The `step` parameter represents the offset that's added to the current address after
+    // performing each access. It provides finer grained control when testing tracking
+    // implementations that aggregate entire ranges for accounting purposes (for example, doing
+    // tracking at the page level). For instance, with page-level tracking, a `step` of 0x1000
+    // ensures each access below lands on a separate page, so the dirty state left behind by
+    // one access does not interfere with the clean-range check performed before the next one.
+    pub fn test_bytes<F, G, M, A>(bytes: &M, check_range_fn: F, address_fn: G, step: usize)
+    where
+        F: Fn(&M, usize, usize, bool) -> bool,
+        G: Fn(usize) -> A,
+        A: Copy,
+        M: Bytes<A>,
+        <M as Bytes<A>>::E: Debug,
+    {
+        const BUF_SIZE: usize = 1024;
+        let buf = vec![1u8; 1024];
+
+        let val = 1u64;
+
+        let h = BytesHelper {
+            check_range_fn,
+            address_fn,
+            phantom: PhantomData,
+        };
+
+        let mut dirty_offset = 0x1000;
+
+        // Test `write`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `write_slice`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            m.write_slice(buf.as_slice(), addr).unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `write_obj`.
+        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+            m.write_obj(val, addr).unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `read_from`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            assert_eq!(
+                m.read_from(addr, &mut Cursor::new(&buf), BUF_SIZE).unwrap(),
+                BUF_SIZE
+            )
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `read_exact_from`.
+        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
+            m.read_exact_from(addr, &mut Cursor::new(&buf), BUF_SIZE)
+                .unwrap()
+        })
+        .unwrap();
+        dirty_offset += step;
+
+        // Test `store`.
+        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
+            m.store(val, addr, Ordering::Relaxed).unwrap()
+        })
+        .unwrap();
+    }
+
+    // This function and the next are currently conditionally compiled because we only use
+    // them to test the mmap-based backend implementations for now. Going forward, the generic
+    // test functions defined here can be placed in a separate module (i.e. `test_utilities`)
+    // which is gated by a feature and can be used for testing purposes by other crates as well.
+    #[cfg(feature = "backend-mmap")]
+    fn test_guest_memory_region<R: GuestMemoryRegion>(region: &R) {
+        let dirty_addr = MemoryRegionAddress(0x0);
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let slice = region.get_slice(dirty_addr, dirty_len).unwrap();
+
+        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+        region.write_obj(val, dirty_addr).unwrap();
+
+        assert!(range_is_dirty(
+            region.bitmap(),
+            dirty_addr.0 as usize,
+            dirty_len
+        ));
+
+        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+        // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same
+        // `region` handle because `test_bytes` starts performing writes after the range that's
+        // already been dirtied in the first part of this test.
+        test_bytes(
+            region,
+            |r: &R, start: usize, len: usize, clean: bool| {
+                check_range(r.bitmap(), start, len, clean)
+            },
+            |offset| MemoryRegionAddress(offset as u64),
+            0x1000,
+        );
+    }
+
+    #[cfg(feature = "backend-mmap")]
+    // Assumptions about M generated by f ...
+    pub fn test_guest_memory_and_region<M, F>(f: F)
+    where
+        M: GuestMemory,
+        F: Fn() -> M,
+    {
+        let m = f();
+        let dirty_addr = GuestAddress(0x1000);
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap();
+        let slice = m.get_slice(dirty_addr, dirty_len).unwrap();
+
+        assert!(range_is_clean(region.bitmap(), 0, region.len() as usize));
+        assert!(range_is_clean(slice.bitmap(), 0, dirty_len));
+
+        m.write_obj(val, dirty_addr).unwrap();
+
+        assert!(range_is_dirty(
+            region.bitmap(),
+            region_addr.0 as usize,
+            dirty_len
+        ));
+
+        assert!(range_is_dirty(slice.bitmap(), 0, dirty_len));
+
+        // Now let's invoke the tests for the inner `GuestMemoryRegion` type.
+        test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap());
+
+        // Finally, let's invoke the generic tests for `Bytes`.
+        let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool {
+            let mut check_result = true;
+            m.try_access(len, GuestAddress(start as u64), |_, size, reg_addr, reg| {
+                if !check_range(reg.bitmap(), reg_addr.0 as usize, size, clean) {
+                    check_result = false;
+                }
+                Ok(size)
+            })
+            .unwrap();
+
+            check_result
+        };
+
+        test_bytes(
+            &f(),
+            check_range_closure,
+            |offset| GuestAddress(offset as u64),
+            0x1000,
+        );
+    }
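+
+    // For illustration only (hypothetical, not part of this module): the mmap backend's own
+    // tests are expected to drive the function above with a factory closure. Assuming a
+    // `GuestMemoryMmap<B>` type that is generic over a bitmap implementation and exposes
+    // `from_ranges`, such a test might look roughly like:
+    //
+    //     #[cfg(feature = "backend-mmap")]
+    //     #[test]
+    //     fn test_dirty_tracking_mmap() {
+    //         test_guest_memory_and_region(|| {
+    //             GuestMemoryMmap::<SomeBitmap>::from_ranges(&[(GuestAddress(0), 0x1_0000)])
+    //                 .unwrap()
+    //         });
+    //     }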
+
+    pub fn test_volatile_memory<M: VolatileMemory>(m: &M) {
+        assert!(m.len() >= 0x8000);
+
+        let dirty_offset = 0x1000;
+        let val = 123u64;
+        let dirty_len = size_of_val(&val);
+
+        let get_ref_offset = 0x2000;
+        let array_ref_offset = 0x3000;
+
+        let s1 = m.as_volatile_slice();
+        let s2 = m.get_slice(dirty_offset, dirty_len).unwrap();
+
+        assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
+        assert!(range_is_clean(s2.bitmap(), 0, s2.len()));
+
+        s1.write_obj(val, dirty_offset).unwrap();
+
+        assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len));
+        assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
+
+        let v_ref = m.get_ref::<u64>(get_ref_offset).unwrap();
+        assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len));
+        v_ref.store(val);
+        assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len));
+
+        let arr_ref = m.get_array_ref::<u64>(array_ref_offset, 1).unwrap();
+        assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len));
+        arr_ref.store(0, val);
+        assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len));
+    }
+}