@@ -120,7 +120,6 @@ pub type MS<'a, M> = BS<'a, <<M as GuestMemory>::R as GuestMemoryRegion>::B>;
 pub(crate) mod tests {
     use super::*;
 
-    use std::marker::PhantomData;
     use std::mem::size_of_val;
     use std::sync::atomic::Ordering;
 
@@ -164,56 +163,6 @@ pub(crate) mod tests {
         assert!(range_is_dirty(&s, 0, dirty_len));
     }
 
-    // A helper object that implements auxiliary operations for testing `Bytes` implementations
-    // in the context of dirty bitmap tracking.
-    struct BytesHelper<F, G, M> {
-        check_range_fn: F,
-        address_fn: G,
-        phantom: PhantomData<*const M>,
-    }
-
-    // `F` represents a closure that checks whether a specified range associated with the `Bytes`
-    // object that's being tested is marked as dirty or not (depending on the value of the last
-    // parameter). It has the following parameters:
-    // - A reference to a `Bytes` implementation that's subject to testing.
-    // - The offset of the range.
-    // - The length of the range.
-    // - Whether we are checking if the range is clean (when `true`) or marked as dirty.
-    //
-    // `G` represents a closure that translates an offset into an address value that's
-    // relevant for the `Bytes` implementation being tested.
-    impl<F, G, M, A> BytesHelper<F, G, M>
-    where
-        F: Fn(&M, usize, usize, bool) -> bool,
-        G: Fn(usize) -> A,
-        M: Bytes<A>,
-    {
-        fn check_range(&self, m: &M, start: usize, len: usize, clean: bool) -> bool {
-            (self.check_range_fn)(m, start, len, clean)
-        }
-
-        fn address(&self, offset: usize) -> A {
-            (self.address_fn)(offset)
-        }
-
-        fn test_access<Op>(
-            &self,
-            bytes: &M,
-            dirty_offset: usize,
-            dirty_len: usize,
-            op: Op,
-        )
-        where
-            Op: Fn(&M, A),
-        {
-            assert!(self.check_range(bytes, dirty_offset, dirty_len, true));
-
-            op(bytes, self.address(dirty_offset));
-
-            assert!(self.check_range(bytes, dirty_offset, dirty_len, false));
-        }
-    }
-
     // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment.
     // The `step` parameter represents the offset that's added to the current address after
     // performing each access. It provides finer grained control when testing tracking
@@ -223,45 +172,46 @@
     where
         F: Fn(&M, usize, usize, bool) -> bool,
         G: Fn(usize) -> A,
-        A: Copy,
         M: Bytes<A>,
         <M as Bytes<A>>::E: Debug,
     {
         const BUF_SIZE: usize = 1024;
         let buf = vec![1u8; 1024];
+        let mut dirty_offset = 0x1000;
 
         let val = 1u64;
 
-        let h = BytesHelper {
-            check_range_fn,
-            address_fn,
-            phantom: PhantomData,
-        };
-
-        let mut dirty_offset = 0x1000;
-
         // Test `write`.
-        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
-            assert_eq!(m.write(buf.as_slice(), addr).unwrap(), BUF_SIZE)
-        });
-        dirty_offset += step;
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
+        assert_eq!(
+            bytes
+                .write(buf.as_slice(), address_fn(dirty_offset))
+                .unwrap(),
+            BUF_SIZE
+        );
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
 
         // Test `write_slice`.
-        h.test_access(bytes, dirty_offset, BUF_SIZE, |m, addr| {
-            m.write_slice(buf.as_slice(), addr).unwrap()
-        });
         dirty_offset += step;
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
+        bytes
+            .write_slice(buf.as_slice(), address_fn(dirty_offset))
+            .unwrap();
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
 
         // Test `write_obj`.
-        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
-            m.write_obj(val, addr).unwrap()
-        });
         dirty_offset += step;
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
+        bytes.write_obj(val, address_fn(dirty_offset)).unwrap();
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
 
         // Test `store`.
-        h.test_access(bytes, dirty_offset, size_of_val(&val), |m, addr| {
-            m.store(val, addr, Ordering::Relaxed).unwrap()
-        });
+        dirty_offset += step;
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
+        bytes
+            .store(val, address_fn(dirty_offset), Ordering::Relaxed)
+            .unwrap();
+        assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
     }
 
     // This function and the next are currently conditionally compiled because we only use
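
For context on the closure shapes that `test_bytes` expects, here is a minimal, self-contained sketch of the pattern the refactoring settles on: the caller supplies a `check_range_fn` with the `Fn(&M, usize, usize, bool) -> bool` shape and an `address_fn` with the `Fn(usize) -> A` shape, and every access is bracketed by a "range is clean" assertion before and a "range is dirty" assertion after. The sketch does not use vm-memory's real `Bytes`/`GuestMemory` types; `ToyMemory`, `PAGE_SIZE`, and `exercise_write` are hypothetical stand-ins invented purely for illustration.

const PAGE_SIZE: usize = 0x1000;

/// Toy memory with page-granular dirty tracking (hypothetical; not vm-memory's `Bytes`).
struct ToyMemory {
    data: std::cell::RefCell<Vec<u8>>,
    dirty: std::cell::RefCell<Vec<bool>>,
}

impl ToyMemory {
    fn new(size: usize) -> Self {
        ToyMemory {
            data: std::cell::RefCell::new(vec![0; size]),
            dirty: std::cell::RefCell::new(vec![false; size / PAGE_SIZE]),
        }
    }

    /// Writes `buf` at `addr` and marks every touched page as dirty.
    fn write(&self, buf: &[u8], addr: usize) -> usize {
        self.data.borrow_mut()[addr..addr + buf.len()].copy_from_slice(buf);
        for page in addr / PAGE_SIZE..=(addr + buf.len() - 1) / PAGE_SIZE {
            self.dirty.borrow_mut()[page] = true;
        }
        buf.len()
    }

    /// Returns `true` if no page overlapping `[start, start + len)` is dirty.
    fn range_is_clean(&self, start: usize, len: usize) -> bool {
        (start / PAGE_SIZE..=(start + len - 1) / PAGE_SIZE).all(|page| !self.dirty.borrow()[page])
    }
}

/// Same closure shapes as `test_bytes`: `check_range_fn(m, start, len, clean)` and
/// `address_fn(offset)`, with the check/access/check sequence written inline.
fn exercise_write<M, A, F, G>(bytes: &M, check_range_fn: F, address_fn: G, write: impl Fn(&M, A))
where
    F: Fn(&M, usize, usize, bool) -> bool,
    G: Fn(usize) -> A,
{
    const BUF_SIZE: usize = 1024;
    let dirty_offset = 0x1000;

    // The range must be clean before the access...
    assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true));
    write(bytes, address_fn(dirty_offset));
    // ...and marked dirty afterwards.
    assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false));
}

fn main() {
    let mem = ToyMemory::new(0x10_000);
    let buf = vec![1u8; 1024];

    exercise_write(
        &mem,
        |m: &ToyMemory, start: usize, len: usize, clean: bool| {
            m.range_is_clean(start, len) == clean
        },
        |offset: usize| offset, // addresses are plain `usize` offsets in this toy model
        |m: &ToyMemory, addr: usize| {
            assert_eq!(m.write(buf.as_slice(), addr), 1024);
        },
    );
}

Inlining the check/access/check sequence, as the diff does, trades a little repetition for assertions whose failures point directly at the offending access, rather than routing every access through a shared `test_access` helper.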