@@ -8,7 +8,9 @@
 use std::num::Wrapping;
 use std::sync::atomic::{Ordering, fence};
 
+use crate::arch::host_page_size;
 use crate::logger::error;
+use crate::utils::u64_to_usize;
 use crate::vstate::memory::{Address, Bitmap, ByteValued, GuestAddress, GuestMemory};
 
 pub const VIRTQ_DESC_F_NEXT: u16 = 0x1;
@@ -32,7 +34,7 @@ pub enum QueueError {
     /// Failed to write value into the virtio queue used ring: {0}
     MemoryError(#[from] vm_memory::GuestMemoryError),
     /// Pointer is not aligned properly: {0:#x} not {1}-byte aligned.
-    PointerNotAligned(usize, u8),
+    PointerNotAligned(usize, usize),
 }
 
 /// Error type indicating the guest configured a virtio queue such that the avail_idx field would
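The variant's second field widens from `u8` to `usize` so the new helper can pass its `alignment: usize` argument through without a narrowing cast. For illustration, a minimal standalone sketch of the message this variant produces; the struct and hand-written `Display` impl below are stand-ins, not the crate's actual derive:

```rust
use std::fmt;

// Hypothetical stand-in for QueueError::PointerNotAligned(usize, usize);
// the real variant's message comes from its doc comment:
// "Pointer is not aligned properly: {0:#x} not {1}-byte aligned."
#[derive(Debug)]
struct PointerNotAligned(usize, usize);

impl fmt::Display for PointerNotAligned {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "Pointer is not aligned properly: {:#x} not {}-byte aligned.",
            self.0, self.1
        )
    }
}

fn main() {
    // A descriptor table GPA of 0x1002 fails the 16-byte requirement.
    println!("{}", PointerNotAligned(0x1002, 16));
    // Prints: Pointer is not aligned properly: 0x1002 not 16-byte aligned.
}
```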
@@ -310,31 +312,34 @@ impl Queue {
         + std::mem::size_of::<u16>()
     }
 
-    fn get_slice_ptr<M: GuestMemory>(
+    fn get_aligned_slice_ptr<T, M: GuestMemory>(
         &self,
         mem: &M,
         addr: GuestAddress,
         len: usize,
-    ) -> Result<*mut u8, QueueError> {
+        alignment: usize,
+    ) -> Result<*mut T, QueueError> {
+        assert_eq!(host_page_size() % alignment, 0);
+
+        // Guest memory base address is page aligned, so as long as alignment divides page size,
+        // it suffices to check that the GPA is properly aligned (i.e. we don't need to recheck
+        // the HVA).
+        if addr.0 & (alignment as u64 - 1) != 0 {
+            return Err(QueueError::PointerNotAligned(
+                u64_to_usize(addr.0),
+                alignment,
+            ));
+        }
+
         let slice = mem.get_slice(addr, len).map_err(QueueError::MemoryError)?;
         slice.bitmap().mark_dirty(0, len);
-        Ok(slice.ptr_guard_mut().as_ptr())
+        Ok(slice.ptr_guard_mut().as_ptr().cast())
     }
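The GPA check above is the standard power-of-two mask trick: an address is `alignment`-aligned exactly when the low bits selected by `alignment - 1` are all zero. A minimal standalone sketch (the `is_aligned` helper is illustrative, not part of the patch):

```rust
/// Returns true if `addr` is a multiple of `alignment`.
/// Only valid for power-of-two alignments, where `alignment - 1`
/// is a mask of the low bits that must be zero.
fn is_aligned(addr: u64, alignment: u64) -> bool {
    debug_assert!(alignment.is_power_of_two());
    addr & (alignment - 1) == 0
}

fn main() {
    assert!(is_aligned(0x1000, 16)); // fine as a descriptor table address
    assert!(is_aligned(0x1002, 2)); // fine as an available ring address
    assert!(!is_aligned(0x1002, 4)); // rejected as a used ring address
    println!("alignment checks behave as expected");
}
```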
 
     /// Set up pointers to the queue objects in the guest memory
     /// and mark memory dirty for those objects
     pub fn initialize<M: GuestMemory>(&mut self, mem: &M) -> Result<(), QueueError> {
-        self.desc_table_ptr = self
-            .get_slice_ptr(mem, self.desc_table_address, self.desc_table_size())?
-            .cast();
-        self.avail_ring_ptr = self
-            .get_slice_ptr(mem, self.avail_ring_address, self.avail_ring_size())?
-            .cast();
-        self.used_ring_ptr = self
-            .get_slice_ptr(mem, self.used_ring_address, self.used_ring_size())?
-            .cast();
-
-        // All the above pointers are expected to be aligned properly; otherwise some methods (e.g.
+        // All the below pointers are verified to be aligned properly; otherwise some methods (e.g.
         // `read_volatile()`) will panic. Such a misalignment is possible when restoring from a
         // broken/fuzzed snapshot.
         //
@@ -347,24 +352,12 @@ impl Queue {
         // > Available Ring   2
         // > Used Ring        4
         // > ================ ==========
-        if !self.desc_table_ptr.cast::<u128>().is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.desc_table_ptr as usize,
-                16,
-            ));
-        }
-        if !self.avail_ring_ptr.is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.avail_ring_ptr as usize,
-                2,
-            ));
-        }
-        if !self.used_ring_ptr.cast::<u32>().is_aligned() {
-            return Err(QueueError::PointerNotAligned(
-                self.used_ring_ptr as usize,
-                4,
-            ));
-        }
+        self.desc_table_ptr =
+            self.get_aligned_slice_ptr(mem, self.desc_table_address, self.desc_table_size(), 16)?;
+        self.avail_ring_ptr =
+            self.get_aligned_slice_ptr(mem, self.avail_ring_address, self.avail_ring_size(), 2)?;
+        self.used_ring_ptr =
+            self.get_aligned_slice_ptr(mem, self.used_ring_address, self.used_ring_size(), 4)?;
 
         Ok(())
     }
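The `assert_eq!(host_page_size() % alignment, 0)` in the helper is what lets `initialize` skip re-checking the HVA: a page-aligned mapping base is a multiple of every alignment that divides the page size, so adding the base to a guest offset never changes the remainder modulo that alignment. A standalone sketch of that arithmetic, with a made-up mapping base and assuming the region's offsets track the GPAs (as they do for a page-aligned region):

```rust
fn main() {
    let page_size: u64 = 4096;
    // Hypothetical page-aligned host mapping base (as mmap would return).
    let base: u64 = 0x7f12_3456_7000;
    assert_eq!(base % page_size, 0);

    for alignment in [2u64, 4, 16] {
        // Mirrors the helper's precondition: alignment divides the page size.
        assert_eq!(page_size % alignment, 0);
        for offset in [0u64, 2, 4, 6, 16, 18, 4096] {
            // base is a multiple of alignment, so it drops out of the sum:
            // the HVA (base + offset) is aligned iff the offset (GPA) is.
            assert_eq!((base + offset) % alignment, offset % alignment);
        }
    }
    println!("GPA alignment implies HVA alignment under these assumptions");
}
```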