@@ -19,7 +19,7 @@ use std::ffi::c_void;
 use std::io::Error;
 #[cfg(target_os = "linux")]
 use std::ptr::null_mut;
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Mutex, RwLock};

 use hyperlight_common::mem::PAGE_SIZE_USIZE;
 use tracing::{Span, instrument};
@@ -39,6 +39,7 @@ use windows::core::PCSTR;
 use crate::HyperlightError::MemoryAllocationFailed;
 #[cfg(target_os = "windows")]
 use crate::HyperlightError::{MemoryRequestTooBig, WindowsAPIError};
+use crate::mem::dirty_page_tracking::{DirtyPageTracker, DirtyPageTracking};
 use crate::{Result, log_then_return, new_error};

 /// Makes sure that the given `offset` and `size` are within the bounds of the memory with size `mem_size`.
@@ -95,6 +96,7 @@ pub struct HostMapping {
     size: usize,
     #[cfg(target_os = "windows")]
     handle: HANDLE,
+    dirty_pages: Mutex<Option<Vec<usize>>>,
 }

 impl Drop for HostMapping {
@@ -383,10 +385,50 @@ impl ExclusiveSharedMemory {
             region: Arc::new(HostMapping {
                 ptr: addr as *mut u8,
                 size: total_size,
+                dirty_pages: None.into(),
             }),
         })
     }

+    /// Starts tracking dirty pages in the shared memory region.
+    pub(super) fn start_tracking_dirty_pages(&self) -> Result<DirtyPageTracker> {
+        DirtyPageTracker::new(self)
+    }
+
+    /// Stops tracking dirty pages in the shared memory region, merging the
+    /// tracker's pages into the set stored on the mapping.
+    pub(crate) fn stop_tracking_dirty_pages(&self, tracker: DirtyPageTracker) -> Result<()> {
+        let dirty_pages = tracker.get_dirty_pages();
+        let mut existing_dirty_pages = self
+            .region
+            .dirty_pages
+            .lock()
+            .map_err(|e| new_error!("Failed to lock dirty_pages: {}", e))?;
+
+        match existing_dirty_pages.as_mut() {
+            Some(existing) => {
+                // Merge the new dirty pages with the existing ones, then
+                // sort and dedup so each page index appears once.
+                existing.extend(dirty_pages);
+                existing.sort_unstable();
+                existing.dedup();
+            }
+            None => {
+                *existing_dirty_pages = Some(dirty_pages);
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Takes the accumulated dirty pages, leaving `None` in their place.
+    pub(super) fn get_and_clear_host_dirty_page_map(&self) -> Result<Option<Vec<usize>>> {
+        Ok(self
+            .region
+            .dirty_pages
+            .lock()
+            .map_err(|e| new_error!("Failed to lock dirty_pages: {}", e))?
+            .take())
+    }
+
     /// Create a new region of shared memory with the given minimum
     /// size in bytes. The region will be surrounded by guard pages.
     ///
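Taken together, the three methods above form a start/stop/drain lifecycle. Below is a minimal, hypothetical sketch of the intended call sequence; it assumes caller code inside the `mem` module (the methods are `pub(super)`/`pub(crate)`), and the write through `as_mut_slice` stands in for any mutation that dirties a page:

```rust
// Hypothetical sketch, not part of this change: exercises the lifecycle of
// start_tracking_dirty_pages / stop_tracking_dirty_pages /
// get_and_clear_host_dirty_page_map from inside the `mem` module.
fn collect_dirty_pages(shm: &mut ExclusiveSharedMemory) -> Result<Vec<usize>> {
    // Begin recording page-granular writes to the mapping.
    let tracker = shm.start_tracking_dirty_pages()?;

    // Any write through the region dirties the page containing it.
    shm.as_mut_slice()[0] = 0xFF;

    // Merge the tracker's pages into the set stored on the HostMapping.
    shm.stop_tracking_dirty_pages(tracker)?;

    // Drain the stored set; a second call returns None until more pages
    // are merged in.
    Ok(shm.get_and_clear_host_dirty_page_map()?.unwrap_or_default())
}
```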
@@ -498,6 +540,7 @@ impl ExclusiveSharedMemory {
                 ptr: addr.Value as *mut u8,
                 size: total_size,
                 handle,
+                dirty_pages: None.into(),
             }),
         })
     }
@@ -613,6 +656,15 @@ impl ExclusiveSharedMemory {
         Ok(())
     }

+    /// Copies bytes from self into `slice`, starting at `offset`.
+    #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
+    pub fn copy_to_slice(&self, slice: &mut [u8], offset: usize) -> Result<()> {
+        let data = self.as_slice();
+        bounds_check!(offset, slice.len(), data.len());
+        slice.copy_from_slice(&data[offset..offset + slice.len()]);
+        Ok(())
+    }
+
     /// Return the address of memory at an offset to this `SharedMemory` checking
     /// that the memory is within the bounds of the `SharedMemory`.
     #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
@@ -621,6 +673,15 @@ impl ExclusiveSharedMemory {
         Ok(self.base_addr() + offset)
     }

+    /// Fills the memory in the range `[offset, offset + len)` with zeroes.
+    #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
+    pub fn zero_fill(&mut self, offset: usize, len: usize) -> Result<()> {
+        bounds_check!(offset, len, self.mem_size());
+        let data = self.as_mut_slice();
+        data[offset..offset + len].fill(0);
+        Ok(())
+    }
+
     generate_reader!(read_u8, u8);
     generate_reader!(read_i8, i8);
     generate_reader!(read_u16, u16);
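A short hypothetical sketch pairing the two new helpers: both go through `bounds_check!`, so an out-of-range `offset`/`len` yields an error rather than a panic (`PAGE_SIZE_USIZE` is the import shown at the top of the file):

```rust
// Hypothetical sketch, not part of this change: snapshot one page into a
// host buffer, then zero the same range in shared memory.
fn snapshot_then_zero(shm: &mut ExclusiveSharedMemory) -> Result<Vec<u8>> {
    let mut buf = vec![0u8; PAGE_SIZE_USIZE];

    // Bounds-checked copy out of shared memory, starting at offset 0.
    shm.copy_to_slice(&mut buf, 0)?;

    // Bounds-checked zero-fill of the same range.
    shm.zero_fill(0, PAGE_SIZE_USIZE)?;

    Ok(buf)
}
```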
@@ -678,6 +739,9 @@ pub trait SharedMemory {
     /// Return a readonly reference to the host mapping backing this SharedMemory
     fn region(&self) -> &HostMapping;

+    /// Return an Arc clone of the host mapping backing this SharedMemory
+    fn region_arc(&self) -> Arc<HostMapping>;
+
     /// Return the base address of the host mapping of this
     /// region. Following the general Rust philosophy, this does not
     /// need to be marked as `unsafe` because doing anything with this
@@ -728,6 +792,11 @@ impl SharedMemory for ExclusiveSharedMemory {
     fn region(&self) -> &HostMapping {
         &self.region
     }
+
+    fn region_arc(&self) -> Arc<HostMapping> {
+        Arc::clone(&self.region)
+    }
+
     fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
         &mut self,
         f: F,
@@ -740,6 +809,11 @@ impl SharedMemory for GuestSharedMemory {
     fn region(&self) -> &HostMapping {
         &self.region
     }
+
+    fn region_arc(&self) -> Arc<HostMapping> {
+        Arc::clone(&self.region)
+    }
+
     fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
         &mut self,
         f: F,
@@ -982,6 +1056,11 @@ impl SharedMemory for HostSharedMemory {
     fn region(&self) -> &HostMapping {
         &self.region
     }
+
+    fn region_arc(&self) -> Arc<HostMapping> {
+        Arc::clone(&self.region)
+    }
+
     fn with_exclusivity<T, F: FnOnce(&mut ExclusiveSharedMemory) -> T>(
         &mut self,
         f: F,
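Because `region_arc` hands out an `Arc` clone rather than a borrow, a caller can keep the backing mapping alive independently of the wrapper that produced it; a hypothetical sketch:

```rust
// Hypothetical sketch, not part of this change: the mapping is unmapped only
// when the last Arc clone (any SharedMemory wrapper, or a standalone handle
// like the one returned here) is dropped.
fn hold_mapping<S: SharedMemory>(shm: &S) -> Arc<HostMapping> {
    shm.region_arc()
}
```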