@@ -10,7 +10,8 @@ use std::os::unix::io::AsRawFd;
 use std::ptr::{NonNull, null_mut};
 
 use kvm_bindings::{
-    KVM_COALESCED_MMIO_PAGE_OFFSET, kvm_coalesced_mmio, kvm_coalesced_mmio_ring, kvm_run,
+    KVM_COALESCED_MMIO_PAGE_OFFSET, KVM_DIRTY_GFN_F_DIRTY, KVM_DIRTY_GFN_F_RESET,
+    KVM_DIRTY_LOG_PAGE_OFFSET, kvm_coalesced_mmio, kvm_coalesced_mmio_ring, kvm_dirty_gfn, kvm_run,
 };
 
 use vmm_sys_util::errno;
@@ -29,6 +30,104 @@ pub mod vm;
 /// is otherwise a direct mapping to Result.
 pub type Result<T> = std::result::Result<T, errno::Error>;
 
+/// A wrapper around the KVM dirty log ring of a single vCPU.
+#[derive(Debug)]
+pub(crate) struct KvmDirtyLogRing {
+    /// Index of the next potentially dirty guest frame number slot
+    next_dirty: u64,
+    /// Memory-mapped array of dirty guest frame number entries
+    gfns: NonNull<kvm_dirty_gfn>,
+    /// Ring size mask (size - 1) for efficient modulo operations
+    mask: u64,
+}
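+
+// Note on the entry layout (paraphrasing the KVM dirty ring documentation,
+// not introduced by this patch): each `kvm_dirty_gfn` carries a `flags`
+// word used for the dirty/harvested handshake with KVM, the target memslot
+// in `slot`, and the page offset within that memslot in `offset`.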
+
+// SAFETY: `KvmDirtyLogRing` exclusively owns the mapping behind `gfns` for
+// its entire lifetime (created in `mmap_from_fd`, unmapped in `drop`), so
+// the raw pointer remains valid when the ring is moved to or shared with
+// another thread.
+unsafe impl Send for KvmDirtyLogRing {}
+unsafe impl Sync for KvmDirtyLogRing {}
+impl KvmDirtyLogRing {
+    /// Maps the KVM dirty log ring from the vCPU file descriptor.
+    ///
+    /// # Arguments
+    /// * `fd` - vCPU file descriptor to mmap from.
+    /// * `bytes` - Size of the ring in bytes.
+    pub(crate) fn mmap_from_fd<F: AsRawFd>(fd: &F, bytes: usize) -> Result<Self> {
+        // SAFETY: We trust the sysconf libc function and we're calling it
+        // with a correct parameter.
+        let page_size = match unsafe { libc::sysconf(libc::_SC_PAGESIZE) } {
+            -1 => return Err(errno::Error::last()),
+            ps => ps as usize,
+        };
+
+        let offset = page_size * KVM_DIRTY_LOG_PAGE_OFFSET as usize;
+
+        if bytes % std::mem::size_of::<kvm_dirty_gfn>() != 0 {
+            // The size of the dirty ring in bytes must be a multiple of the
+            // slot size.
+            return Err(errno::Error::new(libc::EINVAL));
+        }
+        let slots = bytes / std::mem::size_of::<kvm_dirty_gfn>();
+        if !slots.is_power_of_two() {
+            // The number of slots must be a power of two.
+            return Err(errno::Error::new(libc::EINVAL));
+        }
+
+        // SAFETY: KVM guarantees that there is a page at offset
+        // KVM_DIRTY_LOG_PAGE_OFFSET * PAGE_SIZE if the appropriate
+        // capability is available. If it is not, the call will simply
+        // fail.
+        let gfns = unsafe {
+            NonNull::<kvm_dirty_gfn>::new(libc::mmap(
+                null_mut(),
+                bytes,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_SHARED,
+                fd.as_raw_fd(),
+                offset as i64,
+            ) as *mut kvm_dirty_gfn)
+            .filter(|addr| addr.as_ptr() != libc::MAP_FAILED as *mut kvm_dirty_gfn)
+            .ok_or_else(|| errno::Error::last())?
+        };
+        Ok(Self {
+            next_dirty: 0,
+            gfns,
+            mask: (slots - 1) as u64,
+        })
+    }
+}
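+
+// Illustrative only (not part of this patch): a call site is expected to
+// look roughly like the sketch below, where `vcpu_fd` implements `AsRawFd`
+// and `ring_bytes` is the per-vCPU ring size in bytes used when enabling
+// KVM_CAP_DIRTY_LOG_RING; both names are assumptions.
+//
+//     let ring = KvmDirtyLogRing::mmap_from_fd(&vcpu_fd, ring_bytes)?;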
+
+impl Drop for KvmDirtyLogRing {
+    fn drop(&mut self) {
+        // SAFETY: This is safe because we mmap the page ourselves, and nobody
+        // else is holding a reference to it.
+        unsafe {
+            libc::munmap(
+                self.gfns.as_ptr().cast(),
+                (self.mask + 1) as usize * std::mem::size_of::<kvm_dirty_gfn>(),
+            );
+        }
+    }
+}
+
+impl Iterator for KvmDirtyLogRing {
+    type Item = (u32, u64);
+    fn next(&mut self) -> Option<Self::Item> {
+        let i = self.next_dirty & self.mask;
+        // SAFETY: `i` is masked to the ring size, so the access stays within
+        // the mapping, and volatile accesses are used because KVM updates
+        // the entries concurrently.
+        unsafe {
+            let gfn_ptr = self.gfns.add(i as usize).as_ptr();
+            let gfn = gfn_ptr.read_volatile();
+            if gfn.flags & KVM_DIRTY_GFN_F_DIRTY == 0 {
+                // next_dirty stays the same; it will become the next dirty
+                // element once KVM publishes it.
+                None
+            } else {
+                self.next_dirty += 1;
+                let mut updated_gfn = gfn;
+                // Mark the entry as harvested so the slot can be recycled.
+                updated_gfn.flags |= KVM_DIRTY_GFN_F_RESET;
+                gfn_ptr.write_volatile(updated_gfn);
+                Some((gfn.slot, gfn.offset))
+            }
+        }
+    }
+}
+
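+// Sketch of the intended harvesting pattern (illustrative only; the loop
+// and the reset step are assumptions following the KVM dirty ring protocol,
+// not code added by this patch). Iteration stops at the first entry KVM has
+// not marked dirty yet; after harvesting, userspace issues the
+// KVM_RESET_DIRTY_RINGS vm ioctl so the harvested slots can be reused:
+//
+//     for (slot, offset) in ring.by_ref() {
+//         // track_dirty_page is a hypothetical callback.
+//         track_dirty_page(slot, offset);
+//     }
+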
 /// A wrapper around the coalesced MMIO ring page.
 #[derive(Debug)]
 pub(crate) struct KvmCoalescedIoRing {