|
58 | 58 | //! use kvm_ioctls::VcpuExit;
|
59 | 59 | //! use kvm_ioctls::{Kvm, VcpuFd, VmFd};
|
60 | 60 | //!
|
61 |
| -//! fn main() { |
62 |
| -//! use std::io::Write; |
63 |
| -//! use std::ptr::null_mut; |
64 |
| -//! use std::slice; |
65 |
| -//! |
66 |
| -//! use kvm_bindings::KVM_MEM_LOG_DIRTY_PAGES; |
67 |
| -//! use kvm_bindings::kvm_userspace_memory_region; |
68 |
| -//! |
69 |
| -//! let mem_size = 0x4000; |
70 |
| -//! let guest_addr = 0x1000; |
71 |
| -//! let asm_code: &[u8]; |
72 |
| -//! |
73 |
| -//! // Setting up architectural dependent values. |
74 |
| -//! #[cfg(target_arch = "x86_64")] |
75 |
| -//! { |
76 |
| -//! asm_code = &[ |
77 |
| -//! 0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */ |
78 |
| -//! 0x00, 0xd8, /* add %bl, %al */ |
79 |
| -//! 0x04, b'0', /* add $'0', %al */ |
80 |
| -//! 0xee, /* out %al, %dx */ |
81 |
| -//! 0xec, /* in %dx, %al */ |
82 |
| -//! 0xc6, 0x06, 0x00, 0x80, |
83 |
| -//! 0x00, /* movl $0, (0x8000); This generates a MMIO Write. */ |
84 |
| -//! 0x8a, 0x16, 0x00, 0x80, /* movl (0x8000), %dl; This generates a MMIO Read. */ |
85 |
| -//! 0xf4, /* hlt */ |
86 |
| -//! ]; |
87 |
| -//! } |
88 |
| -//! #[cfg(target_arch = "aarch64")] |
89 |
| -//! { |
90 |
| -//! asm_code = &[ |
91 |
| -//! 0x01, 0x00, 0x00, 0x10, /* adr x1, <this address> */ |
92 |
| -//! 0x22, 0x10, 0x00, 0xb9, /* str w2, [x1, #16]; write to this page */ |
93 |
| -//! 0x02, 0x00, 0x00, 0xb9, /* str w2, [x0]; This generates a MMIO Write. */ |
94 |
| -//! 0x00, 0x00, 0x00, |
95 |
| -//! 0x14, /* b <this address>; shouldn't get here, but if so loop forever */ |
96 |
| -//! ]; |
97 |
| -//! } |
98 |
| -//! #[cfg(target_arch = "riscv64")] |
99 |
| -//! { |
100 |
| -//! asm_code = &[ |
101 |
| -//! 0x17, 0x03, 0x00, 0x00, // auipc t1, 0; <this address> -> t1 |
102 |
| -//! 0xa3, 0x23, 0x73, 0x00, // sw t2, t1 + 7; dirty current page |
103 |
| -//! 0x23, 0x20, 0x75, 0x00, // sw t2, a0; trigger MMIO exit |
104 |
| -//! 0x6f, 0x00, 0x00, 0x00, // j .;shouldn't get here, but if so loop forever |
105 |
| -//! ]; |
106 |
| -//! } |
107 |
| -//! |
108 |
| -//! // 1. Instantiate KVM. |
109 |
| -//! let kvm = Kvm::new().unwrap(); |
110 |
| -//! |
111 |
| -//! // 2. Create a VM. |
112 |
| -//! let vm = kvm.create_vm().unwrap(); |
113 |
| -//! |
114 |
| -//! // 3. Initialize Guest Memory. |
115 |
| -//! let load_addr: *mut u8 = unsafe { |
116 |
| -//! libc::mmap( |
117 |
| -//! null_mut(), |
118 |
| -//! mem_size, |
119 |
| -//! libc::PROT_READ | libc::PROT_WRITE, |
120 |
| -//! libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, |
121 |
| -//! -1, |
122 |
| -//! 0, |
123 |
| -//! ) as *mut u8 |
124 |
| -//! }; |
125 |
| -//! |
126 |
| -//! let slot = 0; |
127 |
| -//! // When initializing the guest memory slot specify the |
128 |
| -//! // `KVM_MEM_LOG_DIRTY_PAGES` to enable the dirty log. |
129 |
| -//! let mem_region = kvm_userspace_memory_region { |
130 |
| -//! slot, |
131 |
| -//! guest_phys_addr: guest_addr, |
132 |
| -//! memory_size: mem_size as u64, |
133 |
| -//! userspace_addr: load_addr as u64, |
134 |
| -//! flags: KVM_MEM_LOG_DIRTY_PAGES, |
135 |
| -//! }; |
136 |
| -//! unsafe { vm.set_user_memory_region(mem_region).unwrap() }; |
137 |
| -//! |
138 |
| -//! // Write the code in the guest memory. This will generate a dirty page. |
139 |
| -//! unsafe { |
140 |
| -//! let mut slice = slice::from_raw_parts_mut(load_addr, mem_size); |
141 |
| -//! slice.write(&asm_code).unwrap(); |
142 |
| -//! } |
143 |
| -//! |
144 |
| -//! // 4. Create one vCPU. |
145 |
| -//! let mut vcpu_fd = vm.create_vcpu(0).unwrap(); |
146 |
| -//! |
147 |
| -//! // 5. Initialize general purpose and special registers. |
148 |
| -//! #[cfg(target_arch = "x86_64")] |
149 |
| -//! { |
150 |
| -//! // x86_64 specific registry setup. |
151 |
| -//! let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap(); |
152 |
| -//! vcpu_sregs.cs.base = 0; |
153 |
| -//! vcpu_sregs.cs.selector = 0; |
154 |
| -//! vcpu_fd.set_sregs(&vcpu_sregs).unwrap(); |
| 61 | +//! use std::io::Write; |
| 62 | +//! use std::ptr::null_mut; |
| 63 | +//! use std::slice; |
| 64 | +//! |
| 65 | +//! use kvm_bindings::KVM_MEM_LOG_DIRTY_PAGES; |
| 66 | +//! use kvm_bindings::kvm_userspace_memory_region; |
| 67 | +//! |
| 68 | +//! let mem_size = 0x4000; |
| 69 | +//! let guest_addr = 0x1000; |
| 70 | +//! let asm_code: &[u8]; |
| 71 | +//! |
| 72 | +//! // Setting up architectural dependent values. |
| 73 | +//! #[cfg(target_arch = "x86_64")] |
| 74 | +//! { |
| 75 | +//! asm_code = &[ |
| 76 | +//! 0xba, 0xf8, 0x03, /* mov $0x3f8, %dx */ |
| 77 | +//! 0x00, 0xd8, /* add %bl, %al */ |
| 78 | +//! 0x04, b'0', /* add $'0', %al */ |
| 79 | +//! 0xee, /* out %al, %dx */ |
| 80 | +//! 0xec, /* in %dx, %al */ |
| 81 | +//! 0xc6, 0x06, 0x00, 0x80, |
| 82 | +//! 0x00, /* movl $0, (0x8000); This generates a MMIO Write. */ |
| 83 | +//! 0x8a, 0x16, 0x00, 0x80, /* movl (0x8000), %dl; This generates a MMIO Read. */ |
| 84 | +//! 0xf4, /* hlt */ |
| 85 | +//! ]; |
| 86 | +//! } |
| 87 | +//! #[cfg(target_arch = "aarch64")] |
| 88 | +//! { |
| 89 | +//! asm_code = &[ |
| 90 | +//! 0x01, 0x00, 0x00, 0x10, /* adr x1, <this address> */ |
| 91 | +//! 0x22, 0x10, 0x00, 0xb9, /* str w2, [x1, #16]; write to this page */ |
| 92 | +//! 0x02, 0x00, 0x00, 0xb9, /* str w2, [x0]; This generates a MMIO Write. */ |
| 93 | +//! 0x00, 0x00, 0x00, |
| 94 | +//! 0x14, /* b <this address>; shouldn't get here, but if so loop forever */ |
| 95 | +//! ]; |
| 96 | +//! } |
| 97 | +//! #[cfg(target_arch = "riscv64")] |
| 98 | +//! { |
| 99 | +//! asm_code = &[ |
| 100 | +//! 0x17, 0x03, 0x00, 0x00, // auipc t1, 0; <this address> -> t1 |
| 101 | +//! 0xa3, 0x23, 0x73, 0x00, // sw t2, t1 + 7; dirty current page |
| 102 | +//! 0x23, 0x20, 0x75, 0x00, // sw t2, a0; trigger MMIO exit |
| 103 | +//! 0x6f, 0x00, 0x00, 0x00, // j .;shouldn't get here, but if so loop forever |
| 104 | +//! ]; |
| 105 | +//! } |
155 | 106 | //!
|
156 |
| -//! let mut vcpu_regs = vcpu_fd.get_regs().unwrap(); |
157 |
| -//! vcpu_regs.rip = guest_addr; |
158 |
| -//! vcpu_regs.rax = 2; |
159 |
| -//! vcpu_regs.rbx = 3; |
160 |
| -//! vcpu_regs.rflags = 2; |
161 |
| -//! vcpu_fd.set_regs(&vcpu_regs).unwrap(); |
162 |
| -//! } |
| 107 | +//! // 1. Instantiate KVM. |
| 108 | +//! let kvm = Kvm::new().unwrap(); |
| 109 | +//! |
| 110 | +//! // 2. Create a VM. |
| 111 | +//! let vm = kvm.create_vm().unwrap(); |
| 112 | +//! |
| 113 | +//! // 3. Initialize Guest Memory. |
| 114 | +//! let load_addr: *mut u8 = unsafe { |
| 115 | +//! libc::mmap( |
| 116 | +//! null_mut(), |
| 117 | +//! mem_size, |
| 118 | +//! libc::PROT_READ | libc::PROT_WRITE, |
| 119 | +//! libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE, |
| 120 | +//! -1, |
| 121 | +//! 0, |
| 122 | +//! ) as *mut u8 |
| 123 | +//! }; |
| 124 | +//! |
| 125 | +//! let slot = 0; |
| 126 | +//! // When initializing the guest memory slot specify the |
| 127 | +//! // `KVM_MEM_LOG_DIRTY_PAGES` to enable the dirty log. |
| 128 | +//! let mem_region = kvm_userspace_memory_region { |
| 129 | +//! slot, |
| 130 | +//! guest_phys_addr: guest_addr, |
| 131 | +//! memory_size: mem_size as u64, |
| 132 | +//! userspace_addr: load_addr as u64, |
| 133 | +//! flags: KVM_MEM_LOG_DIRTY_PAGES, |
| 134 | +//! }; |
| 135 | +//! unsafe { vm.set_user_memory_region(mem_region).unwrap() }; |
| 136 | +//! |
| 137 | +//! // Write the code in the guest memory. This will generate a dirty page. |
| 138 | +//! unsafe { |
| 139 | +//! let mut slice = slice::from_raw_parts_mut(load_addr, mem_size); |
| 140 | +//! slice.write(&asm_code).unwrap(); |
| 141 | +//! } |
163 | 142 | //!
|
164 |
| -//! #[cfg(target_arch = "aarch64")] |
165 |
| -//! { |
166 |
| -//! // aarch64 specific registry setup. |
167 |
| -//! let mut kvi = kvm_bindings::kvm_vcpu_init::default(); |
168 |
| -//! vm.get_preferred_target(&mut kvi).unwrap(); |
169 |
| -//! vcpu_fd.vcpu_init(&kvi).unwrap(); |
| 143 | +//! // 4. Create one vCPU. |
| 144 | +//! let mut vcpu_fd = vm.create_vcpu(0).unwrap(); |
| 145 | +//! |
| 146 | +//! // 5. Initialize general purpose and special registers. |
| 147 | +//! #[cfg(target_arch = "x86_64")] |
| 148 | +//! { |
| 149 | +//! //     // x86_64 specific register setup. |
| 150 | +//! let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap(); |
| 151 | +//! vcpu_sregs.cs.base = 0; |
| 152 | +//! vcpu_sregs.cs.selector = 0; |
| 153 | +//! vcpu_fd.set_sregs(&vcpu_sregs).unwrap(); |
| 154 | +//! |
| 155 | +//! let mut vcpu_regs = vcpu_fd.get_regs().unwrap(); |
| 156 | +//! vcpu_regs.rip = guest_addr; |
| 157 | +//! vcpu_regs.rax = 2; |
| 158 | +//! vcpu_regs.rbx = 3; |
| 159 | +//! vcpu_regs.rflags = 2; |
| 160 | +//! vcpu_fd.set_regs(&vcpu_regs).unwrap(); |
| 161 | +//! } |
170 | 162 | //!
|
171 |
| -//! let core_reg_base: u64 = 0x6030_0000_0010_0000; |
172 |
| -//! let mmio_addr: u64 = guest_addr + mem_size as u64; |
173 |
| -//! // set PC |
174 |
| -//! vcpu_fd.set_one_reg(core_reg_base + 2 * 32, &guest_addr.to_le_bytes()); |
175 |
| -//! // set X0 |
176 |
| -//! vcpu_fd.set_one_reg(core_reg_base + 2 * 0, &mmio_addr.to_le_bytes()); |
177 |
| -//! } |
| 163 | +//! #[cfg(target_arch = "aarch64")] |
| 164 | +//! { |
| 165 | +//! //     // aarch64 specific register setup. |
| 166 | +//! let mut kvi = kvm_bindings::kvm_vcpu_init::default(); |
| 167 | +//! vm.get_preferred_target(&mut kvi).unwrap(); |
| 168 | +//! vcpu_fd.vcpu_init(&kvi).unwrap(); |
| 169 | +//! |
| 170 | +//! let core_reg_base: u64 = 0x6030_0000_0010_0000; |
| 171 | +//! let mmio_addr: u64 = guest_addr + mem_size as u64; |
| 172 | +//! // set PC |
| 173 | +//! vcpu_fd.set_one_reg(core_reg_base + 2 * 32, &guest_addr.to_le_bytes()); |
| 174 | +//! // set X0 |
| 175 | +//! vcpu_fd.set_one_reg(core_reg_base + 2 * 0, &mmio_addr.to_le_bytes()); |
| 176 | +//! } |
178 | 177 | //!
|
179 |
| -//! #[cfg(target_arch = "riscv64")] |
180 |
| -//! { |
181 |
| -//! // riscv64 specific register setup. |
182 |
| -//! let core_reg_base: u64 = 0x8030_0000_0200_0000; |
183 |
| -//! let mmio_addr: u64 = guest_addr + mem_size as u64; |
184 |
| -//! // set PC |
185 |
| -//! vcpu_fd.set_one_reg(core_reg_base, &guest_addr.to_le_bytes()); |
186 |
| -//! // set A0 |
187 |
| -//! vcpu_fd.set_one_reg(core_reg_base + 10, &mmio_addr.to_le_bytes()); |
188 |
| -//! } |
| 178 | +//! #[cfg(target_arch = "riscv64")] |
| 179 | +//! { |
| 180 | +//! // riscv64 specific register setup. |
| 181 | +//! let core_reg_base: u64 = 0x8030_0000_0200_0000; |
| 182 | +//! let mmio_addr: u64 = guest_addr + mem_size as u64; |
| 183 | +//! // set PC |
| 184 | +//! vcpu_fd.set_one_reg(core_reg_base, &guest_addr.to_le_bytes()); |
| 185 | +//! // set A0 |
| 186 | +//! vcpu_fd.set_one_reg(core_reg_base + 10, &mmio_addr.to_le_bytes()); |
| 187 | +//! } |
189 | 188 | //!
|
190 |
| -//! // 6. Run code on the vCPU. |
191 |
| -//! loop { |
192 |
| -//! match vcpu_fd.run().expect("run failed") { |
193 |
| -//! VcpuExit::IoIn(addr, data) => { |
194 |
| -//! println!( |
195 |
| -//! "Received an I/O in exit. Address: {:#x}. Data: {:#x}", |
196 |
| -//! addr, data[0], |
197 |
| -//! ); |
198 |
| -//! } |
199 |
| -//! VcpuExit::IoOut(addr, data) => { |
200 |
| -//! println!( |
201 |
| -//! "Received an I/O out exit. Address: {:#x}. Data: {:#x}", |
202 |
| -//! addr, data[0], |
203 |
| -//! ); |
204 |
| -//! } |
205 |
| -//! VcpuExit::MmioRead(addr, data) => { |
206 |
| -//! println!("Received an MMIO Read Request for the address {:#x}.", addr,); |
207 |
| -//! } |
208 |
| -//! VcpuExit::MmioWrite(addr, data) => { |
209 |
| -//! println!("Received an MMIO Write Request to the address {:#x}.", addr,); |
210 |
| -//! // The code snippet dirties 1 page when it is loaded in memory |
211 |
| -//! let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap(); |
212 |
| -//! let dirty_pages = dirty_pages_bitmap |
213 |
| -//! .into_iter() |
214 |
| -//! .map(|page| page.count_ones()) |
215 |
| -//! .fold(0, |dirty_page_count, i| dirty_page_count + i); |
216 |
| -//! assert_eq!(dirty_pages, 1); |
217 |
| -//! // Since on aarch64 there is not halt instruction, |
218 |
| -//! // we break immediately after the last known instruction |
219 |
| -//! // of the asm code example so that we avoid an infinite loop. |
220 |
| -//! #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] |
221 |
| -//! break; |
222 |
| -//! } |
223 |
| -//! VcpuExit::Hlt => { |
224 |
| -//! break; |
225 |
| -//! } |
226 |
| -//! r => panic!("Unexpected exit reason: {:?}", r), |
| 189 | +//! // 6. Run code on the vCPU. |
| 190 | +//! loop { |
| 191 | +//! match vcpu_fd.run().expect("run failed") { |
| 192 | +//! VcpuExit::IoIn(addr, data) => { |
| 193 | +//! println!( |
| 194 | +//! "Received an I/O in exit. Address: {:#x}. Data: {:#x}", |
| 195 | +//! addr, data[0], |
| 196 | +//! ); |
| 197 | +//! } |
| 198 | +//! VcpuExit::IoOut(addr, data) => { |
| 199 | +//! println!( |
| 200 | +//! "Received an I/O out exit. Address: {:#x}. Data: {:#x}", |
| 201 | +//! addr, data[0], |
| 202 | +//! ); |
| 203 | +//! } |
| 204 | +//! VcpuExit::MmioRead(addr, data) => { |
| 205 | +//! println!("Received an MMIO Read Request for the address {:#x}.", addr,); |
| 206 | +//! } |
| 207 | +//! VcpuExit::MmioWrite(addr, data) => { |
| 208 | +//! println!("Received an MMIO Write Request to the address {:#x}.", addr,); |
| 209 | +//! // The code snippet dirties 1 page when it is loaded in memory |
| 210 | +//! let dirty_pages_bitmap = vm.get_dirty_log(slot, mem_size).unwrap(); |
| 211 | +//! let dirty_pages = dirty_pages_bitmap |
| 212 | +//! .into_iter() |
| 213 | +//! .map(|page| page.count_ones()) |
| 214 | +//! .fold(0, |dirty_page_count, i| dirty_page_count + i); |
| 215 | +//! assert_eq!(dirty_pages, 1); |
| 216 | +//!                 // Since aarch64 has no halt instruction, |
| 217 | +//! // we break immediately after the last known instruction |
| 218 | +//! // of the asm code example so that we avoid an infinite loop. |
| 219 | +//! #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] |
| 220 | +//! break; |
| 221 | +//! } |
| 222 | +//! VcpuExit::Hlt => { |
| 223 | +//! break; |
227 | 224 | //! }
|
| 225 | +//! r => panic!("Unexpected exit reason: {:?}", r), |
228 | 226 | //! }
|
229 | 227 | //! }
|
230 | 228 | //! ```
|
|
0 commit comments