@@ -1194,16 +1194,16 @@ impl VcpuFd {
1194
1194
/// let vcpu = vm.create_vcpu(0).unwrap();
1195
1195
///
1196
1196
/// // KVM_GET_REG_LIST on Aarch64 demands that the vcpus be initialized.
1197
- /// #[cfg(target_arch = "aarch64")]
1198
- /// {
1199
- /// let mut kvi = kvm_bindings::kvm_vcpu_init::default();
1200
- /// vm.get_preferred_target(&mut kvi).unwrap();
1201
- /// vcpu.vcpu_init(&kvi).expect("Cannot initialize vcpu");
1202
- ///
1203
- /// let mut reg_list = RegList::new(500).unwrap();
1204
- /// vcpu.get_reg_list(&mut reg_list).unwrap();
1205
- /// assert!(reg_list.as_fam_struct_ref().n > 0);
1206
- /// }
1197
+ /// # #[cfg(target_arch = "aarch64")]
1198
+ /// # {
1199
+ /// let mut kvi = kvm_bindings::kvm_vcpu_init::default();
1200
+ /// vm.get_preferred_target(&mut kvi).unwrap();
1201
+ /// vcpu.vcpu_init(&kvi).expect("Cannot initialize vcpu");
1202
+ ///
1203
+ /// let mut reg_list = RegList::new(500).unwrap();
1204
+ /// vcpu.get_reg_list(&mut reg_list).unwrap();
1205
+ /// assert!(reg_list.as_fam_struct_ref().n > 0);
1206
+ /// # }
1207
1207
/// ```
1208
1208
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
1209
1209
pub fn get_reg_list(&self, reg_list: &mut RegList) -> Result<()> {
@@ -1351,6 +1351,9 @@ impl VcpuFd {
1351
1351
///
1352
1352
/// # Example
1353
1353
///
1354
+ /// Running some dummy code on x86_64 that immediately halts the vCPU. Based on
1355
+ /// [https://lwn.net/Articles/658511/](https://lwn.net/Articles/658511/).
1356
+ ///
1354
1357
/// ```rust
1355
1358
/// # extern crate kvm_ioctls;
1356
1359
/// # extern crate kvm_bindings;
@@ -1361,64 +1364,64 @@ impl VcpuFd {
1361
1364
/// # use kvm_bindings::{kvm_userspace_memory_region, KVM_MEM_LOG_DIRTY_PAGES};
1362
1365
/// # let kvm = Kvm::new().unwrap();
1363
1366
/// # let vm = kvm.create_vm().unwrap();
1364
- /// // This is a dummy example for running on x86 based on https://lwn.net/Articles/658511/.
1365
- /// #[cfg(target_arch = "x86_64")]
1366
- /// {
1367
- /// let mem_size = 0x4000;
1368
- /// let guest_addr: u64 = 0x1000;
1369
- /// let load_addr: *mut u8 = unsafe {
1370
- /// libc::mmap(
1371
- /// null_mut(),
1372
- /// mem_size,
1373
- /// libc::PROT_READ | libc::PROT_WRITE,
1374
- /// libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
1375
- /// -1,
1376
- /// 0,
1377
- /// ) as *mut u8
1378
- /// };
1379
- ///
1380
- /// let mem_region = kvm_userspace_memory_region {
1381
- /// slot: 0,
1382
- /// guest_phys_addr: guest_addr,
1383
- /// memory_size: mem_size as u64,
1384
- /// userspace_addr: load_addr as u64,
1385
- /// flags: 0,
1386
- /// };
1387
- /// unsafe { vm.set_user_memory_region(mem_region).unwrap() };
1388
- ///
1389
- /// // Dummy x86 code that just calls halt.
1390
- /// let x86_code = [0xf4 /* hlt */];
1391
- ///
1392
- /// // Write the code in the guest memory. This will generate a dirty page.
1393
- /// unsafe {
1394
- /// let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
1395
- /// slice.write(&x86_code).unwrap();
1396
- /// }
1397
1367
///
1398
- /// let mut vcpu_fd = vm.create_vcpu(0).unwrap();
1399
- ///
1400
- /// let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
1401
- /// vcpu_sregs.cs.base = 0;
1402
- /// vcpu_sregs.cs.selector = 0;
1403
- /// vcpu_fd.set_sregs(&vcpu_sregs).unwrap();
1404
- ///
1405
- /// let mut vcpu_regs = vcpu_fd.get_regs().unwrap();
1406
- /// // Set the Instruction Pointer to the guest address where we loaded the code.
1407
- /// vcpu_regs.rip = guest_addr;
1408
- /// vcpu_regs.rax = 2;
1409
- /// vcpu_regs.rbx = 3;
1410
- /// vcpu_regs.rflags = 2;
1411
- /// vcpu_fd.set_regs(&vcpu_regs).unwrap();
1412
- ///
1413
- /// loop {
1414
- /// match vcpu_fd.run().expect("run failed") {
1415
- /// VcpuExit::Hlt => {
1416
- /// break;
1417
- /// }
1418
- /// exit_reason => panic!("unexpected exit reason: {:?}", exit_reason),
1368
+ /// # #[cfg(target_arch = "x86_64")]
1369
+ /// # {
1370
+ /// let mem_size = 0x4000;
1371
+ /// let guest_addr: u64 = 0x1000;
1372
+ /// let load_addr: *mut u8 = unsafe {
1373
+ /// libc::mmap(
1374
+ /// null_mut(),
1375
+ /// mem_size,
1376
+ /// libc::PROT_READ | libc::PROT_WRITE,
1377
+ /// libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
1378
+ /// -1,
1379
+ /// 0,
1380
+ /// ) as *mut u8
1381
+ /// };
1382
+ ///
1383
+ /// let mem_region = kvm_userspace_memory_region {
1384
+ /// slot: 0,
1385
+ /// guest_phys_addr: guest_addr,
1386
+ /// memory_size: mem_size as u64,
1387
+ /// userspace_addr: load_addr as u64,
1388
+ /// flags: 0,
1389
+ /// };
1390
+ /// unsafe { vm.set_user_memory_region(mem_region).unwrap() };
1391
+ ///
1392
+ /// // Dummy x86 code that just calls halt.
1393
+ /// let x86_code = [0xf4 /* hlt */];
1394
+ ///
1395
+ /// // Write the code in the guest memory. This will generate a dirty page.
1396
+ /// unsafe {
1397
+ /// let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
1398
+ /// slice.write(&x86_code).unwrap();
1399
+ /// }
1400
+ ///
1401
+ /// let mut vcpu_fd = vm.create_vcpu(0).unwrap();
1402
+ ///
1403
+ /// let mut vcpu_sregs = vcpu_fd.get_sregs().unwrap();
1404
+ /// vcpu_sregs.cs.base = 0;
1405
+ /// vcpu_sregs.cs.selector = 0;
1406
+ /// vcpu_fd.set_sregs(&vcpu_sregs).unwrap();
1407
+ ///
1408
+ /// let mut vcpu_regs = vcpu_fd.get_regs().unwrap();
1409
+ /// // Set the Instruction Pointer to the guest address where we loaded the code.
1410
+ /// vcpu_regs.rip = guest_addr;
1411
+ /// vcpu_regs.rax = 2;
1412
+ /// vcpu_regs.rbx = 3;
1413
+ /// vcpu_regs.rflags = 2;
1414
+ /// vcpu_fd.set_regs(&vcpu_regs).unwrap();
1415
+ ///
1416
+ /// loop {
1417
+ /// match vcpu_fd.run().expect("run failed") {
1418
+ /// VcpuExit::Hlt => {
1419
+ /// break;
1419
1420
/// }
1421
+ /// exit_reason => panic!("unexpected exit reason: {:?}", exit_reason),
1420
1422
/// }
1421
1423
/// }
1424
+ /// # }
1422
1425
/// ```
1423
1426
pub fn run(&mut self) -> Result<VcpuExit> {
1424
1427
// SAFETY: Safe because we know that our file is a vCPU fd and we verify the return result.
0 commit comments