From b964dcb2e6dafd107819cc8dd8174c79bb62f8f4 Mon Sep 17 00:00:00 2001 From: szy Date: Tue, 30 Sep 2025 13:46:02 +0800 Subject: [PATCH 1/3] add arceos qemu config.toml --- configs/vms/arceos-qemu-aarch64-smp1.toml | 69 +++++++++++++++++++++++ src/vmm/fdt/create.rs | 2 +- 2 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 configs/vms/arceos-qemu-aarch64-smp1.toml diff --git a/configs/vms/arceos-qemu-aarch64-smp1.toml b/configs/vms/arceos-qemu-aarch64-smp1.toml new file mode 100644 index 0000000..0ca682d --- /dev/null +++ b/configs/vms/arceos-qemu-aarch64-smp1.toml @@ -0,0 +1,69 @@ +# Vm base info configs +# +[base] +# Guest vm id. +id = 1 +# Guest vm name. +name = "arceos-qemu" +# Virtualization type. +vm_type = 1 +# The number of virtual CPUs. +cpu_num = 1 +# Guest vm physical cpu ids. +phys_cpu_ids = [0] + +# +# Vm kernel configs +# +[kernel] +# The entry point of the kernel image. +entry_point = 0x8020_0000 +# The location of image: "memory" | "fs". +# load from memory. +image_location = "memory" +# The file path of the kernel image. +kernel_path = "path/arceos-aarch64-dyn-smp1.bin" +# The load address of the kernel image. +kernel_load_addr = 0x8020_0000 +# The file path of the device tree blob (DTB). +#dtb_path = "path/aarch64-qemu-gicv3.dtb" +# The load address of the device tree blob (DTB). +dtb_load_addr = 0x8000_0000 + +## The file path of the ramdisk image. +# ramdisk_path = "" +## The load address of the ramdisk image. +# ramdisk_load_addr = 0 +## The path of the disk image. +# disk_path = "disk.img" + +# Memory regions with format (`base_paddr`, `size`, `flags`, `map_type`). +# For `map_type`, 0 means `MAP_ALLOC`, 1 means `MAP_IDENTICAL`. +memory_regions = [ + [0x8000_0000, 0x4000_0000, 0x7, 1], # System RAM 1G MAP_IDENTICAL +] + +# +# Device specifications +# +[devices] +# Pass-through devices. 
+passthrough_devices = [
+    ["/",],
+]
+
+# Devices that are not desired to be passed through to the guest
+excluded_devices = [
+    ["/pcie@10000000"],
+]
+
+# Emu_devices.
+# Name Base-Ipa Ipa_len Alloc-Irq Emu-Type EmuConfig.
+emu_devices = [
+    # ["gppt-gicd", 0x0800_0000, 0x1_0000, 0, 0x21, []],
+    # ["gppt-gicr", 0x080a_0000, 0x2_0000, 0, 0x20, [1, 0x2_0000, 0]], # 1 vcpu, stride 0x20000, starts with pcpu 0
+    # ["gppt-gits", 0x0808_0000, 0x2_0000, 0, 0x22, [0x0808_0000]], # host_gits_base
+]
+
+interrupt_mode = "passthrough"
+
diff --git a/src/vmm/fdt/create.rs b/src/vmm/fdt/create.rs
index b2f9432..94a40cd 100644
--- a/src/vmm/fdt/create.rs
+++ b/src/vmm/fdt/create.rs
@@ -314,7 +314,7 @@ pub fn update_fdt(dest_addr: GuestPhysAddr, fdt_src: NonNull<u8>, dtb_size: usiz
 
     let new_fdt_bytes = new_fdt.finish().unwrap();
 
-    // print_guest_fdt(new_fdt_bytes.as_slice());
+    // crate::vmm::fdt::print::print_guest_fdt(new_fdt_bytes.as_slice());
 
     // Load the updated FDT into VM
     load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm.clone())

From b0d9bf7d31ca923df26a19aefa882e8713623f0d Mon Sep 17 00:00:00 2001
From: szy
Date: Tue, 30 Sep 2025 16:09:13 +0800
Subject: [PATCH 2/3] calculate dtb_addr

---
 src/vmm/config.rs | 31 +--------------------------
 src/vmm/fdt/create.rs | 49 ++++++++++++++++++++++++++++++++++++++++---
 src/vmm/images/mod.rs | 39 +++++++++++-----------------------
 3 files changed, 59 insertions(+), 60 deletions(-)

diff --git a/src/vmm/config.rs b/src/vmm/config.rs
index 10a0d93..c74c6d2 100644
--- a/src/vmm/config.rs
+++ b/src/vmm/config.rs
@@ -1,11 +1,7 @@
 use axaddrspace::GuestPhysAddr;
 use axerrno::AxResult;
-use axvm::{
-    VMMemoryRegion,
-    config::{AxVMConfig, AxVMCrateConfig, VmMemMappingType},
-};
+use axvm::config::{AxVMConfig, AxVMCrateConfig, VmMemMappingType};
 use core::alloc::Layout;
-use memory_addr::MemoryAddr;
 
 use crate::vmm::{VM, images::ImageLoader, vm_list::push_vm};
 
@@ -129,8 +125,6 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult {
         .cloned()
.expect("VM must have at least one memory region"); - config_guest_address(&vm, &main_mem); - // Load corresponding images for VM. info!("VM[{}] created success, loading images...", vm.id()); @@ -144,29 +138,6 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult { Ok(()) } -fn config_guest_address(vm: &VM, main_memory: &VMMemoryRegion) { - const MB: usize = 1024 * 1024; - vm.with_config(|config| { - if main_memory.is_identical() { - debug!( - "Adjusting kernel load address from {:#x} to {:#x}", - config.image_config.kernel_load_gpa, main_memory.gpa - ); - let mut kernel_addr = main_memory.gpa; - if config.image_config.bios_load_gpa.is_some() { - kernel_addr += MB * 2; // leave 2MB for BIOS - } - let dtb_addr = (main_memory.gpa + (main_memory.size().min(512 * MB) / 2).max(64 * MB)) - .align_up(2 * MB); - - config.image_config.kernel_load_gpa = kernel_addr; - config.cpu_config.bsp_entry = kernel_addr; - config.cpu_config.ap_entry = kernel_addr; - config.image_config.dtb_load_gpa = Some(dtb_addr); - } - }); -} - fn vm_alloc_memorys(vm_create_config: &AxVMCrateConfig, vm: &VM) { const MB: usize = 1024 * 1024; const ALIGN: usize = 2 * MB; diff --git a/src/vmm/fdt/create.rs b/src/vmm/fdt/create.rs index 94a40cd..0218be8 100644 --- a/src/vmm/fdt/create.rs +++ b/src/vmm/fdt/create.rs @@ -7,6 +7,7 @@ use core::ptr::NonNull; use axaddrspace::GuestPhysAddr; use axvm::{VMMemoryRegion, config::AxVMCrateConfig}; use fdt_parser::{Fdt, Node}; +use memory_addr::MemoryAddr; use vm_fdt::{FdtWriter, FdtWriterNode}; use crate::vmm::{VMRef, images::load_vm_image_from_memory}; @@ -259,7 +260,7 @@ fn add_memory_node(new_memory: &[VMMemoryRegion], new_fdt: &mut FdtWriter) { new_fdt.property_string("device_type", "memory").unwrap(); } -pub fn update_fdt(dest_addr: GuestPhysAddr, fdt_src: NonNull, dtb_size: usize, vm: VMRef) { +pub fn update_fdt(fdt_src: NonNull, dtb_size: usize, vm: VMRef) { let mut new_fdt = FdtWriter::new().unwrap(); let mut previous_node_level = 0; let mut node_stack: Vec 
= Vec::new(); @@ -315,12 +316,54 @@ pub fn update_fdt(dest_addr: GuestPhysAddr, fdt_src: NonNull, dtb_size: usiz let new_fdt_bytes = new_fdt.finish().unwrap(); // crate::vmm::fdt::print::print_guest_fdt(new_fdt_bytes.as_slice()); - + let vm_clone = vm.clone(); + let dest_addr = calculate_dtb_load_addr(vm, new_fdt_bytes.len()); + info!("New FDT will be loaded at {:x}", dest_addr); // Load the updated FDT into VM - load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm.clone()) + load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm_clone) .expect("Failed to load VM images"); } +fn calculate_dtb_load_addr(vm: VMRef, fdt_size: usize) -> GuestPhysAddr { + const MB: usize = 1024 * 1024; + + // Get main memory from VM memory regions outside the closure + let main_memory = vm + .memory_regions() + .first() + .cloned() + .expect("VM must have at least one memory region"); + + vm.with_config(|config| { + let dtb_addr = if let Some(addr) = config.image_config.dtb_load_gpa { + // If dtb_load_gpa is already set, use the original value + addr + } else { + // If dtb_load_gpa is None, calculate based on memory size and FDT size + if main_memory.size() > 512 * MB { + // When memory size is greater than 512MB, place in the last area of the first 512MB + let available_space = 2 * MB; + if fdt_size <= available_space { + (main_memory.gpa + 512 * MB - available_space).align_down(2 * MB) + } else { + // If FDT is larger than available space, place it at the end of main memory + (main_memory.gpa + main_memory.size() - fdt_size).align_down(2 * MB) + } + } else { + // When memory size is less than or equal to 512MB, place at the end of main_memory + if fdt_size <= main_memory.size() { + (main_memory.gpa + main_memory.size() - fdt_size).align_down(2 * MB) + } else { + // This shouldn't happen, but just in case + main_memory.gpa.align_down(2 * MB) + } + } + }; + config.image_config.dtb_load_gpa = Some(dtb_addr); + dtb_addr + }) +} + pub fn update_cpu_node(fdt: &Fdt, host_fdt: &Fdt, 
crate_config: &AxVMCrateConfig) -> Vec { let mut new_fdt = FdtWriter::new().unwrap(); let mut previous_node_level = 0; diff --git a/src/vmm/images/mod.rs b/src/vmm/images/mod.rs index cfe0fca..0f3576e 100644 --- a/src/vmm/images/mod.rs +++ b/src/vmm/images/mod.rs @@ -10,10 +10,10 @@ use crate::vmm::VMRef; use crate::vmm::config::{config, get_vm_dtb_arc}; #[cfg(target_arch = "aarch64")] -use crate::vmm::fdt::update_fdt; +use core::ptr::NonNull; #[cfg(target_arch = "aarch64")] -use core::ptr::NonNull; +use crate::vmm::fdt::update_fdt; mod linux; @@ -107,23 +107,17 @@ impl ImageLoader { .expect("Failed to load VM images"); // Load DTB image let vm_config = axvm::config::AxVMConfig::from(self.config.clone()); - if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) - && let Some(dtb_load_gpa) = self.dtb_load_gpa - { - let dtb_slice: &[u8] = &dtb_arc; - debug!( - "DTB buffer addr: {:x}, size: {:#}", - dtb_load_gpa, - Byte::from(dtb_slice.len()) - ); + if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) { + let _dtb_slice: &[u8] = &dtb_arc; #[cfg(target_arch = "aarch64")] update_fdt( - dtb_load_gpa, - NonNull::new(dtb_slice.as_ptr() as *mut u8).unwrap(), - dtb_slice.len(), + NonNull::new(_dtb_slice.as_ptr() as *mut u8).unwrap(), + _dtb_slice.len(), self.vm.clone(), ); + } else { + info!("dtb_load_gpa not provided"); } // Load BIOS image @@ -256,21 +250,12 @@ pub mod fs { }; // Load DTB image if needed. 
let vm_config = axvm::config::AxVMConfig::from(loader.config.clone()); - if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) - && let Some(dtb_load_gpa) = loader.dtb_load_gpa - { - let dtb_slice: &[u8] = &dtb_arc; - debug!( - "DTB buffer addr: {:x}, size: {:#}", - dtb_load_gpa, - Byte::from(dtb_slice.len()) - ); - + if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) { + let _dtb_slice: &[u8] = &dtb_arc; #[cfg(target_arch = "aarch64")] update_fdt( - dtb_load_gpa, - NonNull::new(dtb_slice.as_ptr() as *mut u8).unwrap(), - dtb_slice.len(), + NonNull::new(_dtb_slice.as_ptr() as *mut u8).unwrap(), + _dtb_slice.len(), loader.vm.clone(), ); } From e2147e4c58ceb9546e0e0729d94691f2bf6932e6 Mon Sep 17 00:00:00 2001 From: szy Date: Tue, 30 Sep 2025 16:13:46 +0800 Subject: [PATCH 3/3] update log --- src/vmm/fdt/create.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/vmm/fdt/create.rs b/src/vmm/fdt/create.rs index 0218be8..a865949 100644 --- a/src/vmm/fdt/create.rs +++ b/src/vmm/fdt/create.rs @@ -318,7 +318,11 @@ pub fn update_fdt(fdt_src: NonNull, dtb_size: usize, vm: VMRef) { // crate::vmm::fdt::print::print_guest_fdt(new_fdt_bytes.as_slice()); let vm_clone = vm.clone(); let dest_addr = calculate_dtb_load_addr(vm, new_fdt_bytes.len()); - info!("New FDT will be loaded at {:x}", dest_addr); + info!( + "New FDT will be loaded at {:x}, size: 0x{:x}", + dest_addr, + new_fdt_bytes.len() + ); // Load the updated FDT into VM load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm_clone) .expect("Failed to load VM images");