Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 69 additions & 0 deletions configs/vms/arceos-qemu-aarch64-smp1.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# Vm base info configs — identity of the guest VM.
#
[base]
# Guest vm id (must be unique among configured VMs on this host).
id = 1
# Guest vm name.
name = "arceos-qemu"
# Virtualization type.
vm_type = 1
# The number of virtual CPUs (single-core guest).
cpu_num = 1
# Guest vm physical cpu ids: host pCPUs backing the vCPUs, one per vCPU.
phys_cpu_ids = [0]

#
# Vm kernel configs — what to load and where in guest physical memory.
#
[kernel]
# The entry point of the kernel image (guest physical address).
entry_point = 0x8020_0000
# The location of image: "memory" | "fs".
# load from memory.
image_location = "memory"
# The file path of the kernel image.
kernel_path = "path/arceos-aarch64-dyn-smp1.bin"
# The load address of the kernel image; equal to entry_point, so execution
# begins at the first byte of the loaded image.
kernel_load_addr = 0x8020_0000
# The file path of the device tree blob (DTB).
# Commented out — presumably the VMM generates/relocates the guest FDT itself; confirm.
#dtb_path = "path/aarch64-qemu-gicv3.dtb"
# The load address of the device tree blob (DTB) — 2 MB below the kernel,
# at the base of guest RAM (see memory_regions below).
dtb_load_addr = 0x8000_0000

## The file path of the ramdisk image.
# ramdisk_path = ""
## The load address of the ramdisk image.
# ramdisk_load_addr = 0
## The path of the disk image.
# disk_path = "disk.img"

# Memory regions with format (`base_paddr`, `size`, `flags`, `map_type`).
# For `map_type`, 0 means `MAP_ALLOC`, 1 means `MAP_IDENTICAL`.
memory_regions = [
[0x8000_0000, 0x4000_0000, 0x7, 1], # System RAM 1G MAP_IDENTICAL; flags 0x7 presumably R|W|X — confirm against the mapping-flag definitions
]

#
# Device specifications
#
[devices]
# Pass-through devices. "/" passes the entire host device tree through to
# the guest (minus the exclusions listed below).
passthrough_devices = [
["/",],
]

# Devices that are not desired to be passed through to the guest
# (the host keeps the PCIe controller).
excluded_devices = [
["/pcie@10000000"],
]

# Emu_devices: emulated devices, currently all disabled.
# Name Base-Ipa Ipa_len Alloc-Irq Emu-Type EmuConfig.
emu_devices = [
# ["gppt-gicd", 0x0800_0000, 0x1_0000, 0, 0x21, []],
# ["gppt-gicr", 0x080a_0000, 0x2_0000, 0, 0x20, [1, 0x2_0000, 0]], # 1 vcpu, stride 0x20000, starts with pcpu 0
# ["gppt-gits", 0x0808_0000, 0x2_0000, 0, 0x22, [0x0808_0000]], # host_gits_base
]

# Interrupt handling mode: host interrupts are delivered to the guest directly.
interrupt_mode = "passthrough"

31 changes: 1 addition & 30 deletions src/vmm/config.rs
Original file line number Diff line number Diff line change
@@ -1,11 +1,7 @@
use axaddrspace::GuestPhysAddr;
use axerrno::AxResult;
use axvm::{
VMMemoryRegion,
config::{AxVMConfig, AxVMCrateConfig, VmMemMappingType},
};
use axvm::config::{AxVMConfig, AxVMCrateConfig, VmMemMappingType};
use core::alloc::Layout;
use memory_addr::MemoryAddr;

use crate::vmm::{VM, images::ImageLoader, vm_list::push_vm};

Expand Down Expand Up @@ -129,8 +125,6 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult {
.cloned()
.expect("VM must have at least one memory region");

config_guest_address(&vm, &main_mem);

// Load corresponding images for VM.
info!("VM[{}] created success, loading images...", vm.id());

Expand All @@ -144,29 +138,6 @@ pub fn init_guest_vm(raw_cfg: &str) -> AxResult {
Ok(())
}

/// Rebase the guest's kernel and DTB load addresses onto `main_memory`.
///
/// Only applies when the region is identity-mapped; otherwise the configured
/// addresses are left untouched. The kernel is placed at the region base
/// (shifted up by 2 MB when a BIOS image occupies the start), and both CPU
/// entry points are pointed at it. The DTB lands roughly halfway into the
/// first 512 MB of the region, but never closer than 64 MB to the base,
/// rounded up to a 2 MB boundary.
fn config_guest_address(vm: &VM, main_memory: &VMMemoryRegion) {
    const MB: usize = 1024 * 1024;

    vm.with_config(|config| {
        // Non-identity-mapped memory keeps whatever the config already says.
        if !main_memory.is_identical() {
            return;
        }

        debug!(
            "Adjusting kernel load address from {:#x} to {:#x}",
            config.image_config.kernel_load_gpa, main_memory.gpa
        );

        // Kernel at the region base; leave 2MB for BIOS when one is configured.
        let kernel_gpa = if config.image_config.bios_load_gpa.is_some() {
            main_memory.gpa + 2 * MB
        } else {
            main_memory.gpa
        };

        // DTB offset: half of the first min(size, 512 MB), at least 64 MB in.
        let dtb_offset = (main_memory.size().min(512 * MB) / 2).max(64 * MB);
        let dtb_gpa = (main_memory.gpa + dtb_offset).align_up(2 * MB);

        config.image_config.kernel_load_gpa = kernel_gpa;
        config.cpu_config.bsp_entry = kernel_gpa;
        config.cpu_config.ap_entry = kernel_gpa;
        config.image_config.dtb_load_gpa = Some(dtb_gpa);
    });
}

fn vm_alloc_memorys(vm_create_config: &AxVMCrateConfig, vm: &VM) {
const MB: usize = 1024 * 1024;
const ALIGN: usize = 2 * MB;
Expand Down
55 changes: 51 additions & 4 deletions src/vmm/fdt/create.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ use core::ptr::NonNull;
use axaddrspace::GuestPhysAddr;
use axvm::{VMMemoryRegion, config::AxVMCrateConfig};
use fdt_parser::{Fdt, Node};
use memory_addr::MemoryAddr;
use vm_fdt::{FdtWriter, FdtWriterNode};

use crate::vmm::{VMRef, images::load_vm_image_from_memory};
Expand Down Expand Up @@ -259,7 +260,7 @@ fn add_memory_node(new_memory: &[VMMemoryRegion], new_fdt: &mut FdtWriter) {
new_fdt.property_string("device_type", "memory").unwrap();
}

pub fn update_fdt(dest_addr: GuestPhysAddr, fdt_src: NonNull<u8>, dtb_size: usize, vm: VMRef) {
pub fn update_fdt(fdt_src: NonNull<u8>, dtb_size: usize, vm: VMRef) {
let mut new_fdt = FdtWriter::new().unwrap();
let mut previous_node_level = 0;
let mut node_stack: Vec<FdtWriterNode> = Vec::new();
Expand Down Expand Up @@ -314,13 +315,59 @@ pub fn update_fdt(dest_addr: GuestPhysAddr, fdt_src: NonNull<u8>, dtb_size: usiz

let new_fdt_bytes = new_fdt.finish().unwrap();

// print_guest_fdt(new_fdt_bytes.as_slice());

// crate::vmm::fdt::print::print_guest_fdt(new_fdt_bytes.as_slice());
let vm_clone = vm.clone();
let dest_addr = calculate_dtb_load_addr(vm, new_fdt_bytes.len());
info!(
"New FDT will be loaded at {:x}, size: 0x{:x}",
dest_addr,
new_fdt_bytes.len()
);
// Load the updated FDT into VM
load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm.clone())
load_vm_image_from_memory(&new_fdt_bytes, dest_addr, vm_clone)
.expect("Failed to load VM images");
}

/// Decide where in guest physical memory the rebuilt FDT should be loaded,
/// record that address in the VM config, and return it.
///
/// Selection rules:
/// - If `dtb_load_gpa` is already set in the config, it is respected as-is.
/// - Main memory > 512 MB: use the last 2 MB of the first 512 MB of the
///   region; if the FDT exceeds 2 MB, fall back to the end of main memory.
/// - Main memory <= 512 MB: use the end of main memory; if the FDT is somehow
///   larger than the whole region, fall back to the region base.
///
/// All computed addresses are aligned down to a 2 MB boundary. "Main memory"
/// is the VM's first memory region.
fn calculate_dtb_load_addr(vm: VMRef, fdt_size: usize) -> GuestPhysAddr {
    const MB: usize = 1024 * 1024;

    // Get main memory from VM memory regions outside the closure.
    let main_memory = vm
        .memory_regions()
        .first()
        .cloned()
        .expect("VM must have at least one memory region");

    // End-of-RAM placement shared by two branches below: the 2 MB-aligned
    // address leaving `fdt_size` bytes before the end of the region. Kept
    // lazy so it is never evaluated (and never underflows) in the
    // `fdt_size > main_memory.size()` fallback case.
    let place_at_end =
        || (main_memory.gpa + main_memory.size() - fdt_size).align_down(2 * MB);

    vm.with_config(|config| {
        let dtb_addr = if let Some(addr) = config.image_config.dtb_load_gpa {
            // An explicitly configured DTB address always wins.
            addr
        } else if main_memory.size() > 512 * MB {
            // Large memory: reserve the last 2 MB of the first 512 MB.
            if fdt_size <= 2 * MB {
                (main_memory.gpa + 512 * MB - 2 * MB).align_down(2 * MB)
            } else {
                // FDT larger than the reserved window: push it to the end of RAM.
                place_at_end()
            }
        } else if fdt_size <= main_memory.size() {
            // Small memory: place the FDT at the end of main memory.
            place_at_end()
        } else {
            // FDT larger than all of main memory — shouldn't happen; fall back
            // to the region base rather than underflowing the subtraction.
            main_memory.gpa.align_down(2 * MB)
        };
        // Persist the decision so later readers of the config see the same address.
        config.image_config.dtb_load_gpa = Some(dtb_addr);
        dtb_addr
    })
}

pub fn update_cpu_node(fdt: &Fdt, host_fdt: &Fdt, crate_config: &AxVMCrateConfig) -> Vec<u8> {
let mut new_fdt = FdtWriter::new().unwrap();
let mut previous_node_level = 0;
Expand Down
39 changes: 12 additions & 27 deletions src/vmm/images/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,10 @@ use crate::vmm::VMRef;
use crate::vmm::config::{config, get_vm_dtb_arc};

#[cfg(target_arch = "aarch64")]
use crate::vmm::fdt::update_fdt;
use core::ptr::NonNull;

#[cfg(target_arch = "aarch64")]
use core::ptr::NonNull;
use crate::vmm::fdt::update_fdt;

mod linux;

Expand Down Expand Up @@ -107,23 +107,17 @@ impl ImageLoader {
.expect("Failed to load VM images");
// Load DTB image
let vm_config = axvm::config::AxVMConfig::from(self.config.clone());
if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config)
&& let Some(dtb_load_gpa) = self.dtb_load_gpa
{
let dtb_slice: &[u8] = &dtb_arc;
debug!(
"DTB buffer addr: {:x}, size: {:#}",
dtb_load_gpa,
Byte::from(dtb_slice.len())
);

if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) {
let _dtb_slice: &[u8] = &dtb_arc;
#[cfg(target_arch = "aarch64")]
update_fdt(
dtb_load_gpa,
NonNull::new(dtb_slice.as_ptr() as *mut u8).unwrap(),
dtb_slice.len(),
NonNull::new(_dtb_slice.as_ptr() as *mut u8).unwrap(),
_dtb_slice.len(),
self.vm.clone(),
);
} else {
info!("dtb_load_gpa not provided");
}

// Load BIOS image
Expand Down Expand Up @@ -256,21 +250,12 @@ pub mod fs {
};
// Load DTB image if needed.
let vm_config = axvm::config::AxVMConfig::from(loader.config.clone());
if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config)
&& let Some(dtb_load_gpa) = loader.dtb_load_gpa
{
let dtb_slice: &[u8] = &dtb_arc;
debug!(
"DTB buffer addr: {:x}, size: {:#}",
dtb_load_gpa,
Byte::from(dtb_slice.len())
);

if let Some(dtb_arc) = get_vm_dtb_arc(&vm_config) {
let _dtb_slice: &[u8] = &dtb_arc;
#[cfg(target_arch = "aarch64")]
update_fdt(
dtb_load_gpa,
NonNull::new(dtb_slice.as_ptr() as *mut u8).unwrap(),
dtb_slice.len(),
NonNull::new(_dtb_slice.as_ptr() as *mut u8).unwrap(),
_dtb_slice.len(),
loader.vm.clone(),
);
}
Expand Down
Loading