Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 12 additions & 12 deletions Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "axvm"
authors = ["aarkegz <aarkegz@gmail.com>"]
version = "0.2.0"
version = "0.2.1"
edition = "2024"
categories = ["virtualization", "no-std"]
description = "Virtual Machine resource management crate for ArceOS's hypervisor variant."
Expand All @@ -12,15 +12,15 @@ license = "Apache-2.0"
[features]
default = ["vmx"]
vmx = []
4-level-ept = ["arm_vcpu/4-level-ept"] # TODO: Realize 4-level-ept on x86_64 and riscv64.
4-level-ept = ["axaddrspace/4-level-ept"] # TODO: Implement 4-level EPT support on x86_64 and riscv64.

[dependencies]
log = "0.4"
cfg-if = "1.0"
spin = "0.9"

# System independent crates provided by ArceOS.
axerrno = "0.1.0"
axerrno = "0.2"
cpumask = "0.1.0"
# kspin = "0.1.0"
memory_addr = "0.4"
Expand All @@ -29,18 +29,18 @@ page_table_multiarch = "0.5"
percpu = { version = "0.2.0", features = ["arm-el2"] }

# System dependent modules provided by ArceOS-Hypervisor.
axvcpu = "0.1"
axaddrspace = "0.1"
axdevice = "0.2"
axdevice_base = "0.1"
axvmconfig = { version = "0.1", default-features = false }
axvcpu = "0.2"
axaddrspace = "0.1.4"
axdevice = "0.2.1"
axdevice_base = "0.2.1"
axvmconfig = { version = "0.2", default-features = false }

[target.'cfg(target_arch = "x86_64")'.dependencies]
x86_vcpu = "0.1"
x86_vcpu = "0.2.1"

[target.'cfg(target_arch = "riscv64")'.dependencies]
riscv_vcpu = "0.1"
riscv_vcpu = "0.2.1"

[target.'cfg(target_arch = "aarch64")'.dependencies]
arm_vcpu = "0.1"
arm_vgic = { version = "0.1", features = ["vgicv3"] }
arm_vcpu = "0.2.1"
arm_vgic = { version = "0.2.1", features = ["vgicv3"] }
196 changes: 142 additions & 54 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,17 @@

use alloc::string::String;
use alloc::vec::Vec;
use core::ops::Range;

use axaddrspace::GuestPhysAddr;

pub use axvmconfig::{
AxVMCrateConfig, EmulatedDeviceConfig, PassThroughDeviceConfig, VMInterruptMode, VMType,
VmMemConfig, VmMemMappingType,
AxVMCrateConfig, EmulatedDeviceConfig, PassThroughAddressConfig, PassThroughDeviceConfig,
VMInterruptMode, VMType, VmMemConfig, VmMemMappingType,
};

/// A part of `AxVCpuConfig`, which represents an architecture-dependent `VCpu`.
///
/// The concrete type of configuration is defined in `AxArchVCpuImpl`.
// /// A part of `AxVCpuConfig`, which represents an architecture-dependent `VCpu`.
// ///
// /// The concrete type of configuration is defined in `AxArchVCpuImpl`.
// #[derive(Clone, Copy, Debug, Default)]
// pub struct AxArchVCpuConfig<H: AxVMHal> {
// pub create_config: <AxArchVCpuImpl<H> as AxArchVCpu>::CreateConfig,
Expand All @@ -45,7 +44,7 @@ pub struct AxVCpuConfig {
}

/// A part of `AxVMConfig`, which stores configuration attributes related to the load address of VM images.
#[derive(Debug, Default)]
#[derive(Debug, Default, Clone)]
pub struct VMImageConfig {
/// The load address in GPA for the kernel image.
pub kernel_load_gpa: GuestPhysAddr,
Expand All @@ -64,14 +63,15 @@ pub struct AxVMConfig {
name: String,
#[allow(dead_code)]
vm_type: VMType,
cpu_num: usize,
phys_cpu_ids: Option<Vec<usize>>,
phys_cpu_sets: Option<Vec<usize>>,
cpu_config: AxVCpuConfig,
image_config: VMImageConfig,
memory_regions: Vec<VmMemConfig>,
pub(crate) phys_cpu_ls: PhysCpuList,
/// vCPU configuration.
pub cpu_config: AxVCpuConfig,
/// VM image configuration.
pub image_config: VMImageConfig,
emu_devices: Vec<EmulatedDeviceConfig>,
pass_through_devices: Vec<PassThroughDeviceConfig>,
excluded_devices: Vec<Vec<String>>,
pass_through_addresses: Vec<PassThroughAddressConfig>,
// TODO: improve interrupt passthrough
spi_list: Vec<u32>,
interrupt_mode: VMInterruptMode,
Expand All @@ -83,9 +83,11 @@ impl From<AxVMCrateConfig> for AxVMConfig {
id: cfg.base.id,
name: cfg.base.name,
vm_type: VMType::from(cfg.base.vm_type),
cpu_num: cfg.base.cpu_num,
phys_cpu_ids: cfg.base.phys_cpu_ids,
phys_cpu_sets: cfg.base.phys_cpu_sets,
phys_cpu_ls: PhysCpuList {
cpu_num: cfg.base.cpu_num,
phys_cpu_ids: cfg.base.phys_cpu_ids,
phys_cpu_sets: cfg.base.phys_cpu_sets,
},
cpu_config: AxVCpuConfig {
bsp_entry: GuestPhysAddr::from(cfg.kernel.entry_point),
ap_entry: GuestPhysAddr::from(cfg.kernel.entry_point),
Expand All @@ -96,9 +98,11 @@ impl From<AxVMCrateConfig> for AxVMConfig {
dtb_load_gpa: cfg.kernel.dtb_load_addr.map(GuestPhysAddr::from),
ramdisk_load_gpa: cfg.kernel.ramdisk_load_addr.map(GuestPhysAddr::from),
},
memory_regions: cfg.kernel.memory_regions,
// memory_regions: cfg.kernel.memory_regions,
emu_devices: cfg.devices.emu_devices,
pass_through_devices: cfg.devices.passthrough_devices,
excluded_devices: cfg.devices.excluded_devices,
pass_through_addresses: cfg.devices.passthrough_addresses,
spi_list: Vec::new(),
interrupt_mode: cfg.devices.interrupt_mode,
}
Expand All @@ -116,32 +120,6 @@ impl AxVMConfig {
self.name.clone()
}

/// Returns one `(vCpu id, pCpu affinity mask, physical id)` tuple per vCPU.
///
/// - The affinity mask is `None` when no `phys_cpu_sets` entry exists,
///   meaning the vCPU may be placed on any available pCPU.
/// - The physical id defaults to the vCPU id when `phys_cpu_ids` is not
///   provided.
pub fn get_vcpu_affinities_pcpu_ids(&self) -> Vec<(usize, Option<usize>, usize)> {
    // Start from the defaults: no affinity restriction, physical id == vCPU id.
    let mut tuples: Vec<(usize, Option<usize>, usize)> =
        (0..self.cpu_num).map(|id| (id, None, id)).collect();

    // Overlay explicitly configured affinity bitmasks, if any.
    if let Some(masks) = &self.phys_cpu_sets {
        for (vcpu_id, mask) in masks.iter().enumerate() {
            tuples[vcpu_id].1 = Some(*mask);
        }
    }

    // Overlay explicitly configured physical ids, if any.
    if let Some(ids) = &self.phys_cpu_ids {
        for (vcpu_id, phys_id) in ids.iter().enumerate() {
            tuples[vcpu_id].2 = *phys_id;
        }
    }

    tuples
}

/// Returns configurations related to VM image load addresses.
pub fn image_config(&self) -> &VMImageConfig {
&self.image_config
Expand All @@ -159,22 +137,36 @@ impl AxVMConfig {
self.cpu_config.ap_entry
}

/// Returns configurations related to VM memory regions.
pub fn memory_regions(&self) -> &Vec<VmMemConfig> {
&self.memory_regions
/// Returns a mutable reference to the physical CPU list.
///
/// Lets callers adjust the CPU configuration (e.g. via
/// `PhysCpuList::set_guest_cpu_sets`) after the config has been built.
pub fn phys_cpu_ls_mut(&mut self) -> &mut PhysCpuList {
    &mut self.phys_cpu_ls
}

/// Adds a new memory region to the VM configuration.
pub fn add_memory_region(&mut self, region: VmMemConfig) {
self.memory_regions.push(region);
/// Returns the list of excluded devices.
///
/// NOTE(review): each inner `Vec<String>` appears to describe one excluded
/// device entry parsed from the crate-level device config — confirm the
/// exact string semantics against `axvmconfig`.
pub fn excluded_devices(&self) -> &Vec<Vec<String>> {
    &self.excluded_devices
}

/// Checks if the VM memory regions contain a specific range.
pub fn contains_memory_range(&self, range: &Range<usize>) -> bool {
self.memory_regions
.iter()
.any(|region| region.gpa <= range.start && region.gpa + region.size >= range.end)
/// Returns the list of passthrough address configurations
/// (populated from `cfg.devices.passthrough_addresses` at construction).
pub fn pass_through_addresses(&self) -> &Vec<PassThroughAddressConfig> {
    &self.pass_through_addresses
}
// /// Returns configurations related to VM memory regions.
// pub fn memory_regions(&self) -> Vec<VmMemConfig> {
// &self.memory_regions
// }

// /// Adds a new memory region to the VM configuration.
// pub fn add_memory_region(&mut self, region: VmMemConfig) {
// self.memory_regions.push(region);
// }

// /// Checks if the VM memory regions contain a specific range.
// pub fn contains_memory_range(&self, range: &Range<usize>) -> bool {
// self.memory_regions
// .iter()
// .any(|region| region.gpa <= range.start && region.gpa + region.size >= range.end)
// }

/// Returns configurations related to VM emulated devices.
pub fn emu_devices(&self) -> &Vec<EmulatedDeviceConfig> {
Expand All @@ -191,6 +183,16 @@ impl AxVMConfig {
self.pass_through_devices.push(device);
}

/// Removes the given passthrough device from the VM configuration.
///
/// Every entry equal to `device` is dropped; all other devices are kept.
pub fn remove_pass_through_device(&mut self, device: PassThroughDeviceConfig) {
    // `retain` KEEPS elements for which the predicate is true, so keep
    // everything that is not the device being removed. The previous
    // `d == &device` predicate was inverted: it retained only the device
    // supposedly being removed and deleted every other passthrough device.
    self.pass_through_devices.retain(|d| d != &device);
}

/// Removes every passthrough device from the VM configuration,
/// leaving the device list empty.
pub fn clear_pass_through_devices(&mut self) {
    // Truncating to length 0 is exactly what `Vec::clear` does: all
    // elements are dropped and the allocated capacity is kept.
    self.pass_through_devices.truncate(0);
}

/// Adds a passthrough SPI to the VM configuration.
pub fn add_pass_through_spi(&mut self, spi: u32) {
self.spi_list.push(spi);
Expand All @@ -206,3 +208,89 @@ impl AxVMConfig {
self.interrupt_mode
}
}

/// Represents the list of physical CPUs available for the VM.
///
/// Bundles the vCPU count with the optional per-vCPU physical-id and
/// affinity lists taken from the crate-level VM configuration.
#[derive(Debug, Default, Clone)]
pub struct PhysCpuList {
    // Number of vCPUs configured for this VM.
    cpu_num: usize,
    // Optional per-vCPU physical CPU ids; when absent, the physical id
    // defaults to the vCPU id (see `get_vcpu_affinities_pcpu_ids`).
    phys_cpu_ids: Option<Vec<usize>>,
    // Optional per-vCPU pCPU affinity bitmasks; `None` means the vCPU may
    // run on any available pCPU.
    phys_cpu_sets: Option<Vec<usize>>,
}

impl PhysCpuList {
    /// Returns one `(vCpu id, pCpu affinity mask, physical id)` tuple per vCPU.
    ///
    /// - The affinity mask is `None` when no `phys_cpu_sets` entry is
    ///   configured, meaning the vCPU may be allocated to any available pCPU.
    /// - The physical id defaults to the vCPU id when `phys_cpu_ids` is not
    ///   provided.
    /// - On riscv64 only: when `phys_cpu_ids` is given but `phys_cpu_sets` is
    ///   not, each vCPU gets a single-bit mask derived from its physical id.
    pub fn get_vcpu_affinities_pcpu_ids(&self) -> Vec<(usize, Option<usize>, usize)> {
        // Sanity check: a configured physical-id list must have one entry
        // per vCPU. (Logged only; the code below still indexes by vCPU id
        // and will panic if the list is longer than `cpu_num`.)
        if let Some(phys_cpu_ids) = &self.phys_cpu_ids
            && self.cpu_num != phys_cpu_ids.len()
        {
            error!(
                "phys_cpu_ids length mismatch: cpu_num = {}, phys_cpu_ids = {:?}",
                self.cpu_num, self.phys_cpu_ids
            );
        }

        // Start from the defaults: no affinity restriction, physical id == vCPU id.
        let mut vcpu_pcpu_tuples: Vec<(usize, Option<usize>, usize)> =
            (0..self.cpu_num).map(|id| (id, None, id)).collect();

        // Tracks whether explicit affinity masks were applied. Declared
        // unconditionally so the mask-overlay loop below needs no per-target
        // duplication; it is only read on riscv64.
        let mut pcpu_mask_assigned = false;

        if let Some(phys_cpu_sets) = &self.phys_cpu_sets {
            pcpu_mask_assigned = true;
            for (vcpu_id, pcpu_mask_bitmap) in phys_cpu_sets.iter().enumerate() {
                vcpu_pcpu_tuples[vcpu_id].1 = Some(*pcpu_mask_bitmap);
            }
        }

        if let Some(phys_cpu_ids) = &self.phys_cpu_ids {
            for (vcpu_id, phys_id) in phys_cpu_ids.iter().enumerate() {
                vcpu_pcpu_tuples[vcpu_id].2 = *phys_id;
                // On riscv64, when no mask was configured explicitly, pin the
                // vCPU to its physical CPU via a single-bit mask.
                #[cfg(target_arch = "riscv64")]
                if !pcpu_mask_assigned {
                    vcpu_pcpu_tuples[vcpu_id].1 = Some(1 << *phys_id);
                }
            }
        }

        // On non-riscv64 targets the flag is never read; silence the
        // unused-assignment warning.
        let _ = pcpu_mask_assigned;

        vcpu_pcpu_tuples
    }

    /// Returns the number of vCPUs configured for the VM.
    pub fn cpu_num(&self) -> usize {
        self.cpu_num
    }

    /// Returns the optional per-vCPU physical CPU id list.
    pub fn phys_cpu_ids(&self) -> &Option<Vec<usize>> {
        &self.phys_cpu_ids
    }

    /// Returns the optional per-vCPU pCPU affinity bitmask list.
    pub fn phys_cpu_sets(&self) -> &Option<Vec<usize>> {
        &self.phys_cpu_sets
    }

    /// Overrides the per-vCPU pCPU affinity bitmask list.
    pub fn set_guest_cpu_sets(&mut self, phys_cpu_sets: Vec<usize>) {
        self.phys_cpu_sets = Some(phys_cpu_sets);
    }
}
8 changes: 0 additions & 8 deletions src/hal.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,6 @@ pub trait AxVMHal: Sized {
/// The low-level **OS-dependent** helpers that must be provided for physical address management.
type PagingHandler: page_table_multiarch::PagingHandler;

/// Allocates a memory region at the specified physical address.
///
/// Returns `true` if the memory region is successfully allocated.
fn alloc_memory_region_at(base: HostPhysAddr, size: usize) -> bool;

/// Deallocates a memory region at the specified physical address.
fn dealloc_memory_region_at(base: HostPhysAddr, size: usize);

/// Converts a virtual address to the corresponding physical address.
fn virt_to_phys(vaddr: HostVirtAddr) -> HostPhysAddr;

Expand Down
2 changes: 2 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ pub use hal::AxVMHal;
pub use vm::AxVCpuRef;
pub use vm::AxVM;
pub use vm::AxVMRef;
pub use vm::VMMemoryRegion;
pub use vm::VMStatus;

/// The architecture-independent per-CPU type.
pub type AxVMPerCpu<U> = axvcpu::AxPerCpu<vcpu::AxVMArchPerCpuImpl<U>>;
Expand Down
Loading