use core::sync::atomic::{AtomicUsize, Ordering};
use std::{
    collections::btree_map::BTreeMap,
    os::arceos::{api::task::AxCpuMask, modules::axtask::set_current_affinity},
    sync::Arc,
};

use alloc::vec::Vec;
use anyhow::anyhow;
use log::info;

use crate::{
    RunError, TASK_STACK_SIZE, VmAddrSpace, arch::cpu::VCpu, data::VmDataWeak, vhal::cpu::CpuHardId,
};

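/// Shared state for a running virtual machine: its vCPUs keyed by hardware
/// id, the guest address space, and a weak handle back to the owning VM.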
pub struct VmMachineRunningCommon {
    pub cpus: BTreeMap<CpuHardId, VCpu>,
    pub vmspace: VmAddrSpace,
    pub vm: VmDataWeak,
    running_cpu_count: Arc<AtomicUsize>,
}

impl VmMachineRunningCommon {
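    /// Builds the running state from a fresh set of vCPUs, indexing them by
    /// hardware id.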
    pub fn new(vmspace: VmAddrSpace, vcpu: Vec<VCpu>, vm: VmDataWeak) -> Self {
        let mut cpus = BTreeMap::new();
        for cpu in vcpu {
            cpus.insert(cpu.hard_id(), cpu);
        }

        VmMachineRunningCommon {
            vmspace,
            cpus,
            vm,
            running_cpu_count: Arc::new(AtomicUsize::new(0)),
        }
    }

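    /// Removes and returns the vCPU with the lowest hardware id, or an error
    /// if none remain.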
    pub fn take_cpu(&mut self) -> anyhow::Result<VCpu> {
        // `pop_first` removes the entry with the smallest key in one step,
        // avoiding the separate key lookup and `unwrap`.
        let (_, cpu) = self
            .cpus
            .pop_first()
            .ok_or_else(|| anyhow!("No CPUs available"))?;
        Ok(cpu)
    }

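    /// Spawns a dedicated thread for `cpu`, pins it to the vCPU's bound
    /// physical CPU, and runs the vCPU until it exits. A caller could start
    /// every remaining vCPU with, for example,
    /// `while let Ok(cpu) = machine.take_cpu() { machine.run_cpu(cpu)?; }`
    /// (where `machine` is a hypothetical `VmMachineRunningCommon` value).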
    pub fn run_cpu(&mut self, mut cpu: VCpu) -> anyhow::Result<()> {
        let waiter = self.new_waiter();

        let bind_id = cpu.bind_id();
        std::thread::Builder::new()
            .name(format!("init-cpu-{}", bind_id))
            .stack_size(TASK_STACK_SIZE)
            .spawn(move || {
                // Pin this thread to the physical CPU the vCPU is bound to.
                assert!(
                    set_current_affinity(AxCpuMask::one_shot(bind_id.raw())),
                    "Failed to initialize CPU affinity!"
                );
                info!("Starting VCpu {} on {}", cpu.hard_id(), bind_id);
                if let Err(e) = cpu.run() {
                    if let Some(vm) = waiter.vm.upgrade() {
                        vm.set_err(RunError::ExitWithError(e));
                    }
                }
                // `fetch_sub` returns the previous value: a result of 1 means
                // this was the last running vCPU. Checking the return value
                // avoids the race between a separate decrement and load.
                if waiter.running_cpu_count.fetch_sub(1, Ordering::SeqCst) == 1 {
                    waiter.vm.set_stopped();
                }
            })
            .map_err(|e| anyhow!("{e:?}"))?;

        Ok(())
    }

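    /// Registers one more running vCPU and returns the `Waiter` the spawned
    /// thread uses to report its exit back to the VM.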
    fn new_waiter(&self) -> Waiter {
        let running_cpu_count = self.running_cpu_count.clone();
        running_cpu_count.fetch_add(1, Ordering::SeqCst);
        Waiter {
            running_cpu_count,
            vm: self.vm.clone(),
        }
    }

    pub fn vmspace(&self) -> &VmAddrSpace {
        &self.vmspace
    }
}

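/// Handle moved into each vCPU thread: the count of still-running vCPUs plus
/// a weak reference to the owning VM.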
struct Waiter {
    running_cpu_count: Arc<AtomicUsize>,
    vm: VmDataWeak,
}