@@ -26,7 +26,7 @@ unsafe fn restore_host_sp_el0() {
2626/// (v)CPU register state that must be saved or restored when entering/exiting a VM or switching
2727/// between VMs.
2828#[ repr( C ) ]
29- #[ derive( Clone , Debug , Copy , Default ) ]
29+ #[ derive( Clone , Debug , Copy ) ]
3030pub struct VmCpuRegisters {
3131 /// guest trap context
3232 pub trap_context_regs : TrapFrame ,
@@ -45,11 +45,14 @@ pub struct Aarch64VCpu<H: AxVCpuHal> {
4545 guest_system_regs : GuestSystemRegisters ,
4646 /// The MPIDR_EL1 value for the vCPU.
4747 mpidr : u64 ,
48+ entry_guest : unsafe extern "C" fn ( ) -> !,
4849 _phantom : PhantomData < H > ,
4950}
5051
52+ unsafe impl < H : AxVCpuHal > Send for Aarch64VCpu < H > { }
53+
5154/// Configuration for creating a new `Aarch64VCpu`
52- #[ derive( Clone , Debug , Default ) ]
55+ #[ derive( Clone , Debug ) ]
5356pub struct Aarch64VCpuCreateConfig {
5457 /// The MPIDR_EL1 value for the new vCPU,
5558 /// which is used to identify the CPU in a multiprocessor system.
@@ -58,6 +61,24 @@ pub struct Aarch64VCpuCreateConfig {
5861 pub mpidr_el1 : u64 ,
5962 /// The address of the device tree blob.
6063 pub dtb_addr : usize ,
64+ /// The entry function for the guest.
65+ pub entry_guest : unsafe extern "C" fn ( ) -> !,
66+ }
67+
68+ unsafe impl Send for Aarch64VCpuCreateConfig { }
69+
70+ impl Default for Aarch64VCpuCreateConfig {
71+ fn default ( ) -> Self {
72+ Self {
73+ mpidr_el1 : 0 ,
74+ dtb_addr : 0 ,
75+ entry_guest : default_entry_guest,
76+ }
77+ }
78+ }
79+
80+ unsafe extern "C" fn default_entry_guest ( ) -> ! {
81+ panic ! ( "default_entry_guest called" ) ;
6182}
6283
6384/// Configuration for setting up a new `Aarch64VCpu`
@@ -83,6 +104,7 @@ impl<H: AxVCpuHal> axvcpu::AxArchVCpu for Aarch64VCpu<H> {
83104 host_stack_top : 0 ,
84105 guest_system_regs : GuestSystemRegisters :: default ( ) ,
85106 mpidr : config. mpidr_el1 ,
107+ entry_guest : config. entry_guest ,
86108 _phantom : PhantomData ,
87109 } )
88110 }
@@ -167,68 +189,25 @@ impl<H: AxVCpuHal> Aarch64VCpu<H> {
167189 self . guest_system_regs . sctlr_el1 = 0x30C50830 ;
168190 self . guest_system_regs . pmcr_el0 = 0 ;
169191
170- // use 3 level ept paging
171- // - 4KiB granule (TG0)
172- // - 39-bit address space (T0_SZ)
173- // - start at level 1 (SL0)
174- #[ cfg( not( feature = "4-level-ept" ) ) ]
175- {
176- self . guest_system_regs . vtcr_el2 = ( VTCR_EL2 :: PS :: PA_40B_1TB
177- + VTCR_EL2 :: TG0 :: Granule4KB
192+ let sl0 = Self :: probe_sl0_support ( ) ;
193+
194+ self . guest_system_regs . vtcr_el2 = sl0
195+ + ( VTCR_EL2 :: TG0 :: Granule4KB
178196 + VTCR_EL2 :: SH0 :: Inner
179197 + VTCR_EL2 :: ORGN0 :: NormalWBRAWA
180- + VTCR_EL2 :: IRGN0 :: NormalWBRAWA
181- + VTCR_EL2 :: SL0 . val ( 0b01 )
182- + VTCR_EL2 :: T0SZ . val ( 64 - 39 ) )
183- . into ( ) ;
184- }
185-
186- // use 4 level ept paging
187- // - 4KiB granule (TG0)
188- // - 48-bit address space (T0_SZ)
189- // - start at level 0 (SL0)
190- #[ cfg( feature = "4-level-ept" ) ]
191- {
192- // read PARange (bits 3:0)
193- let parange = ( ID_AA64MMFR0_EL1 . get ( ) & 0xF ) as u8 ;
194- // ARM Definition: 0x5 indicates 48 bits PA, 0x4 indicates 44 bits PA, and so on.
195- if parange <= 0x5 {
196- panic ! (
197- "CPU only supports {}-bit PA (< 48), \
198- cannot enable 4-level EPT paging!",
199- match parange {
200- 0x0 => 32 ,
201- 0x1 => 36 ,
202- 0x2 => 40 ,
203- 0x3 => 42 ,
204- 0x4 => 44 ,
205- _ => 48 ,
206- }
207- ) ;
208- }
209- self . guest_system_regs . vtcr_el2 = ( VTCR_EL2 :: PS :: PA_48B_256TB
210- + VTCR_EL2 :: TG0 :: Granule4KB
211- + VTCR_EL2 :: SH0 :: Inner
212- + VTCR_EL2 :: ORGN0 :: NormalWBRAWA
213- + VTCR_EL2 :: IRGN0 :: NormalWBRAWA
214- + VTCR_EL2 :: SL0 . val ( 0b10 ) // 0b10 means start at level 0
215- + VTCR_EL2 :: T0SZ . val ( 64 - 48 ) )
216- . into ( ) ;
217- }
198+ + VTCR_EL2 :: IRGN0 :: NormalWBRAWA )
199+ . value ;
218200
219- let mut hcr_el2 = HCR_EL2 :: VM :: Enable
220- + HCR_EL2 :: RW :: EL1IsAarch64
221- + HCR_EL2 :: FMO :: EnableVirtualFIQ
222- + HCR_EL2 :: TSC :: EnableTrapEl1SmcToEl2
223- + HCR_EL2 :: RW :: EL1IsAarch64 ;
201+ let mut hcr_el2 =
202+ HCR_EL2 :: VM :: Enable + HCR_EL2 :: TSC :: EnableTrapEl1SmcToEl2 + HCR_EL2 :: RW :: EL1IsAarch64 ;
224203
225204 if !config. passthrough_interrupt {
226205 // Set HCR_EL2.IMO will trap IRQs to EL2 while enabling virtual IRQs.
227206 //
228207 // We must choose one of the two:
229208 // - Enable virtual IRQs and trap physical IRQs to EL2.
230209 // - Disable virtual IRQs and pass through physical IRQs to EL1.
231- hcr_el2 += HCR_EL2 :: IMO :: EnableVirtualIRQ ;
210+ hcr_el2 += HCR_EL2 :: IMO :: EnableVirtualIRQ + HCR_EL2 :: FMO :: EnableVirtualFIQ ;
232211 }
233212
234213 self . guest_system_regs . hcr_el2 = hcr_el2. into ( ) ;
@@ -241,6 +220,37 @@ impl<H: AxVCpuHal> Aarch64VCpu<H> {
241220 self . guest_system_regs . vmpidr_el2 = vmpidr;
242221 }
243222
223+ fn probe_sl0_support ( ) -> u64 {
224+ let pa_bits = match ID_AA64MMFR0_EL1 . read_as_enum ( ID_AA64MMFR0_EL1 :: PARange ) {
225+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_32 ) => 32 ,
226+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_36 ) => 36 ,
227+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_40 ) => 40 ,
228+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_42 ) => 42 ,
229+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_44 ) => 44 ,
230+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_48 ) => 48 ,
231+ Some ( ID_AA64MMFR0_EL1 :: PARange :: Value :: Bits_52 ) => 52 ,
232+ _ => 32 , // Unrecognized PARange value: conservatively fall back to 32-bit PA
233+ } ;
234+
235+ // Infer the supported EPT starting level from the physical address width
236+ let mut val = match pa_bits {
237+ 44 .. => VTCR_EL2 :: SL0 :: Granule4KBLevel0 + VTCR_EL2 :: T0SZ . val ( 64 - 48 ) , // start at level 0 (4-level page tables)
238+ _ => VTCR_EL2 :: SL0 :: Granule4KBLevel1 + VTCR_EL2 :: T0SZ . val ( 64 - 39 ) , // start at level 1 (3-level page tables)
239+ } ;
240+
241+ match pa_bits {
242+ 52 ..=64 => val += VTCR_EL2 :: PS :: PA_52B_4PB ,
243+ 48 ..=51 => val += VTCR_EL2 :: PS :: PA_48B_256TB ,
244+ 44 ..=47 => val += VTCR_EL2 :: PS :: PA_44B_16TB ,
245+ 42 ..=43 => val += VTCR_EL2 :: PS :: PA_42B_4TB ,
246+ 40 ..=41 => val += VTCR_EL2 :: PS :: PA_40B_1TB ,
247+ 36 ..=39 => val += VTCR_EL2 :: PS :: PA_36B_64GB ,
248+ _ => val += VTCR_EL2 :: PS :: PA_32B_4GB ,
249+ }
250+
251+ val. value
252+ }
253+
244254 /// Set exception return pc
245255 fn set_elr ( & mut self , elr : usize ) {
246256 self . ctx . set_exception_pc ( elr) ;
@@ -302,6 +312,8 @@ impl<H: AxVCpuHal> Aarch64VCpu<H> {
302312 core:: arch:: naked_asm!(
303313 // Save host context.
304314 save_regs_to_stack!( ) ,
315+ // Save self pointer before modifying x0
316+ "mov x10, x0" ,
305317 // Save current host stack top to `self.host_stack_top`.
306318 //
307319 // 'extern "C"' here specifies the aapcs64 calling convention, according to which
@@ -311,11 +323,14 @@ impl<H: AxVCpuHal> Aarch64VCpu<H> {
311323 "str x9, [x0]" ,
312324 "mov sp, x0" ,
313325 "sub sp, sp, {host_stack_top_offset}" ,
314- // Go to `context_vm_entry`.
315- "b {entry}" ,
326+ // Load entry_guest function pointer and jump to it
327+ "add x1, x10, {entry_guest_offset}" ,
328+ "ldr x2, [x1]" ,
329+ "br x2" ,
316330 "b {run_guest_panic}" ,
331+
317332 host_stack_top_offset = const core:: mem:: size_of:: <TrapFrame >( ) ,
318- entry = sym axcpu :: el2 :: enter_guest ,
333+ entry_guest_offset = const core :: mem :: offset_of! ( Aarch64VCpu < H > , entry_guest ) ,
319334 run_guest_panic = sym Self :: run_guest_panic,
320335 ) ;
321336 }