diff --git a/Cargo.toml b/Cargo.toml index ca077bb..b8baadd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "xen", "xen-bindings", "xen-ioctls", + "xen-unix", "xen-store", "xen-sys", ] diff --git a/oxerun/aarch64-xen-hvm.json b/aarch64-xen-hvm.json similarity index 73% rename from oxerun/aarch64-xen-hvm.json rename to aarch64-xen-hvm.json index d610f19..918f44b 100644 --- a/oxerun/aarch64-xen-hvm.json +++ b/aarch64-xen-hvm.json @@ -1,6 +1,6 @@ { "arch": "aarch64", - "data-layout": "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", + "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32", "disable-redzone": true, "executables": true, "features": "+strict-align,+neon,+fp-armv8", @@ -13,6 +13,6 @@ "max-atomic-width": 128, "panic-strategy": "abort", "relocation-model": "static", - "target-pointer-width": "64", + "target-pointer-width": 64, "vendor": "xen" } diff --git a/oxerun/aarch64-xen-hvm.ld b/aarch64-xen-hvm.ld similarity index 100% rename from oxerun/aarch64-xen-hvm.ld rename to aarch64-xen-hvm.ld diff --git a/oxerun/Cargo.toml b/oxerun/Cargo.toml index ed731f9..cb72f5d 100644 --- a/oxerun/Cargo.toml +++ b/oxerun/Cargo.toml @@ -1,14 +1,51 @@ [package] -authors = ["Doug Goldstein "] +authors = [ + "Doug Goldstein ", + "Teddy Astie ", + "The Rust Hypervisor Firmware Authors", # Based on Rust Hypervisor Firmware code +] description = "Compiler bits to create Rust unikernels for Xen" homepage = "https://github.com/rust-vmm/xen-sys" repository = "https://github.com/rust-vmm/xen-sys.git" -license = "Apache-2.0 OR MIT" +license = "Apache-2.0" name = "oxerun" readme = "README.md" version = "0.1.0" -edition = "2018" +edition = "2024" + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = "fat" [dependencies] +bitflags = "2.9.1" +atomic_refcell = "0.1.13" +log = "0.4.27" +volatile = { version = "0.6.1", features = ["derive", "unstable"] } xen = { path = "../xen" } 
xen-sys = { path = "../xen-sys" } +enum_dispatch = "0.3.13" + +[target.'cfg(target_arch = "aarch64")'.dependencies] +tock-registers = "0.10.0" +aarch64-cpu = "10.0.0" +fdt = "0.1.5" + +[target.'cfg(target_arch = "x86_64")'.dependencies] +uart_16550 = "0.4.0" +x86_64 = { version = "0.15.2", default-features = false, features = [ + "instructions", +] } + +[target.'cfg(target_arch = "riscv64")'.dependencies] +fdt = "0.1.5" + +[features] +sev = ["small_mm"] +fastabi = [] + +# Use a small memory model, disable to use a flat 4GB memory model +small_mm = [] diff --git a/oxerun/README.md b/oxerun/README.md index 09da1f7..8f37263 100644 --- a/oxerun/README.md +++ b/oxerun/README.md @@ -4,16 +4,20 @@ This is an example build of Rust building a full unikernel for Xen. +You need to use nightly Rust compiler. + ## Building for x86_64: +Using Rust 1.92 nightly build. + ```shell -# cargo build --target x86_64-xen-pv.json -Zbuild-std=core -Zbuild-std-features=compiler-builtins-mem +# cargo build -p oxerun -Zbuild-std=core -Zbuild-std-features=compiler-builtins-mem -Zjson-target-spec --target x86_64-xen-hvm.json --bin hello ``` And to generate assember files in target/x86_64-xen-pv/{release|debug}/deps/ ```shell -# RUSTFLAGS="--emit asm -C llvm-args=-x86-asm-syntax=intel" cargo build --target x86_64-xen-pv.json -Zbuild-std=core -Zbuild-std-features=compiler-builtins-mem +# RUSTFLAGS="--emit asm -C llvm-args=-x86-asm-syntax=intel" cargo build -p oxerun -Zbuild-std=core -Zbuild-std-features=compiler-builtins-mem -Zjson-target-spec --target x86_64-xen-hvm.json --bin hello ``` ## Building for aarch64: diff --git a/oxerun/src/arch/aarch64/asm.rs b/oxerun/src/arch/aarch64/asm.rs new file mode 100644 index 0000000..d7baac8 --- /dev/null +++ b/oxerun/src/arch/aarch64/asm.rs @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +use super::layout::map; +use core::arch::global_asm; + +global_asm!(include_str!("ram64.s"), + FDT_START = const 
map::dram::FDT_START); diff --git a/oxerun/src/arch/aarch64/layout.rs b/oxerun/src/arch/aarch64/layout.rs new file mode 100644 index 0000000..f903622 --- /dev/null +++ b/oxerun/src/arch/aarch64/layout.rs @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo +// Copyright (c) 2021-2022 Andre Richter + +use core::{ + cell::UnsafeCell, + ops::{Range, RangeInclusive}, +}; + +use crate::layout::{MemoryAttribute, MemoryDescriptor, MemoryLayout}; + +use super::paging::*; + +unsafe extern "Rust" { + unsafe static code_start: UnsafeCell<()>; + unsafe static code_end: UnsafeCell<()>; + unsafe static data_start: UnsafeCell<()>; + unsafe static data_end: UnsafeCell<()>; + unsafe static stack_start: UnsafeCell<()>; + unsafe static stack_end: UnsafeCell<()>; +} + +pub mod map { + // Create page table for 2T + pub const END: usize = 0x20_000_000_000; + + // Firmware region won't be used by this firmware, so merge it into mmio region + // is harmless and better for management. 
+ pub mod mmio { + pub const START: usize = 0x0000_0000; + pub const PL011_START: usize = 0x0900_0000; + pub const PL031_START: usize = 0x0901_0000; + pub const END: usize = 0x4000_0000; + } + + pub mod dram { + pub const FDT_SIZE: usize = 0x0020_0000; + pub const ACPI_SIZE: usize = 0x0020_0000; + + pub const START: usize = super::mmio::END; + pub const FDT_START: usize = START; + pub const ACPI_START: usize = FDT_START + FDT_SIZE; + pub const END: usize = super::END; + } +} + +pub type KernelAddrSpace = AddressSpace<{ map::END }>; + +const NUM_MEM_RANGES: usize = 2; + +pub static LAYOUT: KernelVirtualLayout = KernelVirtualLayout::new( + map::END - 1, + [ + TranslationDescriptor { + name: "Device MMIO", + virtual_range: RangeInclusive::new(map::mmio::START, map::mmio::END - 1), + physical_range_translation: Translation::Identity, + attribute_fields: AttributeFields { + mem_attributes: MemAttributes::Device, + acc_perms: AccessPermissions::ReadWrite, + execute_never: true, + }, + }, + TranslationDescriptor { + name: "System Memory", + virtual_range: RangeInclusive::new(map::dram::START, map::dram::END - 1), + physical_range_translation: Translation::Identity, + attribute_fields: AttributeFields { + mem_attributes: MemAttributes::CacheableDRAM, + acc_perms: AccessPermissions::ReadWrite, // FIXME + execute_never: false, + }, + }, + ], +); + +pub fn virt_mem_layout() -> &'static KernelVirtualLayout { + &LAYOUT +} + +pub fn mmio_range() -> Range { + map::mmio::START..map::mmio::END +} + +pub fn reserved_range() -> Range { + map::dram::START..map::dram::END +} + +pub fn code_range() -> Range { + unsafe { (code_start.get() as _)..(code_end.get() as _) } +} + +pub fn data_range() -> Range { + unsafe { (data_start.get() as _)..(data_end.get() as _) } +} + +pub fn stack_range() -> Range { + unsafe { (stack_start.get() as _)..(stack_end.get() as _) } +} + +const NUM_MEM_DESCS: usize = 5; + +pub static MEM_LAYOUT: MemoryLayout = [ + MemoryDescriptor { + name: "MMIO", + range: 
mmio_range, + attribute: MemoryAttribute::Mmio, + }, + MemoryDescriptor { + name: "Reserved", + range: reserved_range, + attribute: MemoryAttribute::Unusable, + }, + MemoryDescriptor { + name: "Code", + range: code_range, + attribute: MemoryAttribute::Code, + }, + MemoryDescriptor { + name: "Data", + range: data_range, + attribute: MemoryAttribute::Data, + }, + MemoryDescriptor { + name: "Stack", + range: stack_range, + attribute: MemoryAttribute::Data, + }, +]; diff --git a/oxerun/src/arch/aarch64/mod.rs b/oxerun/src/arch/aarch64/mod.rs new file mode 100644 index 0000000..f7cf5bc --- /dev/null +++ b/oxerun/src/arch/aarch64/mod.rs @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +#[cfg(not(test))] +pub mod asm; +pub mod layout; +pub mod paging; +pub mod simd; +mod translation; diff --git a/oxerun/src/arch/aarch64/paging.rs b/oxerun/src/arch/aarch64/paging.rs new file mode 100644 index 0000000..84b814f --- /dev/null +++ b/oxerun/src/arch/aarch64/paging.rs @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo +// Copyright (c) 2021-2022 Andre Richter + +use core::{cell::SyncUnsafeCell, ops::RangeInclusive}; + +use aarch64_cpu::{ + asm::barrier, + registers::{Readable, Writeable, *}, +}; + +use self::interface::Mmu; +use super::{ + layout::{ + KernelAddrSpace, code_range, + map::dram::{ACPI_SIZE, FDT_SIZE, FDT_START}, + }, + translation::TranslationTable, +}; + +/// MMU enable errors variants. +#[derive(Debug)] +pub enum MmuEnableError { + AlreadyEnabled, + #[allow(dead_code)] + Other(&'static str), +} + +/// Memory Management interfaces. +pub mod interface { + use super::*; + + /// MMU functions. + pub trait Mmu { + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MmuEnableError>; + + fn is_enabled(&self) -> bool; + } +} + +/// Describes the characteristics of a translation granule. +pub struct TranslationGranule; + +/// Describes properties of an address space. 
+pub struct AddressSpace; + +/// Architecture agnostic translation types. +#[allow(dead_code)] +#[derive(Copy, Clone)] +pub enum Translation { + Identity, + Offset(usize), +} + +/// Architecture agnostic memory attributes. +#[derive(Copy, Clone)] +pub enum MemAttributes { + CacheableDRAM, + Device, +} + +/// Architecture agnostic access permissions. +#[derive(Copy, Clone)] +pub enum AccessPermissions { + ReadOnly, + ReadWrite, +} + +/// Collection of memory attributes. +#[derive(Copy, Clone)] +pub struct AttributeFields { + pub mem_attributes: MemAttributes, + pub acc_perms: AccessPermissions, + pub execute_never: bool, +} + +impl Default for AttributeFields { + fn default() -> AttributeFields { + AttributeFields { + mem_attributes: MemAttributes::CacheableDRAM, + acc_perms: AccessPermissions::ReadWrite, + execute_never: true, + } + } +} + +/// Architecture agnostic descriptor for a memory range. +pub struct TranslationDescriptor { + #[allow(dead_code)] + pub name: &'static str, + pub virtual_range: RangeInclusive, + pub physical_range_translation: Translation, + pub attribute_fields: AttributeFields, +} + +/// Type for expressing the kernel's virtual memory layout. +pub struct KernelVirtualLayout { + /// The last (inclusive) address of the address space. + max_virt_addr_inclusive: usize, + + /// Array of descriptors for non-standard (normal cacheable DRAM) memory + /// regions. + inner: [TranslationDescriptor; NUM_SPECIAL_RANGES], +} + +impl TranslationGranule { + /// The granule's size. + pub const SIZE: usize = Self::size_checked(); + + /// The granule's shift, aka log2(size). + pub const SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + + /// The page descriptor's output address mask (48bits) + pub const ADDR_MASK: usize = 0xffffffff << Self::SHIFT; + + const fn size_checked() -> usize { + assert!(GRANULE_SIZE.is_power_of_two()); + + GRANULE_SIZE + } +} + +impl AddressSpace { + /// The address space size. 
+ pub const SIZE: usize = Self::size_checked(); + + /// The address space shift, aka log2(size). + pub const SIZE_SHIFT: usize = Self::SIZE.trailing_zeros() as usize; + + const fn size_checked() -> usize { + assert!(AS_SIZE.is_power_of_two()); + + // Check for architectural restrictions as well. + Self::arch_address_space_size_sanity_checks(); + + AS_SIZE + } +} + +impl KernelVirtualLayout<{ NUM_SPECIAL_RANGES }> { + /// Create a new instance. + pub const fn new(max: usize, layout: [TranslationDescriptor; NUM_SPECIAL_RANGES]) -> Self { + Self { + max_virt_addr_inclusive: max, + inner: layout, + } + } + + /// For a virtual address, find and return the physical output address and + /// corresponding attributes. + /// + /// If the address is not found in `inner`, return an identity mapped + /// default with normal cacheable DRAM attributes. + pub fn virt_addr_properties( + &self, + virt_addr: usize, + ) -> Result<(usize, AttributeFields), &'static str> { + if virt_addr > self.max_virt_addr_inclusive { + return Err("Address out of range"); + } + + // Enhance security for fdt, acpi and code memory range + let code = code_range(); + let fdt_acpi = FDT_START..(FDT_START + FDT_SIZE + ACPI_SIZE); + if code.contains(&virt_addr) { + let attr = AttributeFields { + mem_attributes: MemAttributes::CacheableDRAM, + acc_perms: AccessPermissions::ReadOnly, + execute_never: false, + }; + return Ok((virt_addr, attr)); + } else if fdt_acpi.contains(&virt_addr) { + let attr = AttributeFields { + mem_attributes: MemAttributes::CacheableDRAM, + acc_perms: AccessPermissions::ReadOnly, + execute_never: true, + }; + return Ok((virt_addr, attr)); + } + + for i in self.inner.iter() { + if i.virtual_range.contains(&virt_addr) { + let output_addr = match i.physical_range_translation { + Translation::Identity => virt_addr, + Translation::Offset(a) => a + (virt_addr - (i.virtual_range).start()), + }; + + return Ok((output_addr, i.attribute_fields)); + } + } + + Ok((virt_addr, 
AttributeFields::default())) + } +} + +/// Memory Management Unit type. +struct MemoryManagementUnit; + +pub type Granule512MiB = TranslationGranule<{ 512 * 1024 * 1024 }>; +pub type Granule64KiB = TranslationGranule<{ 64 * 1024 }>; + +/// Constants for indexing the MAIR_EL1. +pub mod mair { + pub const DEVICE: u64 = 0; + pub const NORMAL: u64 = 1; +} + +/// The kernel translation tables. +/// +/// # Safety +/// +/// - Supposed to land in `.bss`. Therefore, ensure that all initial member +/// values boil down to "0". +static KERNEL_TABLES: SyncUnsafeCell = + SyncUnsafeCell::new(TranslationTable::new()); + +static MMU: MemoryManagementUnit = MemoryManagementUnit; + +impl AddressSpace { + /// Checks for architectural restrictions. + pub const fn arch_address_space_size_sanity_checks() { + // Size must be at least one full 512 MiB table. + assert!((AS_SIZE % Granule512MiB::SIZE) == 0); + + // Check for 48 bit virtual address size as maximum, which is supported by any + // ARMv8 version. + assert!(AS_SIZE <= (1 << 48)); + } +} + +impl MemoryManagementUnit { + /// Setup function for the MAIR_EL1 register. + fn setup_mair(&self) { + // Define the memory types being mapped. + MAIR_EL1.write( + // Attribute 1 - Cacheable normal DRAM. + MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc + + MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc + // Attribute 0 - Device. + + MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck, + ); + } + + /// Configure various settings of stage 1 of the EL1 translation regime. 
+ fn configure_translation_control(&self) { + let t0sz = (64 - KernelAddrSpace::SIZE_SHIFT) as u64; + + TCR_EL1.write( + TCR_EL1::TBI0::Used + + TCR_EL1::IPS::Bits_40 + + TCR_EL1::TG0::KiB_64 + + TCR_EL1::SH0::Inner + + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::EPD0::EnableTTBR0Walks + + TCR_EL1::A1::TTBR0 + + TCR_EL1::T0SZ.val(t0sz) + + TCR_EL1::EPD1::DisableTTBR1Walks, + ); + } +} + +/// Return a reference to the MMU instance. +fn mmu() -> &'static impl interface::Mmu { + &MMU +} + +impl interface::Mmu for MemoryManagementUnit { + unsafe fn enable_mmu_and_caching(&self) -> Result<(), MmuEnableError> { + if self.is_enabled() { + return Err(MmuEnableError::AlreadyEnabled); + } + + // Fail early if translation granule is not supported. + if !ID_AA64MMFR0_EL1.matches_all(ID_AA64MMFR0_EL1::TGran64::Supported) { + return Err(MmuEnableError::Other( + "Translation granule not supported in HW", + )); + } + + // Prepare the memory attribute indirection register. + self.setup_mair(); + + // Populate translation tables. + let tables = unsafe { &mut *KERNEL_TABLES.get() }; + + unsafe { + tables + .populate_tt_entries() + .map_err(MmuEnableError::Other)?; + } + + // Set the "Translation Table Base Register". + TTBR0_EL1.set_baddr(tables.phys_base_address()); + + self.configure_translation_control(); + + // Switch the MMU on. + // + // First, force all previous changes to be seen before the MMU is enabled. + barrier::isb(barrier::SY); + + // Enable the MMU and turn on data and instruction caching. + SCTLR_EL1.modify_no_read( + SCTLR_EL1.extract(), + SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable, + ); + + // Force MMU init to complete before next instruction. 
+ barrier::isb(barrier::SY); + + Ok(()) + } + + #[inline(always)] + fn is_enabled(&self) -> bool { + SCTLR_EL1.matches_all(SCTLR_EL1::M::Enable) + } +} + +pub fn setup() { + unsafe { + if let Err(e) = mmu().enable_mmu_and_caching() { + panic!("Failed to setup paging: {:?}", e); + } + } +} diff --git a/oxerun/src/arch/aarch64/ram64.s b/oxerun/src/arch/aarch64/ram64.s new file mode 100644 index 0000000..53bc055 --- /dev/null +++ b/oxerun/src/arch/aarch64/ram64.s @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/* Copyright (C) 2022 Akira Moroo */ + +.section .text.boot, "ax" +.global ram64_start +.global efi_image_size +.global efi_image_offset + +ram64_start: + /* + * This header follows the AArch64 Linux kernel image header [1] to load + * as a PE binary by the hypervisor. + * + * [1] https://docs.kernel.org/arm64/booting.html#call-the-kernel-image + */ + add x13, x18, #0x16 /* code0: UEFI "MZ" signature magic instruction */ + b jump_to_rust /* code1 */ + + .quad efi_image_offset /* text_offset */ + .quad efi_image_size /* image_size */ + .quad 0 /* flags */ + .quad 0 /* res2 */ + .quad 0 /* res3 */ + .quad 0 /* res4 */ + + .long 0x644d5241 /* "ARM\x64" magic number */ + .long 0 /* res5 */ + .align 3 + +jump_to_rust: + /* x0 typically points to device tree at entry */ + ldr x0, ={FDT_START} + + /* setup stack */ + ldr x30, =stack_end + mov sp, x30 + + /* x0: pointer to device tree */ + b rust64_start diff --git a/oxerun/src/arch/aarch64/simd.rs b/oxerun/src/arch/aarch64/simd.rs new file mode 100644 index 0000000..4ec5b7f --- /dev/null +++ b/oxerun/src/arch/aarch64/simd.rs @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2023 Akira Moroo + +use aarch64_cpu::registers::*; + +pub fn setup_simd() { + CPACR_EL1.modify_no_read(CPACR_EL1.extract(), CPACR_EL1::FPEN::TrapNothing); +} diff --git a/oxerun/src/arch/aarch64/translation.rs b/oxerun/src/arch/aarch64/translation.rs new file mode 100644 index 0000000..64d976b --- /dev/null +++ 
b/oxerun/src/arch/aarch64/translation.rs @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo +// Copyright (c) 2021-2022 Andre Richter + +//! Architectural translation table. +//! +//! Only 64 KiB granule is supported. + +use core::convert; +use tock_registers::{ + interfaces::{Readable, Writeable}, + register_bitfields, + registers::InMemoryRegister, +}; + +use crate::arch::aarch64::layout::code_range; +use layout::map::dram::{ACPI_START, FDT_START, KERNEL_START}; + +use super::{layout, paging::*}; +// A table descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-15. +register_bitfields! {u64, + STAGE1_TABLE_DESCRIPTOR [ + BLOCK_OUTPUT_ADDR_64KiB OFFSET(29) NUMBITS(19) [], // [47:29] + + /// Physical address of the next descriptor. + NEXT_LEVEL_TABLE_ADDR_64KiB OFFSET(16) NUMBITS(32) [], // [47:16] + + TYPE OFFSET(1) NUMBITS(1) [ + Block = 0, + Table = 1 + ], + + VALID OFFSET(0) NUMBITS(1) [ + False = 0, + True = 1 + ] + ] +} + +// A level 3 page descriptor, as per ARMv8-A Architecture Reference Manual Figure D5-17. +register_bitfields! {u64, + pub STAGE1_PAGE_DESCRIPTOR [ + /// Unprivileged execute-never. + UXN OFFSET(54) NUMBITS(1) [ + False = 0, + True = 1 + ], + + /// Privileged execute-never. + PXN OFFSET(53) NUMBITS(1) [ + False = 0, + True = 1 + ], + + /// Physical address of the next table descriptor (lvl2) or the page descriptor (lvl3). + OUTPUT_ADDR_64KiB OFFSET(16) NUMBITS(32) [], + + /// Access flag. + AF OFFSET(10) NUMBITS(1) [ + False = 0, + True = 1 + ], + + /// Shareability field. + SH OFFSET(8) NUMBITS(2) [ + OuterShareable = 0b10, + InnerShareable = 0b11 + ], + + /// Access Permissions. + AP OFFSET(6) NUMBITS(2) [ + RW_EL1 = 0b00, + RW_EL1_EL0 = 0b01, + RO_EL1 = 0b10, + RO_EL1_EL0 = 0b11 + ], + + /// Memory attributes index into the MAIR_EL1 register. 
+ AttrIndx OFFSET(2) NUMBITS(3) [], + + TYPE OFFSET(1) NUMBITS(1) [ + Reserved_Invalid = 0, + Page = 1 + ], + + VALID OFFSET(0) NUMBITS(1) [ + False = 0, + True = 1 + ] + ] +} + +/// A table descriptor for 64 KiB aperture. +/// +/// The output points to the next table. +#[derive(Copy, Clone)] +#[repr(C)] +struct TableDescriptor { + value: u64, +} + +/// A page descriptor with 64 KiB aperture. +/// +/// The output points to physical memory. +#[derive(Copy, Clone)] +#[repr(C)] +struct PageDescriptor { + value: u64, +} + +const PAGE_DESC_ADDR_MASK_64KB: u64 = Granule64KiB::ADDR_MASK as u64; + +trait StartAddr { + fn phys_start_addr_u64(&self) -> u64; + fn phys_start_addr_usize(&self) -> usize; +} + +const NUM_LVL2_TABLES: usize = layout::KernelAddrSpace::SIZE >> Granule512MiB::SHIFT; + +/// Big monolithic struct for storing the translation tables. Individual levels must be 64 KiB +/// aligned, so the lvl3 is put first. +#[repr(C)] +#[repr(align(65536))] +pub struct FixedSizeTranslationTable { + /// Page descriptors, covering 64 KiB windows per entry. + lvl3: [[PageDescriptor; 8192]; NUM_TABLES], + + /// Table descriptors, covering 512 MiB windows. + lvl2: [TableDescriptor; NUM_TABLES], +} + +/// A translation table type for the kernel space. +pub type TranslationTable = FixedSizeTranslationTable; + +// The binary is still identity mapped, so we don't need to convert here. +impl StartAddr for [T; N] { + fn phys_start_addr_u64(&self) -> u64 { + self as *const T as u64 + } + + fn phys_start_addr_usize(&self) -> usize { + self as *const _ as usize + } +} + +impl TableDescriptor { + /// Create an instance. + /// + /// Descriptor is invalid by default. + pub const fn new_zeroed() -> Self { + Self { value: 0 } + } + + /// Create an instance pointing to the supplied address. 
+ pub fn from_next_lvl_table_addr(phys_next_lvl_table_addr: usize) -> Self { + let val = InMemoryRegister::::new(0); + + let shifted = phys_next_lvl_table_addr >> Granule64KiB::SHIFT; + val.write( + STAGE1_TABLE_DESCRIPTOR::NEXT_LEVEL_TABLE_ADDR_64KiB.val(shifted as u64) + + STAGE1_TABLE_DESCRIPTOR::TYPE::Table + + STAGE1_TABLE_DESCRIPTOR::VALID::True, + ); + + Self { value: val.get() } + } + + pub fn block_attr(attribute_fields: &AttributeFields) -> Self { + let val = InMemoryRegister::::new(0); + + val.write( + STAGE1_PAGE_DESCRIPTOR::AF::True + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + (*attribute_fields).into(), + ); + + let v = val.get(); + let v = v >> 2 << 2; + Self { value: v } + } + + pub fn block_table_from_addr(addr: usize) -> Self { + let val = InMemoryRegister::::new(0); + let shifted = addr >> Granule512MiB::SHIFT; + let (_, attribute_fields) = layout::virt_mem_layout() + .virt_addr_properties(addr) + .unwrap(); + let attr = Self::block_attr(&attribute_fields).value; + let addr_shifted = STAGE1_TABLE_DESCRIPTOR::BLOCK_OUTPUT_ADDR_64KiB.val(shifted as u64); + val.write( + addr_shifted + + STAGE1_TABLE_DESCRIPTOR::TYPE::Block + + STAGE1_TABLE_DESCRIPTOR::VALID::True, + ); + let v = val.get() + attr; + + Self { value: v } + } +} + +/// Convert the kernel's generic memory attributes to HW-specific attributes of the MMU. +impl convert::From + for tock_registers::fields::FieldValue +{ + fn from(attribute_fields: AttributeFields) -> Self { + // Memory attributes. + let mut desc = match attribute_fields.mem_attributes { + MemAttributes::CacheableDRAM => { + STAGE1_PAGE_DESCRIPTOR::SH::InnerShareable + + STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::NORMAL) + } + MemAttributes::Device => { + STAGE1_PAGE_DESCRIPTOR::SH::OuterShareable + + STAGE1_PAGE_DESCRIPTOR::AttrIndx.val(mair::DEVICE) + } + }; + + // Access Permissions. 
+ desc += match attribute_fields.acc_perms { + AccessPermissions::ReadOnly => STAGE1_PAGE_DESCRIPTOR::AP::RO_EL1, + AccessPermissions::ReadWrite => STAGE1_PAGE_DESCRIPTOR::AP::RW_EL1, + }; + + // The execute-never attribute is mapped to PXN in AArch64. + desc += if attribute_fields.execute_never { + STAGE1_PAGE_DESCRIPTOR::PXN::True + } else { + STAGE1_PAGE_DESCRIPTOR::PXN::False + }; + + // Always set unprivileged execute-never as long as userspace is not implemented yet. + desc += STAGE1_PAGE_DESCRIPTOR::UXN::True; + + desc + } +} + +impl PageDescriptor { + /// Create an instance. + /// + /// Descriptor is invalid by default. + pub const fn new_zeroed() -> Self { + Self { value: 0 } + } + + /// Create an instance. + pub fn from_output_addr(phys_output_addr: usize, attribute_fields: &AttributeFields) -> Self { + let val = InMemoryRegister::::new(0); + + let shifted = phys_output_addr as u64 >> Granule64KiB::SHIFT; + val.write( + STAGE1_PAGE_DESCRIPTOR::OUTPUT_ADDR_64KiB.val(shifted) + + STAGE1_PAGE_DESCRIPTOR::AF::True + + STAGE1_PAGE_DESCRIPTOR::TYPE::Page + + STAGE1_PAGE_DESCRIPTOR::VALID::True + + (*attribute_fields).into(), + ); + + Self { value: val.get() } + } +} + +impl FixedSizeTranslationTable { + /// Create an instance. + pub const fn new() -> Self { + // Can't have a zero-sized address space. + assert!(NUM_TABLES > 0); + + Self { + lvl3: [[PageDescriptor::new_zeroed(); 8192]; NUM_TABLES], + lvl2: [TableDescriptor::new_zeroed(); NUM_TABLES], + } + } + + /// Iterates over all static translation table entries and fills them at once. + /// + /// # Safety + /// + /// - Modifies a `static mut`. Ensure it only happens from here. 
+ pub unsafe fn populate_tt_entries(&mut self) -> Result<(), &'static str> { + // Use 512M block to map the whole memory region and update 0x40000000 ~ 0x60000000 later + for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { + let higher_addr = l2_nr << Granule512MiB::SHIFT; + *l2_entry = TableDescriptor::block_table_from_addr(higher_addr); + } + + // Use 64K page table to remap up to size of 512MB from layout::map::dram::START where DT, ACPI and fw reside. + for (l2_nr, l2_entry) in self.lvl2.iter_mut().enumerate() { + let higher_addr = l2_nr << Granule512MiB::SHIFT; + if higher_addr < layout::map::dram::START { + continue; + } else if higher_addr >= layout::map::dram::START + 0x2000_0000 { + break; + } + + *l2_entry = + TableDescriptor::from_next_lvl_table_addr(self.lvl3[l2_nr].phys_start_addr_usize()); + + let code = code_range(); + let mut l3_temp: u64 = 0; + for (l3_nr, l3_entry) in self.lvl3[l2_nr].iter_mut().enumerate() { + let virt_addr = higher_addr + (l3_nr << Granule64KiB::SHIFT); + l3_temp = if virt_addr == FDT_START + || virt_addr == ACPI_START + || virt_addr == KERNEL_START + || virt_addr == code.start + || virt_addr == code.end + { + let (_, attr) = layout::virt_mem_layout().virt_addr_properties(virt_addr)?; + PageDescriptor::from_output_addr(virt_addr, &attr).value + } else { + l3_temp + }; + + l3_temp &= !PAGE_DESC_ADDR_MASK_64KB; + l3_temp += virt_addr as u64 & PAGE_DESC_ADDR_MASK_64KB; + *l3_entry = PageDescriptor { value: l3_temp }; + } + } + + Ok(()) + } + + /// The translation table's base address to be used for programming the MMU. 
+ pub fn phys_base_address(&self) -> u64 { + self.lvl2.phys_start_addr_u64() + } +} diff --git a/oxerun/src/arch/mod.rs b/oxerun/src/arch/mod.rs new file mode 100644 index 0000000..48dadab --- /dev/null +++ b/oxerun/src/arch/mod.rs @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo +// Copyright (C) 2025 Vates SAS - Teddy Astie + +use core::ptr::NonNull; + +#[cfg(target_arch = "aarch64")] +pub mod aarch64; + +#[cfg(target_arch = "x86_64")] +pub mod x86_64; + +#[cfg(target_arch = "riscv64")] +pub mod riscv64; + +pub fn map_4k_frame(pfn: u64, #[allow(unused)] encrypted: bool) -> Option> { + #[cfg(target_arch = "x86_64")] + { + use crate::arch::x86_64::mm::map_frame; + use ::x86_64::{PhysAddr, structures::paging::PhysFrame}; + + let vaddr = unsafe { + map_frame( + PhysFrame::from_start_address(PhysAddr::new(pfn << 12)) + .expect("Invalid mapping pfn"), + encrypted, + ) + }?; + + NonNull::new(vaddr.as_mut_ptr()) + } + + #[cfg(not(target_arch = "x86_64"))] + { + /* Assume flat memory model */ + NonNull::new((pfn << 12) as *mut T) + } +} diff --git a/oxerun/src/arch/riscv64/asm.rs b/oxerun/src/arch/riscv64/asm.rs new file mode 100644 index 0000000..797b84c --- /dev/null +++ b/oxerun/src/arch/riscv64/asm.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2023 Rivos Inc. + +use core::arch::global_asm; + +global_asm!(include_str!("ram64.s")); diff --git a/oxerun/src/arch/riscv64/layout.rs b/oxerun/src/arch/riscv64/layout.rs new file mode 100644 index 0000000..627da76 --- /dev/null +++ b/oxerun/src/arch/riscv64/layout.rs @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo +// Copyright (c) 2021-2022 Andre Richter +// Copyright (C) 2023 Rivos Inc. 
+ +use core::{cell::UnsafeCell, ops::Range}; + +use crate::layout::{MemoryAttribute, MemoryDescriptor, MemoryLayout}; + +unsafe extern "Rust" { + unsafe static code_start: UnsafeCell<()>; + unsafe static code_end: UnsafeCell<()>; + unsafe static data_start: UnsafeCell<()>; + unsafe static data_end: UnsafeCell<()>; + unsafe static stack_start: UnsafeCell<()>; + unsafe static stack_end: UnsafeCell<()>; +} + +pub fn code_range() -> Range { + unsafe { (code_start.get() as _)..(code_end.get() as _) } +} + +pub fn data_range() -> Range { + unsafe { (data_start.get() as _)..(data_end.get() as _) } +} + +pub fn stack_range() -> Range { + unsafe { (stack_start.get() as _)..(stack_end.get() as _) } +} + +pub fn reserved_range() -> Range { + 0x8000_0000..0x8020_0000 +} + +const NUM_MEM_DESCS: usize = 4; + +pub static MEM_LAYOUT: MemoryLayout = [ + MemoryDescriptor { + name: "Code", + range: code_range, + attribute: MemoryAttribute::Code, + }, + MemoryDescriptor { + name: "Data", + range: data_range, + attribute: MemoryAttribute::Data, + }, + MemoryDescriptor { + name: "Stack", + range: stack_range, + attribute: MemoryAttribute::Data, + }, + MemoryDescriptor { + name: "SBI", + range: reserved_range, + attribute: MemoryAttribute::Unusable, + }, +]; diff --git a/oxerun/src/arch/riscv64/mod.rs b/oxerun/src/arch/riscv64/mod.rs new file mode 100644 index 0000000..6075106 --- /dev/null +++ b/oxerun/src/arch/riscv64/mod.rs @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2023 Rivos Inc. + +pub mod asm; +pub mod layout; diff --git a/oxerun/src/arch/riscv64/ram64.s b/oxerun/src/arch/riscv64/ram64.s new file mode 100644 index 0000000..91bdafd --- /dev/null +++ b/oxerun/src/arch/riscv64/ram64.s @@ -0,0 +1,24 @@ +// Copyright (c) 2021 by Rivos Inc. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +.option norvc + +.section .text.boot + +// The entry point for the boot CPU. 
+.global ram64_start +ram64_start: + +.option push +.option norelax + la gp, __global_pointer$ +.option pop + csrw sstatus, zero + csrw sie, zero + + la sp, stack_end + call rust64_start +wfi_loop: + wfi + j wfi_loop diff --git a/oxerun/src/arch/x86_64/asm.rs b/oxerun/src/arch/x86_64/asm.rs new file mode 100644 index 0000000..0ed5793 --- /dev/null +++ b/oxerun/src/arch/x86_64/asm.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2020 Google LLC + +use core::arch::global_asm; + +global_asm!(include_str!("ram32.s"), options(att_syntax, raw)); diff --git a/oxerun/src/arch/x86_64/gdt.rs b/oxerun/src/arch/x86_64/gdt.rs new file mode 100644 index 0000000..87889b8 --- /dev/null +++ b/oxerun/src/arch/x86_64/gdt.rs @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2020 Google LLC + +bitflags::bitflags! { + // An extension of x86_64::structures::gdt::DescriptorFlags + struct Descriptor: u64 { + const LIMIT_0_15 = 0xFFFF; + const BASE_0_23 = 0xFF_FFFF << 16; + const ACCESSED = 1 << 40; + const WRITABLE = 1 << 41; // Only for Data-Segments + const READABLE = 1 << 41; // Only for Code-Segments + const EXPANSION = 1 << 42; // Only for Data-Segments + const CONFORMING = 1 << 42; // Only for Code-Segments + const EXECUTABLE = 1 << 43; + const USER_SEGMENT = 1 << 44; + const DPL_RING_3 = 3 << 45; + const PRESENT = 1 << 47; + const LIMIT_16_19 = 0xF << 48; + const SOFTWARE = 1 << 52; + const BIT64 = 1 << 53; + const BIT32 = 1 << 54; + const GRANULARITY = 1 << 55; + const BASE_24_31 = 0xFF << 56; + + // All segments are nonconforming, non-system, ring-0 only, and present. + // We set ACCESSED in advance to avoid writing to the descriptor. + const COMMON = Self::ACCESSED.bits() | Self::USER_SEGMENT.bits() | Self::PRESENT.bits(); + // BIT32 must be 0, all other bits (not yet mentioned) are ignored. 
+        const CODE64 = Self::COMMON.bits() | Self::READABLE.bits() | Self::EXECUTABLE.bits() | Self::BIT64.bits();
+        const DATA64 = Self::COMMON.bits() | Self::WRITABLE.bits() | Self::BIT64.bits();
+    }
+}
+
+// An alternative to x86_64::structures::DescriptorTablePointer that avoids
+// "pointer-to-integer cast" (which rust does not support in statics).
+#[repr(C, packed)]
+struct Pointer {
+    limit: u16,
+    base: &'static Descriptor,
+}
+
+impl Pointer {
+    const fn new(gdt: &'static [Descriptor]) -> Self {
+        let size = core::mem::size_of_val(gdt);
+        Self {
+            limit: size as u16 - 1,
+            base: &gdt[0],
+        }
+    }
+}
+
+// Our 64-bit GDT lives in RAM, so it can be accessed like any other global.
+#[unsafe(no_mangle)]
+static GDT64_PTR: Pointer = Pointer::new(&GDT64);
+static GDT64: [Descriptor; 3] = [Descriptor::empty(), Descriptor::CODE64, Descriptor::DATA64];
diff --git a/oxerun/src/arch/x86_64/idt.rs b/oxerun/src/arch/x86_64/idt.rs
new file mode 100644
index 0000000..89572c0
--- /dev/null
+++ b/oxerun/src/arch/x86_64/idt.rs
@@ -0,0 +1,50 @@
+use core::{arch::naked_asm, cell::SyncUnsafeCell};
+
+use x86_64::{VirtAddr, structures::idt::InterruptDescriptorTable};
+
+use crate::arch::x86_64::sev::ghcb_vc_handler;
+
+pub static IDT: SyncUnsafeCell<InterruptDescriptorTable> =
+    SyncUnsafeCell::new(InterruptDescriptorTable::new());
+
+#[derive(Clone, Copy, Debug)]
+#[repr(C)]
+pub struct CpuRegs {
+    pub r15: u64,
+    pub r14: u64,
+    pub r13: u64,
+    pub r12: u64,
+    pub rbp: u64,
+    pub rbx: u64,
+    pub r11: u64,
+    pub r10: u64,
+    pub r9: u64,
+    pub r8: u64,
+    pub rax: u64,
+    pub rcx: u64,
+    pub rdx: u64,
+    pub rsi: u64,
+    pub rdi: u64,
+    pub orig_rax: u64,
+    pub rip: u64,
+    pub cs: u64,
+    pub eflags: u64,
+    pub rsp: u64,
+    pub ss: u64,
+}
+
+#[unsafe(naked)]
+unsafe extern "C" fn raw_ghcb_vc_handler() {
+    naked_asm!(include_str!("isr.s"), options(att_syntax), EXCEPTION_HANDLER = sym ghcb_vc_handler)
+}
+
+pub fn setup() {
+    let idt = unsafe { &mut *IDT.get() };
+
+    unsafe {
+        idt.vmm_communication_exception
+            .set_handler_addr(VirtAddr::new(raw_ghcb_vc_handler as *const () as u64));
+    }
+
+    idt.load();
+}
diff --git a/oxerun/src/arch/x86_64/isr.s b/oxerun/src/arch/x86_64/isr.s
new file mode 100644
index 0000000..b0e3557
--- /dev/null
+++ b/oxerun/src/arch/x86_64/isr.s
@@ -0,0 +1,45 @@
+# Based on Linux ISR handler
+.code64
+pushq %rdi
+pushq %rsi
+pushq %rdx
+pushq %rcx
+pushq %rax
+pushq %r8
+pushq %r9
+pushq %r10
+pushq %r11
+pushq %rbx
+pushq %rbp
+pushq %r12
+pushq %r13
+pushq %r14
+pushq %r15
+
+# Call handler with CpuRegs
+movq %rsp, %rdi
+# Error code is second parameter
+movq 120(%rsp), %rsi
+call {EXCEPTION_HANDLER}
+
+/* Restore regs */
+popq %r15
+popq %r14
+popq %r13
+popq %r12
+popq %rbp
+popq %rbx
+popq %r11
+popq %r10
+popq %r9
+popq %r8
+popq %rax
+popq %rcx
+popq %rdx
+popq %rsi
+popq %rdi
+
+# Remove error code and return
+addq $8, %rsp
+
+iretq
diff --git a/oxerun/src/arch/x86_64/layout.rs b/oxerun/src/arch/x86_64/layout.rs
new file mode 100644
index 0000000..eb1515c
--- /dev/null
+++ b/oxerun/src/arch/x86_64/layout.rs
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright (C) 2022 Akira Moroo
+
+use core::{cell::UnsafeCell, ops::Range};
+
+use crate::layout::{MemoryAttribute, MemoryDescriptor, MemoryLayout};
+
+unsafe extern "Rust" {
+    unsafe static ram_min: UnsafeCell<()>;
+    unsafe static code_start: UnsafeCell<()>;
+    unsafe static code_end: UnsafeCell<()>;
+    unsafe static data_start: UnsafeCell<()>;
+    unsafe static data_end: UnsafeCell<()>;
+    unsafe static stack_start: UnsafeCell<()>;
+    unsafe static stack_end: UnsafeCell<()>;
+}
+
+pub fn header_range() -> Range<usize> {
+    unsafe { (ram_min.get() as _)..(code_start.get() as _) }
+}
+
+pub fn code_range() -> Range<usize> {
+    unsafe { (code_start.get() as _)..(code_end.get() as _) }
+}
+
+pub fn data_range() -> Range<usize> {
+    unsafe { (data_start.get() as _)..(data_end.get() as _) }
+}
+
+pub fn stack_range() -> Range<usize> {
+    unsafe { (stack_start.get() as _)..(stack_end.get() as _) }
+}
+
+const NUM_MEM_DESCS: usize = 4;
+
+pub static MEM_LAYOUT: MemoryLayout<NUM_MEM_DESCS> = [
+    MemoryDescriptor {
+        name: "PVH Header",
+        range: header_range,
+        attribute: MemoryAttribute::Data,
+    },
+    MemoryDescriptor {
+        name: "Code",
+        range: code_range,
+        attribute: MemoryAttribute::Code,
+    },
+    MemoryDescriptor {
+        name: "Data",
+        range: data_range,
+        attribute: MemoryAttribute::Data,
+    },
+    MemoryDescriptor {
+        name: "Stack",
+        range: stack_range,
+        attribute: MemoryAttribute::Data,
+    },
+];
diff --git a/oxerun/src/arch/x86_64/mm.rs b/oxerun/src/arch/x86_64/mm.rs
new file mode 100644
index 0000000..61c3754
--- /dev/null
+++ b/oxerun/src/arch/x86_64/mm.rs
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2020 Google LLC
+// Copyright 2025 Vates SAS
+
+/*
+ * Memory layout
+ *
+ * 0M-1G(-2M) : Identity
+ * 1G(-2M)-1G : L1 mappings
+ */
+
+use core::cell::SyncUnsafeCell;
+
+use x86_64::{
+    PhysAddr, VirtAddr,
+    instructions::tlb,
+    registers::control::Cr3,
+    structures::paging::{
+        PageSize, PageTable, PageTableFlags, PageTableIndex, PhysFrame, Size2MiB, Size4KiB,
+    },
+};
+
+#[unsafe(no_mangle)]
+static L4_TABLE: SyncUnsafeCell<PageTable> = SyncUnsafeCell::new(PageTable::new());
+#[unsafe(no_mangle)]
+static L3_TABLE: SyncUnsafeCell<PageTable> = SyncUnsafeCell::new(PageTable::new());
+#[unsafe(no_mangle)]
+static L2_TABLE: SyncUnsafeCell<PageTable> = SyncUnsafeCell::new(PageTable::new());
+#[unsafe(no_mangle)]
+static L1_TABLE: SyncUnsafeCell<PageTable> = SyncUnsafeCell::new(PageTable::new());
+
+#[unsafe(no_mangle)]
+pub static mut MEMORY_ENCRYPT_FLAG: PageTableFlags = PageTableFlags::empty();
+
+/// Position of the L1 mappings in the L2 table.
+/// 511 means 2M just below 1G
+const L1_INDEX: PageTableIndex = PageTableIndex::new(511);
+
+pub fn setup() {
+    // SAFETY: This function is idempotent and only writes to static memory and
+    // CR3. Thus, it is safe to run multiple times or on multiple threads.
+    // A SyncUnsafeCell pointer is never null.
+ let (l4, l3, l2, l1) = unsafe { + ( + &mut *L4_TABLE.get(), + &mut *L3_TABLE.get(), + &mut *L2_TABLE.get(), + &mut *L1_TABLE.get(), + ) + }; + let pt_flags = + PageTableFlags::PRESENT | PageTableFlags::WRITABLE | unsafe { MEMORY_ENCRYPT_FLAG }; + + let mut next_addr = PhysAddr::zero(); + for (pos, l2e) in l2.iter_mut().enumerate() { + if pos == L1_INDEX.into() { + l2e.set_addr(phys_addr_linear(l1), pt_flags); + } else { + l2e.set_addr(next_addr, pt_flags | PageTableFlags::HUGE_PAGE); + } + + next_addr += Size2MiB::SIZE; + } + + // Point L3 at L2 + l3[0].set_addr(phys_addr_linear(l2), pt_flags); + // Point L4 at L3 + l4[0].set_addr(phys_addr_linear(l3), pt_flags); + + // Point Cr3 at L4 + let (cr3_frame, cr3_flags) = Cr3::read(); + let l4_frame = PhysFrame::from_start_address(phys_addr_linear(l4)).unwrap(); + if cr3_frame != l4_frame { + unsafe { Cr3::write(l4_frame, cr3_flags) }; + } +} + +fn phys_addr_linear(virt_addr: *const T) -> PhysAddr { + PhysAddr::new(virt_addr.addr() as u64) +} + +pub unsafe fn map_frame(frame: PhysFrame, encrypted: bool) -> Option { + let l1 = unsafe { &mut *L1_TABLE.get() }; + // Find a spare L1 entry + let (index, entry) = l1.iter_mut().enumerate().find(|(_, l1e)| l1e.is_unused())?; + let mut flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + + if encrypted { + flags |= unsafe { MEMORY_ENCRYPT_FLAG }; + } + + entry.set_frame(frame, flags); + + Some(VirtAddr::new( + u64::from(L1_INDEX) * Size2MiB::SIZE + (index as u64) * Size4KiB::SIZE, + )) +} + +pub unsafe fn unmap_frame(va: VirtAddr) -> Result<(), ()> { + if u32::from(va.p4_index()) != 0 && u32::from(va.p3_index()) != 0 && va.p2_index() != L1_INDEX { + return Err(()); + } + + unsafe { + let l1 = &mut *L1_TABLE.get(); + l1[va.p1_index()].set_unused(); + tlb::flush(va); + }; + Ok(()) +} diff --git a/oxerun/src/arch/x86_64/mod.rs b/oxerun/src/arch/x86_64/mod.rs new file mode 100644 index 0000000..26c81a5 --- /dev/null +++ b/oxerun/src/arch/x86_64/mod.rs @@ -0,0 +1,9 @@ +// 
SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +pub mod asm; +pub mod gdt; +pub mod idt; +pub mod layout; +pub mod mm; +pub mod sev; diff --git a/oxerun/src/arch/x86_64/ram32.s b/oxerun/src/arch/x86_64/ram32.s new file mode 100644 index 0000000..ae3d5fc --- /dev/null +++ b/oxerun/src/arch/x86_64/ram32.s @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright 2020 Google LLC +# Copyright 2025 Teddy Astie - Vates SAS + +.section .text32, "ax" +.global ram32_start +.code32 + +ram32_start: + # Stash the PVH start_info struct in %rdi. + movl %ebx, %edi + +setup_page_tables: + # First L2 entry identity maps [0, 2 MiB) + movl $0b10000011, (L2_TABLE) # huge (bit 7), writable (bit 1), present (bit 0) + # First L3 entry points to L2 table + movl $L2_TABLE, %eax + orb $0b00000011, %al # writable (bit 1), present (bit 0) + movl %eax, (L3_TABLE) + # First L4 entry points to L3 table + movl $L3_TABLE, %eax + orb $0b00000011, %al # writable (bit 1), present (bit 0) + movl %eax, (L4_TABLE) + +sev_check: + # Check GHCB/SEV-ES through start_info.flags & SIF_HVM_GHCB. + mov 8(%edi), %edx + btl $5, %edx + jnc no_ghcb + +use_ghcb: + # Use GHCB protocol instead. + movl $0xc0010130, %ecx # MSR_AMD64_SEV_GHCB + rdmsr + # C-bit is in EAX[31:24] + shr $24, %eax + mov %eax, %ebx + jmp sev_bit_known + +no_ghcb: + # Check CPUID highest leaf + movl $0x80000000, %eax + cpuid + cmpl $0x8000001f, %eax + jb enable_paging + + # Check for SEV support + movl $0x8000001f, %eax + cpuid + btl $1, %eax + jnc enable_paging + +sev_bit_known: + # Check if SEV is enabled + movl $0xc0010131, %ecx # MSR_AMD64_SEV + rdmsr + movl %eax, (SEV_STATUS) + btl $0, %eax # MSR_AMD64_SEV_ENABLED_BIT + jnc enable_paging + + movl %ebx, %ecx + andl $0x3f, %ecx # Get C-bit position + subl $0x20, %ecx + movl $1, %ebx + shll %cl, %ebx + + # %ebx contains high part of C-bit mask + # We assume that C-bit is over the 32-bits mark. 
+    movl %ebx, (MEMORY_ENCRYPT_FLAG + 4)
+
+    # Inject C-bit to pagetables
+    leal (L2_TABLE), %eax
+    orl %ebx, 4(%eax)
+    leal (L3_TABLE), %eax
+    orl %ebx, 4(%eax)
+    leal (L4_TABLE), %eax
+    orl %ebx, 4(%eax)
+
+enable_paging:
+    # Load page table root into CR3
+    movl $L4_TABLE, %eax
+    movl %eax, %cr3
+
+    # Set CR4.PAE (Physical Address Extension)
+    movl %cr4, %eax
+    orb $0b00100000, %al # Set bit 5
+    movl %eax, %cr4
+    # Set EFER.LME (Long Mode Enable)
+    movl $0xC0000080, %ecx
+    rdmsr
+    orb $0b00000001, %ah # Set bit 8
+    wrmsr
+    # Set CR0.PG (Paging)
+    movl %cr0, %eax
+    orl $(1 << 31), %eax
+    movl %eax, %cr0
+
+jump_to_64bit:
+    # We are now in 32-bit compatibility mode. To enter 64-bit mode, we need to
+    # load a 64-bit code segment into our GDT.
+    lgdtl GDT64_PTR
+    # Initialize the stack pointer (Rust code always uses the stack)
+    movl $stack_end, %esp
+    # Set segment registers to a 64-bit segment.
+    movw $0x10, %ax
+    movw %ax, %ds
+    movw %ax, %es
+    movw %ax, %gs
+    movw %ax, %fs
+    movw %ax, %ss
+    # Set CS to a 64-bit segment and jump to 64-bit Rust code.
+    # PVH start_info is in %rdi, the first parameter of the System V ABI.
+ ljmpl $0x08, $rust64_start + +.code64 \ No newline at end of file diff --git a/oxerun/src/arch/x86_64/sev.rs b/oxerun/src/arch/x86_64/sev.rs new file mode 100644 index 0000000..2d1dfaf --- /dev/null +++ b/oxerun/src/arch/x86_64/sev.rs @@ -0,0 +1,292 @@ +use core::{ + arch::asm, + cell::SyncUnsafeCell, + ptr::{NonNull, null_mut}, + sync::atomic::{AtomicPtr, Ordering}, +}; + +use volatile::{VolatileFieldAccess, VolatilePtr}; +use x86_64::{PhysAddr, registers::model_specific::Msr, structures::paging::PhysFrame}; + +use crate::arch::x86_64::{idt::CpuRegs, mm}; + +const SEV_STATUS_MSR: u32 = 0xc001_0131; +const GHCB_MSR: u32 = 0xc001_0130; + +const GHCB_EXIT_REQ: u64 = 0x100; + +const VMEXIT_CPUID: u64 = 0x72; +const VMEXIT_MSR: u64 = 0x7c; +const VMEXIT_VMMCALL: u64 = 0x81; + +const VMMCALL_INST_LEN: u64 = 0x3; +const CPUID_INST_LEN: u64 = 0x2; +const MSR_INST_LEN: u64 = 0x2; + +// Based on Enarx SEV code + +/// GHCB Save Area +#[derive(Debug, Copy, Clone, VolatileFieldAccess)] +#[repr(C)] +pub struct GhcbSaveArea { + reserved1: [u8; 203], + pub cpl: u8, + reserved2: [u8; 300], + pub rax: u64, + reserved3: [u8; 264], + pub rcx: u64, + pub rdx: u64, + pub rbx: u64, + reserved4: u64, + pub rbp: u64, + pub rsi: u64, + pub rdi: u64, + pub r8: u64, + pub r9: u64, + pub r10: u64, + pub r11: u64, + pub r12: u64, + pub r13: u64, + pub r14: u64, + pub r15: u64, + reserved5: [u8; 16], + pub sw_exit_code: u64, + pub sw_exit_info1: u64, + pub sw_exit_info2: u64, + pub sw_scratch: u64, + reserved6: [u8; 56], + pub xcr0: u64, + pub valid_bitmap: [u8; 16], + pub x87state_gpa: u64, + reserved7: [u8; 1016], +} + +/// GHCB +#[derive(Debug, Copy, Clone, VolatileFieldAccess)] +#[repr(C, align(4096))] +pub struct Ghcb { + pub save_area: GhcbSaveArea, + pub shared_buffer: [u8; 2032], + reserved1: [u8; 10], + pub protocol_version: u16, + pub ghcb_usage: u32, +} + +impl Ghcb { + const fn new() -> Self { + Self { + save_area: GhcbSaveArea { + reserved1: [0; _], + cpl: 0, + reserved2: [0; _], 
+ rax: 0, + reserved3: [0; _], + rcx: 0, + rdx: 0, + rbx: 0, + reserved4: 0, + rbp: 0, + rsi: 0, + rdi: 0, + r8: 0, + r9: 0, + r10: 0, + r11: 0, + r12: 0, + r13: 0, + r14: 0, + r15: 0, + reserved5: [0; _], + sw_exit_code: 0, + sw_exit_info1: 0, + sw_exit_info2: 0, + sw_scratch: 0, + reserved6: [0; _], + xcr0: 0, + valid_bitmap: [0; _], + x87state_gpa: 0, + reserved7: [0; _], + }, + shared_buffer: [0; _], + reserved1: [0; _], + protocol_version: 0, + ghcb_usage: 0, + } + } +} + +static GHCB_PAGE: SyncUnsafeCell = SyncUnsafeCell::new(Ghcb::new()); +static GHCB: AtomicPtr = AtomicPtr::new(null_mut()); + +#[unsafe(no_mangle)] +pub static mut SEV_STATUS: u64 = 0; + +pub fn get_ghcb() -> Option> { + // SAFETY: We always give a valid pointer and there is no SMT. + unsafe { + Some(VolatilePtr::new(NonNull::new( + GHCB.load(Ordering::Relaxed), + )?)) + } +} + +pub fn setup() { + // SAFETY: No other thread exist at this time. + if unsafe { SEV_STATUS } & 0x2 == 0 { + // No SEV-ES + return; + } + + // SAFETY: GHCB_PAGE is identity mapped. + unsafe { + GHCB.store( + mm::map_frame( + PhysFrame::from_start_address_unchecked(PhysAddr::new(GHCB_PAGE.get() as u64)), + false, + ) + .unwrap() + .as_mut_ptr(), + Ordering::Relaxed, + ); + } + + // SAFETY: We just initialized ghcb so it can't be None. + let ghcb = unsafe { get_ghcb().unwrap_unchecked() }; + + ghcb.protocol_version().write(0x1); // SEV-ES Version 1 +} + +#[inline] +pub unsafe fn vmgexit_msr(value: u64) -> u64 { + let mut ghcb_msr = Msr::new(GHCB_MSR); + + unsafe { + ghcb_msr.write(value); + asm!("rep; vmmcall", options(nostack, preserves_flags)); + ghcb_msr.read() + } +} + +fn ghcb_call() -> u64 { + unsafe { vmgexit_msr(GHCB_PAGE.get() as u64) } +} + +fn sev_es_terminate(reason: u16) -> ! 
{ + unsafe { + vmgexit_msr(GHCB_EXIT_REQ | ((reason as u64) << 16)); + asm!("cli;hlt"); + core::hint::unreachable_unchecked() + }; +} + +pub fn ghcb_handle_cpuid(regs: &mut CpuRegs, ghcb: VolatilePtr<'static, Ghcb>) { + let save_area = ghcb.save_area(); + + save_area.rax().write(regs.rax); + save_area.rcx().write(regs.rcx); + + save_area.sw_exit_code().write(VMEXIT_CPUID); + save_area.sw_exit_info1().write(0); + save_area.sw_exit_info2().write(0); + + // TODO: valid bitmap, xcr0 special case + + let result = ghcb_call(); + + // TODO: handle result + + regs.rax = save_area.rax().read(); + regs.rbx = save_area.rbx().read(); + regs.rcx = save_area.rcx().read(); + regs.rdx = save_area.rdx().read(); + + regs.rip += CPUID_INST_LEN; +} + +pub fn ghcb_handle_msr(regs: &mut CpuRegs, ghcb: VolatilePtr<'static, Ghcb>) { + let save_area = ghcb.save_area(); + let opcode = unsafe { (regs.rip as *const u16).read_unaligned() }; + + let wrmsr = match opcode.to_ne_bytes() { + [0x0F, 0x30] => true, /* WRMSR */ + [0x0F, 0x32] => false, /* RDMSR */ + _ => sev_es_terminate(1), + }; + + save_area.sw_exit_code().write(VMEXIT_MSR); + save_area.sw_exit_info1().write(wrmsr as _); + save_area.rcx().write(regs.rcx as u32 as _); + + if wrmsr { + save_area.rdx().write(regs.rdx as u32 as _); + save_area.rax().write(regs.rax as u32 as _); + } + + let result = ghcb_call(); + + if save_area.sw_exit_info1().read() == 1 { + sev_es_terminate(3); + } + + if !wrmsr { + regs.rdx = save_area.rdx().read() as u32 as _; + regs.rax = save_area.rax().read() as u32 as _; + } + + regs.rip += MSR_INST_LEN; +} + +pub fn ghcb_handle_vmmcall(regs: &mut CpuRegs, ghcb: VolatilePtr<'static, Ghcb>) { + let save_area = ghcb.save_area(); + + save_area.rax().write(regs.rax); + save_area.rdi().write(regs.rdi); + save_area.rsi().write(regs.rsi); + save_area.r8().write(regs.r8); + save_area.r9().write(regs.r9); + save_area.r10().write(regs.r10); + save_area.r11().write(regs.r11); + save_area.r12().write(regs.r12); + + 
save_area.cpl().write(0); + + save_area.sw_exit_code().write(VMEXIT_VMMCALL); + save_area.sw_exit_info1().write(0); + save_area.sw_exit_info2().write(0); + + let result = ghcb_call(); + + // TODO: handle result + + regs.rax = save_area.rax().read(); + regs.rdi = save_area.rdi().read(); + regs.rsi = save_area.rsi().read(); + regs.r8 = save_area.r8().read(); + regs.r9 = save_area.r9().read(); + regs.r10 = save_area.r10().read(); + regs.r11 = save_area.r11().read(); + regs.r12 = save_area.r12().read(); + + regs.rip += VMMCALL_INST_LEN; +} + +pub extern "C" fn ghcb_vc_handler(regs: &mut CpuRegs, error_code: u64) { + let Some(ghcb) = get_ghcb() else { + sev_es_terminate(2); + }; + + match error_code { + VMEXIT_CPUID => ghcb_handle_cpuid(regs, ghcb), + VMEXIT_MSR => ghcb_handle_msr(regs, ghcb), + VMEXIT_VMMCALL => ghcb_handle_vmmcall(regs, ghcb), + _ => {} + } +} + +pub fn is_sev_guest() -> bool { + unsafe { SEV_STATUS != 0 } +} + +pub fn is_sev_es_guest() -> bool { + unsafe { (SEV_STATUS & 0x2) != 0 } +} diff --git a/oxerun/src/bin/hello.rs b/oxerun/src/bin/hello.rs new file mode 100644 index 0000000..1d2aa16 --- /dev/null +++ b/oxerun/src/bin/hello.rs @@ -0,0 +1,79 @@ +#![no_main] +#![no_std] +#![feature(sync_unsafe_cell)] + +use oxerun::{ + bootinfo::{self, Info}, + println, +}; + +#[unsafe(no_mangle)] +fn oxerun_main(info: &bootinfo::BootInfo) { + println!("Hello World (oxerun v2) !"); + + println!("boot protocol: {}", info.name()); + println!( + "cmdline: {}", + str::from_utf8(info.cmdline()).unwrap_or("(non-utf8)") + ); + println!("memory layout"); + for descriptor in info.memory_layout() { + println!( + "- {}\t{:08x?}: {:?}", + descriptor.name, + (descriptor.range)(), + descriptor.attribute + ); + } + + println!("memory map"); + for i in 0..info.num_entries() { + if let Some(entry) = info.entry(i) { + println!( + "- {:012x}..{:012x} {:?}", + entry.addr, + entry.addr + entry.size, + entry.entry_type + ); + } + } + + #[cfg(target_arch = "x86_64")] + unsafe { + use 
core::arch::x86_64::{__cpuid, __get_cpuid_max, CpuidResult}; + + use oxerun::arch::x86_64::sev::SEV_STATUS; + use x86_64::registers::model_specific::Msr; + + let raw_hypervisor_leaf = { + let CpuidResult { + eax: _, + ebx, + ecx, + edx, + } = __cpuid(0x4000_0000); + [ebx.to_ne_bytes(), ecx.to_ne_bytes(), edx.to_ne_bytes()] + }; + + let hypervisor_string = str::from_utf8(raw_hypervisor_leaf.as_flattened()); + + println!("Hypervisor: {}", hypervisor_string.unwrap_or("(non-utf8)")); + + if __get_cpuid_max(0x8000_0000).0 > 0x8000_001f { + println!("-- SME CPUID --"); + let sev_leaf = __cpuid(0x8000_001f); + + println!("SME status : {}", sev_leaf.eax & (1 << 0) > 0); + println!("SEV status : {}", sev_leaf.eax & (1 << 1) > 0); + println!("C-Bit: {}", sev_leaf.ebx & 0x3f); + println!("SEV-ES: {}", sev_leaf.eax & (1 << 3) > 0); + + println!("-- SEV_STATUS MSR --"); + + println!("SEV_ENABLED: {}", SEV_STATUS & (1 << 0) > 0); + println!("SEV_ES_ENABLED: {}", SEV_STATUS & (1 << 1) > 0); + } + + println!("IA32_APIC_BASE: {:08x}", Msr::new(0x1b).read()); + } +} diff --git a/oxerun/src/bootinfo.rs b/oxerun/src/bootinfo.rs new file mode 100644 index 0000000..10ef91e --- /dev/null +++ b/oxerun/src/bootinfo.rs @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +use enum_dispatch::enum_dispatch; + +use crate::{layout::MemoryDescriptor}; + +// Common data needed for all boot paths +#[enum_dispatch(BootInfo)] +pub trait Info { + // Name of for this boot protocol + fn name(&self) -> &str; + // Starting address of the Root System Descriptor Pointer + fn rsdp_addr(&self) -> Option { + None + } + // Address/size of FDT used for booting + fn fdt_reservation(&self) -> Option { + None + } + // The kernel command line (not including null terminator) + fn cmdline(&self) -> &[u8]; + // Methods to access the Memory map + fn num_entries(&self) -> usize; + fn entry(&self, idx: usize) -> Option; + // + fn memory_layout(&self) -> &'static 
[MemoryDescriptor]; + // MMIO address space that can be used for PCI BARs if needed + fn pci_bar_memory(&self) -> Option { + None + } +} + +#[derive(Clone, Copy)] +pub struct MemoryEntry { + pub addr: u64, + pub size: u64, + pub entry_type: EntryType, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum EntryType { + Ram, + Reserved, + AcpiReclaimable, + AcpiNvs, + Bad, + VendorReserved, + Persistent, +} + +#[enum_dispatch] +#[derive(Clone, Copy)] +pub enum BootInfo { + #[cfg(target_arch = "x86_64")] + Pvh(crate::pvh::StartInfo), + #[cfg(any(target_arch = "riscv64", target_arch = "aarch64"))] + Fdt(crate::fdt::StartInfo<'static>) +} diff --git a/oxerun/src/console/mod.rs b/oxerun/src/console/mod.rs new file mode 100644 index 0000000..9308965 --- /dev/null +++ b/oxerun/src/console/mod.rs @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright © 2019 Intel Corporation + +// Inspired by https://github.com/phil-opp/blog_os/blob/post-03/src/vga_buffer.rs +// from Philipp Oppermann + +#[cfg(target_arch = "riscv64")] +pub mod uart_mmio; +#[cfg(target_arch = "aarch64")] +pub mod uart_pl011; + +pub mod xen; + +use core::fmt; + +use atomic_refcell::AtomicRefCell; +#[cfg(target_arch = "x86_64")] +pub use uart_16550::SerialPort as Uart16550; +#[cfg(target_arch = "riscv64")] +pub use uart_mmio::UartMmio; +#[cfg(target_arch = "aarch64")] +pub use uart_pl011::Pl011 as UartPl011; + +#[cfg(target_arch = "aarch64")] +use crate::arch::aarch64::layout::map; +use crate::console::xen::XenConsole; + +pub static DEFAULT: AtomicRefCell = AtomicRefCell::new(Console::None); + +pub enum Console { + None, + Xen(XenConsole), + #[cfg(target_arch = "x86_64")] + Uart(Uart16550), + #[cfg(target_arch = "aarch64")] + Uart(UartPl011), + #[cfg(target_arch = "riscv64")] + Uart(UartMmio), +} + +impl fmt::Write for Console { + fn write_str(&mut self, s: &str) -> fmt::Result { + match self { + Console::None => Ok(()), + Console::Xen(xen_console) => xen_console.write_str(s), + 
Console::Uart(serial_port) => serial_port.write_str(s), + } + } +} + +#[macro_export] +macro_rules! println { + ($($arg:tt)*) => {{ + use core::fmt::Write; + writeln!($crate::console::DEFAULT.borrow_mut(), $($arg)*).unwrap(); + }}; +} + +pub fn init() { + // Try to initialize Xen PV console + unsafe { + if let Some(xen) = XenConsole::new() { + *DEFAULT.borrow_mut() = Console::Xen(xen); + return; + } + } + + // Fallback to UART + #[cfg(target_arch = "x86_64")] + unsafe { + // We use COM1 as it is the standard first serial port. + *DEFAULT.borrow_mut() = Console::Uart(Uart16550::new(0x3f8)); + } + + // TODO: Fill from FDT? + + #[cfg(target_arch = "aarch64")] + unsafe { + *DEFAULT.borrow_mut() = Console::Uart(UartPl011::new(map::mmio::PL011_START)); + } + + #[cfg(target_arch = "riscv64")] + { + const SERIAL_PORT_ADDRESS: u64 = 0x1000_0000; + *DEFAULT.borrow_mut() = Console::Uart(UartMmio::new(SERIAL_PORT_ADDRESS)); + } +} diff --git a/oxerun/src/console/uart_mmio.rs b/oxerun/src/console/uart_mmio.rs new file mode 100644 index 0000000..a9ce21c --- /dev/null +++ b/oxerun/src/console/uart_mmio.rs @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2023 Rivos Inc. 
+ +use crate::mem::MemoryRegion; +use core::fmt; + +pub struct UartMmio { + region: MemoryRegion, +} + +impl UartMmio { + pub const fn new(base: u64) -> UartMmio { + UartMmio { + region: MemoryRegion::new(base, 8), + } + } + + fn send(&mut self, byte: u8) { + self.region.io_write_u8(0, byte) + } + + pub fn init(&mut self) {} +} + +impl fmt::Write for UartMmio { + fn write_str(&mut self, s: &str) -> fmt::Result { + for byte in s.bytes() { + self.send(byte); + } + Ok(()) + } +} diff --git a/oxerun/src/console/uart_pl011.rs b/oxerun/src/console/uart_pl011.rs new file mode 100644 index 0000000..0189ebb --- /dev/null +++ b/oxerun/src/console/uart_pl011.rs @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +use core::fmt; + +pub struct Pl011 { + base: usize, +} + +impl Pl011 { + pub const fn new(base: usize) -> Self { + Self { base } + } + + pub fn init(&mut self) { + // Do nothing + } + + pub fn send(&mut self, data: u8) { + unsafe { + core::ptr::write_volatile(self.base as *mut u8, data); + } + } +} + +impl fmt::Write for Pl011 { + fn write_str(&mut self, s: &str) -> fmt::Result { + for byte in s.bytes() { + // Unix-like OS treats LF as CRLF + if byte == b'\n' { + self.send(b'\r'); + } + self.send(byte); + } + Ok(()) + } +} diff --git a/oxerun/src/console/xen.rs b/oxerun/src/console/xen.rs new file mode 100644 index 0000000..922f599 --- /dev/null +++ b/oxerun/src/console/xen.rs @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2025 Vates SAS - Teddy Astie + +use core::{fmt, ptr::NonNull}; + +use xen::{ + event::{EventChannel, EventChannelInterface, RawEventChannelInterface}, + hypercall::hvm::XenHvmOp, + io::{XenConsInterface, XenConsInterfaceVolatileFieldAccess, ring::XenRing}, +}; +use xen_sys::{ + NativeXenHypercall, + bindings::{HVM_PARAM_CONSOLE_EVTCHN, HVM_PARAM_CONSOLE_PFN}, +}; + +use crate::{arch::map_4k_frame, delay}; + +pub struct XenConsole { + pub interface: XenRing<'static>, + 
evtchn_device: RawEventChannelInterface, + event_channel: EventChannel, +} + +unsafe impl Send for XenConsole {} +unsafe impl Sync for XenConsole {} + +impl XenConsole { + pub unsafe fn new() -> Option { + let hyp = unsafe { NativeXenHypercall::new()? }; + + let pfn = hyp.get_hvm_param(HVM_PARAM_CONSOLE_PFN).ok()?; + let evtchn = hyp.get_hvm_param(HVM_PARAM_CONSOLE_EVTCHN).ok()?; + + if pfn == 0 { + return None; + } + + let console = unsafe { XenConsInterface::new(map_4k_frame(pfn, false)?) }; + + console.out_prod().write(1); + + Some(Self { + interface: XenConsInterface::to_ring(console), + evtchn_device: RawEventChannelInterface::new(hyp), + event_channel: EventChannel(evtchn as u32), + }) + } + + /// Move the PV console to another address/event channel. + pub unsafe fn relocate( + &mut self, + addr: Option>, + event_channel: Option, + ) { + if let Some(addr) = addr { + self.interface = XenConsInterface::to_ring(unsafe { XenConsInterface::new(addr) }); + } + + if let Some(event_channel) = event_channel { + self.event_channel = event_channel; + } + } +} + +impl fmt::Write for XenConsole { + fn write_str(&mut self, s: &str) -> fmt::Result { + let mut write_buffer = |mut line: &[u8]| { + while !line.is_empty() { + delay::wait_until(50, || { + let available = self.interface.available(); + if available > 0 { + true + } else { + self.evtchn_device.send(self.event_channel).ok(); + false + } + }); + + let available = self.interface.available(); + let (part, after) = line.split_at(available.min(line.len())); + + self.interface.write(part).ok(); + line = after; + } + }; + + for mut line in s.split_inclusive('\n').map(|s| s.as_bytes()) { + let newline = line.last() == Some(&b'\n'); + if newline { + line = &line[..line.len() - 1]; + } + + write_buffer(line); + + if newline { + write_buffer(&[b'\r', b'\n']); + } + } + + Ok(()) + } +} diff --git a/oxerun/src/delay.rs b/oxerun/src/delay.rs new file mode 100644 index 0000000..d241500 --- /dev/null +++ b/oxerun/src/delay.rs @@ 
-0,0 +1,108 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Copyright (C) 2021 Akira Moroo +// Copyright (C) 2018 Google LLC + +use core::arch::asm; +#[cfg(target_arch = "riscv64")] +use core::arch::riscv64::pause; +#[cfg(target_arch = "x86_64")] +pub use core::arch::x86_64::_rdtsc as rdtsc; + +#[cfg(target_arch = "aarch64")] +#[inline] +unsafe fn rdtsc() -> u64 { + let value: u64; + asm!("mrs {}, cntvct_el0", out(reg) value); + value +} + +#[cfg(target_arch = "riscv64")] +unsafe fn rdtsc() -> u64 { + let r: u64; + unsafe { asm!("csrr {rd}, time", rd = out(reg) r) }; + r +} + +#[cfg(target_arch = "aarch64")] +#[inline] +fn pause() { + unsafe { asm!("yield") } +} + +#[cfg(target_arch = "x86_64")] +#[inline] +fn pause() { + unsafe { asm!("pause") } +} + +pub fn ndelay(ns: u64) { + #[cfg(not(target_arch = "riscv64"))] + const CPU_KHZ_DEFAULT: u64 = 200; + #[cfg(target_arch = "riscv64")] + const CPU_KHZ_DEFAULT: u64 = 1_000_000; /* QEMU currently defines as 1GHz */ + const NSECS_PER_SEC: u64 = 1_000_000_000; + const PAUSE_THRESHOLD_TICKS: u64 = 150; + + let delta = ns * CPU_KHZ_DEFAULT / NSECS_PER_SEC; + let mut pause_delta = 0; + unsafe { + let start = rdtsc(); + if delta > PAUSE_THRESHOLD_TICKS { + pause_delta = delta - PAUSE_THRESHOLD_TICKS; + } + while rdtsc() - start < pause_delta { + pause(); + } + while rdtsc() - start < delta {} + } +} + +pub fn udelay(us: u64) { + for _i in 0..us as usize { + ndelay(1000) + } +} + +#[allow(dead_code)] +pub fn mdelay(ms: u64) { + for _i in 0..ms as usize { + udelay(1000) + } +} + +#[allow(dead_code)] +pub fn wait_while(ms: u64, mut cond: F) -> bool +where + F: FnMut() -> bool, +{ + let mut us = ms * 1000; + while cond() && us > 0 { + udelay(1); + us -= 1; + } + cond() +} + +#[allow(dead_code)] +pub fn wait_until(ms: u64, mut cond: F) -> bool +where + F: FnMut() -> bool, +{ + let mut us = ms * 1000; + while !cond() && us > 0 { + udelay(1); + us -= 1; + } + cond() +} + +pub fn stop_cpu() -> ! 
{ + #[cfg(target_arch = "x86_64")] + unsafe { + core::arch::asm!("cli; hlt"); + } + + loop { + pause() + } +} diff --git a/oxerun/src/fdt.rs b/oxerun/src/fdt.rs new file mode 100644 index 0000000..9a21eb5 --- /dev/null +++ b/oxerun/src/fdt.rs @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +use fdt::Fdt; + +use crate::{ + bootinfo::{EntryType, Info, MemoryEntry}, + layout::MemoryDescriptor, +}; + +pub struct StartInfo<'a> { + acpi_rsdp_addr: Option, + fdt_entry: MemoryEntry, + fdt: Fdt<'a>, + memory_layout: &'static [MemoryDescriptor], + pci_bar_memory: Option, +} + +impl StartInfo<'_> { + pub fn new( + ptr: *const u8, + acpi_rsdp_addr: Option, + memory_layout: &'static [MemoryDescriptor], + pci_bar_memory: Option, + ) -> Self { + let fdt = unsafe { + match Fdt::from_ptr(ptr) { + Ok(fdt) => fdt, + Err(e) => panic!("Failed to create device tree object: {:?}", e), + } + }; + + let fdt_entry = MemoryEntry { + addr: ptr as u64, + size: fdt.total_size() as u64, + entry_type: EntryType::Reserved, + }; + + Self { + fdt_entry, + fdt, + acpi_rsdp_addr, + memory_layout, + pci_bar_memory, + } + } + + pub fn find_compatible_region(&self, with: &[&str]) -> Option<(*const u8, usize)> { + let node = self.fdt.find_compatible(with)?; + if let Some(region) = node.reg()?.next() { + return Some((region.starting_address, region.size?)); + } + None + } +} + +impl Info for StartInfo<'_> { + fn name(&self) -> &str { + "FDT" + } + + fn rsdp_addr(&self) -> Option { + self.acpi_rsdp_addr + } + + fn fdt_reservation(&self) -> Option { + Some(self.fdt_entry) + } + + fn cmdline(&self) -> &[u8] { + match self.fdt.chosen().bootargs() { + Some(s) => s.as_bytes(), + None => b"", + } + } + + fn num_entries(&self) -> usize { + let nodes = self.fdt.find_all_nodes("/memory"); + let regions = nodes.flat_map(|n| n.reg().expect("should contain valid memory regions")); + regions.count() + } + + fn entry(&self, idx: usize) -> Option { + let nodes = 
self.fdt.find_all_nodes("/memory"); + let regions = nodes.flat_map(|n| n.reg().expect("should contain valid memory regions")); + for (i, region) in regions.enumerate() { + if i == idx { + return Some(MemoryEntry { + addr: region.starting_address as u64, + size: region.size.expect("memory size is required") as u64, + entry_type: EntryType::Ram, + }); + } + } + None + } + + fn memory_layout(&self) -> &'static [MemoryDescriptor] { + self.memory_layout + } + + fn pci_bar_memory(&self) -> Option { + self.pci_bar_memory + } +} diff --git a/oxerun/src/layout.rs b/oxerun/src/layout.rs new file mode 100644 index 0000000..855d1c8 --- /dev/null +++ b/oxerun/src/layout.rs @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2022 Akira Moroo + +use core::ops::Range; + +#[allow(dead_code)] +#[derive(Clone, Copy, Debug)] +pub enum MemoryAttribute { + Code, + Data, + Unusable, + Mmio, +} + +#[derive(Clone, Copy)] +pub struct MemoryDescriptor { + #[allow(dead_code)] + pub name: &'static str, + pub range: fn() -> Range, + pub attribute: MemoryAttribute, +} + +impl MemoryDescriptor { + pub const PAGE_SIZE: usize = 0x1000; + + pub fn range_start(&self) -> usize { + let addr = (self.range)().start; + assert!(addr.is_multiple_of(Self::PAGE_SIZE)); + addr + } + + pub fn range_end(&self) -> usize { + let addr = (self.range)().end; + assert!(addr.is_multiple_of(Self::PAGE_SIZE)); + addr + } + + pub fn page_count(&self) -> usize { + (self.range_end() - self.range_start()) / Self::PAGE_SIZE + } +} + +pub type MemoryLayout = [MemoryDescriptor; NUM_MEM_DESCS]; diff --git a/oxerun/src/lib.rs b/oxerun/src/lib.rs new file mode 100644 index 0000000..5a193cb --- /dev/null +++ b/oxerun/src/lib.rs @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright © 2019 Intel Corporation +#![no_std] +#![no_main] +#![feature(sync_unsafe_cell)] +#![cfg_attr(target_arch = "riscv64", feature(riscv_ext_intrinsics))] + +#[cfg(not(target_os = "none"))] 
+core::compile_error!("Unexpected non-freestanding target"); + +use core::panic::PanicInfo; + +pub mod arch; +#[macro_use] +pub mod console; +pub mod bootinfo; +pub mod delay; +#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] +pub mod fdt; +pub mod layout; +pub mod logger; +pub mod mem; +#[cfg(target_arch = "x86_64")] +pub mod pvh; + +#[panic_handler] +fn panic(info: &PanicInfo) -> ! { + println!("PANIC: {info}"); + delay::stop_cpu() +} + +#[cfg(target_arch = "x86_64")] +#[unsafe(no_mangle)] +pub extern "C" fn rust64_start(pvh_info: &pvh::StartInfo) -> ! { + arch::x86_64::mm::setup(); + arch::x86_64::sev::setup(); + arch::x86_64::idt::setup(); + + console::init(); + logger::init(); + + unsafe { oxerun_main(&(*pvh_info).into()) }; + + delay::stop_cpu() +} + +#[cfg(target_arch = "aarch64")] +#[unsafe(no_mangle)] +pub extern "C" fn rust64_start(x0: *const u8) -> ! { + arch::aarch64::simd::setup_simd(); + arch::aarch64::paging::setup(); + + // Use atomic operation before MMU enabled may cause exception, see https://www.ipshop.xyz/5909.html + console::init(); + logger::init(); + + let info = fdt::StartInfo::new( + x0, + Some(arch::aarch64::layout::map::dram::ACPI_START as u64), + &crate::arch::aarch64::layout::MEM_LAYOUT[..], + None, + ); + + unsafe { oxerun_main(&(*info).into()) }; + + delay::stop_cpu() +} + +#[cfg(target_arch = "riscv64")] +#[no_mangle] +pub extern "C" fn rust64_start(a0: u64, a1: *const u8) -> ! 
{ + use crate::bootinfo::{EntryType, Info, MemoryEntry}; + + console::init(); + logger::init(); + + log::info!("Starting on RV64 0x{:x} 0x{:x}", a0, a1 as u64,); + + let info = fdt::StartInfo::new( + a1, + None, + &crate::arch::riscv64::layout::MEM_LAYOUT[..], + Some(MemoryEntry { + addr: 0x4000_0000, + size: 2 << 20, + entry_type: EntryType::Reserved, + }), + ); + + for i in 0..info.num_entries() { + let region = info.entry(i); + log::info!( + "Memory region {}MiB@0x{:x}", + region.size / 1024 / 1024, + region.addr + ); + } + + unsafe { oxerun_main(&info) }; + + delay::stop_cpu() +} + +#[allow(improper_ctypes)] +unsafe extern "Rust" { + unsafe fn oxerun_main(info: &bootinfo::BootInfo); +} diff --git a/oxerun/src/logger.rs b/oxerun/src/logger.rs new file mode 100644 index 0000000..361b438 --- /dev/null +++ b/oxerun/src/logger.rs @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright (C) 2024 Akira Moroo + +pub struct Logger; + +impl log::Log for Logger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + metadata.level() <= log::max_level() + } + + fn log(&self, record: &log::Record) { + if self.enabled(record.metadata()) { + println!("[{}] {}", record.level(), record.args()); + } + } + + fn flush(&self) {} +} + +pub fn init() { + log::set_logger(&Logger).expect("Failed to set logger"); + log::set_max_level(log::LevelFilter::Info); +} diff --git a/oxerun/src/main.rs b/oxerun/src/main.rs deleted file mode 100644 index 68a3a65..0000000 --- a/oxerun/src/main.rs +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2016-2017 Doug Goldstein - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. 
- */ - -#![allow(internal_features)] -#![feature(lang_items)] -#![no_std] -#![no_main] - -extern crate xen; -extern crate xen_sys; - -#[cfg(target_arch = "x86_64")] -use core::arch::asm; -#[cfg(target_arch = "x86_64")] -use core::arch::global_asm; - -use xen::{entry_point, hypercall}; - -entry_point!(hello_world); - -#[allow(clippy::result_unit_err)] -pub fn hello_world() -> Result<(), ()> { - let test = b"oxerun hello world\n"; - - hypercall::console_io::write(test); - Ok(()) -} diff --git a/oxerun/src/mem.rs b/oxerun/src/mem.rs new file mode 100644 index 0000000..a76570d --- /dev/null +++ b/oxerun/src/mem.rs @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright © 2019 Intel Corporation + +#![allow(dead_code)] + +#[derive(Default)] +/// Provides a checked way to access memory offsets from a range of raw memory +pub struct MemoryRegion { + base: u64, + length: u64, +} + +impl MemoryRegion { + pub const fn new(base: u64, length: u64) -> MemoryRegion { + MemoryRegion { base, length } + } + + /// Take a slice and turn it into a region of memory + pub fn from_bytes(data: &[u8]) -> MemoryRegion { + MemoryRegion { + base: data.as_ptr() as u64, + length: data.len() as u64, + } + } + + // Expose the entire region as a byte slice + pub fn as_bytes(&mut self) -> &mut [u8] { + self.as_mut_slice(0, self.length) + } + + /// Expose a section of the memory region as a slice + pub fn as_mut_slice(&mut self, offset: u64, length: u64) -> &mut [T] { + assert!((offset + (length * core::mem::size_of::() as u64)) <= self.length); + unsafe { core::slice::from_raw_parts_mut((self.base + offset) as *mut T, length as usize) } + } + + /// Read a value from a given offset + fn read(&self, offset: u64) -> T + where + T: Copy + Sized, + { + assert!((offset + (core::mem::size_of::() - 1) as u64) < self.length); + let ptr: *const T = core::ptr::with_exposed_provenance((self.base + offset) as usize); + unsafe { ptr.read_unaligned() } + } + + /// Read a single byte at a given 
offset + pub fn read_u8(&self, offset: u64) -> u8 { + self.read(offset) + } + + /// Read a single word at a given offset + pub fn read_u16(&self, offset: u64) -> u16 { + self.read(offset) + } + + /// Read a single dword at a given offset + pub fn read_u32(&self, offset: u64) -> u32 { + self.read(offset) + } + + // Read a single qword at a given offset + pub fn read_u64(&self, offset: u64) -> u64 { + self.read(offset) + } + + /// Write a value at the given offset + pub fn write(&self, offset: u64, value: T) + where + T: Sized, + { + assert!((offset + (core::mem::size_of::() - 1) as u64) < self.length); + let ptr: *mut T = core::ptr::with_exposed_provenance_mut((self.base + offset) as usize); + unsafe { core::ptr::write_unaligned(ptr, value) } + } + + /// Write a single byte at given offset + pub fn write_u8(&self, offset: u64, value: u8) { + self.write(offset, value) + } + + /// Write a single word at given offset + pub fn write_u16(&self, offset: u64, value: u16) { + self.write(offset, value) + } + + /// Write a single dword at given offset + pub fn write_u32(&self, offset: u64, value: u32) { + self.write(offset, value) + } + + /// Write a single qword at given offset + pub fn write_u64(&self, offset: u64, value: u64) { + self.write(offset, value) + } + + /// Read a value at given offset with a mechanism suitable for MMIO + fn io_read(&self, offset: u64) -> T + where + T: Copy + Sized, + { + assert!((offset + (core::mem::size_of::() - 1) as u64) < self.length); + let ptr: *const T = core::ptr::with_exposed_provenance((self.base + offset) as usize); + unsafe { ptr.read_volatile() } + } + + /// Read a single byte at given offset with a mechanism suitable for MMIO + pub fn io_read_u8(&self, offset: u64) -> u8 { + self.io_read(offset) + } + + /// Read a single word at given offset with a mechanism suitable for MMIO + pub fn io_read_u16(&self, offset: u64) -> u16 { + self.io_read(offset) + } + + /// Read a single dword at given offset with a mechanism suitable for MMIO 
+ pub fn io_read_u32(&self, offset: u64) -> u32 { + self.io_read(offset) + } + + /// Read a single qword at given offset with a mechanism suitable for MMIO + pub fn io_read_u64(&self, offset: u64) -> u64 { + self.io_read(offset) + } + + /// Write a value at given offset using a mechanism suitable for MMIO + pub fn io_write(&self, offset: u64, value: T) + where + T: Sized, + { + assert!((offset + (core::mem::size_of::() - 1) as u64) < self.length); + let ptr: *mut T = core::ptr::with_exposed_provenance_mut((self.base + offset) as usize); + unsafe { core::ptr::write_volatile(ptr, value) } + } + + /// Write a single byte at given offset with a mechanism suitable for MMIO + pub fn io_write_u8(&self, offset: u64, value: u8) { + self.io_write(offset, value) + } + + /// Write a single word at given offset with a mechanism suitable for MMIO + pub fn io_write_u16(&self, offset: u64, value: u16) { + self.io_write(offset, value) + } + + /// Write a single dword at given offset with a mechanism suitable for MMIO + pub fn io_write_u32(&self, offset: u64, value: u32) { + self.io_write(offset, value) + } + + /// Write a single qword at given offset with a mechanism suitable for MMIO + pub fn io_write_u64(&self, offset: u64, value: u64) { + self.io_write(offset, value) + } +} diff --git a/oxerun/src/pvh.rs b/oxerun/src/pvh.rs new file mode 100644 index 0000000..1f5b4c9 --- /dev/null +++ b/oxerun/src/pvh.rs @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2020 Google LLC + +use core::mem::size_of; + +use crate::{ + bootinfo::{EntryType, Info, MemoryEntry}, + layout::MemoryDescriptor, +}; + +pub const XEN_HVM_MEMMAP_TYPE_RAM: u32 = 1; +pub const XEN_HVM_MEMMAP_TYPE_RESERVED: u32 = 2; +pub const XEN_HVM_MEMMAP_TYPE_ACPI: u32 = 3; +pub const XEN_HVM_MEMMAP_TYPE_NVS: u32 = 4; +pub const XEN_HVM_MEMMAP_TYPE_UNUSABLE: u32 = 5; +pub const XEN_HVM_MEMMAP_TYPE_DISABLED: u32 = 6; +pub const XEN_HVM_MEMMAP_TYPE_PMEM: u32 = 7; + +// Structures from 
xen/include/public/arch-x86/hvm/start_info.h +#[derive(Clone, Copy, Debug)] +#[repr(C)] +pub struct StartInfo { + magic: [u8; 4], + version: u32, + flags: u32, + nr_modules: u32, + modlist_paddr: u64, + cmdline_paddr: u64, + rsdp_paddr: u64, + memmap_paddr: u64, + memmap_entries: u32, + _pad: u32, +} + +#[derive(Clone, Copy, Debug)] +#[repr(C)] +struct MemMapEntry { + addr: u64, + size: u64, + entry_type: u32, + _pad: u32, +} + +impl From for MemoryEntry { + fn from(value: MemMapEntry) -> Self { + Self { + addr: value.addr, + size: value.size, + entry_type: match value.entry_type { + XEN_HVM_MEMMAP_TYPE_RAM => EntryType::Ram, + XEN_HVM_MEMMAP_TYPE_RESERVED => EntryType::Reserved, + XEN_HVM_MEMMAP_TYPE_ACPI => EntryType::AcpiReclaimable, + XEN_HVM_MEMMAP_TYPE_NVS => EntryType::AcpiNvs, + XEN_HVM_MEMMAP_TYPE_UNUSABLE => EntryType::Bad, + XEN_HVM_MEMMAP_TYPE_DISABLED => EntryType::Bad, + XEN_HVM_MEMMAP_TYPE_PMEM => EntryType::Persistent, + _ => EntryType::Reserved, // Unknown + }, + } + } +} + +impl Info for StartInfo { + fn name(&self) -> &str { + "PVH Boot Protocol" + } + fn rsdp_addr(&self) -> Option { + Some(self.rsdp_paddr) + } + fn cmdline(&self) -> &[u8] { + if self.cmdline_paddr == 0 { + return &[]; + } + let start = self.cmdline_paddr as *const u8; + let mut size: usize = 0; + while unsafe { start.add(size).read() } != 0 { + size += 1; + } + unsafe { core::slice::from_raw_parts(start, size) } + } + fn num_entries(&self) -> usize { + // memmap_paddr and memmap_entries only exist in version 1 or later + if self.version < 1 || self.memmap_paddr == 0 { + return 0; + } + self.memmap_entries as usize + } + fn entry(&self, idx: usize) -> Option { + if idx >= self.num_entries() { + return None; + } + + let ptr = self.memmap_paddr as *const MemMapEntry; + Some(MemoryEntry::from(unsafe { *ptr.add(idx) })) + } + fn memory_layout(&self) -> &'static [MemoryDescriptor] { + &crate::arch::x86_64::layout::MEM_LAYOUT[..] 
+ } +} + +// The PVH Boot Protocol starts at the 32-bit entrypoint to our firmware. +unsafe extern "C" { + fn ram32_start(); +} + +// The kind/name/desc of the PHV ELF Note are from xen/include/public/elfnote.h. +// This is the "Physical entry point into the kernel". +const XEN_ELFNOTE_PHYS32_ENTRY: u32 = 18; +type Name = [u8; 4]; +type Desc = unsafe extern "C" fn(); + +// We make sure our ELF Note has an alignment of 4 for maximum compatibility. +// Some software (QEMU) calculates padding incorectly if alignment != 4. +#[repr(C, packed(4))] +struct Note { + name_size: u32, + desc_size: u32, + kind: u32, + name: Name, + desc: Desc, +} + +// This is: ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, .quad ram32_start) +#[unsafe(link_section = ".note")] +#[used] +static PVH_NOTE: Note = Note { + name_size: size_of::() as u32, + desc_size: size_of::() as u32, + kind: XEN_ELFNOTE_PHYS32_ENTRY, + name: *b"Xen\0", + desc: ram32_start, +}; diff --git a/riscv64gcv-unknown-none-elf.json b/riscv64gcv-unknown-none-elf.json new file mode 100644 index 0000000..37374c1 --- /dev/null +++ b/riscv64gcv-unknown-none-elf.json @@ -0,0 +1,20 @@ +{ + "arch": "riscv64", + "code-model": "medium", + "cpu": "generic-rv64", + "data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128", + "eh-frame-header": false, + "emit-debug-gdb-scripts": false, + "features": "+m,+a,-f,+d,+c,-v", + "linker": "rust-lld", + "linker-flavor": "ld.lld", + "llvm-abiname": "lp64d", + "llvm-target": "riscv64", + "max-atomic-width": 64, + "panic-strategy": "abort", + "relocation-model": "static", + "target-pointer-width": 64, + "pre-link-args": { + "ld.lld": ["--script=riscv64gcv-unknown-none-elf.ld"] + } +} diff --git a/riscv64gcv-unknown-none-elf.ld b/riscv64gcv-unknown-none-elf.ld new file mode 100644 index 0000000..ade5681 --- /dev/null +++ b/riscv64gcv-unknown-none-elf.ld @@ -0,0 +1,47 @@ +ENTRY(ram64_start) + +/* OpenSBI loads here */ +ram_min = 0x80200000; + +SECTIONS +{ + /* Mapping the program headers and note into 
RAM makes the file smaller. */ + . = ram_min; + + /* These sections are mapped into RAM from the file. Omitting :ram from + later sections avoids emitting empty sections in the final binary. */ + code_start = .; + .text.boot : { *(.text.boot) } + .text : { *(.text .text.*) } + . = ALIGN(4K); + code_end = .; + + data_start = .; + + .data : { + . = ALIGN(4096); + *(.data .data.*) + . = ALIGN(8); + PROVIDE(__global_pointer$ = . + 0x800); + } + + .rodata : { *(.rodata .rodata.*) } + .got : { *(.got .got.*) } + + /* The BSS section isn't mapped from file data. It is just zeroed in RAM. */ + .bss : { + *(.bss .bss.*) + } + . = ALIGN(4K); + data_end = .; + + stack_start = .; + .stack (NOLOAD) : ALIGN(4K) { . += 128K; } + stack_end = .; + + /* Strip symbols from the output binary (comment out to get symbols) */ + /DISCARD/ : { + *(.symtab) + *(.strtab) + } +} diff --git a/x86_64-xen-hvm.json b/x86_64-xen-hvm.json new file mode 100644 index 0000000..9443673 --- /dev/null +++ b/x86_64-xen-hvm.json @@ -0,0 +1,21 @@ +{ + "llvm-target": "x86_64-unknown-none", + "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128", + "arch": "x86_64", + "target-endian": "little", + "target-pointer-width": 64, + "target-c-int-width": 32, + "os": "none", + "executables": true, + "linker": "rust-lld", + "linker-flavor": "ld.lld", + "panic-strategy": "abort", + "disable-redzone": true, + "features": "-mmx,-sse,+soft-float", + "code-model": "small", + "relocation-model": "pic", + "pre-link-args": { + "ld.lld": ["--script=x86_64-xen-hvm.ld"] + }, + "rustc-abi": "x86-softfloat" +} diff --git a/x86_64-xen-hvm.ld b/x86_64-xen-hvm.ld new file mode 100644 index 0000000..81a3f8d --- /dev/null +++ b/x86_64-xen-hvm.ld @@ -0,0 +1,52 @@ +ENTRY(ram32_start) /* coreboot uses the ELF entrypoint */ + +PHDRS +{ + ram PT_LOAD FILEHDR PHDRS ; + note PT_NOTE ; +} + +/* Loaders like to put stuff in low memory (< 1M), so we don't use it. 
*/ +ram_min = 1M; + +SECTIONS +{ + /* Mapping the program headers and note into RAM makes the file smaller. */ + . = ram_min; + . += SIZEOF_HEADERS; + .note : { *(.note) } :note :ram + + /* These sections are mapped into RAM from the file. Omitting :ram from + later sections avoids emitting empty sections in the final binary. */ + .rodata : { *(.rodata .rodata.*) } :ram + . = ALIGN(4K); + code_start = .; + .text : { *(.text .text.*) } + .text32 : { *(.text32) } + . = ALIGN(4K); + code_end = .; + + data_start = .; + .data : { *(.data .data.*) } + .got : { *(.got .got.*) } + + /* The BSS section isn't mapped from file data. It is just zeroed in RAM. */ + .bss : { + *(.bss .bss.*) + } + . = ALIGN(4K); + data_end = .; + + /* Our stack grows down and is page-aligned. TODO: Add stack guard pages. */ + stack_start = .; + .stack (NOLOAD) : ALIGN(4K) { . += 128K; } + /* ram32.s only maps the first 2 MiB, and that must include the stack. */ + ASSERT((. <= 2M), "Stack overflows initial identity-mapped memory region") + stack_end = .; + + /* Strip symbols from the output binary (comment out to get symbols) */ + /DISCARD/ : { + *(.symtab) + *(.strtab) + } +} diff --git a/oxerun/x86_64-xen-pv.json b/x86_64-xen-pv.json similarity index 99% rename from oxerun/x86_64-xen-pv.json rename to x86_64-xen-pv.json index 0435d0c..4a13d6a 100644 --- a/oxerun/x86_64-xen-pv.json +++ b/x86_64-xen-pv.json @@ -28,4 +28,4 @@ "target-family": "unix", "target-pointer-width": "64", "vendor": "xen" -} +} \ No newline at end of file diff --git a/oxerun/x86_64-xen-pv.ld b/x86_64-xen-pv.ld similarity index 100% rename from oxerun/x86_64-xen-pv.ld rename to x86_64-xen-pv.ld diff --git a/xen-sys/Cargo.toml b/xen-sys/Cargo.toml index 660ec5f..1646cfc 100644 --- a/xen-sys/Cargo.toml +++ b/xen-sys/Cargo.toml @@ -1,5 +1,8 @@ [package] -authors = ["Doug Goldstein "] +authors = [ + "Doug Goldstein ", + "Teddy Astie ", +] description = "Kernel level hypercall support for Xen" categories = 
["external-ffi-bindings"] homepage = "https://github.com/rust-vmm/xen-sys" @@ -11,5 +14,6 @@ version = "0.1.0" edition = "2018" [dependencies] -cty = "=0.2.0" +cty = "0.2.0" xen-bindings = { path = "../xen-bindings" } +thiserror = { version = "2.0.17", default-features = false } diff --git a/xen-sys/src/aarch64/hypercall.rs b/xen-sys/src/aarch64/hypercall.rs index 5083eea..74c8b8b 100644 --- a/xen-sys/src/aarch64/hypercall.rs +++ b/xen-sys/src/aarch64/hypercall.rs @@ -12,89 +12,60 @@ use core::arch::asm; -use cty::c_long; +use crate::{ + error::{parse_hypercall_return, XenError}, + DirectConstXenBuffer, DirectConstXenSlice, DirectMutXenBuffer, DirectMutXenSlice, + XenConstBuffer, XenHypercall, XenMutBuffer, +}; -#[no_mangle] -#[inline] -pub unsafe fn hypercall_1(op: u32, a1: u64) -> c_long { - let ret: c_long; +#[derive(Clone, Copy, Debug)] +pub struct NativeXenHypercall; - asm!( - "hvc 0xea1", - in("x16") op as u64, - in("x0") a1, - lateout("x0") ret, - options(nostack) - ); - ret +impl NativeXenHypercall { + pub unsafe fn new() -> Option { + Some(Self) + } } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_2(op: u32, a1: u64, a2: u64) -> c_long { - let ret: c_long; +impl XenHypercall for NativeXenHypercall { + unsafe fn hypercall5(&self, cmd: usize, param: [usize; 5]) -> Result { + let output: isize; - asm!( - "hvc 0xea1", - in("x16") op as u64, - in("x0") a1, - in("x1") a2, - lateout("x0") ret, - options(nostack) - ); - ret -} - -#[no_mangle] -#[inline] -pub unsafe fn hypercall_3(op: u32, a1: u64, a2: u64, a3: u64) -> c_long { - let ret: c_long; + unsafe { + asm!( + "hvc 0xea1", + inlateout("x16") cmd => output, + inlateout("x0") param[0] => _, + inlateout("x1") param[1] => _, + inlateout("x2") param[2] => _, + inlateout("x3") param[3] => _, + inlateout("x4") param[4] => _, + options(nostack) + ); + } - asm!( - "hvc 0xea1", - in("x16") op as u64, - in("x0") a1, - in("x1") a2, - in("x2") a3, - lateout("x0") ret, - options(nostack) - ); - ret -} + 
parse_hypercall_return(output) + } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_4(op: u32, a1: u64, a2: u64, a3: u64, a4: u64) -> c_long { - let ret: c_long; + fn make_const_object(&self, buffer: &T) -> Result, XenError> { + Ok(DirectConstXenBuffer(buffer)) + } - asm!( - "hvc 0xea1", - in("x16") op as u64, - in("x0") a1, - in("x1") a2, - in("x2") a3, - in("x3") a4, - lateout("x0") ret, - options(nostack) - ); - ret -} + fn make_mut_object(&self, buffer: &mut T) -> Result, XenError> { + Ok(DirectMutXenBuffer(buffer)) + } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_5(op: u32, a1: u64, a2: u64, a3: u64, a4: u64, a5: u64) -> c_long { - let ret: c_long; + fn make_const_slice( + &self, + slice: &[T], + ) -> Result, XenError> { + Ok(DirectConstXenSlice(slice)) + } - asm!( - "hvc 0xea1", - in("x16") op as u64, - in("x0") a1, - in("x1") a2, - in("x2") a3, - in("x3") a4, - in("x4") a5, - lateout("x0") ret, - options(nostack) - ); - ret + fn make_mut_slice( + &self, + slice: &mut [T], + ) -> Result, XenError> { + Ok(DirectMutXenSlice(slice)) + } } diff --git a/xen-sys/src/error.rs b/xen-sys/src/error.rs new file mode 100644 index 0000000..e29367c --- /dev/null +++ b/xen-sys/src/error.rs @@ -0,0 +1,15 @@ +#[derive(Clone, Copy, Debug, thiserror::Error)] +pub enum XenError { + #[error("Xen error")] + Xen(isize), + #[error("Other error")] + Other(&'static str), +} + +pub fn parse_hypercall_return(ret: isize) -> Result { + if ret < 0 { + Err(XenError::Xen(ret)) + } else { + Ok(ret as usize) + } +} diff --git a/xen-sys/src/hypercall.rs b/xen-sys/src/hypercall.rs deleted file mode 100644 index f9c4d5d..0000000 --- a/xen-sys/src/hypercall.rs +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2016-2017 Doug Goldstein - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. 
- */ - -#![allow(non_camel_case_types, clippy::missing_safety_doc)] - -use xen_bindings::bindings::{ - CONSOLEIO_read, CONSOLEIO_write, SCHEDOP_shutdown, SCHEDOP_yield, __HYPERVISOR_console_io, - __HYPERVISOR_sched_op, -}; - -/// SCHEDOP_ defines from public/sched.h -#[derive(Debug)] -pub enum SchedOp { - /// SCHEDOP_yield - r#yield, - /// SCHEDOP_block - block, - /// SCHEDOP_shutdown - shutdown, - /// SCHEDOP_poll - poll, - /// SCHEDOP_remote_shutdown - remote_shutdown, - /// SCHEDOP_shutdown_code - shutdown_code, - /// SCHEDOP_watchdog - watchdog, - /// SCHEDOP_pin_override - pin_override, -} - -/// CONSOLEIO_ defines from public/xen.h -#[derive(Debug)] -pub enum ConsoleIO { - /// CONSOLEIO_write - Write, - /// CONSOLEIO_read - Read, -} - -macro_rules! hypercall { - ($op:expr, $a1:expr) => { - $crate::hypercall_1($op, $a1 as u64) - }; - - ($op:expr, $a1:expr, $a2:expr) => { - $crate::hypercall_2($op, $a1 as u64, $a2 as u64) - }; - - ($op:expr, $a1:expr, $a2:expr, $a3:expr) => { - $crate::hypercall_3($op, $a1 as u64, $a2 as u64, $a3 as u64) - }; - - ($op:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr) => { - $crate::hypercall_4($op, $a1 as u64, $a2 as u64, $a3 as u64, $a4 as u64) - }; - - ($op:expr, $a1:expr, $a2:expr, $a3:expr, $a4:expr, $a5:expr) => { - $crate::hypercall_5( - $op, $a1 as u64, $a2 as u64, $a3 as u64, $a4 as u64, $a5 as u64, - ) - }; -} - -pub fn console_io(mode: ConsoleIO, buf: &[u8]) -> i64 { - match mode { - ConsoleIO::Write => { - // SAFETY: `buf` is valid byte array. - unsafe { - hypercall!( - __HYPERVISOR_console_io, - CONSOLEIO_write, - buf.len() as u64, - buf.as_ptr() as u64 - ) - } - } - ConsoleIO::Read => { - // SAFETY: `buf` is valid byte array. 
- unsafe { - hypercall!( - __HYPERVISOR_console_io, - CONSOLEIO_read, - buf.len() as u64, - buf.as_ptr() as u64 - ) - } - } - } -} - -#[cfg(target_arch = "x86_64")] -pub unsafe fn sched_op(mode: SchedOp, data: u32) { - match mode { - SchedOp::r#yield => hypercall!(__HYPERVISOR_sched_op, SCHEDOP_yield, data as u64), - SchedOp::shutdown => hypercall!(__HYPERVISOR_sched_op, SCHEDOP_shutdown, data as u64), - _ => panic!(), - }; -} - -#[cfg(target_arch = "aarch64")] -pub unsafe fn sched_op(mode: SchedOp, data: u32) { - let address = &data as *const u32; - - match mode { - SchedOp::r#yield => hypercall!(__HYPERVISOR_sched_op, SCHEDOP_yield, address as u64), - SchedOp::shutdown => hypercall!(__HYPERVISOR_sched_op, SCHEDOP_shutdown, address as u64), - _ => panic!(), - }; -} diff --git a/xen-sys/src/irq.rs b/xen-sys/src/irq.rs deleted file mode 100644 index e69de29..0000000 diff --git a/xen-sys/src/lib.rs b/xen-sys/src/lib.rs index 96d5f0f..ce14e67 100644 --- a/xen-sys/src/lib.rs +++ b/xen-sys/src/lib.rs @@ -9,15 +9,156 @@ */ #![no_std] +pub mod error; #[cfg(target_arch = "x86_64")] -mod x86_64; -#[cfg(target_arch = "x86_64")] -pub use self::x86_64::*; +pub mod x86_64; #[cfg(target_arch = "aarch64")] -mod aarch64; +pub mod aarch64; + +#[cfg(target_arch = "x86_64")] +pub type NativeXenHypercall = x86_64::X86XenHypercall; #[cfg(target_arch = "aarch64")] -pub use self::aarch64::*; +pub type NativeXenHypercall = aarch64::NativeXenHypercall; + +pub use xen_bindings::*; + +use crate::error::XenError; + +/// Wrapper of a reference into a hypercall-safe buffer. +pub trait XenConstBuffer { + /// Get a hypercall-safe reference to the underlying data. + fn as_hypercall_ptr(&self) -> *const T; +} + +/// Wrapper of a mutable reference into a mutable hypercall-safe buffer. +pub trait XenMutBuffer { + /// Get a hypercall-safe mutable reference to the underlying data. + fn as_hypercall_ptr(&mut self) -> *mut T; + + /// Update original reference with new data. 
+ unsafe fn update(&mut self); +} + +/// Hypercall interface. +pub trait XenHypercall: Sized { + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall5(&self, cmd: usize, param: [usize; 5]) -> Result; + + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall4(&self, cmd: usize, param: [usize; 4]) -> Result { + self.hypercall5(cmd, [param[0], param[1], param[2], param[3], 0]) + } + + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall3(&self, cmd: usize, param: [usize; 3]) -> Result { + self.hypercall4(cmd, [param[0], param[1], param[2], 0]) + } + + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall2(&self, cmd: usize, param: [usize; 2]) -> Result { + self.hypercall3(cmd, [param[0], param[1], 0]) + } + + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall1(&self, cmd: usize, param: usize) -> Result { + self.hypercall2(cmd, [param, 0]) + } + + /// Perform a raw hypercall. + /// + /// # SAFETY + /// + /// A hypercall has a externally defined behavior. + unsafe fn hypercall0(&self, cmd: usize) -> Result { + self.hypercall1(cmd, 0) + } + + fn make_const_object(&self, buffer: &T) -> Result, XenError>; + + fn make_mut_object(&self, buffer: &mut T) -> Result, XenError>; + + // Slices needs some special handling as they are not Copy themselves + // and a pointer to a slice doesn't point to its first element. 
+ + fn make_const_slice( + &self, + slice: &[T], + ) -> Result, XenError>; + + fn make_mut_slice( + &self, + slice: &mut [T], + ) -> Result, XenError>; +} + +#[repr(transparent)] +#[derive(Clone, Copy, Debug)] +pub struct DomId(pub u16); + +impl DomId { + pub const SELF: Self = Self(0x7FF0); +} + +/// Constant xen buffer that passes the reference as-is. +pub(crate) struct DirectConstXenBuffer<'a, T>(&'a T); + +impl XenConstBuffer for DirectConstXenBuffer<'_, T> { + fn as_hypercall_ptr(&self) -> *const T { + self.0 + } +} + +pub(crate) struct DirectConstXenSlice<'a, T>(&'a [T]); + +impl XenConstBuffer for DirectConstXenSlice<'_, T> { + fn as_hypercall_ptr(&self) -> *const T { + self.0.as_ptr() + } +} + +/// Mutable xen buffer that passes the reference as-is. +pub(crate) struct DirectMutXenBuffer<'a, T>(&'a mut T); + +impl XenMutBuffer for DirectMutXenBuffer<'_, T> { + fn as_hypercall_ptr(&mut self) -> *mut T { + self.0 + } + + unsafe fn update(&mut self) { + // The buffer is passed as is, we don't need to bounce the changes. + } +} + +pub(crate) struct DirectMutXenSlice<'a, T>(&'a mut [T]); + +impl XenMutBuffer for DirectMutXenSlice<'_, T> { + fn as_hypercall_ptr(&mut self) -> *mut T { + self.0.as_mut_ptr() + } -pub mod hypercall; + unsafe fn update(&mut self) { + // The buffer is passed as is, we don't need to bounce the changes. 
+ } +} diff --git a/xen-sys/src/riscv64/bindgen.rs b/xen-sys/src/riscv64/bindgen.rs new file mode 100644 index 0000000..c9d4575 --- /dev/null +++ b/xen-sys/src/riscv64/bindgen.rs @@ -0,0 +1,4685 @@ +/* automatically generated by rust-bindgen 0.59.1 */ + +pub const __XEN_LATEST_INTERFACE_VERSION__: u32 = 265728; +pub const __XEN_INTERFACE_VERSION__: u32 = 0; +pub const FLAT_RING3_CS32: u32 = 57379; +pub const FLAT_RING3_CS64: u32 = 57395; +pub const FLAT_RING3_DS32: u32 = 57387; +pub const FLAT_RING3_DS64: u32 = 0; +pub const FLAT_RING3_SS32: u32 = 57387; +pub const FLAT_RING3_SS64: u32 = 57387; +pub const FLAT_KERNEL_DS64: u32 = 0; +pub const FLAT_KERNEL_DS32: u32 = 57387; +pub const FLAT_KERNEL_DS: u32 = 0; +pub const FLAT_KERNEL_CS64: u32 = 57395; +pub const FLAT_KERNEL_CS32: u32 = 57379; +pub const FLAT_KERNEL_CS: u32 = 57395; +pub const FLAT_KERNEL_SS64: u32 = 57387; +pub const FLAT_KERNEL_SS32: u32 = 57387; +pub const FLAT_KERNEL_SS: u32 = 57387; +pub const FLAT_USER_DS64: u32 = 0; +pub const FLAT_USER_DS32: u32 = 57387; +pub const FLAT_USER_DS: u32 = 0; +pub const FLAT_USER_CS64: u32 = 57395; +pub const FLAT_USER_CS32: u32 = 57379; +pub const FLAT_USER_CS: u32 = 57395; +pub const FLAT_USER_SS64: u32 = 57387; +pub const FLAT_USER_SS32: u32 = 57387; +pub const FLAT_USER_SS: u32 = 57387; +pub const __HYPERVISOR_VIRT_START: i64 = -140737488355328; +pub const __HYPERVISOR_VIRT_END: i64 = -131941395333120; +pub const __MACH2PHYS_VIRT_START: i64 = -140737488355328; +pub const __MACH2PHYS_VIRT_END: i64 = -140462610448384; +pub const SEGBASE_FS: u32 = 0; +pub const SEGBASE_GS_USER: u32 = 1; +pub const SEGBASE_GS_KERNEL: u32 = 2; +pub const SEGBASE_GS_USER_SEL: u32 = 3; +pub const _VGCF_in_syscall: u32 = 8; +pub const VGCF_in_syscall: u32 = 256; +pub const VGCF_IN_SYSCALL: u32 = 256; +pub const PRI_xen_pfn: &'static [u8; 3usize] = b"lx\0"; +pub const PRIu_xen_pfn: &'static [u8; 3usize] = b"lu\0"; +pub const XEN_HAVE_PV_GUEST_ENTRY: u32 = 1; +pub const 
XEN_HAVE_PV_UPCALL_MASK: u32 = 1; +pub const FIRST_RESERVED_GDT_PAGE: u32 = 14; +pub const FIRST_RESERVED_GDT_BYTE: u32 = 57344; +pub const FIRST_RESERVED_GDT_ENTRY: u32 = 7168; +pub const XEN_LEGACY_MAX_VCPUS: u32 = 32; +pub const PRI_xen_ulong: &'static [u8; 3usize] = b"lx\0"; +pub const VGCF_I387_VALID: u32 = 1; +pub const VGCF_IN_KERNEL: u32 = 4; +pub const _VGCF_i387_valid: u32 = 0; +pub const VGCF_i387_valid: u32 = 1; +pub const _VGCF_in_kernel: u32 = 2; +pub const VGCF_in_kernel: u32 = 4; +pub const _VGCF_failsafe_disables_events: u32 = 3; +pub const VGCF_failsafe_disables_events: u32 = 8; +pub const _VGCF_syscall_disables_events: u32 = 4; +pub const VGCF_syscall_disables_events: u32 = 16; +pub const _VGCF_online: u32 = 5; +pub const VGCF_online: u32 = 32; +pub const XEN_CPUID_NO_SUBLEAF: u32 = 4294967295; +pub const XEN_EMULATE_PREFIX: &'static [u8; 34usize] = b".byte 0x0f,0x0b,0x78,0x65,0x6e ; \0"; +pub const XEN_CPUID: &'static [u8; 39usize] = b".byte 0x0f,0x0b,0x78,0x65,0x6e ; cpuid\0"; +pub const XEN_HVM_DEBUGCONS_IOPORT: u32 = 233; +pub const __HYPERVISOR_set_trap_table: u32 = 0; +pub const __HYPERVISOR_mmu_update: u32 = 1; +pub const __HYPERVISOR_set_gdt: u32 = 2; +pub const __HYPERVISOR_stack_switch: u32 = 3; +pub const __HYPERVISOR_set_callbacks: u32 = 4; +pub const __HYPERVISOR_fpu_taskswitch: u32 = 5; +pub const __HYPERVISOR_sched_op_compat: u32 = 6; +pub const __HYPERVISOR_platform_op: u32 = 7; +pub const __HYPERVISOR_set_debugreg: u32 = 8; +pub const __HYPERVISOR_get_debugreg: u32 = 9; +pub const __HYPERVISOR_update_descriptor: u32 = 10; +pub const __HYPERVISOR_memory_op: u32 = 12; +pub const __HYPERVISOR_multicall: u32 = 13; +pub const __HYPERVISOR_update_va_mapping: u32 = 14; +pub const __HYPERVISOR_set_timer_op: u32 = 15; +pub const __HYPERVISOR_event_channel_op_compat: u32 = 16; +pub const __HYPERVISOR_xen_version: u32 = 17; +pub const __HYPERVISOR_console_io: u32 = 18; +pub const __HYPERVISOR_physdev_op_compat: u32 = 19; +pub const 
__HYPERVISOR_grant_table_op: u32 = 20; +pub const __HYPERVISOR_vm_assist: u32 = 21; +pub const __HYPERVISOR_update_va_mapping_otherdomain: u32 = 22; +pub const __HYPERVISOR_iret: u32 = 23; +pub const __HYPERVISOR_vcpu_op: u32 = 24; +pub const __HYPERVISOR_set_segment_base: u32 = 25; +pub const __HYPERVISOR_mmuext_op: u32 = 26; +pub const __HYPERVISOR_xsm_op: u32 = 27; +pub const __HYPERVISOR_nmi_op: u32 = 28; +pub const __HYPERVISOR_sched_op: u32 = 29; +pub const __HYPERVISOR_callback_op: u32 = 30; +pub const __HYPERVISOR_xenoprof_op: u32 = 31; +pub const __HYPERVISOR_event_channel_op: u32 = 32; +pub const __HYPERVISOR_physdev_op: u32 = 33; +pub const __HYPERVISOR_hvm_op: u32 = 34; +pub const __HYPERVISOR_sysctl: u32 = 35; +pub const __HYPERVISOR_domctl: u32 = 36; +pub const __HYPERVISOR_kexec_op: u32 = 37; +pub const __HYPERVISOR_tmem_op: u32 = 38; +pub const __HYPERVISOR_argo_op: u32 = 39; +pub const __HYPERVISOR_xenpmu_op: u32 = 40; +pub const __HYPERVISOR_dm_op: u32 = 41; +pub const __HYPERVISOR_hypfs_op: u32 = 42; +pub const __HYPERVISOR_arch_0: u32 = 48; +pub const __HYPERVISOR_arch_1: u32 = 49; +pub const __HYPERVISOR_arch_2: u32 = 50; +pub const __HYPERVISOR_arch_3: u32 = 51; +pub const __HYPERVISOR_arch_4: u32 = 52; +pub const __HYPERVISOR_arch_5: u32 = 53; +pub const __HYPERVISOR_arch_6: u32 = 54; +pub const __HYPERVISOR_arch_7: u32 = 55; +pub const __HYPERVISOR_dom0_op: u32 = 7; +pub const VIRQ_TIMER: u32 = 0; +pub const VIRQ_DEBUG: u32 = 1; +pub const VIRQ_CONSOLE: u32 = 2; +pub const VIRQ_DOM_EXC: u32 = 3; +pub const VIRQ_TBUF: u32 = 4; +pub const VIRQ_DEBUGGER: u32 = 6; +pub const VIRQ_XENOPROF: u32 = 7; +pub const VIRQ_CON_RING: u32 = 8; +pub const VIRQ_PCPU_STATE: u32 = 9; +pub const VIRQ_MEM_EVENT: u32 = 10; +pub const VIRQ_ARGO: u32 = 11; +pub const VIRQ_ENOMEM: u32 = 12; +pub const VIRQ_XENPMU: u32 = 13; +pub const VIRQ_ARCH_0: u32 = 16; +pub const VIRQ_ARCH_1: u32 = 17; +pub const VIRQ_ARCH_2: u32 = 18; +pub const VIRQ_ARCH_3: u32 = 19; +pub 
const VIRQ_ARCH_4: u32 = 20; +pub const VIRQ_ARCH_5: u32 = 21; +pub const VIRQ_ARCH_6: u32 = 22; +pub const VIRQ_ARCH_7: u32 = 23; +pub const NR_VIRQS: u32 = 24; +pub const MMU_NORMAL_PT_UPDATE: u32 = 0; +pub const MMU_MACHPHYS_UPDATE: u32 = 1; +pub const MMU_PT_UPDATE_PRESERVE_AD: u32 = 2; +pub const MMU_PT_UPDATE_NO_TRANSLATE: u32 = 3; +pub const MMUEXT_PIN_L1_TABLE: u32 = 0; +pub const MMUEXT_PIN_L2_TABLE: u32 = 1; +pub const MMUEXT_PIN_L3_TABLE: u32 = 2; +pub const MMUEXT_PIN_L4_TABLE: u32 = 3; +pub const MMUEXT_UNPIN_TABLE: u32 = 4; +pub const MMUEXT_NEW_BASEPTR: u32 = 5; +pub const MMUEXT_TLB_FLUSH_LOCAL: u32 = 6; +pub const MMUEXT_INVLPG_LOCAL: u32 = 7; +pub const MMUEXT_TLB_FLUSH_MULTI: u32 = 8; +pub const MMUEXT_INVLPG_MULTI: u32 = 9; +pub const MMUEXT_TLB_FLUSH_ALL: u32 = 10; +pub const MMUEXT_INVLPG_ALL: u32 = 11; +pub const MMUEXT_FLUSH_CACHE: u32 = 12; +pub const MMUEXT_SET_LDT: u32 = 13; +pub const MMUEXT_NEW_USER_BASEPTR: u32 = 15; +pub const MMUEXT_CLEAR_PAGE: u32 = 16; +pub const MMUEXT_COPY_PAGE: u32 = 17; +pub const MMUEXT_FLUSH_CACHE_GLOBAL: u32 = 18; +pub const MMUEXT_MARK_SUPER: u32 = 19; +pub const MMUEXT_UNMARK_SUPER: u32 = 20; +pub const CONSOLEIO_write: u32 = 0; +pub const CONSOLEIO_read: u32 = 1; +pub const VMASST_CMD_enable: u32 = 0; +pub const VMASST_CMD_disable: u32 = 1; +pub const VMASST_TYPE_4gb_segments: u32 = 0; +pub const VMASST_TYPE_4gb_segments_notify: u32 = 1; +pub const VMASST_TYPE_writable_pagetables: u32 = 2; +pub const VMASST_TYPE_pae_extended_cr3: u32 = 3; +pub const VMASST_TYPE_architectural_iopl: u32 = 4; +pub const VMASST_TYPE_runstate_update_flag: u32 = 5; +pub const VMASST_TYPE_m2p_strict: u32 = 32; +pub const MAX_VMASST_TYPE: u32 = 3; +pub const XEN_PVCLOCK_TSC_STABLE_BIT: u32 = 1; +pub const XEN_PVCLOCK_GUEST_STOPPED: u32 = 2; +pub const MAX_GUEST_CMDLINE: u32 = 1024; +pub const SIF_PRIVILEGED: u32 = 1; +pub const SIF_INITDOMAIN: u32 = 2; +pub const SIF_MULTIBOOT_MOD: u32 = 4; +pub const SIF_MOD_START_PFN: u32 = 8; 
+pub const SIF_VIRT_P2M_4TOOLS: u32 = 16; +pub const SIF_PM_MASK: u32 = 65280; +pub const XEN_VGATYPE_TEXT_MODE_3: u32 = 3; +pub const XEN_VGATYPE_VESA_LFB: u32 = 35; +pub const XEN_VGATYPE_EFI_LFB: u32 = 112; +pub const EVTCHNOP_bind_interdomain: u32 = 0; +pub const EVTCHNOP_bind_virq: u32 = 1; +pub const EVTCHNOP_bind_pirq: u32 = 2; +pub const EVTCHNOP_close: u32 = 3; +pub const EVTCHNOP_send: u32 = 4; +pub const EVTCHNOP_status: u32 = 5; +pub const EVTCHNOP_alloc_unbound: u32 = 6; +pub const EVTCHNOP_bind_ipi: u32 = 7; +pub const EVTCHNOP_bind_vcpu: u32 = 8; +pub const EVTCHNOP_unmask: u32 = 9; +pub const EVTCHNOP_reset: u32 = 10; +pub const EVTCHNOP_init_control: u32 = 11; +pub const EVTCHNOP_expand_array: u32 = 12; +pub const EVTCHNOP_set_priority: u32 = 13; +pub const BIND_PIRQ__WILL_SHARE: u32 = 1; +pub const EVTCHNSTAT_closed: u32 = 0; +pub const EVTCHNSTAT_unbound: u32 = 1; +pub const EVTCHNSTAT_interdomain: u32 = 2; +pub const EVTCHNSTAT_pirq: u32 = 3; +pub const EVTCHNSTAT_virq: u32 = 4; +pub const EVTCHNSTAT_ipi: u32 = 5; +pub const EVTCHN_FIFO_PRIORITY_MAX: u32 = 0; +pub const EVTCHN_FIFO_PRIORITY_DEFAULT: u32 = 7; +pub const EVTCHN_FIFO_PRIORITY_MIN: u32 = 15; +pub const EVTCHN_FIFO_MAX_QUEUES: u32 = 16; +pub const EVTCHN_FIFO_PENDING: u32 = 31; +pub const EVTCHN_FIFO_MASKED: u32 = 30; +pub const EVTCHN_FIFO_LINKED: u32 = 29; +pub const EVTCHN_FIFO_BUSY: u32 = 28; +pub const EVTCHN_FIFO_LINK_BITS: u32 = 17; +pub const EVTCHN_FIFO_LINK_MASK: u32 = 131071; +pub const EVTCHN_FIFO_NR_CHANNELS: u32 = 131072; +pub const SCHEDOP_yield: u32 = 0; +pub const SCHEDOP_block: u32 = 1; +pub const SCHEDOP_shutdown: u32 = 2; +pub const SCHEDOP_poll: u32 = 3; +pub const SCHEDOP_remote_shutdown: u32 = 4; +pub const SCHEDOP_shutdown_code: u32 = 5; +pub const SCHEDOP_watchdog: u32 = 6; +pub const SCHEDOP_pin_override: u32 = 7; +pub const SHUTDOWN_poweroff: u32 = 0; +pub const SHUTDOWN_reboot: u32 = 1; +pub const SHUTDOWN_suspend: u32 = 2; +pub const SHUTDOWN_crash: u32 = 
3; +pub const SHUTDOWN_watchdog: u32 = 4; +pub const SHUTDOWN_soft_reset: u32 = 5; +pub const SHUTDOWN_MAX: u32 = 5; +pub const XEN_ELFNOTE_INFO: u32 = 0; +pub const XEN_ELFNOTE_ENTRY: u32 = 1; +pub const XEN_ELFNOTE_HYPERCALL_PAGE: u32 = 2; +pub const XEN_ELFNOTE_VIRT_BASE: u32 = 3; +pub const XEN_ELFNOTE_PADDR_OFFSET: u32 = 4; +pub const XEN_ELFNOTE_XEN_VERSION: u32 = 5; +pub const XEN_ELFNOTE_GUEST_OS: u32 = 6; +pub const XEN_ELFNOTE_GUEST_VERSION: u32 = 7; +pub const XEN_ELFNOTE_LOADER: u32 = 8; +pub const XEN_ELFNOTE_PAE_MODE: u32 = 9; +pub const XEN_ELFNOTE_FEATURES: u32 = 10; +pub const XEN_ELFNOTE_BSD_SYMTAB: u32 = 11; +pub const XEN_ELFNOTE_HV_START_LOW: u32 = 12; +pub const XEN_ELFNOTE_L1_MFN_VALID: u32 = 13; +pub const XEN_ELFNOTE_SUSPEND_CANCEL: u32 = 14; +pub const XEN_ELFNOTE_INIT_P2M: u32 = 15; +pub const XEN_ELFNOTE_MOD_START_PFN: u32 = 16; +pub const XEN_ELFNOTE_SUPPORTED_FEATURES: u32 = 17; +pub const XEN_ELFNOTE_PHYS32_ENTRY: u32 = 18; +pub const XEN_ELFNOTE_MAX: u32 = 18; +pub const XEN_ELFNOTE_CRASH_INFO: u32 = 16777217; +pub const XEN_ELFNOTE_CRASH_REGS: u32 = 16777218; +pub const XEN_ELFNOTE_DUMPCORE_NONE: u32 = 33554432; +pub const XEN_ELFNOTE_DUMPCORE_HEADER: u32 = 33554433; +pub const XEN_ELFNOTE_DUMPCORE_XEN_VERSION: u32 = 33554434; +pub const XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: u32 = 33554435; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct iret_context { + pub rax: u64, + pub r11: u64, + pub rcx: u64, + pub flags: u64, + pub rip: u64, + pub cs: u64, + pub rflags: u64, + pub rsp: u64, + pub ss: u64, +} +#[test] +fn bindgen_test_layout_iret_context() { + assert_eq!( + ::core::mem::size_of::(), + 72usize, + concat!("Size of: ", stringify!(iret_context)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(iret_context)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).rax as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(rax) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r11 as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(r11) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).rcx as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(rcx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).rip as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(rip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cs as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(cs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).rflags as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(rflags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).rsp as *const _ as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(rsp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).ss as *const _ as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(iret_context), + "::", + stringify!(ss) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct cpu_user_regs { + pub r15: u64, + pub r14: u64, + pub r13: u64, + pub r12: u64, + pub __bindgen_anon_1: cpu_user_regs__bindgen_ty_1, + pub __bindgen_anon_2: cpu_user_regs__bindgen_ty_2, + pub r11: u64, + pub r10: u64, + pub r9: u64, + pub r8: u64, + pub __bindgen_anon_3: cpu_user_regs__bindgen_ty_3, + pub __bindgen_anon_4: cpu_user_regs__bindgen_ty_4, + pub __bindgen_anon_5: cpu_user_regs__bindgen_ty_5, + pub __bindgen_anon_6: cpu_user_regs__bindgen_ty_6, + pub __bindgen_anon_7: cpu_user_regs__bindgen_ty_7, + pub error_code: u32, + pub entry_vector: u32, + pub __bindgen_anon_8: cpu_user_regs__bindgen_ty_8, + pub cs: u16, + pub _pad0: [u16; 1usize], + pub saved_upcall_mask: u8, + pub _pad1: [u8; 3usize], + pub __bindgen_anon_9: cpu_user_regs__bindgen_ty_9, + pub __bindgen_anon_10: cpu_user_regs__bindgen_ty_10, + pub ss: u16, + pub _pad2: [u16; 3usize], + pub es: u16, + pub _pad3: [u16; 3usize], + pub ds: u16, + pub _pad4: [u16; 3usize], + pub fs: u16, + pub _pad5: [u16; 3usize], + pub gs: u16, + pub _pad6: [u16; 3usize], +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_1 { + pub rbp: u64, + pub ebp: u64, + pub _ebp: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_1)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).rbp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_1), + "::", + stringify!(rbp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).ebp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_1), + "::", + stringify!(ebp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._ebp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_1), + "::", + stringify!(_ebp) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_2 { + pub rbx: u64, + pub ebx: u64, + pub _ebx: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_2() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_2)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_2)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rbx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_2), + "::", + stringify!(rbx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).ebx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_2), + "::", + stringify!(ebx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::()))._ebx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_2), + "::", + stringify!(_ebx) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_3 { + pub rax: u64, + pub eax: u64, + pub _eax: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_3() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_3)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_3)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rax as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_3), + "::", + stringify!(rax) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).eax as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_3), + "::", + stringify!(eax) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._eax as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_3), + "::", + stringify!(_eax) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_4 { + pub rcx: u64, + pub ecx: u64, + pub _ecx: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_4() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_4)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_4)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).rcx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_4), + "::", + stringify!(rcx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).ecx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_4), + "::", + stringify!(ecx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._ecx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_4), + "::", + stringify!(_ecx) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_5 { + pub rdx: u64, + pub edx: u64, + pub _edx: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_5() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_5)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_5)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rdx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_5), + "::", + stringify!(rdx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).edx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_5), + "::", + stringify!(edx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::()))._edx as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_5), + "::", + stringify!(_edx) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_6 { + pub rsi: u64, + pub esi: u64, + pub _esi: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_6() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_6)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_6)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rsi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_6), + "::", + stringify!(rsi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).esi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_6), + "::", + stringify!(esi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._esi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_6), + "::", + stringify!(_esi) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_7 { + pub rdi: u64, + pub edi: u64, + pub _edi: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_7() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_7)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_7)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).rdi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_7), + "::", + stringify!(rdi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).edi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_7), + "::", + stringify!(edi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._edi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_7), + "::", + stringify!(_edi) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_8 { + pub rip: u64, + pub eip: u64, + pub _eip: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_8() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_8)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_8)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rip as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_8), + "::", + stringify!(rip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).eip as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_8), + "::", + stringify!(eip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::()))._eip as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_8), + "::", + stringify!(_eip) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_9 { + pub rflags: u64, + pub eflags: u64, + pub _eflags: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_9() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_9)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_9)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rflags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_9), + "::", + stringify!(rflags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).eflags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_9), + "::", + stringify!(eflags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._eflags as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_9), + "::", + stringify!(_eflags) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union cpu_user_regs__bindgen_ty_10 { + pub rsp: u64, + pub esp: u64, + pub _esp: u32, +} +#[test] +fn bindgen_test_layout_cpu_user_regs__bindgen_ty_10() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(cpu_user_regs__bindgen_ty_10)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs__bindgen_ty_10)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).rsp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_10), + "::", + stringify!(rsp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).esp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_10), + "::", + stringify!(esp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::()))._esp as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs__bindgen_ty_10), + "::", + stringify!(_esp) + ) + ); +} +#[test] +fn bindgen_test_layout_cpu_user_regs() { + assert_eq!( + ::core::mem::size_of::(), + 200usize, + concat!("Size of: ", stringify!(cpu_user_regs)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(cpu_user_regs)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r15 as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r15) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r14 as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r14) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r13 as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r13) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r12 as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r12) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).r11 as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r11) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r10 as *const _ as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r10) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r9 as *const _ as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r9) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).r8 as *const _ as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(r8) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).error_code as *const _ as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(error_code) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).entry_vector as *const _ as usize }, + 124usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(entry_vector) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cs as *const _ as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(cs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad0 as *const _ as usize }, + 138usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad0) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).saved_upcall_mask as *const _ as usize + }, + 140usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(saved_upcall_mask) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad1 as *const _ as usize }, + 141usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).ss as *const _ as usize }, + 160usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(ss) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad2 as *const _ as usize }, + 162usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad2) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).es as *const _ as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(es) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad3 as *const _ as usize }, + 170usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad3) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).ds as *const _ as usize }, + 176usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(ds) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad4 as *const _ as usize }, + 178usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad4) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).fs as *const _ as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(fs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad5 as *const _ as usize }, + 186usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad5) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).gs as *const _ as usize }, + 192usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(gs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad6 as *const _ as usize }, + 194usize, + concat!( + "Offset of field: ", + stringify!(cpu_user_regs), + "::", + stringify!(_pad6) + ) + ); +} +pub type cpu_user_regs_t = cpu_user_regs; +pub type __guest_handle_cpu_user_regs_t = *mut cpu_user_regs_t; +pub type __guest_handle_const_cpu_user_regs_t = *const cpu_user_regs_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct arch_vcpu_info { + pub cr2: cty::c_ulong, + pub pad: cty::c_ulong, +} +#[test] +fn bindgen_test_layout_arch_vcpu_info() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(arch_vcpu_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(arch_vcpu_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cr2 as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(arch_vcpu_info), + "::", + stringify!(cr2) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).pad as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(arch_vcpu_info), + "::", + stringify!(pad) + ) + ); +} +pub type arch_vcpu_info_t = arch_vcpu_info; +pub type xen_callback_t = cty::c_ulong; +pub type xen_pfn_t = cty::c_ulong; +pub type xen_ulong_t = cty::c_ulong; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct trap_info { + pub vector: u8, + pub flags: u8, + pub cs: u16, + pub address: cty::c_ulong, +} +#[test] +fn bindgen_test_layout_trap_info() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(trap_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(trap_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vector as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(trap_info), + "::", + stringify!(vector) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(trap_info), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cs as *const _ as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(trap_info), + "::", + stringify!(cs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).address as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(trap_info), + "::", + stringify!(address) + ) + ); +} +pub type trap_info_t = trap_info; +pub type __guest_handle_trap_info_t = *mut trap_info_t; +pub type __guest_handle_const_trap_info_t = *const trap_info_t; +pub type tsc_timestamp_t = u64; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct vcpu_guest_context { + pub fpu_ctxt: vcpu_guest_context__bindgen_ty_1, + pub flags: cty::c_ulong, + pub user_regs: cpu_user_regs, + pub trap_ctxt: [trap_info; 256usize], + pub ldt_base: cty::c_ulong, + pub ldt_ents: cty::c_ulong, + pub gdt_frames: [cty::c_ulong; 16usize], + pub gdt_ents: cty::c_ulong, + pub kernel_ss: cty::c_ulong, + pub kernel_sp: cty::c_ulong, + pub ctrlreg: [cty::c_ulong; 8usize], + pub debugreg: [cty::c_ulong; 8usize], + pub event_callback_eip: cty::c_ulong, + pub failsafe_callback_eip: cty::c_ulong, + pub syscall_callback_eip: cty::c_ulong, + pub vm_assist: cty::c_ulong, + pub fs_base: u64, + pub gs_base_kernel: u64, + pub gs_base_user: u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct vcpu_guest_context__bindgen_ty_1 { + pub x: [cty::c_char; 512usize], +} +#[test] +fn bindgen_test_layout_vcpu_guest_context__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 512usize, + concat!("Size of: ", stringify!(vcpu_guest_context__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 1usize, + concat!( + "Alignment of ", + stringify!(vcpu_guest_context__bindgen_ty_1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).x as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context__bindgen_ty_1), + "::", + stringify!(x) + ) + ); +} +#[test] +fn bindgen_test_layout_vcpu_guest_context() { + assert_eq!( + ::core::mem::size_of::(), + 5168usize, + concat!("Size of: ", stringify!(vcpu_guest_context)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(vcpu_guest_context)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).fpu_ctxt as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(fpu_ctxt) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 512usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).user_regs as *const _ as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(user_regs) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).trap_ctxt as *const _ as usize }, + 720usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(trap_ctxt) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).ldt_base as *const _ as usize }, + 4816usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(ldt_base) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).ldt_ents as *const _ as usize }, + 4824usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(ldt_ents) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).gdt_frames as *const _ as usize }, + 4832usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(gdt_frames) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).gdt_ents as *const _ as usize }, + 4960usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(gdt_ents) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).kernel_ss as *const _ as usize }, + 4968usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(kernel_ss) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).kernel_sp as *const _ as usize }, + 4976usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(kernel_sp) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).ctrlreg as *const _ as usize }, + 4984usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(ctrlreg) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).debugreg as *const _ as usize }, + 5048usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(debugreg) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).event_callback_eip as *const _ as usize + }, + 5112usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(event_callback_eip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).failsafe_callback_eip as *const _ + as usize + }, + 5120usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(failsafe_callback_eip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).syscall_callback_eip as *const _ + as usize + }, + 5128usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(syscall_callback_eip) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vm_assist as *const _ as usize }, + 5136usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(vm_assist) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).fs_base as *const _ as usize }, + 5144usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(fs_base) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).gs_base_kernel as *const _ as usize + }, + 5152usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(gs_base_kernel) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).gs_base_user as *const _ as usize + }, + 5160usize, + concat!( + "Offset of field: ", + stringify!(vcpu_guest_context), + "::", + stringify!(gs_base_user) + ) + ); +} +pub type vcpu_guest_context_t = vcpu_guest_context; +pub type __guest_handle_vcpu_guest_context_t = *mut vcpu_guest_context_t; +pub type __guest_handle_const_vcpu_guest_context_t = *const vcpu_guest_context_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct arch_shared_info { + pub max_pfn: cty::c_ulong, + pub pfn_to_mfn_frame_list_list: xen_pfn_t, + pub nmi_reason: cty::c_ulong, + pub p2m_cr3: cty::c_ulong, + pub p2m_vaddr: cty::c_ulong, + pub p2m_generation: cty::c_ulong, +} +#[test] +fn bindgen_test_layout_arch_shared_info() { + assert_eq!( + ::core::mem::size_of::(), + 48usize, + concat!("Size of: ", stringify!(arch_shared_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(arch_shared_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).max_pfn as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(max_pfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).pfn_to_mfn_frame_list_list as *const _ + as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(pfn_to_mfn_frame_list_list) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).nmi_reason as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(nmi_reason) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).p2m_cr3 as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(p2m_cr3) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).p2m_vaddr as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(p2m_vaddr) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).p2m_generation as *const _ as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(arch_shared_info), + "::", + stringify!(p2m_generation) + ) + ); +} +pub type arch_shared_info_t = arch_shared_info; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct xen_cpuid_leaf { + pub leaf: u32, + pub subleaf: u32, + pub a: u32, + pub b: u32, + pub c: u32, + pub d: u32, +} +#[test] +fn bindgen_test_layout_xen_cpuid_leaf() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(xen_cpuid_leaf)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(xen_cpuid_leaf)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).leaf as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(leaf) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).subleaf as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(subleaf) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).a as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(a) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).b as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(b) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).c as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(c) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).d as *const _ as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(xen_cpuid_leaf), + "::", + stringify!(d) + ) + ); +} +pub type xen_cpuid_leaf_t = xen_cpuid_leaf; +pub type __guest_handle_xen_cpuid_leaf_t = *mut xen_cpuid_leaf_t; +pub type __guest_handle_const_xen_cpuid_leaf_t = *const xen_cpuid_leaf_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct xen_msr_entry { + pub idx: u32, + pub flags: u32, + pub val: u64, +} +#[test] +fn bindgen_test_layout_xen_msr_entry() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(xen_msr_entry)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(xen_msr_entry)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).idx as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(xen_msr_entry), + "::", + stringify!(idx) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(xen_msr_entry), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).val as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(xen_msr_entry), + "::", + stringify!(val) + ) + ); +} +pub type xen_msr_entry_t = xen_msr_entry; +pub type __guest_handle_xen_msr_entry_t = *mut xen_msr_entry_t; +pub type __guest_handle_const_xen_msr_entry_t = *const xen_msr_entry_t; +pub type __guest_handle_char = *mut cty::c_char; +pub type __guest_handle_const_char = *const cty::c_char; +pub type __guest_handle_uchar = *mut cty::c_uchar; +pub type __guest_handle_const_uchar = *const cty::c_uchar; +pub type __guest_handle_int = *mut cty::c_int; +pub type __guest_handle_const_int = *const cty::c_int; +pub type __guest_handle_uint = *mut cty::c_uint; +pub type __guest_handle_const_uint = *const cty::c_uint; +pub type __guest_handle_long = *mut cty::c_long; +pub type __guest_handle_const_long = *const cty::c_long; +pub type __guest_handle_ulong = *mut cty::c_ulong; +pub type __guest_handle_const_ulong = *const cty::c_ulong; +pub type __guest_handle_void = *mut cty::c_void; +pub type __guest_handle_const_void = *const cty::c_void; +pub type __guest_handle_uint64_t = *mut u64; +pub type __guest_handle_const_uint64_t = *const u64; +pub type __guest_handle_xen_pfn_t = *mut xen_pfn_t; +pub type __guest_handle_const_xen_pfn_t = *const xen_pfn_t; +pub type __guest_handle_xen_ulong_t = *mut xen_ulong_t; +pub type __guest_handle_const_xen_ulong_t = *const xen_ulong_t; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct mmuext_op { + pub cmd: cty::c_uint, + pub arg1: mmuext_op__bindgen_ty_1, + pub arg2: mmuext_op__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union mmuext_op__bindgen_ty_1 { + pub mfn: xen_pfn_t, + pub linear_addr: cty::c_ulong, +} +#[test] +fn bindgen_test_layout_mmuext_op__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(mmuext_op__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment 
of ", stringify!(mmuext_op__bindgen_ty_1)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).mfn as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op__bindgen_ty_1), + "::", + stringify!(mfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).linear_addr as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op__bindgen_ty_1), + "::", + stringify!(linear_addr) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union mmuext_op__bindgen_ty_2 { + pub nr_ents: cty::c_uint, + pub vcpumask: *const cty::c_void, + pub src_mfn: xen_pfn_t, +} +#[test] +fn bindgen_test_layout_mmuext_op__bindgen_ty_2() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(mmuext_op__bindgen_ty_2)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(mmuext_op__bindgen_ty_2)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).nr_ents as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op__bindgen_ty_2), + "::", + stringify!(nr_ents) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).vcpumask as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op__bindgen_ty_2), + "::", + stringify!(vcpumask) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).src_mfn as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op__bindgen_ty_2), + "::", + stringify!(src_mfn) + ) + ); +} +#[test] +fn bindgen_test_layout_mmuext_op() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(mmuext_op)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(mmuext_op)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cmd as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op), + "::", + stringify!(cmd) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).arg1 as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op), + "::", + stringify!(arg1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).arg2 as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(mmuext_op), + "::", + stringify!(arg2) + ) + ); +} +pub type mmuext_op_t = mmuext_op; +pub type __guest_handle_mmuext_op_t = *mut mmuext_op_t; +pub type __guest_handle_const_mmuext_op_t = *const mmuext_op_t; +pub type domid_t = u16; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct mmu_update { + pub ptr: u64, + pub val: u64, +} +#[test] +fn bindgen_test_layout_mmu_update() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(mmu_update)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(mmu_update)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).ptr as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(mmu_update), + "::", + stringify!(ptr) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::<mmu_update>())).val as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(mmu_update),
+ "::",
+ stringify!(val)
+ )
+ );
+}
+pub type mmu_update_t = mmu_update;
+pub type __guest_handle_mmu_update_t = *mut mmu_update_t;
+pub type __guest_handle_const_mmu_update_t = *const mmu_update_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct multicall_entry {
+ pub op: xen_ulong_t,
+ pub result: xen_ulong_t,
+ pub args: [xen_ulong_t; 6usize],
+}
+#[test]
+fn bindgen_test_layout_multicall_entry() {
+ assert_eq!(
+ ::core::mem::size_of::<multicall_entry>(),
+ 64usize,
+ concat!("Size of: ", stringify!(multicall_entry))
+ );
+ assert_eq!(
+ ::core::mem::align_of::<multicall_entry>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(multicall_entry))
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<multicall_entry>())).op as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(multicall_entry),
+ "::",
+ stringify!(op)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<multicall_entry>())).result as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(multicall_entry),
+ "::",
+ stringify!(result)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<multicall_entry>())).args as *const _ as usize },
+ 16usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(multicall_entry),
+ "::",
+ stringify!(args)
+ )
+ );
+}
+pub type multicall_entry_t = multicall_entry;
+pub type __guest_handle_multicall_entry_t = *mut multicall_entry_t;
+pub type __guest_handle_const_multicall_entry_t = *const multicall_entry_t;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct vcpu_time_info {
+ pub version: u32,
+ pub pad0: u32,
+ pub tsc_timestamp: u64,
+ pub system_time: u64,
+ pub tsc_to_system_mul: u32,
+ pub tsc_shift: i8,
+ pub pad1: [i8; 3usize],
+}
+#[test]
+fn bindgen_test_layout_vcpu_time_info() {
+ assert_eq!(
+ ::core::mem::size_of::<vcpu_time_info>(),
+ 32usize,
+ concat!("Size of: ", stringify!(vcpu_time_info))
+ );
+ assert_eq!(
+ ::core::mem::align_of::<vcpu_time_info>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(vcpu_time_info))
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<vcpu_time_info>())).version as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(vcpu_time_info),
+ "::",
+ stringify!(version)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<vcpu_time_info>())).pad0 as *const _ as usize },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(vcpu_time_info),
+ "::",
+ stringify!(pad0)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<vcpu_time_info>())).tsc_timestamp as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(vcpu_time_info),
+ "::",
+ stringify!(tsc_timestamp)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<vcpu_time_info>())).system_time as *const _ as usize },
+ 16usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(vcpu_time_info),
+ "::",
+ stringify!(system_time)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { + &(*(::core::ptr::null::())).tsc_to_system_mul as *const _ as usize + }, + 24usize, + concat!( + "Offset of field: ", + stringify!(vcpu_time_info), + "::", + stringify!(tsc_to_system_mul) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).tsc_shift as *const _ as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(vcpu_time_info), + "::", + stringify!(tsc_shift) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).pad1 as *const _ as usize }, + 29usize, + concat!( + "Offset of field: ", + stringify!(vcpu_time_info), + "::", + stringify!(pad1) + ) + ); +} +pub type vcpu_time_info_t = vcpu_time_info; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct vcpu_info { + pub evtchn_upcall_pending: u8, + pub evtchn_upcall_mask: u8, + pub evtchn_pending_sel: xen_ulong_t, + pub arch: arch_vcpu_info, + pub time: vcpu_time_info_t, +} +#[test] +fn bindgen_test_layout_vcpu_info() { + assert_eq!( + ::core::mem::size_of::(), + 64usize, + concat!("Size of: ", stringify!(vcpu_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(vcpu_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).evtchn_upcall_pending as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(vcpu_info), + "::", + stringify!(evtchn_upcall_pending) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).evtchn_upcall_mask as *const _ as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(vcpu_info), + "::", + stringify!(evtchn_upcall_mask) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).evtchn_pending_sel as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(vcpu_info), + "::", + stringify!(evtchn_pending_sel) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).arch as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(vcpu_info), + "::", + stringify!(arch) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).time as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(vcpu_info), + "::", + stringify!(time) + ) + ); +} +pub type vcpu_info_t = vcpu_info; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct shared_info { + pub vcpu_info: [vcpu_info; 32usize], + pub evtchn_pending: [xen_ulong_t; 64usize], + pub evtchn_mask: [xen_ulong_t; 64usize], + pub wc_version: u32, + pub wc_sec: u32, + pub wc_nsec: u32, + pub wc_sec_hi: u32, + pub arch: arch_shared_info, +} +#[test] +fn bindgen_test_layout_shared_info() { + assert_eq!( + ::core::mem::size_of::(), + 3136usize, + concat!("Size of: ", stringify!(shared_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(shared_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vcpu_info as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(vcpu_info) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).evtchn_pending as *const _ as usize }, + 2048usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(evtchn_pending) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).evtchn_mask as *const _ as usize }, + 2560usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(evtchn_mask) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).wc_version as *const _ as usize }, + 3072usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(wc_version) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).wc_sec as *const _ as usize }, + 3076usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(wc_sec) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).wc_nsec as *const _ as usize }, + 3080usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(wc_nsec) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).wc_sec_hi as *const _ as usize }, + 3084usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(wc_sec_hi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).arch as *const _ as usize }, + 3088usize, + concat!( + "Offset of field: ", + stringify!(shared_info), + "::", + stringify!(arch) + ) + ); +} +pub type shared_info_t = shared_info; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct start_info { + pub magic: [cty::c_char; 32usize], + pub nr_pages: cty::c_ulong, + pub shared_info: cty::c_ulong, + pub flags: u32, + pub store_mfn: xen_pfn_t, + pub store_evtchn: u32, + pub console: start_info__bindgen_ty_1, + pub pt_base: cty::c_ulong, + pub nr_pt_frames: cty::c_ulong, + pub mfn_list: cty::c_ulong, + pub mod_start: cty::c_ulong, + pub mod_len: cty::c_ulong, + pub cmd_line: [i8; 1024usize], + pub first_p2m_pfn: cty::c_ulong, + pub nr_p2m_frames: cty::c_ulong, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union start_info__bindgen_ty_1 { + pub domU: start_info__bindgen_ty_1__bindgen_ty_1, + pub dom0: start_info__bindgen_ty_1__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct start_info__bindgen_ty_1__bindgen_ty_1 { + pub mfn: xen_pfn_t, + pub evtchn: u32, +} +#[test] +fn bindgen_test_layout_start_info__bindgen_ty_1__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!( + "Size of: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!( + "Alignment of ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).mfn as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(mfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).evtchn as *const _ + as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(evtchn) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct start_info__bindgen_ty_1__bindgen_ty_2 { + pub info_off: u32, + pub info_size: u32, +} +#[test] +fn bindgen_test_layout_start_info__bindgen_ty_1__bindgen_ty_2() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!( + "Size of: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).info_off as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(info_off) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).info_size + as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(info_size) + ) + ); +} +#[test] +fn bindgen_test_layout_start_info__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(start_info__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(start_info__bindgen_ty_1)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).domU as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1), + "::", + stringify!(domU) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).dom0 as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(start_info__bindgen_ty_1), + "::", + stringify!(dom0) + ) + ); +} +#[test] +fn bindgen_test_layout_start_info() { + assert_eq!( + ::core::mem::size_of::(), + 1168usize, + concat!("Size of: ", stringify!(start_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(start_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).magic as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(magic) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).nr_pages as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(nr_pages) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).shared_info as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(shared_info) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).store_mfn as *const _ as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(store_mfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).store_evtchn as *const _ as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(store_evtchn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).console as *const _ as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(console) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).pt_base as *const _ as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(pt_base) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).nr_pt_frames as *const _ as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(nr_pt_frames) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).mfn_list as *const _ as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(mfn_list) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).mod_start as *const _ as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(mod_start) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).mod_len as *const _ as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(mod_len) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cmd_line as *const _ as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(cmd_line) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).first_p2m_pfn as *const _ as usize }, + 1152usize, + concat!( + "Offset of field: ", + stringify!(start_info), + "::", + stringify!(first_p2m_pfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::<start_info>())).nr_p2m_frames as *const _ as usize },
+ 1160usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(start_info),
+ "::",
+ stringify!(nr_p2m_frames)
+ )
+ );
+}
+pub type start_info_t = start_info;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct xen_multiboot_mod_list {
+ pub mod_start: u32,
+ pub mod_end: u32,
+ pub cmdline: u32,
+ pub pad: u32,
+}
+#[test]
+fn bindgen_test_layout_xen_multiboot_mod_list() {
+ assert_eq!(
+ ::core::mem::size_of::<xen_multiboot_mod_list>(),
+ 16usize,
+ concat!("Size of: ", stringify!(xen_multiboot_mod_list))
+ );
+ assert_eq!(
+ ::core::mem::align_of::<xen_multiboot_mod_list>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(xen_multiboot_mod_list))
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe {
+ &(*(::core::ptr::null::<xen_multiboot_mod_list>())).mod_start as *const _ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(xen_multiboot_mod_list),
+ "::",
+ stringify!(mod_start)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<xen_multiboot_mod_list>())).mod_end as *const _ as usize },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(xen_multiboot_mod_list),
+ "::",
+ stringify!(mod_end)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::<xen_multiboot_mod_list>())).cmdline as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(xen_multiboot_mod_list),
+ "::",
+ stringify!(cmdline)
+ )
+ );
+ assert_eq!(
+ // SAFETY: auto-generated by bindgen.
+ unsafe { &(*(::core::ptr::null::())).pad as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(xen_multiboot_mod_list), + "::", + stringify!(pad) + ) + ); +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct dom0_vga_console_info { + pub video_type: u8, + pub u: dom0_vga_console_info__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union dom0_vga_console_info__bindgen_ty_1 { + pub text_mode_3: dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1, + pub vesa_lfb: dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1 { + pub font_height: u16, + pub cursor_x: u16, + pub cursor_y: u16, + pub rows: u16, + pub columns: u16, +} +#[test] +fn bindgen_test_layout_dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 10usize, + concat!( + "Size of: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 2usize, + concat!( + "Alignment of ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())) + .font_height as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(font_height) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).cursor_x + as *const _ as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(cursor_x) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).cursor_y + as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(cursor_y) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rows + as *const _ as usize + }, + 6usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(rows) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).columns + as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(columns) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2 { + pub width: u16, + pub height: u16, + pub bytes_per_line: u16, + pub bits_per_pixel: u16, + pub lfb_base: u32, + pub lfb_size: u32, + pub red_pos: u8, + pub red_size: u8, + pub green_pos: u8, + pub green_size: u8, + pub blue_pos: u8, + pub blue_size: u8, + pub rsvd_pos: u8, + pub rsvd_size: u8, +} +#[test] +fn bindgen_test_layout_dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!( + "Size of: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).width + as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(width) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).height + as *const _ as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(height) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())) + .bytes_per_line as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(bytes_per_line) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())) + .bits_per_pixel as *const _ as usize + }, + 6usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(bits_per_pixel) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).lfb_base + as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(lfb_base) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).lfb_size + as *const _ as usize + }, + 12usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(lfb_size) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).red_pos + as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(red_pos) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).red_size + as *const _ as usize + }, + 17usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(red_size) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).green_pos + as *const _ as usize + }, + 18usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(green_pos) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())) + .green_size as *const _ as usize + }, + 19usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(green_size) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).blue_pos + as *const _ as usize + }, + 20usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(blue_pos) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).blue_size + as *const _ as usize + }, + 21usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(blue_size) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).rsvd_pos + as *const _ as usize + }, + 22usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(rsvd_pos) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).rsvd_size + as *const _ as usize + }, + 23usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(rsvd_size) + ) + ); +} +#[test] +fn bindgen_test_layout_dom0_vga_console_info__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(dom0_vga_console_info__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(dom0_vga_console_info__bindgen_ty_1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).text_mode_3 as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1), + "::", + stringify!(text_mode_3) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).vesa_lfb as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info__bindgen_ty_1), + "::", + stringify!(vesa_lfb) + ) + ); +} +#[test] +fn bindgen_test_layout_dom0_vga_console_info() { + assert_eq!( + ::core::mem::size_of::(), + 28usize, + concat!("Size of: ", stringify!(dom0_vga_console_info)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(dom0_vga_console_info)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).video_type as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info), + "::", + stringify!(video_type) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).u as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(dom0_vga_console_info), + "::", + stringify!(u) + ) + ); +} +pub type dom0_vga_console_info_t = dom0_vga_console_info; +pub type xen_domain_handle_t = [u8; 16usize]; +pub type __guest_handle_uint8 = *mut u8; +pub type __guest_handle_const_uint8 = *const u8; +pub type __guest_handle_uint16 = *mut u16; +pub type __guest_handle_const_uint16 = *const u16; +pub type __guest_handle_uint32 = *mut u32; +pub type __guest_handle_const_uint32 = *const u32; +pub type __guest_handle_uint64 = *mut u64; +pub type __guest_handle_const_uint64 = *const u64; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct xen_uuid_t { + pub a: [u8; 16usize], +} +#[test] +fn bindgen_test_layout_xen_uuid_t() { + assert_eq!( + ::core::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(xen_uuid_t)) + ); + assert_eq!( + ::core::mem::align_of::(), + 1usize, + concat!("Alignment of ", stringify!(xen_uuid_t)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).a as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(xen_uuid_t), + "::", + stringify!(a) + ) + ); +} +pub type evtchn_port_t = u32; +pub type __guest_handle_evtchn_port_t = *mut evtchn_port_t; +pub type __guest_handle_const_evtchn_port_t = *const evtchn_port_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_alloc_unbound { + pub dom: domid_t, + pub remote_dom: domid_t, + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_alloc_unbound() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_alloc_unbound)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_alloc_unbound)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).dom as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_alloc_unbound), + "::", + stringify!(dom) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).remote_dom as *const _ as usize + }, + 2usize, + concat!( + "Offset of field: ", + stringify!(evtchn_alloc_unbound), + "::", + stringify!(remote_dom) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_alloc_unbound), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_alloc_unbound_t = evtchn_alloc_unbound; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_bind_interdomain { + pub remote_dom: domid_t, + pub remote_port: evtchn_port_t, + pub local_port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_bind_interdomain() { + assert_eq!( + ::core::mem::size_of::(), + 12usize, + concat!("Size of: ", stringify!(evtchn_bind_interdomain)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_bind_interdomain)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).remote_dom as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_interdomain), + "::", + stringify!(remote_dom) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).remote_port as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_interdomain), + "::", + stringify!(remote_port) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).local_port as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_interdomain), + "::", + stringify!(local_port) + ) + ); +} +pub type evtchn_bind_interdomain_t = evtchn_bind_interdomain; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_bind_virq { + pub virq: u32, + pub vcpu: u32, + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_bind_virq() { + assert_eq!( + ::core::mem::size_of::(), + 12usize, + concat!("Size of: ", stringify!(evtchn_bind_virq)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_bind_virq)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).virq as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_virq), + "::", + stringify!(virq) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vcpu as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_virq), + "::", + stringify!(vcpu) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_virq), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_bind_virq_t = evtchn_bind_virq; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_bind_pirq { + pub pirq: u32, + pub flags: u32, + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_bind_pirq() { + assert_eq!( + ::core::mem::size_of::(), + 12usize, + concat!("Size of: ", stringify!(evtchn_bind_pirq)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_bind_pirq)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).pirq as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_pirq), + "::", + stringify!(pirq) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).flags as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_pirq), + "::", + stringify!(flags) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_pirq), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_bind_pirq_t = evtchn_bind_pirq; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_bind_ipi { + pub vcpu: u32, + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_bind_ipi() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_bind_ipi)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_bind_ipi)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vcpu as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_ipi), + "::", + stringify!(vcpu) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_ipi), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_bind_ipi_t = evtchn_bind_ipi; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_close { + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_close() { + assert_eq!( + ::core::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(evtchn_close)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_close)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_close), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_close_t = evtchn_close; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_send { + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_send() { + assert_eq!( + ::core::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(evtchn_send)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_send)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_send), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_send_t = evtchn_send; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct evtchn_status { + pub dom: domid_t, + pub port: evtchn_port_t, + pub status: u32, + pub vcpu: u32, + pub u: evtchn_status__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union evtchn_status__bindgen_ty_1 { + pub unbound: evtchn_status__bindgen_ty_1__bindgen_ty_1, + pub interdomain: evtchn_status__bindgen_ty_1__bindgen_ty_2, + pub pirq: u32, + pub virq: u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_status__bindgen_ty_1__bindgen_ty_1 { + pub dom: domid_t, +} +#[test] +fn bindgen_test_layout_evtchn_status__bindgen_ty_1__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 2usize, + concat!( + "Size of: ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 2usize, + concat!( + "Alignment of ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_1) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).dom as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_1), + "::", + stringify!(dom) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_status__bindgen_ty_1__bindgen_ty_2 { + pub dom: domid_t, + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_status__bindgen_ty_1__bindgen_ty_2() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!( + "Size of: ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!( + "Alignment of ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_2) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).dom as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(dom) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).port as *const _ + as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1__bindgen_ty_2), + "::", + stringify!(port) + ) + ); +} +#[test] +fn bindgen_test_layout_evtchn_status__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_status__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_status__bindgen_ty_1)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).unbound as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1), + "::", + stringify!(unbound) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).interdomain as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1), + "::", + stringify!(interdomain) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).pirq as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1), + "::", + stringify!(pirq) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).virq as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status__bindgen_ty_1), + "::", + stringify!(virq) + ) + ); +} +#[test] +fn bindgen_test_layout_evtchn_status() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(evtchn_status)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_status)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).dom as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status), + "::", + stringify!(dom) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status), + "::", + stringify!(port) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).status as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status), + "::", + stringify!(status) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vcpu as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status), + "::", + stringify!(vcpu) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).u as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(evtchn_status), + "::", + stringify!(u) + ) + ); +} +pub type evtchn_status_t = evtchn_status; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_bind_vcpu { + pub port: evtchn_port_t, + pub vcpu: u32, +} +#[test] +fn bindgen_test_layout_evtchn_bind_vcpu() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_bind_vcpu)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_bind_vcpu)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_vcpu), + "::", + stringify!(port) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).vcpu as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_bind_vcpu), + "::", + stringify!(vcpu) + ) + ); +} +pub type evtchn_bind_vcpu_t = evtchn_bind_vcpu; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_unmask { + pub port: evtchn_port_t, +} +#[test] +fn bindgen_test_layout_evtchn_unmask() { + assert_eq!( + ::core::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(evtchn_unmask)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_unmask)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_unmask), + "::", + stringify!(port) + ) + ); +} +pub type evtchn_unmask_t = evtchn_unmask; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_reset { + pub dom: domid_t, +} +#[test] +fn bindgen_test_layout_evtchn_reset() { + assert_eq!( + ::core::mem::size_of::(), + 2usize, + concat!("Size of: ", stringify!(evtchn_reset)) + ); + assert_eq!( + ::core::mem::align_of::(), + 2usize, + concat!("Alignment of ", stringify!(evtchn_reset)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).dom as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_reset), + "::", + stringify!(dom) + ) + ); +} +pub type evtchn_reset_t = evtchn_reset; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_init_control { + pub control_gfn: u64, + pub offset: u32, + pub vcpu: u32, + pub link_bits: u8, + pub _pad: [u8; 7usize], +} +#[test] +fn bindgen_test_layout_evtchn_init_control() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(evtchn_init_control)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(evtchn_init_control)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).control_gfn as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_init_control), + "::", + stringify!(control_gfn) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).offset as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_init_control), + "::", + stringify!(offset) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).vcpu as *const _ as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(evtchn_init_control), + "::", + stringify!(vcpu) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).link_bits as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(evtchn_init_control), + "::", + stringify!(link_bits) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::()))._pad as *const _ as usize }, + 17usize, + concat!( + "Offset of field: ", + stringify!(evtchn_init_control), + "::", + stringify!(_pad) + ) + ); +} +pub type evtchn_init_control_t = evtchn_init_control; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_expand_array { + pub array_gfn: u64, +} +#[test] +fn bindgen_test_layout_evtchn_expand_array() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_expand_array)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(evtchn_expand_array)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).array_gfn as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_expand_array), + "::", + stringify!(array_gfn) + ) + ); +} +pub type evtchn_expand_array_t = evtchn_expand_array; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_set_priority { + pub port: evtchn_port_t, + pub priority: u32, +} +#[test] +fn bindgen_test_layout_evtchn_set_priority() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(evtchn_set_priority)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_set_priority)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).port as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_set_priority), + "::", + stringify!(port) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).priority as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_set_priority), + "::", + stringify!(priority) + ) + ); +} +pub type evtchn_set_priority_t = evtchn_set_priority; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct evtchn_op { + pub cmd: u32, + pub u: evtchn_op__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union evtchn_op__bindgen_ty_1 { + pub alloc_unbound: evtchn_alloc_unbound_t, + pub bind_interdomain: evtchn_bind_interdomain_t, + pub bind_virq: evtchn_bind_virq_t, + pub bind_pirq: evtchn_bind_pirq_t, + pub bind_ipi: evtchn_bind_ipi_t, + pub close: evtchn_close_t, + pub send: evtchn_send_t, + pub status: evtchn_status_t, + pub bind_vcpu: evtchn_bind_vcpu_t, + pub unmask: evtchn_unmask_t, +} +#[test] +fn bindgen_test_layout_evtchn_op__bindgen_ty_1() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(evtchn_op__bindgen_ty_1)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_op__bindgen_ty_1)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).alloc_unbound as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(alloc_unbound) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).bind_interdomain as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(bind_interdomain) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::())).bind_virq as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(bind_virq) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).bind_pirq as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(bind_pirq) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).bind_ipi as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(bind_ipi) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).close as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(close) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).send as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(send) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).status as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(status) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).bind_vcpu as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(bind_vcpu) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).unmask as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op__bindgen_ty_1), + "::", + stringify!(unmask) + ) + ); +} +#[test] +fn bindgen_test_layout_evtchn_op() { + assert_eq!( + ::core::mem::size_of::(), + 28usize, + concat!("Size of: ", stringify!(evtchn_op)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_op)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).cmd as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op), + "::", + stringify!(cmd) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).u as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_op), + "::", + stringify!(u) + ) + ); +} +pub type evtchn_op_t = evtchn_op; +pub type __guest_handle_evtchn_op_t = *mut evtchn_op_t; +pub type __guest_handle_const_evtchn_op_t = *const evtchn_op_t; +pub type event_word_t = u32; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct evtchn_fifo_control_block { + pub ready: u32, + pub _rsvd: u32, + pub head: [u32; 16usize], +} +#[test] +fn bindgen_test_layout_evtchn_fifo_control_block() { + assert_eq!( + ::core::mem::size_of::(), + 72usize, + concat!("Size of: ", stringify!(evtchn_fifo_control_block)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(evtchn_fifo_control_block)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).ready as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(evtchn_fifo_control_block), + "::", + stringify!(ready) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { + &(*(::core::ptr::null::()))._rsvd as *const _ as usize + }, + 4usize, + concat!( + "Offset of field: ", + stringify!(evtchn_fifo_control_block), + "::", + stringify!(_rsvd) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).head as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(evtchn_fifo_control_block), + "::", + stringify!(head) + ) + ); +} +pub type evtchn_fifo_control_block_t = evtchn_fifo_control_block; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sched_shutdown { + pub reason: cty::c_uint, +} +#[test] +fn bindgen_test_layout_sched_shutdown() { + assert_eq!( + ::core::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(sched_shutdown)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(sched_shutdown)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).reason as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sched_shutdown), + "::", + stringify!(reason) + ) + ); +} +pub type sched_shutdown_t = sched_shutdown; +pub type __guest_handle_sched_shutdown_t = *mut sched_shutdown_t; +pub type __guest_handle_const_sched_shutdown_t = *const sched_shutdown_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sched_poll { + pub ports: __guest_handle_evtchn_port_t, + pub nr_ports: cty::c_uint, + pub timeout: u64, +} +#[test] +fn bindgen_test_layout_sched_poll() { + assert_eq!( + ::core::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(sched_poll)) + ); + assert_eq!( + ::core::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(sched_poll)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).ports as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sched_poll), + "::", + stringify!(ports) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).nr_ports as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(sched_poll), + "::", + stringify!(nr_ports) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).timeout as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(sched_poll), + "::", + stringify!(timeout) + ) + ); +} +pub type sched_poll_t = sched_poll; +pub type __guest_handle_sched_poll_t = *mut sched_poll_t; +pub type __guest_handle_const_sched_poll_t = *const sched_poll_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sched_remote_shutdown { + pub domain_id: domid_t, + pub reason: cty::c_uint, +} +#[test] +fn bindgen_test_layout_sched_remote_shutdown() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(sched_remote_shutdown)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(sched_remote_shutdown)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { + &(*(::core::ptr::null::())).domain_id as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sched_remote_shutdown), + "::", + stringify!(domain_id) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).reason as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(sched_remote_shutdown), + "::", + stringify!(reason) + ) + ); +} +pub type sched_remote_shutdown_t = sched_remote_shutdown; +pub type __guest_handle_sched_remote_shutdown_t = *mut sched_remote_shutdown_t; +pub type __guest_handle_const_sched_remote_shutdown_t = *const sched_remote_shutdown_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sched_watchdog { + pub id: u32, + pub timeout: u32, +} +#[test] +fn bindgen_test_layout_sched_watchdog() { + assert_eq!( + ::core::mem::size_of::(), + 8usize, + concat!("Size of: ", stringify!(sched_watchdog)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(sched_watchdog)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).id as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sched_watchdog), + "::", + stringify!(id) + ) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. + unsafe { &(*(::core::ptr::null::())).timeout as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(sched_watchdog), + "::", + stringify!(timeout) + ) + ); +} +pub type sched_watchdog_t = sched_watchdog; +pub type __guest_handle_sched_watchdog_t = *mut sched_watchdog_t; +pub type __guest_handle_const_sched_watchdog_t = *const sched_watchdog_t; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct sched_pin_override { + pub pcpu: i32, +} +#[test] +fn bindgen_test_layout_sched_pin_override() { + assert_eq!( + ::core::mem::size_of::(), + 4usize, + concat!("Size of: ", stringify!(sched_pin_override)) + ); + assert_eq!( + ::core::mem::align_of::(), + 4usize, + concat!("Alignment of ", stringify!(sched_pin_override)) + ); + assert_eq!( + // SAFETY: auto-generated by bindgen. 
+ unsafe { &(*(::core::ptr::null::())).pcpu as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sched_pin_override), + "::", + stringify!(pcpu) + ) + ); +} +pub type sched_pin_override_t = sched_pin_override; +pub type __guest_handle_sched_pin_override_t = *mut sched_pin_override_t; +pub type __guest_handle_const_sched_pin_override_t = *const sched_pin_override_t; diff --git a/xen-sys/src/riscv64/hypercall.rs b/xen-sys/src/riscv64/hypercall.rs new file mode 100644 index 0000000..b91674a --- /dev/null +++ b/xen-sys/src/riscv64/hypercall.rs @@ -0,0 +1,57 @@ +/* + * Copyright 2021-22 Mathieu Poirier + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#![allow(clippy::missing_safety_doc)] + +use core::arch::asm; + +use crate::{ + error::{parse_hypercall_return, XenError}, + DirectConstXenBuffer, DirectConstXenSlice, DirectMutXenBuffer, DirectMutXenSlice, + XenConstBuffer, XenHypercall, XenMutBuffer, +}; + +#[derive(Clone, Copy, Debug)] +pub struct NativeXenHypercall; + +impl NativeXenHypercall { + pub unsafe fn new() -> Option { + Some(Self) + } +} + +impl XenHypercall for NativeXenHypercall { + unsafe fn hypercall5(&self, cmd: usize, param: [usize; 5]) -> Result { + // TODO + Err(XenError::Other("TODO")) + } + + fn make_const_object(&self, buffer: &T) -> Result, XenError> { + Ok(DirectConstXenBuffer(buffer)) + } + + fn make_mut_object(&self, buffer: &mut T) -> Result, XenError> { + Ok(DirectMutXenBuffer(buffer)) + } + + fn make_const_slice( + &self, + slice: &[T], + ) -> Result, XenError> { + Ok(DirectConstXenSlice(slice)) + } + + fn make_mut_slice( + &self, + slice: &mut [T], + ) -> Result, XenError> { + Ok(DirectMutXenSlice(slice)) + } +} diff --git a/xen-sys/src/riscv64/mod.rs b/xen-sys/src/riscv64/mod.rs new file mode 100644 index 0000000..71b81b2 --- /dev/null +++ 
b/xen-sys/src/riscv64/mod.rs @@ -0,0 +1,22 @@ +/* + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +mod hypercall; +pub use hypercall::*; + +mod bindgen { + #![allow( + non_upper_case_globals, + non_camel_case_types, + non_snake_case, + deref_nullptr, + clippy::redundant_static_lifetimes + )] + include!("./bindgen.rs"); +} +pub use bindgen::*; diff --git a/xen-sys/src/x86_64/hypercall.rs b/xen-sys/src/x86_64/hypercall.rs index 4a72b8e..2ae61b5 100644 --- a/xen-sys/src/x86_64/hypercall.rs +++ b/xen-sys/src/x86_64/hypercall.rs @@ -1,129 +1,158 @@ -/* - * Copyright 2016-2019 Doug Goldstein - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. - */ -#![allow(clippy::missing_safety_doc)] +//! x86 Xen interface. +//! +//! See [X86XenHypercall]. -use core::arch::asm; +use core::arch::x86_64::{CpuidResult, __cpuid}; -use cty::c_long; +use crate::{ + error::{parse_hypercall_return, XenError}, + DirectConstXenBuffer, DirectConstXenSlice, DirectMutXenBuffer, DirectMutXenSlice, + XenConstBuffer, XenHypercall, XenMutBuffer, +}; -// x86_64 hypercalls are called at the address: 32 * HYPERCALL_NUM -#[repr(C)] -#[derive(Clone, Copy)] -struct hypercall_entry([u8; 32]); - -// pages on x86_64 are 4096 bytes giving us 128 32-byte entries -extern "C" { - static HYPERCALL_PAGE: [hypercall_entry; 128]; +/// x86 HVM hypercall interface. +/// +/// This interface can only be used when running in kernel-mode (CPL0). +/// Trying to use it in user-mode (CPL3) will lead to #GP. +/// +/// Using this interface with a incorrect hypercall kind will likely lead to +/// incorrect behavior. 
+#[derive(Clone, Copy, Debug)] +pub enum X86XenHypercall { + /// Intel hypercalls `vmcall` + Intel, + /// AMD hypercalls `vmmcall` + Amd, } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_1(op: u32, a1: u64) -> c_long { - let ret: c_long; - let _ign1: u64; - let addr = HYPERCALL_PAGE.as_ptr().offset(op as isize); - - asm!( - "call {0}", - in(reg) addr, - inlateout("rax") addr => ret, - inlateout("rdi") a1 => _ign1, - options(nostack) - ); - ret +#[macro_export] +macro_rules! native_hypercall { + ($h:expr, $($t:tt)*) => { + match $h { + Self::Intel => core::arch::asm!("vmcall", $($t)*), + Self::Amd => core::arch::asm!("vmmcall", $($t)*), + } + }; } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_2(op: u32, a1: u64, a2: u64) -> c_long { - let ret: c_long; - let _ign1: u64; - let _ign2: u64; - let addr = HYPERCALL_PAGE.as_ptr().offset(op as isize); - - asm!( - "call {0}", - in(reg) addr, - inlateout("rax") addr => ret, - inlateout("rdi") a1 => _ign1, - inlateout("rsi") a2 => _ign2, - options(nostack) - ); - ret +/// From xen/include/public/arch-x86/cpuid.h +/// +/// For compatibility with other hypervisor interfaces, the Xen cpuid leaves +/// can be found at the first otherwise unused 0x100 aligned boundary starting +/// from 0x40000000. +const XEN_CPUID_FIRST_LEAF: u32 = 0x4000_0000; + +/// Finds Xen leaf using a provided CPUID function. +/// +/// # SAFETY +/// +/// Safety conditions are inherited from provided `cpuid_func`. 
+pub unsafe fn find_xen_leaves2(cpuid_func: unsafe fn(u32) -> CpuidResult) -> Option { + for base in (XEN_CPUID_FIRST_LEAF..(XEN_CPUID_FIRST_LEAF + 0x10000)).step_by(0x100) { + let cpuid = cpuid_func(base); + + if match cpuid { + CpuidResult { + eax, + ebx: 0x566e6558, // "XenV" + ecx: 0x65584d4d, // "MMXe" + edx: 0x4d4d566e, // "nVMM" + } => eax - base >= 2, + _ => false, + } { + return Some(base); + } + } + + None } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_3(op: u32, a1: u64, a2: u64, a3: u64) -> c_long { - let ret: c_long; - let _ign1: u64; - let _ign2: u64; - let _ign3: u64; - let addr = HYPERCALL_PAGE.as_ptr().offset(op as isize); - - asm!( - "call {0}", - in(reg) addr, - inlateout("rax") addr => ret, - inlateout("rdi") a1 => _ign1, - inlateout("rsi") a2 => _ign2, - inlateout("rdx") a3 => _ign3, - options(nostack) - ); - ret +/// Finds Xen CPUID leaf using native CPUID instruction. +/// +/// # SAFETY +/// +/// This function assumes CPUID instruction is available. +pub unsafe fn find_xen_leaves() -> Option { + find_xen_leaves2(__cpuid) } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_4(op: u32, a1: u64, a2: u64, a3: u64, a4: u64) -> c_long { - let ret: c_long; - let _ign1: u64; - let _ign2: u64; - let _ign3: u64; - let _ign4: u64; - let addr = HYPERCALL_PAGE.as_ptr().offset(op as isize); - - asm!( - "call {0}", - in(reg) addr, - inlateout("rax") addr => ret, - inlateout("rdi") a1 => _ign1, - inlateout("rsi") a2 => _ign2, - inlateout("rdx") a3 => _ign3, - inlateout("r10") a4 => _ign4, - options(nostack) - ); - ret +impl X86XenHypercall { + /// Detect if we are running under Xen, and if so, use the appropriate + /// native hypercall interface. Uses CPUID instruction. + /// + /// # SAFETY + /// + /// This function assumes CPUID instruction is available. + pub unsafe fn new() -> Option { + find_xen_leaves()?; + + // We are running under Xen. 
+ match __cpuid(0) { + // GenuineIntel + CpuidResult { + eax: _, + ebx: 0x756e6547, + ecx: 0x6c65746e, + edx: 0x49656e69, + } => Some(Self::Intel), + // AuthenticAMD + CpuidResult { + eax: _, + ebx: 0x68747541, + ecx: 0x444d4163, + edx: 0x69746e65, + } => Some(Self::Amd), + // TODO: Centaur, Hygon, ... + _ => None, + } + } } -#[no_mangle] -#[inline] -pub unsafe fn hypercall_5(op: u32, a1: u64, a2: u64, a3: u64, a4: u64, a5: u64) -> c_long { - let ret: c_long; - let _ign1: u64; - let _ign2: u64; - let _ign3: u64; - let _ign4: u64; - let _ign5: u64; - let addr = HYPERCALL_PAGE.as_ptr().offset(op as isize); - - asm!( - "call {0}", - in(reg) addr, - inlateout("rax") addr => ret, - inlateout("rdi") a1 => _ign1, - inlateout("rsi") a2 => _ign2, - inlateout("rdx") a3 => _ign3, - inlateout("r10") a4 => _ign4, - inlateout("r9") a5 => _ign5, - options(nostack) - ); - ret +/* + * Based on x86 Hypercall ABI (64-bits) + * + * Hypercall index: RAX + * Parameters: RDI, RSI, RDX, R10, R8 + * Result: RAX + */ + +impl XenHypercall for X86XenHypercall { + #[inline(always)] + unsafe fn hypercall5(&self, cmd: usize, param: [usize; 5]) -> Result { + let output: isize; + + native_hypercall!( + self, + inlateout("rax") cmd => output, + inlateout("rdi") param[0] => _, + inlateout("rsi") param[1] => _, + inlateout("rdx") param[2] => _, + inlateout("r10") param[3] => _, + inlateout("r8") param[4] => _, + ); + + parse_hypercall_return(output) + } + + fn make_const_object(&self, buffer: &T) -> Result, XenError> { + Ok(DirectConstXenBuffer(buffer)) + } + + fn make_mut_object(&self, buffer: &mut T) -> Result, XenError> { + Ok(DirectMutXenBuffer(buffer)) + } + + fn make_const_slice( + &self, + slice: &[T], + ) -> Result, XenError> { + Ok(DirectConstXenSlice(slice)) + } + + fn make_mut_slice( + &self, + slice: &mut [T], + ) -> Result, XenError> { + Ok(DirectMutXenSlice(slice)) + } } diff --git a/xen-unix/Cargo.toml b/xen-unix/Cargo.toml new file mode 100644 index 0000000..6319812 --- /dev/null +++ 
b/xen-unix/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "xen-unix" +description = "Xen interfaces for Unix platforms" +version = "0.1.0" +edition = "2024" + +[dependencies] +xen = { path = "../xen" } +xen-sys = { path = "../xen-sys" } + +[dependencies.nix] +version = "0.30.1" +features = ["ioctl", "mman"] diff --git a/xen-unix/src/event/ioctl.rs b/xen-unix/src/event/ioctl.rs new file mode 100644 index 0000000..56aef48 --- /dev/null +++ b/xen-unix/src/event/ioctl.rs @@ -0,0 +1,43 @@ +use nix::{ioc, ioctl_write_ptr_bad, libc::c_uint}; + +#[repr(C)] +pub struct BindInterdomainArg { + pub remote_domain: c_uint, + pub remote_port: c_uint, +} +ioctl_write_ptr_bad!( + event_bind_interdomain, + ioc!(0, b'E', 1, size_of::()), + BindInterdomainArg +); + +#[repr(C)] +pub struct BindUnboundPortArg { + pub remote_domain: c_uint, +} + +ioctl_write_ptr_bad!( + event_bind_unbound_port, + ioc!(0, b'E', 2, size_of::()), + BindUnboundPortArg +); + +#[repr(C)] +pub struct UnbindPortArg { + pub port: c_uint, +} +ioctl_write_ptr_bad!( + event_unbind_port, + ioc!(0, b'E', 3, size_of::()), + UnbindPortArg +); + +#[repr(C)] +pub struct NotifyArg { + pub port: c_uint, +} +ioctl_write_ptr_bad!( + notify_port, + ioc!(0, b'E', 4, size_of::()), + NotifyArg +); diff --git a/xen-unix/src/event/mod.rs b/xen-unix/src/event/mod.rs new file mode 100644 index 0000000..672219c --- /dev/null +++ b/xen-unix/src/event/mod.rs @@ -0,0 +1,108 @@ +mod ioctl; + +extern crate std; + +use std::{ + fs::{File, OpenOptions}, + io::{self, Read, Write}, + os::{ + fd::{AsFd, AsRawFd, BorrowedFd}, + unix::prelude::RawFd, + }, +}; + +use xen::event::{EventChannel, EventChannelInterface, SyncEventChannelReceiver}; +use xen_sys::DomId; + +const EVTCHN_PATH: &str = "/dev/xen/evtchn"; + +pub struct UnixEventChannelInterface(File); + +impl UnixEventChannelInterface { + pub fn new() -> io::Result { + Ok(Self( + OpenOptions::new() + .read(true) + .write(true) + .open(EVTCHN_PATH)?, + )) + } +} + +impl EventChannelInterface 
for UnixEventChannelInterface { + type Error = io::Error; + + fn alloc(&mut self, remote_dom: DomId) -> Result { + let arg = ioctl::BindUnboundPortArg { + remote_domain: remote_dom.0 as _, + }; + + // SAFETY: `data` is properly sized for this ioctl. + let port = unsafe { ioctl::event_bind_unbound_port(self.0.as_raw_fd(), &arg) }?; + + assert!(port >= 0); + + Ok(EventChannel(port as u32)) + } + + fn bind_interdomain( + &mut self, + remote_dom: DomId, + remote_port: EventChannel, + ) -> Result { + let arg = ioctl::BindInterdomainArg { + remote_domain: remote_dom.0 as _, + remote_port: remote_port.0 as _, + }; + + // SAFETY: `data` is properly sized for this ioctl. + let port = unsafe { ioctl::event_bind_interdomain(self.0.as_raw_fd(), &arg) }?; + + assert!(port >= 0); + + Ok(EventChannel(port as u32)) + } + + fn unbind(&mut self, port: EventChannel) -> Result<(), Self::Error> { + let arg = ioctl::UnbindPortArg { port: port.0 as _ }; + + // SAFETY: `arg` is properly sized for this ioctl. + unsafe { ioctl::event_unbind_port(self.0.as_raw_fd(), &arg) }.ok(); + + Ok(()) + } + + fn send(&self, port: EventChannel) -> Result<(), Self::Error> { + let arg = ioctl::NotifyArg { port: port.0 as _ }; + + // SAFETY: `data` is properly sized for this ioctl. 
+ unsafe { ioctl::notify_port(self.0.as_raw_fd(), &arg) }?; + + Ok(()) + } +} + +impl SyncEventChannelReceiver for UnixEventChannelInterface { + fn pending(&mut self) -> Result { + let mut bytes = [0; 4]; + self.0.read_exact(&mut bytes)?; + + Ok(EventChannel(::from_ne_bytes(bytes))) + } + + fn unmask(&mut self, evtchn: EventChannel) -> Result<(), Self::Error> { + self.0.write_all(&evtchn.0.to_ne_bytes()) + } +} + +impl AsFd for UnixEventChannelInterface { + fn as_fd(&self) -> BorrowedFd<'_> { + self.0.as_fd() + } +} + +impl AsRawFd for UnixEventChannelInterface { + fn as_raw_fd(&self) -> RawFd { + self.0.as_raw_fd() + } +} diff --git a/xen-unix/src/hypercall/buffer.rs b/xen-unix/src/hypercall/buffer.rs new file mode 100644 index 0000000..844d6f7 --- /dev/null +++ b/xen-unix/src/hypercall/buffer.rs @@ -0,0 +1,223 @@ +//! xencall device buffers +//! +//! Using the `privcmd` interface, Xen hypercalls needs the +//! parameters/structures to be placed into special memory mappings (onto +//! xencall device) in order to be "hypercall-safe" thus safely used as +//! addresses in the hypercalls. +//! +//! This module provides [UnixConstXenBuffer], [UnixMutXenBuffer], +//! [UnixConstXenSlice], [UnixMutXenSlice] to act as bounce-buffers (used by +//! [super::UnixXenHypercall]), the original value is copied onto a bounce +//! buffer and updated back by [XenMutBuffer::update]. + +extern crate std; + +use std::{ + alloc::Layout, + eprintln, + marker::PhantomData, + num::NonZeroUsize, + os::fd::AsFd, + ptr::{self, NonNull}, +}; + +use nix::{ + errno::Errno, + sys::mman::{self, MapFlags, ProtFlags}, +}; +use xen_sys::{XenConstBuffer, XenMutBuffer}; + +use super::UnixXenHypercall; + +const PAGE_SIZE: usize = 4096; + +impl UnixXenHypercall { + /// Allocate a xencall (hypercall-safe) buffer + fn alloc_xencall<'hyp, T>(&'hyp self, layout: Layout) -> Result, Errno> { + // TODO: It could be interesting to create a [std::alloc::Allocator] for these + // kind of objects. 
That way, we would be able to create several objects + // in a single page instead of allocating separate pages for each objects. + + assert!( + layout.align() <= PAGE_SIZE, + "Object cannot be aligned to page" + ); + + let size: usize = layout.size(); + + if size == 0 { + // ZST ? + Ok(XenCallBuffer { + interface: PhantomData::<&'hyp Self>, + ptr: NonNull::dangling(), + page_count: 0, + length: 0, + }) + } else { + // Get the number of page to hold the object layout. + let page_count = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; + let length = NonZeroUsize::new(page_count * PAGE_SIZE) + .expect("Invalid size to page count convertion"); + + // SAFETY: `addr` is defined as None + // `prot` and `flags` are legal values + // `length` is a multiple of page size + let ptr: NonNull = unsafe { + mman::mmap( + None, + length, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_SHARED, + self.hypercall_device.as_fd(), + 0, + )? + } + .cast(); + + assert!(ptr.is_aligned(), "mmap gave us a non-aligned pointer"); + + Ok(XenCallBuffer { + interface: PhantomData::<&'hyp Self>, + ptr, + page_count, + length: layout.size(), + }) + } + } + + pub(super) fn alloc_xencall_buffer(&self) -> Result, Errno> { + self.alloc_xencall(Layout::new::()) + } + + pub(super) fn alloc_xencall_slice(&self, n: usize) -> Result, Errno> { + self.alloc_xencall(Layout::array::(n).map_err(|_| Errno::E2BIG)?) + } +} + +pub struct XenCallBuffer<'hyp, T> { + interface: PhantomData<&'hyp UnixXenHypercall>, + ptr: NonNull, // aligned + page_count: usize, + length: usize, +} + +impl XenCallBuffer<'_, T> { + pub(super) unsafe fn read(&self) -> T { + assert_eq!(self.length, size_of::(), "invalid write operation"); + + // SAFETY: Caller must ensure that data pointed can be read as T. + // `ptr` is properly aligned and valid for reads of T. 
+ self.ptr.read_volatile() + } + + pub(super) fn write(&mut self, value: T) { + assert_eq!(self.length, size_of::(), "invalid write operation"); + + // SAFETY: `ptr` is properly aligned and valid for writes of T. + unsafe { self.ptr.write_volatile(value) } + } + + pub(super) unsafe fn copy_to_slice(&self, slice: &mut [T]) { + assert_eq!( + size_of_val(slice), + self.length, + "Mismatched size of slice and buffer" + ); + + // SAFETY: `ptr` is properly aligned and valid for read of [T]. + ptr::copy_nonoverlapping(self.ptr.as_ptr(), slice.as_mut_ptr(), slice.len()); + } + + pub(super) fn copy_from_slice(&mut self, slice: &[T]) { + assert_eq!( + size_of_val(slice), + self.length, + "Mismatched size of slice and buffer" + ); + + // SAFETY: `ptr` is properly aligned and valid for writes of [T]. + unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), self.ptr.as_ptr(), slice.len()) }; + } +} + +impl Drop for XenCallBuffer<'_, T> { + fn drop(&mut self) { + // if page_count is zero, self.bounce_ptr is dangling (and we are in the ZST + // case) + if self.page_count == 0 { + return; + } + + unsafe { + if let Err(e) = mman::munmap(self.ptr.cast(), self.page_count * PAGE_SIZE) { + // Best effort logging + eprintln!( + "munmap({:p}, {}) failed ({})", + self.ptr, + self.page_count * PAGE_SIZE, + e.desc() + ); + } + }; + } +} + +pub struct UnixConstXenBuffer<'a, 'hyp, T: Copy> { + // As const objects are actually being copied they actually don't + // need to hold a reference to their original counterpart. + // Use a PhantomData to make the borrow checker happy. 
+ pub(super) original: PhantomData<&'a T>, + pub(super) buffer: XenCallBuffer<'hyp, T>, +} + +pub struct UnixMutXenBuffer<'a, 'hyp, T: Copy> { + pub(super) original: &'a mut T, + pub(super) buffer: XenCallBuffer<'hyp, T>, +} + +impl XenConstBuffer for UnixConstXenBuffer<'_, '_, T> { + fn as_hypercall_ptr(&self) -> *const T { + self.buffer.ptr.as_ptr() + } +} + +impl XenMutBuffer for UnixMutXenBuffer<'_, '_, T> { + fn as_hypercall_ptr(&mut self) -> *mut T { + self.buffer.ptr.as_ptr() + } + + unsafe fn update(&mut self) { + // SAFETY: Caller must ensure that data pointed in `buffer` is valid for T. + *self.original = self.buffer.read(); + } +} + +pub struct UnixConstXenSlice<'a, 'hyp, T: Copy> { + // As const objects are actually being copied they actually don't + // need to hold a reference to their original counterpart. + // Use a PhantomData to make the borrow checker happy. + pub original: PhantomData<&'a [T]>, + pub buffer: XenCallBuffer<'hyp, T>, +} + +pub struct UnixMutXenSlice<'a, 'b, T: Copy> { + pub original: &'a mut [T], + pub buffer: XenCallBuffer<'b, T>, +} + +impl XenConstBuffer for UnixConstXenSlice<'_, '_, T> { + fn as_hypercall_ptr(&self) -> *const T { + self.buffer.ptr.as_ptr() + } +} + +impl XenMutBuffer for UnixMutXenSlice<'_, '_, T> { + fn as_hypercall_ptr(&mut self) -> *mut T { + self.buffer.ptr.as_ptr() + } + + unsafe fn update(&mut self) { + // SAFETY: Caller must ensure that data pointed in `buffer` is valid for [T]. 
+ self.buffer.copy_to_slice(self.original); + } +} diff --git a/xen-unix/src/hypercall/foreign.rs b/xen-unix/src/hypercall/foreign.rs new file mode 100644 index 0000000..de8d105 --- /dev/null +++ b/xen-unix/src/hypercall/foreign.rs @@ -0,0 +1,171 @@ +extern crate std; + +use core::{ffi::c_void, marker::PhantomData, num::NonZeroUsize}; +use std::{ + ffi::{c_int, c_uint, c_ulong}, + io, + os::fd::{AsFd, AsRawFd}, + ptr::NonNull, +}; + +use nix::{ + ioc, ioctl_readwrite_bad, + sys::mman::{self, MapFlags, ProtFlags}, +}; +use xen_sys::DomId; + +use crate::hypercall::UnixXenHypercall; + +const PAGE_SIZE: usize = 4096; + +pub struct ForeignMap<'a> { + interface: PhantomData<&'a UnixXenHypercall>, + pub addr: NonNull, + pub length: usize, +} + +impl ForeignMap<'_> { + pub fn empty() -> Self { + Self { + interface: PhantomData, + addr: NonNull::dangling(), + length: 0, + } + } +} + +#[repr(C)] +struct PrivcmdMmapBatchV2 { + num: c_uint, + dom: DomId, + addr: u64, + arr: *const c_ulong, + err: *mut c_int, +} +ioctl_readwrite_bad!( + mmap_batch_v2, + ioc!(0, b'P', 4, size_of::()), + PrivcmdMmapBatchV2 +); + +#[repr(C)] +struct PrivCmdMmapResource { + dom: DomId, + map_type: u32, + id: u32, + idx: u32, + num: u64, + addr: u64, +} +ioctl_readwrite_bad!( + mmap_resource, + ioc!(0, b'P', 7, size_of::()), + PrivCmdMmapResource +); + +impl UnixXenHypercall { + pub fn foreign_map<'a>(&'a self, domid: DomId, pfns: &[c_ulong]) -> io::Result> { + let Some(length) = NonZeroUsize::new(pfns.len() * PAGE_SIZE) else { + return Ok(ForeignMap::empty()); + }; + + let addr: NonNull = unsafe { + mman::mmap( + None, + length, + ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + MapFlags::MAP_SHARED, + self.privcmd_device.as_fd(), + 0, + )? 
+ } + .cast(); + + let mut err: c_int = 0; + + let mut param = PrivcmdMmapBatchV2 { + num: pfns.len() as u32, + dom: domid, + addr: addr.addr().get() as u64, + arr: pfns.as_ptr(), + err: &raw mut err, + }; + + if let Err(e) = unsafe { mmap_batch_v2(self.privcmd_device.as_raw_fd(), &mut param) } { + unsafe { mman::munmap(addr, length.get()).ok() }; + return Err(e.into()); + } + + Ok(ForeignMap { + interface: PhantomData::<&'a Self>, + addr, + length: length.get(), + }) + } + + pub fn resource_map<'a>( + &'a self, + domid: DomId, + map_type: u32, + id: u32, + idx: u32, + num: usize, + ) -> io::Result> { + let Some(length) = NonZeroUsize::new(num * PAGE_SIZE) else { + return Ok(ForeignMap::empty()); + }; + + let addr: NonNull = unsafe { + mman::mmap( + None, + length, + ProtFlags::PROT_READ, + MapFlags::MAP_SHARED, + self.privcmd_device.as_fd(), + 0, + )? + } + .cast(); + + let mut param = PrivCmdMmapResource { + num: num as _, + dom: domid, + addr: addr.addr().get() as u64, + map_type, + id, + idx, + }; + + if let Err(e) = unsafe { mmap_resource(self.privcmd_device.as_raw_fd(), &mut param) } { + unsafe { mman::munmap(addr, length.get()).ok() }; + return Err(e.into()); + } + + Ok(ForeignMap { + interface: PhantomData::<&'a Self>, + addr, + length: length.get(), + }) + } +} + +impl Drop for ForeignMap<'_> { + fn drop(&mut self) { + // if page_count is zero, self.bounce_ptr is dangling + if self.length == 0 { + return; + } + + unsafe { + if let Err(e) = mman::munmap(self.addr.cast(), self.length) { + // Best effort logging + eprintln!( + "munmap({:p}, {}) failed ({})", + self.addr, + self.length, + e.desc() + ); + } + }; + } +} diff --git a/xen-unix/src/hypercall/mod.rs b/xen-unix/src/hypercall/mod.rs new file mode 100644 index 0000000..795e5c6 --- /dev/null +++ b/xen-unix/src/hypercall/mod.rs @@ -0,0 +1,154 @@ +//! UNIX Xen interface +//! +//! Implementation of [`XenHypercall`] for UNIX-like systems. Ought to work on +//! 
both Linux and *BSD systems implementing `/dev/xen/privcmd`. + +extern crate std; + +pub mod buffer; +pub mod foreign; + +use std::{fs::File, io, marker::PhantomData, os::fd::AsRawFd, ptr::addr_of_mut}; + +use buffer::{UnixConstXenBuffer, UnixConstXenSlice, UnixMutXenBuffer, UnixMutXenSlice}; +use xen_sys::{ + XenConstBuffer, XenHypercall, XenMutBuffer, + error::{XenError, parse_hypercall_return}, +}; + +/// An abstraction over Xen privcmd device. +#[derive(Debug)] +pub struct UnixXenHypercall { + privcmd_device: File, + hypercall_device: File, +} + +#[cfg(target_os = "linux")] +type PrivCmdField = u64; + +#[cfg(not(target_os = "linux"))] +type PrivCmdField = usize; + +#[repr(C)] +#[derive(Debug)] +struct PrivCmdArg { + /// Identifier for the issued hypercall type + op: PrivCmdField, + /// Hypercall-specific arguments + args: [PrivCmdField; 5], + #[cfg(not(target_os = "linux"))] + /// Return code of the `ioctl` in *BSD systems + ret: PrivCmdField, +} + +mod ioctl { + nix::ioctl_write_ptr_bad!( + hypercall, + nix::ioc!(0, b'P', 0, core::mem::size_of::()), + super::PrivCmdArg + ); +} + +/// Path to the `privcmd` device in a hosted environment. +const PATH_PRIVCMD: &str = "/dev/xen/privcmd"; + +/// Path to `hypercall` device in a hosted environment. 
+const PATH_HYPERCALL: &str = "/dev/xen/hypercall"; + +impl UnixXenHypercall { + pub fn new() -> Result { + Ok(Self { + privcmd_device: File::options().read(true).write(true).open(PATH_PRIVCMD)?, + hypercall_device: File::options() + .read(true) + .write(true) + .open(PATH_HYPERCALL)?, + }) + } +} + +impl XenHypercall for UnixXenHypercall { + unsafe fn hypercall5(&self, cmd: usize, param: [usize; 5]) -> Result { + let mut privcmd_arg = PrivCmdArg { + op: cmd as _, + args: [ + param[0] as _, + param[1] as _, + param[2] as _, + param[3] as _, + param[4] as _, + ], + #[cfg(not(target_os = "linux"))] + ret: 0, + }; + + match ioctl::hypercall(self.privcmd_device.as_raw_fd(), addr_of_mut!(privcmd_arg)) { + Ok(ret) => parse_hypercall_return(ret as _), + Err(ret) => Err(XenError::Other(ret.desc())), + } + } + + fn make_const_object<'a, T: Copy>( + &self, + buffer: &'a T, + ) -> Result, XenError> { + let mut call_buffer = self + .alloc_xencall_buffer() + .map_err(|e| XenError::Other(e.desc()))?; + + call_buffer.write(*buffer); + + Ok(UnixConstXenBuffer { + original: PhantomData::<&'a T>, + buffer: call_buffer, + }) + } + + fn make_mut_object( + &'_ self, + buffer: &mut T, + ) -> Result, XenError> { + let mut call_buffer = self + .alloc_xencall_buffer() + .map_err(|e| XenError::Other(e.desc()))?; + + call_buffer.write(*buffer); + + Ok(UnixMutXenBuffer { + original: buffer, + buffer: call_buffer, + }) + } + + fn make_const_slice<'a, T: Copy + Sized>( + &self, + slice: &'a [T], + ) -> Result, XenError> { + let mut call_buffer = self + .alloc_xencall_slice(slice.len()) + .map_err(|e| XenError::Other(e.desc()))?; + + call_buffer.copy_from_slice(slice); + + Ok(UnixConstXenSlice { + original: PhantomData::<&'a [T]>, + buffer: call_buffer, + }) + } + + fn make_mut_slice( + &self, + slice: &mut [T], + ) -> Result, XenError> { + let mut call_buffer = self + .alloc_xencall_slice(slice.len()) + .map_err(|e| XenError::Other(e.desc()))?; + + call_buffer.copy_from_slice(slice); + + 
Ok(UnixMutXenSlice { + original: slice, + buffer: call_buffer, + }) + } +} diff --git a/xen-unix/src/lib.rs b/xen-unix/src/lib.rs new file mode 100644 index 0000000..16990af --- /dev/null +++ b/xen-unix/src/lib.rs @@ -0,0 +1,2 @@ +pub mod event; +pub mod hypercall; \ No newline at end of file diff --git a/xen/Cargo.toml b/xen/Cargo.toml index 4f39c4e..54ad9e3 100644 --- a/xen/Cargo.toml +++ b/xen/Cargo.toml @@ -11,7 +11,13 @@ version = "0.1.0" edition = "2018" [dependencies] +volatile = { version = "0.6.1", features = ["derive", "unstable"] } xen-sys = { path = "../xen-sys" } +[dependencies.nix] +version = "0.30.1" +features = ["ioctl", "mman"] +optional = true + [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(target_vendor, values("xen"))'] } diff --git a/xen/src/aarch64/mod.rs b/xen/src/aarch64/mod.rs index 0bad945..1e03b10 100644 --- a/xen/src/aarch64/mod.rs +++ b/xen/src/aarch64/mod.rs @@ -8,4 +8,4 @@ * except according to those terms. */ -pub mod entry; +//pub mod entry; diff --git a/xen/src/event/mod.rs b/xen/src/event/mod.rs new file mode 100644 index 0000000..4a428df --- /dev/null +++ b/xen/src/event/mod.rs @@ -0,0 +1,53 @@ +use core::{error::Error, future::Future}; + +use xen_sys::DomId; + +mod none; +pub use none::*; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[repr(transparent)] +pub struct EventChannel(pub u32); + +pub trait EventChannelInterface { + type Error: Error; + + /// Allocate a port in this domain and mark as accepting interdomain + /// bindings from domain `remote_dom`. + fn alloc(&mut self, remote_dom: DomId) -> Result; + + /// Construct an interdomain event channel between + /// the calling domain and `remote_dom`. `remote_dom`,`remote_port` must + /// identify a port that is unbound and marked as accepting bindings + /// from the calling domain. A fresh port is allocated in the calling + /// domain and returned. 
+ fn bind_interdomain( + &mut self, + remote_dom: DomId, + remote_port: EventChannel, + ) -> Result; + + fn unbind(&mut self, evtchn: EventChannel) -> Result<(), Self::Error>; + + /// Send a event to a event channel port. + fn send(&self, port: EventChannel) -> Result<(), Self::Error>; +} + +pub trait SyncEventChannelReceiver: EventChannelInterface { + /// Read the pending event. + fn pending(&mut self) -> Result; + + /// Unmask the event channel, making it ready for receiving another event. + fn unmask(&mut self, port: EventChannel) -> Result<(), Self::Error>; +} + +pub trait AsyncEventChannelReceiver: EventChannelInterface { + /// Read the pending event. + fn pending(&mut self) -> impl Future> + Send; + + /// Unmask the event channel, making it ready for receiving another event. + fn unmask( + &mut self, + port: EventChannel, + ) -> impl Future> + Send; +} diff --git a/xen/src/event/none.rs b/xen/src/event/none.rs new file mode 100644 index 0000000..7f1701f --- /dev/null +++ b/xen/src/event/none.rs @@ -0,0 +1,126 @@ +use xen_sys::{ + bindings::{ + EVTCHNOP_alloc_unbound, EVTCHNOP_bind_interdomain, EVTCHNOP_close, EVTCHNOP_send, + __HYPERVISOR_event_channel_op, + }, + error::XenError, + DomId, XenConstBuffer, XenHypercall, XenMutBuffer, +}; + +use crate::event::{EventChannel, EventChannelInterface}; + +pub struct RawEventChannelInterface(H); + +#[derive(Clone, Copy)] +#[repr(C)] +struct EvtchnAllocUnbound { + dom: DomId, + remote_dom: DomId, + port: EventChannel, +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct EvtchnBindInterdomain { + remote_dom: DomId, + remote_port: EventChannel, + local_port: EventChannel, +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct EvtchnParam { + port: EventChannel, +} + +impl RawEventChannelInterface { + pub fn new(hyp: H) -> Self { + Self(hyp) + } +} + +impl EventChannelInterface for RawEventChannelInterface { + type Error = XenError; + + /// Allocate a port in this domain and mark as accepting interdomain + /// bindings from domain 
`remote_dom`. + fn alloc(&mut self, remote_dom: DomId) -> Result { + let mut alloc_unbound = EvtchnAllocUnbound { + dom: DomId::SELF, + remote_dom, + port: EventChannel(0), // OUT + }; + + unsafe { + let mut alloc_unbound_buffer = self.0.make_mut_object(&mut alloc_unbound)?; + self.0.hypercall2( + __HYPERVISOR_event_channel_op as _, + [ + EVTCHNOP_alloc_unbound as _, + alloc_unbound_buffer.as_hypercall_ptr().addr(), + ], + )?; + alloc_unbound_buffer.update(); + } + + Ok(alloc_unbound.port) + } + + /// Construct an interdomain event channel between + /// the calling domain and `remote_dom`. `remote_dom`,`remote_port` must + /// identify a port that is unbound and marked as accepting bindings + /// from the calling domain. A fresh port is allocated in the calling + /// domain and returned. + fn bind_interdomain( + &mut self, + remote_dom: DomId, + remote_port: EventChannel, + ) -> Result { + let mut bind_interdomain = EvtchnBindInterdomain { + remote_dom, + remote_port, + local_port: EventChannel(0), // OUT + }; + + unsafe { + let mut bind_interdomain_buffer = self.0.make_mut_object(&mut bind_interdomain)?; + self.0.hypercall2( + __HYPERVISOR_event_channel_op as _, + [ + EVTCHNOP_bind_interdomain as _, + bind_interdomain_buffer.as_hypercall_ptr().addr(), + ], + )?; + bind_interdomain_buffer.update(); + } + + Ok(bind_interdomain.local_port) + } + + fn unbind(&mut self, port: EventChannel) -> Result<(), Self::Error> { + let close = EvtchnParam { port }; + unsafe { + let close_buffer = self.0.make_const_object(&close)?; + self.0.hypercall2( + __HYPERVISOR_event_channel_op as _, + [EVTCHNOP_close as _, close_buffer.as_hypercall_ptr().addr()], + )?; + } + + Ok(()) + } + + /// Send a event to a event channel port. 
+ fn send(&self, port: EventChannel) -> Result<(), Self::Error> { + let send = EvtchnParam { port }; + unsafe { + let send_buffer = self.0.make_const_object(&send)?; + self.0.hypercall2( + __HYPERVISOR_event_channel_op as _, + [EVTCHNOP_send as _, send_buffer.as_hypercall_ptr().addr()], + )?; + } + + Ok(()) + } +} diff --git a/xen/src/hypercall/console_io.rs b/xen/src/hypercall/console_io.rs deleted file mode 100644 index c684171..0000000 --- a/xen/src/hypercall/console_io.rs +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2016-2017 Doug Goldstein - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. - */ - -use xen_sys::hypercall::{console_io, ConsoleIO}; - -/// writes to the system serial console which -/// is disabled for non-dom0 domains unless -/// Xen is built with CONFIG_VERBOSE -#[inline] -pub fn write(out: &[u8]) { - console_io(ConsoleIO::Write, out); -} diff --git a/xen/src/hypercall/hvm.rs b/xen/src/hypercall/hvm.rs new file mode 100644 index 0000000..a73192b --- /dev/null +++ b/xen/src/hypercall/hvm.rs @@ -0,0 +1,30 @@ +use xen_sys::{ + bindings::{HVMOP_get_param, __HYPERVISOR_hvm_op, xen_hvm_param}, + error::XenError, + DomId, XenHypercall, XenMutBuffer, +}; + +pub trait XenHvmOp: XenHypercall { + fn get_hvm_param(&self, index: u32) -> Result { + let mut param = xen_hvm_param { + domid: DomId::SELF.0, + index, + pad: 0, + value: 0, + }; + let mut param_buffer = self.make_mut_object(&mut param)?; + + unsafe { + self.hypercall2( + __HYPERVISOR_hvm_op as _, + [HVMOP_get_param as _, param_buffer.as_hypercall_ptr().addr()], + )?; + param_buffer.update(); + drop(param_buffer); + }; + + Ok(param.value) + } +} + +impl XenHvmOp for H {} diff --git a/xen/src/hypercall/mod.rs b/xen/src/hypercall/mod.rs index d3e4742..d31457a 100644 --- a/xen/src/hypercall/mod.rs +++ b/xen/src/hypercall/mod.rs @@ -1,5 +1,6 @@ /* * Copyright 
2016-2017 Doug Goldstein + * Copyright 2025 Teddy Astie * * Licensed under the Apache License, Version 2.0 or the MIT license @@ -8,8 +9,4 @@ * except according to those terms. */ -// export hypercall interfaces - -/// System Serial Console -pub mod console_io; -pub mod sched_op; +pub mod hvm; diff --git a/xen/src/hypercall/sched.rs b/xen/src/hypercall/sched.rs new file mode 100644 index 0000000..833a5ec --- /dev/null +++ b/xen/src/hypercall/sched.rs @@ -0,0 +1,111 @@ +/* + * Copyright 2016-2017 Doug Goldstein + * + * Licensed under the Apache License, Version 2.0 or the MIT license + * , at your + * option. This file may not be copied, modified, or distributed + * except according to those terms. + */ + +#![allow(non_camel_case_types, clippy::missing_safety_doc)] + +use xen_bindings::bindings::{ + CONSOLEIO_read, CONSOLEIO_write, SCHEDOP_shutdown, SCHEDOP_yield, __HYPERVISOR_console_io, + __HYPERVISOR_sched_op, +}; + +use crate::{error::XenError, XenConstBuffer, XenHypercall, XenMutBuffer}; + +/// SCHEDOP_ defines from public/sched.h +#[derive(Debug)] +pub enum SchedOp { + /// SCHEDOP_yield + r#yield, + /// SCHEDOP_block + block, + /// SCHEDOP_shutdown + shutdown, + /// SCHEDOP_poll + poll, + /// SCHEDOP_remote_shutdown + remote_shutdown, + /// SCHEDOP_shutdown_code + shutdown_code, + /// SCHEDOP_watchdog + watchdog, + /// SCHEDOP_pin_override + pin_override, +} + +/// CONSOLEIO_ defines from public/xen.h +#[derive(Debug)] +pub enum ConsoleIO { + /// CONSOLEIO_write + Write, + /// CONSOLEIO_read + Read, +} + +pub fn console_io( + hyp: &H, + mode: ConsoleIO, + buf: &mut [u8], +) -> Result { + let len = buf.len(); + + match mode { + ConsoleIO::Write => { + let hyp_buffer = hyp.make_const_slice(buf)?; + + let ret = unsafe { + hyp.hypercall3( + __HYPERVISOR_console_io as usize, + [ + CONSOLEIO_write as usize, + len, + hyp_buffer.as_hypercall_ptr().addr(), + ], + )? 
+ }; + + Ok(ret as i64) + } + ConsoleIO::Read => { + let mut hyp_buffer = hyp.make_mut_slice(buf)?; + + let ret = unsafe { + hyp.hypercall3( + __HYPERVISOR_console_io as usize, + [ + CONSOLEIO_read as usize, + len, + hyp_buffer.as_hypercall_ptr().addr(), + ], + )? + }; + unsafe { hyp_buffer.update() }; + + Ok(ret as i64) + } + } +} + +pub unsafe fn sched_op(hyp: &H, mode: SchedOp, data: u32) -> Result<(), XenError> { + match mode { + SchedOp::r#yield => { + hyp.hypercall1(__HYPERVISOR_sched_op as usize, SCHEDOP_yield as usize)?; + } + SchedOp::shutdown => { + let reason = hyp.make_const_object(&data)?; + + hyp.hypercall2( + __HYPERVISOR_sched_op as usize, + [SCHEDOP_shutdown as usize, reason.as_hypercall_ptr().addr()], + )?; + } + _ => unimplemented!(), + }; + + Ok(()) +} diff --git a/xen/src/hypercall/sched_op.rs b/xen/src/hypercall/sched_op.rs deleted file mode 100644 index f218bff..0000000 --- a/xen/src/hypercall/sched_op.rs +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016-2017 Doug Goldstein - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. - */ - -use xen_sys::{ - hypercall::{sched_op, SchedOp}, - SHUTDOWN_crash, SHUTDOWN_poweroff, SHUTDOWN_reboot, -}; - -fn op_shutdown(reason: u32) { - // SAFETY: This call is safe. - unsafe { sched_op(SchedOp::shutdown, reason) }; -} - -#[no_mangle] -pub extern "C" fn poweroff() -> ! { - op_shutdown(SHUTDOWN_poweroff); - unreachable!() -} - -pub fn reboot() { - op_shutdown(SHUTDOWN_reboot); -} - -pub fn crash() { - op_shutdown(SHUTDOWN_crash); -} - -pub fn yield_slice() { - // SAFETY: This call is safe. 
- unsafe { sched_op(SchedOp::r#yield, 0) }; -} diff --git a/xen/src/io/mod.rs b/xen/src/io/mod.rs new file mode 100644 index 0000000..ad37fdf --- /dev/null +++ b/xen/src/io/mod.rs @@ -0,0 +1,35 @@ +use core::{ptr::NonNull, sync::atomic::AtomicU32}; + +use volatile::{VolatileFieldAccess, VolatilePtr}; + +use crate::io::ring::XenRing; + +pub mod ring; + +#[repr(C)] +#[derive(VolatileFieldAccess)] +pub struct XenConsInterface { + in_buffer: [u8; 1024], + out_buffer: [u8; 2048], + in_cons: u32, + in_prod: u32, + out_cons: u32, + out_prod: u32, +} + +impl XenConsInterface { + pub unsafe fn new(ptr: NonNull) -> VolatilePtr<'static, Self> { + unsafe { VolatilePtr::new(ptr) } + } + + pub fn to_ring<'a>(interface: VolatilePtr<'a, Self>) -> XenRing<'a> { + // SAFETY: Atomic pointers are built from valid pointers. + unsafe { + XenRing { + ring: interface.out_buffer().as_slice(), + cons: AtomicU32::from_ptr(interface.out_cons().as_raw_ptr().as_ptr()), + prod: AtomicU32::from_ptr(interface.out_prod().as_raw_ptr().as_ptr()), + } + } + } +} diff --git a/xen/src/io/ring.rs b/xen/src/io/ring.rs new file mode 100644 index 0000000..68c8736 --- /dev/null +++ b/xen/src/io/ring.rs @@ -0,0 +1,154 @@ +//! Xen paravirtualized ring buffer utilities (for PV Console and XenStore) +use core::{ + hint::{likely, unlikely}, + sync::atomic::{AtomicU32, Ordering}, +}; + +use volatile::VolatilePtr; + +#[derive(Clone, Copy, Debug)] +pub struct XenRing<'a> { + pub ring: VolatilePtr<'a, [u8]>, + pub cons: &'a AtomicU32, + pub prod: &'a AtomicU32, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum XenRingError { + /// Data is too large to fit in the ring buffer. 
+ TooLarge, + /// Consumer is not ready for receiving the payload + NotReady, + /// Misbehaving ring index + MisbehavingIndex, +} + +#[inline(always)] +fn available(prod: usize, cons: usize, len: usize) -> usize { + len - queued(prod, cons, len) - 1 +} + +#[inline(always)] +fn queued(prod: usize, cons: usize, len: usize) -> usize { + if prod < cons { + len - 1 + } else { + (prod - cons).min(len) + } +} + +impl XenRing<'_> { + pub fn capacity(&self) -> usize { + self.ring.len() - 1 + } + + pub fn available(&self) -> usize { + let cons = self.cons.load(Ordering::Acquire) as usize % self.ring.len(); + let prod = self.prod.load(Ordering::Acquire) as usize % self.ring.len(); + + available(prod, cons, self.ring.len()) + } + + pub fn queued(&self) -> usize { + let cons = self.cons.load(Ordering::Acquire) as usize % self.ring.len(); + let prod = self.prod.load(Ordering::Acquire) as usize % self.ring.len(); + + queued(prod, cons, self.ring.len()) + } + + pub fn read(&mut self, buffer: &mut [u8], exact: bool) -> Result { + // FIXME: Fix indexes (see write) + + if exact && buffer.len() >= self.ring.len() { + return Err(XenRingError::TooLarge); + } + + let cons = self.cons.load(Ordering::Acquire) as usize % self.ring.len(); + let prod = self.prod.load(Ordering::Acquire) as usize % self.ring.len(); + + let queued = queued(prod, cons, self.ring.len()); + + if unlikely(exact && queued < buffer.len()) { + return Err(XenRingError::NotReady); + } + + let to_read = queued.min(buffer.len()); + let dest_cons = (cons + to_read) % self.ring.len(); + + let buffer = &mut buffer[..to_read]; + + if likely(cons <= dest_cons) { + self.ring + .index(cons..dest_cons) + .copy_into_slice(&mut buffer[..to_read]); + } else { + /* + * Split the buffer in two parts, one that will be copied from + * the end of the ring buffer, another from the beginning. 
+ * + * [(parts.1)C P(parts.0)] + */ + + let parts = buffer.split_at_mut(self.ring.len() - cons); + self.ring.index(cons..).copy_into_slice(parts.0); + self.ring.index(..dest_cons).copy_into_slice(parts.1); + } + + self.cons + .compare_exchange( + cons as u32, + dest_cons as u32, + Ordering::Release, + Ordering::Relaxed, + ) + .map_err(|_| XenRingError::MisbehavingIndex)?; + + Ok(to_read) + } + + pub fn write(&mut self, buffer: &[u8]) -> Result<(), XenRingError> { + if unlikely(buffer.len() >= self.ring.len()) { + return Err(XenRingError::TooLarge); + } + + let cons = self.cons.load(Ordering::Acquire) as usize; + let prod = self.prod.load(Ordering::Acquire) as usize; + + let prod_idx = prod % self.ring.len(); + + let dest_prod = prod + buffer.len(); + let dest_prod_idx = dest_prod % self.ring.len(); + + if unlikely(available(prod, cons, self.ring.len()) < buffer.len()) { + return Err(XenRingError::NotReady); + } + + if likely(prod_idx <= dest_prod_idx) { + self.ring + .index(prod_idx..dest_prod_idx) + .copy_from_slice(buffer); + } else { + /* + * Split the buffer in two parts, one that will be copied at + * the end of the ring buffer, another at the beginning. + * + * [(parts.1)C P(parts.0)] + */ + + let parts = buffer.split_at(self.ring.len() - prod_idx); + self.ring.index(prod_idx..).copy_from_slice(parts.0); + self.ring.index(..dest_prod_idx).copy_from_slice(parts.1); + } + + self.prod + .compare_exchange( + prod as u32, + dest_prod as u32, + Ordering::Release, + Ordering::Relaxed, + ) + .map_err(|_| XenRingError::MisbehavingIndex)?; + + Ok(()) + } +} diff --git a/xen/src/lib.rs b/xen/src/lib.rs index f322bba..b4dafc3 100644 --- a/xen/src/lib.rs +++ b/xen/src/lib.rs @@ -8,9 +8,8 @@ * except according to those terms. 
*/ -#![allow(internal_features)] -#![cfg_attr(target_vendor = "xen", feature(lang_items))] #![no_std] +#![feature(likely_unlikely)] #[cfg(target_arch = "x86_64")] mod x86_64; @@ -22,5 +21,8 @@ mod aarch64; #[cfg(target_arch = "aarch64")] pub use self::aarch64::*; +pub mod io; + // export functionality -pub mod hypercall; +pub mod event; +pub mod hypercall; \ No newline at end of file diff --git a/xen-sys/src/start_info.rs b/xen/src/start_info.rs similarity index 79% rename from xen-sys/src/start_info.rs rename to xen/src/start_info.rs index cd155bf..249e745 100644 --- a/xen-sys/src/start_info.rs +++ b/xen/src/start_info.rs @@ -8,42 +8,40 @@ * except according to those terms. */ -use super::*; - #[repr(C)] pub struct start_info { /// "xen--" - pub magic: [c_char; 32], + pub magic: [u8; 32], /// total pages allocated to this domain - pub nr_pages: c_ulong, + pub nr_pages: u64, /// machine address of struct shared_info - pub shared_info: xen_pfn_t, + pub shared_info: u64, /// SIF_xxx flags pub flags: u32, /// machine page number of shared page - pub store_mfn: xen_pfn_t, + pub store_mfn: u64, /// event channel for store communication - pub store_evtchn: event_port, + pub store_evtchn: u32, /// console (dom0/domU) pub console: start_info_console, /// virtual address of page directory - pub pt_base: c_ulong, + pub pt_base: u64, /// number of bootstrap p.t. 
frames - pub nr_pt_frames: c_ulong, + pub nr_pt_frames: u64, /// virtual address of page frame list - pub mfn_list: c_ulong, + pub mfn_list: u64, /// virtual address of pre-loaded module /// PFN of pre-loaded module if SIF_MOD_START_PFN set in flags - pub mod_start: c_ulong, + pub mod_start: u64, /// size (bytes) of pre-loaded module - pub mod_len: c_ulong, + pub mod_len: u64, /// guest command line - pub cmd_line: [i8; 1024], + pub cmd_line: [u8; 1024], /// PFN range here covers both page table and P->M table frames /// First PFN forming initial P->M table - pub first_p2m_pfn: c_ulong, + pub first_p2m_pfn: u64, /// number of PFNs forming initial P->M table - pub nr_p2m_frames: c_ulong, + pub nr_p2m_frames: u64, } #[repr(C)] @@ -57,9 +55,9 @@ pub union start_info_console { #[derive(Clone, Copy)] pub struct start_info_console_domU { /// machine page number of console page - pub mfn: xen_pfn_t, + pub mfn: u64, /// event channel for console page - pub evtchn: event_port, + pub evtchn: u32, } #[repr(C)]