diff --git a/.github/workflows/prune_dune_modules.py b/.github/workflows/prune_dune_modules.py
new file mode 100755
index 00000000..fd311469
--- /dev/null
+++ b/.github/workflows/prune_dune_modules.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+import re
+import sys
+
+if len(sys.argv) < 3:
+    print("usage: prune_dune_modules.py <dune-file> <module1> [module2 ...]")
+    sys.exit(1)
+
+path = sys.argv[1]
+remove = set(sys.argv[2:])
+
+with open(path) as f:
+    text = f.read()
+
+# Matches: (modules ... )
+pattern = re.compile(r'\(modules\b([^)]*)\)')
+
+def prune(match):
+    modules = match.group(1).split()
+    kept = [m for m in modules if m not in remove]
+    return "(modules " + " ".join(kept) + ")"
+
+new_text = pattern.sub(prune, text)
+
+with open(path, "w") as f:
+    f.write(new_text)
diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml
index 95b6a1e1..e01f45d1 100644
--- a/.github/workflows/verify.yml
+++ b/.github/workflows/verify.yml
@@ -39,7 +39,7 @@ jobs:
       - name: Install RefinedRust type system
        run: eval $(opam env) && REFINEDRUST_ROOT=$PWD/verification/refinedrust ./verification/refinedrust/scripts/install-typesystem.sh
      - name: Check cargo version
-        run: cargo --version
+        run: cargo --version
      - name: Install RefinedRust stdlib
        run: eval $(opam env) && REFINEDRUST_ROOT=$PWD/verification/refinedrust ./verification/refinedrust/scripts/install-stdlib.sh
      - name: Exclude RefinedRust from dune build
@@ -55,4 +55,32 @@ jobs:
      - name: make devtools
        run: source "$HOME/.cargo/env" && eval $(opam env) && make devtools
      - name: Translate specified files using RefinedRust and check proofs
-        run: source "$HOME/.cargo/env" && eval $(opam env) && DUNEFLAGS="-j 1" make verify
+        run: |
+          set -o pipefail
+          (source "$HOME/.cargo/env" && eval $(opam env) && make generate_refinedrust_translation) 2>&1 | tee full-build.log
+      - name: Patch dune modules to exclude big proofs from CI
+        run: |
+          python3 .github/workflows/prune_dune_modules.py verification/rust_proofs/ace/proofs/dune \
+            proof_core_page_allocator_allocator_PageAllocator_add_memory_region \
+            proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
+            proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary \
+            proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size \
+            proof_core_page_allocator_page_Page_T_write \
+            proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read \
+            proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \
+            proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide
+      - name: Compile Rocq proofs
+        run: |
+          (
+            set -o pipefail
+            source "$HOME/.cargo/env"
+            eval "$(opam env)"
+            cd verification
+            DUNEFLAGS="-j 1 --display verbose" dune build
+          ) 2>&1 | tee full-build.log
+      - name: Upload full build log
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: full-build-log
+          path: full-build.log
diff --git a/Makefile b/Makefile
index d7651f06..c32084af 100644
--- a/Makefile
+++ b/Makefile
@@ -96,6 +96,9 @@ tools: setup
 verify:
 	rm -rf $(ACE_DIR)/security_monitor/verify/
 	PATH="$(RISCV_GNU_TOOLCHAIN_WORK_DIR)/bin:$(PATH)" ACE_DIR=$(ACE_DIR) LINUX_IMAGE=$(LINUX_IMAGE) CROSS_COMPILE=$(CROSS_COMPILE) PLATFORM_RISCV_XLEN=$(PLATFORM_RISCV_XLEN) PLATFORM_RISCV_ISA=$(PLATFORM_RISCV_ISA) PLATFORM_RISCV_ABI=$(PLATFORM_RISCV_ABI) $(MAKE) -C verification verify
+generate_refinedrust_translation:
+	rm -rf $(ACE_DIR)/security_monitor/verify/
+	PATH="$(RISCV_GNU_TOOLCHAIN_WORK_DIR)/bin:$(PATH)" ACE_DIR=$(ACE_DIR) \
LINUX_IMAGE=$(LINUX_IMAGE) CROSS_COMPILE=$(CROSS_COMPILE) PLATFORM_RISCV_XLEN=$(PLATFORM_RISCV_XLEN) PLATFORM_RISCV_ISA=$(PLATFORM_RISCV_ISA) PLATFORM_RISCV_ABI=$(PLATFORM_RISCV_ABI) $(MAKE) -C verification generate clean: rm -rf $(ACE_DIR) diff --git a/confidential-vms/linux_vm/Makefile b/confidential-vms/linux_vm/Makefile index f011d629..9443f654 100644 --- a/confidential-vms/linux_vm/Makefile +++ b/confidential-vms/linux_vm/Makefile @@ -14,7 +14,7 @@ LINUX_VM_ROOTFS_SOURCE_DIR ?= $(CONFIDENTIAL_VMS_LINUX_SOURCE_DIR)/hypervisor_ LINUX_VM_BUILDROOT_SOURCE_DIR ?= $(CONFIDENTIAL_VMS_LINUX_SOURCE_DIR)/../../hypervisor/buildroot LINUX_VM_BUILDROOT_WORK_DIR ?= $(CONFIDENTIAL_VMS_LINUX_WORK_DIR)/buildroot LINUX_VM_BUILDROOT_ROOTFS ?= $(LINUX_VM_BUILDROOT_WORK_DIR)/images/rootfs.ext2 -LINUX_VM_BUILDROOT_ROOTFS_SIZE ?= "256M" +LINUX_VM_BUILDROOT_ROOTFS_SIZE ?= "128M" LINUX_VM_OVERLAY_SOURCE_DIR ?= $(CONFIDENTIAL_VMS_LINUX_SOURCE_DIR)/overlay LINUX_VM_OVERLAY_WORK_DIR ?= $(CONFIDENTIAL_VMS_LINUX_WORK_DIR)/overlay LINUX_VM_OVERLAY_WORK_ROOT_DIR ?= $(LINUX_VM_OVERLAY_WORK_DIR)/root diff --git a/confidential-vms/linux_vm/configurations/qemu_riscv64_virt_defconfig b/confidential-vms/linux_vm/configurations/qemu_riscv64_virt_defconfig index f5606e5e..85e49bdb 100644 --- a/confidential-vms/linux_vm/configurations/qemu_riscv64_virt_defconfig +++ b/confidential-vms/linux_vm/configurations/qemu_riscv64_virt_defconfig @@ -34,7 +34,7 @@ BR2_TARGET_ROOTFS_EXT2=y BR2_TARGET_ROOTFS_EXT2_2=n BR2_TARGET_ROOTFS_EXT2_3=n BR2_TARGET_ROOTFS_EXT2_4=y -BR2_TARGET_ROOTFS_EXT2_SIZE="5G" +BR2_TARGET_ROOTFS_EXT2_SIZE="1G" # Kernel BR2_LINUX_KERNEL=y diff --git a/hypervisor/Makefile b/hypervisor/Makefile index adce26b2..2a5bb1e6 100644 --- a/hypervisor/Makefile +++ b/hypervisor/Makefile @@ -14,7 +14,7 @@ BUILDROOT_SOURCE_DIR ?= $(MAKEFILE_SOURCE_DIR)/buildroot HYPERVISOR_BUILDROOT_CONFIG_DIR ?= $(HYPERVISOR_CONFIGURATION_DIR)/qemu_riscv64_virt_defconfig HYPERVISOR_LINUX_CONFIG ?= $(HYPERVISOR_CONFIGURATION_DIR)/linux64-defconfig HYPERVISOR_BUILDROOT_OVERRIDE_DIR ?= $(HYPERVISOR_CONFIGURATION_DIR)/package_override.dev -HYPERVISOR_ROOTFS_SIZE ?= "1G" +HYPERVISOR_ROOTFS_SIZE ?= "512M" HYPERVISOR_OVERLAY_DIR ?= $(HYPERVISOR_WORK_DIR)/overlay HYPERVISOR_OVERLAY_ROOT_DIR ?= $(HYPERVISOR_OVERLAY_DIR)/root HYPERVISOR_PATCHES_DIR ?= $(MAKEFILE_SOURCE_DIR)/patches diff --git a/security-monitor/src/confidential_flow/handlers/shutdown/shutdown_vm.rs b/security-monitor/src/confidential_flow/handlers/shutdown/shutdown_vm.rs index ed172512..f4c30ad1 100644 --- a/security-monitor/src/confidential_flow/handlers/shutdown/shutdown_vm.rs +++ b/security-monitor/src/confidential_flow/handlers/shutdown/shutdown_vm.rs @@ -14,6 +14,7 @@ use crate::core::control_data::{ConfidentialHart, ConfidentialHartRemoteCommand, /// shutdown (lifecycle state `Shutdown`). To do so, we send `Shutdown IPI` to all confidential harts. The last /// confidential hart that shutdowns itself, will remove the entire confidential VM from the control data. 
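// Editor's note: a minimal, self-contained sketch (hypothetical names, not the
// actual ACE implementation) of the "last hart to shut down cleans up" pattern
// the comment above describes, using an atomic countdown across harts:
//
//     use core::sync::atomic::{AtomicUsize, Ordering};
//
//     pub struct VmShutdown {
//         remaining_harts: AtomicUsize,
//     }
//
//     impl VmShutdown {
//         /// Returns true for exactly one caller: the last hart to shut down,
//         /// which is then responsible for removing the confidential VM from
//         /// the control data.
//         pub fn hart_shut_down(&self) -> bool {
//             self.remaining_harts.fetch_sub(1, Ordering::AcqRel) == 1
//         }
//     }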
#[derive(Clone, PartialEq)] +#[rr::skip] pub struct ShutdownRequest { calling_hart_id: usize, } diff --git a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/fence_i.rs b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/fence_i.rs index d4a8f108..90d3d242 100644 --- a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/fence_i.rs +++ b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/fence_i.rs @@ -10,6 +10,7 @@ use crate::core::control_data::{ /// Handles a request from one confidential hart to execute fence.i instruction on remote confidential harts. #[derive(Clone, PartialEq)] +#[rr::skip] pub struct RemoteFenceI { ipi: Ipi, } diff --git a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/hfence_gvma_vmid.rs b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/hfence_gvma_vmid.rs index 5404cc83..7d6888f4 100644 --- a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/hfence_gvma_vmid.rs +++ b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/hfence_gvma_vmid.rs @@ -8,6 +8,7 @@ use crate::core::memory_layout::ConfidentialVmPhysicalAddress; /// An inter hart request sent by the security monitor to clear G-stage level cached address translations. #[derive(Clone, PartialEq)] +#[rr::skip] pub struct RemoteHfenceGvmaVmid { ipi: Ipi, _start_address: Option, diff --git a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/ipi.rs b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/ipi.rs index 998ff785..2a0ab9e2 100644 --- a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/ipi.rs +++ b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/ipi.rs @@ -10,6 +10,7 @@ use crate::core::control_data::{ /// Handles a request from one confidential hart to execute IPI on other confidential harts. #[derive(PartialEq, Debug, Clone)] +#[rr::skip] pub struct Ipi { hart_mask: usize, hart_mask_base: usize, diff --git a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma.rs b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma.rs index 43d8a32c..787e6fd7 100644 --- a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma.rs +++ b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma.rs @@ -11,6 +11,7 @@ use crate::core::control_data::{ /// Handles a request from one confidential hart to execute sfence.vma instruction on remote confidential harts. #[derive(Clone, PartialEq)] +#[rr::skip] pub struct RemoteSfenceVma { ipi: Ipi, _start_address: usize, diff --git a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma_asid.rs b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma_asid.rs index 39c2ada6..5fc4c224 100644 --- a/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma_asid.rs +++ b/security-monitor/src/confidential_flow/handlers/symmetrical_multiprocessing/sfence_vma_asid.rs @@ -12,6 +12,7 @@ use crate::core::control_data::{ /// Handles a request from one confidential hart to execute sfence.vma instruction on remote confidential harts. It represents an inter hart /// request. 
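// Editor's note: `#[rr::skip]`, added to the remote-command types throughout
// this change, excludes the annotated item from RefinedRust translation, so no
// Rocq definitions or proof obligations are generated for it. A minimal
// illustration (hypothetical type):
//
//     #[rr::skip] // not translated; nothing about this type is verified
//     pub struct UnverifiedScratchpad {
//         raw: usize,
//     }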
#[derive(Clone, PartialEq)] +#[rr::skip] pub struct RemoteSfenceVmaAsid { ipi: Ipi, _start_address: usize, diff --git a/security-monitor/src/core/architecture/riscv/control_status_registers.rs b/security-monitor/src/core/architecture/riscv/control_status_registers.rs index ecb1b115..6746d54b 100644 --- a/security-monitor/src/core/architecture/riscv/control_status_registers.rs +++ b/security-monitor/src/core/architecture/riscv/control_status_registers.rs @@ -314,6 +314,7 @@ pub const CSR: &ControlStatusRegister = &ControlStatusRegister { }; #[derive(Copy, Clone)] +#[rr::skip] pub struct ReadWriteRiscvCsr(usize); impl ReadWriteRiscvCsr { diff --git a/security-monitor/src/core/architecture/riscv/general_purpose_registers.rs b/security-monitor/src/core/architecture/riscv/general_purpose_registers.rs index a140aec0..fd48024b 100644 --- a/security-monitor/src/core/architecture/riscv/general_purpose_registers.rs +++ b/security-monitor/src/core/architecture/riscv/general_purpose_registers.rs @@ -40,6 +40,7 @@ impl GeneralPurposeRegisters { #[allow(unused)] #[allow(non_camel_case_types)] #[repr(usize)] +#[rr::skip] pub enum GeneralPurposeRegister { zero = 0, ra, diff --git a/security-monitor/src/core/architecture/riscv/hart_lifecycle_state.rs b/security-monitor/src/core/architecture/riscv/hart_lifecycle_state.rs index 187dba15..dad80da5 100644 --- a/security-monitor/src/core/architecture/riscv/hart_lifecycle_state.rs +++ b/security-monitor/src/core/architecture/riscv/hart_lifecycle_state.rs @@ -9,6 +9,7 @@ /// additional lifecycle state `Shutdown` that represents a final state of the confidential hart that has been shutdown /// as part of the `VM shutdown` procedure. #[derive(PartialEq, Clone)] +#[rr::skip] pub enum HartLifecycleState { Started, Stopped, diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs index 3a550edb..8ac166c3 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs @@ -8,6 +8,23 @@ #[repr(u8)] #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)] #[rr::refined_by("page_size")] +// PEq +#[rr::derive_instantiate("PEq" := "λ a b, bool_decide (a = b)")] +// Eq +#[rr::derive_instantiate("PEq_refl" := #proof "intros ??; solve_goal")] +#[rr::derive_instantiate("PEq_sym" := #proof "intros ???; solve_goal")] +#[rr::derive_instantiate("PEq_trans" := #proof "intros ????; solve_goal")] +// POrd +#[rr::derive_instantiate("POrd" := "λ a b, Some (page_size_cmp a b)")] +#[rr::derive_instantiate("POrd_eq_cons" := #proof "intros ? [] []; simpl; done")] +// Ord +#[rr::derive_instantiate("Ord" := "page_size_cmp")] +#[rr::derive_instantiate("Ord_POrd" := #proof "intros ???; solve_goal")] +#[rr::derive_instantiate("Ord_lt_trans" := #proof "intros ????; solve_goal")] +#[rr::derive_instantiate("Ord_eq_trans" := #proof "intros ????; solve_goal")] +#[rr::derive_instantiate("Ord_gt_trans" := #proof "intros ????; solve_goal")] +#[rr::derive_instantiate("Ord_leibniz" := #proof "intros ? 
[] []; simpl; done")] +#[rr::derive_instantiate("Ord_antisym" := #proof "intros ???; solve_goal")] pub enum PageSize { #[rr::pattern("Size4KiB")] Size4KiB, diff --git a/security-monitor/src/core/control_data/confidential_hart_remote_command.rs b/security-monitor/src/core/control_data/confidential_hart_remote_command.rs index ed049b04..3ac3c7a1 100644 --- a/security-monitor/src/core/control_data/confidential_hart_remote_command.rs +++ b/security-monitor/src/core/control_data/confidential_hart_remote_command.rs @@ -14,6 +14,7 @@ use crate::core::control_data::ConfidentialHart; /// the receiver hart is running, it is interrupted (IPI) and executes the command. If the hart is not running, the command is buffered and /// executed on the next time the receiver is scheduled to run. #[derive(Clone, PartialEq)] +#[rr::skip] pub enum ConfidentialHartRemoteCommand { Ipi(Ipi), RemoteFenceI(RemoteFenceI), diff --git a/security-monitor/src/core/control_data/confidential_vm_measurement.rs b/security-monitor/src/core/control_data/confidential_vm_measurement.rs index 381a9a26..fbe3e8c4 100644 --- a/security-monitor/src/core/control_data/confidential_vm_measurement.rs +++ b/security-monitor/src/core/control_data/confidential_vm_measurement.rs @@ -47,6 +47,7 @@ impl StaticMeasurements { } } +#[rr::skip] impl core::fmt::Debug for StaticMeasurements { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { self.0.iter().enumerate().for_each(|(id, value)| { diff --git a/security-monitor/src/core/memory_layout/confidential_memory_address.rs b/security-monitor/src/core/memory_layout/confidential_memory_address.rs index d9a6c8da..3955f6bd 100644 --- a/security-monitor/src/core/memory_layout/confidential_memory_address.rs +++ b/security-monitor/src/core/memory_layout/confidential_memory_address.rs @@ -6,7 +6,7 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset}; /// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region. #[repr(transparent)] -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone, Copy)] /// Model: The memory address is represented by the location in memory. #[rr::refined_by("l" : "loc")] /// We require the ghost state for the global memory layout to be available. @@ -70,12 +70,12 @@ impl ConfidentialMemoryAddress { /// Precondition: The offset address is in the given range. #[rr::requires("self.2 + offset_in_bytes < upper_bound.2")] /// Precondition: The maximum (and thus the offset address) is in the confidential memory range. - #[rr::requires("upper_bound.2 < MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("upper_bound.2 ≤ MEMORY_CONFIG.(conf_end).2")] /// Postcondition: The offset pointer is in the confidential memory range. #[rr::ensures("ret = self +ₗ offset_in_bytes")] pub fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result { - let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInConfidentialMemory())?; - Ok(ConfidentialMemoryAddress(pointer)) + let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInConfidentialMemory())?; + Ok(Self::new(pointer)) } /// Reads usize-sized sequence of bytes from the confidential memory region. 
diff --git a/security-monitor/src/core/memory_layout/mod.rs b/security-monitor/src/core/memory_layout/mod.rs
index ef16303a..a66757b1 100644
--- a/security-monitor/src/core/memory_layout/mod.rs
+++ b/security-monitor/src/core/memory_layout/mod.rs
@@ -149,10 +149,10 @@ impl MemoryLayout {
     /// Offsets an address in the confidential memory by a given number of bytes. Returns an error if the resulting
     /// address is outside the confidential memory region or exceeds the given upper bound.
-    #[rr::trust_me]
+    #[rr::only_spec]
     #[rr::ok]
     /// Precondition: The offset address is in confidential memory.
-    #[rr::requires("address.2 + offset_in_bytes < self.(conf_end).2")]
+    #[rr::requires("address.2 + offset_in_bytes < upper_bound.2")]
     /// Precondition: The bounds we are checking are within confidential memory.
     #[rr::requires("upper_bound.2 ≤ self.(conf_end).2")]
     /// Postcondition: Then we can correctly offset the address and ensure it is in confidential
@@ -162,7 +162,7 @@ impl MemoryLayout {
         &self, address: &ConfidentialMemoryAddress, offset_in_bytes: usize, upper_bound: *const usize,
     ) -> Result<ConfidentialMemoryAddress, Error> {
         ensure!(upper_bound <= self.confidential_memory_end, Error::AddressNotInConfidentialMemory())?;
-        Ok(self.confidential_address_at_offset(address, offset_in_bytes)?)
+        Ok(address.add(offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInConfidentialMemory())?)
     }
 
     /// Offsets an address in the non-confidential memory by a given number of bytes. Returns an error if the resulting
@@ -199,8 +199,8 @@ impl MemoryLayout {
         // We can safely cast the below offset to usize because the constructor guarantees that the confidential memory
         // range is valid, and so the memory size must be a valid usize
         let memory_size = ptr_byte_offset(self.confidential_memory_end, self.confidential_memory_start) as usize;
-        let usize_alligned_offsets = (0..memory_size).step_by(core::mem::size_of::<usize>());
-        usize_alligned_offsets.for_each(|offset_in_bytes| {
+        let usize_aligned_offsets = (0..memory_size).step_by(core::mem::size_of::<usize>());
+        usize_aligned_offsets.for_each(|offset_in_bytes| {
             let _ = ptr_byte_add_mut(self.confidential_memory_start, offset_in_bytes, self.confidential_memory_end)
                 .and_then(|ptr| Ok(unsafe { ptr.write_volatile(0) }));
         });
diff --git a/security-monitor/src/core/page_allocator/allocator.rs b/security-monitor/src/core/page_allocator/allocator.rs
index c002abd6..6a25acac 100644
--- a/security-monitor/src/core/page_allocator/allocator.rs
+++ b/security-monitor/src/core/page_allocator/allocator.rs
@@ -23,30 +23,31 @@ static PAGE_ALLOCATOR: Once<RwLock<PageAllocator>> = Once::new();
 /// token of the maximum size, and it will then be stored in the root node. It is reasonable as long as we do not support systems that have
 /// more memory than 128TiB. For such systems, we must add larger page sizes.
 /// Specification:
-/// We give a "ghost name" γ to the allocator, which is used to link up page tokens allocated
-/// with this allocator.
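// Editor's note: `#[rr::refined_by("x" : "T")]` ties a Rust type to a
// mathematical model of Rocq type T, and `#[rr::field]` maps each field onto
// that model. A minimal illustration in the same style (hypothetical type): a
// counter refined by the integer it stores:
//
//     #[rr::refined_by("n" : "Z")]
//     pub struct Counter {
//         #[rr::field("n")]
//         value: usize,
//     }
//
// For `PageAllocator` below, the refinement is just the unit value: all the
// interesting state lives in the invariants over the abstracted root node.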
#[rr::refined_by("()" : "unit")] +#[rr::context("onceG Σ memory_layout")] /// Invariant: We abstract over the root node -#[rr::exists("node" : "page_storage_node")] +#[rr::exists("node" : "page_storage_node", "base_addr" : "Z", "node_size" : "page_size")] /// Invariant: the allocator tree covers the first 128TiB of memory -#[rr::invariant("node.(base_address) = 0")] +#[rr::invariant("Hroot_base": "node.(base_address) = 0")] /// Invariant: the page size is the largest -#[rr::invariant("node.(max_node_size) = Size128TiB")] +#[rr::invariant("Hroot_sz": "node.(max_node_size) = Size128TiB")] +#[rr::invariant("node_size = node.(max_node_size)")] +#[rr::invariant("base_addr = node.(base_address)")] pub struct PageAllocator { - #[rr::field("node.(base_address)")] + #[rr::field("base_addr")] base_address: usize, - #[rr::field("node.(max_node_size)")] + #[rr::field("node_size")] page_size: PageSize, #[rr::field("node")] root: PageStorageTreeNode, } -#[rr::skip] #[rr::context("onceG Σ memory_layout")] #[rr::context("onceG Σ unit")] impl PageAllocator { const NOT_INITIALIZED: &'static str = "Bug. Page allocator not initialized."; + #[rr::only_spec] /// Initializes the global memory allocator with the given memory region as confidential memory. Must be called only once during the /// system initialization. /// @@ -57,21 +58,18 @@ impl PageAllocator { /// # Safety /// /// Caller must pass the ownership of the memory region [memory_start, memory_end). - #[rr::params("conf_start", "conf_end", "vs", "MEMORY_LAYOUT")] - #[rr::args("conf_start", "conf_end")] - - /// Precondition: The start address needs to be aligned to the minimum page size. - #[rr::requires("Hstart_aligned" : "conf_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] - /// Precondition: The minimum page size divides the memory size. - #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (conf_end.2 - conf_start.2)")] - /// Precondition: The memory range should not be negative. - #[rr::requires("Hstart_lt" : "conf_start.2 ≤ conf_end.2")] + #[rr::params("vs", "MEMORY_CONFIG")] + /// Precondition: The start and end addresses need to be aligned to the minimum page size. + #[rr::requires("memory_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + #[rr::requires("memory_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + /// Precondition: The memory range is positive. + #[rr::requires("memory_start.2 < memory_end.2")] - /// Precondition: The memory range does not exceed the maximum page size. - #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + /// Precondition: The memory range is within the region covered by the page allocator. + #[rr::requires("memory_end.2 ≤ page_size_in_bytes_Z Size128TiB")] - /// Precondition: We have ownership of the memory range, having (conf_end - conf_start) bytes. - #[rr::requires(#type "conf_start" : "vs" @ "array_t (int u8) (Z.to_nat (conf_end.2 - conf_start.2))")] + /// Precondition: We have ownership of the memory range, having (memory_end - memory_start) bytes. + #[rr::requires(#type "memory_start" : "vs" @ "array_t (Z.to_nat (memory_end.2 - memory_start.2)) (int u8)")] /// Precondition: The memory needs to be in confidential memory #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] @@ -80,10 +78,9 @@ impl PageAllocator { /// Precondition: The page allocator should be uninitialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" None")] - /// Postcondition: The page allocator is initialized. 
#[rr::ensures(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] - #[rr::returns("Ok(#())")] + #[rr::returns("Ok(())")] pub unsafe fn initialize(memory_start: ConfidentialMemoryAddress, memory_end: *const usize) -> Result<(), Error> { ensure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; let mut page_allocator = Self::empty(); @@ -95,46 +92,91 @@ impl PageAllocator { /// Specification: /// Postcondition: an initialized memory allocator is returned - #[rr::returns("()")] + #[rr::verify] fn empty() -> Self { Self { root: PageStorageTreeNode::empty(), base_address: 0, page_size: PageSize::largest() } } - #[rr::params("mstart", "mend", "γ", "vs", "MEMORY_CONFIG", "mreg")] - #[rr::args("(#(), γ)", "mstart", "mend")] - - /// Precondition: The start address needs to be aligned to the minimum page size. - #[rr::requires("Hstart_aligned" : "mstart `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + /// Find the largest page size we can create a page at. + /// Precondition: The current address is aligned to the given page size. + #[rr::requires("Haddr_aligned" : "address `aligned_to` page_size_in_bytes_nat page_size")] + /// Precondition: The current address is in bounds of the given memory region. + #[rr::requires("address.2 < memory_region_end.2")] + /// Precondition: The current address and the bound are in bounds of the global memory layout. + #[rr::requires("memory_layout.(conf_start).2 ≤ address.2")] + #[rr::requires("memory_region_end.2 ≤ memory_layout.(conf_end).2")] + /// Postcondition: The current address is aligned to the returned page size. + #[rr::ensures("address `aligned_to` page_size_in_bytes_nat ret")] + /// Postcondition: Either the returned page size is the smallest page size, or a page at the + /// returned size will fit into the memory region. + #[rr::ensures("ret = Size4KiB ∨ address.2 + page_size_in_bytes_nat ret ≤ memory_region_end.2")] + fn find_largest_page_size( + memory_layout: &MemoryLayout, mut page_size: PageSize, address: ConfidentialMemoryAddress, memory_region_end: *const usize, + ) -> PageSize { + // Let's find the largest possible size of a page that could align to this address. + // According to the RISC-V spec, pages must be aligned to their size. + while let Some(larger_size) = page_size.larger().filter( + #[rr::returns("bool_decide ({address} `aligned_to` page_size_in_bytes_nat larger_size)")] + |larger_size| address.is_aligned_to(larger_size.in_bytes()), + ) { + #[rr::inv_vars("page_size")] + #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")] + #[rr::ignore] + #[allow(unused)] + || {}; + page_size = larger_size; + } + // After the loop, we have the largest possible `page_size` for which the `address` is + // well-aligned. + + // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment, + // because the larger pages sizes are multiples of the smaller page sizes. + while memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes() - 1, memory_region_end).is_err() + && let Some(smaller_size) = page_size.smaller() + { + #[rr::inv_vars("page_size")] + #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")] + #[rr::ignore] + #[allow(unused)] + || {}; + page_size = smaller_size; + } + // After the loop, we know that either no smaller page size exists, or a page at `page_size` fits in the memory region. - /// Precondition: The minimum page size divides the memory size. 
- #[rr::requires("Hsz_div" : "(page_size_in_bytes_nat Size4KiB) | (mend.2 - mstart.2)")] + page_size + } + #[rr::params("vs", "MEMORY_CONFIG")] + /// Precondition: The start and end addresses need to be aligned to the minimum page size. + #[rr::requires("Hstart_aligned" : "memory_region_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] + #[rr::requires("Hend_aligned" : "memory_region_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] /// Precondition: The memory range is positive. - #[rr::requires("Hstart_lt" : "mstart.2 < mend.2")] - - /// Precondition: The memory range is within the region covered by the memory allocator. - #[rr::requires("mend.2 ≤ page_size_in_bytes_Z Size128TiB")] + #[rr::requires("Hstart_lt" : "memory_region_start.2 < memory_region_end.2")] /// Precondition: We have ownership of the memory range, having (mend - mstart) bytes. - #[rr::requires(#type "mstart" : "vs" @ "array_t (int u8) (Z.to_nat (mend.2 - mstart.2))")] + #[rr::requires(#type "memory_region_start" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - memory_region_start.2)) (int u8)")] /// Precondition: The memory layout needs to be initialized #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: the whole memory region should be part of confidential memory - #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ mstart.2")] - #[rr::requires("mend.2 ≤ MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ memory_region_start.2")] + #[rr::requires("memory_region_end.2 ≤ MEMORY_CONFIG.(conf_end).2")] + + #[rr::observe("self.ghost": "()")] - /// Postcondition: There exists some correctly initialized page assignment. - #[rr::observe("γ": "()")] + #[rr::ok] + /// Precondition: The memory range is within the region covered by the memory allocator. + #[rr::requires("memory_region_end.2 ≤ page_size_in_bytes_Z Size128TiB")] + #[rr::ensures("ret = tt")] unsafe fn add_memory_region( &mut self, memory_region_start: ConfidentialMemoryAddress, memory_region_end: *const usize, ) -> Result<(), Error> { - debug!("Memory tracker: adding memory region: 0x{:x} - 0x{:x}", memory_region_start.as_usize(), memory_region_end as usize); + //debug!("Memory tracker: adding memory region: 0x{:x} - 0x{:x}", memory_region_start.as_usize(), memory_region_end as usize); assert!(memory_region_start.is_aligned_to(PageSize::smallest().in_bytes())); assert!(memory_region_end.is_aligned_to(PageSize::smallest().in_bytes())); assert!(memory_region_start.as_usize() < memory_region_end as usize); - // Page allocator supports maximum one page of largest size. - ensure_not!(memory_region_start.offset_from(memory_region_end) > self.page_size.in_bytes() as isize, Error::TooMuchMemory())?; + // The page allocator can only handle memory regions up to the largest page size. + ensure_not!(memory_region_end as usize > self.page_size.in_bytes(), Error::TooMuchMemory())?; // Our strategy is to create as few page tokens as possible to keep the memory overhead as low as possible. Therefore, we prefer to // create page tokens for the largest page size when possible. We use a greedy approach. We look for the largest possible page that @@ -142,7 +184,7 @@ impl PageAllocator { // keep increasing it until we find the largest possible page size. Then, we keep decreasing the page size until we reach the end of // the memory region. 
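// Editor's note: a worked example (illustrative addresses only) of the greedy
// strategy described above, with 4 KiB and 2 MiB page sizes and the region
// [0x20_1000, 0x60_0000):
//
//     [0x20_1000, 0x40_0000)  511 tokens of 4 KiB (the address is not yet 2 MiB-aligned)
//     [0x40_0000, 0x60_0000)  one 2 MiB token (aligned, and it fits exactly)
//
// so a single large token replaces 512 small ones wherever alignment and the
// region bound allow it.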
let memory_layout = MemoryLayout::read(); - let mut memory_address = Some(memory_region_start); + let mut address = memory_region_start; let mut page_size = PageSize::smallest(); // We might have to create a few tokens of 4KiB until we reach the address at which we can fit a 2MiB page. Then, we might have to @@ -162,53 +204,56 @@ impl PageAllocator { // || | | | | | | | || | | | | | | | || | | | | | | | || | | | | | | | || ... // ^1 GiB ^2 MiB ^2 MiB ^2 MiB ^memory_region_end - // According to the RISC-V spec, pages must be aligned to their size. - let is_address_page_aligned = - |address: &ConfidentialMemoryAddress, page_size: &PageSize| address.is_aligned_to(page_size.in_bytes()); - - // Page can be created only if all bytes are belonging to the given memory region - let can_create_page = |address: &ConfidentialMemoryAddress, page_size: &PageSize| { - let page_last_address = page_size.in_bytes() - 1; - memory_layout.confidential_address_at_offset_bounded(&address, page_last_address, memory_region_end).is_ok() - }; - - while let Some(address) = memory_address.take() { - // Let's find the largest possible size of a page that could align to this address. - while let Some(larger_size) = page_size.larger().filter(|larger_size| is_address_page_aligned(&address, &larger_size)) { - page_size = larger_size; - } - // Now let's find the largest size of a page that really fits in the given memory region. We do not have to check the alignment, - // because the larger pages sizes are multiplies of the smaller page sizes. - while let Some(smaller_size) = page_size.smaller().filter(|smaller_size| !can_create_page(&address, &smaller_size)) { - page_size = smaller_size; - } - // The following line ensures that the while loop will complete because, regardless of whether we manage to create a page token - // or not, we will increment the `memory_address` in each loop so that it eventually passes the end of the given memory region. - memory_address = memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes(), memory_region_end).ok(); - // If the next memory address (`memory_address`) is still in the memory range, then we are sure we can create the page token. - // Otherwise, we must check the boundary condition: Are we creating the last page token over a memory whose last byte - // (`address`+`page_size.in_bytes()`) is next to the end of the memory region (`memory_region_end`)? 
-            if memory_address.is_some() || can_create_page(&address, &page_size) {
-                let new_page_token = unsafe { Page::<UnAllocated>::init(address, page_size.clone()) };
+        let node = &mut self.root;
+        loop {
+            #[rr::params("γ")]
+            #[rr::inv_vars("page_size", "address", "node")]
+            #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")]
+            #[rr::inv("memory_region_start.2 ≤ address.2")]
+            #[rr::inv("address.2 < memory_region_end.2")]
+            // Invariant: the borrow variable stays the same, but we do not track the state of the node
+            #[rr::inv("γ = node.ghost")]
+            // Invariant: the base address and node size stay unchanged
+            #[rr::inv("node.cur.(base_address) = 0")]
+            #[rr::inv("node.cur.(max_node_size) = Size128TiB")]
+            // Invariant: Remaining memory ownership
+            #[rr::exists("vs")]
+            #[rr::inv(#type "address" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - address.2)) (int u8)")]
+            #[rr::ignore]
+            #[allow(unused)]
+            || {};
+
+            // Get the largest page size for which `address` is well-aligned and that fits into the memory region.
+            page_size = Self::find_largest_page_size(memory_layout, page_size, address, memory_region_end);
+
+            // Check that we can actually create the page
+            if memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes() - 1, memory_region_end).is_ok() {
+                let new_page_token = unsafe { Page::<UnAllocated>::init(address, page_size) };
                 // NOTE: We show that the page token is within the range of the allocator
-                self.root.store_page_token(self.base_address, self.page_size, new_page_token);
+                node.store_page_token(self.base_address, self.page_size, new_page_token);
             }
+
+            // The following lines ensure that the loop will progress because, regardless of whether we manage to create a page token
+            // or not, we increment `address` in each iteration so that it eventually passes the end of the given memory region.
+            let Ok(new_addr) = memory_layout.confidential_address_at_offset_bounded(&address, page_size.in_bytes(), memory_region_end)
+            else {
+                // we passed beyond the memory region
+                break;
+            };
+            address = new_addr;
         }
         Ok(())
     }
 
+    #[rr::only_spec]
     /// Returns a page token that has ownership over an unallocated memory region of the requested size. Returns an error if it could not
     /// obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the requested
     /// criteria.
-    /// Specification:
-    #[rr::params("sz")]
-    #[rr::args("sz")]
     /// Precondition: We require the page allocator to be initialized.
     #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")]
     /// Postcondition: If a page is returned, it has the right size.
-    #[rr::exists("res")]
-    #[rr::ensures("if_Ok (λ pg, pg.(page_sz) = sz)")]
-    #[rr::returns("<#>@{result} res")]
+    #[rr::ok]
+    #[rr::ensures("ret.(page_sz) = page_size_to_allocate")]
     pub fn acquire_page(page_size_to_allocate: PageSize) -> Result<Page<UnAllocated>, Error> {
         Self::try_write(|page_allocator| {
             let base_address = page_allocator.base_address;
@@ -217,25 +262,35 @@ impl PageAllocator {
         })?
     }
 
+    #[rr::only_spec]
     /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent to deallocation of the
     /// physical memory region owned by the returned page tokens. The given vector might contain pages of arbitrary sizes.
-    #[rr::params("pages")]
-    #[rr::args("<#> pages")]
     /// Precondition: We require the page allocator to be initialized.
     #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")]
     pub fn release_pages(released_pages: Vec<Page<UnAllocated>>) {
         let _ = Self::try_write(|page_allocator| {
             let base_address = page_allocator.base_address;
             let page_size = page_allocator.page_size;
-            released_pages.into_iter().for_each(|page_token| {
-                // NOTE: we show that the token is within range of the allocator.
-                page_allocator.root.store_page_token(base_address, page_size, page_token);
-            });
+            let root_node = &mut page_allocator.root;
+            for page_token in released_pages {
+                #[rr::params("γ")]
+                #[rr::inv_vars("root_node", "page_size", "base_address")]
+                #[rr::inv("base_address = 0%Z")]
+                #[rr::inv("page_size = Size128TiB")]
+                #[rr::inv("root_node.ghost = γ")]
+                #[rr::ignore]
+                #[allow(unused)]
+                || {};
+                // NOTE: we show that the token is within range of the allocator, using the
+                // invariant of the page token.
+                root_node.store_page_token(base_address, page_size, page_token);
+            }
             Ok(())
         })
         .inspect_err(|_| debug!("Memory leak: failed to store released pages in the page allocator"));
     }
 
+    #[rr::skip]
     /// Returns a mutable reference to the PageAllocator after obtaining a lock on the mutex.
     fn try_write<F, O>(op: O) -> Result<F, Error>
     where O: FnOnce(&mut RwLockWriteGuard<'static, PageAllocator>) -> Result<F, Error> {
@@ -246,10 +301,7 @@ impl PageAllocator {
 
 /// A node of a tree data structure that stores page tokens and maintains additional metadata that simplifies acquisition and
 /// release of page tokens.
 /// Specification:
-/// A node is refined by the size of this node,
-/// its base address,
-/// and the logical allocation state.
-// TODO: consider using separate public and private interfaces
+#[rr::context("onceG Σ memory_layout")]
 #[rr::refined_by("node" : "page_storage_node")]
 /// We abstract over the components stored here
 #[rr::exists("max_sz" : "option page_size")]
 #[rr::exists("children" : "list page_storage_node")]
 /// Invariant: The representation invariant linking all these things together holds.
 #[rr::invariant("page_storage_node_invariant node max_sz maybe_page_token children")]
+/// Invariant: The whole address space of this node is in addressable memory.
+#[rr::invariant("INV_IN_RANGE": "node.(base_address) + (page_size_in_bytes_nat node.(max_node_size)) ≤ MaxInt usize")]
 struct PageStorageTreeNode {
     // Page token owned by this node. `None` means that this page token has already been allocated or that it has been divided into
     // smaller page tokens that were stored in this node's children.
-    #[rr::field("<#>@{option} maybe_page_token")]
+    #[rr::field("<#>@{{ option }} maybe_page_token")]
     page_token: Option<Page<UnAllocated>>,
     // Specifies what size of the page token can be allocated by exploring the tree starting at this node.
     // Invariant: if a page token exists, then its size is the max allocable size. Otherwise, the max allocable page size is the max
     // allocable page size of the children.
-    #[rr::field("<#>@{option} max_sz")]
+    #[rr::field("<#>@{{ option }} max_sz")]
     max_allocable_page_size: Option<PageSize>,
     // Invariant: Children store page tokens smaller than the page token stored in the parent node
     #[rr::field("<#> children")]
     children: Vec<PageStorageTreeNode>,
 }
 
-#[rr::skip]
+#[rr::context("onceG Σ memory_layout")]
 impl PageStorageTreeNode {
     /// Creates a new empty node with no allocation.
     /// Specification:
     #[rr::params("node_size", "base_address")]
     /// Precondition: the base address needs to be suitably aligned.
#[rr::requires("(page_size_align node_size | base_address)%Z")] + /// Precondition: the whole memory range is in addressable memory. + #[rr::requires("base_address + (page_size_in_bytes_nat node_size) ≤ MaxInt usize")] + /// Postcondition: we get a node with no available pages. #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")] - pub fn empty() -> Self { + fn empty() -> Self { Self { page_token: None, max_allocable_page_size: None, children: vec![] } } @@ -289,83 +346,99 @@ impl PageStorageTreeNode { /// token that can be allocated from this node. This method has the max depth of recusrive invocation equal to the number of /// PageSize variants. This method has an upper bounded computation complexity. /// - /// Invariant: page token's size must not be greater than the page size allocable at this node - #[rr::params("node", "tok", "γ")] - #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "tok")] - - /// Precondition: The token we store has a smaller size than the node. - #[rr::requires("tok.(page_sz) ≤ₚ node.(max_node_size)")] - + /// Precondition: The size and base address arguments match the logical state. + #[rr::requires("this_node_base_address = self.cur.(base_address)")] + #[rr::requires("this_node_page_size = self.cur.(max_node_size)")] + /// Precondition: The token we store has a smaller-or-equal size than the node. + #[rr::requires("Hsz_le" : "page_token.(page_sz) ≤ₚ self.cur.(max_node_size)")] /// Precondition: The page token is within the range of the node. - #[rr::requires("page_within_range node.(base_address) node.(max_node_size) tok")] - #[rr::exists("available_sz" : "page_size")] - /// Postcondition: we obtain an available page size... - #[rr::returns("available_sz")] - /// Postcondition: ...which is at least the size that we stored. - #[rr::ensures("tok.(page_sz) ≤ₚ available_sz")] + #[rr::requires("Hrange" : "page_within_range this_node_base_address this_node_page_size page_token")] + /// Postcondition: The now available size is at least the size that we stored. + #[rr::ensures("page_token.(page_sz) ≤ₚ ret")] /// Postcondition: This node has been changed... 
     #[rr::exists("node'")]
-    #[rr::observe("γ": "node'")]
-    /// ...and now it can allocate a page of size `available_sz`
-    #[rr::ensures("page_node_can_allocate node' = Some available_sz")]
+    #[rr::observe("self.ghost": "node'")]
+    /// ...and now it can allocate a page of the size given by the return value
+    #[rr::ensures("page_node_can_allocate node' = Some ret")]
+    /// ...which is at least the size of the token that was stored
+    #[rr::ensures("page_token.(page_sz) ≤ₚ ret")]
+    /// ...and which is at least as large as the size we could allocate before
+    #[rr::ensures("page_node_can_allocate self.cur ≤o{ option_cmp page_size_cmp} Some ret")]
+    /// ...and which is bounded by the node's size (this is an artifact of the proof)
+    #[rr::ensures("ret ≤ₚ self.cur.(max_node_size)")]
     /// ...while the rest is unchanged
-    #[rr::ensures("node'.(max_node_size) = node.(max_node_size)")]
-    #[rr::ensures("node'.(base_address) = node.(base_address)")]
-    pub fn store_page_token(
+    #[rr::ensures("node'.(max_node_size) = self.cur.(max_node_size)")]
+    #[rr::ensures("node'.(base_address) = self.cur.(base_address)")]
+    fn store_page_token(
         &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: Page<UnAllocated>,
     ) -> PageSize {
-        assert!(this_node_page_size >= *page_token.size());
-        if &this_node_page_size == page_token.size() {
+        assert!(this_node_page_size >= page_token.size());
+        if this_node_page_size == page_token.size() {
             // End of recursion, we found the node that can store the page token.
             assert!(this_node_base_address == page_token.start_address());
-            assert!(&this_node_page_size == page_token.size());
-            assert!(self.page_token.is_none());
+            assert!(this_node_page_size == page_token.size());
+            // For verification: to unfold the invariant.
+            &self.max_allocable_page_size;
             self.store_page_token_in_this_node(page_token);
         } else {
             assert!(this_node_page_size.smaller().is_some());
             self.initialize_children_if_needed(this_node_page_size);
+            // For verification: to unfold the invariant.
+            &self.max_allocable_page_size;
             // Calculate which child we should invoke recursively.
-            let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &page_token);
+            let index = Self::calculate_child_index(this_node_base_address, this_node_page_size, &page_token);
             // Let's go recursively to the node where this page belongs.
             let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index);
-            let allocable_page_size = self.children[index].store_page_token(child_base_address, child_page_size, page_token);
+            let allocable_page_size =
+                vec_index_mut(&mut self.children, index).store_page_token(child_base_address, child_page_size, page_token);
             // We are coming back from the recursion.
-            self.try_to_merge_page_tokens(this_node_page_size);
-            if Some(allocable_page_size) > self.max_allocable_page_size {
-                // Some children can allocate page tokens of a size larger than the page token we stored because they could have been
-                // merged.
-                self.max_allocable_page_size = Some(allocable_page_size);
+            // Update the size, in case the recursion increased the allocable size.
+            self.max_allocable_page_size = self.max_allocable_page_size.max(Some(allocable_page_size));
+            // In case this token became mergeable, merge.
+            // Safety: The children are initialized.
+            unsafe {
+                self.try_to_merge_page_tokens(this_node_page_size);
+            }
         }
         self.max_allocable_page_size.unwrap()
     }
 
+    #[rr::only_spec]
     /// Recursively traverses the tree to reach a node that contains the page token of the requested size and returns this page token.
     /// This function also returns the set of page sizes that are no longer allocable at the node. This method has a max depth of
     /// recursive invocation equal to the number of PageSize variants. This method has an upper-bounded computation complexity.
     ///
-    /// Invariants: the requested page size to acquire must not be greater than a page size allocable at this node.
-    #[rr::trust_me]
-    #[rr::params("node", "γ", "sz_to_acquire")]
-    #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)", "sz_to_acquire")]
-    /// Precondition: The desired size must be bounded by this node's size (otherwise something went wrong)
-    #[rr::requires("sz_to_acquire ≤ₚ node.(max_node_size)")]
-    /// Postcondition: the function can either succeed to allocate a page or fail
-    #[rr::exists("res" : "result page _")]
-    #[rr::returns("res")]
-    /// If it succeeds, the returned page has the desired size.
-    #[rr::ensures("if_Ok res (λ pg, pg.(page_sz) = sz_to_acquire)")]
-    pub fn acquire_page_token(
+    #[rr::params("memly" : "memory_layout")]
+    /// Precondition: The size and base address arguments match the logical state.
+    #[rr::requires("this_node_base_address = self.cur.(base_address)")]
+    #[rr::requires("this_node_page_size = self.cur.(max_node_size)")]
+    /// Precondition: The global memory layout is initialized.
+    #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some memly)")]
+    /// Postcondition: This node has been changed...
+    #[rr::exists("node'")]
+    #[rr::observe("self.ghost": "node'")]
+    /// Postcondition: the size and base address remain unchanged
+    #[rr::ensures("node'.(max_node_size) = self.cur.(max_node_size)")]
+    #[rr::ensures("node'.(base_address) = self.cur.(base_address)")]
+    /// Postcondition: The function succeeds and returns a page iff the allocable page size of the
+    /// node is at least the required page size.
+    #[rr::ok]
+    #[rr::requires("∃ sz, page_node_can_allocate self.cur = Some sz ∧ page_size_to_acquire ≤ₚ sz")]
+    /// Postcondition: if it succeeds, the returned page has the desired size.
+    #[rr::ensures("ret.(page_sz) = page_size_to_acquire")]
+    fn acquire_page_token(
         &mut self, this_node_base_address: usize, this_node_page_size: PageSize, page_size_to_acquire: PageSize,
     ) -> Result<Page<UnAllocated>, Error> {
         ensure!(self.max_allocable_page_size >= Some(page_size_to_acquire), Error::OutOfPages())?;
-        if &this_node_page_size == &page_size_to_acquire {
+        if this_node_page_size == page_size_to_acquire {
             // End of recursion, we found the node from which we acquire a page token.
             assert!(self.page_token.is_some());
             let page_token = self.acquire_page_token_from_this_node();
             assert!(this_node_base_address == page_token.start_address());
-            assert!(&this_node_page_size == page_token.size());
+            assert!(this_node_page_size == page_token.size());
             Ok(page_token)
         } else {
             // We are too high in the tree, i.e., the current level represents page size allocations that are larger than the page size
@@ -376,73 +449,88 @@ impl PageStorageTreeNode {
             // requested ...
             // Because we are looking for a page token of a smaller size, we must divide the page token owned by this node, if that has
             // not yet occurred.
- if self.page_token.is_some() { - self.divide_page_token(this_node_base_address, this_node_page_size); - } - // Let's get the index of the first child that has requested allocation. It might be that there is no child that can has the - // required allocation. In such a case, we return an error. - let index = self - .children - .iter() - .position(|node| node.max_allocable_page_size >= Some(page_size_to_acquire)) - .ok_or(Error::OutOfPages())?; + self.divide_page_token_if_necessary(this_node_base_address, this_node_page_size); + + // Let's get the index of the first child that has requested allocation. + // This will succeed because we already checked above that the desired size can be acquired. + let index = vec_iter(&self.children) + .position( + #[rr::only_spec] + #[rr::returns("bool_decide ((Some {page_size_to_acquire}) ≤o{ option_cmp page_size_cmp } page_node_can_allocate node)")] + |node| node.max_allocable_page_size >= Some(page_size_to_acquire), + ) + .unwrap(); + let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index); // Invoke recursively this function to traverse to a node containing a page token of the requested size. // The below unwrap is ok because we found an index of a node that has requested allocation. - let page_token = self.children[index].acquire_page_token(child_base_address, child_page_size, page_size_to_acquire).unwrap(); + let page_token = vec_index_mut(&mut self.children, index) + .acquire_page_token(child_base_address, child_page_size, page_size_to_acquire) + .unwrap(); // Let's refresh information about the largest allocable page size available in children. - self.max_allocable_page_size = self.children.iter().map(|child| child.max_allocable_page_size).max().flatten(); + self.max_allocable_page_size = vec_iter(&self.children) + .map( + #[rr::returns("page_node_can_allocate child")] + |child| child.max_allocable_page_size, + ) + .max() + .flatten(); Ok(page_token) } } + #[rr::only_spec] /// Creates children for the given node because the node gets created with an empty list of children, expecting that children will be /// created lazily with this function. /// - /// Invariant: This node has no children - /// Outcome: - /// - There is one child for every possible smaller page size that can be created for this node, - /// - Every child is empty, i.e., has no children, no page token, no possible allocation. - #[rr::params("node", "γ")] - #[rr::args("(#node, γ)", "node.(max_node_size)")] - /// Postcondition: leaves the page node unchanged except for initializing the - /// children if necessary - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) node.(allocation_state) true")] + /// Precondition the page size argument has to match the node's logical state. + #[rr::requires("this_node_page_size = self.cur.(max_node_size)")] + /// Postcondition: leaves the page node unchanged except for initializing the children if necessary + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) self.cur.(allocation_state) true")] fn initialize_children_if_needed(&mut self, this_node_page_size: PageSize) { if self.children.is_empty() { - self.children = (0..this_node_page_size.number_of_smaller_pages()).map(|_| Self::empty()).collect(); + self.children = (0..this_node_page_size.number_of_smaller_pages()) + .map( + // I think to handle this well I'll need invariants on closures. 
+                    // i.e., the address and so on need to become logical components of the type (even
+                    // though they don't have a physical representation)
+                    #[rr::skip]
+                    #[rr::params("base_address", "node_size")]
+                    #[rr::requires("(page_size_align node_size | base_address)%Z")]
+                    #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")]
+                    |_| Self::empty(),
+                )
+                .collect();
         }
     }
 
     /// Stores the given page token in the current node.
     ///
     /// Invariant: if there is a page token, then all page sizes equal to or lower than the page token's are allocable from this node.
-    #[rr::params("node", "γ", "tok")]
-    #[rr::args("(#node, γ)", "tok")]
-    /// Precondition: the node is in the unavailable state
-    #[rr::requires("node.(allocation_state) = PageTokenUnavailable")]
+    ///
     /// Precondition: the token's size is equal to this node's size
-    #[rr::requires("node.(max_node_size) = tok.(page_sz)")]
+    #[rr::requires("self.cur.(max_node_size) = page_token.(page_sz)")]
     /// Precondition: the token's address matches the node's address
-    #[rr::requires("node.(base_address) = tok.(page_loc).2")]
+    #[rr::requires("self.cur.(base_address) = page_token.(page_loc).2")]
     /// Postcondition: the node changes to the available state
-    #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenAvailable node.(children_initialized)")]
+    #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenAvailable self.cur.(children_initialized)")]
     fn store_page_token_in_this_node(&mut self, page_token: Page<UnAllocated>) {
-        assert!(self.page_token.is_none());
-        self.max_allocable_page_size = Some(*page_token.size());
+        // TODO: add back the assert!
+        //assert!(self.page_token.is_none());
+        self.max_allocable_page_size = Some(page_token.size());
         self.page_token = Some(page_token);
     }
 
     /// Returns ownership of a page token that has been stored in this node.
     ///
-    /// Invariant: This node owns a page token
-    /// Invariant: After returning the page token, this node does not own the page token and has no allocation
-    #[rr::params("node", "γ")]
-    #[rr::args("(#node, γ)")]
-    #[rr::requires("node.(allocation_state) = PageTokenAvailable")]
-    #[rr::exists("tok")]
-    #[rr::returns("tok")]
-    #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) PageTokenUnavailable node.(children_initialized)")]
+    /// Precondition: The token in this node is fully available.
+    #[rr::requires("Hstate" : "self.cur.(allocation_state) = PageTokenAvailable")]
+    /// Postcondition: The returned token's size matches the node's size.
+    #[rr::ensures("ret.(page_sz) = self.cur.(max_node_size)")]
+    /// Postcondition: The returned token's address matches the node's address.
+    #[rr::ensures("ret.(page_loc).2 = self.cur.(base_address)")]
+    /// Postcondition: The node has been updated to the unavailable state.
+    #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenUnavailable self.cur.(children_initialized)")]
     fn acquire_page_token_from_this_node(&mut self) -> Page<UnAllocated> {
         assert!(self.page_token.is_some());
         self.max_allocable_page_size = None;
@@ -450,86 +538,121 @@ impl PageStorageTreeNode {
     }
 
     /// Divides the page token into smaller page tokens.
-    ///
-    /// Invariant: This node owns a page token
-    /// Invariant: After returning, this node can allocate pages of lower page sizes than the original page token.
-    /// Invariant: After returning, every child node owns a page token of smaller size than the original page token.
- /// Invariant: After returning, every child can allocate a page token of smaller size than the original page token. - #[rr::params("node", "γ", "smaller_size")] - #[rr::args("(#node, γ)", "node.(base_address)", "node.(max_node_size)")] - /// Precondition: The children should be initialized. - #[rr::requires("node.(children_initialized)")] - /// Precondition: A token is available in the current node. - #[rr::requires("node.(allocation_state) = PageTokenAvailable")] - /// Precondition: We assume there is a smaller node size. - #[rr::requires("page_size_smaller node.(max_node_size) = Some smaller_size")] - /// Postcondition: The node is updated to the "partially available" state, with a smaller node size being allocable - #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) (PageTokenPartiallyAvailable smaller_size) true")] - fn divide_page_token(&mut self, this_node_base_address: usize, this_node_page_size: PageSize) { - assert!(self.page_token.is_some()); - - let mut smaller_pages = self.page_token.take().unwrap().divide(); - assert!(smaller_pages.len() == self.children.len()); - smaller_pages.drain(..).for_each(|smaller_page_token| { - let index = self.calculate_child_index(this_node_base_address, this_node_page_size, &smaller_page_token); - self.children[index].store_page_token_in_this_node(smaller_page_token); - }); - let smaller_size = self.max_allocable_page_size.unwrap().smaller().unwrap(); - self.max_allocable_page_size = Some(smaller_size); + #[rr::params("smaller_size", "memly" : "memory_layout")] + /// Precondition: The global memory layout is initialized. + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some memly)")] + /// Precondition: The argument base address and size match the node's logical state. + #[rr::requires("self.cur.(base_address) = this_node_base_address")] + #[rr::requires("self.cur.(max_node_size) = this_node_page_size")] + /// Precondition: The children are initialized. + #[rr::requires("self.cur.(children_initialized)")] + /// Precondition: There is a smaller node size. + #[rr::requires("page_size_smaller self.cur.(max_node_size) = Some smaller_size")] + /// Postcondition: The node is updated, being able to allocate at most [smaller_size]. + #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) (self.cur.(allocation_state) ⊓ PageTokenPartiallyAvailable smaller_size) true")] + fn divide_page_token_if_necessary(&mut self, this_node_base_address: usize, this_node_page_size: PageSize) { + if let Some(token) = self.page_token.take() { + let smaller_pages = token.divide(); + assert!(smaller_pages.len() == self.children.len()); + + let children = &mut self.children; + for smaller_page_token in smaller_pages { + #[rr::params("γ", "children_init")] + #[rr::inv_vars("children")] + #[rr::inv("children.ghost = γ")] + #[rr::inv( + "children.cur = ((λ node, page_node_update_state node PageTokenAvailable) <$> (take (length {Hist}) children_init)) ++ drop (length {Hist}) children_init" + )] + #[rr::ignore] + #[allow(unused)] + || {}; + let index = Self::calculate_child_index(this_node_base_address, this_node_page_size, &smaller_page_token); + vec_index_mut(children, index).store_page_token_in_this_node(smaller_page_token); + } + let smaller_size = this_node_page_size.smaller().unwrap(); + self.max_allocable_page_size = Some(smaller_size); + } } + #[rr::only_spec] /// Merges page tokens owned by children. + /// Safety: Requires that all children have been initialized. 
///
- /// Invariant: after merging, there is no child that owns a page token
- /// Invariant: after merging, this node owns a page token that has size larger than the ones owned before by children.
- #[rr::params("node", "γ")]
- #[rr::args("(#node, γ)", "node.(max_node_size)")]
- #[rr::requires("node.(children_initialized)")]
+ /// Precondition: The children are initialized.
+ #[rr::requires("self.cur.(children_initialized)")]
+ /// Precondition: the argument page size matches the node's page size
+ #[rr::requires("this_node_page_size = self.cur.(max_node_size)")]
+ /// Postcondition: this node has been updated to a new state.
#[rr::exists("state'")]
- #[rr::observe("γ": "mk_page_node node.(max_node_size) node.(base_address) state' true")]
- #[rr::ensures("state' = node.(allocation_state) ∨ state' = PageTokenAvailable")]
- fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) {
- if self.children.iter().all(|child| child.page_token.is_some()) {
+ #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) state' true")]
+ /// Postcondition: the state is either unchanged, or the whole node is available now.
+ #[rr::ensures("state' = self.cur.(allocation_state) ∨ state' = PageTokenAvailable")]
+ unsafe fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) {
+ if self.children.iter().all(
+ #[rr::returns("bool_decide (child.(allocation_state) = PageTokenAvailable)")]
+ |child| child.page_token.is_some(),
+ ) {
// All children have page tokens, thus we can merge them.
- let pages_to_merge = self.children.iter_mut().map(|child| child.acquire_page_token_from_this_node()).collect();
+ let pages_to_merge = self.children.iter_mut().map(
+ // The postcondition of the closure has the observation.
+ // iter_mut hands out mutable borrows.
+ // Options:
+ // - we immediately return the observation to the base iterator and allow it to
+ //   resolve,
+ // - or we collect a bigsep of observations and resolve it at the end.
+ //
+ // We might want to create the list of gnames already at the very beginning when
+ // creating the iterator via iter_mut.
+ // We can keep the observations as part of the invariant, I suppose.
+ // Then we finally get the completed invariant, and the observation of having
+ // turned the vector into a list of PlaceGhost.
+ // At this point, resolve everything.
+ #[rr::requires("child.cur.(allocation_state) = PageTokenAvailable")]
+ #[rr::ensures("ret.(page_sz) = child.cur.(max_node_size)")]
+ #[rr::ensures("ret.(page_loc).2 = child.cur.(base_address)")]
+ #[rr::observe("child.ghost": "mk_page_node child.cur.(max_node_size) child.cur.(base_address) PageTokenUnavailable child.cur.(children_initialized)")]
+ |child| child.acquire_page_token_from_this_node()
+ ).collect();
+ // Safety: all children are initialized and have a page token available.
self.store_page_token_in_this_node(unsafe { Page::merge(pages_to_merge, this_node_page_size) });
self.max_allocable_page_size = Some(this_node_page_size);
}
}

/// Returns the index of a child that can store the page token.
- ///
- /// Invariant: children have been created
- #[rr::skip]
- #[rr::args(#raw "#(-[ #maybe_tok; #available_sz; #children])", "base_addr", "node_sz", "#tok")]
- /// Precondition: The children need to have been initialized.
- #[rr::requires("length children = page_size_multiplier node_sz")]
+ #[rr::params("child_node_sz")]
/// Precondition: There is a smaller page size.
- #[rr::requires("page_size_smaller node_sz = Some child_node_size")] + #[rr::requires("Hchild": "page_size_smaller this_node_page_size = Some child_node_sz")] /// Precondition: The token's page size is smaller than this node's size. - #[rr::requires("tok.(page_sz) ≤ₚ child_node_size")] + #[rr::requires("Hsz_lt": "page_token.(page_sz) ≤ₚ child_node_sz")] /// Precondition: The token is within the range of this node. - #[rr::requires("page_within_range base_addr node_sz tok")] - #[rr::exists("i")] - #[rr::returns("i")] - #[rr::ensures("is_Some (children !! i)")] - // TODO: the token is in the range of the child node. - // TODO: does not work at this level of abstraction. Use a raw specification. - fn calculate_child_index(&self, this_node_base_address: usize, this_node_page_size: PageSize, page_token: &Page) -> usize { - assert!(this_node_page_size > *page_token.size()); + #[rr::requires("Hrange": "page_within_range this_node_base_address this_node_page_size page_token")] + #[rr::requires("Hdiv": "page_size_in_bytes_Z this_node_page_size | this_node_base_address")] + /// Postcondition: such that there is a matching child + #[rr::ensures("ret < page_size_multiplier this_node_page_size")] + /// Postcondition: The page is within the range of the child node. + #[rr::ensures("page_within_range (this_node_base_address + ret * page_size_in_bytes_Z child_node_sz) child_node_sz page_token")] + fn calculate_child_index(this_node_base_address: usize, this_node_page_size: PageSize, page_token: &Page) -> usize { + assert!(this_node_page_size > page_token.size()); let index = (page_token.start_address() - this_node_base_address) / this_node_page_size.smaller().unwrap().in_bytes(); - assert!(index < self.children.len()); index } /// Returns the base address and the page size of the child at the given index /// /// Invariant: children have been created - #[rr::params("node", "child_index", "child_node_size")] - #[rr::args("#node", "node.(base_address)", "node.(max_node_size)", "child_index")] - #[rr::requires("node.(children_initialized)")] - #[rr::requires("page_size_smaller node.(max_node_size) = Some child_node_size")] - #[rr::returns("-[child_base_address node.(base_address) child_node_size child_index; child_node_size] ")] + #[rr::params("child_node_size")] + /// Precondition: The base address and page size arguments match the node's logical state. + #[rr::requires("this_node_base_address = self.(base_address)")] + #[rr::requires("this_node_page_size = self.(max_node_size)")] + /// The children are initialized + #[rr::requires("H_child_init": "self.(children_initialized)")] + /// There exist children + #[rr::requires("page_size_smaller self.(max_node_size) = Some child_node_size")] + /// Precondition: the child index is in range + #[rr::requires("Hindex": "index < page_size_multiplier self.(max_node_size)")] + /// Postcondition: Returns a tuple of the child's base address and its size. 
+ #[rr::returns("*[child_base_address self.(base_address) child_node_size index; child_node_size] ")] fn child_address_and_size(&self, this_node_base_address: usize, this_node_page_size: PageSize, index: usize) -> (usize, PageSize) { assert!(index < self.children.len()); assert!(this_node_page_size.smaller().is_some()); @@ -538,3 +661,28 @@ impl PageStorageTreeNode { (child_base_address, child_page_size) } } + +/// These wrappers serve as a temporary workaround until RefinedRust supports unsized types and in +/// particular slices: the indexing and iteration methods on `Vec` work by dereferencing to slices, +/// which currently are not supported by RefinedRust. +/// For now, we thus create wrappers for these methods to which we can attach RefinedRust +/// specifications. +mod wrappers { + use alloc::vec::Vec; + + #[rr::only_spec] + #[rr::requires("index < length x.cur")] + #[rr::exists("γi")] + #[rr::returns("(x.cur !!! Z.to_nat index, γi)")] + #[rr::observe("x.ghost": "<[Z.to_nat index := PlaceGhost γi]> (<$#> x.cur)")] + pub fn vec_index_mut(x: &mut Vec, index: usize) -> &mut T { + &mut x[index] + } + + #[rr::only_spec] + #[rr::returns("x")] + pub fn vec_iter(x: &Vec) -> core::slice::Iter<'_, T> { + x.iter() + } +} +use wrappers::*; diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs index b3c74473..3c11a0dd 100644 --- a/security-monitor/src/core/page_allocator/page.rs +++ b/security-monitor/src/core/page_allocator/page.rs @@ -30,7 +30,8 @@ impl PageState for Allocated {} #[rr::invariant(#type "p.(page_loc)" : "<#> p.(page_val)" @ "array_t (page_size_in_words_nat p.(page_sz)) (int usize)")] /// Invariant: The page is well-formed. #[rr::invariant("page_wf p")] -/// We require the page to be in this bounded memory region. +/// We require the page to be in this bounded memory region that can be handled by the page +/// allocator. #[rr::invariant("(page_end_loc p).2 ≤ MAX_PAGE_ADDR")] /// We require the memory layout to have been initialized. #[rr::context("onceG Σ memory_layout")] @@ -39,7 +40,7 @@ impl PageState for Allocated {} #[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Invariant: ...and according to that layout, this page resides in confidential memory. #[rr::invariant("MEMORY_CONFIG.(conf_start).2 ≤ p.(page_loc).2")] -#[rr::invariant("p.(page_loc).2 + (page_size_in_bytes_nat p.(page_sz)) < MEMORY_CONFIG.(conf_end).2")] +#[rr::invariant("p.(page_loc).2 + (page_size_in_bytes_nat p.(page_sz)) ≤ MEMORY_CONFIG.(conf_end).2")] pub struct Page { /// Specification: the `address` has mathematical value `l`. #[rr::field("p.(page_loc)")] @@ -53,7 +54,6 @@ pub struct Page { } #[rr::context("onceG Σ memory_layout")] -#[rr::context("onceG Σ gname")] impl Page { /// Creates a page token at the given address in the confidential memory. /// @@ -71,13 +71,14 @@ impl Page { #[rr::requires(#type "l" : "<#> v" @ "array_t (page_size_in_words_nat sz) (int usize)")] /// Precondition: The page needs to be sufficiently aligned. #[rr::requires("l `aligned_to` (page_size_align sz)")] - /// Precondition: The page is located in a bounded memory region. + /// Precondition: The page is located in a bounded memory region that can be handled by the + /// page allocator. #[rr::requires("l.2 + page_size_in_bytes_Z sz ≤ MAX_PAGE_ADDR")] /// Precondition: The memory layout is initialized. 
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: The page is entirely contained in the confidential memory range. #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ l.2")] - #[rr::requires("l.2 + (page_size_in_bytes_nat sz) < MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("l.2 + (page_size_in_bytes_nat sz) ≤ MEMORY_CONFIG.(conf_end).2")] /// Then, we get a properly initialized page starting at `l` of size `sz` with some value `v`. #[rr::returns("mk_page l sz v")] pub(super) unsafe fn init(address: ConfidentialMemoryAddress, size: PageSize) -> Self { @@ -85,14 +86,10 @@ impl Page { } /// Specification: - #[rr::only_spec] - #[rr::params("p")] - #[rr::args("p")] /// We return a page starting at `l` with size `sz`, but with all bytes initialized to zero. - #[rr::returns("mk_page p.(page_loc) p.(page_sz) (zero_page p.(page_sz))")] + #[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")] pub fn zeroize(mut self) -> Page { self.clear(); - // TODO: fix automation for consuming values Page { address: self.address, size: self.size, _marker: PhantomData } } @@ -128,7 +125,6 @@ impl Page { /// Returns a collection of all smaller pages that fit within the current page and /// are correctly aligned. If this page is the smallest page (4KiB for RISC-V), then /// the same page is returned. - #[rr::only_spec] #[rr::params("x" : "memory_layout")] /// Precondition: The memory layout needs to have been initialized. #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some x)")] @@ -143,10 +139,9 @@ impl Page { // NOTE: Currently using a wrapper around map, as we have to add an extra trait requirement // to the struct definition of Map to declare the invariant. Should be lifted soon. (0..number_of_smaller_pages).map( - #[rr::skip] #[rr::params("v")] // Precondition: the page bound is in confidential memory - #[rr::requires("Hpage_end" : "{page_end}.2 < {*memory_layout}.(conf_end).2")] + #[rr::requires("Hpage_end" : "{page_end}.2 ≤ {*memory_layout}.(conf_end).2")] #[rr::requires("Hpage_start" : "{*memory_layout}.(conf_start).2 ≤ {self.address}.2")] // Precondition: the offset does not overflow #[rr::requires("Hlarger_page" : "i * page_size_in_bytes_Z {smaller_page_size} ∈ usize")] @@ -186,11 +181,27 @@ impl Page { /// * Merged pages are contiguous and cover the entire new size of the future page /// * Merged pages are of the same size /// * Merged pages are sorted + #[rr::only_spec] + #[rr::params("base_address" : "loc")] + #[rr::requires("base_address = (from_pages !!! 0%nat).(page_loc)")] + #[rr::requires("(from_pages !!! 0%nat).(page_loc) `aligned_to` page_size_align new_size")] + #[rr::requires( + "∀ (i : nat) pg, from_pages !! i = Some pg → + Some pg.(page_sz) = page_size_smaller new_size ∧ + pg.(page_loc) = base_address +ₗ (i * page_size_in_bytes_Z pg.(page_sz))" + )] + #[rr::requires("length from_pages = page_size_multiplier new_size")] + #[rr::returns("mk_page base_address new_size (mjoin (page_val <$> from_pages))")] pub unsafe fn merge(mut from_pages: Vec>, new_size: PageSize) -> Self { assert!(from_pages.len() > 2); assert!(from_pages[0].address.is_aligned_to(new_size.in_bytes())); assert!(new_size.in_bytes() / from_pages[0].size.in_bytes() == from_pages.len()); assert!(from_pages[0].start_address() + new_size.in_bytes() == from_pages[from_pages.len() - 1].end_address()); + + // NOTE: logically, this is a big step. + // - We need to go over the whole vector and get the ownership. 
+ // - From each page, we extract the ownership.
+ // - Then we merge the big array.
unsafe { Self::init(from_pages.swap_remove(0).address, new_size) }
}
}
@@ -199,7 +210,6 @@ impl Page<Allocated> {
impl Page<Allocated> {
/// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned
/// to the page allocator.
- #[rr::only_spec]
#[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")]
pub fn deallocate(mut self) -> Page<UnAllocated> {
self.clear();
@@ -251,6 +261,7 @@ impl<S: PageState> Page<S> {
&self.address
}

+ #[rr::ensures("page_wf self")]
#[rr::returns("self.(page_loc).2")]
pub fn start_address(&self) -> usize {
self.address.as_usize()
@@ -266,9 +277,10 @@ impl<S: PageState> Page<S> {
self.address().to_ptr().with_addr(self.end_address()) as *const usize
}

+ #[rr::ensures("page_wf self")]
#[rr::returns("self.(page_sz)")]
- pub fn size(&self) -> &PageSize {
- &self.size
+ pub fn size(&self) -> PageSize {
+ self.size
}

/// Writes data to a page at a given offset. Error is returned if an invalid offset was passed
diff --git a/verification/Makefile b/verification/Makefile
index 352fa347..3841cf6c 100644
--- a/verification/Makefile
+++ b/verification/Makefile
@@ -10,7 +10,7 @@ REFINEDRUST_RUSTC ?= refinedrust-rustc

REFINEDRUST_GENERATED_DIR ?= generated_code

-DUNEFLAGS ?=
+DUNEFLAGS ?= --display short

# TODO: currently, this builds in-tree.
# It would be good if we could build in the ACE_DIR
@@ -33,7 +33,7 @@ generate_ptrs:


verify: generate
- @dune build $(DUNEFLAGS) --display short rust_proofs
+ @dune build $(DUNEFLAGS) rust_proofs
@echo "✅︎ Verification succeeded"


diff --git a/verification/RefinedRust.toml b/verification/RefinedRust.toml
index 86811f9f..3f2949be 100644
--- a/verification/RefinedRust.toml
+++ b/verification/RefinedRust.toml
@@ -2,8 +2,9 @@ dump_borrowck_info=false
log_dir = "./log"
output_dir = "./rust_proofs"
run_check = false
-admit_proofs = true
+admit_proofs = false
skip_unsupported_features = false
extra_specs = "./extra_specs.v"
generate_dune_project = false
lib_load_paths = ["./refinedrust/stdlib/", "./rust_proofs"]
+trust_debug = true
diff --git a/verification/readme.md b/verification/readme.md
index 606f02d7..89bccd82 100644
--- a/verification/readme.md
+++ b/verification/readme.md
@@ -1,7 +1,6 @@
# Verification of the Security Monitor

This directory contains work-in-progress on verifying the security monitor.
-Currently, the verification is in early stages.

We use the tool [RefinedRust](https://plv.mpi-sws.org/refinedrust/) in the [Rocq prover](https://rocq-prover.org/) for verification.

@@ -48,6 +47,17 @@ For the `Page::read` function, RefinedRust generates the following code:

4. the file [`proof_core_page_allocator_page_Page_core_page_allocator_page_T_read.v`](/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_T_read.v) in [`verification/rust_proofs/ace/proofs/`](/verification/rust_proofs/ace/proofs) contains the proof of the lemma for the function. The default proof that RefinedRust generates will use the proof script from step 3 and then call into RefinedRust's generic automation.

+### RefinedRust proof annotations
+
+Rust functions are only translated and verified by RefinedRust if RefinedRust annotations (`#[rr::...]`) are attached to them.
+If a function is annotated, the function is verified against that specification, unless one of the following opt-out annotations is present:
+- `#[rr::only_spec]`: only the specification is generated, but the code is not translated and not verified
+- `#[rr::trust_me]`: the specification and code are generated, but the code is not verified against the specification
+
+These opt-out annotations make it possible to progressively cover the code with specifications while deferring the actual verification work. Eventually, a fully verified security monitor will contain none of these opt-out annotations, so that RefinedRust verifies all annotated code.
+
+
+
## Getting Started

The [setup](setup.md) document details how to set up the verification environment on your system.
@@ -67,8 +77,8 @@ The Rust source path is relative to [`security_monitor/src`](/security_monitor/s

| Module | Rust source | Status | Remarks |
|----------------------------|-------------|----------------------------|---------|
| Memory isolation config | | Specified, partly verified | |
-| \|- Page token | [`core/page_allocator/page.rs`](/security-monitor/src/core/page_allocator/page.rs) | Specified, partly verified | |
-| \|- Page allocator | [`core/page_allocator/allocator.rs`](/security-monitor/src/core/page_allocator/allocator.rs) | Specified, not verified | |
+| \|- Page token | [`core/page_allocator/page.rs`](/security-monitor/src/core/page_allocator/page.rs) | Largely verified | |
+| \|- Page allocator | [`core/page_allocator/allocator.rs`](/security-monitor/src/core/page_allocator/allocator.rs) | Largely verified | |
| \|- Page table encoding | [`core/architecture/riscv/mmu`](/security-monitor/src/core/architecture/riscv/mmu) | Specified, not verified | |
| Initialization | [`core/initialization`](/security-monitor/src/core/initialization) | Specified, partly verified | |
| VM Interactions | | Unspecified | |
@@ -76,14 +86,17 @@ The Rust source path is relative to [`security_monitor/src`](/security_monitor/s
| \|- Confidential flow | [`confidential_flow`](/security-monitor/src/confidential_flow) | | |
| \|- Non-confidential flow | [`non_confidential_flow`](/security-monitor/src/non_confidential_flow) | | |

-### RefinedRust proof annotations
+## Bugs found

-Rust functions are only translated and verified by RefinedRust if RefinedRust annotations (`#[rr::...]`) are attached to them.
-If a function is annotated, the function is verified against that specification, except if one of the following opt-out annotations is present:
-- `#[rr::only_spec]`: only the specification is generated, but the code is not translated and not verified
-- `#[rr::trust_me]`: the specification and code are generated, but the code is not verified against the specification
+### Page allocator
+- [`MemoryLayout::confidential_address_at_offset_bounded` did not correctly check the upper bound](https://github.com/IBM/ACE-RISCV/blob/b831232f1ae31162d4cb03b8785154b7942a7406/security-monitor/src/core/memory_layout/mod.rs#L161) of the memory region within which to offset.
+  Interestingly, this bug was also reflected in the initial specification, which correctly abstracted the incorrect code but contradicted the existing comment describing the intended behaviour.
+  Potential impact: `PageAllocator::add_memory_region` uses this method to check whether an offset stays within the given memory region.
+  If there were another confidential memory region after the page token region, we would create overlapping memory regions.
+  Currently not an issue, as `page_allocator_end = confidential_memory_end`, but could be critical in combination with other bugs or with future changes.

-These opt-out annotations help to progressively cover code with specification and delay the work on the verification itself. Eventually, a fully verified security monitor would not have any of the opt-out annotations and RefinedRust will verify that.
+- [In the second loop inside `PageAllocator::add_memory_region`](https://github.com/IBM/ACE-RISCV/blob/b831232f1ae31162d4cb03b8785154b7942a7406/security-monitor/src/core/page_allocator/allocator.rs#L182), the loop condition is slightly wrong, so in some cases the code does not decrease the page size at which to allocate.
+  Potential impact: as the bounds are verified again afterwards, no illegal allocation is made, but the tail part of the memory region is potentially wasted when it could be allocated.

## Guarantees
diff --git a/verification/refinedrust b/verification/refinedrust
index 1d5e2aed..0ee0990d 160000
--- a/verification/refinedrust
+++ b/verification/refinedrust
@@ -1 +1 @@
-Subproject commit 1d5e2aedc7732fdc4a865d792a3e20d91e8a35e1
+Subproject commit 0ee0990daee6f45514e2f3e8e8385e451ad13af8
diff --git a/verification/rust_proofs/ace/.gitignore b/verification/rust_proofs/ace/.gitignore
new file mode 100644
index 00000000..22951200
--- /dev/null
+++ b/verification/rust_proofs/ace/.gitignore
@@ -0,0 +1,5 @@
+generated
+proofs/dune
+interface.rrlib
+*/.nia.cache
+*/.lia.cache
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
index 40e3074e..6d0886c1 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_in_bytes.v
@@ -18,5 +18,5 @@ Proof.
Unshelve. all: sidecond_solver.
Unshelve. all: sidecond_hammer.
Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
index 9e694382..257d7889 100644
--- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
+++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_larger.v
@@ -18,5 +18,5 @@ Proof.
Unshelve. all: sidecond_solver.
Unshelve. all: sidecond_hammer.
Unshelve. all: print_remaining_sidecond.
-Admitted. (* admitted due to admit_proofs config flag *)
+Qed.
End proof.
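[Editor's note] To make the opt-out annotations described in the readme hunk above concrete, here is a minimal, hypothetical sketch. It is not part of this patch: the functions and their specifications are invented for illustration, reusing only the `rr::` attribute forms that appear elsewhere in this diff.

```rust
// Hypothetical illustration, not from the ACE codebase.

/// Fully verified: RefinedRust translates the body and proves it
/// against the specification below.
#[rr::params("n")]
#[rr::args("n")]
#[rr::requires("n < 4096")]
#[rr::returns("n + 1")]
fn next_index(n: usize) -> usize {
    n + 1
}

/// Opt-out: with `only_spec`, just the specification is generated; the body
/// is neither translated nor verified, so callers may rely on the spec while
/// the proof is deferred.
#[rr::only_spec]
#[rr::params("n")]
#[rr::args("n")]
#[rr::returns("2 * n")]
fn double_index(n: usize) -> usize {
    n * 2
}
```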
diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v index 67b4d478..142878fa 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v @@ -19,5 +19,5 @@ Proof. Unshelve. all: sidecond_hammer. all: unsafe_unfold_common_caesium_defs; simpl; lia. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v index 305e4569..1721b9f3 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smaller.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v index 7f4d65cf..790200b7 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_smallest.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v index 92dc9a1f..fab88679 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_table_level_PageTableLevel_lower.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v index 392c7360..d6ad186c 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_paging_system_PagingSystem_levels.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
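[Editor's note] For context on the `wrappers` module added to `allocator.rs` earlier in this patch: the wrappers are intended as drop-in replacements for slice-based `Vec` indexing and iteration at call sites, which is how the patch itself uses `vec_index_mut` in `divide_page_token_if_necessary`. A hedged sketch of the call-site pattern, with invented toy functions:

```rust
// Hypothetical sketch, not from the patch: using the specified wrappers
// instead of Vec's slice-based indexing and iteration.
fn bump_entry(entries: &mut Vec<u64>, index: usize) {
    // Instead of `entries[index] += 1`, which dereferences through a slice:
    let entry = vec_index_mut(entries, index);
    *entry += 1;
}

fn sum_entries(entries: &Vec<u64>) -> u64 {
    // Instead of `entries.iter()`:
    vec_iter(entries).copied().sum()
}
```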
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v index 477e62cd..fc717467 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_confidential_address_at_offset_bounded.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v index 061025f7..5897b400 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v @@ -22,5 +22,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v index 4c6a3538..11bb03ad 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_as_usize.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v index 8ad96621..4af1aad2 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v @@ -21,5 +21,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v index 1a44c766..182228b6 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. 
(* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v index 8804b9ae..a6df42f4 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_offset_from.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v index b10dcfcb..90228942 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_read_volatile.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v index bcf01311..0a230203 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_to_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v index 3b0454f7..ba0966ea 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_write_volatile.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
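[Editor's note] The `<` → `≤` specification changes in `page.rs` above and the `confidential_address_at_offset_bounded` bug recorded in the readme are two faces of the same boundary question: a page may end exactly at the exclusive end of its region. A simplified, hypothetical illustration of the corrected check (the names are invented, not the actual `MemoryLayout` API):

```rust
/// Hypothetical illustration only. Checks that a page of `size` bytes starting
/// at `addr` lies within `[region_start, region_end)`, where `region_end` is
/// an exclusive bound: the page may end exactly at `region_end`, so the final
/// comparison must be `<=`, not `<`.
fn page_within_region(addr: usize, size: usize, region_start: usize, region_end: usize) -> bool {
    let Some(end) = addr.checked_add(size) else {
        return false; // overflow: certainly out of bounds
    };
    region_start <= addr && end <= region_end
}
```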
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v index 99fbecd0..73c99f84 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_non_confidential_memory_address_NonConfidentialMemoryAddress_as_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v index 632c5cdf..6bcc26e0 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v @@ -20,5 +20,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v index a33d90cf..c740ad79 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_smallest.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v new file mode 100644 index 00000000..907a4934 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v @@ -0,0 +1,85 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_add_memory_region. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_add_memory_region_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_add_memory_region_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_add_memory_region_prelude. + + rep <-! liRStep; liShow. + all: match goal with + | H : ?x `aligned_to` page_size_in_bytes_nat _ |- _ => rename x into larr + end. + all: try match goal with + | H : ?x = Size4KiB ∨ _.2 + _ ≤ _ |- _ => + rename x into sz_to_allocate; + rename H into Hsz_bounds + end. 
+ + { (* calling Page::init *) + apply_update (updateable_split_array larr (page_size_in_bytes_nat sz_to_allocate)). + liRStepUntil typed_val_expr. + apply_update (updateable_reshape_array larr (page_size_in_words_nat sz_to_allocate) bytes_per_addr). + liRStepUntil typed_val_expr. + iRename select (larr ◁ₗ[_, _] _ @ (◁ _))%I into "Harr". + iApply fupd_typed_val_expr. + iMod (ltype_own_array_subtype_strong _ _ _ _ (int usize) with "[] Harr") as "(% & Harr)"; [done | | | | | ]. + { shelve_sidecond. } + { solve_layout_alg. } + { shelve_sidecond. } + { iModIntro. iIntros (??) "Harr". iPoseProof (ty_own_val_array_int_to_int with "Harr") as "$"; last done. + shelve_sidecond. } + iModIntro. + + repeat liRStep. } + repeat liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: try lia. + all: try congruence. + - rewrite -page_size_align_is_size /page_size_align. + exists (page_size_align_log Size4KiB). lia. + - sidecond_hammer. + - specialize (page_size_in_bytes_nat_ge x'). + specialize (page_size_in_bytes_nat_in_usize x') as []. + split; lia. + - specialize (page_size_in_bytes_nat_ge x'). + specialize (page_size_in_bytes_nat_in_usize x') as []. + sidecond_hammer. + - unfold page_size_in_bytes_nat. lia. + - specialize (page_size_in_words_nat_ge sz_to_allocate). lia. + - intros ? Hly1. apply syn_type_has_layout_array_inv in Hly1 as (? & Hly1 & -> & ?). + apply syn_type_has_layout_int_inv in Hly1 as ->. + eexists. split; first by apply syn_type_has_layout_int. + rewrite ly_size_mk_array_layout. done. + - (* if location is aligned to page size, it's aligned to usize *) + eapply base.aligned_to_mul_inv. + revert select (larr `aligned_to` page_size_in_bytes_nat sz_to_allocate). + done. + - sidecond_hammer. + - rewrite page_size_align_is_size. done. + - rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. + revert select (¬ memory_region_end.2 > _). + rewrite Hroot_sz. lia. + - rename select (max_node_size _ = Size128TiB) into Hroot_sz'. + rewrite Hroot_sz'. + apply page_size_le_max. + - rewrite Hroot_sz Hroot_base. + split; simpl. + + lia. + + revert select (larr.2 + _ ≤ MAX_PAGE_ADDR). rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. lia. + - apply aligned_to_offset; first done. + solve_goal. + + all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +(*Qed.*) +Admitted. (* admitted due to long Qed *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v new file mode 100644 index 00000000..75ce13bc --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v @@ -0,0 +1,28 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_empty. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_empty_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_empty_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_empty_prelude. + + repeat liRStep; liShow. + liInst Hevar Size128TiB. + liInst Hevar0 0. + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. + { exists 0. lia. } + { apply page_size_in_bytes_nat_in_usize. 
} + all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v new file mode 100644 index 00000000..fb31536f --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v @@ -0,0 +1,50 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_find_largest_page_size. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_find_largest_page_size_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_find_largest_page_size_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_find_largest_page_size_prelude. + + rep liRStep; liShow. + 1: match goal with + | H : capture_address_ `aligned_to` page_size_in_bytes_nat ?x, + H' : page_size_smaller ?x = Some ?x' |- _ => + rename x into sz_aligned; + rename x' into sz_aligned_smaller; + rename H' into Hsmaller + end. + 1: rewrite Hsmaller. + all: rep liRStep; liShow. + 1: match goal with + | H : address `aligned_to` page_size_in_bytes_nat ?x, + H' : page_size_smaller ?x = Some ?x' |- _ => + rename x into sz_aligned; + rename x' into sz_aligned_smaller; + rename H' into Hsmaller + end. + 1: rewrite Hsmaller. + all: rep liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + + all: try match goal with + | H : _ `aligned_to` page_size_in_bytes_nat ?x |- _ => + rename x into sz'; + specialize (page_size_in_bytes_nat_ge sz') as ? + end. + all: try lia. + all: by eapply address_aligned_to_page_size_smaller. + + Unshelve. all: print_remaining_sidecond. +(*Qed.*) +Admitted. (* admitted due to long Qed *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v new file mode 100644 index 00000000..98845190 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v @@ -0,0 +1,28 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0_prelude. + + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft6" "ulft1"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "ulft3" "ulft_4"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft15" "ulft1"). + rep <-! liRStep; liShow. 
+ + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v new file mode 100644 index 00000000..5d714680 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v @@ -0,0 +1,29 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { unfold page_node_can_allocate. + revert INV_CASE. + destruct (node.(allocation_state)) eqn:Heq; simpl. + all: normalize_and_simpl_impl false. + all: clear; sidecond_hammer. + } + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v new file mode 100644 index 00000000..9c356f5c --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v @@ -0,0 +1,27 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { f_equiv; first done. + move: INV_CASE. + unfold page_node_can_allocate. + destruct (allocation_state child) eqn:Heq. + all: sidecond_hammer. } + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
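[Editor's note] As a quick sanity check on the index arithmetic in `calculate_child_index` (whose Rocq proof appears below), here is a small self-contained example with illustrative RISC-V sizes: a 2 MiB node divided into 4 KiB children.

```rust
// Toy example of `(token_start - node_base) / child_size_in_bytes`, the
// computation used by `calculate_child_index`. All numbers are illustrative.
fn main() {
    let node_base: usize = 0x8020_0000; // 2 MiB-aligned node base address
    let child_bytes: usize = 4096; // 4 KiB children
    let token_start: usize = 0x8020_2000; // a token two children into the node

    let index = (token_start - node_base) / child_bytes;
    assert_eq!(index, 2);

    // The matching child base address, as computed by `child_address_and_size`:
    let child_base = node_base + index * child_bytes;
    assert_eq!(child_base, token_start);
}
```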
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v new file mode 100644 index 00000000..0f8b88d0 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v @@ -0,0 +1,31 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Hint Unfold Z.divide : lithium_rewrite. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_prelude. + + rep <-! liRStep; liShow. + rep liRStep. + liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenUnavailable self.(children_initialized)). + rep liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + all: move: INV_CASE; + unfold page_storage_node_invariant_case; + rewrite Hstate/=; + sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v new file mode 100644 index 00000000..26c3ba81 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v @@ -0,0 +1,114 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma rem_le_a_minus_b a b z : + 0 ≤ b → + 0 < a → + Z.divide b a → + Z.divide b z → + a - b >= z mod a. +Proof. + intros ? Ha [n Hmul_a] [m Hmul_z]. + subst a z. + (* rewrite the modulus of multiples: (b*m) mod (b*n) = b * (m mod n) *) + rewrite Zmult_mod_distr_r. + + opose proof (Z.mod_pos_bound m n _) as Hbound; first lia. + destruct Hbound as [Hge Hlt]. (* 0 <= m mod n < n *) + + (* multiply the inequality chain by b *) + assert (0 <= b * (m mod n)) by (apply Z.mul_nonneg_nonneg; lia). + assert (b * (m mod n) <= b * (n - 1)). + { apply Z.mul_le_mono_nonneg_l; try lia. } + + (* conclude: b * (m mod n) <= b*n - b *) + replace (b * (n - 1)) with (b*n - b) in * by lia. + lia. +Qed. + +Lemma page_within_range_offset base off sz p child_sz : + (page_size_in_bytes_Z (page_sz p) | (page_loc p).2) → + (page_size_in_bytes_Z sz | base) → + page_size_smaller sz = Some child_sz → + p.(page_sz) ≤ₚ child_sz → + off = ((p.(page_loc).2 - base) `quot` page_size_in_bytes_Z child_sz) * page_size_in_bytes_Z child_sz → + page_within_range base sz p → + page_within_range (base + off) child_sz p. +Proof. 
+ set (i := ((p.(page_loc).2 - base) `quot` page_size_in_bytes_Z child_sz)). + intros Hal1 Hal2 Hsmaller Hle -> [Hran1 Hran2]. + split; first lia. + + assert (p.(page_loc).2 - base ≤ i * page_size_in_bytes_Z child_sz + Z.rem (p.(page_loc).2 - base) (page_size_in_bytes_Z child_sz)). + { subst i. + specialize (Z.quot_rem' ((page_loc p).2 - base) (page_size_in_bytes_Z child_sz)) as Heq. + rewrite {1}Heq. lia. } + enough (page_size_in_bytes_Z child_sz - page_size_in_bytes_Z (page_sz p) >= ((page_loc p).2 - base) `rem` page_size_in_bytes_Z child_sz) by lia. + + rewrite Z.rem_mod_nonneg; [ | lia | ]; first last. + { specialize (page_size_in_bytes_nat_ge child_sz). lia. } + apply rem_le_a_minus_b. + - lia. + - specialize (page_size_in_bytes_nat_ge child_sz). lia. + - by apply page_size_in_bytes_nat_le_divide. + - apply Z.divide_sub_r. + + done. + + etrans; last apply Hal2. + apply page_size_in_bytes_nat_le_divide. + etrans; first done. + left. by apply page_size_smaller_lt. +Qed. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { move: Hrange; unfold page_within_range. solve_goal. } + { move: Hrange; unfold page_within_range; solve_goal. } + { simplify_eq. specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } + { simplify_eq. specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } + { (* compute the index *) + simplify_eq. + etrans; last apply Z.quot_pos; first done; first lia. + specialize (page_size_in_bytes_nat_ge child_node_sz). + lia. } + { simplify_eq. + specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. + apply Z.quot_le_upper_bound; first lia. + nia. } + { simplify_eq. + specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. + apply Z.quot_lt_upper_bound; [lia | lia | ]. + opose proof (page_size_multiplier_size_in_bytes this_node_page_size child_node_sz _) as Ha. + { by rewrite Hchild. } + rewrite -Nat2Z.inj_mul. rewrite -Ha. + + destruct Hrange as [Hran1 Hran2]. + move: Hran2. simpl. + specialize (page_size_in_bytes_nat_ge page_token0). + lia. + } + { simplify_eq. + eapply page_within_range_offset; simpl; [ | | done..]. + - rewrite -page_size_align_is_size. done. + - eexists. done. } + { destruct Hsz_lt as [Hsz_lt |Hsz_eq]. + - apply Z.cmp_less_iff in Hsz_lt. lia. + - apply Z.cmp_equal_iff in Hsz_eq. apply page_size_variant_inj in Hsz_eq. + simplify_eq. lia. } + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v new file mode 100644 index 00000000..703adff1 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v @@ -0,0 +1,61 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size. + +Set Default Proof Using "Type". + +Section proof. 
+Context `{RRGS : !refinedrustGS Σ}.
+
+Lemma core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_proof (π : thread_id) :
+  core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_lemma π.
+Proof.
+  core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size_prelude.
+
+  repeat liRStep; liShow.
+
+  all: print_remaining_goal.
+  Unshelve. all: sidecond_solver.
+  Unshelve. all: sidecond_hammer.
+  all: rename select (page_size_smaller _ = Some _) into Hsz.
+  - move: INV_IN_RANGE.
+    opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+    { rewrite Hsz//. }
+    move: INV_INIT_CHILDREN. rewrite H_child_init.
+    nia.
+  - (* same as the previous goal *)
+    move: INV_IN_RANGE.
+    opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+    { rewrite Hsz//. }
+    move: INV_INIT_CHILDREN. rewrite H_child_init.
+    nia.
+  - move: INV_IN_RANGE.
+    opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+    { rewrite Hsz//. }
+    move: INV_INIT_CHILDREN. rewrite H_child_init.
+    nia.
+  - move: INV_IN_RANGE.
+    opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->.
+    { rewrite Hsz//. }
+    move: INV_INIT_CHILDREN. rewrite H_child_init.
+    nia.
+  - unfold child_base_address.
+    f_equiv. rewrite Z.mul_comm. f_equiv.
+    congruence.
+  - congruence.
+  - move: Hsz.
+    revert select (max_node_size self = Size4KiB).
+    intros ->.
+    simpl. done.
+  - move: Hsz.
+    revert select (max_node_size self = Size4KiB).
+    intros ->.
+    simpl. done.
+  - move: Hsz.
+    revert select (max_node_size self = Size4KiB).
+    intros ->.
+    simpl. done.
+
+  Unshelve. all: print_remaining_sidecond.
+Qed.
+End proof.
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v
new file mode 100644
index 00000000..3ddd599d
--- /dev/null
+++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v
@@ -0,0 +1,196 @@
+From caesium Require Import lang notation.
+From refinedrust Require Import typing shims.
+From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.
+
+Set Default Proof Using "Type".
+
+Section proof.
+Context `{RRGS : !refinedrustGS Σ}.
+
+Lemma divide_page_token_neutral node sz children smaller_size :
+  page_size_smaller node.(max_node_size) = Some smaller_size →
+  page_storage_node_invariant_case node sz None children →
+  node.(allocation_state) ⊓ PageTokenPartiallyAvailable smaller_size = node.(allocation_state).
+Proof.
+  intros Hsmaller.
+  unfold page_storage_node_invariant_case.
+  destruct (allocation_state node) eqn:Heq; simpl.
+  - done.
+  - naive_solver.
+  - intros [_ (? & [= <-] & -> & Hlt & ? & ?)].
+    cbn. rewrite page_size_min_l; first done.
+    apply page_size_smaller_page_size_variant in Hsmaller.
+    apply Z.ord_lt_iff in Hlt.
+    apply Z.ord_le_iff. lia.
+Qed.
+
+Lemma core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_proof (π : thread_id) :
+  core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_lemma π.
+Proof.
+  core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_prelude.
+
+  repeat liRStep.
+ 1: liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) (PageTokenPartiallyAvailable smaller_size) true). + 2: liInst Hevar self. + all: repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: try lia. + all: try rename select (subdivided_pages _ _) into Hsubdivided. + + all: try match type of Hsubdivided with + | subdivided_pages _ (?pref ++ (mk_page _ _ _) :: ?suff) => + rename pref into prefix; + rename suff into suffix + end. + all: try (opose proof (page_storage_node_invariant_has_token _ _ _ _ INV_CASE) as Ha; simpl in Ha; destruct Ha as (? & <- & ?)). + all: match type of INV_WF with page_storage_node_children_wf _ _ ?c => rename c into children end. + all: rename select (children_initialized self = true) into Hchildren. + all: rewrite Hchildren in INV_INIT_CHILDREN. + all: try rename x'3 into child_index. + + + - right. + opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Ha. + { simpl. done. } + { rewrite lookup_app. + rewrite lookup_ge_None_2; last lia. + solve_goal. } + solve_goal. + - opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Ha. + { simpl. done. } + { rewrite lookup_app. + rewrite lookup_ge_None_2; last lia. + solve_goal. } + simpl in Ha. subst. + eapply (subdivided_pages_page_within_range' (length prefix)); solve_goal. + - rewrite -page_size_align_is_size. + eexists. done. + - rewrite INV_INIT_CHILDREN. + apply subdivided_pages_length in Hsubdivided. + solve_goal. + - (* point: prove length prefix = child_index *) + rename select (page_within_range _ _ _) into Hrange. + + opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Hsz. + { done. } + { rewrite lookup_app_r; last lia. rewrite Nat.sub_diag. done. } + simpl in Hsz. subst. + + opose proof (subdivided_pages_lookup_inv _ _ _ _ _ (length prefix) _ _ Hsubdivided _ _ _ Hrange) as Heq; [solve_goal.. | ]. + assert (child_index = Z.of_nat (length prefix)) as -> by lia. + + odestruct (lookup_lt_is_Some_2 children (length prefix) _) as (child & Hchild). + { lia. } + opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. + { done. } + + apply page_within_range_inv in Hrange; last done. + simpl in Hrange. + + rewrite lookup_total_app_r; first last. + { solve_goal. } + normalize_and_simpl_goal. + erewrite list_lookup_total_correct; last apply Hchild. + done. + - rename select (page_within_range _ _ _) into Hrange. + + opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Hsz. + { done. } + { rewrite lookup_app_r; last lia. rewrite Nat.sub_diag. done. } + simpl in Hsz. subst. + + opose proof (subdivided_pages_lookup_inv _ _ _ _ _ (length prefix) _ _ Hsubdivided _ _ _ Hrange) as Heq; [solve_goal.. | ]. + assert (child_index = Z.of_nat (length prefix)) as -> by lia. + + odestruct (lookup_lt_is_Some_2 children (length prefix) _) as (child & Hchild). + { rewrite INV_INIT_CHILDREN. lia. } + opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. + { done. } + + opose proof (subdivided_pages_lookup_base_address (length prefix) _ _ _ _ _ Hsubdivided _) as Hloc. + { done. } + { rewrite lookup_app_r; last lia. rewrite Nat.sub_diag. done. } + + + rewrite lookup_total_app_r; first last. + { solve_goal. } + normalize_and_simpl_goal. + erewrite list_lookup_total_correct; last apply Hchild. 
+ + rewrite Hchild_loc. + simpl in Hloc. rewrite Hloc. + rewrite /child_base_address. lia. + - apply subdivided_pages_length in Hsubdivided. + solve_goal. + - rename select (page_within_range _ _ _) into Hrange. + + opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Hsz. + { done. } + { rewrite lookup_app_r; last lia. rewrite Nat.sub_diag. done. } + simpl in Hsz. subst. + + opose proof (subdivided_pages_lookup_inv _ _ _ _ _ (length prefix) _ _ Hsubdivided _ _ _ Hrange) as Heq; [solve_goal.. | ]. + assert (child_index = Z.of_nat (length prefix)) as -> by lia. + + rewrite lookup_total_app_r; first last. + { solve_goal. } + normalize_and_simpl_goal. + + apply list_subequiv_split. + { solve_goal. } + normalize_and_simpl_goal. + f_equiv. lia. + + normalize_and_simpl_goal. + odestruct (lookup_lt_is_Some_2 children (length prefix) _) as (node & ?); first lia. + exists node. split; first done. + erewrite list_lookup_total_correct; last done. + destruct node; done. + - rename select (max_node_size self = Size4KiB) into Hsz. + rename select (page_size_smaller _ = Some _) into Hsmaller. + move: Hsmaller. rewrite Hsz. done. + - apply subdivided_pages_length in Hsubdivided. simpl in Hsubdivided. lia. + - rewrite skipn_all2; last lia. + rewrite app_nil_r. + by apply page_storage_node_children_wf_upd_state. + - eexists. done. + - unfold name_hint. lia. + - unfold name_hint. + rewrite skipn_all2; last lia. + rewrite app_nil_r. + simplify_eq. + + rename select (allocation_state self = _) into Hstate. + move: INV_CASE. + unfold page_storage_node_invariant_case. + rewrite Hstate. simpl. + normalize_and_simpl_goal. + eexists. split_and!; [done | done | | done | ]. + { apply Z.cmp_less_iff. lia. } + setoid_rewrite list_lookup_fmap. + opose proof (lookup_lt_is_Some_2 children 0%nat _) as (child & Hchild). + { specialize (page_size_multiplier_ge (max_node_size self)). lia. } + eexists 0%nat, (page_node_update_state child PageTokenAvailable). + rewrite Hchild. + split; first done. + opose proof (page_storage_node_children_wf_child_lookup 0%nat _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. + { done. } + rewrite -Hchild_sz//. + - rename select (allocation_state self = _) into Hstate. + rewrite Hstate. done. + - eexists. done. + - solve_goal. + - solve_goal. + - erewrite divide_page_token_neutral; [ | done | done]. + destruct self as [? ? state ?]. + simpl. simpl in Hchildren. rewrite Hchildren//. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +(*Qed.*) +Admitted. (* admitted due to long Qed *) +End proof. + + diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v new file mode 100644 index 00000000..b8c747a0 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_empty.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_empty. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. 
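+
+(* The [empty] constructor requires no manual reasoning: the Lithium
+   automation below closes the goal and all side conditions. *)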
+ +Lemma core_page_allocator_allocator_PageStorageTreeNode_empty_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_empty_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_empty_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v new file mode 100644 index 00000000..640b5e9f --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v @@ -0,0 +1,328 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_store_page_token. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma page_storage_node_invariant_case_store_token sz base state children max_alloc opt_page new_state new_max_alloc (i : nat) child child' child_alloc child_alloc' : + children !! i = Some child → + page_node_can_allocate child = child_alloc → + page_node_can_allocate child' = Some child_alloc' → + child_alloc ≤o{option_cmp page_size_cmp} (Some child_alloc') → + child_alloc' <ₚ sz → + new_max_alloc = max_by (option_cmp page_size_cmp) (Some child_alloc') max_alloc → + new_state = (PageTokenPartiallyAvailable child_alloc') ⊔ state → + page_storage_node_invariant_case (mk_page_node sz base state true) max_alloc opt_page children → + page_storage_node_invariant_case (mk_page_node sz base new_state true) new_max_alloc opt_page (<[i := child']> children). +Proof. + intros Hlook Halloc Halloc' Halloc_lt Hlt -> ->. + specialize (lookup_lt_Some _ _ _ Hlook) as ?. + + unfold page_storage_node_invariant_case; simpl. + destruct state as [ | | available_sz]; simpl. + - normalize_and_simpl_goal. + eexists _. + split_and!; try done. + exists i, child'. + solve_goal. + - normalize_and_simpl_goal. + eexists. split_and!; try done. + apply max_by_r. apply not_ord_gt_iff. + left. by apply Z.cmp_less_iff. + - intros (-> & (allocable_sz & [= <-] & -> & ? & _ & Ha)). + split; first done. + destruct (decide (available_sz ≤ₚ child_alloc')) as [Hlt' | Hnlt]. + + (* the available size changes *) + exists child_alloc'. split_and!; try done. + { unfold join; simpl. f_equiv. + apply page_size_max_l. done. } + { apply max_by_l. + apply not_ord_lt_iff. + done. } + { exists i, child'. + rewrite list_lookup_insert_eq; last lia. + done. } + + (* no change *) + apply not_ord_le_iff in Hnlt. + exists available_sz. split_and!; try done. + { unfold join; simpl. f_equiv. + apply page_size_max_r. by left. } + { apply max_by_r. + apply not_ord_gt_iff. + by left. } + { (* need some monotonicity: at i, the size increases *) + destruct Ha as (j & child_max & Hlook' & Halloc1). + exists j, child_max. split; last done. + enough (i ≠ j). { rewrite list_lookup_insert_ne; done. } + intros <-. + simplify_eq. + move: Hnlt Halloc_lt. + rewrite Halloc1. + intros Hnlt Halloc_le. + opose proof (ord_lt_ord_le_trans _ _ _ Hnlt Halloc_le) as Ha. + by apply ord_lt_irrefl in Ha. } +Qed. 
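+
+(* Sanity check, added for illustration (not part of the original patch):
+   assuming the join on allocation states computes by case analysis, as the
+   proofs in this file rely on, [PageTokenAvailable] absorbs any partially
+   available state. *)
+Example join_available_absorbs sz' :
+  PageTokenPartiallyAvailable sz' ⊔ PageTokenAvailable = PageTokenAvailable.
+Proof. reflexivity. Qed.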
+ +Lemma node_allocation_state_join_partially_available_l st st' sz' : + st' = PageTokenPartiallyAvailable sz' ⊔ st → + (st = PageTokenUnavailable ∧ st' = PageTokenPartiallyAvailable sz') ∨ + (∃ sz, st = PageTokenPartiallyAvailable sz ∧ st' = PageTokenPartiallyAvailable (sz' ⊔ sz)) ∨ + (st = PageTokenAvailable ∧ st' = PageTokenAvailable). +Proof. + unfold join, node_allocation_state_join. + destruct st as [ | | sz]; simpl; naive_solver. +Qed. +Lemma available_sz_lower_bound node max_sz tok children updated_node_state allocable_sz_rec stored_sz merged_available_sz tok' children' : + page_storage_node_invariant_case node max_sz tok children → + page_storage_node_invariant_case (mk_page_node node.(max_node_size) node.(base_address) updated_node_state node.(children_initialized)) (Some merged_available_sz) tok' children' → + (updated_node_state = (PageTokenPartiallyAvailable allocable_sz_rec ⊔ allocation_state node) ∨ updated_node_state = PageTokenAvailable) → + stored_sz <ₚ max_node_size node → + stored_sz ≤ₚ allocable_sz_rec → + stored_sz ≤ₚ merged_available_sz ∧ (page_node_can_allocate node) ≤o{option_cmp page_size_cmp} Some merged_available_sz. +Proof. + intros Hinv1 Hinv2 Hupd ? ?. + destruct Hupd as [Hupd%node_allocation_state_join_partially_available_l | ->]; first last. + { apply page_storage_node_invariant_case_available_inv in Hinv2 as (tok'' & -> & [= ->] & ? & ?); last done. + split; first by left. + erewrite page_storage_node_invariant_case_can_allocate; last done. + by eapply page_node_invariant_case_sized_bounded. } + destruct Hupd as [[Heq1 Heq2] | [Ha | [? ->]]]. + - eapply page_storage_node_invariant_case_partially_available_inv in Hinv2 as [[= ->] ->]; last done. + split; first done. + opose proof (page_storage_node_invariant_case_unavailable_inv _ _ _ _ _ Hinv1) as [-> ->]; first done. + erewrite page_storage_node_invariant_case_can_allocate; last done. + by left. + - destruct Ha as (sz & Hst & ->). + eapply page_storage_node_invariant_case_partially_available_inv in Hinv2 as [[= ->] ->]; last done. + split. { by etrans; last apply page_size_max_ge_l. } + erewrite page_storage_node_invariant_case_can_allocate; last done. + eapply page_storage_node_invariant_case_partially_available_inv in Hinv1 as [[= ?] ->]; last done. + subst. eapply page_size_max_ge_r. + - apply page_storage_node_invariant_case_available_inv in Hinv2 as (tok'' & -> & [= ->] & ? & ?); last done. + split; first by left. + erewrite page_storage_node_invariant_case_can_allocate; last done. + by eapply page_node_invariant_case_sized_bounded. +Qed. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_store_page_token_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_prelude. + + rep liRStep; liShow. + 2: repeat liRStep. + 3: repeat liRStep. + 1: liInst Hevar self; rep liRStep; liShow. + 1: liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)); rep liRStep. + (* there is a smaller page size *) + rep <-! liRStep. + 2: repeat liRStep. + rep liRStep. + liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) self.(allocation_state) true). + rep <-! liRStep. + match goal with + | H : page_node_can_allocate _ = Some ?a |- _ => + rename a into allocable_sz_rec + end. + set (new_state := (PageTokenPartiallyAvailable allocable_sz_rec) ⊔ self.(allocation_state)). + + repeat liRStep. liShow. 
+ liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) new_state true). + repeat liRStep. + liInst Hevar (mk_page_node (max_node_size self) (base_address self) x'1 true). + rename x'1 into updated_node_state. + repeat liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: try lia. + + all: try match goal with + | H : page_storage_node_children_wf _ _ ?x, + H2: page_storage_node_invariant_case (mk_page_node _ _ _ _) _ _ ?x, + H3: length ?x = page_size_multiplier _ |- _ => + rename x into children; + rename H2 into INV_CASE2; + rename H into INV_WF2; + rename H3 into INV_INIT_CHILDREN2 + end. + all: try rename x' into child_index. + all: try set (child := children !!! Z.to_nat child_index). + all: try match goal with + | H : page_size_smaller (max_node_size _) = Some ?x |- _ => + rename x into child_size; rename H into Hsmaller + end. + all: try match goal with + | H : ?x = PageTokenPartiallyAvailable _ ∨ ?x = PageTokenAvailable, + H1 : page_storage_node_invariant_case (mk_page_node _ _ ?x _) _ _ _ |- _ => + rename x into merged_state; + rename H1 into INV_CASE_MERGED; + rename H into Hmerged + end. + + - apply page_within_range_inv in Hrange; simpl in Hrange; done. + - destruct Hsz_le as [Hsz_lt | Hsz_eq%Z.ord_eq_iff]. + + apply Z.ord_lt_iff in Hsz_lt. move: Hsz_lt. lia. + + apply page_size_variant_inj in Hsz_eq. done. + - eexists. done. + - done. + - move: INV_CASE. + unfold page_storage_node_invariant_case. + solve_goal. + - eexists. done. + - done. + - done. + - erewrite page_storage_node_invariant_case_can_allocate; last done. + by eapply page_node_invariant_case_sized_bounded. + - rename select (_ ∨_) into Hsz. + destruct Hsz as [Hsz | <-]; last done. + unfold ord_le, ord_lt, ord_eq. + move: Hsz. + unfold page_size_cmp. + match goal with + | H: page_size_variant _ = page_size_variant (max_node_size self) - 1 |- _ => rewrite H + end. + clear. + solve_goal. + - rewrite -page_size_align_is_size. + eexists. done. + - rename select (_ ∨_) into Hsz. + destruct Hsz as [Hsz | <-]; last done. + move: Hsz. + rename select (max_node_size self = _) into Hsz. rewrite Hsz. + destruct page_token0; done. + - eexists. done. + - done. + - destruct INV_WF2 as [INV_WF1 INV_WF2]. + odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as [Ha Hb]. + { done. } + { assert (Z.to_nat child_index < length children); last solve_goal. + rewrite INV_INIT_CHILDREN2. + specialize (page_size_multiplier_ge (max_node_size self)); lia. } + rewrite Hb. + unfold child_base_address. + lia. + - destruct INV_WF2 as [INV_WF1 INV_WF2]. + odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as [Ha Hb]. + { done. } + { assert (Z.to_nat child_index < length children); last solve_goal. + rewrite INV_INIT_CHILDREN2. + specialize (page_size_multiplier_ge (max_node_size self)); lia. } + done. + - opose proof (list_lookup_lookup_total_lt children (Z.to_nat child_index) _) as Hlook_child. + { lia. } + fold child in Hlook_child. + opose proof (page_storage_node_children_wf_child_lookup _ _ _ _ children _ _ _ _) as Hchild; [ | done | apply Hlook_child | ]; first done. + destruct Hchild as [-> _]. + apply Z.ord_le_iff. + move: Hsz_le. unfold ord_le. + normalize_and_simpl_goal. + lia. + - match goal with + | H: page_within_range (base_address self + _) _ _ |- _ => rename H into Hran + end. + move: Hran. + unfold child_base_address. + rewrite Z.mul_comm. + done. + - eapply page_storage_node_children_wf_insert; last done. + all: done. + - eexists _. done. 
+ - opose proof (list_lookup_lookup_total_lt children (Z.to_nat child_index) _) as Hlook_child. + { lia. } + fold child in Hlook_child. + eapply page_storage_node_invariant_case_store_token; [ .. | done ]. + { apply Hlook_child. } + { reflexivity. } + { done. } + { done. } + { eapply (ord_le_ord_lt_trans _ (max_node_size child)); first last. + - opose proof (page_storage_node_children_wf_child_lookup _ _ _ _ _ _ _ _ Hlook_child) as (Ha & _); [done.. | ]. + rewrite Ha. + by apply page_size_smaller_lt. + - done. } + { clear. symmetry. case_bool_decide as Ha. + - eapply max_by_l. + apply not_ord_lt_iff. left. done. + - eapply max_by_r. apply not_ord_gt_iff. + move: Ha. + destruct (option_cmp page_size_cmp _ (Some allocable_sz_rec)) eqn:Heq; first done. + + right. done. + + left. by apply correct_ord_antisym. } + { done. } + - match goal with + | H : ?x = new_state ∨ ?x = PageTokenAvailable |- _ => + rename x into new_state'; + rename H into Hnew_state' + end. + exfalso. move: Hnew_state'. + rename select (page_storage_node_invariant_case (mk_page_node _ _ new_state' _) _ _ _) into Hcase'. + apply page_storage_node_invariant_not_available in Hcase' as [Ha ->]. + simpl in Ha. rewrite Ha. subst new_state. + clear. intros [? | ?]; last done. + destruct (allocation_state self); done. + - eexists. done. + - done. + - (* did a merge, now it's available *) + match goal with + | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p into merged_available_sz + end. + eapply available_sz_lower_bound. + + apply INV_CASE. + + apply Hcase_merged. + + done. + + simpl. + move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. + + solve_goal. + - match goal with + | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p into merged_available_sz + end. + match goal with + | H : updated_node_state = new_state ∨ _ |- _ => + rename H into Hmerged + end. + by eapply page_storage_node_invariant_case_can_allocate. + - (* same as above *) + match goal with + | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p into merged_available_sz + end. + eapply available_sz_lower_bound. + + apply INV_CASE. + + apply Hcase_merged. + + done. + + simpl. move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. + + solve_goal. + - match goal with + | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p into merged_available_sz + end. + opose proof (available_sz_lower_bound _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) as Ha. + + apply INV_CASE. + + apply Hcase_merged. + + done. + + simpl. move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. + + solve_goal. + + destruct Ha as [_ Ha]. apply Ha. + - match goal with + | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p into merged_available_sz + end. + opose proof (page_node_invariant_case_sized_bounded _ _ _ _ _) as Ha. + { apply Hcase_merged. } + done. + + Unshelve. all: print_remaining_sidecond. +(*Qed.*) +Admitted. (* admitted due to long Qed *) +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v new file mode 100644 index 00000000..2e05adb8 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v @@ -0,0 +1,30 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Hint Unfold Z.divide : lithium_rewrite. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_prelude. + + rep liRStep; liShow. + { liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)). + rep liRStep; liShow. } + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { move: INV_CASE. + unfold name_hint. unfold page_storage_node_invariant_case. + solve_goal. } + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v new file mode 100644 index 00000000..d2e3d251 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v @@ -0,0 +1,26 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + { split. + - intros ?. revert INV_CASE. sidecond_hammer. + - intros (? & ->). revert INV_CASE. unfold page_storage_node_invariant_case. + destruct (allocation_state child) eqn:Heq; simpl; sidecond_hammer. } + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v index dfa896c2..ceedbc87 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_address.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v index 9e65737f..7f888e6d 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v @@ -17,12 +17,12 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - { revert select (_ < (conf_end _).2). - specialize (conf_end_in_usize x). - rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat; solve_goal. } - { revert select (_ < (conf_end _).2). - specialize (conf_end_in_usize x). - rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat; solve_goal. } + { revert select (_ ≤ (conf_end _).2). + specialize (conf_end_in_usize x0). + rewrite /page_size_in_bytes_nat; solve_goal. } + { revert select (_ ≤ (conf_end _).2). + specialize (conf_end_in_usize x0). + rewrite /page_size_in_bytes_nat; solve_goal. } Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v index 1e059cdd..69fa245e 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address_ptr.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v index 8364b0ef..620be67c 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_size.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v index 9d98b650..03a578de 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_start_address.v @@ -18,5 +18,5 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. 
all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v index 3f403b77..c8288c3c 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v @@ -7,58 +7,47 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. +Global Instance bla_copy : Copyable (error_Error_ty ). +Proof. apply _. Qed. + Lemma core_page_allocator_page_Page_T_write_proof (π : thread_id) : core_page_allocator_page_Page_T_write_lemma π. Proof. core_page_allocator_page_Page_T_write_prelude. rep <-! liRStep; liShow. - { + { rename select (offset_in_bytes `rem` 8 = 0) into Hoffset. apply Z.rem_divide in Hoffset; last done. destruct Hoffset as (off' & ->). apply_update (updateable_typed_array_access self off' (IntSynType usize)). - (*rep <-! liRStep; liShow.*) - rep liRStep; liShow. - } + repeat liRStep; liShow. + } { rep liRStep; liShow. } { rep liRStep; liShow. } all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - { + { revert select (_ + off' * 8 < _ + page_size_in_bytes_Z _). - rewrite /page_size_in_bytes_Z/page_size_in_bytes_nat. + rewrite /page_size_in_bytes_nat. rewrite bytes_per_addr_eq. nia. } - { rewrite /name_hint. - rewrite bytes_per_int_usize bytes_per_addr_eq. - apply Z.divide_factor_r. } - { rewrite /name_hint. - assert (off' * 8 < page_size_in_bytes_Z self0) as Hx by lia. - revert Hx. unfold page_size_in_bytes_Z. + { assert (off' * 8 < page_size_in_bytes_Z self0) as Hx by lia. + revert Hx. specialize (page_size_in_bytes_div_8 self0) as (? & ->). rewrite bytes_per_int_usize. rewrite bytes_per_addr_eq. lia. } - { exists (Z.to_nat off'). + { exists (Z.to_nat off'). rewrite bytes_per_int_usize bytes_per_addr_eq. normalize_and_simpl_goal. } - { rewrite /name_hint. - rename select (¬ _ < _ < _) into Hnlt. - intros []. apply Hnlt. split. { - enough (bytes_per_int usize > 0) by lia. - rewrite bytes_per_int_usize bytes_per_addr_eq. - lia. } - revert select (_ < (conf_end _).2). - unfold page_size_in_bytes_Z. lia. } - { rewrite /name_hint. - intros []. + { intros []. rename select (offset_in_bytes `rem` 8 ≠ 0) into Hen. apply Hen. unfold name_hint in *. apply Z.rem_divide; done. } Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v index 5665f314..9195c619 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate.v @@ -12,11 +12,11 @@ Lemma core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocat Proof. core_page_allocator_page_Page_core_page_allocator_page_Allocated_deallocate_prelude. - repeat liRStep; liShow. + rep <-! liRStep; liShow. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. -Admitted. 
(* admitted due to admit_proofs config flag *) -End proof. \ No newline at end of file +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v index 8472b513..8d560791 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v @@ -28,36 +28,23 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - { + { revert select (_ + off' * 8 < _ + page_size_in_bytes_Z _). - rewrite /page_size_in_bytes_Z/page_size_in_bytes_nat. + rewrite /page_size_in_bytes_nat. rewrite bytes_per_addr_eq. nia. } - { rewrite /name_hint. - rewrite bytes_per_int_usize bytes_per_addr_eq. - apply Z.divide_factor_r. } - { rewrite /name_hint. - assert (off' * 8 < page_size_in_bytes_Z self0) as Hx by lia. - revert Hx. unfold page_size_in_bytes_Z. + { assert (off' * 8 < page_size_in_bytes_Z self0) as Hx by lia. + revert Hx. specialize (page_size_in_bytes_div_8 self0) as (? & ->). rewrite bytes_per_int_usize. rewrite bytes_per_addr_eq. lia. } - { exists (Z.to_nat off'). + { exists (Z.to_nat off'). rewrite bytes_per_int_usize bytes_per_addr_eq. split; last done. nia. } - { rewrite /name_hint. - rename select (¬ _ < _ < _) into Hnlt. - intros []. apply Hnlt. split. { - enough (bytes_per_int usize > 0) by lia. - rewrite bytes_per_int_usize bytes_per_addr_eq. - lia. } - revert select (_ < (conf_end _).2). - unfold page_size_in_bytes_Z. lia. } - { rewrite /name_hint. - intros []. + { intros []. rename select (offset_in_bytes `rem` 8 ≠ 0) into Hen. apply Hen. unfold name_hint in *. apply Z.rem_divide; done. } Unshelve. all: print_remaining_sidecond. -Admitted. +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v index d77fe58e..846c8ba5 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v @@ -62,18 +62,6 @@ Proof. rep liRStep; liShow. liInst Hevar INV. - Search once_status once_status_tok. - Search once_status_tok. - iRename select (once_init_tok _ memly) into "Htok1". - iRename select (once_init_tok _ x) into "Htok2". - iPoseProof (once_init_tok_agree with "Htok1 Htok2") as "<-". - iRename select (once_status _ _) into "Hstatus". - rename select (static_has_refinement _ _) into Hrfn1. - iDestruct "Hstatus" as "(%x9 & %Hrfn & Htok3)". - specialize (static_has_refinement_inj _ _ _ Hrfn1 Hrfn) as (Heq & Heq2). - rewrite (UIP_refl _ _ Heq) in Heq2. - simpl in Heq2. subst x9. - (* Prove initialization of the invariant *) unfold INV at 1. simpl. @@ -82,7 +70,7 @@ Proof. { iR. rewrite -page_size_multiplier_quot; last done. iSplitR. { rewrite page_size_multiplier_quot_Z; done. } - iR. iR. iR. iR. iSplitR. { iExists x4. iR. done. } + iR. iR. iR. iR. iSplitR. { iExists _. iR. done. } iApply big_sepL2_elim_l. iPoseProof (big_sepL_extend_r with "Harr") as "Harr". 2: iApply (big_sepL2_wand with "Harr"). 
{ rewrite List.length_seq length_reshape length_replicate. lia. } @@ -95,12 +83,14 @@ Proof. injection Hlook2 as <-. unfold OffsetLocSt. simplify_layout_goal. unfold offset_loc. assert (ly_size usize * (k * page_size_in_words_nat smaller_sz) = k * page_size_in_bytes_Z smaller_sz) as ->. - { rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat. + { rewrite /page_size_in_bytes_nat. simpl. rewrite bytes_per_int_usize. lia. } enough (x0 = (<#> take (page_size_in_words_nat smaller_sz) (drop (k * page_size_in_words_nat smaller_sz) pageval))) as -> by done. move: Hlook1. rewrite sublist_lookup_reshape. 2: { specialize (page_size_in_words_nat_ge smaller_sz). lia. } - 2: { rewrite length_fmap -H122. + 2: { + rename select (page_size_in_words_nat _ = length pageval) into Hpage_sz. + rewrite length_fmap -Hpage_sz. erewrite (page_size_multiplier_size_in_words _ smaller_sz); last done. lia. } rewrite sublist_lookup_Some'. @@ -111,7 +101,8 @@ Proof. liRStep; liShow. iApply prove_with_subtype_default. iSplitR. { liShow. iModIntro. simpl. - iIntros ([istart itend] [itstart' itend'] (capture_smaller_sz & capture_memlayout & capture_start & capture_end & []) e Hnext) "(%Hstart & %Hend & -> & -> & -> & -> & Hinv)". + iIntros ([istart itend] [itstart' itend'] (capture_smaller_sz & capture_memlayout & capture_start & capture_end & []) e) "Hnext (%Hstart & %Hend & -> & -> & -> & -> & Hinv)". + rewrite boringly_persistent_elim. iDestruct "Hnext" as "%Hnext". simpl in Hnext. destruct Hnext as (<- & _ & <- & Hnext & [= Hcmp_eq]). case_bool_decide; last done. injection Hnext as [= ->]. @@ -123,14 +114,13 @@ Proof. iSplitL "Hinv0 Hinv1". { iExists _, istart, inhabitant, _, _, _, _. iR. iR. unfold name_hint. iFrame "#". - iSplitR. { iPureIntro. unfold page_size_in_bytes_Z. simpl. lia. } + iSplitR. { iPureIntro. simpl. lia. } iSplitR. { iPureIntro. lia. } opose proof (page_size_multiplier_size_in_bytes sz smaller_sz _) as Heq_sz; first done. iSplitR. { iPureIntro. subst itend. assert (page_size_in_bytes_Z sz ∈ usize) as Hel by done. - rewrite /page_size_in_bytes_Z Heq_sz in Hel. - rewrite /page_size_in_bytes_Z. + rewrite Heq_sz in Hel. destruct Hel. split; nia. } iSplitR. { iPureIntro. rename select (self `aligned_to` _) into Hal. @@ -140,7 +130,6 @@ Proof. iSplitR. { iPureIntro. simpl. subst itend. - rewrite /page_size_in_bytes_Z. rewrite (page_size_multiplier_size_in_bytes sz smaller_sz); last done. move: Hcmp_eq. clear. nia. } iSplitR. { iPureIntro. simpl. lia. } @@ -161,83 +150,25 @@ Proof. iApply prove_with_subtype_default. iSplitR. { liShow. iModIntro. simpl. - iIntros ([istart itend] [itstart' itend'] (capture_smaller_sz & capture_memlayout & capture_start & capture_end & []) Hnext). + iIntros ([istart itend] [itstart' itend'] (capture_smaller_sz & capture_memlayout & capture_start & capture_end & [])) "Hnext". + rewrite boringly_persistent_elim. iDestruct "Hnext" as "%Hnext". simpl in Hnext. destruct Hnext as (<- & (Ha & <-) & _). injection Ha as <-. iIntros "Hinv". done. } - rep <-! liRStep. - liShow. + rep <-! liRStep. liShow. + + rep liRStep. liShow. + iApply prove_with_subtype_default. + iRename select (MapInv _ _ _ _ _) into "Hinv". + iSplitL "Hinv". { done. } + + rep <-! liRStep. liShow. (* discard the invariant on the original self token so that RefinedRust does not try to re-establish it *) iRename select (arg_self ◁ₗ[π, _] _ @ _)%I into "Hself". iPoseProof (opened_owned_discard with "Hself") as "Hself". - clear Heq. - - (* derive inductive property about the result *) - rename x' into new_pages. 
- iRename select (IteratorNextFusedTrans _ _ _ _ _) into "Hiter". - iAssert (⌜subdivided_pages (mk_page self sz pageval) new_pages⌝)%I with "[Hiter]" as "%Hnew_pages_correct". - { simpl. - unfold subdivided_pages. simpl. - destruct (page_size_smaller sz) as [ sz_smaller | ] eqn:Hsmaller; subst smaller_sz. - 2: { - rewrite Z.quot_same; first last. - { specialize (page_size_in_bytes_nat_ge sz). unfold page_size_in_bytes_Z. lia. } - assert (sz = Size4KiB) as ->. { by apply page_size_smaller_None. } - destruct new_pages as [ | pg1 new_pages]; simpl. - { iDestruct "Hiter" as "%Heq". injection Heq. simplify_eq. } - iRevert "Hiter". rep liRStep; liShow. - repeat case_bool_decide; simplify_eq. - iRename select (IteratorNextFusedTrans _ _ _ _ _) into "Hiter". - destruct new_pages as [ | pg2 new_pages]; simpl; first last. - { iRevert "Hiter". rep liRStep. liShow. - repeat case_bool_decide; simplify_eq. lia. } - iRevert "Hiter". rep liRStep. liShow. - iPureIntro. split; first done. - intros i p' Hlook. destruct i as [ | i]; last done. - simpl in Hlook. injection Hlook as <-. - simpl. done. } - rewrite (page_size_multiplier_quot_Z _ sz_smaller); first last. - { rewrite Hsmaller. done. } - iRevert "Hiter". iClear "∗#". - - - match goal with - | |- context[IteratorNextFusedTrans ?attrs _ {| map_clos := ?clos |} _ ?fin] => - set (AT := attrs); - set (final_state := fin); - set (clos_state := clos) - end. - iAssert (∀ k : nat, IteratorNextFusedTrans AT π {| map_it := (Z.of_nat k, Z.of_nat $ page_size_multiplier sz); map_clos := clos_state |} new_pages final_state -∗ - ⌜length new_pages = (page_size_multiplier sz - k)%nat ∧ ∀ (i : nat) (p' : page), new_pages !! i = Some p' → page_loc p' = self offset{usize}ₗ (k + i) * page_size_in_words_nat sz_smaller ∧ page_sz p' = sz_smaller⌝)%I as "Hx"; first last. - { iIntros "Hit". - iPoseProof ("Hx" $! 0%nat with "Hit") as "Ha". - rewrite Nat.sub_0_r. - simpl. iDestruct "Ha" as "(% & %Hb)". - iPureIntro. split; first done. intros ? ? Hlook. eapply Hb in Hlook. - rewrite Z.add_0_l in Hlook. done. } - - iInduction new_pages as [ | pg1 new_pages] "IH"; simpl. - - rep liRStep. liShow. iPureIntro. - rewrite Nat.sub_diag. - split; first done. done. - - rep liRStep. liShow. - iRename select (IteratorNextFusedTrans _ _ _ _ _) into "Hnext". - case_bool_decide; simplify_eq. - iPoseProof ("IH" $! (k + 1%nat)%nat) as "IH'". - rewrite Nat2Z.inj_add. iPoseProof ("IH'" with "Hnext") as "(-> & %IH)". - iPureIntro. split; first lia. - intros [ | i] p'; simpl; intros Hlook. - { injection Hlook as <-. simpl. rewrite Z.add_0_r. - split; last done. - rewrite /offset_loc. simpl. - rewrite /page_size_in_bytes_Z /page_size_in_bytes_nat. - rewrite bytes_per_int_usize. f_equiv. lia. } - replace (k + S i) with (k + 1%nat + i) by lia. - apply IH; done. } - rep liRStep. all: print_remaining_goal. @@ -247,8 +178,7 @@ Proof. specialize (page_size_in_words_nat_ge smaller_sz). solve_goal. - set (smaller_sz := (default self0 (page_size_smaller self0))). - unfold page_size_in_bytes_Z. - specialize (page_size_in_bytes_nat_ge smaller_sz). + specialize (page_size_in_words_nat_ge smaller_sz). solve_goal. - set (smaller_sz := (default self0 (page_size_smaller self0))). rewrite (page_size_multiplier_quot_Z _ smaller_sz); last done. @@ -256,7 +186,25 @@ Proof. - set (smaller_sz := (default self0 (page_size_smaller self0))). rewrite (page_size_multiplier_quot_Z _ smaller_sz); last done. specialize (page_size_multiplier_in_usize self0). solve_goal. + - rename select (Forall2 _ _ _) into Hclos. 
+ opose proof (Forall2_length _ _ _ Hclos) as Hlen. + rewrite length_seqZ in Hlen. + rewrite page_size_multiplier_quot_Z in Hlen; last done. + unfold subdivided_pages. simpl. + split; first lia. + rename x' into new_pages. + intros i p' Hlook. + opose proof (Forall2_lookup_r _ _ _ i _ Hclos Hlook) as (j & Hlook2 & Ha). + apply lookup_seqZ in Hlook2 as (-> & Hlook2). + move: Ha. + normalize_and_simpl_goal. + split; last done. + rewrite /offset_loc. simpl. + rewrite /page_size_in_bytes_nat. + rewrite bytes_per_int_usize. f_equiv. + rewrite /smaller_sz. + lia. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v index c7147ef9..e79d48dd 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v @@ -25,12 +25,9 @@ Proof. rep <-! liRStep; liShow. apply_update (updateable_copy_lft "plft22" "ulft5"). rep <-! liRStep; liShow. - rename select (if_Err _ _) into Herr. - rename select (if_Ok _ _) into Hok. - destruct x' as [ | x']; first last. - { exfalso. simpl in *. apply Herr. split; last lia. - move: Hinrange Hpage_end. unfold page_size_in_bytes_Z. lia. } - simpl in *. revert Hok. + apply_update (updateable_copy_lft "vlft29" "ulft5"). + rep <-! liRStep. liShow. + apply_update (updateable_copy_lft "vlft30" "ulft5"). rep liRStep. all: print_remaining_goal. @@ -39,10 +36,16 @@ Proof. - (* add *) eapply aligned_to_offset. { apply Haligned. } - rewrite page_size_align_is_size /page_size_in_bytes_Z. + rewrite page_size_align_is_size. apply Z.divide_factor_r. - - move: Hinrange Hpage_end. rewrite /page_size_in_bytes_Z. nia. + - exfalso. + rename select (¬ _ < _ ≤ _) into Herr. + apply Herr. + split; last lia. + move: Hinrange. + specialize (page_size_in_bytes_nat_ge capture_smaller_page_size_). + nia. Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v new file mode 100644 index 00000000..44f4e9ea --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_lemma π. +Proof. 
+ core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_mut_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v new file mode 100644 index 00000000..be5605a5 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_call_once_prelude. + + repeat liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v index 4e43e499..9104ae3d 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v @@ -7,12 +7,13 @@ Set Default Proof Using "Type". Section proof. Context `{!refinedrustGS Σ}. + Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_proof (π : thread_id) : core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_lemma π. Proof. core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init_prelude. - repeat liRStep; liShow. + repeat liRStep; liShow. all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v new file mode 100644 index 00000000..7074318d --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize.v @@ -0,0 +1,22 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. 
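+
+(* As with the closure proofs above, zeroization needs no manual side
+   conditions: a guided [rep <-! liRStep] run plus the standard solvers
+   discharge the goal. *)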
+ +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_zeroize_prelude. + + rep <-! liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/pointers_utility/.gitignore b/verification/rust_proofs/pointers_utility/.gitignore new file mode 100644 index 00000000..49303703 --- /dev/null +++ b/verification/rust_proofs/pointers_utility/.gitignore @@ -0,0 +1,3 @@ +generated +proofs/dune +interface.rrlib \ No newline at end of file diff --git a/verification/theories/page_allocator/dune b/verification/theories/page_allocator/dune index c618803e..584b4fc2 100644 --- a/verification/theories/page_allocator/dune +++ b/verification/theories/page_allocator/dune @@ -3,4 +3,4 @@ (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) (synopsis "Manual proofs for ACE Security Monitor page allocator") - (theories Stdlib stdpp iris iris_contrib Ltac2 caesium lithium lrust Equations refinedrust ace.theories.base)) + (theories Stdlib stdpp iris iris_contrib Ltac2 caesium lithium lrust Equations refinedrust rrstd.cmp.theories ace.theories.base)) diff --git a/verification/theories/page_allocator/page.v b/verification/theories/page_allocator/page.v index a68a8f10..879be3aa 100644 --- a/verification/theories/page_allocator/page.v +++ b/verification/theories/page_allocator/page.v @@ -1,4 +1,5 @@ From refinedrust Require Import typing. +From rrstd.cmp.theories Require Import ordering. From ace.theories.base Require Import base. (** * Extra theories for page sizes and pages *) @@ -17,17 +18,19 @@ Global Instance page_size_inh : Inhabited page_size. Proof. exact (populate Size4KiB). Qed. Global Instance page_size_eqdec : EqDecision page_size. Proof. solve_decision. Defined. +Definition page_size_variant (a : page_size) : Z := + match a with + | Size4KiB => 0 + | Size16KiB => 1 + | Size2MiB => 2 + | Size1GiB => 3 + | Size512GiB => 4 + | Size128TiB => 5 + end +. Global Instance page_size_countable : Countable page_size. Proof. - refine (inj_countable (λ sz, - match sz with - | Size4KiB => 0 - | Size16KiB => 1 - | Size2MiB => 2 - | Size1GiB => 3 - | Size512GiB => 4 - | Size128TiB => 5 - end) + refine (inj_countable (λ a, Z.to_nat (page_size_variant a)) (λ a, match a with | 0 => Some Size4KiB @@ -37,12 +40,70 @@ Proof. | 4 => Some Size512GiB | 5 => Some Size128TiB | _ => None - end) _). + end%nat) _). intros []; naive_solver. Qed. +Global Instance page_size_variant_inj : + Inj (=) (=) (page_size_variant). +Proof. + intros sz1 sz2. + destruct sz1, sz2; done. +Qed. + +Definition page_size_cmp (a b : page_size) : ordering := + Z.cmp (page_size_variant a) (page_size_variant b). +Arguments page_size_cmp : simpl never. + +Definition page_size_eq (a b : page_size) : bool := + bool_decide (a = b). + +Global Instance page_size_eq_correct : CorrectEq page_size_eq. +Proof. + unfold page_size_eq. + econstructor. + - intros ?. naive_solver. + - intros ??. naive_solver. + - intros ???. naive_solver. +Qed. +Global Instance page_size_cmp_correct : CorrectOrd page_size_eq page_size_cmp. +Proof. + unfold page_size_eq, page_size_cmp. + econstructor. + - apply _. + - intros ??. solve_goal. + - intros ???. solve_goal. + - intros ???. solve_goal. 
+  - solve_goal.
+  - solve_goal.
+Qed.
+
+Notation "a '<ₚ' b" := (a <o{page_size_cmp} b) (at level 50).
+Notation "a '>ₚ' b" := (a >o{page_size_cmp} b) (at level 50).
+Notation "a '≤ₚ' b" := (a ≤o{page_size_cmp} b) (at level 60).
+Notation "a '≥ₚ' b" := (a ≥o{page_size_cmp} b) (at level 50).
+
+Lemma page_size_le_max p :
+  p ≤ₚ Size128TiB.
+Proof.
+  unfold ord_le.
+  destruct p; last (by right); left.
+  all: apply Z.cmp_less_iff.
+  all: simpl; lia.
+Qed.
+Lemma page_size_le_min p :
+  Size4KiB ≤ₚ p.
+Proof.
+  unfold ord_le.
+  destruct p; first (by right); left.
+  all: apply Z.cmp_less_iff.
+  all: simpl; lia.
+Qed.
+
+
 (** Number of 64-bit machine words in a page size *)
-Definition page_size_in_words_nat (sz : page_size) : nat :=
+Definition page_size_in_words_nat_def (sz : page_size) : nat :=
   match sz with
   | Size4KiB => 512
   | Size16KiB => 4 * 512
@@ -51,11 +112,14 @@ Definition page_size_in_words_nat (sz : page_size) : nat :=
   | Size512GiB => 512 * 512 * 512 * 512
   | Size128TiB => 512 * 512 * 512 * 512 * 256
   end.
+Definition page_size_in_words_nat_aux : seal (page_size_in_words_nat_def). Proof. by econstructor. Qed.
+Definition page_size_in_words_nat := page_size_in_words_nat_aux.(unseal).
+Definition page_size_in_words_nat_unfold : page_size_in_words_nat = page_size_in_words_nat_def := page_size_in_words_nat_aux.(seal_eq).
 Definition page_size_in_bytes_nat (sz : page_size) : nat :=
   bytes_per_addr * page_size_in_words_nat sz.
-Definition page_size_in_bytes_Z (sz : page_size) : Z :=
-  page_size_in_bytes_nat sz.
+Notation page_size_in_bytes_Z sz :=
+  (Z.of_nat (page_size_in_bytes_nat sz)).
 
 Lemma page_size_in_bytes_div_8 sz :
   (8 | page_size_in_bytes_nat sz).
@@ -67,7 +131,7 @@ Qed.
 Lemma page_size_in_words_nat_ge sz :
   page_size_in_words_nat sz ≥ 512.
 Proof.
-  unfold page_size_in_words_nat.
+  rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
   destruct sz; lia.
 Qed.
 Lemma page_size_in_bytes_nat_ge sz :
@@ -78,6 +142,104 @@ Proof.
   specialize (page_size_in_words_nat_ge sz).
   lia.
 Qed.
+Lemma page_size_in_bytes_nat_in_usize sz :
+  (Z.of_nat $ page_size_in_bytes_nat sz) ∈ usize.
+Proof.
+  rewrite /page_size_in_bytes_nat page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+  rewrite bytes_per_addr_eq.
+  split.
+  all: unsafe_unfold_common_caesium_defs.
+  all: unfold it_signed, it_byte_size_log, bytes_per_addr_log.
+  all: destruct sz; try lia.
+Qed.
+Lemma page_size_in_bytes_Z_in_usize sz :
+  (page_size_in_bytes_Z sz) ∈ usize.
+Proof.
+  apply page_size_in_bytes_nat_in_usize.
+Qed.
+
+Lemma page_size_in_words_nat_mono sz1 sz2 :
+  page_size_variant sz1 < page_size_variant sz2 →
+  page_size_in_words_nat sz1 < page_size_in_words_nat sz2.
+Proof.
+  rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+  unfold page_size_variant.
+  destruct sz1, sz2; lia.
+Qed.
+Lemma page_size_in_bytes_nat_mono sz1 sz2 :
+  page_size_variant sz1 < page_size_variant sz2 →
+  page_size_in_bytes_nat sz1 < page_size_in_bytes_nat sz2.
+Proof.
+  unfold page_size_in_bytes_nat.
+  intros ?%page_size_in_words_nat_mono.
+  unfold bytes_per_addr.
+  nia.
+Qed.
+Lemma page_size_in_bytes_Z_mono sz1 sz2 :
+  page_size_variant sz1 < page_size_variant sz2 →
+  page_size_in_bytes_Z sz1 < page_size_in_bytes_Z sz2.
+Proof.
+  unfold page_size_in_bytes_Z.
+  intros ?%page_size_in_bytes_nat_mono.
+  done.
+Qed.
+
+Lemma page_size_le_size_in_bytes sz1 sz2 :
+  sz1 ≤ₚ sz2 →
+  page_size_in_bytes_Z sz1 ≤ page_size_in_bytes_Z sz2.
+Proof.
+  intros [Hlt | Heq].
+  - apply Z.cmp_less_iff in Hlt.
+    apply page_size_in_bytes_Z_mono in Hlt. lia.
+  - apply Z.cmp_equal_iff in Heq.
apply page_size_variant_inj in Heq. subst. + lia. +Qed. +Lemma page_size_in_words_nat_lt_divide sz1 sz2 : + sz1 <ₚ sz2 → + Nat.divide (page_size_in_words_nat sz1) (page_size_in_words_nat sz2). +Proof. + intros Ha. apply Z.cmp_less_iff in Ha. + unfold page_size_in_bytes_nat. + destruct sz1, sz2; simpl in *; try lia. + all: rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def. + - exists 4%nat. lia. + - exists 512%nat. lia. + - exists (512 * 512)%nat. lia. + - exists (512 * 512 * 512)%nat. lia. + - exists (512 * 512 * 512 * 256)%nat. lia. + - exists 128%nat. lia. + - exists (128 * 512)%nat. lia. + - exists (128 * 512 * 512)%nat. lia. + - exists (64 * 512 * 512 * 512)%nat. lia. + - exists 512%nat. lia. + - exists (512 * 512)%nat. lia. + - exists (512 * 512 * 256)%nat. lia. + - exists (512)%nat. lia. + - exists (512 * 256)%nat. lia. + - exists (256)%nat. lia. +Qed. +Lemma page_size_in_words_nat_le_divide sz1 sz2 : + sz1 ≤ₚ sz2 → + Nat.divide (page_size_in_words_nat sz1) (page_size_in_words_nat sz2). +Proof. + intros [Hlt | Heq]. + - by eapply page_size_in_words_nat_lt_divide. + - apply Z.cmp_equal_iff in Heq. + apply page_size_variant_inj in Heq as <-. + done. +Qed. + +Lemma page_size_in_bytes_nat_le_divide sz1 sz2 : + sz1 ≤ₚ sz2 → + Z.divide (page_size_in_bytes_Z sz1) (page_size_in_bytes_Z sz2). +Proof. + intros Ha. unfold page_size_in_bytes_nat. + apply (page_size_in_words_nat_le_divide) in Ha. + apply Nat2Z.divide in Ha. + rewrite !(Nat2Z.inj_mul bytes_per_addr). + apply Z.mul_divide_mono_l. + done. +Qed. (** The next smaller page size *) Definition page_size_smaller (sz : page_size) : option page_size := @@ -93,6 +255,22 @@ Definition page_size_smaller (sz : page_size) : option page_size := Lemma page_size_smaller_None sz : page_size_smaller sz = None ↔ sz = Size4KiB. Proof. destruct sz; done. Qed. +Lemma page_size_smaller_page_size_variant sz sz' : + page_size_smaller sz = Some sz' → + page_size_variant sz' = page_size_variant sz - 1. +Proof. + destruct sz, sz'; simpl; naive_solver. +Qed. +Lemma page_size_smaller_lt sz sz' : + page_size_smaller sz = Some sz' → + sz' <ₚ sz. +Proof. + intros Hsmaller. + apply page_size_smaller_page_size_variant in Hsmaller. + unfold ord_lt, page_size_cmp. + apply Z.cmp_less_iff. + lia. +Qed. (** The next larger page size *) Definition page_size_larger (sz : page_size) : option page_size := @@ -107,6 +285,22 @@ Definition page_size_larger (sz : page_size) : option page_size := Lemma page_size_larger_None sz : page_size_larger sz = None ↔ sz = Size128TiB. Proof. destruct sz; done. Qed. +Lemma page_size_larger_page_size_variant sz sz' : + page_size_larger sz = Some sz' → + page_size_variant sz' = page_size_variant sz + 1. +Proof. + destruct sz, sz'; simpl; naive_solver. +Qed. +Lemma page_size_larger_gt sz sz' : + page_size_larger sz = Some sz' → + sz' >ₚ sz. +Proof. + intros Hlarger. + apply page_size_larger_page_size_variant in Hlarger. + unfold ord_gt, page_size_cmp. + apply Z.cmp_greater_iff. + lia. +Qed. (** Pages should be aligned to the size of the page; compute the log2 of this page's alignment *) @@ -126,11 +320,98 @@ Lemma page_size_align_is_size sz : (page_size_align sz = page_size_in_bytes_nat sz)%nat. Proof. rewrite /page_size_align/page_size_in_bytes_nat/bytes_per_addr/bytes_per_addr_log. + rewrite page_size_in_words_nat_unfold. Local Arguments Nat.mul : simpl never. destruct sz; simpl; lia. Qed. 
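+
+(* Two sanity checks, added for illustration (not part of the original
+   patch): assuming [page_size_smaller] and [page_size_larger] compute by
+   direct case analysis on the variant, the results below are forced by the
+   [..._page_size_variant] lemmas above and follow by computation. *)
+Example page_size_smaller_2MiB : page_size_smaller Size2MiB = Some Size16KiB.
+Proof. reflexivity. Qed.
+Example page_size_larger_4KiB : page_size_larger Size4KiB = Some Size16KiB.
+Proof. reflexivity. Qed.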
-(** The maximum address at which a page may be located (one-past-the-end address) *) +(** Join on page sizes (maximum) *) +Definition page_size_max (sz1 sz2 : page_size) : page_size := + match sz1, sz2 with + | Size128TiB, _ => Size128TiB + | _ , Size128TiB => Size128TiB + | Size512GiB, _ => Size512GiB + | _, Size512GiB => Size512GiB + | Size1GiB, _ => Size1GiB + | _, Size1GiB => Size1GiB + | Size2MiB, _ => Size2MiB + | _, Size2MiB => Size2MiB + | Size16KiB, _ => Size16KiB + | _, Size16KiB => Size16KiB + | _, _ => Size4KiB + end. +Global Instance page_size_join : Join page_size := page_size_max. + +Lemma page_size_max_ge_l sz1 sz2 : + sz1 ≤ₚ sz1 ⊔ sz2. +Proof. + unfold ord_le. + destruct sz1, sz2; cbn; naive_solver. +Qed. +Lemma page_size_max_ge_r sz1 sz2 : + sz2 ≤ₚ sz1 ⊔ sz2. +Proof. + unfold ord_le. + destruct sz1, sz2; cbn; naive_solver. +Qed. +Lemma page_size_max_l sz1 sz2 : + sz2 ≤ₚ sz1 → sz1 ⊔ sz2 = sz1. +Proof. + intros [Hlt | Heq]. + - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl; try done. +Qed. +Lemma page_size_max_r sz1 sz2 : + sz1 ≤ₚ sz2 → sz1 ⊔ sz2 = sz2. +Proof. + intros [Hlt | Heq]. + - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl; try done. +Qed. + +(** Meet on page sizes (minimum) *) +Definition page_size_min (sz1 sz2 : page_size) : page_size := + match sz1, sz2 with + | Size4KiB, _ => Size4KiB + | _, Size4KiB => Size4KiB + | Size16KiB, _ => Size16KiB + | _, Size16KiB => Size16KiB + | Size2MiB, _ => Size2MiB + | _, Size2MiB => Size2MiB + | Size1GiB, _ => Size1GiB + | _, Size1GiB => Size1GiB + | Size512GiB, _ => Size512GiB + | _, Size512GiB => Size512GiB + |_, _ => Size128TiB + end. +Global Instance page_size_meet : Meet page_size := page_size_min. + +Lemma page_size_min_le_l sz1 sz2 : + sz1 ⊓ sz2 ≤ₚ sz1. +Proof. + unfold ord_le. destruct sz1, sz2; cbn; naive_solver. +Qed. +Lemma page_size_min_le_r sz1 sz2 : + sz1 ⊓ sz2 ≤ₚ sz2. +Proof. + unfold ord_le. destruct sz1, sz2; cbn; naive_solver. +Qed. +Lemma page_size_min_l sz1 sz2 : + sz1 ≤ₚ sz2 → sz1 ⊓ sz2 = sz1. +Proof. + intros [Hlt | Heq]. + - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl in *; try done. +Qed. +Lemma page_size_min_r sz1 sz2 : + sz2 ≤ₚ sz1 → sz1 ⊓ sz2 = sz2. +Proof. + intros [Hlt | Heq]. + - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl in *; try done. +Qed. + +(** The maximum address at which a page may be located (one-past-the-end address), limited by the page allocator implementation. *) (* Sealed because it is big and will slow down Coq *) Definition MAX_PAGE_ADDR_def : Z := page_size_in_bytes_Z Size128TiB. @@ -173,13 +454,6 @@ Qed. Definition page_end_loc (p : page) : loc := p.(page_loc) +ₗ (page_size_in_bytes_Z p.(page_sz)). -(** Order on page sizes *) -Definition page_size_le (p1 p2 : page_size) := - page_size_in_words_nat p1 ≤ page_size_in_words_nat p2. -Arguments page_size_le : simpl never. - -Infix "≤ₚ" := page_size_le (at level 50). - (** Well-formedness of a page *) Definition page_wf (p : page) : Prop := (** The value has the right size *) @@ -226,6 +500,7 @@ Definition page_size_multiplier_log (sz : page_size) : nat := Definition page_size_multiplier (sz : page_size) : nat := 2^(page_size_multiplier_log sz). 
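+(* Editor's illustration (a sketch, not machine-checked and not part of the
+   original change): on concrete sizes, the join and meet instances above
+   compute the expected maximum and minimum. *)
+Example page_size_join_meet_16KiB_1GiB :
+  Size16KiB ⊔ Size1GiB = Size1GiB ∧ Size16KiB ⊓ Size1GiB = Size16KiB.
+Proof. done. Qed.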
+
 (** The above is not correct for the smallest page size *)
 Definition number_of_smaller_pages (sz : page_size) : nat :=
   match page_size_smaller sz with
@@ -240,7 +515,8 @@ Proof.
   intros ->.
   destruct sz; simpl.
   all: unfold page_size_smaller; simplify_eq.
-  all: unfold page_size_in_words_nat, page_size_multiplier, page_size_multiplier_log.
+  all: rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def.
+  all: unfold page_size_multiplier, page_size_multiplier_log.
   all: cbn [Nat.pow]; lia.
 Qed.
 Lemma page_size_multiplier_size_in_bytes sz sz' :
@@ -274,7 +550,6 @@ Lemma page_size_multiplier_quot sz smaller_sz :
   page_size_multiplier sz = Z.to_nat (page_size_in_bytes_Z sz `quot` page_size_in_bytes_Z smaller_sz).
 Proof.
   intros Hsz.
-  rewrite /page_size_in_bytes_Z.
   rewrite (page_size_multiplier_size_in_bytes _ smaller_sz); last done.
   rewrite Nat.mul_comm.
   rewrite Nat2Z.inj_mul.
@@ -287,7 +562,6 @@ Lemma page_size_multiplier_quot_Z sz smaller_sz :
 Proof.
   intros ?.
   rewrite (page_size_multiplier_quot _ smaller_sz); last done.
-  rewrite /page_size_in_bytes_Z.
   rewrite Z2Nat.id; first done.
   apply Z.quot_pos.
   - specialize (page_size_in_bytes_nat_ge sz). lia.
@@ -304,21 +578,169 @@ Proof.
   all: lia.
 Qed.
 
+Lemma page_size_multiplier_ge sz :
+  page_size_multiplier sz ≥ 1.
+Proof.
+  destruct sz.
+  all: unfold page_size_multiplier, page_size_multiplier_log.
+  all: cbn [Nat.pow]; lia.
+Qed.
+
+(** States that the page is in the given memory range *)
+(* TODO unify all the memory range stuff *)
+Definition page_within_range (base_address : Z) (sz : page_size) (p : page) : Prop :=
+  (base_address ≤ p.(page_loc).2 ∧ p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz) ≤ base_address + page_size_in_bytes_Z sz)%Z.
+
+Lemma page_within_range_inv base sz p :
+  page_within_range base sz p →
+  p.(page_sz) = sz →
+  p.(page_loc).2 = base.
+Proof.
+  unfold page_within_range.
+  intros [] <-.
+  lia.
+Qed.
+
+Lemma page_within_range_incl base1 base2 sz1 sz2 p :
+  base2 ≤ base1 →
+  base1 + page_size_in_bytes_Z sz1 ≤ base2 + page_size_in_bytes_Z sz2 →
+  page_within_range base1 sz1 p →
+  page_within_range base2 sz2 p.
+Proof.
+  unfold page_within_range.
+  simpl. intros ?? [].
+  split; lia.
+Qed.
+
+Lemma page_within_range_sz_base_inj p sz base1 base2 :
+  p.(page_sz) = sz →
+  page_within_range base1 sz p →
+  page_within_range base2 sz p →
+  base1 = base2.
+Proof.
+  unfold page_within_range.
+  intros <-.
+  lia.
+Qed.
+
+Global Arguments page_within_range : simpl never.
+Global Typeclasses Opaque page_within_range.
+
+
 (** Divide a page into pages of the next smaller page size *)
 (** Relational specification *)
 Definition subdivided_pages (p : page) (ps : list page) : Prop :=
-  let smaller_sz :=
-    match page_size_smaller p.(page_sz) with
-    | None => p.(page_sz)
-    | Some sz' => sz'
-    end
-  in
+  let smaller_sz := default p.(page_sz) (page_size_smaller p.(page_sz)) in
   let num_pages := page_size_multiplier p.(page_sz) in
   length ps = num_pages ∧
   (∀ (i : nat) p', ps !! i = Some p' →
    p'.(page_loc) = p.(page_loc) offset{usize}ₗ (i * page_size_in_words_nat smaller_sz) ∧
    p'.(page_sz) = smaller_sz).
+Global Arguments subdivided_pages : simpl never.
+Global Typeclasses Opaque subdivided_pages.
+
+Lemma subdivided_pages_lookup_size_lt i p ps p' sz' :
+  page_size_smaller p.(page_sz) = Some sz' →
+  subdivided_pages p ps →
+  ps !! i = Some p' →
+  page_sz p' = sz'.
+Proof.
+  unfold subdivided_pages.
+  intros -> [Ha Hb] Hlook.
+  apply Hb in Hlook. simpl in Hlook.
+  destruct Hlook; done.
+Qed.
+Lemma subdivided_pages_lookup_size_le i p ps p' :
+  subdivided_pages p ps →
+  ps !!
i = Some p' → + page_sz p' ≤ₚ page_sz p. +Proof. + unfold subdivided_pages. + intros [Ha Hb] Hlook. + apply Hb in Hlook. destruct Hlook as [? ->]. + destruct (page_size_smaller p.(page_sz)) eqn:Heq. + - eapply page_size_smaller_lt in Heq. left. done. + - simpl. right. + apply Z.cmp_equal_iff. done. +Qed. + +Lemma subdivided_pages_lookup_base_address (i : nat) p ps p' sz' : + page_size_smaller p.(page_sz) = Some sz' → + subdivided_pages p ps → + ps !! i = Some p' → + (page_loc p').2 = p.(page_loc).2 + (i * page_size_in_bytes_nat sz'). +Proof. + unfold subdivided_pages. + intros -> [Ha Hb] Hlook. + apply Hb in Hlook. simpl in Hlook. + destruct Hlook as [-> _]. + unfold offset_loc; simpl. + rewrite /page_size_in_bytes_nat. + rewrite bytes_per_int_usize. + lia. +Qed. + +Lemma subdivided_pages_length p ps : + subdivided_pages p ps → + length ps = page_size_multiplier p.(page_sz). +Proof. + intros [Ha _]. done. +Qed. + +Lemma subdivided_pages_page_within_range (i : nat) p ps p' : + page_size_smaller p.(page_sz) = Some p'.(page_sz) → + subdivided_pages p ps → + ps !! i = Some p' → + page_within_range (p.(page_loc).2 + i * page_size_in_bytes_Z p'.(page_sz)) p'.(page_sz) p'. +Proof. + intros ? Hsubdivided Hlook. + opose proof (subdivided_pages_lookup_base_address i _ _ _ _ _ Hsubdivided _) as Hl. + { done. } + { done. } + split; simpl; lia. +Qed. + +Lemma subdivided_pages_page_within_range' i p ps p' sz base : + page_size_smaller sz = Some p'.(page_sz) → + subdivided_pages p ps → + ps !! i = Some p' → + base = p.(page_loc).2 → + sz = p.(page_sz) → + page_within_range base sz p'. +Proof. + intros Hsmaller Hsubdivided Hlook -> ->. + + specialize (subdivided_pages_length _ _ Hsubdivided) as Hlen. + opose proof (lookup_lt_Some _ _ _ Hlook) as ?. + + eapply page_within_range_incl; first last. + { eapply subdivided_pages_page_within_range; done. } + { opose proof (page_size_multiplier_size_in_bytes p.(page_sz) p'.(page_sz) _) as Heq. + { rewrite Hsmaller//. } + rewrite Heq. + nia. } + { lia. } +Qed. + +Lemma subdivided_pages_lookup_inv p sz base ps p' (i : nat) (j : Z) : + page_size_smaller p.(page_sz) = Some sz → + subdivided_pages p ps → + ps !! i = Some p' → + p'.(page_sz) = sz → + base = p.(page_loc).2 → + page_within_range (base + j * page_size_in_bytes_Z sz) sz p' → + i = Z.to_nat j. +Proof. + intros Hsmaller Hsub Hlook <- -> Hran. + eapply subdivided_pages_page_within_range in Hsub; [ | done | done]. + + opose proof (page_within_range_sz_base_inj _ _ _ _ _ Hran Hsub) as Heq; first done. + specialize (page_size_in_bytes_nat_ge p'.(page_sz)). + nia. +Qed. + + (** Stronger functional specification *) Definition subdivide_page (p : page) : list page := match page_size_smaller p.(page_sz) with @@ -434,6 +856,22 @@ Proof. rewrite /page_size_in_bytes_nat. lia. Qed. +Lemma address_aligned_to_page_size_smaller addr sz smaller_sz : + addr `aligned_to` page_size_in_bytes_nat sz → + page_size_smaller sz = Some smaller_sz → + addr `aligned_to` page_size_in_bytes_nat smaller_sz. +Proof. + rewrite -!page_size_align_is_size. + unfold page_size_align. + intros ? Hsmall. + enough ((2^page_size_align_log sz)%nat = 2^page_size_align_log smaller_sz * (2^(page_size_align_log sz - page_size_align_log smaller_sz)))%nat as Heq. + { eapply (base.aligned_to_mul_inv _ _ (2^(page_size_align_log sz - page_size_align_log smaller_sz))). rewrite -Heq. done. } + destruct sz; simpl in Hsmall; try done. + all: injection Hsmall as <-. + all: simpl. + all: lia. +Qed. 
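+(* Editor's illustration (a sketch, not machine-checked and not part of the
+   original change): alignment propagates downwards through the size
+   hierarchy, e.g. any address aligned for a 1 GiB page is also aligned
+   for a 2 MiB page. *)
+Example address_aligned_1GiB_to_2MiB addr :
+  addr `aligned_to` page_size_in_bytes_nat Size1GiB →
+  addr `aligned_to` page_size_in_bytes_nat Size2MiB.
+Proof.
+  intros Ha. by eapply address_aligned_to_page_size_smaller.
+Qed.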
+ (** Lithium automation *) Global Instance simpl_exist_page Q : @@ -449,3 +887,31 @@ Global Instance simpl_forall_page Q : Proof. intros ?. intros []. done. Qed. + +Global Instance simpl_impl_page_size_smaller sz sz' : + SimplImpl false (page_size_smaller sz = Some sz') (λ T, page_size_smaller sz = Some sz' → page_size_variant sz' = page_size_variant sz - 1 → T). +Proof. + intros T. split. + - intros Ha Hsmall. specialize (page_size_smaller_page_size_variant _ _ Hsmall) as ?. + by apply Ha. + - intros Ha. intros. by apply Ha. +Qed. +Global Instance simpl_impl_page_size_larger sz sz' : + SimplImpl false (page_size_larger sz = Some sz') (λ T, page_size_larger sz = Some sz' → page_size_variant sz' = page_size_variant sz + 1 → T). +Proof. + intros T. split. + - intros Ha Hlarge. specialize (page_size_larger_page_size_variant _ _ Hlarge) as ?. + by apply Ha. + - intros Ha. intros. by apply Ha. +Qed. + +Global Instance simpl_both_page_size_smaller_none sz : + SimplBothRel (=) (page_size_smaller sz) None (sz = Size4KiB). +Proof. + split; destruct sz; simpl; done. +Qed. +Global Instance simpl_both_page_size_larger_none sz : + SimplBothRel (=) (page_size_larger sz) None (sz = Size128TiB). +Proof. + split; destruct sz; simpl; done. +Qed. diff --git a/verification/theories/page_allocator/page_allocator.v b/verification/theories/page_allocator/page_allocator.v index 2d3e8046..7f8e4b80 100644 --- a/verification/theories/page_allocator/page_allocator.v +++ b/verification/theories/page_allocator/page_allocator.v @@ -1,5 +1,6 @@ From refinedrust Require Import typing. From refinedrust Require Import ghost_var_dfrac. +From rrstd.cmp.theories Require Import ordering. From ace.theories.page_allocator Require Import page. (** * Page allocator ghost state *) @@ -50,15 +51,38 @@ End page_allocator. (** * Page allocator invariants *) Inductive node_allocation_state := - (** This page node is completely allocated *) - | PageTokenUnavailable - (** The page token in this node is available *) - | PageTokenAvailable - (** The page node is partially available, with a page of the given size being allocable *) - | PageTokenPartiallyAvailable (allocable_sz : page_size) +(** This page node is completely allocated *) +| PageTokenUnavailable +(** The page token in this node is available *) +| PageTokenAvailable +(** The page node is partially available, with a page of the given size being allocable *) +| PageTokenPartiallyAvailable (allocable_sz : page_size) . Global Instance node_allocation_state_eqdec : EqDecision node_allocation_state. Proof. solve_decision. Defined. +Global Instance node_allocation_state_inhabited : Inhabited node_allocation_state. +Proof. apply (populate PageTokenUnavailable). Qed. + +Global Instance node_allocation_state_join : Join node_allocation_state := λ s1 s2, + match s1, s2 with + | PageTokenAvailable, _ => PageTokenAvailable + | _, PageTokenAvailable => PageTokenAvailable + | PageTokenPartiallyAvailable sz1, PageTokenPartiallyAvailable sz2 => + PageTokenPartiallyAvailable (sz1 ⊔ sz2) + | PageTokenPartiallyAvailable sz1, _ => PageTokenPartiallyAvailable sz1 + | _, PageTokenPartiallyAvailable sz1 => PageTokenPartiallyAvailable sz1 + | _, _ => PageTokenUnavailable + end. 
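+(* Editor's illustration (a sketch, not machine-checked and not part of the
+   original change): the join on allocation states favours availability; a
+   partially available node joined with an unavailable one keeps its
+   allocable size. *)
+Example node_allocation_state_join_partial_unavailable sz :
+  PageTokenPartiallyAvailable sz ⊔ PageTokenUnavailable
+  = PageTokenPartiallyAvailable sz.
+Proof. done. Qed.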
+Global Instance node_allocation_state_meet : Meet node_allocation_state := λ s1 s2, + match s1, s2 with + | PageTokenUnavailable, _ => PageTokenUnavailable + | _, PageTokenUnavailable => PageTokenUnavailable + | PageTokenPartiallyAvailable sz1, PageTokenPartiallyAvailable sz2 => + PageTokenPartiallyAvailable (sz1 ⊓ sz2) + | PageTokenPartiallyAvailable sz1, _ => PageTokenPartiallyAvailable sz1 + | _, PageTokenPartiallyAvailable sz1 => PageTokenPartiallyAvailable sz1 + | _, _ => PageTokenAvailable + end. (** Our logical representation of storage nodes *) Record page_storage_node : Type := mk_page_node { @@ -73,6 +97,16 @@ Record page_storage_node : Type := mk_page_node { (* whether the child nodes have been initialized *) children_initialized : bool; }. +Global Canonical Structure page_storage_nodeRT := directRT page_storage_node. + +Global Instance page_storage_node_inh : Inhabited page_storage_node. +Proof. + constructor. eapply mk_page_node. + all: apply inhabitant. +Qed. + +Definition page_node_update_state (node : page_storage_node) (new_state : node_allocation_state) : page_storage_node := + mk_page_node node.(max_node_size) node.(base_address) new_state node.(children_initialized). (** Compute the base address of a child node *) Definition child_base_address (parent_base_address : Z) (child_size : page_size) (child_index : Z) : Z := @@ -88,11 +122,56 @@ Definition page_storage_node_children_wf (parent_base_address : Z) (parent_node_ (* Then all children are sorted *) (∀ (i : nat) child, children !! i = Some child → child.(max_node_size) = child_node_size ∧ - child.(base_address) = child_base_address parent_base_address parent_node_size i)) ∧ + child.(base_address) = child_base_address parent_base_address child_node_size i)) ∧ (* If there can't be any children, there are no children *) (page_size_smaller parent_node_size = None → children = []) . +Lemma page_storage_node_children_wf_child_lookup (i : nat) sz child_node_size base children child : + page_size_smaller sz = Some child_node_size → + page_storage_node_children_wf base sz children → + children !! i = Some child → + max_node_size child = child_node_size ∧ + base_address child = child_base_address base child_node_size i. +Proof. + intros Hsmaller Hchildren Hchild. + destruct Hchildren as [Hchildren _]. + ospecialize (Hchildren _ _ _ _ Hchild); first done. + done. +Qed. +Lemma page_storage_node_children_wf_upd_state base sz children s : + page_storage_node_children_wf base sz children → + page_storage_node_children_wf base sz ((λ node, page_node_update_state node s) <$> children). +Proof. + unfold page_storage_node_children_wf. + destruct (page_size_smaller sz) as [smaller_size | ] eqn:Heq. + - simpl. intros [Ha _]. split; last done. + intros child_sz [=<-] i child Hlook. + apply list_lookup_fmap_Some_1 in Hlook as (node & -> & Hlook). + ospecialize (Ha _ _ _ _ Hlook). + { done. } + destruct Ha as [<- Ha]. + done. + - intros [_ Ha]. + split; first done. + intros _. rewrite Ha; done. +Qed. +Lemma page_storage_node_children_wf_insert base sz children child' (i : nat) : + max_node_size child' = max_node_size (children !!! i) → + base_address child' = base_address (children !!! i) → + page_storage_node_children_wf base sz children → + page_storage_node_children_wf base sz (<[i := child']> children). +Proof. + intros Hsz Haddr [Hwf1 Hwf2]. split. + - intros child_sz Hsmaller j ? Hlook. + apply list_lookup_insert_Some in Hlook as [(<- & -> & ?) | (? & ?)]; first last. + { by eapply Hwf1. } + rewrite Haddr Hsz. 
+ opose proof (lookup_lt_is_Some_2 children i _) as (child' & ?); first done. + erewrite list_lookup_total_correct; last done. + by eapply Hwf1. + - intros Hnone. specialize (Hwf2 Hnone). rewrite Hwf2. done. +Qed. (** Computes the maximum size this page node can allocate *) Definition page_node_can_allocate (node : page_storage_node) : option page_size := @@ -103,27 +182,17 @@ Definition page_node_can_allocate (node : page_storage_node) : option page_size end. (** The logical invariant on a page node *) -Definition page_storage_node_invariant +Definition page_storage_node_invariant_case (node : page_storage_node) (max_sz : option page_size) (maybe_page_token : option page) (children : list page_storage_node) := - - (* The children, if any, are well-formed *) - page_storage_node_children_wf node.(base_address) node.(max_node_size) children ∧ - (* the base address is suitably aligned *) - (page_size_align node.(max_node_size) | node.(base_address))%Z ∧ - - (* initialization of child nodes *) - (if node.(children_initialized) then length children = page_size_multiplier node.(max_node_size) else True) ∧ - - (* invariant depending on the allocation state: *) if decide (node.(allocation_state) = PageTokenUnavailable) then (* No allocation is possible *) - maybe_page_token = None ∧ max_sz = None ∧ + maybe_page_token = None ∧ max_sz = None (* all children are unavailable *) (* TODO do we need this *) - Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else if decide (node.(allocation_state) = PageTokenAvailable) then (* This node is completely available *) @@ -134,11 +203,9 @@ Definition page_storage_node_invariant max_sz = Some (node.(max_node_size)) ∧ (* the token spans the whole node *) token.(page_loc).2 = node.(base_address) ∧ - token.(page_sz) = node.(max_node_size) ∧ - + token.(page_sz) = node.(max_node_size) (* all children are unavailable *) - (* TODO: do we need this? *) - Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children + (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else (* This node is partially available with initialized children *) @@ -147,6 +214,7 @@ Definition page_storage_node_invariant ∃ allocable_sz, node.(allocation_state) = PageTokenPartiallyAvailable allocable_sz ∧ max_sz = Some allocable_sz ∧ + allocable_sz <ₚ node.(max_node_size) ∧ (* children need to be initialized *) node.(children_initialized) = true ∧ @@ -157,6 +225,44 @@ Definition page_storage_node_invariant page_node_can_allocate child = Some allocable_sz . +Lemma page_storage_node_invariant_case_can_allocate node max_sz tok children : + page_storage_node_invariant_case node max_sz tok children → + page_node_can_allocate node = max_sz. +Proof. + unfold page_storage_node_invariant_case, page_node_can_allocate. + destruct (allocation_state node) eqn:Heq; simpl; naive_solver. +Qed. +Lemma page_node_invariant_case_sized_bounded node max_sz tok children : + page_storage_node_invariant_case node max_sz tok children → + max_sz ≤o{option_cmp page_size_cmp} Some (max_node_size node). +Proof. + unfold page_storage_node_invariant_case, page_node_can_allocate. + destruct (allocation_state node) eqn:Heq; simpl; solve_goal. +Qed. + +Global Typeclasses Opaque page_storage_node_children_wf. +Global Typeclasses Opaque page_storage_node_invariant_case. + +Global Arguments page_storage_node_children_wf : simpl never. 
+Global Arguments page_storage_node_invariant_case : simpl never. + + +Definition page_storage_node_invariant + (node : page_storage_node) + (max_sz : option page_size) (maybe_page_token : option page) (children : list page_storage_node) := + + (* The children, if any, are well-formed *) + name_hint "INV_WF" (page_storage_node_children_wf node.(base_address) node.(max_node_size) children) ∧ + (* the base address is suitably aligned *) + (page_size_align node.(max_node_size) | node.(base_address))%Z ∧ + + (* initialization of child nodes *) + (name_hint "INV_INIT_CHILDREN" (if node.(children_initialized) then length children = page_size_multiplier node.(max_node_size) else children = [])) ∧ + + (* invariant depending on the allocation state: *) + name_hint "INV_CASE" (page_storage_node_invariant_case node max_sz maybe_page_token children) +. + Lemma page_storage_node_invariant_empty node_size base_address : (page_size_align node_size | base_address) → page_storage_node_invariant (mk_page_node node_size base_address PageTokenUnavailable false) None None []. @@ -166,7 +272,105 @@ Proof. apply Nat2Z.divide. done. Qed. -(** States that the page is in the given memory range *) -(* TODO unify all the memory range stuff *) -Definition page_within_range (base_address : Z) (sz : page_size) (p : page) : Prop := - (base_address ≤ p.(page_loc).2 ∧ p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz) < base_address + page_size_in_bytes_Z sz)%Z. +Lemma page_storage_node_invariant_case_available_inv node max_sz maybe_tok children : + node.(allocation_state) = PageTokenAvailable → + page_storage_node_invariant_case node max_sz maybe_tok children → + ∃ tok, maybe_tok = Some tok ∧ max_sz = Some node.(max_node_size) ∧ tok.(page_loc).2 = node.(base_address) ∧ tok.(page_sz) = node.(max_node_size). +Proof. + unfold page_storage_node_invariant_case. + intros ->. naive_solver. +Qed. + +Global Instance page_storage_node_invariant_simpl_available node max_sz maybe_tok children `{!TCDone (node.(allocation_state) = PageTokenAvailable)} `{!IsVar max_sz} `{!IsVar maybe_tok} : + SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = Some node.(max_node_size) → + ∀ tok, + maybe_tok = Some tok → + tok.(page_loc).2 = node.(base_address) → + tok.(page_sz) = node.(max_node_size) → + T). +Proof. + rename select (TCDone _) into Hstate. unfold TCDone in Hstate. + intros T. split. + - intros Ha Hinv. + specialize (Ha Hinv). + move: Hinv. + unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. intros (? & ? & ? & ? & ?). + eapply Ha; done. + - naive_solver. +Qed. + +Lemma page_storage_node_invariant_has_token node max_sz tok children : + page_storage_node_invariant_case node max_sz (Some tok) children → + node.(allocation_state) = PageTokenAvailable ∧ + node.(max_node_size) = tok.(page_sz) ∧ + node.(base_address) = tok.(page_loc).2. +Proof. + unfold page_storage_node_invariant_case. + repeat case_decide; simpl. + all: naive_solver. +Qed. + +Lemma page_storage_node_invariant_not_available node tok children : + page_storage_node_invariant_case node None tok children → + node.(allocation_state) = PageTokenUnavailable ∧ + tok = None. +Proof. + unfold page_storage_node_invariant_case. + repeat case_decide; simpl. + all: naive_solver. +Qed. 
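+(* Editor's illustration (a sketch, not machine-checked and not part of the
+   original change): instantiating page_storage_node_invariant_empty, a
+   freshly created, unavailable 4 KiB node with an aligned base satisfies
+   the node invariant. *)
+Example empty_4KiB_node_invariant base :
+  (page_size_align Size4KiB | base) →
+  page_storage_node_invariant (mk_page_node Size4KiB base PageTokenUnavailable false) None None [].
+Proof.
+  intros Halign. by apply page_storage_node_invariant_empty.
+Qed.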
+ +Lemma page_storage_node_invariant_case_partially_available_inv node max_sz maybe_tok children sz : + node.(allocation_state) = PageTokenPartiallyAvailable sz → + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = Some sz ∧ maybe_tok = None. +Proof. + unfold page_storage_node_invariant_case. + intros ->. naive_solver. +Qed. + +Global Instance page_storage_node_invariant_simpl_partially_available node max_sz maybe_tok children sz `{!TCDone (node.(allocation_state) = PageTokenPartiallyAvailable sz)} `{!IsVar max_sz} `{!IsVar maybe_tok} : + SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = Some sz → maybe_tok = None → + T). +Proof. + rename select (TCDone _) into Hstate. unfold TCDone in Hstate. + intros T. split. + - intros Ha Hinv. apply Ha. + + done. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + - naive_solver. +Qed. + +Lemma page_storage_node_invariant_case_unavailable_inv node max_sz maybe_tok children : + node.(allocation_state) = PageTokenUnavailable → + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = None ∧ maybe_tok = None. +Proof. + unfold page_storage_node_invariant_case. + intros ->. naive_solver. +Qed. + +Global Instance page_storage_node_invariant_simpl_unavailable node max_sz maybe_tok children `{!TCDone (node.(allocation_state) = PageTokenUnavailable)} `{!IsVar max_sz} `{!IsVar maybe_tok} : + SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz = None → maybe_tok = None → + T). +Proof. + rename select (TCDone _) into Hstate. unfold TCDone in Hstate. + intros T. split. + - intros Ha Hinv. apply Ha. + + done. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + + move: Hinv. unfold page_storage_node_invariant_case. rewrite Hstate. + simpl. naive_solver. + - naive_solver. +Qed.
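+
+(* Editor's illustration (a sketch, not machine-checked and not part of the
+   original change): combining the inversion lemmas above, an unavailable
+   node can never hold a page token. *)
+Example unavailable_node_has_no_token node max_sz tok children :
+  node.(allocation_state) = PageTokenUnavailable →
+  page_storage_node_invariant_case node max_sz (Some tok) children →
+  False.
+Proof.
+  intros Hstate Hinv.
+  apply (page_storage_node_invariant_case_unavailable_inv _ _ _ _ Hstate) in Hinv.
+  naive_solver.
+Qed.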