diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml index e01f45d1..4e8b10b3 100644 --- a/.github/workflows/verify.yml +++ b/.github/workflows/verify.yml @@ -17,7 +17,7 @@ jobs: - name: Install opam dependencies run: sudo apt update -y && sudo apt install -y pkg-config git rsync tar unzip m4 time curl ocaml build-essential bubblewrap gawk libgmp-dev python3 python-is-python3 libmpfr-dev && python3 -m pip install setuptools - name: Install opam - run: curl "https://github.com/ocaml/opam/releases/download/2.1.5/opam-2.1.5-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam + run: curl "https://github.com/ocaml/opam/releases/download/2.5.0/opam-2.5.0-x86_64-linux" -Lo /usr/local/bin/opam && chmod +x /usr/local/bin/opam - name: Setup opam run: opam init -y - name: Setup opam switch @@ -62,13 +62,16 @@ jobs: run: | python3 .github/workflows/prune_dune_modules.py verification/rust_proofs/ace/proofs/dune \ proof_core_page_allocator_allocator_PageAllocator_add_memory_region \ + proof_core_page_allocator_allocator_PageAllocator_release_pages_closure0 \ proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \ + proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token \ proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary \ + proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens \ proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size \ proof_core_page_allocator_page_Page_T_write \ proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read \ - proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token \ - proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide + proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide \ + proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge - name: Compile Rocq proofs run: | ( diff --git 
a/rust-toolchain.toml b/rust-toolchain.toml index b152637b..858b58f9 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2025-09-12" +channel = "nightly-2026-03-23" targets = [ "riscv64gc-unknown-none-elf" ] diff --git a/security-monitor/rust-crates/pointers_utility/src/lib.rs b/security-monitor/rust-crates/pointers_utility/src/lib.rs index 1ab531af..3c0f98b1 100644 --- a/security-monitor/rust-crates/pointers_utility/src/lib.rs +++ b/security-monitor/rust-crates/pointers_utility/src/lib.rs @@ -16,9 +16,9 @@ mod error; use core::mem::size_of; pub use crate::error::PointerError; -/// Calculates the offset in bytes between two pointers. +/// Calculates the offset in bytes between two pointers. #[rr::only_spec] -#[rr::returns("wrap_to_it (pointer1.2 - pointer2.2) isize")] +#[rr::returns("wrap_to_it (pointer1.(loc_a) - pointer2.(loc_a)) isize")] pub fn ptr_byte_offset(pointer1: *const usize, pointer2: *const usize) -> isize { // TODO: we should use wrapping arithmetic here, as it might overflow (pointer1.addr() as isize) - (pointer2.addr() as isize) @@ -38,7 +38,7 @@ pub fn ptr_align(pointer: *mut usize, align_in_bytes: usize, owned_region_end: * /// of size one, if the original pointer is valid. Additional checks are required for making /// larger memory accesses. #[rr::ok] -#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")] +#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")] #[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")] pub fn ptr_byte_add_mut( pointer: *mut usize, offset_in_bytes: usize, owned_region_end: *const usize, @@ -61,7 +61,7 @@ pub fn ptr_byte_add_mut( /// of size one, if the original pointer is valid. Additional checks are required for making /// larger memory accesses. 
#[rr::ok] -#[rr::requires("pointer.2 + offset_in_bytes < owned_region_end.2")] +#[rr::requires("pointer.(loc_a) + offset_in_bytes < owned_region_end.(loc_a)")] #[rr::ensures("ret = (pointer +ₗ offset_in_bytes)")] pub fn ptr_byte_add( pointer: *const usize, offset_in_bytes: usize, owned_region_end: *const usize, diff --git a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs index 8ac166c3..eeb77ff9 100644 --- a/security-monitor/src/core/architecture/riscv/mmu/page_size.rs +++ b/security-monitor/src/core/architecture/riscv/mmu/page_size.rs @@ -14,6 +14,7 @@ #[rr::derive_instantiate("PEq_refl" := #proof "intros ??; solve_goal")] #[rr::derive_instantiate("PEq_sym" := #proof "intros ???; solve_goal")] #[rr::derive_instantiate("PEq_trans" := #proof "intros ????; solve_goal")] +#[rr::derive_instantiate("PEq_leibniz" := #proof "intros ? [] []; simpl; done")] // POrd #[rr::derive_instantiate("POrd" := "λ a b, Some (page_size_cmp a b)")] #[rr::derive_instantiate("POrd_eq_cons" := #proof "intros ? [] []; simpl; done")] @@ -23,7 +24,6 @@ #[rr::derive_instantiate("Ord_lt_trans" := #proof "intros ????; solve_goal")] #[rr::derive_instantiate("Ord_eq_trans" := #proof "intros ????; solve_goal")] #[rr::derive_instantiate("Ord_gt_trans" := #proof "intros ????; solve_goal")] -#[rr::derive_instantiate("Ord_leibniz" := #proof "intros ? 
[] []; simpl; done")] #[rr::derive_instantiate("Ord_antisym" := #proof "intros ???; solve_goal")] pub enum PageSize { #[rr::pattern("Size4KiB")] diff --git a/security-monitor/src/core/memory_layout/confidential_memory_address.rs b/security-monitor/src/core/memory_layout/confidential_memory_address.rs index 3955f6bd..c74a4dcf 100644 --- a/security-monitor/src/core/memory_layout/confidential_memory_address.rs +++ b/security-monitor/src/core/memory_layout/confidential_memory_address.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::error::Error; use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset}; +use super::MemoryLayout; /// The wrapper over a raw pointer that is guaranteed to be an address located in the confidential memory region. #[repr(transparent)] @@ -15,7 +16,7 @@ use pointers_utility::{ptr_byte_add_mut, ptr_byte_offset}; #[rr::exists("MEMORY_CONFIG")] #[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Invariant: The address is in the confidential part of the memory layout. -#[rr::invariant("(MEMORY_CONFIG.(conf_start).2 ≤ l.2 < MEMORY_CONFIG.(conf_end).2)%Z")] +#[rr::invariant("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ l.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")] pub struct ConfidentialMemoryAddress(#[rr::field("l")] *mut usize); /// Verification: We require the ghost state for the global memory layout to be available. @@ -25,7 +26,7 @@ impl ConfidentialMemoryAddress { /// Precondition: The global memory layout is initialized. #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: The address is in the confidential region of the global memory layout. 
- #[rr::requires("(MEMORY_CONFIG.(conf_start).2 ≤ address.2 < MEMORY_CONFIG.(conf_end).2)%Z")] + #[rr::requires("(MEMORY_CONFIG.(conf_start).(loc_a) ≤ address.(loc_a) < MEMORY_CONFIG.(conf_end).(loc_a))%Z")] #[rr::returns("address")] // TODO this should be unsafe pub(super) fn new(address: *mut usize) -> Self { @@ -37,12 +38,14 @@ impl ConfidentialMemoryAddress { self.0 as *const u8 } - #[rr::returns("self.2")] + + #[rr::returns("self.(loc_a)")] pub fn as_usize(&self) -> usize { self.0.addr() } - #[rr::only_spec] + // Precondition: The alignment is a power of two. + #[rr::requires("is_power_of_two (Z.to_nat align)")] /// Postcondition: Verifies that the pointer is aligned to the given alignment. #[rr::returns("bool_decide (self `aligned_to` (Z.to_nat align))")] pub fn is_aligned_to(&self, align: usize) -> bool { @@ -50,7 +53,7 @@ impl ConfidentialMemoryAddress { } /// Postcondition: Compute the offset. - #[rr::returns("wrap_to_it (pointer.2 - self.2) isize")] + #[rr::returns("wrap_to_it (pointer.(loc_a) - self.(loc_a)) isize")] pub fn offset_from(&self, pointer: *const usize) -> isize { ptr_byte_offset(pointer, self.0) } @@ -61,20 +64,23 @@ impl ConfidentialMemoryAddress { /// # Safety /// /// The caller must ensure that the address at given offset is still within the confidential memory region. - // TODO: can we require the offset to be a multiple of usize? (woz: yes we can) - #[rr::only_spec] #[rr::params("MEMORY_CONFIG")] /// Precondition: The global memory layout is initialized. - #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] #[rr::ok] /// Precondition: The offset address is in the given range. - #[rr::requires("self.2 + offset_in_bytes < upper_bound.2")] + #[rr::requires("self.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")] /// Precondition: The maximum (and thus the offset address) is in the confidential memory range. 
- #[rr::requires("upper_bound.2 ≤ MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("upper_bound.(loc_a) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")] /// Postcondition: The offset pointer is in the confidential memory range. #[rr::ensures("ret = self +ₗ offset_in_bytes")] pub fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result { - let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInConfidentialMemory())?; + let memory_layout = MemoryLayout::read(); + ensure!(upper_bound <= memory_layout.confidential_memory_end, Error::AddressNotInConfidentialMemory())?; + let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err( + #[rr::verify] + |_| Error::AddressNotInConfidentialMemory(), + )?; Ok(Self::new(pointer)) } diff --git a/security-monitor/src/core/memory_layout/mod.rs b/security-monitor/src/core/memory_layout/mod.rs index a66757b1..cf977620 100644 --- a/security-monitor/src/core/memory_layout/mod.rs +++ b/security-monitor/src/core/memory_layout/mod.rs @@ -28,11 +28,11 @@ static MEMORY_LAYOUT: Once = Once::new(); /// non-confidential memory. #[rr::refined_by("ml" : "memory_layout")] /// Invariant: The starts of the region have addresses less-or-equal-to the ends of the regions. -#[rr::invariant("ml.(non_conf_start).2 ≤ ml.(non_conf_end).2")] -#[rr::invariant("ml.(conf_start).2 ≤ ml.(conf_end).2")] +#[rr::invariant("ml.(non_conf_start).(loc_a) ≤ ml.(non_conf_end).(loc_a)")] +#[rr::invariant("ml.(conf_start).(loc_a) ≤ ml.(conf_end).(loc_a)")] /// Invariant: the non-confidential memory region comes before the confidential memory region. 
// TODO: this could be generalized to the regions being disjoint -#[rr::invariant("ml.(non_conf_end).2 ≤ ml.(conf_start).2")] +#[rr::invariant("ml.(non_conf_end).(loc_a) ≤ ml.(conf_start).(loc_a)")] /// Invariant: the bounds of the confidential memory region are aligned to 4KiB pages #[rr::invariant("ml.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB)")] #[rr::invariant("ml.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB)")] @@ -68,11 +68,11 @@ impl MemoryLayout { /// This function must be called only once by the initialization procedure during the boot of the system. #[rr::only_spec] /// Precondition: The non-confidential memory should have positive size. - #[rr::requires("non_confidential_memory_start.2 < non_confidential_memory_end.2")] + #[rr::requires("non_confidential_memory_start.(loc_a) < non_confidential_memory_end.(loc_a)")] /// Precondition: The non-condidential memory should preceed and not overlap with confidential memory. - #[rr::requires("non_confidential_memory_end.2 ≤ confidential_memory_start.2")] + #[rr::requires("non_confidential_memory_end.(loc_a) ≤ confidential_memory_start.(loc_a)")] /// Precondition: The confidential memory should have positive size. - #[rr::requires("confidential_memory_start.2 < confidential_memory_end.2")] + #[rr::requires("confidential_memory_start.(loc_a) < confidential_memory_end.(loc_a)")] /// Precondition: The global MEMORY_LAYOUT has not been initialized yet. 
#[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" None")] /// Postcondition: There exists a result -- failure is always an option @@ -80,7 +80,7 @@ impl MemoryLayout { /// Postcondition: failure due to low memory can occur if there is no sufficiently aligned /// confidential address #[rr::ensures( - "if_Err res (λ err, (confidential_memory_start.2 - confidential_memory_end.2 ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)" + "if_Err res (λ err, (confidential_memory_start.(loc_a) - confidential_memory_end.(loc_a) ≤ page_size_in_bytes_Z Size4KiB)%Z ∧ err = error_Error_NotEnoughMemory)" )] /// Postcondition: if we return Ok, we get a new confidential memory range that is correctly /// aligned for the smallest page size and is a subrange of [conf_start, conf_end) @@ -90,9 +90,9 @@ impl MemoryLayout { maybe_mem_layout = Some mem_layout ∧ mem_layout.(conf_start) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧ mem_layout.(conf_end) `aligned_to` (page_size_in_bytes_nat Size4KiB) ∧ - confidential_memory_start.2 ≤ mem_layout.(conf_start).2 ∧ - mem_layout.(conf_end).2 ≤ confidential_memory_end.2 ∧ - mem_layout.(conf_start).2 ≤ mem_layout.(conf_end).2 ∧ + confidential_memory_start.(loc_a) ≤ mem_layout.(conf_start).(loc_a) ∧ + mem_layout.(conf_end).(loc_a) ≤ confidential_memory_end.(loc_a) ∧ + mem_layout.(conf_start).(loc_a) ≤ mem_layout.(conf_end).(loc_a) ∧ ok = *[ mem_layout.(conf_start); mem_layout.(conf_end)])%Z" )] /// Postcondition: if we return Ok, the MEMORY_LAYOUT has been initialized. @@ -138,7 +138,7 @@ impl MemoryLayout { #[rr::only_spec] #[rr::ok] /// Precondition: The offset address is in confidential memory. - #[rr::requires("address.2 + offset_in_bytes < self.(conf_end).2")] + #[rr::requires("address.(loc_a) + offset_in_bytes < self.(conf_end).(loc_a)")] /// Postcondition: The offset pointer is in confidential memory. 
#[rr::ensures("ret = address +ₗ offset_in_bytes")] pub fn confidential_address_at_offset( @@ -152,9 +152,9 @@ impl MemoryLayout { #[rr::only_spec] #[rr::ok] /// Precondition: The offset address is in confidential memory. - #[rr::requires("address.2 + offset_in_bytes < upper_bound.2")] + #[rr::requires("address.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")] /// Precondition: The bounds we are checking are within confidential memory. - #[rr::requires("upper_bound.2 ≤ self.(conf_end).2")] + #[rr::requires("upper_bound.(loc_a) ≤ self.(conf_end).(loc_a)")] /// Postcondition: Then we can correctly offset the address and ensure it is in confidential /// memory. #[rr::ensures("ret = address +ₗ offset_in_bytes")] @@ -170,7 +170,7 @@ impl MemoryLayout { #[rr::only_spec] #[rr::ok] /// Precondition: The offset address is in non-confidential memory. - #[rr::requires("address.2 + offset_in_bytes < self.(non_conf_end).2")] + #[rr::requires("address.(loc_a) + offset_in_bytes < self.(non_conf_end).(loc_a)")] /// Postcondition: Then we can correctly offset the address and ensure it is in /// non-confidential memory. #[rr::ensures("ret = address +ₗ offset_in_bytes")] @@ -183,7 +183,7 @@ impl MemoryLayout { /// Returns true if the raw pointer is inside the non-confidential memory. #[rr::only_spec] - #[rr::returns("bool_decide (self.(non_conf_start).2 ≤ address.2 ∧ address.2 < self.(non_conf_end).2)")] + #[rr::returns("bool_decide (self.(non_conf_start).(loc_a) ≤ address.(loc_a) ∧ address.(loc_a) < self.(non_conf_end).(loc_a))")] pub fn is_in_non_confidential_range(&self, address: *const usize) -> bool { self.non_confidential_memory_start as *const usize <= address && address < self.non_confidential_memory_end } @@ -219,7 +219,7 @@ impl MemoryLayout { /// Get the boundaries of confidential memory as a (start, end) tuple. 
#[rr::only_spec] - #[rr::returns(" *[self.(conf_start).2; self.(conf_end).2]")] + #[rr::returns(" *[self.(conf_start).(loc_a); self.(conf_end).(loc_a)]")] pub fn confidential_memory_boundary(&self) -> (usize, usize) { (self.confidential_memory_start as usize, self.confidential_memory_end as usize) } diff --git a/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs b/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs index 810fec67..a8fa77ef 100644 --- a/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs +++ b/security-monitor/src/core/memory_layout/non_confidential_memory_address.rs @@ -16,20 +16,19 @@ use pointers_utility::ptr_byte_add_mut; #[rr::exists("MEMORY_CONFIG")] #[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Invariant: The address is in non-confidential memory. -#[rr::invariant("(MEMORY_CONFIG.(non_conf_start).2 ≤ l.2 < MEMORY_CONFIG.(non_conf_end).2)%Z")] +#[rr::invariant("(MEMORY_CONFIG.(non_conf_start).(loc_a) ≤ l.(loc_a) < MEMORY_CONFIG.(non_conf_end).(loc_a))%Z")] pub struct NonConfidentialMemoryAddress(#[rr::field("l")] *mut usize); #[rr::context("onceG Σ memory_layout")] impl NonConfidentialMemoryAddress { /// Constructs an address in a non-confidential memory. Returns error if the address is outside non-confidential /// memory. - #[rr::trust_me] #[rr::params("bounds")] /// Precondition: The global memory layout has been initialized. - #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some bounds)")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some bounds)")] #[rr::ok] /// Precondition: The location is in non-confidential memory. - #[rr::requires("bounds.(non_conf_start).2 ≤ address.2 < bounds.(non_conf_end).2")] + #[rr::requires("bounds.(non_conf_start).(loc_a) ≤ address.(loc_a) < bounds.(non_conf_end).(loc_a)")] /// Postcondition: The non-confidential memory address is correctly initialized. 
#[rr::ensures("ret = address")] pub fn new(address: *mut usize) -> Result { @@ -46,19 +45,20 @@ impl NonConfidentialMemoryAddress { /// /// The caller must ensure that the address advanced by the offset is still within the non-confidential memory /// region. - // TODO: can we require the offset to be a multiple of usize? - #[rr::only_spec] - #[rr::params("MEMORY_CONFIG")] - /// Precondition: The offset address is in the given range. - #[rr::requires("self.2 + offset_in_bytes < upper_bound.2")] + #[rr::params("MEMORY_CONFIG" : "memory_layout")] /// Precondition: The global memory layout is initialized. - #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::ok] + /// Precondition: The offset address is in the given range. + #[rr::requires("self.(loc_a) + offset_in_bytes < upper_bound.(loc_a)")] /// Precondition: The maximum (and thus the offset address) is in the non-confidential memory range. - #[rr::requires("upper_bound.2 < MEMORY_CONFIG.(non_conf_end).2")] + #[rr::requires("upper_bound.(loc_a) ≤ MEMORY_CONFIG.(non_conf_end).(loc_a)")] /// Postcondition: The offset pointer is in the non-confidential memory range. 
- #[rr::returns("Ok(self +ₗ offset_in_bytes)")] + #[rr::ensures("ret = self +ₗ offset_in_bytes")] pub unsafe fn add(&self, offset_in_bytes: usize, upper_bound: *const usize) -> Result { - let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(|_| Error::AddressNotInNonConfidentialMemory())?; + let memory_layout = MemoryLayout::read(); + ensure!(upper_bound <= memory_layout.non_confidential_memory_end, Error::AddressNotInNonConfidentialMemory())?; + let pointer = ptr_byte_add_mut(self.0, offset_in_bytes, upper_bound).map_err(#[rr::verify] |_| Error::AddressNotInNonConfidentialMemory())?; Ok(NonConfidentialMemoryAddress(pointer)) } @@ -68,6 +68,10 @@ impl NonConfidentialMemoryAddress { /// /// We need to ensure the pointer is not used by two threads simultaneously. See `ptr::read_volatile` for safety /// concerns. + #[rr::params("z", "lft_el")] + #[rr::unsafe_elctx("[ϝ ⊑ₑ lft_el]")] + #[rr::requires(#iris "self ◁ₗ[π, Shared lft_el] #z @ ◁ int usize")] + #[rr::returns("z")] pub unsafe fn read(&self) -> usize { unsafe { self.0.read_volatile() } } @@ -78,6 +82,9 @@ impl NonConfidentialMemoryAddress { /// /// We need to ensure the pointer is not used by two threads simultaneously. See `ptr::write_volatile` for safety /// concerns. + #[rr::params("z")] + #[rr::requires(#type "self" : "z" @ "int usize")] + #[rr::ensures(#type "self" : "value" @ "int usize")] pub unsafe fn write(&self, value: usize) { unsafe { self.0.write_volatile(value) }; } @@ -87,12 +94,9 @@ impl NonConfidentialMemoryAddress { self.0 } - #[rr::only_spec] - #[rr::returns("self.2")] + #[rr::returns("self.(loc_a)")] pub fn usize(&self) -> usize { // TODO: check if we need to expose the pointer. - // If not, use addr() instead. 
- // self.0.addr() - self.0 as usize + self.0.addr() } } diff --git a/security-monitor/src/core/page_allocator/allocator.rs b/security-monitor/src/core/page_allocator/allocator.rs index 6a25acac..928f7348 100644 --- a/security-monitor/src/core/page_allocator/allocator.rs +++ b/security-monitor/src/core/page_allocator/allocator.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 #![rr::include("option")] #![rr::include("alloc")] -#![rr::include("btreemap")] #![rr::include("vec")] #![rr::include("spin")] #![rr::import("ace.theories.page_allocator", "page_allocator")] @@ -15,8 +14,10 @@ use alloc::vec; use alloc::vec::Vec; use spin::{Once, RwLock, RwLockWriteGuard}; +use crate::rr_wrappers::*; + /// A static global structure containing unallocated pages. Once<> guarantees that the PageAllocator can only be initialized once. -//#[rr::name("PAGE_ALLOCATOR")] +#[rr::name("PAGE_ALLOCATOR")] static PAGE_ALLOCATOR: Once> = Once::new(); /// This is a root node that represents the largest possible page size. Because of this implementation, there can be a maximum one page @@ -25,6 +26,7 @@ static PAGE_ALLOCATOR: Once> = Once::new(); /// Specification: #[rr::refined_by("()" : "unit")] #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] /// Invariant: We abstract over the root node #[rr::exists("node" : "page_storage_node", "base_addr" : "Z", "node_size" : "page_size")] /// Invariant: the allocator tree covers the first 128TiB of memory @@ -44,10 +46,10 @@ pub struct PageAllocator { #[rr::context("onceG Σ memory_layout")] #[rr::context("onceG Σ unit")] +#[rr::context("MachineConfig")] impl PageAllocator { const NOT_INITIALIZED: &'static str = "Bug. Page allocator not initialized."; - #[rr::only_spec] /// Initializes the global memory allocator with the given memory region as confidential memory. Must be called only once during the /// system initialization. 
/// @@ -63,18 +65,20 @@ impl PageAllocator { #[rr::requires("memory_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] #[rr::requires("memory_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] /// Precondition: The memory range is positive. - #[rr::requires("memory_start.2 < memory_end.2")] + #[rr::requires("memory_start.(loc_a) < memory_end.(loc_a)")] /// Precondition: The memory range is within the region covered by the page allocator. - #[rr::requires("memory_end.2 ≤ page_size_in_bytes_Z Size128TiB")] + #[rr::requires("memory_end.(loc_a) ≤ page_size_in_bytes_Z Size128TiB")] + /// Precondition: The pointer has valid provenance to access machine memory. + #[rr::requires("memory_start.(loc_p) = ProvAlloc machine_memory_prov")] /// Precondition: We have ownership of the memory range, having (memory_end - memory_start) bytes. - #[rr::requires(#type "memory_start" : "vs" @ "array_t (Z.to_nat (memory_end.2 - memory_start.2)) (int u8)")] + #[rr::requires(#type "memory_start" : "<#> vs" @ "array_t (Z.to_nat (memory_end.(loc_a) - memory_start.(loc_a))) (int u8)")] /// Precondition: The memory needs to be in confidential memory - #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] - #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ memory_start.2")] - #[rr::requires("memory_end.2 ≤ MEMORY_CONFIG.(conf_end).2")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::requires("MEMORY_CONFIG.(conf_start).(loc_a) ≤ memory_start.(loc_a)")] + #[rr::requires("memory_end.(loc_a) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")] /// Precondition: The page allocator should be uninitialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" None")] @@ -85,8 +89,10 @@ impl PageAllocator { ensure_not!(PAGE_ALLOCATOR.is_completed(), Error::Reinitialization())?; let mut page_allocator = Self::empty(); unsafe { page_allocator.add_memory_region(memory_start, memory_end)? }; - // NOTE: We initialize the invariant here. 
- PAGE_ALLOCATOR.call_once(|| RwLock::new(page_allocator)); + PAGE_ALLOCATOR.call_once( + #[rr::verify] + || RwLock::new(page_allocator), + ); Ok(()) } @@ -101,15 +107,15 @@ impl PageAllocator { /// Precondition: The current address is aligned to the given page size. #[rr::requires("Haddr_aligned" : "address `aligned_to` page_size_in_bytes_nat page_size")] /// Precondition: The current address is in bounds of the given memory region. - #[rr::requires("address.2 < memory_region_end.2")] + #[rr::requires("address.(loc_a) < memory_region_end.(loc_a)")] /// Precondition: The current address and the bound are in bounds of the global memory layout. - #[rr::requires("memory_layout.(conf_start).2 ≤ address.2")] - #[rr::requires("memory_region_end.2 ≤ memory_layout.(conf_end).2")] + #[rr::requires("memory_layout.(conf_start).(loc_a) ≤ address.(loc_a)")] + #[rr::requires("memory_region_end.(loc_a) ≤ memory_layout.(conf_end).(loc_a)")] /// Postcondition: The current address is aligned to the returned page size. #[rr::ensures("address `aligned_to` page_size_in_bytes_nat ret")] /// Postcondition: Either the returned page size is the smallest page size, or a page at the /// returned size will fit into the memory region. - #[rr::ensures("ret = Size4KiB ∨ address.2 + page_size_in_bytes_nat ret ≤ memory_region_end.2")] + #[rr::ensures("ret = Size4KiB ∨ address.(loc_a) + page_size_in_bytes_nat ret ≤ memory_region_end.(loc_a)")] fn find_largest_page_size( memory_layout: &MemoryLayout, mut page_size: PageSize, address: ConfidentialMemoryAddress, memory_region_end: *const usize, ) -> PageSize { @@ -151,22 +157,22 @@ impl PageAllocator { #[rr::requires("Hstart_aligned" : "memory_region_start `aligned_to` (page_size_in_bytes_nat Size4KiB)")] #[rr::requires("Hend_aligned" : "memory_region_end `aligned_to` (page_size_in_bytes_nat Size4KiB)")] /// Precondition: The memory range is positive. 
- #[rr::requires("Hstart_lt" : "memory_region_start.2 < memory_region_end.2")] + #[rr::requires("Hstart_lt" : "memory_region_start.(loc_a) < memory_region_end.(loc_a)")] + /// Precondition: The pointer has valid provenance to access machine memory. + #[rr::requires("memory_region_start.(loc_p) = ProvAlloc machine_memory_prov")] /// Precondition: We have ownership of the memory range, having (mend - mstart) bytes. - #[rr::requires(#type "memory_region_start" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - memory_region_start.2)) (int u8)")] + #[rr::requires(#type "memory_region_start" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.(loc_a) - memory_region_start.(loc_a))) (int u8)")] /// Precondition: The memory layout needs to be initialized #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: the whole memory region should be part of confidential memory - #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ memory_region_start.2")] - #[rr::requires("memory_region_end.2 ≤ MEMORY_CONFIG.(conf_end).2")] - + #[rr::requires("MEMORY_CONFIG.(conf_start).(loc_a) ≤ memory_region_start.(loc_a)")] + #[rr::requires("memory_region_end.(loc_a) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")] #[rr::observe("self.ghost": "()")] - #[rr::ok] /// Precondition: The memory range is within the region covered by the memory allocator. 
- #[rr::requires("memory_region_end.2 ≤ page_size_in_bytes_Z Size128TiB")] + #[rr::requires("memory_region_end.(loc_a) ≤ page_size_in_bytes_Z Size128TiB")] #[rr::ensures("ret = tt")] unsafe fn add_memory_region( &mut self, memory_region_start: ConfidentialMemoryAddress, memory_region_end: *const usize, @@ -209,8 +215,9 @@ impl PageAllocator { #[rr::params("γ")] #[rr::inv_vars("page_size", "address", "node")] #[rr::inv("address `aligned_to` page_size_in_bytes_nat page_size")] - #[rr::inv("memory_region_start.2 ≤ address.2")] - #[rr::inv("address.2 < memory_region_end.2")] + #[rr::inv("memory_region_start.(loc_a) ≤ address.(loc_a)")] + #[rr::inv("address.(loc_a) < memory_region_end.(loc_a)")] + #[rr::inv("address.(loc_p) = ProvAlloc machine_memory_prov")] // Invariant: the borrow variable stays the same, but we do not track the state of the node #[rr::inv("γ = node.ghost")] // Invariant: the base address and node size stay unchanged @@ -218,7 +225,7 @@ impl PageAllocator { #[rr::inv("node.cur.(max_node_size) = Size128TiB")] // Invariant: Remaining memory ownership #[rr::exists("vs")] - #[rr::inv(#type "address" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.2 - address.2)) (int u8)")] + #[rr::inv(#type "address" : "<#> vs" @ "array_t (Z.to_nat (memory_region_end.(loc_a) - address.(loc_a))) (int u8)")] #[rr::ignore] #[allow(unused)] || {}; @@ -245,56 +252,71 @@ impl PageAllocator { Ok(()) } - #[rr::only_spec] /// Returns a page token that has ownership over an unallocated memory region of the requested size. Returns error if it could not /// obtain write access to the global instance of the page allocator or if there are not enough page tokens satisfying the requested /// criteria. + #[rr::params("MEMORY_CONFIG" : "memory_layout")] /// Precondition: We require the page allocator to be initialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] + /// Precondition: We require the global memory layout to be initialized. 
+ #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Postcondition: If a page is returned, it has the right size. - #[rr::ok] - #[rr::ensures("ret.(page_sz) = page_size_to_allocate")] + #[rr::ensures("if_Ok ret (λ ret, ret.(page_sz) = page_size_to_allocate)")] pub fn acquire_page(page_size_to_allocate: PageSize) -> Result, Error> { - Self::try_write(|page_allocator| { + Self::try_write( + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::ok] + #[rr::ensures("if_Ok ret (λ tok, tok.(page_sz) = {page_size_to_allocate})")] + |page_allocator| { let base_address = page_allocator.base_address; let page_size = page_allocator.page_size; Ok(page_allocator.root.acquire_page_token(base_address, page_size, page_size_to_allocate)) })? } - #[rr::only_spec] /// Consumes the page tokens given by the caller, allowing for their further acquisition. This is equivalent to deallocation of the /// physical memory region owned by the returned page tokens. Given vector of pages might contains pages of arbitrary sizes. + #[rr::params("MEMORY_CONFIG" : "memory_layout")] /// Precondition: We require the page allocator to be initialized. #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] + /// Precondition: We require the global memory layout to be initialized. 
+ #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] pub fn release_pages(released_pages: Vec>) { - let _ = Self::try_write(|page_allocator| { + let _ = Self::try_write( + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] + #[rr::returns("Ok tt")] + |page_allocator| { let base_address = page_allocator.base_address; let page_size = page_allocator.page_size; let root_node = &mut page_allocator.root; for page_token in released_pages { #[rr::params("γ")] - #[rr::inv_vars("root_node", "page_size", "base_address")] - #[rr::inv("base_address = 0%Z")] - #[rr::inv("page_size = Size128TiB")] + #[rr::inv_vars("root_node")] #[rr::inv("root_node.ghost = γ")] + #[rr::inv("root_node.cur.(max_node_size) = Size128TiB")] + #[rr::inv("root_node.cur.(base_address) = 0%Z")] #[rr::ignore] #[allow(unused)] || {}; - // NOTE: we show that the token is within range of the allocator, using the - // invariant of the page token. 
+ root_node.store_page_token(base_address, page_size, page_token); } Ok(()) - }) - .inspect_err(|_| debug!("Memory leak: failed to store released pages in the page allocator")); + }).unwrap(); + //.inspect_err(|_| debug!("Memory leak: failed to store released pages in the page allocator")); } - #[rr::skip] - /// returns a mutable reference to the PageAllocator after obtaining a lock on the mutex + #[rr::params("p")] + #[rr::requires(#iris "once_initialized π \"PAGE_ALLOCATOR\" (Some ())")] + #[rr::requires(#iris "□ ∀ x, {O::Pre} π p op x")] + #[rr::exists("x")] + #[rr::ensures(#iris "{O::Post} π p op x ret")] fn try_write(op: O) -> Result where O: FnOnce(&mut RwLockWriteGuard<'static, PageAllocator>) -> Result { - op(&mut PAGE_ALLOCATOR.get().expect(Self::NOT_INITIALIZED).write()) + //op(&mut PAGE_ALLOCATOR.get().expect(Self::NOT_INITIALIZED).write()) + op(&mut PAGE_ALLOCATOR.get().unwrap().write()) } } @@ -302,6 +324,7 @@ impl PageAllocator { /// release of page tokens. /// Specification: #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] #[rr::refined_by("node" : "page_storage_node")] /// We abstract over the components stored here #[rr::exists("max_sz" : "option page_size")] @@ -327,6 +350,7 @@ struct PageStorageTreeNode { } #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] impl PageStorageTreeNode { /// Creates a new empty node with no allocation. /// Specification: @@ -378,7 +402,7 @@ impl PageStorageTreeNode { assert!(this_node_base_address == page_token.start_address()); assert!(this_node_page_size == page_token.size()); // For verification: to unfold invariant. - &self.max_allocable_page_size; + let _ = &self.max_allocable_page_size; self.store_page_token_in_this_node(page_token); } else { @@ -386,12 +410,12 @@ impl PageStorageTreeNode { self.initialize_children_if_needed(this_node_page_size); // For verification: to unfold invariant. 
- &self.max_allocable_page_size; + let _ = &self.max_allocable_page_size; // Calculate which child should we invoke recursively. let index = Self::calculate_child_index(this_node_base_address, this_node_page_size, &page_token); // Let's go recursively to the node where this page belongs to. - let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index); + let (child_base_address, child_page_size) = Self::child_address_and_size(this_node_base_address, this_node_page_size, index); let allocable_page_size = vec_index_mut(&mut self.children, index).store_page_token(child_base_address, child_page_size, page_token); // We are coming back from the recursion. @@ -406,11 +430,9 @@ impl PageStorageTreeNode { self.max_allocable_page_size.unwrap() } - #[rr::only_spec] /// Recursively traverses the tree to reach a node that contains the page token of the requested size and returns this page token. This /// function returns also a set of page size that are not allocable anymore at the node. This method has the max depth of recursive /// invocation equal to the number of PageSize variants. This method has an upper bounded computation complexity. - /// #[rr::params("memly" : "memory_layout")] /// Precondition: The size and base address arguments match the logical state. #[rr::requires("this_node_base_address = self.cur.(base_address)")] @@ -423,6 +445,8 @@ impl PageStorageTreeNode { /// Postcondition: the size and base address remain unchanged #[rr::ensures("node'.(max_node_size) = self.cur.(max_node_size)")] #[rr::ensures("node'.(base_address) = self.cur.(base_address)")] + /// Postcondition: It can allocate at most the node size (proof artifact). + #[rr::ensures("page_node_can_allocate node' ≤o{option_cmp page_size_cmp} Some node'.(max_node_size)")] /// Postcondition: The function succeeds and returns a page iff the allocable page size of the /// node is at least the required page size. 
#[rr::ok] @@ -439,6 +463,7 @@ impl PageStorageTreeNode { let page_token = self.acquire_page_token_from_this_node(); assert!(this_node_base_address == page_token.start_address()); assert!(this_node_page_size == page_token.size()); + Ok(page_token) } else { // We are too high in the tree, i.e., the current level represents page size allocations that are larger than the page size @@ -455,13 +480,14 @@ impl PageStorageTreeNode { // This will succeed because we already checked above that the desired size can be acquired. let index = vec_iter(&self.children) .position( - #[rr::only_spec] - #[rr::returns("bool_decide ((Some {page_size_to_acquire}) ≤o{ option_cmp page_size_cmp } page_node_can_allocate node)")] + #[rr::returns( + "bool_decide ((Some {page_size_to_acquire}) ≤o{ option_cmp page_size_cmp } page_node_can_allocate node)" + )] |node| node.max_allocable_page_size >= Some(page_size_to_acquire), ) .unwrap(); - let (child_base_address, child_page_size) = self.child_address_and_size(this_node_base_address, this_node_page_size, index); + let (child_base_address, child_page_size) = Self::child_address_and_size(this_node_base_address, this_node_page_size, index); // Invoke recursively this function to traverse to a node containing a page token of the requested size. // The below unwrap is ok because we found an index of a node that has requested allocation. let page_token = vec_index_mut(&mut self.children, index) @@ -479,24 +505,23 @@ impl PageStorageTreeNode { } } - #[rr::only_spec] /// Creates children for the given node because the node gets created with an empty list of children, expecting that children will be /// created lazily with this function. /// - /// Precondition the page size argument has to match the node's logical state. + #[rr::params("smaller_sz")] + /// Precondition: the page size argument has to match the node's logical state. #[rr::requires("this_node_page_size = self.cur.(max_node_size)")] + /// Precondition: there is a smaller page size. 
+ #[rr::requires("Hsmaller" : "page_size_smaller this_node_page_size = Some smaller_sz")] /// Postcondition: leaves the page node unchanged except for initializing the children if necessary #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) self.cur.(allocation_state) true")] fn initialize_children_if_needed(&mut self, this_node_page_size: PageSize) { if self.children.is_empty() { self.children = (0..this_node_page_size.number_of_smaller_pages()) .map( - // I think to handle this well I'll need invariants on closures. - // i.e., the address and so on need to become logical components of the type (even - // though they don't have a physical representation) - #[rr::skip] #[rr::params("base_address", "node_size")] #[rr::requires("(page_size_align node_size | base_address)%Z")] + #[rr::requires("base_address + (page_size_in_bytes_nat node_size) ≤ MaxInt usize")] #[rr::returns("mk_page_node node_size base_address PageTokenUnavailable false")] |_| Self::empty(), ) @@ -511,7 +536,7 @@ impl PageStorageTreeNode { /// Precondition: the token's size is equal to this node's size #[rr::requires("self.cur.(max_node_size) = page_token.(page_sz)")] /// Precondition: the token's address matches the node's address - #[rr::requires("self.cur.(base_address) = page_token.(page_loc).2")] + #[rr::requires("self.cur.(base_address) = page_token.(page_loc).(loc_a)")] /// Postcondition: the node changes to the available state #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenAvailable self.cur.(children_initialized)")] fn store_page_token_in_this_node(&mut self, page_token: Page) { @@ -528,7 +553,7 @@ impl PageStorageTreeNode { /// Postcondition: The returned token's size matches the node's size. #[rr::ensures("ret.(page_sz) = self.cur.(max_node_size)")] /// Postcondition: The returned token's address matches the node's address. 
- #[rr::ensures("ret.(page_loc).2 = self.cur.(base_address)")] + #[rr::ensures("ret.(page_loc).(loc_a) = self.cur.(base_address)")] /// Postcondition: The node has been updated to the unavailable state. #[rr::observe("self.ghost": "mk_page_node self.cur.(max_node_size) self.cur.(base_address) PageTokenUnavailable self.cur.(children_initialized)")] fn acquire_page_token_from_this_node(&mut self) -> Page { @@ -574,12 +599,14 @@ impl PageStorageTreeNode { } } - #[rr::only_spec] /// Merges page tokens owned by children. /// Safety: Requires that all children have been initialized. /// + #[rr::params("smaller_sz")] /// Precondition: The children are initialized. - #[rr::requires("self.cur.(children_initialized)")] + #[rr::requires("Hchild_init" : "self.cur.(children_initialized)")] + /// Precondition: There is a smaller page size. + #[rr::requires("Hsmaller" : "page_size_smaller this_node_page_size = Some smaller_sz")] /// Precondition: the argument page size matches the node's page size #[rr::requires("this_node_page_size = self.cur.(max_node_size)")] /// Postcondition: this node has been updated to a new state. @@ -588,33 +615,23 @@ impl PageStorageTreeNode { /// Postcondition: the state is either unchanged, or the whole node is available now. #[rr::ensures("state' = self.cur.(allocation_state) ∨ state' = PageTokenAvailable")] unsafe fn try_to_merge_page_tokens(&mut self, this_node_page_size: PageSize) { - if self.children.iter().all( + if vec_iter(&self.children).all( #[rr::returns("bool_decide (child.(allocation_state) = PageTokenAvailable)")] |child| child.page_token.is_some(), ) { // All children have page tokens, thus we can merge them. - let pages_to_merge = self.children.iter_mut().map( - // postcondition of the closure has the observation. - // iter_mut hands out mutable borrows. - // Options: - // - we immediately return the obsevation to the base iterator and allow it to - // resolve, - // - or we collect a bigsep of observations and resolve it at the end. 
- // - // We might want to create the list gnames already at the very beginning when - // creating the iterator via iter_mut. - // We can keep the observations as part of the invariant, I suppose. - // Then we finally get the completed invariant, and the observation of having - // turned the vector into a list of PlaceGhost. - // At this point, resolve everything. + let pages_to_merge = vec_iter_mut(&mut self.children).map( #[rr::requires("child.cur.(allocation_state) = PageTokenAvailable")] #[rr::ensures("ret.(page_sz) = child.cur.(max_node_size)")] - #[rr::ensures("ret.(page_loc).2 = child.cur.(base_address)")] + #[rr::ensures("ret.(page_loc).(loc_a) = child.cur.(base_address)")] #[rr::observe("child.ghost": "mk_page_node child.cur.(max_node_size) child.cur.(base_address) PageTokenUnavailable child.cur.(children_initialized)")] |child| child.acquire_page_token_from_this_node() ).collect(); + // Safety: Safe, because all children are initialized and have a page token available. - self.store_page_token_in_this_node(unsafe { Page::merge(pages_to_merge, this_node_page_size) }); + let merged_token = unsafe { Page::merge(pages_to_merge, this_node_page_size) }; + assert!(merged_token.size() == this_node_page_size); + self.page_token = Some(merged_token); self.max_allocable_page_size = Some(this_node_page_size); } } @@ -642,47 +659,17 @@ impl PageStorageTreeNode { /// /// Invariant: children have been created #[rr::params("child_node_size")] - /// Precondition: The base address and page size arguments match the node's logical state. 
- #[rr::requires("this_node_base_address = self.(base_address)")] - #[rr::requires("this_node_page_size = self.(max_node_size)")] - /// The children are initialized - #[rr::requires("H_child_init": "self.(children_initialized)")] /// There exist children - #[rr::requires("page_size_smaller self.(max_node_size) = Some child_node_size")] + #[rr::requires("page_size_smaller this_node_page_size = Some child_node_size")] + #[rr::requires("INV_IN_RANGE" : "this_node_base_address + page_size_in_bytes_Z this_node_page_size ≤ MaxInt usize")] /// Precondition: the child index is in range - #[rr::requires("Hindex": "index < page_size_multiplier self.(max_node_size)")] + #[rr::requires("Hindex": "index < page_size_multiplier this_node_page_size")] /// Postcondition: Returns a tuple of the child's base address and its size. - #[rr::returns("*[child_base_address self.(base_address) child_node_size index; child_node_size] ")] - fn child_address_and_size(&self, this_node_base_address: usize, this_node_page_size: PageSize, index: usize) -> (usize, PageSize) { - assert!(index < self.children.len()); + #[rr::returns("*[child_base_address this_node_base_address child_node_size index; child_node_size] ")] + fn child_address_and_size(this_node_base_address: usize, this_node_page_size: PageSize, index: usize) -> (usize, PageSize) { assert!(this_node_page_size.smaller().is_some()); let child_base_address = this_node_base_address + index * this_node_page_size.smaller().unwrap().in_bytes(); let child_page_size = this_node_page_size.smaller().unwrap(); (child_base_address, child_page_size) } } - -/// These wrappers serve as a temporary workaround until RefinedRust supports unsized types and in -/// particular slices: the indexing and iteration methods on `Vec` work by dereferencing to slices, -/// which currently are not supported by RefinedRust. -/// For now, we thus create wrappers for these methods to which we can attach RefinedRust -/// specifications. 
-mod wrappers { - use alloc::vec::Vec; - - #[rr::only_spec] - #[rr::requires("index < length x.cur")] - #[rr::exists("γi")] - #[rr::returns("(x.cur !!! Z.to_nat index, γi)")] - #[rr::observe("x.ghost": "<[Z.to_nat index := PlaceGhost γi]> (<$#> x.cur)")] - pub fn vec_index_mut(x: &mut Vec, index: usize) -> &mut T { - &mut x[index] - } - - #[rr::only_spec] - #[rr::returns("x")] - pub fn vec_iter(x: &Vec) -> core::slice::Iter<'_, T> { - x.iter() - } -} -use wrappers::*; diff --git a/security-monitor/src/core/page_allocator/page.rs b/security-monitor/src/core/page_allocator/page.rs index 3c11a0dd..fd518461 100644 --- a/security-monitor/src/core/page_allocator/page.rs +++ b/security-monitor/src/core/page_allocator/page.rs @@ -2,6 +2,7 @@ // SPDX-FileContributor: Wojciech Ozga , IBM Research - Zurich // SPDX-License-Identifier: Apache-2.0 #![rr::import("ace.theories.page_allocator", "page")] +#![rr::import("ace.theories.base", "machine")] use crate::core::architecture::PageSize; use crate::core::control_data::{DigestType, MeasurementDigest}; use crate::core::memory_layout::{ConfidentialMemoryAddress, MemoryLayout, NonConfidentialMemoryAddress}; @@ -11,6 +12,8 @@ use core::marker::PhantomData; use core::mem; use core::ops::Range; +use crate::rr_wrappers::*; + pub trait PageState {} pub struct UnAllocated {} @@ -32,15 +35,17 @@ impl PageState for Allocated {} #[rr::invariant("page_wf p")] /// We require the page to be in this bounded memory region that can be handled by the page /// allocator. -#[rr::invariant("(page_end_loc p).2 ≤ MAX_PAGE_ADDR")] +#[rr::invariant("0 ≤ p.(page_loc).(loc_a) ∧ (page_end_loc p).(loc_a) ≤ MAX_PAGE_ADDR")] /// We require the memory layout to have been initialized. #[rr::context("onceG Σ memory_layout")] #[rr::exists("MEMORY_CONFIG")] /// Invariant: The MEMORY_LAYOUT Once instance has been initialized to MEMORY_CONFIG. 
#[rr::invariant(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Invariant: ...and according to that layout, this page resides in confidential memory. -#[rr::invariant("MEMORY_CONFIG.(conf_start).2 ≤ p.(page_loc).2")] -#[rr::invariant("p.(page_loc).2 + (page_size_in_bytes_nat p.(page_sz)) ≤ MEMORY_CONFIG.(conf_end).2")] +#[rr::invariant("MEMORY_CONFIG.(conf_start).(loc_a) ≤ p.(page_loc).(loc_a)")] +#[rr::invariant("p.(page_loc).(loc_a) + (page_size_in_bytes_nat p.(page_sz)) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")] +#[rr::context("MachineConfig")] +#[rr::invariant("p.(page_loc).(loc_p) = ProvAlloc machine_memory_prov")] pub struct Page { /// Specification: the `address` has mathematical value `l`. #[rr::field("p.(page_loc)")] @@ -54,6 +59,7 @@ pub struct Page { } #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] impl Page { /// Creates a page token at the given address in the confidential memory. /// @@ -73,12 +79,14 @@ impl Page { #[rr::requires("l `aligned_to` (page_size_align sz)")] /// Precondition: The page is located in a bounded memory region that can be handled by the /// page allocator. - #[rr::requires("l.2 + page_size_in_bytes_Z sz ≤ MAX_PAGE_ADDR")] + #[rr::requires("l.(loc_a) + page_size_in_bytes_Z sz ≤ MAX_PAGE_ADDR")] /// Precondition: The memory layout is initialized. #[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: The page is entirely contained in the confidential memory range. - #[rr::requires("MEMORY_CONFIG.(conf_start).2 ≤ l.2")] - #[rr::requires("l.2 + (page_size_in_bytes_nat sz) ≤ MEMORY_CONFIG.(conf_end).2")] + #[rr::requires("MEMORY_CONFIG.(conf_start).(loc_a) ≤ l.(loc_a)")] + #[rr::requires("l.(loc_a) + (page_size_in_bytes_nat sz) ≤ MEMORY_CONFIG.(conf_end).(loc_a)")] + /// Precondition: This needs to have the machine provenance. 
+ #[rr::requires("l.(loc_p) = ProvAlloc machine_memory_prov")] /// Then, we get a properly initialized page starting at `l` of size `sz` with some value `v`. #[rr::returns("mk_page l sz v")] pub(super) unsafe fn init(address: ConfidentialMemoryAddress, size: PageSize) -> Self { @@ -86,6 +94,8 @@ impl Page { } /// Specification: + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// We return a page starting at `l` with size `sz`, but with all bytes initialized to zero. #[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")] pub fn zeroize(mut self) -> Page { @@ -100,15 +110,14 @@ impl Page { /// Precondition: We need to know the current memory layout. #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Precondition: The region we are copying from is in non-confidential memory. - #[rr::requires("MEMORY_CONFIG.(non_conf_start).2 ≤ address.2")] - #[rr::requires("address.2 + page_size_in_bytes_Z self.(page_sz) ≤ MEMORY_CONFIG.(non_conf_end).2")] + #[rr::requires("MEMORY_CONFIG.(non_conf_start).(loc_a) ≤ address.(loc_a)")] + #[rr::requires("address.(loc_a) + page_size_in_bytes_Z self.(page_sz) ≤ MEMORY_CONFIG.(non_conf_end).(loc_a)")] /// Precondition: We require ownership over the memory region. #[rr::requires(#type "address" : "<#> v2" @ "array_t (page_size_in_words_nat self.(page_sz)) (int usize)")] /// Postcondition: We return ownership over the memory region. #[rr::ensures(#type "address" : "<#> v2" @ "array_t (page_size_in_words_nat self.(page_sz)) (int usize)")] /// Postcondition: We get a correctly initialized page token with the copied contents. 
#[rr::returns("Ok (mk_page self.(page_loc) self.(page_sz) v2)")] - // TODO this needs to be unsafe pub fn copy_from_non_confidential_memory(mut self, mut address: NonConfidentialMemoryAddress) -> Result, Error> { self.offsets().into_iter().try_for_each(|offset_in_bytes| { let non_confidential_address = MemoryLayout::read() @@ -136,13 +145,11 @@ impl Page { let number_of_smaller_pages = self.size.in_bytes() / smaller_page_size.in_bytes(); let memory_layout = MemoryLayout::read(); - // NOTE: Currently using a wrapper around map, as we have to add an extra trait requirement - // to the struct definition of Map to declare the invariant. Should be lifted soon. (0..number_of_smaller_pages).map( #[rr::params("v")] // Precondition: the page bound is in confidential memory - #[rr::requires("Hpage_end" : "{page_end}.2 ≤ {*memory_layout}.(conf_end).2")] - #[rr::requires("Hpage_start" : "{*memory_layout}.(conf_start).2 ≤ {self.address}.2")] + #[rr::requires("Hpage_end" : "{page_end}.(loc_a) ≤ {*memory_layout}.(conf_end).(loc_a)")] + #[rr::requires("Hpage_start" : "{*memory_layout}.(conf_start).(loc_a) ≤ {self.address}.(loc_a)")] // Precondition: the offset does not overflow #[rr::requires("Hlarger_page" : "i * page_size_in_bytes_Z {smaller_page_size} ∈ usize")] // Precondition: the base address is well-aligned @@ -150,8 +157,10 @@ impl Page { // Precondition: The memory layout needs to have been initialized. 
#[rr::requires(#iris "once_status \"MEMORY_LAYOUT\" (Some {*memory_layout})")] // Precondition: the offset is within the bound - #[rr::requires("Hinrange" : "{self.address}.2 + (1 + i) * (page_size_in_bytes_Z {smaller_page_size}) ≤ {page_end}.2")] - #[rr::requires("Hinrange2" : "{page_end}.2 ≤ MAX_PAGE_ADDR")] + #[rr::requires("Hinrange" : "{self.address}.(loc_a) + (1 + i) * (page_size_in_bytes_Z {smaller_page_size}) ≤ {page_end}.(loc_a)")] + #[rr::requires("Hinrange2" : "{page_end}.(loc_a) ≤ MAX_PAGE_ADDR")] + /// Precondition: provenance + #[rr::requires("{self.address}.(loc_p) = ProvAlloc machine_memory_prov")] // Precondition: ownership of this token's memory region #[rr::requires(#type "({self.address} +ₗ (i * page_size_in_bytes_Z {smaller_page_size}))" : "<#> v" @ "array_t (page_size_in_words_nat {smaller_page_size}) (int usize)")] // Postcondition: return new smaller page @@ -181,35 +190,40 @@ impl Page { /// * Merged pages are contiguous and cover the entire new size of the future page /// * Merged pages are of the same size /// * Merged pages are sorted - #[rr::only_spec] - #[rr::params("base_address" : "loc")] - #[rr::requires("base_address = (from_pages !!! 0%nat).(page_loc)")] - #[rr::requires("(from_pages !!! 0%nat).(page_loc) `aligned_to` page_size_align new_size")] - #[rr::requires( - "∀ (i : nat) pg, from_pages !! i = Some pg → - Some pg.(page_sz) = page_size_smaller new_size ∧ - pg.(page_loc) = base_address +ₗ (i * page_size_in_bytes_Z pg.(page_sz))" + #[rr::params("base_address" : "loc", "smaller_sz")] + #[rr::requires("Hsmaller": "page_size_smaller new_size = Some smaller_sz")] + #[rr::requires("Hlen": "length from_pages = page_size_multiplier new_size")] + #[rr::requires("Hbase_addr": "base_address = (from_pages !!! 0%nat).(page_loc)")] + #[rr::requires("Haligned": "(from_pages !!! 0%nat).(page_loc) `aligned_to` page_size_align new_size")] + #[rr::requires("Hlook": + "∀ (i : nat) pg, from_pages !! 
i = Some pg → + pg.(page_sz) = smaller_sz ∧ + pg.(page_loc).(loc_a) = base_address.(loc_a) + (i * page_size_in_bytes_Z pg.(page_sz))" )] - #[rr::requires("length from_pages = page_size_multiplier new_size")] #[rr::returns("mk_page base_address new_size (mjoin (page_val <$> from_pages))")] pub unsafe fn merge(mut from_pages: Vec>, new_size: PageSize) -> Self { - assert!(from_pages.len() > 2); - assert!(from_pages[0].address.is_aligned_to(new_size.in_bytes())); - assert!(new_size.in_bytes() / from_pages[0].size.in_bytes() == from_pages.len()); - assert!(from_pages[0].start_address() + new_size.in_bytes() == from_pages[from_pages.len() - 1].end_address()); + let base_address = vec_index(&from_pages, 0).address; + let smaller_sz = vec_index(&from_pages, 0).size; + let pages_len = from_pages.len(); + let last_page = vec_index(&from_pages, pages_len - 1); + let end_addr = last_page.end_address(); + let _end_size = last_page.size; // For verification: to unfold invariant + assert!(base_address.is_aligned_to(new_size.in_bytes())); + assert!(new_size.in_bytes() / smaller_sz.in_bytes() == pages_len); + assert!(base_address.as_usize() + new_size.in_bytes() == end_addr); + assert!(pages_len > 2); - // NOTE: logically, this is a big step. - // - We need to go over the whole vector and get the ownership. - // - From each page, we extract the ownership - // - then merge the big array - unsafe { Self::init(from_pages.swap_remove(0).address, new_size) } + unsafe { Self::init(base_address, new_size) } } } #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] impl Page { /// Clears the entire memory content by writing 0s to it and then converts the Page from Allocated to UnAllocated so it can be returned /// to the page allocator. 
+ #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] #[rr::returns("mk_page self.(page_loc) self.(page_sz) (zero_page self.(page_sz))")] pub fn deallocate(mut self) -> Page { self.clear(); @@ -226,6 +240,8 @@ impl Page { /// within the page address range, otherwise an Error is returned. /// /// Specification: + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] #[rr::ok] /// Precondition: the offset needs to be divisible by the size of usize. #[rr::requires("H_off" : "(ly_size usize | offset_in_bytes)%Z")] @@ -253,6 +269,7 @@ impl Page { } #[rr::context("onceG Σ memory_layout")] +#[rr::context("MachineConfig")] impl Page { pub const ENTRY_SIZE: usize = mem::size_of::(); @@ -262,12 +279,12 @@ impl Page { } #[rr::ensures("page_wf self")] - #[rr::returns("self.(page_loc).2")] + #[rr::returns("self.(page_loc).(loc_a)")] pub fn start_address(&self) -> usize { self.address.as_usize() } - #[rr::returns("self.(page_loc).2 + page_size_in_bytes_Z self.(page_sz)")] + #[rr::returns("self.(page_loc).(loc_a) + page_size_in_bytes_Z self.(page_sz)")] pub fn end_address(&self) -> usize { self.address.as_usize() + self.size.in_bytes() } @@ -292,6 +309,8 @@ impl Page { /// will be written to the memory. This offset must be a multiply of size_of::(usize) and be /// within the page address range, otherwise an Error is returned. 
/// Specification: + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] #[rr::exists("new_val")] #[rr::observe("self.ghost": "mk_page self.cur.(page_loc) self.cur.(page_sz) new_val")] #[rr::ok] @@ -344,6 +363,8 @@ impl Page { } #[rr::only_spec] + #[rr::params("MEMORY_CONFIG" : "memory_layout")] + #[rr::requires(#iris "once_initialized π \"MEMORY_LAYOUT\" (Some MEMORY_CONFIG)")] /// Postcondition: The page has been zeroized. #[rr::observe("self.ghost": "mk_page self.cur.(page_loc) self.cur.(page_sz) (zero_page self.cur.(page_sz))")] pub fn clear(&mut self) { diff --git a/security-monitor/src/error.rs b/security-monitor/src/error.rs index 8a7d6da5..b0f8fb01 100644 --- a/security-monitor/src/error.rs +++ b/security-monitor/src/error.rs @@ -4,6 +4,7 @@ use crate::core::architecture::riscv::sbi::*; use thiserror_no_std::Error; +#[rr::verify] #[derive(Error, Debug)] pub enum Error { /* Initialization-related errors */ @@ -111,6 +112,7 @@ pub enum Error { InvalidGprId(), } +// !start skip(error.error) impl Error { pub fn sbi_error_code(&self) -> usize { match &self { @@ -144,3 +146,4 @@ impl Error { } } } +// !end skip diff --git a/security-monitor/src/lib.rs b/security-monitor/src/lib.rs index 7701aa56..bc02078d 100644 --- a/security-monitor/src/lib.rs +++ b/security-monitor/src/lib.rs @@ -24,3 +24,44 @@ mod confidential_flow; mod core; mod error; mod non_confidential_flow; + +/// These wrappers serve as a temporary workaround until RefinedRust supports unsized types and in +/// particular slices: the indexing and iteration methods on `Vec` work by dereferencing to slices, +/// which currently are not supported by RefinedRust. +/// For now, we thus create wrappers for these methods to which we can attach RefinedRust +/// specifications. +mod rr_wrappers { + use alloc::vec::Vec; + + #[rr::only_spec] + #[rr::requires("index < length x.cur")] + #[rr::exists("γi")] + #[rr::returns("(x.cur !!! 
Z.to_nat index, γi)")] + #[rr::observe("x.ghost": "<[Z.to_nat index := PlaceGhost γi]> (<$#> x.cur)")] + pub fn vec_index_mut(x: &mut Vec, index: usize) -> &mut T { + &mut x[index] + } + + #[rr::only_spec] + #[rr::requires("index < length x")] + #[rr::returns("x !!! Z.to_nat index")] + pub fn vec_index(x: &Vec, index: usize) -> &T { + &x[index] + } + + #[rr::only_spec] + #[rr::returns("x")] + pub fn vec_iter(x: &Vec) -> core::slice::Iter<'_, T> { + x.iter() + } + + #[rr::only_spec] + #[rr::exists("γs")] + #[rr::ensures("length γs = length x.cur")] + #[rr::observe("x.ghost": "(PlaceGhost <$> γs) : list (place_rfn {rt_of T})")] + #[rr::returns("zip x.cur γs")] + pub fn vec_iter_mut(x: &mut Vec) -> core::slice::IterMut<'_, T> { + x.iter_mut() + } + +} diff --git a/verification/.gitignore b/verification/.gitignore index a8276b4e..564bf5b3 100644 --- a/verification/.gitignore +++ b/verification/.gitignore @@ -4,3 +4,5 @@ log rustc-ice-* generated_code.bak _CoqProject +.lia.cache +.nia.cache diff --git a/verification/dune-project b/verification/dune-project index a7b843bc..f3ba0aa3 100644 --- a/verification/dune-project +++ b/verification/dune-project @@ -1,4 +1,4 @@ -(lang dune 3.8) +(lang dune 3.21) (name ace) (package (name ace)) -(using coq 0.8) +(using rocq 0.11) diff --git a/verification/readme.md b/verification/readme.md index 89bccd82..9458be21 100644 --- a/verification/readme.md +++ b/verification/readme.md @@ -78,7 +78,7 @@ The Rust source path is relative to [`security_monitor/src`](/security_monitor/s |----------------------------|-------------|----------------------------|---------| | Memory isolation config | | Specififed, partly verified | | | \|- Page token | [`core/page_allocator/page.rs`](/security-monitor/src/core/page_allocator/page.rs) | Largely verified | | -| \|- Page allocator | [`core/page_allocator/allocator.rs`](/security-monitor/src/core/page_allocator/allocator.rs) | Largely verified | | +| \|- Page allocator | 
[`core/page_allocator/allocator.rs`](/security-monitor/src/core/page_allocator/allocator.rs) | Fully verified | | | \|- Page table encoding | [`core/architecture/riscv/mmu`](/security-monitor/src/core/architecture/riscv/mmu) | Specified, not verified | | | Initialization | [`core/initialization`](/security-monitor/src/core/initialization) | Specified, partly verified | | | VM Interactions | | Unspecified | | diff --git a/verification/refinedrust b/verification/refinedrust index 0ee0990d..6741571f 160000 --- a/verification/refinedrust +++ b/verification/refinedrust @@ -1 +1 @@ -Subproject commit 0ee0990daee6f45514e2f3e8e8385e451ad13af8 +Subproject commit 6741571f64d78223f35a62656f78cde899c7e208 diff --git a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v index 142878fa..ceffc0cb 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v +++ b/verification/rust_proofs/ace/proofs/proof_core_architecture_riscv_mmu_page_size_PageSize_number_of_smaller_pages.v @@ -17,7 +17,9 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_size.number_of_smaller_pages) *) all: unsafe_unfold_common_caesium_defs; simpl; lia. + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v index 5897b400..9cfcc31d 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_MemoryLayout_read.v @@ -14,9 +14,11 @@ Proof. core_memory_layout_MemoryLayout_read_prelude. 
repeat liRStep; liShow. + (* !start proof(memory_layout.read) *) liInst Hevar1 (MemoryLayout_inv_t). liInst Hevar2 Spin_ty. repeat liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v deleted file mode 100644 index 4af1aad2..00000000 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr.v +++ /dev/null @@ -1,25 +0,0 @@ -From caesium Require Import lang notation. -From refinedrust Require Import typing shims. -From sm.ace.generated Require Import generated_code_ace generated_specs_ace. -From sm.ace.generated Require Import generated_template_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr. - -Set Default Proof Using "Type". - -Section proof. -Context `{!refinedrustGS Σ}. -Lemma core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_proof (π : thread_id) : - core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_lemma π. -Proof. - core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_into_mut_ptr_prelude. - - rep <- 10 liRStep; liShow. - (* TODO: this shouldn't have to be done manually *) - simp_ltypes. - repeat liRStep. - - all: print_remaining_goal. - Unshelve. all: sidecond_solver. - Unshelve. all: sidecond_hammer. - Unshelve. all: print_remaining_sidecond. -Qed. -End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v index 182228b6..22b704db 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new.v @@ -12,7 +12,7 @@ Lemma core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_n Proof. core_memory_layout_confidential_memory_address_ConfidentialMemoryAddress_new_prelude. - repeat liRStep; liShow. + rep <-! liRStep; liShow. all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v index 6bcc26e0..71fd042e 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v +++ b/verification/rust_proofs/ace/proofs/proof_core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest.v @@ -13,8 +13,10 @@ Proof. core_memory_protector_mmu_page_size_PageSize_all_from_largest_to_smallest_prelude. repeat liRStep; liShow. + (* !start proof(page_size.all_from_largest_to_smallest) *) liInst Hevar0 (core_memory_protector_mmu_page_size_PageSize_ty). repeat liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page.v new file mode 100644 index 00000000..f86a54f2 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page.v @@ -0,0 +1,39 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_acquire_page. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_acquire_page_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_acquire_page_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_acquire_page_prelude. + + (* TODO: lots of spurious lifetimes, due to latebound instantiation (see #27) *) + (* !start proof(page_allocator.acquire_page) *) + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft5" "static"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft16" "vlft6"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft18" "vlft6"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft20" "vlft6"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft22" "vlft6"). + rep <-! liRStep; liShow. + rep <- 2 liRStep; liShow. + { rep liRStep. + unfold once_initialized. rep liRStep. } + rep liRStep. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page_closure0.v new file mode 100644 index 00000000..a24201f3 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_acquire_page_closure0.v @@ -0,0 +1,32 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_acquire_page_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_acquire_page_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_acquire_page_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_acquire_page_closure0_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page_allocator.acquire_page) *) + apply_update (updateable_copy_lft "ulft3" "ulft_4"). + rep <-! liRStep; liShow. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.acquire_page) *) + 1-2: congruence. + { rename select (if_Ok _ _) into Hok. + match type of Hok with if_Ok ?x _ => rename x into maybe_tok end. + destruct maybe_tok; simpl in *; solve_goal. } + (* !end proof *) + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v index 907a4934..184a1fbb 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_add_memory_region.v @@ -13,30 +13,30 @@ Proof. core_page_allocator_allocator_PageAllocator_add_memory_region_prelude. rep <-! liRStep; liShow. + (* !start proof(page_allocator.add_memory_region) *) all: match goal with | H : ?x `aligned_to` page_size_in_bytes_nat _ |- _ => rename x into larr end. all: try match goal with - | H : ?x = Size4KiB ∨ _.2 + _ ≤ _ |- _ => + | H : ?x = Size4KiB ∨ _.(loc_a) + _ ≤ _ |- _ => rename x into sz_to_allocate; rename H into Hsz_bounds end. - { (* calling Page::init *) apply_update (updateable_split_array larr (page_size_in_bytes_nat sz_to_allocate)). - liRStepUntil typed_val_expr. + liRStepUntil typed_call. apply_update (updateable_reshape_array larr (page_size_in_words_nat sz_to_allocate) bytes_per_addr). - liRStepUntil typed_val_expr. + liRStepUntil typed_call. iRename select (larr ◁ₗ[_, _] _ @ (◁ _))%I into "Harr". - iApply fupd_typed_val_expr. - iMod (ltype_own_array_subtype_strong _ _ _ _ (int usize) with "[] Harr") as "(% & Harr)"; [done | | | | | ]. + iApply fupd_typed_call. + iMod (ltype_own_array_subtype_strong _ _ _ _ (int usize) _ _ _ (λ _ _, True%I) with "[] Harr") as "(% & _ & Harr)"; [done | | | | | ]. { shelve_sidecond. } { solve_layout_alg. } { shelve_sidecond. } - { iModIntro. iIntros (??) "Harr". iPoseProof (ty_own_val_array_int_to_int with "Harr") as "$"; last done. + { iModIntro. iIntros (??? _) "Harr". + iPoseProof (ty_own_val_array_int_to_int with "Harr") as "(% & $)"; last done. shelve_sidecond. } iModIntro. - repeat liRStep. } repeat liRStep. 
@@ -44,17 +44,12 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: try lia. all: try congruence. - - rewrite -page_size_align_is_size /page_size_align. - exists (page_size_align_log Size4KiB). lia. - - sidecond_hammer. + - apply page_size_in_bytes_is_power_of_two. + - solve_goal. - specialize (page_size_in_bytes_nat_ge x'). specialize (page_size_in_bytes_nat_in_usize x') as []. split; lia. - - specialize (page_size_in_bytes_nat_ge x'). - specialize (page_size_in_bytes_nat_in_usize x') as []. - sidecond_hammer. - unfold page_size_in_bytes_nat. lia. - - specialize (page_size_in_words_nat_ge sz_to_allocate). lia. - intros ? Hly1. apply syn_type_has_layout_array_inv in Hly1 as (? & Hly1 & -> & ?). apply syn_type_has_layout_int_inv in Hly1 as ->. eexists. split; first by apply syn_type_has_layout_int. @@ -66,7 +61,7 @@ Proof. - sidecond_hammer. - rewrite page_size_align_is_size. done. - rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. - revert select (¬ memory_region_end.2 > _). + revert select (¬ memory_region_end.(loc_a) > _). rewrite Hroot_sz. lia. - rename select (max_node_size _ = Size128TiB) into Hroot_sz'. rewrite Hroot_sz'. @@ -74,9 +69,10 @@ Proof. - rewrite Hroot_sz Hroot_base. split; simpl. + lia. - + revert select (larr.2 + _ ≤ MAX_PAGE_ADDR). rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. lia. + + revert select (larr.(loc_a) + _ ≤ MAX_PAGE_ADDR). rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. lia. - apply aligned_to_offset; first done. solve_goal. + (* !end proof *) all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v index 75ce13bc..08168a8f 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_empty.v @@ -13,15 +13,19 @@ Proof. core_page_allocator_allocator_PageAllocator_empty_prelude. repeat liRStep; liShow. - liInst Hevar Size128TiB. - liInst Hevar0 0. + (* !start proof(page_allocator.empty) *) + liInst Hevar_x1 Size128TiB. + liInst Hevar_x2 0. repeat liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. + (* !start proof(page_allocator.empty) *) { exists 0. lia. } { apply page_size_in_bytes_nat_in_usize. } + (* !end proof *) all: sidecond_hammer. Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v index fb31536f..af60fe5c 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size.v @@ -12,19 +12,20 @@ Lemma core_page_allocator_allocator_PageAllocator_find_largest_page_size_proof ( Proof. core_page_allocator_allocator_PageAllocator_find_largest_page_size_prelude. - rep liRStep; liShow. - 1: match goal with - | H : capture_address_ `aligned_to` page_size_in_bytes_nat ?x, - H' : page_size_smaller ?x = Some ?x' |- _ => + (* !start proof(page_allocator.find_largest_page_size) *) + rep liRStep; liShow. 
+ 1: match goal with + | H : capture_address_ `aligned_to` page_size_in_bytes_nat ?x, + H' : page_size_smaller ?x = Some ?x' |- _ => rename x into sz_aligned; rename x' into sz_aligned_smaller; rename H' into Hsmaller end. 1: rewrite Hsmaller. all: rep liRStep; liShow. - 1: match goal with - | H : address `aligned_to` page_size_in_bytes_nat ?x, - H' : page_size_smaller ?x = Some ?x' |- _ => + 1: match goal with + | H : address `aligned_to` page_size_in_bytes_nat ?x, + H' : page_size_smaller ?x = Some ?x' |- _ => rename x into sz_aligned; rename x' into sz_aligned_smaller; rename H' into Hsmaller @@ -36,13 +37,14 @@ Proof. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - all: try match goal with + all: try match goal with | H : _ `aligned_to` page_size_in_bytes_nat ?x |- _ => rename x into sz'; specialize (page_size_in_bytes_nat_ge sz') as ? end. all: try lia. all: by eapply address_aligned_to_page_size_smaller. + (* !end proof *) Unshelve. all: print_remaining_sidecond. (*Qed.*) diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v index 98845190..c58871f1 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0.v @@ -12,13 +12,15 @@ Lemma core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure Proof. core_page_allocator_allocator_PageAllocator_find_largest_page_size_closure0_prelude. + (* !start proof(page_allocator.find_largest_page_size) *) rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "vlft6" "ulft1"). + apply_update (updateable_copy_lft "vlft7" "ulft1"). rep <-! liRStep; liShow. 
apply_update (updateable_copy_lft "ulft3" "ulft_4"). rep <-! liRStep; liShow. apply_update (updateable_copy_lft "vlft15" "ulft1"). rep <-! liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages.v new file mode 100644 index 00000000..f62cab49 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages.v @@ -0,0 +1,40 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_release_pages. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_release_pages_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_release_pages_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_release_pages_prelude. + + (* !start proof(page_allocator.release_pages) *) + rep <-! liRStep; liShow. + (* TODO: lots of spurious lifetimes, due to latebound instantiation (see #27) *) + apply_update (updateable_copy_lft "vlft3" "static"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft10" "vlft5"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft12" "vlft5"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft14" "vlft5"). + rep <-! liRStep; liShow. + apply_update (updateable_copy_lft "vlft16" "vlft5"). + rep <-! liRStep; liShow. + rep <- 2 liRStep; liShow. + { rep liRStep. + unfold once_initialized. rep liRStep. } + rep liRStep. + (* !end proof *) + + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. 
all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages_closure0.v new file mode 100644 index 00000000..ea0ed1b1 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_release_pages_closure0.v @@ -0,0 +1,43 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_release_pages_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_release_pages_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_release_pages_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_release_pages_closure0_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page_allocator.release_pages) *) + apply_update (updateable_copy_lft "ulft2" "ulft_3"). + rep liRStep; liShow. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.release_pages) *) + all: try congruence. + all: clear_layout. + - rename select (max_node_size root_node = Size128TiB) into Hsz_root. + rewrite Hsz_root. solve_goal. + - rewrite /page_within_range. + rewrite Hroot_base. + simpl. + split. { lia. } + rename select (_ ≤ MAX_PAGE_ADDR) into Hbounds. + etrans; first apply Hbounds. + rewrite MAX_PAGE_ADDR_unfold /MAX_PAGE_ADDR_def. + rename select (max_node_size _ = max_node_size root_node) into Hsz. + rename select (max_node_size root_node = Size128TiB) into Hsz'. + rewrite Hsz Hsz'. lia. 
+ (* !end proof *) + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_try_write.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_try_write.v new file mode 100644 index 00000000..3b5211d6 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageAllocator_try_write.v @@ -0,0 +1,32 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageAllocator_try_write. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageAllocator_try_write_proof (π : thread_id) : + core_page_allocator_allocator_PageAllocator_try_write_lemma π. +Proof. + core_page_allocator_allocator_PageAllocator_try_write_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page_allocator.try_write) *) + rep liRStep. liShow. + iRename select (∀ _, FnOnce_Pre _ _ _ _ _)%I into "Hpre". + liInst Hevar_x1 p. + iApply prove_with_subtype_default. + iSplitR. { iApply "Hpre". } + rep liRStep. liShow. + liInst Hevar_x2 γ0. + rep liRStep. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token.v new file mode 100644 index 00000000..39676c39 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token.v @@ -0,0 +1,282 @@ +From caesium Require Import lang notation. 
+From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +(* !start proof(page_allocator.acquire_page_token) *) +Lemma update_allocation_state_inv self new_state max_alloc max_allocs children child_size : + max_allocs = fmap page_node_can_allocate children → + max_alloc = max_list_cmp (option_cmp page_size_cmp) max_allocs None → + new_state = match max_alloc ≫= id with + | Some max => PageTokenPartiallyAvailable max + | None => PageTokenUnavailable + end → + page_size_smaller (max_node_size self) = Some child_size → + page_storage_node_children_wf (base_address self) (max_node_size self) children → + page_storage_node_invariant_case + {| + max_node_size := max_node_size self; + base_address := base_address self; + allocation_state := new_state; + children_initialized := true + |} (max_alloc ≫= id) None children. +Proof. + intros Hmax Hmaxs -> Hsmaller Hchildren. + destruct (max_alloc ≫= id) as [max_alloc' | ] eqn:Heq; simpl. + - apply bind_Some in Heq as (? & -> & Heq). simpl in Heq. subst. + unfold page_storage_node_invariant_case; simpl. + destruct (max_list_cmp_elem_of (cmp:=option_cmp page_size_cmp) (page_node_can_allocate <$> children) None) as [Hdef | Hel]; first congruence. + destruct Hel as (ps & Hmax & Hel). + apply list_elem_of_fmap in Hel as (child & -> & Hel). + rewrite Hmax in Hmaxs. simplify_eq. + apply list_elem_of_lookup_1 in Hel as (child_index & Hlook). + + opose proof* (page_storage_node_children_wf_child_lookup child_index) as (Hchild_sz & Hchild_base & Hbound); [done.. | ]. + split; first done. + eexists. + split_and!; try done. + + rewrite -Hmaxs in Hbound. move: Hbound. + normalize_and_simpl_impl false. intros Hbound. + apply page_size_smaller_lt in Hsmaller. + eapply ord_le_ord_lt_trans; done. 
+ + naive_solver. + - apply bind_None in Heq. + unfold page_storage_node_invariant_case; simpl. + done. +Qed. + +Lemma node_partially_available_exists_child node available_sz tok children sz : + node.(allocation_state) ≠ PageTokenAvailable → + node.(allocation_state) ≥o{node_allocation_state_cmp} PageTokenPartiallyAvailable sz → + page_storage_node_invariant_case node available_sz tok children → + ∃ child_index child, + children !! child_index = Some child ∧ + Some sz ≤o{option_cmp page_size_cmp} page_node_can_allocate child. +Proof. + intros Hnot_avail Havail. + unfold page_storage_node_invariant_case. + repeat case_decide. + - intros [-> ->]. + rename select (allocation_state node = _) into Hstate. + rewrite Hstate in Havail. + exfalso. move: Havail. + unfold ord_ge. solve_goal. + - done. + - intros [-> (alloca & Halloca & -> & ? & ? & Hchild)]. + rewrite Halloca in Havail. + destruct Hchild as (i & child & Hchild & Halloc). + exists i, child. split; first done. + rewrite Halloc. move: Havail. + unfold ord_ge, ord_le. solve_goal. +Qed. +(* !end proof *) + +Lemma core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_prelude. + + repeat liRStep; liShow. + (* !start proof(page_allocator.acquire_page_token) *) + { liInst Hevar_rf self. repeat liRStep; liShow. } + 2: { liInst Hevar_rf self. repeat liRStep; liShow. } + + liInst Hevar_rf self. + rep <-! liRStep. + + set (position_pred := λ node, ((Some page_size_to_acquire) ≤o{ option_cmp page_size_cmp } page_node_can_allocate node)). + rep 10 liRStep; liShow. + rep 20 liRStep; liShow. + unfold map_inv_ty. (* TODO *) + rep liRStep; liShow. + liInst Hevar_x0 (λ _ _ clos_state, (⌜clos_state = -[page_size_to_acquire]⌝ ∗ True)%I). + liInst Hevar_x2 position_pred. + + rep <-! liRStep. + 2: repeat liRStep. + 2: repeat liRStep. 
+ + match goal with + | H : page_size_smaller (max_node_size self) = Some ?x |- _ => + rename x into child_size; + rename H into Hsmaller + end. + rep <-! liRStep. liShow. + + set (nodes := (<[Z.to_nat z:=x'2]> (x' ++ e :: x'1))). + set (max_allocs := ((λ p, page_node_can_allocate p.1) <$> zip nodes (replicate (length x' + S (length x'1)) ()))). + set (max_alloc := max_list_cmp (option_cmp page_size_cmp) max_allocs None). + set (new_state := match max_alloc ≫= id with | Some max => PageTokenPartiallyAvailable max | None => PageTokenUnavailable end). + repeat liRStep. + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) new_state true). + + repeat liRStep. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer_normalize. + all: clear_layout; clear_unused_vars. + all: try lia. + all: try match goal with + | H : page_size_smaller (max_node_size _) = Some ?x |- _ => + rename x into smaller_size; + rename H into Hsmaller + end. + all: try rename select (max_node_size self ≠ page_size_to_acquire) into Hlt_self. + all: try rename x' into children_without_alloc. + all: try rename e into allocable_node. + all: try set (children_new := (children_without_alloc ++ allocable_node :: x'1)). + all: try rename select (page_node_can_allocate allocable_node = Some _) into Hallocable_node. + all: try match type of Hallocable_node with page_node_can_allocate _ = Some ?y => rename y into allocable_size end. + all: try rename select (length children_without_alloc = Z.to_nat _) into Hpos. + all: try match type of Hpos with _ = Z.to_nat ?z => rename z into pos end. + all: try rename select (Forall _ children_without_alloc) into Hnot_found. + + - + opose proof* (page_storage_node_invariant_no_tok) as [Ha ?]; first apply INV_CASE. + rename select (_ ∨ _) into Hle. + destruct Hle as [(max_sz' & -> & Hlt)| Heq]. + + move: Ha. normalize_and_simpl_goal. + assert(max_sz' & Hlt). 
+ normalize_and_simpl_impl false. + intros Hle. + opose proof* (ord_lt_ord_le_trans (A:=page_size)) as Hlt2; first apply Hlt; first apply Hle. + move: Hlt2. clear. + destruct page_size_to_acquire; done. + + rewrite Heq'. + normalize_and_simpl_impl false. + rewrite Heq in Hlt_self. + intros [Hlt | Heq'']. + * clear -Hlt. by destruct page_size_to_acquire. + * move: Heq''. normalize_and_simpl_impl false. done. + - eexists. done. + - apply page_storage_node_invariant_has_token in INV_CASE as (-> & _). done. + - opose proof (page_storage_node_invariant_has_token _ _ _ _ INV_CASE) as (Hstate & Hsz & Haddr). + simpl in Hsz, Haddr. subst. + exists (max_node_size self). split; last reflexivity. + unfold page_node_can_allocate. by rewrite Hstate. + - eexists. done. + - by eapply (page_node_invariant_case_can_allocate_bounded (mk_page_node _ _ _ _)). + - intros (sz & Halloc & Hlb). + opose proof (page_storage_node_invariant_case_can_allocate _ _ _ _ INV_CASE). + subst max_sz. + repeat revert select (¬ _). + rewrite Halloc. + intros Hneq Hnlt. + destruct Hlb as [Hlt | Heq]. + + apply Hnlt. + solve_goal. + + apply Hneq. + move: Heq. solve_goal. + - eexists. done. + - (* should prove that at z there is a child (by position_pred e). Then follows from invariant on children *) + odestruct (lookup_lt_is_Some_2 children_new (Z.to_nat pos) _) as (child & Hlook_child). + { rewrite length_app/=. lia. } + erewrite list_lookup_total_correct; last done. + opose proof (page_storage_node_children_wf_child_lookup (Z.to_nat pos) _ _ _ children_new _ _ _ _) as (? & Heq & _). + { apply Hsmaller. } + { done. } + { done. } + rewrite Heq. f_equiv. lia. + - (* ditto *) + odestruct (lookup_lt_is_Some_2 children_new (Z.to_nat pos) _) as (child & Hlook_child). + { rewrite length_app/=. lia. } + erewrite list_lookup_total_correct; last done. + opose proof (page_storage_node_children_wf_child_lookup (Z.to_nat pos) _ _ _ children_new _ _ _ _) as (Heq & ?). + { apply Hsmaller. } + { done. } + { done. 
} + rewrite Heq. done. + - eauto. + - (* ditto; similar as above *) + exfalso. rename select (¬ ∃ sz : page_size, _) into Hcontra. apply Hcontra. + rewrite lookup_total_app_r; last lia. + rewrite -Hpos. solve_goal. + - (* contradiction: Forall (¬ position_pred), but we can allocate *) + (* max_sz is at least page_size_to_acquire, but page_size_to_acquire is less then max_node_size self. + + Thus, self ≥ PageTokenPartiallyAvailable page_size_to_acquire. + + x' are the new children after initializing if necessary and dividing. + + new_state ≥ PageTokenPartiallyAvailable page_size_to_acquire. + Thus, there exists a child satisfying position_pred. + *) + exfalso. + opose proof* (page_storage_node_invariant_case_can_allocate) as Heq. + { apply INV_CASE. } + subst max_sz. + + rename select (page_storage_node_invariant_case _ _ _ children_without_alloc) into INV_CASE'. + opose proof (node_partially_available_exists_child _ _ _ _ page_size_to_acquire _ _ INV_CASE') as (child_index & child & Hchild & Hchild_alloc). + { simpl. destruct (allocation_state self); done. } + { simpl. eapply ord_le_antisym. + rename select (_ ∨ _) into Hsz_acquire_bound. + eapply node_allocation_state_meet_le_lub. + - apply page_node_can_allocate_lb. + destruct Hsz_acquire_bound as [(sz & Halloc & Hlt)| Halloc]. + + rewrite Halloc. normalize_and_simpl_goal. by left. + + rewrite Halloc. solve_goal. + - normalize_and_simpl_goal. + enough (page_size_to_acquire <ₚ max_node_size self) by lia. + opose proof* page_node_invariant_case_sized_bounded as Hbound; first apply INV_CASE. + move: Hbound. + destruct Hsz_acquire_bound as [Hlt | Heq]. + + move: Hlt. intros (sz & -> & Hlt). + normalize_and_simpl_goal. + by eapply ord_lt_ord_le_trans. + + rewrite Heq. + normalize_and_simpl_impl false. + intros [Hlt | Heq']; first done. + move: Heq'. normalize_and_simpl_goal. } + opose proof* (Forall_lookup_1) as Hpos. + { apply Hnot_found. } + { apply Hchild. } + simpl in Hpos. apply Hpos. unfold position_pred. 
+ done. + - apply page_storage_node_children_wf_insert; done. + - rename select (page_storage_node_invariant_case _ _ _ (children_without_alloc ++ _)) into INV_CASE2. + opose proof* page_storage_node_invariant_case_at_most_partially_available_inv as [Hle_sz ->]; [ | apply INV_CASE2 | ]. + { simpl. apply node_allocation_state_meet_le_r. } + eapply update_allocation_state_inv. + { done. } + { subst max_alloc. f_equiv. + rewrite /max_allocs. + etrans. { eapply (list_fmap_ext _ (page_node_can_allocate ∘ fst)). intros ? []; done. } + rewrite list_fmap_compose fst_zip; first done. + rewrite /nodes. solve_goal. } + { done. } + { done. } + { done. } + - by eapply (page_node_invariant_case_can_allocate_bounded (mk_page_node _ _ _ _)). + - rename select (_ ∨ _) into Hsz_acquire_bound. + opose proof* page_storage_node_invariant_case_can_allocate as Halloc; first apply INV_CASE. + move: Hsz_acquire_bound. + rewrite Halloc. + solve_goal. + (* !end proof *) + + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to long Qed *) +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v index 5d714680..e1376c46 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure0.v @@ -17,12 +17,14 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.acquire_page_token) *) { unfold page_node_can_allocate. - revert INV_CASE. - destruct (node.(allocation_state)) eqn:Heq; simpl. - all: normalize_and_simpl_impl false. - all: clear; sidecond_hammer. 
+ opose proof* (page_storage_node_invariant_case_can_allocate) as Heq; first apply INV_CASE. + rewrite -Heq. unfold page_node_can_allocate. + unfold ord_le. + split; solve_goal. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v index 9c356f5c..4889d5fa 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_closure1.v @@ -17,11 +17,13 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - { f_equiv; first done. + (* !start proof(page_allocator.acquire_page_token) *) + { f_equiv. move: INV_CASE. unfold page_node_can_allocate. destruct (allocation_state child) eqn:Heq. all: sidecond_hammer. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v index 0f8b88d0..925b3c58 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node.v @@ -15,17 +15,15 @@ Proof. core_page_allocator_allocator_PageStorageTreeNode_acquire_page_token_from_this_node_prelude. rep <-! liRStep; liShow. + (* !start proof(page_allocator.acquire_page_token_from_this_node) *) rep liRStep. 
- liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenUnavailable self.(children_initialized)). + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) PageTokenUnavailable self.(children_initialized)). rep liRStep. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - all: move: INV_CASE; - unfold page_storage_node_invariant_case; - rewrite Hstate/=; - sidecond_hammer. Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v index 26c3ba81..05c2ff60 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index.v @@ -7,6 +7,7 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. +(* !start proof(page_allocator.calculate_child_index) *) Lemma rem_le_a_minus_b a b z : 0 ≤ b → 0 < a → @@ -33,23 +34,23 @@ Proof. Qed. Lemma page_within_range_offset base off sz p child_sz : - (page_size_in_bytes_Z (page_sz p) | (page_loc p).2) → + (page_size_in_bytes_Z (page_sz p) | (page_loc p).(loc_a)) → (page_size_in_bytes_Z sz | base) → page_size_smaller sz = Some child_sz → p.(page_sz) ≤ₚ child_sz → - off = ((p.(page_loc).2 - base) `quot` page_size_in_bytes_Z child_sz) * page_size_in_bytes_Z child_sz → + off = ((p.(page_loc).(loc_a) - base) `quot` page_size_in_bytes_Z child_sz) * page_size_in_bytes_Z child_sz → page_within_range base sz p → page_within_range (base + off) child_sz p. Proof. - set (i := ((p.(page_loc).2 - base) `quot` page_size_in_bytes_Z child_sz)). 
+ set (i := ((p.(page_loc).(loc_a) - base) `quot` page_size_in_bytes_Z child_sz)). intros Hal1 Hal2 Hsmaller Hle -> [Hran1 Hran2]. split; first lia. - assert (p.(page_loc).2 - base ≤ i * page_size_in_bytes_Z child_sz + Z.rem (p.(page_loc).2 - base) (page_size_in_bytes_Z child_sz)). + assert (p.(page_loc).(loc_a) - base ≤ i * page_size_in_bytes_Z child_sz + Z.rem (p.(page_loc).(loc_a) - base) (page_size_in_bytes_Z child_sz)). { subst i. - specialize (Z.quot_rem' ((page_loc p).2 - base) (page_size_in_bytes_Z child_sz)) as Heq. + specialize (Z.quot_rem' ((page_loc p).(loc_a) - base) (page_size_in_bytes_Z child_sz)) as Heq. rewrite {1}Heq. lia. } - enough (page_size_in_bytes_Z child_sz - page_size_in_bytes_Z (page_sz p) >= ((page_loc p).2 - base) `rem` page_size_in_bytes_Z child_sz) by lia. + enough (page_size_in_bytes_Z child_sz - page_size_in_bytes_Z (page_sz p) >= ((page_loc p).(loc_a) - base) `rem` page_size_in_bytes_Z child_sz) by lia. rewrite Z.rem_mod_nonneg; [ | lia | ]; first last. { specialize (page_size_in_bytes_nat_ge child_sz). lia. } @@ -64,6 +65,7 @@ Proof. etrans; first done. left. by apply page_size_smaller_lt. Qed. +(* !end proof *) Lemma core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_proof (π : thread_id) : core_page_allocator_allocator_PageStorageTreeNode_calculate_child_index_lemma π. @@ -75,39 +77,30 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.calculate_child_index) *) { move: Hrange; unfold page_within_range. solve_goal. } - { move: Hrange; unfold page_within_range; solve_goal. } { simplify_eq. specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } { simplify_eq. specialize (page_size_in_words_nat_ge child_node_sz). sidecond_hammer. } - { (* compute the index *) - simplify_eq. - etrans; last apply Z.quot_pos; first done; first lia. - specialize (page_size_in_bytes_nat_ge child_node_sz). - lia. } { simplify_eq. 
specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. - apply Z.quot_le_upper_bound; first lia. - nia. } + split. + - nia. + - apply Z.quot_le_upper_bound; first lia. nia. } { simplify_eq. specialize (page_size_in_bytes_nat_ge child_node_sz) as ?. - apply Z.quot_lt_upper_bound; [lia | lia | ]. + apply Z.quot_lt_upper_bound; [solve_goal| lia | ]. opose proof (page_size_multiplier_size_in_bytes this_node_page_size child_node_sz _) as Ha. { by rewrite Hchild. } rewrite -Nat2Z.inj_mul. rewrite -Ha. - destruct Hrange as [Hran1 Hran2]. move: Hran2. simpl. specialize (page_size_in_bytes_nat_ge page_token0). - lia. - } + lia. } { simplify_eq. eapply page_within_range_offset; simpl; [ | | done..]. - rewrite -page_size_align_is_size. done. - eexists. done. } - { destruct Hsz_lt as [Hsz_lt |Hsz_eq]. - - apply Z.cmp_less_iff in Hsz_lt. lia. - - apply Z.cmp_equal_iff in Hsz_eq. apply page_size_variant_inj in Hsz_eq. - simplify_eq. lia. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v index 703adff1..69bc5ae2 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_child_address_and_size.v @@ -17,44 +17,22 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.child_address_and_size) *) all: rename select (page_size_smaller _ = Some _) into Hsz. - - move: INV_IN_RANGE. - opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->. + - opose proof (page_size_multiplier_size_in_bytes this_node_page_size _ _) as Heq. { rewrite Hsz//. 
} - move: INV_INIT_CHILDREN. rewrite H_child_init. + specialize (page_size_in_bytes_nat_in_usize this_node_page_size) as []. nia. - (* samei *) move: INV_IN_RANGE. - opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->. + opose proof (page_size_multiplier_size_in_bytes this_node_page_size _ _) as ->. { rewrite Hsz//. } - move: INV_INIT_CHILDREN. rewrite H_child_init. - nia. - - move: INV_IN_RANGE. - opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->. - { rewrite Hsz//. } - move: INV_INIT_CHILDREN. rewrite H_child_init. - nia. - - move: INV_IN_RANGE. - opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->. - { rewrite Hsz//. } - move: INV_INIT_CHILDREN. rewrite H_child_init. nia. - unfold child_base_address. f_equiv. rewrite Z.mul_comm. f_equiv. congruence. - congruence. - - move: Hsz. - revert select (max_node_size self = Size4KiB). - intros ->. - simpl. done. - - move: Hsz. - revert select (max_node_size self = Size4KiB). - intros ->. - simpl. done. - - move: Hsz. - revert select (max_node_size self = Size4KiB). - intros ->. - simpl. done. + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v index 3ddd599d..1ba7cbc6 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary.v @@ -7,6 +7,7 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. 
+(* !start proof(page_allocator.divide_page_token_if_necessary) *) Lemma divide_page_token_neutral node sz children smaller_size : page_size_smaller node.(max_node_size) = Some smaller_size → page_storage_node_invariant_case node sz None children → @@ -20,9 +21,9 @@ Proof. - intros [_ (? & [= <-] & -> & Hlt & ? & ?)]. cbn. rewrite page_size_min_l; first done. apply page_size_smaller_page_size_variant in Hsmaller. - apply Z.ord_lt_iff in Hlt. - apply Z.ord_le_iff. lia. + lia. Qed. +(* !end proof *) Lemma core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_proof (π : thread_id) : core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_lemma π. @@ -30,12 +31,15 @@ Proof. core_page_allocator_allocator_PageStorageTreeNode_divide_page_token_if_necessary_prelude. repeat liRStep. - 1: liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) (PageTokenPartiallyAvailable smaller_size) true). - 2: liInst Hevar self. + (* !start proof(page_allocator.divide_page_token_if_necessary) *) + 1: liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) (PageTokenPartiallyAvailable smaller_size) true). + 2: liInst Hevar_rf self. all: repeat liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. + (* !start proof(page_allocator.divide_page_token_if_necessary) *) Unshelve. all: try lia. all: try rename select (subdivided_pages _ _) into Hsubdivided. @@ -50,7 +54,6 @@ Proof. all: rewrite Hchildren in INV_INIT_CHILDREN. all: try rename x'3 into child_index. - - right. opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Ha. { simpl. done. } @@ -83,7 +86,7 @@ Proof. odestruct (lookup_lt_is_Some_2 children (length prefix) _) as (child & Hchild). { lia. } - opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. 
+ opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as (Hchild_sz & Hchild_loc & ?). { done. } apply page_within_range_inv in Hrange; last done. @@ -106,7 +109,7 @@ Proof. odestruct (lookup_lt_is_Some_2 children (length prefix) _) as (child & Hchild). { rewrite INV_INIT_CHILDREN. lia. } - opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. + opose proof (page_storage_node_children_wf_child_lookup (length prefix) _ _ _ _ _ _ INV_WF Hchild) as (Hchild_sz &Hchild_loc & ?). { done. } opose proof (subdivided_pages_lookup_base_address (length prefix) _ _ _ _ _ Hsubdivided _) as Hloc. @@ -122,8 +125,6 @@ Proof. rewrite Hchild_loc. simpl in Hloc. rewrite Hloc. rewrite /child_base_address. lia. - - apply subdivided_pages_length in Hsubdivided. - solve_goal. - rename select (page_within_range _ _ _) into Hrange. opose proof (subdivided_pages_lookup_size_lt (length prefix) _ _ _ _ _ Hsubdivided _) as Hsz. @@ -154,9 +155,8 @@ Proof. rewrite app_nil_r. by apply page_storage_node_children_wf_upd_state. - eexists. done. - - unfold name_hint. lia. - - unfold name_hint. - rewrite skipn_all2; last lia. + - lia. + - rewrite skipn_all2; last lia. rewrite app_nil_r. simplify_eq. @@ -165,25 +165,23 @@ Proof. unfold page_storage_node_invariant_case. rewrite Hstate. simpl. normalize_and_simpl_goal. - eexists. split_and!; [done | done | | done | ]. - { apply Z.cmp_less_iff. lia. } + eexists. split_and!; [done | done | lia | done | ]. setoid_rewrite list_lookup_fmap. opose proof (lookup_lt_is_Some_2 children 0%nat _) as (child & Hchild). { specialize (page_size_multiplier_ge (max_node_size self)). lia. } eexists 0%nat, (page_node_update_state child PageTokenAvailable). rewrite Hchild. split; first done. - opose proof (page_storage_node_children_wf_child_lookup 0%nat _ _ _ _ _ _ INV_WF Hchild) as [Hchild_sz Hchild_loc]. 
+ opose proof (page_storage_node_children_wf_child_lookup 0%nat _ _ _ _ _ _ INV_WF Hchild) as (Hchild_sz & Hchild_loc & ?). { done. } rewrite -Hchild_sz//. - rename select (allocation_state self = _) into Hstate. rewrite Hstate. done. - eexists. done. - - solve_goal. - - solve_goal. - erewrite divide_page_token_neutral; [ | done | done]. destruct self as [? ? state ?]. simpl. simpl in Hchildren. rewrite Hchildren//. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed.v new file mode 100644 index 00000000..7f7d12be --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed.v @@ -0,0 +1,88 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page_allocator.initialize_children_if_needed) *) + { rep liRStep; liShow. + (* pick map invariant *) + liInst Hevar_Inv (λ π '(i, b) _, ⌜i ≤ page_size_multiplier self.(max_node_size)⌝ ∗ ⌜b = Z.of_nat $ page_size_multiplier self.(max_node_size)⌝)%I. 
+ (* instantiation of the params of PageNode::empty *) + liInst Hevar_ParamPred (λ i '( *[p1; p2]), + let '( *[i]) := i in + p1 = child_base_address (base_address self) smaller_sz i ∧ p2 = smaller_sz). + rep liRStep; liShow. + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) self.(allocation_state) true). + rep liRStep; liShow. + } + { rep liRStep. + liInst Hevar_rf self. + rep liRStep. + } + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.initialize_children_if_needed) *) + all: try rename select (Forall2 _ _ _) into Hf. + - rewrite /number_of_smaller_pages Hsmaller. lia. + - unfold child_base_address. + apply Z.divide_add_r. + + trans (page_size_align (max_node_size self)). + 2: by eexists. + rewrite (page_size_multiplier_align (max_node_size self) smaller_sz); last done. + rewrite Nat2Z.inj_mul. + apply Z.divide_factor_l. + + rewrite page_size_align_is_size. + apply Z.divide_factor_l. + - move: INV_IN_RANGE. + opose proof (page_size_multiplier_size_in_bytes (max_node_size self) _ _) as ->. + { rewrite Hsmaller//. } + unfold child_base_address. nia. + - case_bool_decide; simplify_eq. lia. + - rewrite snd_zip; first last. + { opose proof* Forall2_length as Hlen; first apply Hf. lia. } + split; first last. + { rewrite Hsmaller. done. } + intros child_sz Hsmaller' i child Hlook. + opose proof * Forall2_lookup_r as Hlook'; [apply Hf | apply Hlook | ]. + destruct Hlook' as (idx & Hlook_idx & idx' & -> & Haddr & ?). + apply lookup_seqZ in Hlook_idx as (-> & ?). + simpl. + split_and!. + + simplify_eq. done. + + simplify_eq. done. + + subst. solve_goal. + - eexists. done. + - opose proof* Forall2_length as Hlen; first apply Hf. + rewrite -Hlen. + rewrite length_seqZ. + rewrite /number_of_smaller_pages Hsmaller. + lia. 
+ - (* reasoning: + if children were empty before, then either there is no smaller page size or we are not in PartiallyAvailable state. So this is trivial *) + rewrite snd_zip; first last. + { opose proof* Forall2_length as Hlen; first apply Hf. lia. } + move: INV_CASE. + unfold page_storage_node_invariant_case. + simpl. repeat case_decide; try done. + all: solve_goal. + - eexists. done. + - destruct self as [??? children_init]. simpl in *. + destruct children_init; done. + (* !end proof *) + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0.v new file mode 100644 index 00000000..194ed2d1 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0.v @@ -0,0 +1,26 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_prelude. + + repeat liRStep; liShow. + (* !start proof(page_allocator.initialize_children_if_needed) *) + liInst Hevar_x1 node_size. liInst Hevar_x2 (n * page_size_align node_size). + rep liRStep; liShow. + (* !end proof *) + + all: print_remaining_goal. 
+ Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut.v new file mode 100644 index 00000000..054cd88c --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut.v @@ -0,0 +1,27 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + + +Lemma core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_mut_prelude. + + repeat liRStep; liShow. + (* !start proof(page_allocator.initialize_children_if_needed) *) + liInst Hevar_x0 node_size. liInst Hevar_x2 (n * page_size_align node_size). + rep liRStep; liShow. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once.v new file mode 100644 index 00000000..ef2b243f --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once.v @@ -0,0 +1,26 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_initialize_children_if_needed_closure0_call_once_prelude. + + repeat liRStep; liShow. + (* !start proof(page_allocator.initialize_children_if_needed) *) + liInst Hevar_x0 node_size. liInst Hevar_x2 (n * page_size_align node_size). + rep liRStep; liShow. + (* !end proof *) + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v index 640b5e9f..59b9518e 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token.v @@ -7,6 +7,7 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. +(* !start proof(page_allocator.store_page_token) *) Lemma page_storage_node_invariant_case_store_token sz base state children max_alloc opt_page new_state new_max_alloc (i : nat) child child' child_alloc child_alloc' : children !! i = Some child → page_node_can_allocate child = child_alloc → @@ -31,7 +32,7 @@ Proof. - normalize_and_simpl_goal. eexists. split_and!; try done. apply max_by_r. apply not_ord_gt_iff. - left. by apply Z.cmp_less_iff. + left. by apply Z.compare_lt_iff. - intros (-> & (allocable_sz & [= <-] & -> & ? & _ & Ha)). split; first done. destruct (decide (available_sz ≤ₚ child_alloc')) as [Hlt' | Hnlt]. @@ -106,23 +107,24 @@ Proof. erewrite page_storage_node_invariant_case_can_allocate; last done. by eapply page_node_invariant_case_sized_bounded. Qed. +(* !end proof *) Lemma core_page_allocator_allocator_PageStorageTreeNode_store_page_token_proof (π : thread_id) : core_page_allocator_allocator_PageStorageTreeNode_store_page_token_lemma π. Proof. core_page_allocator_allocator_PageStorageTreeNode_store_page_token_prelude. + (* !start proof(page_allocator.store_page_token) *) rep liRStep; liShow. 2: repeat liRStep. 3: repeat liRStep. - 1: liInst Hevar self; rep liRStep; liShow. - 1: liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)); rep liRStep. + 1: liInst Hevar_rf self; rep liRStep; liShow. 
+ 1: liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)); rep liRStep. (* there is a smaller page size *) rep <-! liRStep. 2: repeat liRStep. - rep liRStep. - liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) self.(allocation_state) true). rep <-! liRStep. + match goal with | H : page_node_can_allocate _ = Some ?a |- _ => rename a into allocable_sz_rec @@ -130,25 +132,17 @@ Proof. set (new_state := (PageTokenPartiallyAvailable allocable_sz_rec) ⊔ self.(allocation_state)). repeat liRStep. liShow. - liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) new_state true). + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) new_state true). repeat liRStep. - liInst Hevar (mk_page_node (max_node_size self) (base_address self) x'1 true). - rename x'1 into updated_node_state. + liInst Hevar_rf (mk_page_node (max_node_size self) (base_address self) x'2 true). + rename x'2 into updated_node_state. repeat liRStep. all: print_remaining_goal. Unshelve. all: sidecond_solver. - Unshelve. all: try lia. + Unshelve. all: sidecond_hammer. + all: clear_layout. - all: try match goal with - | H : page_storage_node_children_wf _ _ ?x, - H2: page_storage_node_invariant_case (mk_page_node _ _ _ _) _ _ ?x, - H3: length ?x = page_size_multiplier _ |- _ => - rename x into children; - rename H2 into INV_CASE2; - rename H into INV_WF2; - rename H3 into INV_INIT_CHILDREN2 - end. all: try rename x' into child_index. all: try set (child := children !!! Z.to_nat child_index). all: try match goal with @@ -156,71 +150,42 @@ Proof. rename x into child_size; rename H into Hsmaller end. 
all: try match goal with - | H : ?x = PageTokenPartiallyAvailable _ ∨ ?x = PageTokenAvailable, - H1 : page_storage_node_invariant_case (mk_page_node _ _ ?x _) _ _ _ |- _ => + | H : ?x = _ ∨ ?x = PageTokenAvailable |- _ => rename x into merged_state; - rename H1 into INV_CASE_MERGED; rename H into Hmerged end. + all: try match goal with + | H1: ?x = _ ∨ _, + H : page_storage_node_invariant_case (mk_page_node _ _ ?x _) (Some ?p') _ _ |- _ => + rename H into Hcase_merged; + rename p' into merged_available_sz + end. - apply page_within_range_inv in Hrange; simpl in Hrange; done. - destruct Hsz_le as [Hsz_lt | Hsz_eq%Z.ord_eq_iff]. - + apply Z.ord_lt_iff in Hsz_lt. move: Hsz_lt. lia. + + lia. + apply page_size_variant_inj in Hsz_eq. done. - eexists. done. - - done. - - move: INV_CASE. - unfold page_storage_node_invariant_case. - solve_goal. - - eexists. done. - - done. - - done. - erewrite page_storage_node_invariant_case_can_allocate; last done. by eapply page_node_invariant_case_sized_bounded. - - rename select (_ ∨_) into Hsz. - destruct Hsz as [Hsz | <-]; last done. - unfold ord_le, ord_lt, ord_eq. - move: Hsz. - unfold page_size_cmp. - match goal with - | H: page_size_variant _ = page_size_variant (max_node_size self) - 1 |- _ => rewrite H - end. - clear. - solve_goal. - rewrite -page_size_align_is_size. eexists. done. - - rename select (_ ∨_) into Hsz. - destruct Hsz as [Hsz | <-]; last done. - move: Hsz. - rename select (max_node_size self = _) into Hsz. rewrite Hsz. - destruct page_token0; done. - - eexists. done. - - done. - - destruct INV_WF2 as [INV_WF1 INV_WF2]. - odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as [Ha Hb]. + - destruct INV_WF as [INV_WF1 INV_WF2]. + odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as (Ha & Hb & Hc). { done. } { assert (Z.to_nat child_index < length children); last solve_goal. - rewrite INV_INIT_CHILDREN2. + rewrite INV_INIT_CHILDREN. specialize (page_size_multiplier_ge (max_node_size self)); lia. } rewrite Hb. 
unfold child_base_address. lia. - - destruct INV_WF2 as [INV_WF1 INV_WF2]. - odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as [Ha Hb]. + - destruct INV_WF as [INV_WF1 INV_WF2]. + odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as (Ha & Hb & Hc). { done. } { assert (Z.to_nat child_index < length children); last solve_goal. - rewrite INV_INIT_CHILDREN2. + rewrite INV_INIT_CHILDREN. specialize (page_size_multiplier_ge (max_node_size self)); lia. } done. - - opose proof (list_lookup_lookup_total_lt children (Z.to_nat child_index) _) as Hlook_child. - { lia. } - fold child in Hlook_child. - opose proof (page_storage_node_children_wf_child_lookup _ _ _ _ children _ _ _ _) as Hchild; [ | done | apply Hlook_child | ]; first done. - destruct Hchild as [-> _]. - apply Z.ord_le_iff. - move: Hsz_le. unfold ord_le. - normalize_and_simpl_goal. - lia. - match goal with | H: page_within_range (base_address self + _) _ _ |- _ => rename H into Hran end. @@ -228,9 +193,23 @@ Proof. unfold child_base_address. rewrite Z.mul_comm. done. + - rename select (_ ∨_) into Hsz. + destruct Hsz as [Hsz | <-]; last done. + move: Hsz. + rename select (max_node_size self = _) into Hsz. rewrite Hsz. + solve_goal. - eapply page_storage_node_children_wf_insert; last done. - all: done. - - eexists _. done. + 1-2: done. + destruct INV_WF as [INV_WF1 INV_WF2]. + odestruct (INV_WF1 _ _ (Z.to_nat child_index) child _) as (Ha & Hb & Hc). + { done. } + { assert (Z.to_nat child_index < length children); last solve_goal. + rewrite INV_INIT_CHILDREN. + specialize (page_size_multiplier_ge (max_node_size self)); lia. } + rename select (page_node_can_allocate x'1 = _) into Heq1. + rename select (max_node_size x'1 = _) into Heq2. + rewrite Heq1 Heq2. solve_goal. + - eexists. done. - opose proof (list_lookup_lookup_total_lt children (Z.to_nat child_index) _) as Hlook_child. { lia. } fold child in Hlook_child. @@ -249,29 +228,18 @@ Proof. apply not_ord_lt_iff. left. done. - eapply max_by_r. 
apply not_ord_gt_iff. move: Ha. - destruct (option_cmp page_size_cmp _ (Some allocable_sz_rec)) eqn:Heq; first done. + destruct (option_cmp page_size_cmp _ (Some allocable_sz_rec)) eqn:Heq. + right. done. + + done. + left. by apply correct_ord_antisym. } { done. } - - match goal with - | H : ?x = new_state ∨ ?x = PageTokenAvailable |- _ => - rename x into new_state'; - rename H into Hnew_state' - end. - exfalso. move: Hnew_state'. - rename select (page_storage_node_invariant_case (mk_page_node _ _ new_state' _) _ _ _) into Hcase'. + - exfalso. move: Hmerged. + rename select (page_storage_node_invariant_case (mk_page_node _ _ merged_state _) _ _ _) into Hcase'. apply page_storage_node_invariant_not_available in Hcase' as [Ha ->]. simpl in Ha. rewrite Ha. subst new_state. clear. intros [? | ?]; last done. destruct (allocation_state self); done. - - eexists. done. - - done. - (* did a merge, now it's available *) - match goal with - | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => - rename H into Hcase_merged; - rename p into merged_available_sz - end. eapply available_sz_lower_bound. + apply INV_CASE. + apply Hcase_merged. @@ -279,48 +247,18 @@ Proof. + simpl. move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. + solve_goal. - - match goal with - | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => - rename H into Hcase_merged; - rename p into merged_available_sz - end. - match goal with - | H : updated_node_state = new_state ∨ _ |- _ => - rename H into Hmerged - end. - by eapply page_storage_node_invariant_case_can_allocate. - - (* same as above *) - match goal with - | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => - rename H into Hcase_merged; - rename p into merged_available_sz - end. - eapply available_sz_lower_bound. - + apply INV_CASE. - + apply Hcase_merged. - + done. - + simpl. move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. - + solve_goal. 
- - match goal with - | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => - rename H into Hcase_merged; - rename p into merged_available_sz - end. - opose proof (available_sz_lower_bound _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) as Ha. + - by eapply page_storage_node_invariant_case_can_allocate. + - opose proof (available_sz_lower_bound _ _ _ _ _ _ _ _ _ _ _ _ _ _ _) as Ha. + apply INV_CASE. + apply Hcase_merged. + done. + simpl. move: Hsz_le. unfold ord_le. normalize_and_simpl_goal. + solve_goal. + destruct Ha as [_ Ha]. apply Ha. - - match goal with - | H : page_storage_node_invariant_case _ (Some ?p') _ _ |- _ => - rename H into Hcase_merged; - rename p into merged_available_sz - end. - opose proof (page_node_invariant_case_sized_bounded _ _ _ _ _) as Ha. + - opose proof (page_node_invariant_case_sized_bounded _ _ _ _ _) as Ha. { apply Hcase_merged. } done. + (* !end proof *) Unshelve. all: print_remaining_sidecond. (*Qed.*) diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v index 2e05adb8..4e99185f 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node.v @@ -15,15 +15,19 @@ Proof. core_page_allocator_allocator_PageStorageTreeNode_store_page_token_in_this_node_prelude. rep liRStep; liShow. - { liInst Hevar (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)). + (* !start proof(page_allocator.store_page_token_in_this_node) *) + { liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable self.(children_initialized)). rep liRStep; liShow. 
} + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.store_page_token_in_this_node) *) { move: INV_CASE. unfold name_hint. unfold page_storage_node_invariant_case. solve_goal. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens.v new file mode 100644 index 00000000..56c6e4b2 --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens.v @@ -0,0 +1,153 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens. + +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +Lemma core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_proof (π : thread_id) : + core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_lemma π. +Proof. + core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page_allocator.try_to_merge_page_tokens) *) + rep liRStep; liShow. + { liInst Hevar_x2 (λ child, child.(allocation_state) = PageTokenAvailable). + rep liRStep. } + rep liRStep; liShow. + 2: { rep liRStep; liShow. + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) self.(allocation_state) true). + rep liRStep; liShow. 
} + (* picking invariant for map: all nodes emitted by the iterator are available *) + liInst Hevar_Inv (λ π iter _, ⌜Forall (λ node, node.(allocation_state) = PageTokenAvailable) iter.*1⌝%I). + + rep <-! liRStep; liShow. + + iRename select (IteratorNextFusedTrans _ _ _ _ _ _) into "Hiter". + iPoseProof (iterator_next_fused_trans_map_inv with "Hiter") as "Hiter". + simpl. + set (new_children := (fmap (λ child, {| max_node_size := max_node_size child; base_address := base_address child; allocation_state := PageTokenUnavailable; children_initialized := children_initialized child |}) x')). + iApply updateable_add_fupd. + iAssert (|={⊤}=> ObsList x'0 new_children)%I with "[Hiter]" as "Hobs" . + { iDestruct "Hiter" as "(%hist' & % & _ & _ & _ & Hiter & Hpred)". + iMod (learn_from_hyp_proof with "[] Hiter") as "(_ & Hb)"; first done. + simpl. rewrite right_id. iDestruct "Hb" as "<-". + rewrite big_sepL2_fmap_r. + (* TODO: we should simplify Hpred using some big_sepL2 simplification machinery similar to the Forall2 machinery *) + iPoseProof (big_sepL2_impl _ (λ _ child _, + Obs child.2 {| max_node_size := max_node_size child.1; base_address := base_address child.1; allocation_state := PageTokenUnavailable; children_initialized := children_initialized child.1 |}) with "Hpred []")%I as "Ha". + { iModIntro. iIntros (? ? ???). iIntros "(% & % & % & % & % & _ & _ & %child & % & -> & -> & %Heq & (%ret & <- & % & % & Hobs & ->) & _)". + injection Heq. intros ->. done. } + iPoseProof (big_sepL2_elim_r with "Ha") as "Ha". + (*iPoseProof (big_sepL2_from_zip with "Ha") as "Ha".*) + rewrite /ObsList. + iApply big_sepL2_from_zip. { rewrite /new_children length_fmap//. } + rewrite zip_flip big_sepL_fmap. rewrite zip_fmap_r big_sepL_fmap. + iApply (big_sepL_impl with "Ha"). + iModIntro. iModIntro. iIntros (? [] ?). simpl. eauto. } + iMod "Hobs". iModIntro. + + rep liRStep; liShow. + liInst Hevar_rf (mk_page_node self.(max_node_size) self.(base_address) PageTokenAvailable true). 
+ rep liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + all: try rename x'1 into children. + all: try rename l3 into tokens. + all: try rename select (Forall2 _ _ tokens) into Hf. + all: rewrite Hchild_init in INV_INIT_CHILDREN. + all: try (opose proof * Forall2_length as Hlen; first apply Hf). + - eexists. done. + - move: INV_CASE. + rename select (children_initialized self = true) into Hchild_init. + destruct self. simpl in *. + rewrite Hchild_init. done. + - rewrite -Hlen length_zip. lia. + - rewrite length_zip Nat.min_l in Hlen; last lia. + specialize (page_size_multiplier_ge (max_node_size self)) as Hge. + odestruct (lookup_lt_is_Some_2 tokens 0 _) as (tok0 & Hlook_tok0). + { lia. } + rewrite snd_zip; last solve_goal. + erewrite list_lookup_total_correct; last done. + opose proof* Forall2_lookup_r as Hlook1; [apply Hf | apply Hlook_tok0 | ]. + destruct Hlook1 as ([child_node0 ?] & Hlook_child & _ & Hsz & Hloc). + apply lookup_zip_Some in Hlook_child as [Hlook_child _]. + destruct INV_WF as [INV_WF _]. + opose proof (INV_WF _ _ 0%nat _ _) as Hchild; [done | done | ]. + destruct Hchild as (Hchild_sz & Hchild_addr & _). + rewrite /aligned_to. + rewrite Hloc/=. + rewrite Hchild_addr. + rewrite /child_base_address/=. + rewrite Z.mul_0_r Z.add_0_r. + eexists. done. + - rename select (_ !! i = Some pg) into Htok_i. + rewrite snd_zip in Htok_i; last lia. + opose proof* Forall2_lookup_r as Hlook1; [apply Hf | apply Htok_i | ]. + destruct Hlook1 as ([child_node ?] & Hlook_child & _ & Hsz & Hloc). + apply lookup_zip_Some in Hlook_child as [Hlook_child _]. + destruct INV_WF as [INV_WF _]. + opose proof (INV_WF _ _ i _ _) as Hchild; [done | done | ]. + destruct Hchild as (Hchild_sz & Hchild_addr & _). + rewrite Hsz Hchild_sz//. + - (* use invariant *) + rewrite length_zip Nat.min_l in Hlen; last lia. + specialize (page_size_multiplier_ge (max_node_size self)) as Hge. 
+ odestruct (lookup_lt_is_Some_2 tokens 0 _) as (tok0 & Hlook_tok0). + { lia. } + rewrite snd_zip; last solve_goal. + erewrite list_lookup_total_correct; last done. + opose proof* Forall2_lookup_r as Hlook1; [apply Hf | apply Hlook_tok0 | ]. + destruct Hlook1 as ([child_node0 ?] & Hlook_child & _ & Hsz & Hloc). + apply lookup_zip_Some in Hlook_child as [Hlook_child _]. + + rename select (_ !! i = Some _) into Htok_i. + rewrite snd_zip in Htok_i; last solve_goal. + opose proof* Forall2_lookup_r as Hlook2; [apply Hf | apply Htok_i | ]. + destruct Hlook2 as ([child_nodei ?] & Hlook_child_i & _ & Hsz' & Hloc'). + apply lookup_zip_Some in Hlook_child_i as [Hlook_child_i _]. + + destruct INV_WF as [INV_WF _]. + opose proof (INV_WF _ _ i _ _) as Hchild; [done | done | ]. + destruct Hchild as (Hchild_sz & Hchild_addr & _). + + opose proof (INV_WF _ _ 0%nat _ _) as Hchild; [done | done | ]. + destruct Hchild as (Hchild_sz_0 & Hchild_addr_0 & _). + + rewrite Hloc'/= Hchild_addr Hloc/=. + rewrite Hchild_addr_0. + rewrite Hsz' Hchild_sz. + rewrite /child_base_address. + simpl. clear. nia. + - apply page_storage_node_children_wf_upd_state; last done. + simpl. solve_goal. + - eexists. done. + - rewrite /new_children length_fmap//. + - rewrite /page_storage_node_invariant_case/=. + eexists. split_and!; try done. simpl. + rewrite length_zip Nat.min_l in Hlen; last lia. + specialize (page_size_multiplier_ge (max_node_size self)) as Hge. + odestruct (lookup_lt_is_Some_2 tokens 0 _) as (tok0 & Hlook_tok0). + { lia. } + rewrite snd_zip; last solve_goal. + erewrite list_lookup_total_correct; last done. + opose proof* Forall2_lookup_r as Hlook1; [apply Hf | apply Hlook_tok0 | ]. + destruct Hlook1 as ([child_node0 ?] & Hlook_child & _ & Hsz & Hloc). + apply lookup_zip_Some in Hlook_child as [Hlook_child _]. + destruct INV_WF as [INV_WF _]. + opose proof (INV_WF _ _ 0%nat _ _) as Hchild; [done | done | ]. + destruct Hchild as (Hchild_sz & Hchild_addr & _). + rewrite Hloc/=. 
+ rewrite Hchild_addr. + rewrite /child_base_address/=. + rewrite Z.mul_0_r Z.add_0_r//. + (* !end proof *) + + Unshelve. all: print_remaining_sidecond. +Qed. +End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v index d2e3d251..52a87150 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_allocator_PageStorageTreeNode_try_to_merge_page_tokens_closure0.v @@ -17,10 +17,12 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page_allocator.try_to_merge_page_tokens) *) { split. - intros ?. revert INV_CASE. sidecond_hammer. - intros (? & ->). revert INV_CASE. unfold page_storage_node_invariant_case. destruct (allocation_state child) eqn:Heq; simpl; sidecond_hammer. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v index 7f888e6d..e82a1066 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_end_address.v @@ -17,12 +17,11 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - { revert select (_ ≤ (conf_end _).2). - specialize (conf_end_in_usize x0). - rewrite /page_size_in_bytes_nat; solve_goal. } - { revert select (_ ≤ (conf_end _).2). - specialize (conf_end_in_usize x0). 
+ (* !start proof(page.accessors) *) + { revert select (_ ≤ (conf_end _).(loc_a)). + specialize (conf_end_in_usize MEMORY_CONFIG). rewrite /page_size_in_bytes_nat; solve_goal. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v index c8288c3c..2b38f826 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_T_write.v @@ -7,15 +7,13 @@ Set Default Proof Using "Type". Section proof. Context `{RRGS : !refinedrustGS Σ}. -Global Instance bla_copy : Copyable (error_Error_ty ). -Proof. apply _. Qed. - Lemma core_page_allocator_page_Page_T_write_proof (π : thread_id) : core_page_allocator_page_Page_T_write_lemma π. Proof. core_page_allocator_page_Page_T_write_prelude. rep <-! liRStep; liShow. + (* !start proof(page.write) *) { rename select (offset_in_bytes `rem` 8 = 0) into Hoffset. apply Z.rem_divide in Hoffset; last done. @@ -27,10 +25,12 @@ Proof. } { rep liRStep; liShow. } { rep liRStep; liShow. } + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page.write) *) { revert select (_ + off' * 8 < _ + page_size_in_bytes_Z _). rewrite /page_size_in_bytes_nat. @@ -47,6 +47,7 @@ Proof. rename select (offset_in_bytes `rem` 8 ≠ 0) into Hen. apply Hen. unfold name_hint in *. apply Z.rem_divide; done. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v index 8d560791..4d053bb0 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_Allocated_read.v @@ -14,6 +14,7 @@ Proof. core_page_allocator_page_Page_core_page_allocator_page_Allocated_read_prelude. rep <-! liRStep; liShow. + (* !start proof(page.read) *) { (* accessing the element of the array for the read requires manual reasoning *) rename select (offset_in_bytes `rem` 8 = 0) into Hoffset. apply Z.rem_divide in Hoffset; last done. @@ -24,10 +25,12 @@ Proof. repeat liRStep; liShow. } all: repeat liRStep; liShow. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page.read) *) { revert select (_ + off' * 8 < _ + page_size_in_bytes_Z _). rewrite /page_size_in_bytes_nat. @@ -44,6 +47,7 @@ Proof. rename select (offset_in_bytes `rem` 8 ≠ 0) into Hen. apply Hen. unfold name_hint in *. apply Z.rem_divide; done. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. 
diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v index 846c8ba5..2fa120a8 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide.v @@ -12,6 +12,7 @@ Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_ Proof. core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_prelude. + (* !start proof(page.divide) *) rep <-! liRStep. liShow. apply_update (updateable_copy_lft "plft27" "plft31"). rep <-! liRStep. liShow. @@ -26,12 +27,8 @@ Proof. rename self0 into sz. set (smaller_sz := default sz (page_size_smaller sz)). - (* strip later *) - apply_update (updateable_ltype_strip_later self). - rep <-! liRStep; liShow. - iRename select (self ◁ₗ[_, _] _ @ (◁ _))%I into "Harr". - (* split up the array *) + iRename select (self ◁ₗ[_, _] _ @ (◁ _))%I into "Harr". apply_update (updateable_add_fupd). iMod (array_t_ofty_split_reshape _ _ _ _ (page_size_multiplier sz) (page_size_in_words_nat smaller_sz) with "Harr") as "Harr"; [done | |]. { rewrite (page_size_multiplier_size_in_words _ smaller_sz); last done. lia. } @@ -54,23 +51,24 @@ Proof. ⌜smaller_sz' = smaller_sz⌝ ∗ once_status "MEMORY_LAYOUT" (Some memly) ∗ [∗ list] i ∈ seq itstart (itend - itstart)%nat, - ((startloc +ₗ (i * page_size_in_bytes_Z smaller_sz)) ◁ₗ[π, Owned false] + ((startloc +ₗ (i * page_size_in_bytes_Z smaller_sz)) ◁ₗ[π, Owned] # (<#> (take (page_size_in_words_nat smaller_sz) (drop (i * page_size_in_words_nat smaller_sz) pageval))) @ ◁ array_t (page_size_in_words_nat smaller_sz) (int usize)) ) : iProp Σ)%I). - rep liRStep; liShow. - liInst Hevar INV. + repeat liRStep; liShow. 
+ liInst Hevar_Inv INV. + liInst Hevar_ParamPred (λ _ _, True). (* Prove initialization of the invariant *) unfold INV at 1. simpl. + rep liRStep. iApply prove_with_subtype_default. iSplitL "Harr". - { iR. - rewrite -page_size_multiplier_quot; last done. - iSplitR. { rewrite page_size_multiplier_quot_Z; done. } - iR. iR. iR. iR. iSplitR. { iExists _. iR. done. } + { rewrite -page_size_multiplier_quot; last done. + (*iSplitR. { rewrite page_size_multiplier_quot_Z; done. }*) + (*iR. iR. iR. iR. iSplitR. { iExists _. iR. done. }*) iApply big_sepL2_elim_l. iPoseProof (big_sepL_extend_r with "Harr") as "Harr". 2: iApply (big_sepL2_wand with "Harr"). { rewrite List.length_seq length_reshape length_replicate. lia. } @@ -85,7 +83,7 @@ Proof. assert (ly_size usize * (k * page_size_in_words_nat smaller_sz) = k * page_size_in_bytes_Z smaller_sz) as ->. { rewrite /page_size_in_bytes_nat. simpl. rewrite bytes_per_int_usize. lia. } - enough (x0 = (<#> take (page_size_in_words_nat smaller_sz) (drop (k * page_size_in_words_nat smaller_sz) pageval))) as -> by done. + enough (x2 = (<#> take (page_size_in_words_nat smaller_sz) (drop (k * page_size_in_words_nat smaller_sz) pageval))) as -> by done. move: Hlook1. rewrite sublist_lookup_reshape. 2: { specialize (page_size_in_words_nat_ge smaller_sz). lia. } 2: { @@ -98,7 +96,8 @@ Proof. intros [? _]. done. } (* Prove preservation if the iterator emits an element *) - liRStep; liShow. iApply prove_with_subtype_default. + liRStep; liShow. + iApply prove_with_subtype_default. iSplitR. { liShow. iModIntro. simpl. iIntros ([istart itend] [itstart' itend'] (capture_smaller_sz & capture_memlayout & capture_start & capture_end & []) e) "Hnext (%Hstart & %Hend & -> & -> & -> & -> & Hinv)". @@ -106,13 +105,14 @@ Proof. simpl in Hnext. destruct Hnext as (<- & _ & <- & Hnext & [= Hcmp_eq]). case_bool_decide; last done. injection Hnext as [= ->]. - apply Z.cmp_less_iff in Hcmp_eq. + rewrite Z.compare_lt_iff in Hcmp_eq. 
remember ((Z.to_nat itend - Z.to_nat istart)%nat) as len eqn:Heq_len. destruct len. { exfalso. move: Hcmp_eq Heq_len Hstart Hend. lia. } iDestruct "Hinv" as "(#Hinv0 & Hinv1 & Hinv)". fold seq. + iExists ( *[take (page_size_in_words_nat smaller_sz) (drop (Z.to_nat istart * page_size_in_words_nat smaller_sz) pageval)]). iR. iSplitL "Hinv0 Hinv1". - { iExists _, istart, inhabitant, _, _, _, _. iR. iR. + { iExists _, istart, inhabitant, _, _, _, _. iR. iR. iR. unfold name_hint. iFrame "#". iSplitR. { iPureIntro. simpl. lia. } iSplitR. { iPureIntro. lia. } @@ -121,7 +121,9 @@ Proof. subst itend. assert (page_size_in_bytes_Z sz ∈ usize) as Hel by done. rewrite Heq_sz in Hel. - destruct Hel. split; nia. } + revert Hel Hcmp_eq Hstart. + li_clear_all. open_jcache. + intros [] ??. split; nia. } iSplitR. { iPureIntro. rename select (self `aligned_to` _) into Hal. move: Hal. rewrite !page_size_align_is_size. @@ -133,18 +135,21 @@ Proof. rewrite (page_size_multiplier_size_in_bytes sz smaller_sz); last done. move: Hcmp_eq. clear. nia. } iSplitR. { iPureIntro. simpl. lia. } - rewrite Z2Nat.id; first done. lia. } + rewrite Z2Nat.id; last lia. iR. iL. done. } iIntros (e' (capture_smaller_sz & capture_memlayout & capture_start & capture_end & [])). - iIntros "(%v' & %i & % & % & % & % & % & %Heq1 & %Heq2 & -> & %Heq3)". + rewrite boringly_persistent_elim. + iIntros "(%v' & %i & % & % & % & % & % & %Heq0 & %Heq1 & %Heq2 & (-> & %Heq3) & _)". + injection Heq0 as <-. injection Heq2 as <-. injection Heq1 as <- <- <- <-. injection Heq3 as -> -> -> ->. - simpl. iSplitR. { iPureIntro. lia. } + simpl. iL. iSplitR. { iPureIntro. lia. } iR. iR. iR. iR. iR. - replace ((S (Z.to_nat istart))) with (Z.to_nat (istart + 1%nat)) by lia. + replace ((S (Z.to_nat istart))) with (Z.to_nat (istart + 1%nat)); first last. + { clear -Hstart. lia. } iR. assert ((Z.to_nat itend - Z.to_nat (istart + 1%nat))%nat = len)as ->; last done. - { lia. } + { clear -Hstart Heq_len. lia. 
} } (* Prove preservation if the iterator does not emit an element *) iApply prove_with_subtype_default. @@ -154,26 +159,14 @@ Proof. rewrite boringly_persistent_elim. iDestruct "Hnext" as "%Hnext". simpl in Hnext. destruct Hnext as (<- & (Ha & <-) & _). injection Ha as <-. - iIntros "Hinv". done. + iIntros "Hinv". iL. done. } - rep <-! liRStep. liShow. - - rep liRStep. liShow. - iApply prove_with_subtype_default. - iRename select (MapInv _ _ _ _ _) into "Hinv". - iSplitL "Hinv". { done. } - - rep <-! liRStep. liShow. - - (* discard the invariant on the original self token so that RefinedRust does not try to re-establish it *) - iRename select (arg_self ◁ₗ[π, _] _ @ _)%I into "Hself". - iPoseProof (opened_owned_discard with "Hself") as "Hself". - - rep liRStep. + rep <-! liRStep. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + all: try rename l3 into new_pages. - set (smaller_sz := (default self0 (page_size_smaller self0))). specialize (page_size_in_words_nat_ge smaller_sz). solve_goal. @@ -183,16 +176,16 @@ Proof. - set (smaller_sz := (default self0 (page_size_smaller self0))). rewrite (page_size_multiplier_quot_Z _ smaller_sz); last done. specialize (page_size_multiplier_in_usize self0). solve_goal. - - set (smaller_sz := (default self0 (page_size_smaller self0))). - rewrite (page_size_multiplier_quot_Z _ smaller_sz); last done. - specialize (page_size_multiplier_in_usize self0). solve_goal. - - rename select (Forall2 _ _ _) into Hclos. + - rewrite page_size_multiplier_quot_Z; done. + - (* TODO: let's look at these cached sideconditions and filter more.. *) + rename select (Forall2 _ _ _) into Hclos. opose proof (Forall2_length _ _ _ Hclos) as Hlen. rewrite length_seqZ in Hlen. rewrite page_size_multiplier_quot_Z in Hlen; last done. - unfold subdivided_pages. simpl. - split; first lia. - rename x' into new_pages. + rewrite snd_zip. + 2: { rewrite -Hlen. rewrite page_size_multiplier_quot_Z; last done. solve_goal. 
} + unfold subdivided_pages. simpl. split. + { rewrite -Hlen. clear. lia. } intros i p' Hlook. opose proof (Forall2_lookup_r _ _ _ i _ Hclos Hlook) as (j & Hlook2 & Ha). apply lookup_seqZ in Hlook2 as (-> & Hlook2). @@ -204,7 +197,9 @@ Proof. rewrite bytes_per_int_usize. f_equiv. rewrite /smaller_sz. lia. + (* !end proof *) Unshelve. all: print_remaining_sidecond. -Qed. +(*Qed.*) +Admitted. (* admitted due to long Qed *) End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v index e79d48dd..6eed6c05 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0.v @@ -13,26 +13,25 @@ Proof. core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_divide_closure0_prelude. rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "vlft7" "ulft5"). - rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "plft21" "vlft7"). - rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "vlft9" "ulft5"). - rep <-! liRStep; liShow. + (* !start proof(page.divide) *) apply_update (updateable_copy_lft "vlft11" "ulft5"). rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "plft24" "vlft11"). - rep <-! liRStep; liShow. - apply_update (updateable_copy_lft "plft22" "ulft5"). - rep <-! liRStep; liShow. apply_update (updateable_copy_lft "vlft29" "ulft5"). rep <-! liRStep. liShow. + apply_update (updateable_copy_lft "vlft13" "ulft5"). + rep <-! liRStep. + apply_update (updateable_copy_lft "vlft15" "ulft5"). + rep <-! liRStep. + apply_update (updateable_copy_lft "plft28" "ulft5"). + rep <-! liRStep. 
apply_update (updateable_copy_lft "vlft30" "ulft5"). - rep liRStep. + rep <-! liRStep. + (* !end proof *) all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page.divide) *) - (* add *) eapply aligned_to_offset. { apply Haligned. } @@ -45,6 +44,7 @@ Proof. move: Hinrange. specialize (page_size_in_bytes_nat_ge capture_smaller_page_size_). nia. + (* !end proof *) Unshelve. all: print_remaining_sidecond. Qed. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v index 9104ae3d..db526676 100644 --- a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_init.v @@ -18,6 +18,9 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. + (* !start proof(page.init) *) + specialize (MEMORY_CONFIG.(conf_start_in_usize)); solve_goal. + (* !end proof(page.init) *) Unshelve. all: print_remaining_sidecond. Qed. End proof. diff --git a/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge.v b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge.v new file mode 100644 index 00000000..14187f4e --- /dev/null +++ b/verification/rust_proofs/ace/proofs/proof_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge.v @@ -0,0 +1,170 @@ +From caesium Require Import lang notation. +From refinedrust Require Import typing shims. +From sm.ace.generated Require Import generated_code_ace generated_specs_ace generated_template_core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge. 
+ +Set Default Proof Using "Type". + +Section proof. +Context `{RRGS : !refinedrustGS Σ}. + +(* !start proof(page.merge) *) +Lemma extract_page_token_invariants `{!onceG Σ memory_layout} `{!MachineConfig} {S_rt : RT} (S_attrs : core_page_allocator_page_PageState_spec_attrs S_rt) (S_ty : type S_rt) π xs xs' F : + lftE ⊆ F → + ([∗ list] x1; x2 ∈ xs'; xs, (core_page_allocator_page_Page_inv_t_inv_spec S_rt S_attrs S_ty ).(inv_P) π x1 x2) ={F}=∗ + ([∗ list] p ∈ xs, ⌜p.(page_loc).(loc_p) = ProvAlloc machine_memory_prov⌝ ∗ p.(page_loc) ◁ₗ[π, Owned] # (<#> p.(page_val)) @ ◁ array_t (page_size_in_words_nat p.(page_sz)) (int usize)). +Proof. + iIntros (?) "Ha". + iApply big_sepL_fupd. + iApply (big_sepL2_elim_l xs'). + iApply (big_sepL2_impl with "Ha"). + iModIntro. iIntros (? inner pg Hlook1 Hlook2) "Hinv". + simpl. iDestruct "Hinv" as "(%MEM & -> & Hpg & % & % & ? & % & % & % & _)". + rewrite /guarded. iDestruct "Hpg" as "(((Hc1 & _) & _)& Hpg)". + iApply (lc_fupd_add_later with "Hc1"). iNext. + iR. by iFrame. +Qed. +(* !end proof *) + +Lemma core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge_proof (π : thread_id) : + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge_lemma π. +Proof. + core_page_allocator_page_Page_core_page_allocator_page_UnAllocated_merge_prelude. + + rep <-! liRStep; liShow. + (* !start proof(page.merge) *) + opose proof (lookup_lt_is_Some_2 from_pages (length from_pages - 1) _) as [pg_last Hlook_pg_last]. + { lia. } + destruct pg_last as [pg_loc_last pg_sz_last pg_val_last]. + opose proof (page_size_multiplier_ge new_size) as ?. + opose proof (lookup_lt_is_Some_2 from_pages 0%nat _) as [pg_0 Hlook_pg0]. + { lia. } + destruct pg_0 as [pg_loc_0 pg_sz_0 pg_val_0]. + rep <-! liRStep; liShow. + 2-5: rep liRStep; liShow. + + iRename select (arg_from_pages ◁ₗ[π, Owned] _ @ _)%I into "Hvec". + iApply updateable_add_fupd. + iAcquireCredit as "Hcred". 
+ iMod (vec_extract_invariant with "Hcred Hvec") as "(%xs' & Hinv & Hvec)"; first done. + iMod (extract_page_token_invariants with "Hinv") as "Harrs"; first done. + + iMod (array_t_ofty_merge_big_sep (int usize) π _ (page_size_in_words_nat pg_sz_0) ((λ p, <#> p.(page_val)) <$> from_pages) (pg_loc_0) with "[Harrs]") as "Harr"; first done. + { rewrite length_fmap. lia. } + { rewrite length_fmap. rewrite /size_of_st/use_layout_alg'. simpl. + erewrite syn_type_has_layout_int; last done. + simpl. rewrite Hlen. + rewrite bytes_per_int_usize. + + specialize (page_size_in_bytes_nat_in_isize new_size) as [_ Hsz]. + move: Hsz. rewrite /page_size_in_bytes_nat. + opose proof * (page_size_multiplier_size_in_words new_size smaller_sz) as Hw. + { rewrite Hsmaller//. } + rewrite Hw. + apply Hlook in Hlook_pg0 as [Hsz0 _]. simpl in Hsz0. + rewrite Hsz0. lia. } + { rewrite big_sepL_fmap. iApply (big_sepL_impl with "Harrs"). + iModIntro. iIntros (? pg' Hlook'). + apply Hlook in Hlook'. + iIntros "(%Hprov & Hb)". + destruct Hlook' as [-> Hloc_eq]. + revert select (loc_p (page_loc (from_pages !!! 0%nat)) = ProvAlloc machine_memory_prov). + move: Hloc_eq. erewrite list_lookup_total_correct; last done. + simpl. apply Hlook in Hlook_pg0 as [Hsz_0 Hloc_0]. + simpl in Hsz_0. rewrite Hsz_0. + intros Heq_a Heq_prov. + enough (page_loc pg' = (pg_loc_0 offsetst{IntSynType usize}ₗ (k * page_size_in_words_nat smaller_sz))) as -> by done. + move: Hprov Heq_a Heq_prov. destruct (page_loc pg'); simpl. + intros -> ->. + destruct pg_loc_0; simpl. intros ->. + rewrite /OffsetLocSt /offset_loc/use_layout_alg'/=. + rewrite /shift_loc. + erewrite syn_type_has_layout_int; last done. + simpl. + rewrite /page_size_in_bytes_nat bytes_per_int_usize. + f_equiv. lia. } + repeat iClear select (page_loc (from_pages !!! Z.to_nat 0) ◁ₗ[ π, Shared _] _ @ _)%I. + iModIntro. + rep liRStep; liShow. + rewrite (list_lookup_total_correct _ 0 _ Hlook_pg0). simpl. + rep liRStep; liShow. 
+ liInst Hevar_x0 (mjoin (page_val <$> from_pages)). + rep <-! liRStep; liShow. + + all: print_remaining_goal. + Unshelve. all: sidecond_solver. + Unshelve. all: sidecond_hammer. + - rewrite Hlen. specialize (page_size_multiplier_ge new_size). lia. + - specialize (page_size_in_words_nat_ge (page_sz (from_pages !!! 0%nat))). lia. + - specialize (page_size_in_words_nat_ge (page_sz (from_pages !!! 0%nat))). lia. + - rewrite page_size_multiplier_quot_Z; first solve_goal. + rewrite Hsmaller. + erewrite list_lookup_total_correct; last done. + apply Hlook in Hlook_pg0 as [-> _]. done. + - opose proof (Hlook (length from_pages - 1)%nat _) as [Hlast_sz Hlast_off]; first done. + simpl in *. + split; first solve_goal. + rename select (loc_a pg_loc_last + _ ≤ MaxInt USize) into Hbound. + move: Hbound. + rewrite Hlast_off. + rewrite Hlen. + rewrite (page_size_multiplier_size_in_bytes new_size smaller_sz); last by rewrite Hsmaller. + simpl. rewrite Hlast_sz. clear. nia. + - revert select (¬ length from_pages > 2). + rewrite Hlen. + revert Hsmaller. clear. + destruct new_size; first done. + all: intros _; unfold page_size_multiplier, page_size_multiplier_log; lia. + - rename select (loc_a _ + page_size_in_bytes_Z new_size ≠ loc_a pg_loc_last + _) into Hcontra. + apply Hcontra. + opose proof (Hlook (length from_pages - 1)%nat _ _) as Hlook_last; first done. + simpl in Hlook_last. destruct Hlook_last as [-> ->]. + rewrite (page_size_multiplier_size_in_bytes new_size smaller_sz); last by rewrite Hsmaller. + rewrite Hlen. + clear. specialize (page_size_multiplier_ge new_size). nia. + - revert select (_ `quot` _ ≠ length from_pages). + rewrite page_size_multiplier_quot_Z; first solve_goal. + rewrite Hsmaller. + erewrite list_lookup_total_correct; last done. + apply Hlook in Hlook_pg0 as [-> _]. done. + - revert select (¬ _ `aligned_to` _). + rewrite -page_size_align_is_size. done. + - rewrite Hlen. 
+ rewrite (page_size_multiplier_size_in_words new_size smaller_sz); last rewrite Hsmaller//. + apply Hlook in Hlook_pg0. simpl in Hlook_pg0. destruct Hlook_pg0 as [-> _]. lia. + - rewrite list_fmap_mjoin. + f_equiv. rewrite list_fmap_compose//. + - move: Haligned. + erewrite list_lookup_total_correct; last done. + done. + - (* we should open the invariant of the last page token *) + opose proof (Hlook (length from_pages - 1)%nat _) as [Hlast_sz Hlast_off]; first done. + simpl in *. + revert select (loc_a (page_loc (from_pages !!! (length from_pages - Z.to_nat 1)%nat)) + _ ≤ MAX_PAGE_ADDR). + erewrite list_lookup_total_correct; last done. + simpl. rewrite Hlast_off. rewrite Hlen. + erewrite list_lookup_total_correct; last done. + simpl. + rewrite (page_size_multiplier_size_in_bytes new_size smaller_sz); last by rewrite Hsmaller. + simpl. rewrite Hlast_sz. clear. nia. + - revert select (loc_a (conf_start MEMORY_CONFIG) ≤ loc_a (page_loc (from_pages !!! 0%nat))). + erewrite list_lookup_total_correct; last done. + done. + - opose proof (Hlook (length from_pages - 1)%nat _) as [Hlast_sz Hlast_off]; first done. + simpl in *. + revert select (loc_a (page_loc (from_pages !!! (length from_pages - Z.to_nat 1)%nat)) + _ ≤ loc_a (conf_end MEMORY_CONFIG)). + erewrite list_lookup_total_correct; last done. + simpl. rewrite Hlast_off. rewrite Hlen. + erewrite list_lookup_total_correct; last done. + simpl. + rewrite (page_size_multiplier_size_in_bytes new_size smaller_sz); last by rewrite Hsmaller. + simpl. rewrite Hlast_sz. clear. nia. + - revert select (loc_p (page_loc (from_pages !!! 0%nat)) = ProvAlloc machine_memory_prov). + erewrite list_lookup_total_correct; last done. + done. + - erewrite list_lookup_total_correct; last done. + done. + (* !end proof *) + + Unshelve. all: print_remaining_sidecond. +Admitted. (* admitted due to long Qed *) +End proof. 
diff --git a/verification/rust_proofs/pointers_utility/proofs/proof_ptr_byte_add_mut.v b/verification/rust_proofs/pointers_utility/proofs/proof_ptr_byte_add_mut.v index 38de0736..7f0240a7 100644 --- a/verification/rust_proofs/pointers_utility/proofs/proof_ptr_byte_add_mut.v +++ b/verification/rust_proofs/pointers_utility/proofs/proof_ptr_byte_add_mut.v @@ -17,26 +17,27 @@ Proof. all: print_remaining_goal. Unshelve. all: sidecond_solver. Unshelve. all: sidecond_hammer. - - all: try rename select ((wrap_unsigned (pointer.2 + offset_in_bytes) usize ?= owned_region_end.2) = Lt) into Hwrap_lt. - all: try rewrite Z.compare_lt_iff in Hwrap_lt. - all: try rename select (wrap_unsigned (pointer.2 + offset_in_bytes) usize ≤ pointer.2) into Hoverflow. - { opose proof (wrap_unsigned_le (pointer.2 + offset_in_bytes) usize _); lia. } + + (* !start proof(pointers_utility.ptr_byte_add_mut) *) + all: try rename select ((wrap_unsigned (pointer.(loc_a) + offset_in_bytes) usize < owned_region_end.(loc_a))) into Hwrap_lt. + all: try rename select (wrap_unsigned (pointer.(loc_a) + offset_in_bytes) usize ≤ pointer.(loc_a)) into Hoverflow. + { opose proof (wrap_unsigned_le (pointer.(loc_a) + offset_in_bytes) usize _); lia. } { apply wrap_unsigned_add_did_wrap in Hoverflow; [ | done.. | lia]. sidecond_hammer. } - { rewrite wrap_unsigned_add_did_not_wrap in Hwrap_lt; sidecond_hammer. } + { rewrite wrap_unsigned_add_did_not_wrap in Hwrap_lt; sidecond_hammer. } { rewrite wrap_unsigned_add_did_not_wrap in Hwrap_lt; [ | sidecond_hammer..]. apply wrapping_shift_loc_shift_loc. sidecond_hammer. - } + } { (* offset is zero. So this is okay. *) assert (offset_in_bytes = 0) as -> by lia. rewrite Z.add_0_r in Hwrap_lt. rewrite wrap_unsigned_id in Hwrap_lt; [ lia | done.. ]. } { assert (offset_in_bytes = 0) as -> by lia. rewrite shift_loc_0 wrapping_shift_loc_0//. } + (* !end proof *) Unshelve. all: print_remaining_sidecond. -Admitted. (* admitted due to admit_proofs config flag *) +Qed. End proof. 
diff --git a/verification/theories/base/dune b/verification/theories/base/dune index 77561392..cac899ba 100644 --- a/verification/theories/base/dune +++ b/verification/theories/base/dune @@ -1,4 +1,4 @@ -(coq.theory +(rocq.theory (name ace.theories.base) (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) diff --git a/verification/theories/base/machine.v b/verification/theories/base/machine.v new file mode 100644 index 00000000..4dc881c8 --- /dev/null +++ b/verification/theories/base/machine.v @@ -0,0 +1,10 @@ +From refinedrust Require Import typing. + +(** Configuration of the Rust abstract machine *) +Class MachineConfig := { + (** We assume a provance ID for the memory ownership the security monitor gets passed from assembly on initialization. + This provenance allows for access to all direct hardware memory, but not to memory that went through the Rust abstract machine (e.g. stack/heap allocated pointers). + For now, we just use this to make sure that page token pointers don't get reduced to a more restrictive provenance; but eventually this should interface with the Radium semantics for exposed pointer-integer casts. + *) + machine_memory_prov : Z +}. diff --git a/verification/theories/memory_layout/dune b/verification/theories/memory_layout/dune index f5b9943f..9b9e52f5 100644 --- a/verification/theories/memory_layout/dune +++ b/verification/theories/memory_layout/dune @@ -1,4 +1,4 @@ -(coq.theory +(rocq.theory (name ace.theories.memory_layout) (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) diff --git a/verification/theories/memory_layout/memory_layout.v b/verification/theories/memory_layout/memory_layout.v index efbf9a4d..6b7d36d5 100644 --- a/verification/theories/memory_layout/memory_layout.v +++ b/verification/theories/memory_layout/memory_layout.v @@ -2,16 +2,17 @@ From refinedrust Require Import typing. 
(** * Additional definitions for the security monitor's global memory layout *) +(* !start spec(memory_layout.memory_layout) *) Record memory_layout : Type := mk_memory_layout { conf_start : loc; conf_end : loc; non_conf_start : loc; non_conf_end : loc; - conf_start_in_usize : conf_start.2 ∈ usize; - conf_end_in_usize : conf_end.2 ∈ usize; - non_conf_start_in_usize : non_conf_start.2 ∈ usize; - non_conf_end_in_usize : non_conf_end.2 ∈ usize; + conf_start_in_usize : conf_start.(loc_a) ∈ usize; + conf_end_in_usize : conf_end.(loc_a) ∈ usize; + non_conf_start_in_usize : non_conf_start.(loc_a) ∈ usize; + non_conf_end_in_usize : non_conf_end.(loc_a) ∈ usize; }. Canonical Structure memory_layoutRT := directRT memory_layout. @@ -19,3 +20,4 @@ Global Program Instance memory_layout_inhabited : Inhabited memory_layout := populate (mk_memory_layout NULL_loc NULL_loc NULL_loc NULL_loc _ _ _ _). Solve Obligations with split; unsafe_unfold_common_caesium_defs; sidecond_hammer. +(* !end spec *) diff --git a/verification/theories/page_allocator/dune b/verification/theories/page_allocator/dune index 584b4fc2..f9826df0 100644 --- a/verification/theories/page_allocator/dune +++ b/verification/theories/page_allocator/dune @@ -1,4 +1,4 @@ -(coq.theory +(rocq.theory (name ace.theories.page_allocator) (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) diff --git a/verification/theories/page_allocator/page.v b/verification/theories/page_allocator/page.v index 879be3aa..6f7ace8d 100644 --- a/verification/theories/page_allocator/page.v +++ b/verification/theories/page_allocator/page.v @@ -1,9 +1,11 @@ From refinedrust Require Import typing. From rrstd.cmp.theories Require Import ordering. From ace.theories.base Require Import base. +From Stdlib Require Import ZifyClasses ZifyComparison. 
(** * Extra theories for page sizes and pages *) +(* !start spec(page_size.page_size) *) (* This reflects the page sizes in [core/architecture/riscv/mmu/page_size.rs] *) Inductive page_size : Set := | Size4KiB @@ -20,12 +22,12 @@ Global Instance page_size_eqdec : EqDecision page_size. Proof. solve_decision. Defined. Definition page_size_variant (a : page_size) : Z := match a with - | Size4KiB => 0 - | Size16KiB => 1 - | Size2MiB => 2 - | Size1GiB => 3 - | Size512GiB => 4 - | Size128TiB => 5 + | Size4KiB => 1 + | Size16KiB => 2 + | Size2MiB => 3 + | Size1GiB => 4 + | Size512GiB => 5 + | Size128TiB => 6 end . Global Instance page_size_countable : Countable page_size. @@ -33,12 +35,12 @@ Proof. refine (inj_countable (λ a, Z.to_nat (page_size_variant a)) (λ a, match a with - | 0 => Some Size4KiB - | 1 => Some Size16KiB - | 2 => Some Size2MiB - | 3 => Some Size1GiB - | 4 => Some Size512GiB - | 5 => Some Size128TiB + | 1 => Some Size4KiB + | 2 => Some Size16KiB + | 3 => Some Size2MiB + | 4 => Some Size1GiB + | 5 => Some Size512GiB + | 6 => Some Size128TiB | _ => None end%nat) _). intros []; naive_solver. @@ -52,12 +54,18 @@ Proof. Qed. Definition page_size_cmp (a b : page_size) : ordering := - Z.cmp (page_size_variant a) (page_size_variant b). + Z.compare (page_size_variant a) (page_size_variant b). Arguments page_size_cmp : simpl never. Definition page_size_eq (a b : page_size) : bool := bool_decide (a = b). +Notation "a '<ₚ' b" := (a ₚ' b" := (a >o{page_size_cmp} b) (at level 50). +Notation "a '≤ₚ' b" := (a ≤o{page_size_cmp} b) (at level 60). +Notation "a '≥ₚ' b" := (a ≥o{page_size_cmp} b) (at level 50). + Global Instance page_size_eq_correct : CorrectEq page_size_eq. Proof. unfold page_size_eq. @@ -65,6 +73,7 @@ Proof. - intros ?. naive_solver. - intros ??. naive_solver. - intros ???. naive_solver. + - intros ??. solve_goal. Qed. Global Instance page_size_cmp_correct : CorrectOrd page_size_eq page_size_cmp. Proof. @@ -75,33 +84,8 @@ Proof. - intros ???. solve_goal. 
- intros ???. solve_goal. - solve_goal. - - solve_goal. -Qed. - -Notation "a '<ₚ' b" := (a ₚ' b" := (a >o{page_size_cmp} b) (at level 50). -Notation "a '≤ₚ' b" := (a ≤o{page_size_cmp} b) (at level 60). -Notation "a '≥ₚ' b" := (a ≥o{page_size_cmp} b) (at level 50). - -Lemma page_size_le_max p : - p ≤ₚ Size128TiB. -Proof. - unfold ord_le. - destruct p; last (by right); left. - all: apply Z.cmp_less_iff. - all: simpl; lia. -Qed. -Lemma page_size_le_min p : - Size4KiB ≤ₚ p. -Proof. - unfold ord_le. - destruct p; first (by right); left. - all: apply Z.cmp_less_iff. - all: simpl; lia. Qed. - (** Number of 64-bit machine words in a page size *) Definition page_size_in_words_nat_def (sz : page_size) : nat := match sz with @@ -121,6 +105,165 @@ Definition page_size_in_bytes_nat (sz : page_size) : nat := Notation page_size_in_bytes_Z sz := (Z.of_nat (page_size_in_bytes_nat sz)). +(** Join on page sizes (maximum) *) +Definition page_size_max (sz1 sz2 : page_size) : page_size := + match sz1, sz2 with + | Size128TiB, _ => Size128TiB + | _ , Size128TiB => Size128TiB + | Size512GiB, _ => Size512GiB + | _, Size512GiB => Size512GiB + | Size1GiB, _ => Size1GiB + | _, Size1GiB => Size1GiB + | Size2MiB, _ => Size2MiB + | _, Size2MiB => Size2MiB + | Size16KiB, _ => Size16KiB + | _, Size16KiB => Size16KiB + | _, _ => Size4KiB + end. +Global Instance page_size_join : Join page_size := page_size_max. + +(** Meet on page sizes (minimum) *) +Definition page_size_min (sz1 sz2 : page_size) : page_size := + match sz1, sz2 with + | Size4KiB, _ => Size4KiB + | _, Size4KiB => Size4KiB + | Size16KiB, _ => Size16KiB + | _, Size16KiB => Size16KiB + | Size2MiB, _ => Size2MiB + | _, Size2MiB => Size2MiB + | Size1GiB, _ => Size1GiB + | _, Size1GiB => Size1GiB + | Size512GiB, _ => Size512GiB + | _, Size512GiB => Size512GiB + |_, _ => Size128TiB + end. +Global Instance page_size_meet : Meet page_size := page_size_min. 
+ +(** The next smaller page size *) +Definition page_size_smaller (sz : page_size) : option page_size := + match sz with + | Size4KiB => None + | Size16KiB => Some Size4KiB + | Size2MiB => Some Size16KiB + | Size1GiB => Some Size2MiB + | Size512GiB => Some Size1GiB + | Size128TiB => Some Size512GiB + end. + +(** The next larger page size *) +Definition page_size_larger (sz : page_size) : option page_size := + match sz with + | Size4KiB => Some Size16KiB + | Size16KiB => Some Size2MiB + | Size2MiB => Some Size1GiB + | Size1GiB => Some Size512GiB + | Size512GiB => Some Size128TiB + | Size128TiB => None + end. + +(** Zify support *) +#[global] Program Instance Inj_page_size_Z : InjTyp page_size Z := + mkinj _ _ page_size_variant (fun x => 1 <= x ∧ x < 7) _. +Next Obligation. + intros []; simpl; lia. +Qed. +Add Zify InjTyp Inj_page_size_Z. + +#[global] Program Instance Inj_option_page_size_Z : InjTyp (option page_size) Z := + mkinj _ _ (λ x, match x with | None => 0 | Some x => page_size_variant x end) (fun x => 0 <= x ∧ x < 7) _. +Next Obligation. + intros [[] | ]; simpl; lia. +Qed. +Add Zify InjTyp Inj_option_page_size_Z. + +#[global] Program Instance BinOp_page_size_cmp : BinOp page_size_cmp := + { TBOp := ZcompareZ }. +Next Obligation. done. Qed. +Add Zify BinOp BinOp_page_size_cmp. + +Definition page_size_compareZ (x y : page_size) := + Z_of_comparison (page_size_cmp x y). + +#[global] Program Instance page_size_compareSpec : BinOpSpec page_size_compareZ := + {| BPred := λ x y r, (x =ₚ y → r = 0%Z) ∧ + (x >ₚ y → r = 1%Z) ∧ + (x <ₚ y → r = (-1)%Z) |}. +Next Obligation. + unfold ord_eq, ord_lt, ord_gt, page_size_cmp. + unfold page_size_compareZ, Z_of_comparison. + unfold page_size_cmp. + intros x y. + destruct (page_size_variant x ?= page_size_variant y) eqn:C; simpl. + all: solve_goal. +Qed. +Add Zify BinOpSpec page_size_compareSpec. + +#[global] Program Instance page_size_ge_op : BinRel (ord_ge page_size_cmp) := { TR := Z.ge; }. +Next Obligation. 
+ intros; apply Z.ord_ge_iff. +Qed. +Add Zify BinRel page_size_ge_op. + +#[global] Program Instance page_size_gt_op : BinRel (ord_gt page_size_cmp) := { TR := Z.gt; }. +Next Obligation. + intros; apply Z.ord_gt_iff. +Qed. +Add Zify BinRel page_size_gt_op. + +#[global] Program Instance page_size_lt_op : BinRel (ord_lt page_size_cmp) := { TR := Z.lt; }. +Next Obligation. + intros; apply Z.ord_lt_iff. +Qed. +Add Zify BinRel page_size_lt_op. + +#[global] Program Instance page_size_le_op : BinRel (ord_le page_size_cmp) := { TR := Z.le; }. +Next Obligation. + intros; apply Z.ord_le_iff. +Qed. +Add Zify BinRel page_size_le_op. + +#[global] Program Instance page_size_eq_op : BinRel (ord_eq page_size_cmp) := { TR := Z.eq; }. +Next Obligation. + intros; apply Z.ord_eq_iff. +Qed. +Add Zify BinRel page_size_eq_op. + +#[global] Program Instance page_size_eq_op' : BinRel (@eq page_size) := { TR := Z.eq; }. +Next Obligation. + intros. simpl. + destruct n, m; simpl; try solve_goal. +Qed. +Add Zify BinRel page_size_eq_op'. + +#[global] Program Instance page_size_smaller_unop : UnOp page_size_smaller := + { TUOp := Z.pred }. +Next Obligation. + intros []; simpl; lia. +Qed. +Add Zify UnOp page_size_smaller_unop. + +#[global] Program Instance page_size_max_binop : BinOp page_size_max := + { TBOp := Z.max }. +Next Obligation. + intros [] []; simpl; lia. +Qed. +Add Zify BinOp page_size_max_binop. + +#[global] Program Instance page_size_min_binop : BinOp page_size_min := + { TBOp := Z.min }. +Next Obligation. + intros [] []; simpl; lia. +Qed. +Add Zify BinOp page_size_min_binop. + +(** Lemmas *) +Lemma page_size_le_max p : + p ≤ₚ Size128TiB. +Proof. lia. Qed. +Lemma page_size_le_min p : + Size4KiB ≤ₚ p. +Proof. lia. Qed. + Lemma page_size_in_bytes_div_8 sz : (8 | page_size_in_bytes_nat sz). Proof. @@ -142,6 +285,16 @@ Proof. specialize (page_size_in_words_nat_ge sz). lia. Qed. +Lemma page_size_in_bytes_nat_in_isize sz : + (Z.of_nat $ page_size_in_bytes_nat sz) ∈ isize. +Proof. 
+ rewrite /page_size_in_bytes_nat page_size_in_words_nat_unfold /page_size_in_words_nat_def. + rewrite bytes_per_addr_eq. + split. + all: unsafe_unfold_common_caesium_defs. + all: unfold it_signed, it_byte_size_log, bytes_per_addr_log. + all: destruct sz; try lia. +Qed. Lemma page_size_in_bytes_nat_in_usize sz : (Z.of_nat $ page_size_in_bytes_nat sz) ∈ usize. Proof. @@ -189,18 +342,18 @@ Lemma page_size_le_size_in_bytes sz1 sz2 : page_size_in_bytes_Z sz1 ≤ page_size_in_bytes_Z sz2. Proof. intros [Hlt | Heq]. - - apply Z.cmp_less_iff in Hlt. + - apply Z.compare_lt_iff in Hlt. apply page_size_in_bytes_Z_mono in Hlt. lia. - - apply Z.cmp_equal_iff in Heq. apply page_size_variant_inj in Heq. subst. + - apply Z.compare_eq_iff in Heq. apply page_size_variant_inj in Heq. subst. lia. Qed. Lemma page_size_in_words_nat_lt_divide sz1 sz2 : sz1 <ₚ sz2 → Nat.divide (page_size_in_words_nat sz1) (page_size_in_words_nat sz2). Proof. - intros Ha. apply Z.cmp_less_iff in Ha. + intros Ha. apply Z.compare_lt_iff in Ha. unfold page_size_in_bytes_nat. - destruct sz1, sz2; simpl in *; try lia. + destruct sz1, sz2; simpl in *; try lia; try done. all: rewrite page_size_in_words_nat_unfold /page_size_in_words_nat_def. - exists 4%nat. lia. - exists 512%nat. lia. @@ -224,7 +377,7 @@ Lemma page_size_in_words_nat_le_divide sz1 sz2 : Proof. intros [Hlt | Heq]. - by eapply page_size_in_words_nat_lt_divide. - - apply Z.cmp_equal_iff in Heq. + - apply Z.compare_eq_iff in Heq. apply page_size_variant_inj in Heq as <-. done. Qed. @@ -241,17 +394,6 @@ Proof. done. Qed. -(** The next smaller page size *) -Definition page_size_smaller (sz : page_size) : option page_size := - match sz with - | Size4KiB => None - | Size16KiB => Some Size4KiB - | Size2MiB => Some Size16KiB - | Size1GiB => Some Size2MiB - | Size512GiB => Some Size1GiB - | Size128TiB => Some Size512GiB - end. - Lemma page_size_smaller_None sz : page_size_smaller sz = None ↔ sz = Size4KiB. Proof. destruct sz; done. Qed. 
@@ -268,20 +410,10 @@ Proof. intros Hsmaller. apply page_size_smaller_page_size_variant in Hsmaller. unfold ord_lt, page_size_cmp. - apply Z.cmp_less_iff. + apply Z.compare_lt_iff. lia. Qed. -(** The next larger page size *) -Definition page_size_larger (sz : page_size) : option page_size := - match sz with - | Size4KiB => Some Size16KiB - | Size16KiB => Some Size2MiB - | Size2MiB => Some Size1GiB - | Size1GiB => Some Size512GiB - | Size512GiB => Some Size128TiB - | Size128TiB => None - end. Lemma page_size_larger_None sz : page_size_larger sz = None ↔ sz = Size128TiB. Proof. destruct sz; done. Qed. @@ -298,7 +430,7 @@ Proof. intros Hlarger. apply page_size_larger_page_size_variant in Hlarger. unfold ord_gt, page_size_cmp. - apply Z.cmp_greater_iff. + apply Z.compare_gt_iff. lia. Qed. @@ -325,91 +457,77 @@ Proof. destruct sz; simpl; lia. Qed. -(** Join on page sizes (maximum) *) -Definition page_size_max (sz1 sz2 : page_size) : page_size := - match sz1, sz2 with - | Size128TiB, _ => Size128TiB - | _ , Size128TiB => Size128TiB - | Size512GiB, _ => Size512GiB - | _, Size512GiB => Size512GiB - | Size1GiB, _ => Size1GiB - | _, Size1GiB => Size1GiB - | Size2MiB, _ => Size2MiB - | _, Size2MiB => Size2MiB - | Size16KiB, _ => Size16KiB - | _, Size16KiB => Size16KiB - | _, _ => Size4KiB - end. -Global Instance page_size_join : Join page_size := page_size_max. - Lemma page_size_max_ge_l sz1 sz2 : sz1 ≤ₚ sz1 ⊔ sz2. Proof. - unfold ord_le. - destruct sz1, sz2; cbn; naive_solver. + unfold join, page_size_join. lia. Qed. Lemma page_size_max_ge_r sz1 sz2 : sz2 ≤ₚ sz1 ⊔ sz2. Proof. - unfold ord_le. - destruct sz1, sz2; cbn; naive_solver. + unfold join, page_size_join. lia. Qed. Lemma page_size_max_l sz1 sz2 : sz2 ≤ₚ sz1 → sz1 ⊔ sz2 = sz1. Proof. intros [Hlt | Heq]. - - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. - - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl; try done. + - apply Z.compare_lt_iff in Hlt. destruct sz1, sz2; simpl in *; try done. 
+ - apply Z.compare_eq_iff in Heq. destruct sz1, sz2; simpl; try done. Qed. Lemma page_size_max_r sz1 sz2 : sz1 ≤ₚ sz2 → sz1 ⊔ sz2 = sz2. Proof. intros [Hlt | Heq]. - - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. - - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl; try done. + - apply Z.compare_lt_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.compare_eq_iff in Heq. destruct sz1, sz2; simpl; try done. +Qed. +Lemma page_size_max_le_glb s a b : + a ≤ₚ s → b ≤ₚ s → a ⊔ b ≤ₚ s. +Proof. + destruct s, a, b; simpl; done. +Qed. +Lemma page_size_max_lt_glb s a b : + a <ₚ s → b <ₚ s → a ⊔ b <ₚ s. +Proof. + destruct s, a, b; simpl; done. Qed. - -(** Meet on page sizes (minimum) *) -Definition page_size_min (sz1 sz2 : page_size) : page_size := - match sz1, sz2 with - | Size4KiB, _ => Size4KiB - | _, Size4KiB => Size4KiB - | Size16KiB, _ => Size16KiB - | _, Size16KiB => Size16KiB - | Size2MiB, _ => Size2MiB - | _, Size2MiB => Size2MiB - | Size1GiB, _ => Size1GiB - | _, Size1GiB => Size1GiB - | Size512GiB, _ => Size512GiB - | _, Size512GiB => Size512GiB - |_, _ => Size128TiB - end. -Global Instance page_size_meet : Meet page_size := page_size_min. Lemma page_size_min_le_l sz1 sz2 : sz1 ⊓ sz2 ≤ₚ sz1. Proof. - unfold ord_le. destruct sz1, sz2; cbn; naive_solver. + unfold meet, page_size_meet. lia. Qed. Lemma page_size_min_le_r sz1 sz2 : sz1 ⊓ sz2 ≤ₚ sz2. Proof. - unfold ord_le. destruct sz1, sz2; cbn; naive_solver. + unfold meet, page_size_meet. lia. Qed. Lemma page_size_min_l sz1 sz2 : sz1 ≤ₚ sz2 → sz1 ⊓ sz2 = sz1. Proof. intros [Hlt | Heq]. - - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. - - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl in *; try done. + - apply Z.compare_lt_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.compare_eq_iff in Heq. destruct sz1, sz2; simpl in *; try done. Qed. Lemma page_size_min_r sz1 sz2 : sz2 ≤ₚ sz1 → sz1 ⊓ sz2 = sz2. Proof. intros [Hlt | Heq]. 
- - apply Z.cmp_less_iff in Hlt. destruct sz1, sz2; simpl in *; try done. - - apply Z.cmp_equal_iff in Heq. destruct sz1, sz2; simpl in *; try done. + - apply Z.compare_lt_iff in Hlt. destruct sz1, sz2; simpl in *; try done. + - apply Z.compare_eq_iff in Heq. destruct sz1, sz2; simpl in *; try done. +Qed. +Lemma page_size_min_le_lub s a b : + s ≤ₚ a → s ≤ₚ b → s ≤ₚ a ⊓ b. +Proof. + destruct s, a, b; simpl; done. +Qed. +Lemma page_size_min_lt_lub s a b : + s <ₚ a → s <ₚ b → s <ₚ a ⊓ b. +Proof. + destruct s, a, b; simpl; done. Qed. +(* !end spec *) +(* !start spec(page.page) *) (** The maximum address at which a page may be located (one-past-the-end address), limited by the page allocator implementation. *) (* Sealed because it is big and will slow down Coq *) @@ -437,8 +555,6 @@ Record page : Type := mk_page { Canonical Structure pageRT := directRT page. Global Instance loc_eqdec : EqDecision loc. Proof. solve_decision. Defined. -Global Instance loc_countable : Countable loc. -Proof. unfold loc. apply prod_countable. Qed. Global Instance page_inh : Inhabited page. Proof. exact (populate (mk_page inhabitant inhabitant inhabitant)). Qed. Global Instance page_eqdec : EqDecision page. @@ -545,6 +661,41 @@ Proof. f_equiv. by apply page_size_multiplier_align_log. Qed. +Lemma page_size_in_words_is_power_of_two sz : + is_power_of_two (page_size_in_words_nat sz). +Proof. + rewrite page_size_in_words_nat_unfold. + rewrite /page_size_in_words_nat_def. + assert (is_power_of_two 512). + { exists 9%nat. done. } + assert (is_power_of_two 256). + { exists 8%nat. done. } + assert (is_power_of_two 4). + { exists 2%nat. done. } + destruct sz. + all: repeat first [done | apply is_power_of_two_mult; split]. +Qed. +Lemma page_size_in_bytes_is_power_of_two sz : + is_power_of_two (page_size_in_bytes_nat sz). +Proof. + rewrite /page_size_in_bytes_nat. + apply is_power_of_two_mult. + split; last apply page_size_in_words_is_power_of_two. + exists 3%nat. done. +Qed. 
+Global Instance simpl_page_size_in_words_is_power_of_two sz : + SimplBoth (is_power_of_two (page_size_in_words_nat sz)) True. +Proof. + rewrite /SimplBoth. split; first done. + intros. apply page_size_in_words_is_power_of_two. +Qed. +Global Instance simpl_page_size_in_bytes_is_power_of_two sz : + SimplBoth (is_power_of_two (page_size_in_bytes_nat sz)) True. +Proof. + rewrite /SimplBoth. split; first done. + intros. apply page_size_in_bytes_is_power_of_two. +Qed. + Lemma page_size_multiplier_quot sz smaller_sz : smaller_sz = default sz (page_size_smaller sz) → page_size_multiplier sz = Z.to_nat (page_size_in_bytes_Z sz `quot` page_size_in_bytes_Z smaller_sz). @@ -588,12 +739,12 @@ Qed. (** States that the page is in the given memory range *) (* TODO unify all the memory range stuff *) Definition page_within_range (base_address : Z) (sz : page_size) (p : page) : Prop := - (base_address ≤ p.(page_loc).2 ∧ p.(page_loc).2 + page_size_in_bytes_Z p.(page_sz) ≤ base_address + page_size_in_bytes_Z sz)%Z. + (base_address ≤ p.(page_loc).(loc_a) ∧ p.(page_loc).(loc_a) + page_size_in_bytes_Z p.(page_sz) ≤ base_address + page_size_in_bytes_Z sz)%Z. Lemma page_within_range_inv base sz p : page_within_range base sz p → p.(page_sz) = sz → - p.(page_loc).2 = base. + p.(page_loc).(loc_a) = base. Proof. unfold page_within_range. intros [] <-. @@ -662,14 +813,14 @@ Proof. destruct (page_size_smaller p.(page_sz)) eqn:Heq. - eapply page_size_smaller_lt in Heq. left. done. - simpl. right. - apply Z.cmp_equal_iff. done. + apply Z.compare_eq_iff. done. Qed. Lemma subdivided_pages_lookup_base_address (i : nat) p ps p' sz' : page_size_smaller p.(page_sz) = Some sz' → subdivided_pages p ps → ps !! i = Some p' → - (page_loc p').2 = p.(page_loc).2 + (i * page_size_in_bytes_nat sz'). + (page_loc p').(loc_a) = p.(page_loc).(loc_a) + (i * page_size_in_bytes_nat sz'). Proof. unfold subdivided_pages. intros -> [Ha Hb] Hlook. 
@@ -692,7 +843,7 @@ Lemma subdivided_pages_page_within_range (i : nat) p ps p' : page_size_smaller p.(page_sz) = Some p'.(page_sz) → subdivided_pages p ps → ps !! i = Some p' → - page_within_range (p.(page_loc).2 + i * page_size_in_bytes_Z p'.(page_sz)) p'.(page_sz) p'. + page_within_range (p.(page_loc).(loc_a) + i * page_size_in_bytes_Z p'.(page_sz)) p'.(page_sz) p'. Proof. intros ? Hsubdivided Hlook. opose proof (subdivided_pages_lookup_base_address i _ _ _ _ _ Hsubdivided _) as Hl. @@ -705,7 +856,7 @@ Lemma subdivided_pages_page_within_range' i p ps p' sz base : page_size_smaller sz = Some p'.(page_sz) → subdivided_pages p ps → ps !! i = Some p' → - base = p.(page_loc).2 → + base = p.(page_loc).(loc_a) → sz = p.(page_sz) → page_within_range base sz p'. Proof. @@ -728,7 +879,7 @@ Lemma subdivided_pages_lookup_inv p sz base ps p' (i : nat) (j : Z) : subdivided_pages p ps → ps !! i = Some p' → p'.(page_sz) = sz → - base = p.(page_loc).2 → + base = p.(page_loc).(loc_a) → page_within_range (base + j * page_size_in_bytes_Z sz) sz p' → i = Z.to_nat j. Proof. @@ -739,6 +890,7 @@ Proof. specialize (page_size_in_bytes_nat_ge p'.(page_sz)). nia. Qed. +(* !end spec *) (** Stronger functional specification *) @@ -874,6 +1026,7 @@ Qed. (** Lithium automation *) +(* !start spec(page.page) *) Global Instance simpl_exist_page Q : SimplExist page Q (∃ (page_loc : loc) (page_sz : page_size) (page_val : list Z), Q (mk_page page_loc page_sz page_val)). @@ -915,3 +1068,4 @@ Global Instance simpl_both_page_size_larger_none sz : Proof. split; destruct sz; simpl; done. Qed. +(* !end spec *) diff --git a/verification/theories/page_allocator/page_allocator.v b/verification/theories/page_allocator/page_allocator.v index 7f8e4b80..bc2b4246 100644 --- a/verification/theories/page_allocator/page_allocator.v +++ b/verification/theories/page_allocator/page_allocator.v @@ -3,53 +3,8 @@ From refinedrust Require Import ghost_var_dfrac. From rrstd.cmp.theories Require Import ordering. 
From ace.theories.page_allocator Require Import page. -(** * Page allocator ghost state *) - -Record memory_region := { - mreg_start : Z; - mreg_size : nat; -}. -Definition mreg_end (mreg : memory_region) : Z := - mreg.(mreg_start) + mreg.(mreg_size). - -Class memory_regionG Σ := { - mreg_ghost_var :: ghost_varG Σ memory_region; -}. -Section page_allocator. - Context `{!typeGS Σ}. - Context `{!memory_regionG Σ}. - - Definition has_memory_region_def (γ : gname) (mreg : memory_region) := - ghost_var γ DfracDiscarded mreg. - Definition has_memory_region_aux : seal (@has_memory_region_def). Proof. by eexists. Qed. - Definition has_memory_region := has_memory_region_aux.(unseal). - Definition has_memory_region_eq : @has_memory_region = @has_memory_region_def := has_memory_region_aux.(seal_eq). - - Lemma has_memory_region_alloc mreg : - ⊢ |==> ∃ γ, has_memory_region γ mreg. - Proof. - rewrite has_memory_region_eq. - iMod (ghost_var_alloc mreg) as (γ) "Hvar". - iExists γ. by iApply ghost_var_discard. - Qed. - - Lemma has_memory_region_agree γ mreg1 mreg2 : - has_memory_region γ mreg1 -∗ - has_memory_region γ mreg2 -∗ - ⌜mreg1 = mreg2⌝. - Proof. - rewrite has_memory_region_eq. - iApply ghost_var_agree. - Qed. - - Global Instance has_memory_region_pers γ mreg : Persistent (has_memory_region γ mreg). - Proof. - rewrite has_memory_region_eq. - apply _. - Qed. -End page_allocator. - (** * Page allocator invariants *) +(* !start spec(page_allocator.page_allocator) *) Inductive node_allocation_state := (** This page node is completely allocated *) | PageTokenUnavailable @@ -84,6 +39,173 @@ Global Instance node_allocation_state_meet : Meet node_allocation_state := λ s1 | _, _ => PageTokenAvailable end. 
+Definition node_allocation_state_cmp (s1 s2 : node_allocation_state) : comparison := + match s1, s2 with + | PageTokenUnavailable, PageTokenUnavailable => + Eq + | PageTokenUnavailable, _ => + Lt + | PageTokenPartiallyAvailable sz1, PageTokenUnavailable => + Gt + | PageTokenPartiallyAvailable sz1, PageTokenPartiallyAvailable sz2 => + page_size_cmp sz1 sz2 + | PageTokenPartiallyAvailable sz1, PageTokenAvailable => + Lt + | PageTokenAvailable, PageTokenAvailable => + Eq + | PageTokenAvailable, _ => + Gt + end. + +Global Instance simpl_page_token_partially_available_le s1 s2 : + SimplBoth (PageTokenPartiallyAvailable s1 ≤o{node_allocation_state_cmp} PageTokenPartiallyAvailable s2) (s1 ≤ₚ s2). +Proof. done. Qed. +Global Instance simpl_page_token_partially_available_lt s1 s2 : + SimplBoth (PageTokenPartiallyAvailable s1 None + | PageTokenAvailable => Some max_sz + | PageTokenPartiallyAvailable allocable => Some allocable + end. +Definition page_node_can_allocate (node : page_storage_node) : option page_size := + node_allocation_state_can_allocate node.(allocation_state) node.(max_node_size). + +Lemma page_node_can_allocate_lb sz node : + Some sz ≤o{option_cmp page_size_cmp} page_node_can_allocate node → + PageTokenPartiallyAvailable sz ≤o{node_allocation_state_cmp} node.(allocation_state). +Proof. + unfold page_node_can_allocate. + simpl. + generalize (allocation_state node). + intros []; simpl. + all: try solve_goal. + intros. left. done. +Qed. + (** Compute the base address of a child node *) Definition child_base_address (parent_base_address : Z) (child_size : page_size) (child_index : Z) : Z := parent_base_address + (page_size_in_bytes_Z child_size * child_index). @@ -122,7 +266,8 @@ Definition page_storage_node_children_wf (parent_base_address : Z) (parent_node_ (* Then all children are sorted *) (∀ (i : nat) child, children !! 
i = Some child → child.(max_node_size) = child_node_size ∧ - child.(base_address) = child_base_address parent_base_address child_node_size i)) ∧ + child.(base_address) = child_base_address parent_base_address child_node_size i ∧ + page_node_can_allocate child ≤o{option_cmp page_size_cmp} Some child_node_size)) ∧ (* If there can't be any children, there are no children *) (page_size_smaller parent_node_size = None → children = []) @@ -132,7 +277,8 @@ Lemma page_storage_node_children_wf_child_lookup (i : nat) sz child_node_size ba page_storage_node_children_wf base sz children → children !! i = Some child → max_node_size child = child_node_size ∧ - base_address child = child_base_address base child_node_size i. + base_address child = child_base_address base child_node_size i ∧ + page_node_can_allocate child ≤o{option_cmp page_size_cmp} Some child_node_size. Proof. intros Hsmaller Hchildren Hchild. destruct Hchildren as [Hchildren _]. @@ -140,18 +286,21 @@ Proof. done. Qed. Lemma page_storage_node_children_wf_upd_state base sz children s : + (∀ child_sz, page_size_smaller sz = Some child_sz → node_allocation_state_can_allocate s child_sz ≤o{option_cmp page_size_cmp} Some child_sz) → page_storage_node_children_wf base sz children → page_storage_node_children_wf base sz ((λ node, page_node_update_state node s) <$> children). Proof. unfold page_storage_node_children_wf. + intros Hbound. destruct (page_size_smaller sz) as [smaller_size | ] eqn:Heq. - simpl. intros [Ha _]. split; last done. intros child_sz [=<-] i child Hlook. apply list_lookup_fmap_Some_1 in Hlook as (node & -> & Hlook). ospecialize (Ha _ _ _ _ Hlook). { done. } - destruct Ha as [<- Ha]. - done. + destruct Ha as (<- & Ha & Hb). + split_and!; [done.. | ]. + by apply Hbound. - intros [_ Ha]. split; first done. intros _. rewrite Ha; done. @@ -159,28 +308,27 @@ Qed. Lemma page_storage_node_children_wf_insert base sz children child' (i : nat) : max_node_size child' = max_node_size (children !!! 
i) → base_address child' = base_address (children !!! i) → + page_node_can_allocate child' ≤o{option_cmp page_size_cmp} Some (max_node_size child') → page_storage_node_children_wf base sz children → page_storage_node_children_wf base sz (<[i := child']> children). Proof. - intros Hsz Haddr [Hwf1 Hwf2]. split. + intros Hsz Haddr Hbound [Hwf1 Hwf2]. split. - intros child_sz Hsmaller j ? Hlook. apply list_lookup_insert_Some in Hlook as [(<- & -> & ?) | (? & ?)]; first last. { by eapply Hwf1. } rewrite Haddr Hsz. opose proof (lookup_lt_is_Some_2 children i _) as (child' & ?); first done. erewrite list_lookup_total_correct; last done. - by eapply Hwf1. + opose proof* Hwf1 as (Hsz' & Haddr' & ?); [done.. | ]. + split; first done. split; first done. + rewrite -Hsz'. + move: Hsz. + erewrite list_lookup_total_correct; last done. + intros <-. + done. - intros Hnone. specialize (Hwf2 Hnone). rewrite Hwf2. done. Qed. -(** Computes the maximum size this page node can allocate *) -Definition page_node_can_allocate (node : page_storage_node) : option page_size := - match node.(allocation_state) with - | PageTokenUnavailable => None - | PageTokenAvailable => Some node.(max_node_size) - | PageTokenPartiallyAvailable allocable => Some allocable - end. - (** The logical invariant on a page node *) Definition page_storage_node_invariant_case (node : page_storage_node) @@ -190,9 +338,11 @@ Definition page_storage_node_invariant_case (* No allocation is possible *) maybe_page_token = None ∧ max_sz = None + (* NOTE: This (and the matching requirement in the Available state below) are quite annoying to establish. + If we do `store_page_token_in_this_node`, we have to traverse the whole subtree and use reasoning about + exclusivity of memory ownership in order to get this. 
*) (* all children are unavailable *) - (* TODO do we need this *) - (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) + (*∧ Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else if decide (node.(allocation_state) = PageTokenAvailable) then (* This node is completely available *) @@ -202,10 +352,10 @@ Definition page_storage_node_invariant_case (* the allocable size spans the whole page *) max_sz = Some (node.(max_node_size)) ∧ (* the token spans the whole node *) - token.(page_loc).2 = node.(base_address) ∧ + token.(page_loc).(loc_a) = node.(base_address) ∧ token.(page_sz) = node.(max_node_size) (* all children are unavailable *) - (*Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) + (*∧ Forall (λ child, child.(allocation_state) = PageTokenUnavailable) children*) else (* This node is partially available with initialized children *) @@ -237,9 +387,19 @@ Lemma page_node_invariant_case_sized_bounded node max_sz tok children : max_sz ≤o{option_cmp page_size_cmp} Some (max_node_size node). Proof. unfold page_storage_node_invariant_case, page_node_can_allocate. + unfold ord_le. destruct (allocation_state node) eqn:Heq; simpl; solve_goal. Qed. +Lemma page_node_invariant_case_can_allocate_bounded node max_sz tok children : + page_storage_node_invariant_case node max_sz tok children → + page_node_can_allocate node ≤o{option_cmp page_size_cmp} Some (max_node_size node). +Proof. + unfold page_node_can_allocate. + unfold page_storage_node_invariant_case. + destruct allocation_state; solve_goal. +Qed. + Global Typeclasses Opaque page_storage_node_children_wf. Global Typeclasses Opaque page_storage_node_invariant_case. @@ -262,7 +422,9 @@ Definition page_storage_node_invariant (* invariant depending on the allocation state: *) name_hint "INV_CASE" (page_storage_node_invariant_case node max_sz maybe_page_token children) . 
+(* !end spec *) +(* !start proof(page_allocator.page_allocator) *) Lemma page_storage_node_invariant_empty node_size base_address : (page_size_align node_size | base_address) → page_storage_node_invariant (mk_page_node node_size base_address PageTokenUnavailable false) None None []. @@ -272,10 +434,21 @@ Proof. apply Nat2Z.divide. done. Qed. +Lemma page_storage_node_invariant_no_tok node max_sz children : + page_storage_node_invariant_case node max_sz None children → + max_sz ). + split; first done. + congruence. +Qed. + Lemma page_storage_node_invariant_case_available_inv node max_sz maybe_tok children : node.(allocation_state) = PageTokenAvailable → page_storage_node_invariant_case node max_sz maybe_tok children → - ∃ tok, maybe_tok = Some tok ∧ max_sz = Some node.(max_node_size) ∧ tok.(page_loc).2 = node.(base_address) ∧ tok.(page_sz) = node.(max_node_size). + ∃ tok, maybe_tok = Some tok ∧ max_sz = Some node.(max_node_size) ∧ tok.(page_loc).(loc_a) = node.(base_address) ∧ tok.(page_sz) = node.(max_node_size). Proof. unfold page_storage_node_invariant_case. intros ->. naive_solver. @@ -287,7 +460,7 @@ Global Instance page_storage_node_invariant_simpl_available node max_sz maybe_to max_sz = Some node.(max_node_size) → ∀ tok, maybe_tok = Some tok → - tok.(page_loc).2 = node.(base_address) → + tok.(page_loc).(loc_a) = node.(base_address) → tok.(page_sz) = node.(max_node_size) → T). Proof. @@ -306,7 +479,7 @@ Lemma page_storage_node_invariant_has_token node max_sz tok children : page_storage_node_invariant_case node max_sz (Some tok) children → node.(allocation_state) = PageTokenAvailable ∧ node.(max_node_size) = tok.(page_sz) ∧ - node.(base_address) = tok.(page_loc).2. + node.(base_address) = tok.(page_loc).(loc_a). Proof. unfold page_storage_node_invariant_case. repeat case_decide; simpl. @@ -332,6 +505,24 @@ Proof. intros ->. naive_solver. Qed. 
+Lemma page_storage_node_invariant_case_at_most_partially_available_inv node max_sz maybe_tok children sz : + node.(allocation_state) ≤o{node_allocation_state_cmp} PageTokenPartiallyAvailable sz → + page_storage_node_invariant_case node max_sz maybe_tok children → + max_sz ≤o{option_cmp page_size_cmp} Some sz ∧ maybe_tok = None. +Proof. + intros Hbound. + unfold page_storage_node_invariant_case. + repeat case_decide. + - solve_goal. + - intros (tok & -> & -> & ? & ?). + rename select (allocation_state node = PageTokenAvailable) into Hstate. + move: Hbound. rewrite Hstate. + intros []; done. + - intros (-> & (alloca & Hstate & -> & Hlt & _)). + move: Hbound. rewrite Hstate. + normalize_and_simpl_goal. +Qed. + Global Instance page_storage_node_invariant_simpl_partially_available node max_sz maybe_tok children sz `{!TCDone (node.(allocation_state) = PageTokenPartiallyAvailable sz)} `{!IsVar max_sz} `{!IsVar maybe_tok} : SimplImpl false (page_storage_node_invariant_case node max_sz maybe_tok children) (λ T, page_storage_node_invariant_case node max_sz maybe_tok children → @@ -374,3 +565,4 @@ Proof. simpl. naive_solver. - naive_solver. Qed. 
+(* !end proof *) diff --git a/verification/theories/page_table/dune b/verification/theories/page_table/dune index e07eb38b..b71d499c 100644 --- a/verification/theories/page_table/dune +++ b/verification/theories/page_table/dune @@ -1,4 +1,4 @@ -(coq.theory +(rocq.theory (name ace.theories.page_table) (package ace) (flags -w -notation-overridden -w -redundant-canonical-projection) diff --git a/verification/theories/page_table/page_table.v b/verification/theories/page_table/page_table.v index 40b184f0..a32918ed 100644 --- a/verification/theories/page_table/page_table.v +++ b/verification/theories/page_table/page_table.v @@ -364,7 +364,7 @@ Definition encode_logical_page_table_entry_bv (pte : logical_page_table_entry) : (* This is not a leaf *) encode_pte (pt_get_serialized_addr pt) (to_pte_flags true ptc pt_permission_pointer) | PageWithConfidentialVmData pg ptc ptp => - encode_pte pg.(page_loc).2 (to_pte_flags true ptc ptp) + encode_pte pg.(page_loc).(loc_a) (to_pte_flags true ptc ptp) | PageSharedWithHypervisor sp ptc ptp => encode_pte sp.(shared_page_hv_address) (to_pte_flags true ptc ptp) | NotValid => @@ -380,7 +380,7 @@ Definition encode_page_table_entries (entries : list logical_page_table_entry) : (** Relation that states that [pt_byte] is a valid byte-level representation of [pt_logical]'s entries. *) Definition is_byte_level_representation (pt_logical : page_table_tree) (pt_byte : page) := (* The physical address matches up *) - pt_byte.(page_loc).2 = pt_get_serialized_addr pt_logical ∧ + pt_byte.(page_loc).(loc_a) = pt_get_serialized_addr pt_logical ∧ (* The logical representation is well-formed *) page_table_wf pt_logical ∧ (* We have a 16KiB page for Level 5, and 4KiB pages otherwise *)