/*
Copyright 2025 The Hyperlight Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

use alloc::alloc::Layout;
use core::arch::asm;

use crate::OS_PAGE_SIZE;

/// Convert a physical address in main memory to a virtual address
/// through the physmap
///
/// This is _not guaranteed_ to work with device memory
pub fn ptov(x: u64) -> *mut u8 {
    // Currently, all of main memory is identity mapped
    x as *mut u8
}

// TODO: This is not thread-safe at the moment
// TODO: A lot of code in this file uses inline assembly to load and
//       store page table entries. It would be nice to use pointer
//       volatile read/writes instead, but unfortunately we have a PTE
//       at physical address 0, which is currently identity-mapped at
//       virtual address 0, and Rust raw pointer operations can't be
//       used to read/write from address 0.

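// How the pieces below fit together: with x86-64 4-level paging, a
// virtual address is resolved through four tables, each indexed by a
// 9-bit slice of the address:
//
//   PML4 index: bits 47-39
//   PDPT index: bits 38-30
//   PD   index: bits 29-21
//   PT   index: bits 20-12 (bits 11-0 are the offset within the page)
//
// Each modify_ptes::<H, L> stage below splits a MapRequest (work to do
// against one table) into one MapResponse per entry of that table the
// request touches, and the callbacks between the stages turn each
// response back into a request against the next-level table.
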
/// A helper structure indicating a mapping operation that needs to be
/// performed
struct MapRequest {
    table_base: u64,
    vmin: *mut u8,
    len: u64,
}

/// A helper structure indicating that a particular PTE needs to be
/// modified
struct MapResponse {
    entry_ptr: *mut u64,
    vmin: *mut u8,
    len: u64,
}

/// Assumption: `phys_base`, `virt_base`, and `len` are all page-aligned
pub unsafe fn map_region(phys_base: u64, virt_base: *mut u8, len: u64) {
    let mut pml4_base: u64 = 0;
    unsafe {
        asm!("mov {}, cr3", out(reg) pml4_base);
    }
    pml4_base &= !0xfff;
    modify_ptes::<47, 39>(MapRequest {
        table_base: pml4_base,
        vmin: virt_base,
        len,
    })
    .map(|r| unsafe { alloc_pte_if_needed(r) })
    .flat_map(modify_ptes::<38, 30>)
    .map(|r| unsafe { alloc_pte_if_needed(r) })
    .flat_map(modify_ptes::<29, 21>)
    .map(|r| unsafe { alloc_pte_if_needed(r) })
    .flat_map(modify_ptes::<20, 12>)
    .map(|r| map_normal(phys_base, virt_base, r))
    .collect::<()>();
}
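
// A minimal usage sketch (illustrative only; the virtual address below
// is made up, and OS_PAGE_SIZE is assumed to be the 4KiB mapping
// granularity used throughout this file):
//
//     let phys = unsafe { alloc_phys_pages(1) };
//     unsafe { map_region(phys, 0x2_0000_0000 as *mut u8, OS_PAGE_SIZE as u64) };
//
// after which accesses through 0x2_0000_0000 reach the newly allocated
// physical page.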

#[allow(unused)]
/// This function is not presently used for anything, but is useful
/// for debugging
pub unsafe fn dbg_print_address_pte(address: u64) -> u64 {
    let mut pml4_base: u64 = 0;
    unsafe {
        asm!("mov {}, cr3", out(reg) pml4_base);
    }
    pml4_base &= !0xfff;
    let addrs = modify_ptes::<47, 39>(MapRequest {
        table_base: pml4_base,
        vmin: address as *mut u8,
        len: unsafe { OS_PAGE_SIZE as u64 },
    })
    .map(|r| unsafe { require_pte_exist(r) })
    .flat_map(modify_ptes::<38, 30>)
    .map(|r| unsafe { require_pte_exist(r) })
    .flat_map(modify_ptes::<29, 21>)
    .map(|r| unsafe { require_pte_exist(r) })
    .flat_map(modify_ptes::<20, 12>)
    .map(|r| {
        let mut pte: u64 = 0;
        unsafe {
            asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) r.entry_ptr);
        }
        pte
    })
    .collect::<alloc::vec::Vec<u64>>();
    if addrs.len() != 1 {
        panic!("impossible: 1 page map request resolved to multiple PTEs");
    }
    addrs[0]
}
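
// The raw PTE returned above can be decoded with the standard x86-64
// field positions, e.g. (illustrative; `addr` is a placeholder):
//
//     let pte = unsafe { dbg_print_address_pte(addr) };
//     let present = pte & 1 != 0;
//     let frame = pte & 0x000f_ffff_ffff_f000; // bits 51-12: frame address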

/// Allocate n contiguous physical pages and return the physical
/// address of the first page.
pub unsafe fn alloc_phys_pages(n: u64) -> u64 {
    // Currently, since all of main memory is idmap'd, we can just
    // allocate any appropriately aligned section of memory.
    unsafe {
        let v = alloc::alloc::alloc_zeroed(
            Layout::from_size_align(n as usize * OS_PAGE_SIZE as usize, OS_PAGE_SIZE as usize)
                .expect("could not create physical page allocation layout"),
        );
        if v.is_null() {
            panic!("could not allocate a physical page");
        }
        v as u64
    }
}
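
// Note: pages allocated here come from the guest heap and are never
// deallocated; freeing one would require a `dealloc` call with the
// same `Layout`. For page tables this is acceptable in practice, since
// they are needed for the remaining lifetime of the guest.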

/// Page-walk callback which requires that the next-level page table
/// already exists (used when inspecting existing mappings)
pub unsafe fn require_pte_exist(x: MapResponse) -> MapRequest {
    let mut pte: u64 = 0;
    unsafe {
        asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
    }
    let present = pte & 0x1;
    if present == 0 {
        panic!("debugging: found not-present pte");
    }
    MapRequest {
        table_base: pte & !0xfff,
        vmin: x.vmin,
        len: x.len,
    }
}

/// Page-mapping callback to allocate a next-level page table if necessary
pub unsafe fn alloc_pte_if_needed(x: MapResponse) -> MapRequest {
    let mut pte: u64 = 0;
    unsafe {
        asm!("mov {}, qword ptr [{}]", out(reg) pte, in(reg) x.entry_ptr);
    }
    let present = pte & 0x1;
    if present != 0 {
        return MapRequest {
            table_base: pte & !0xfff,
            vmin: x.vmin,
            len: x.len,
        };
    }
    let page_addr = unsafe { alloc_phys_pages(1) };
    unsafe { ptov(page_addr).write_bytes(0u8, OS_PAGE_SIZE as usize) };
    let pte = page_addr |
        1 << 5 | // A   - we don't track accesses at table level
        0 << 4 | // PCD - leave caching enabled
        0 << 3 | // PWT - write-back
        1 << 2 | // U/S - allow user access to everything (for now)
        1 << 1 | // R/W - we don't use block-level permissions
        1 << 0; // P   - this entry is present
    unsafe {
        asm!("mov qword ptr [{}], {}", in(reg) x.entry_ptr, in(reg) pte);
    }
    MapRequest {
        table_base: page_addr,
        vmin: x.vmin,
        len: x.len,
    }
}
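
// Design note: entries created here are maximally permissive (R/W and
// U/S set) at the table level; any permission enforcement would have
// to happen in the leaf PTEs written by map_normal below, which
// currently also maps everything RWX (see the TODO there).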

/// Map a normal memory page
///
/// TODO: support permissions; currently mapping is always RWX
fn map_normal(phys_base: u64, virt_base: *mut u8, r: MapResponse) {
    let pte = (phys_base + (r.vmin as u64 - virt_base as u64)) |
        1 << 6 | // D   - we don't presently track dirty state for anything
        1 << 5 | // A   - we don't presently track access for anything
        0 << 4 | // PCD - leave caching enabled
        0 << 3 | // PWT - write-back
        1 << 2 | // U/S - allow user access to everything (for now)
        1 << 1 | // R/W - for now make everything r/w
        1 << 0; // P   - this entry is present
    unsafe {
        r.entry_ptr.write_volatile(pte);
    }
}

#[inline(always)]
/// Utility function to extract an (inclusive on both ends) bit range
/// from a quadword.
fn bits<const high_bit: u8, const low_bit: u8>(x: u64) -> u64 {
    (x & ((1 << (high_bit + 1)) - 1)) >> low_bit
}
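
// For example, bits::<47, 39>(0x0000_ffff_ffff_ffff) == 0x1ff (the
// last PML4 index), and bits::<20, 12>(0x20_1000) == 1 (the second PT
// index within its page table).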

struct ModifyPteIterator<const high_bit: u8, const low_bit: u8> {
    request: MapRequest,
    // Bytes of the request already covered by previously-emitted
    // responses
    consumed: u64,
}
impl<const high_bit: u8, const low_bit: u8> Iterator for ModifyPteIterator<high_bit, low_bit> {
    type Item = MapResponse;
    fn next(&mut self) -> Option<Self::Item> {
        if self.consumed >= self.request.len {
            return None;
        }
        // next stage parameters
        let next_vmin = self.request.vmin.wrapping_add(self.consumed as usize);
        let entry_ptr = ptov(self.request.table_base)
            .wrapping_add((bits::<high_bit, low_bit>(next_vmin as u64) << 3) as usize)
            as *mut u64;
        // Cut each chunk at the next 2^low_bit-aligned boundary, so
        // that a single response never straddles two entries of this
        // table (which fixed-size chunks would do whenever vmin is not
        // itself aligned to the chunk size)
        let chunk_size = 1u64 << low_bit;
        let to_boundary = chunk_size - ((next_vmin as u64) & (chunk_size - 1));
        let next_len = core::cmp::min(self.request.len - self.consumed, to_boundary);

        // update our state
        self.consumed += next_len;

        Some(MapResponse {
            entry_ptr,
            vmin: next_vmin,
            len: next_len,
        })
    }
}
fn modify_ptes<const high_bit: u8, const low_bit: u8>(
    r: MapRequest,
) -> ModifyPteIterator<high_bit, low_bit> {
    ModifyPteIterator {
        request: r,
        consumed: 0,
    }
}
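
// For example, a page-aligned 3-page (12KiB) request fed through
// modify_ptes::<20, 12> yields three MapResponses, one per PTE, each
// with len == 4096; the same request fed through modify_ptes::<29, 21>
// yields a single response covering all three pages if they sit under
// one PD entry, or two responses if they cross a 2MiB boundary.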