 #include <stdint.h>
 #include <stdbool.h>
 
+/**
+ * @enum cache_mode
+ * Page cache attributes for new mappings.
+ */
 enum cache_mode {
-    cache_wb,  /* normal write-back (PWT=0, PCD=0) */
-    cache_uc   /* uncacheable (PWT=1, PCD=1) - safe default for MMIO */
+    cache_wb,  /**< Write-back: normal cached memory (PWT=0, PCD=0) */
+    cache_uc   /**< Uncacheable: safe default for MMIO (PWT=1, PCD=1) */
 };
 
-typedef uint64_t (*alloc_page_cb)(void);          /* returns physical address of a zeroed 4K page */
-typedef void     (*free_page_cb)(uint64_t phys);  /* free a 4K page by physical address */
+typedef struct gdt_t {
+    uint16_t limit;
+    uint64_t base;
+} __attribute__((packed)) gdt_t;
 
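/*
 * Illustrative sketch (not part of this commit): the packed gdt_t matches the
 * 10-byte descriptor-table pointer consumed by lgdt. Assuming a hypothetical
 * gdt_entries[] array, loading it would look like:
 *
 *     static uint64_t gdt_entries[3];                 // hypothetical GDT storage
 *     gdt_t gdtr = {
 *         .limit = sizeof(gdt_entries) - 1,
 *         .base  = (uint64_t)&gdt_entries[0],
 *     };
 *     __asm__ volatile ("lgdt %0" :: "m"(gdtr));
 */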
-#define PTE_P   (1 << 0)
-#define PTE_W   (1 << 1)
-#define PTE_U   (1 << 2)
-#define PTE_PWT (1 << 3)
-#define PTE_PCD (1 << 4)
-#define PTE_A   (1 << 5)
-#define PTE_D   (1 << 6)
-#define PTE_PS  (1 << 7)   /* For PDE (2M) / PDPTE (1G). For PTE this bit is PAT. */
-#define PTE_G   (1 << 8)
-#define PTE_NX  (1ull << 63)
+/**
+ * @typedef alloc_page_cb
+ * Callback to allocate a zeroed 4 KiB page, returning its physical address.
+ */
+typedef uint64_t (*alloc_page_cb)(void);
 
-#define PT_ENTRIES 512
-#define PT_SHIFT   12
-#define PD_SHIFT   21
-#define PDP_SHIFT  30
-#define PML4_SHIFT 39
-#define PT_MASK    0x000ffffffffff000
+/**
+ * @typedef free_page_cb
+ * Callback to free a 4 KiB page by physical address.
+ */
+typedef void (*free_page_cb)(uint64_t phys);
 
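/*
 * Illustrative sketch (not part of this commit): the two callbacks backed by a
 * hypothetical bump allocator over identity-mapped RAM; a real kernel would
 * plug in its physical-memory manager here. Needs <string.h> for memset.
 *
 *     static uint64_t next_frame = 0x01000000;   // hypothetical start of free RAM
 *
 *     static uint64_t bump_alloc_page(void) {
 *         uint64_t phys = next_frame;
 *         next_frame += 0x1000;
 *         memset((void *)phys, 0, 0x1000);       // assumes RAM is identity-mapped
 *         return phys;
 *     }
 *
 *     static void bump_free_page(uint64_t phys) {
 *         (void)phys;                            // a bump allocator never reuses frames
 *     }
 *
 *     alloc_page_cb alloc_page = bump_alloc_page;
 *     free_page_cb  free_page  = bump_free_page;
 */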
-bool map_range(uint64_t pml4_phys, uint64_t virt, uint64_t phys, uint64_t size, bool writable, bool executable, enum cache_mode cmode, alloc_page_cb alloc_page);
+/* Page table entry flags */
+#define PTE_PRESENT        (1 << 0)     /**< Present */
+#define PTE_WRITE          (1 << 1)     /**< Writable */
+#define PTE_USER           (1 << 2)     /**< User accessible */
+#define PTE_WRITE_THROUGH  (1 << 3)     /**< Write-through caching */
+#define PTE_CACHE_DISABLED (1 << 4)     /**< Cache disable */
+#define PTE_ACCESSED       (1 << 5)     /**< Accessed */
+#define PTE_DIRTY          (1 << 6)     /**< Dirty (for PTE) */
+#define PTE_PAGE_SIZE      (1 << 7)     /**< Page size: 2 MiB (PD) / 1 GiB (PDP); for a PTE this bit is PAT */
+#define PTE_GLOBAL         (1 << 8)     /**< Global mapping */
+#define PTE_NO_EXECUTE     (1ull << 63) /**< No-execute */
 
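/*
 * Illustrative sketch (not part of this commit, and not necessarily how
 * map_range() composes entries internally): how the writable/executable/cmode
 * parameters would translate into the flag bits above.
 *
 *     static inline uint64_t make_leaf_flags(bool writable, bool executable,
 *                                            enum cache_mode cmode) {
 *         uint64_t flags = PTE_PRESENT;
 *         if (writable)          flags |= PTE_WRITE;
 *         if (!executable)       flags |= PTE_NO_EXECUTE;     // needs EFER.NXE set
 *         if (cmode == cache_uc) flags |= PTE_WRITE_THROUGH | PTE_CACHE_DISABLED;
 *         return flags;
 *     }
 */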
-bool map_identity(uint64_t pml4_phys, uint64_t base, uint64_t size, bool writable, bool executable, enum cache_mode cmode, alloc_page_cb alloc_page);
+/* Page table constants */
+#define PT_ENTRIES 512
+#define PT_SHIFT   12
+#define PD_SHIFT   21
+#define PDP_SHIFT  30
+#define PML4_SHIFT 39
+#define PT_MASK    0x000ffffffffff000
 
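/*
 * The shift constants carve a canonical virtual address into four 9-bit table
 * indices plus a 12-bit page offset; PT_MASK extracts the physical address of
 * the next-level table from an entry. Sketch (address and `entry` hypothetical):
 *
 *     uint64_t va    = 0xffff800000123000ull;
 *     uint64_t pml4i = (va >> PML4_SHIFT) & (PT_ENTRIES - 1);   // bits 47..39
 *     uint64_t pdpi  = (va >> PDP_SHIFT)  & (PT_ENTRIES - 1);   // bits 38..30
 *     uint64_t pdi   = (va >> PD_SHIFT)   & (PT_ENTRIES - 1);   // bits 29..21
 *     uint64_t pti   = (va >> PT_SHIFT)   & (PT_ENTRIES - 1);   // bits 20..12
 *     uint64_t next_table_phys = entry & PT_MASK;
 */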
-bool unmap_range(uint64_t pml4_phys, uint64_t virt, uint64_t size);
+/**
+ * Map a physical range to a specified virtual range.
+ *
+ * @param virt       Starting virtual address (4 KiB aligned)
+ * @param phys       Starting physical address (4 KiB aligned)
+ * @param size       Mapping size in bytes (multiple of 4 KiB)
+ * @param writable   If true, mapping is writable
+ * @param executable If true, mapping is executable; otherwise NX is set
+ * @param cmode      Cache attribute for the mapping
+ * @param alloc_page Allocator for new page-table pages
+ * @return true if the range was mapped successfully, false otherwise
+ */
+bool map_range(uint64_t virt, uint64_t phys, uint64_t size, bool writable, bool executable, enum cache_mode cmode, alloc_page_cb alloc_page);
 
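/*
 * Usage sketch (addresses hypothetical, not part of this commit): map 16 KiB
 * of RAM read-write, non-executable, write-back, into the higher half, using
 * the bump allocator sketched earlier.
 *
 *     bool ok = map_range(0xffff800000100000ull,   // virt
 *                         0x0000000000100000ull,   // phys
 *                         4 * 0x1000,              // 16 KiB
 *                         true,                    // writable
 *                         false,                   // not executable -> NX set
 *                         cache_wb,
 *                         bump_alloc_page);
 *     if (!ok)
 *         panic("map_range failed");               // hypothetical error handler
 */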
+/**
+ * Identity-map a physical range.
+ *
+ * @param base       Starting physical address (used as both VA and PA)
+ * @param size       Mapping size in bytes (multiple of 4 KiB)
+ * @param writable   If true, mapping is writable
+ * @param executable If true, mapping is executable; otherwise NX is set
+ * @param cmode      Cache attribute for the mapping
+ * @param alloc_page Allocator for new page-table pages
+ * @return true if the range was mapped successfully, false otherwise
+ */
+bool map_identity(uint64_t base, uint64_t size, bool writable, bool executable, enum cache_mode cmode, alloc_page_cb alloc_page);
+
+/**
+ * Unmap a virtual range.
+ *
+ * @param virt Starting virtual address (4 KiB aligned)
+ * @param size Range size in bytes (multiple of 4 KiB)
+ * @return true if the range was unmapped successfully, false otherwise
+ */
+bool unmap_range(uint64_t virt, uint64_t size);
+
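/*
 * Usage sketch (not part of this commit): tear down the 16 KiB range mapped in
 * the map_range() example above. Unless unmap_range() flushes internally, the
 * stale TLB entries still need invalidation, e.g. via invlpg() below.
 *
 *     if (!unmap_range(0xffff800000100000ull, 4 * 0x1000))
 *         panic("unmap_range failed");             // hypothetical error handler
 */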
+/**
+ * Clone Limine's initial page tables, switch CR3 to the new copy, and
+ * preserve all existing mappings (identity and HHDM).
+ * Must be called once, very early in boot, before interrupts are enabled.
+ */
 void adopt_cloned_tables(void);
 
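/*
 * Illustrative boot ordering implied by the comment above (everything except
 * adopt_cloned_tables() is hypothetical):
 *
 *     void kmain(void) {
 *         adopt_cloned_tables();        // clone Limine tables, switch CR3
 *         pmm_init();                   // hypothetical physical-memory init
 *         // ... establish kernel mappings via map_range()/mmio_identity_map() ...
 *         __asm__ volatile ("sti");     // enable interrupts only after adoption
 *     }
 */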
-bool mmio_identity_map(uint64_t pml4_phys, uint64_t phys, uint64_t size);
+/**
+ * Identity-map a physical MMIO region with UC attributes.
+ *
+ * @param phys Physical base address (4 KiB aligned)
+ * @param size Mapping size in bytes (multiple of 4 KiB)
+ * @return true if the range was mapped successfully, false otherwise
+ */
+bool mmio_identity_map(uint64_t phys, uint64_t size);
+
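/*
 * Usage sketch (not part of this commit): identity-map a 4 KiB MMIO window,
 * e.g. a local APIC at its default 0xFEE00000 base, before touching its
 * registers (which are then accessed uncached).
 *
 *     if (!mmio_identity_map(0xFEE00000ull, 0x1000))
 *         panic("LAPIC map failed");               // hypothetical error handler
 *     volatile uint32_t *lapic_id = (volatile uint32_t *)(0xFEE00000ull + 0x20);
 *     uint32_t id = *lapic_id >> 24;               // APIC ID lives in bits 31:24
 */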
+/**
+ * Identity-map a physical RAM region with WB attributes.
+ *
+ * @param phys       Physical base address (4 KiB aligned)
+ * @param size       Mapping size in bytes (multiple of 4 KiB)
+ * @param writable   If true, mapping is writable
+ * @param executable If true, mapping is executable; otherwise NX is set
+ * @return true if the range was mapped successfully, false otherwise
+ */
+bool ram_identity_map(uint64_t phys, uint64_t size, bool writable, bool executable);
+
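/*
 * Usage sketch (address hypothetical): make a 64 KiB bootloader-provided buffer
 * reachable at its physical address, read-write and non-executable.
 *
 *     ram_identity_map(0x00200000ull, 0x10000, true, false);
 */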
+/**
+ * Unmap a previously identity-mapped physical range.
+ *
+ * @param phys Physical base address (4 KiB aligned)
+ * @param size Mapping size in bytes (multiple of 4 KiB)
+ * @return true if the range was unmapped successfully, false otherwise
+ */
+bool identity_unmap(uint64_t phys, uint64_t size);
 
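/*
 * Usage sketch (mirrors the ram_identity_map() example above): drop the
 * identity mapping once the buffer is no longer needed.
 *
 *     identity_unmap(0x00200000ull, 0x10000);
 */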
-bool ram_identity_map(uint64_t pml4_phys, uint64_t phys, uint64_t size, bool writable, bool executable);
+/**
+ * Adopt the already-prepared page tables on an AP.
+ * adopt_cloned_tables() must have run on the BSP first.
+ * Panics if interrupts are enabled; safe to call multiple times.
+ */
+void adopt_cloned_tables_on_ap(void);
 
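/*
 * Illustrative AP entry path (everything except adopt_cloned_tables_on_ap()
 * is hypothetical):
 *
 *     void ap_entry(void) {
 *         adopt_cloned_tables_on_ap();   // interrupts must still be disabled here
 *         ap_local_init();               // hypothetical per-CPU setup
 *         __asm__ volatile ("sti");
 *         idle_loop();                   // hypothetical
 *     }
 */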
-bool identity_unmap(uint64_t pml4_phys, uint64_t phys, uint64_t size);
+/**
+ * Invalidate the TLB entry for the page containing a virtual address.
+ * @param va Virtual address within the page to invalidate
+ */
+static inline __attribute__((always_inline)) void invlpg(void *va) {
+    __asm__ volatile ("invlpg (%0)" :: "r"(va) : "memory");
+}
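/*
 * Usage sketch (not part of this commit): after changing or removing a live
 * mapping, flush the stale TLB entry for each affected page, e.g. for the
 * 16 KiB range from the unmap_range() example above.
 *
 *     for (uint64_t off = 0; off < 4 * 0x1000; off += 0x1000)
 *         invlpg((void *)(0xffff800000100000ull + off));
 */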