@@ -56,8 +56,72 @@ inline bool is_flagged_page(uint64_t flags, uint64_t entry) {
 	return (entry & flags) == flags;
 }
 
+static void add_remappings(vMemory& memory,
+	const VirtualRemapping& remapping,
+	uint64_t* pml4,
+	uint64_t flags,
+	uint64_t& free_page)
+{
+	if (remapping.virt <= free_page)
+		throw MachineException("Invalid remapping address", remapping.virt);
+	if (remapping.size % vMemory::PageSize() != 0)
+		throw MachineException("Invalid remapping size", remapping.size);
+	const auto virt_tera_page = (remapping.virt >> 39UL) & 511;
+	const auto virt_giga_page = (remapping.virt >> 30UL) & 511;
+
+	uint64_t paddr_base = remapping.phys;
+	if (paddr_base == 0x0) {
+		constexpr auto PD_ALIGN_MASK = (1ULL << 21U) - 1;
+		// Over-allocate rounding up to nearest 2MB
+		paddr_base = memory.machine.mmap_allocate(remapping.size + PD_ALIGN_MASK);
+		paddr_base = (paddr_base + PD_ALIGN_MASK) & ~PD_ALIGN_MASK;
+		// Relax allocation down to size
+		memory.machine.mmap() = paddr_base + remapping.size;
+	}
+
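+	// Install a PDPT for this 512GB region if the PML4 entry is still empty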
+	if (pml4[virt_tera_page] == 0) {
+		const auto pdpt_addr = free_page;
+		free_page += 0x1000;
+
+		pml4[virt_tera_page] = PDE64_PRESENT | PDE64_USER | PDE64_RW | pdpt_addr;
+	}
+
+	auto pdpt_addr = pml4[virt_tera_page] & PDE64_ADDR_MASK;
+	auto* pdpt = memory.page_at(pdpt_addr);
+
+	// Allocate the gigapage with 512x 2MB entries
+	if (pdpt[virt_giga_page] == 0) {
+		const auto giga_page = free_page;
+		free_page += 0x1000;
+		pdpt[virt_giga_page] = PDE64_PRESENT | PDE64_USER | PDE64_RW | giga_page;
+	}
+
+	auto pd_addr = pdpt[virt_giga_page] & PDE64_ADDR_MASK;
+	auto* pd = memory.page_at(pd_addr);
+
+	// Create 2MB entries for remapping size
+	const auto n_2mb_pages = (remapping.size >> 21UL) & 511;
+	for (uint64_t i = 0; i < 512; i++)
+	{
+		const auto paddr = paddr_base + (i << 21UL);
+		if (i < n_2mb_pages)
+			pd[i] = PDE64_PRESENT | flags | PDE64_PS | paddr;
+		else
+			pd[i] = 0;
+	}
+
+	// Track the first seen executable mapping, allowing mmap to use it for
+	// JIT segments.
+	if (remapping.executable && memory.vmem_exec_begin == 0)
+	{
+		memory.vmem_exec_begin = remapping.virt;
+		memory.vmem_exec_end = remapping.virt + remapping.size;
+	}
+}
+
 uint64_t setup_amd64_paging(vMemory& memory,
-	std::string_view binary, const std::vector<VirtualRemapping>& remappings)
+	std::string_view binary,
+	const std::vector<VirtualRemapping>& remappings)
 {
 	static constexpr uint64_t PD_MASK = (1ULL << 30) - 1;
 	const size_t PD_PAGES = (memory.size + PD_MASK) >> 30;
@@ -136,10 +200,16 @@ uint64_t setup_amd64_paging(vMemory& memory,
 	}
 
 	// Covers 1GB pages with 512x 2MB user-read-write entries
+	// NOTE: Even with executable heap, the ELF loader will still correctly
+	// apply the NX-bit to its own segments.
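+	// Heap pages are user read/write; add the NX bit unless executable_heap is enabled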
+	uint64_t heap_flags = PDE64_USER | PDE64_RW;
+	if (!memory.executable_heap)
+		heap_flags |= PDE64_NX;
 	for (uint64_t i = base_2mb_page+2; i < 512*PD_PAGES; i++) {
-		pd[i] = PDE64_PRESENT | PDE64_PS | PDE64_USER | PDE64_RW | PDE64_NX
+		pd[i] = PDE64_PRESENT | PDE64_PS | heap_flags
 			| ((base_giga_page << 30) + (i << 21));
 	}
+	printf("Heap is executable: %d\n", memory.executable_heap);
 
 	/* ELF executable area */
 	if (!binary.empty())
@@ -234,53 +304,10 @@ uint64_t setup_amd64_paging(vMemory& memory,
 	/* Virtual memory remappings (up to 1GB each, for now) */
 	for (const auto& vmem : remappings)
 	{
-		if (vmem.virt <= free_page)
-			throw MachineException("Invalid remapping address", vmem.virt);
-		if (vmem.size % vMemory::PageSize() != 0)
-			throw MachineException("Invalid remapping size", vmem.size);
-		const auto virt_tera_page = (vmem.virt >> 39UL) & 511;
-		const auto virt_giga_page = (vmem.virt >> 30UL) & 511;
-
-		uint64_t paddr_base = vmem.phys;
-		if (paddr_base == 0x0) {
-			constexpr auto PD_ALIGN_MASK = (1ULL << 21U) - 1;
-			// Over-allocate rounding up to nearest 2MB
-			paddr_base = memory.machine.mmap_allocate(vmem.size + PD_ALIGN_MASK);
-			paddr_base = (paddr_base + PD_ALIGN_MASK) & ~PD_ALIGN_MASK;
-			// Relax allocation down to size
-			memory.machine.mmap() = paddr_base + vmem.size;
-		}
-
-		if (pml4[virt_tera_page] == 0) {
-			const auto pdpt_addr = free_page;
-			free_page += 0x1000;
-
-			pml4[virt_tera_page] = PDE64_PRESENT | PDE64_USER | PDE64_RW | pdpt_addr;
-		}
-
-		auto pdpt_addr = pml4[virt_tera_page] & PDE64_ADDR_MASK;
-		auto* pdpt = memory.page_at(pdpt_addr);
-
-		// Allocate the gigapage with 512x 2MB entries
-		if (pdpt[virt_giga_page] == 0) {
-			const auto giga_page = free_page;
-			free_page += 0x1000;
-			pdpt[virt_giga_page] = PDE64_PRESENT | PDE64_USER | PDE64_RW | giga_page;
-		}
-
-		auto pd_addr = pdpt[virt_giga_page] & PDE64_ADDR_MASK;
-		auto* pd = memory.page_at(pd_addr);
-
-		// Create 2MB entries for remapping size
-		const auto n_2mb_pages = (vmem.size >> 21UL) & 511;
-		for (uint64_t i = 0; i < 512; i++)
-		{
-			const auto paddr = paddr_base + (i << 21UL);
-			if (i < n_2mb_pages)
-				pd[i] = PDE64_PRESENT | PDE64_USER | PDE64_RW | PDE64_NX | PDE64_PS | paddr;
-			else
-				pd[i] = 0;
-		}
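+		// Compose remapping flags: user pages, non-executable by default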
+		uint64_t flags = PDE64_USER | PDE64_NX;
+		if (vmem.writable)   flags |= PDE64_RW;
+		if (vmem.executable) flags &= ~PDE64_NX;
+		add_remappings(memory, vmem, pml4, flags, free_page);
 	}
 
 	// vDSO / vsyscall