
Commit 14c5d1f

Andrew Boie authored and nashif committed
kernel: add CONFIG_ARCH_MAPS_ALL_RAM
Some arches like x86 need all memory mapped so that they can fetch
information placed arbitrarily by firmware, like ACPI tables.

Ensure that if this is the case, the kernel won't accidentally clobber it
by thinking the relevant virtual memory is unused. Otherwise this has no
effect on page frame management.

Signed-off-by: Andrew Boie <[email protected]>
1 parent 6c97ab3 commit 14c5d1f

File tree

5 files changed: +58 −16 lines changed

arch/Kconfig

Lines changed: 21 additions & 0 deletions

@@ -62,6 +62,7 @@ config X86
 	select ARCH_HAS_TIMING_FUNCTIONS
 	select ARCH_HAS_THREAD_LOCAL_STORAGE
 	select ARCH_HAS_DEMAND_PAGING
+	select ARCH_MAPS_ALL_RAM
 	help
 	  x86 architecture

@@ -532,6 +533,26 @@ config ARCH_HAS_RESERVED_PAGE_FRAMES
 	  memory mappings. The architecture will need to implement
 	  arch_reserved_pages_update().

+config ARCH_MAPS_ALL_RAM
+	bool
+	help
+	  This hidden option is selected by the architecture to inform the
+	  kernel that all RAM is mapped at boot, and not just the bounds of
+	  the Zephyr image. If RAM starts at 0x0, the first page must remain
+	  un-mapped to catch NULL pointer dereferences. With this enabled,
+	  the kernel will not assume that virtual memory addresses past the
+	  kernel image are available for mappings, but will instead take the
+	  mapping of all RAM into account.
+
+	  This is typically set by architectures which need direct access to
+	  all memory. It is the architecture's responsibility to mark
+	  reserved memory regions as such in arch_reserved_pages_update().
+
+	  Although the kernel will not disturb this RAM mapping by re-mapping
+	  the associated virtual addresses elsewhere, this is limited to
+	  management of the virtual address space only. The kernel's page
+	  frame ontology will not consider this mapping at all; non-kernel
+	  pages will be considered free (unless marked as reserved) and
+	  Z_PAGE_FRAME_MAPPED will not be set.
+
 menuconfig MMU
 	bool "Enable MMU features"
 	depends on CPU_HAS_MMU
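
[Editor's note] The help text leans on arch_reserved_pages_update() to keep
firmware-owned memory away from the allocator. A minimal sketch of what an
architecture might do in that hook (not code from this commit; the
fw_regions/fw_region_count list is invented for illustration, and the flag
and helper names are assumed to follow kernel/include/mmu.h):

	#include <kernel.h>
	#include <mmu.h>

	/* Hypothetical firmware-provided list of regions to keep off-limits */
	struct fw_region {
		uintptr_t start;  /* physical base, page-aligned */
		size_t size;      /* length in bytes */
	};
	extern const struct fw_region fw_regions[];
	extern const int fw_region_count;

	void arch_reserved_pages_update(void)
	{
		for (int i = 0; i < fw_region_count; i++) {
			for (uintptr_t pos = fw_regions[i].start;
			     pos < fw_regions[i].start + fw_regions[i].size;
			     pos += CONFIG_MMU_PAGE_SIZE) {
				if (!z_is_page_frame(pos)) {
					continue;
				}
				/* Mark the frame so it is never handed out */
				z_phys_to_page_frame(pos)->flags |=
					Z_PAGE_FRAME_RESERVED;
			}
		}
	}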

kernel/include/mmu.h

Lines changed: 16 additions & 0 deletions

@@ -40,6 +40,22 @@
 #define Z_KERNEL_VIRT_END  ((uint8_t *)(&z_mapped_end))
 #define Z_KERNEL_VIRT_SIZE ((size_t)(&z_mapped_size))

+#define Z_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
+			 CONFIG_SRAM_BASE_ADDRESS)
+
+/* Only applies to boot RAM mappings within the Zephyr image that have never
+ * been remapped or paged out. Never use this unless you know exactly what you
+ * are doing.
+ */
+#define Z_BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + Z_VM_OFFSET))
+#define Z_BOOT_PHYS_TO_VIRT(phys) ((uint8_t *)(((uintptr_t)phys) - Z_VM_OFFSET))
+
+#ifdef CONFIG_ARCH_MAPS_ALL_RAM
+#define Z_FREE_VM_START	Z_BOOT_PHYS_TO_VIRT(Z_PHYS_RAM_END)
+#else
+#define Z_FREE_VM_START	Z_KERNEL_VIRT_END
+#endif
+
 /*
  * Macros and data structures for physical page frame accounting,
  * APIs for use by eviction and backing store algorithms. This code
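
[Editor's note] These conversions only apply to the never-remapped boot
mapping, as the comment warns. A hypothetical use, tying back to the commit
message's ACPI motivation (not code from this commit; the helper and its
caller-supplied physical address are invented): once an arch selects
ARCH_MAPS_ALL_RAM, firmware data anywhere in RAM can be read through the
boot mapping without creating a new one.

	#include <string.h>
	#include <mmu.h>

	/* Hypothetical: check the signature of a firmware table whose
	 * physical address was discovered elsewhere (e.g. an ACPI RSDP
	 * scan). Only safe under CONFIG_ARCH_MAPS_ALL_RAM, which guarantees
	 * the boot RAM mapping outside the Zephyr image stays intact.
	 */
	static bool fw_table_sig_matches(uintptr_t table_phys, const char sig[4])
	{
		const char *table = (const char *)Z_BOOT_PHYS_TO_VIRT(table_phys);

		return memcmp(table, sig, 4) == 0;
	}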

kernel/mmu.c

Lines changed: 14 additions & 15 deletions

@@ -125,25 +125,32 @@ void z_page_frames_dump(void)
 	for (_pos = _base; \
 	     _pos < ((uintptr_t)_base + _size); _pos += CONFIG_MMU_PAGE_SIZE)

+
 /*
  * Virtual address space management
  *
  * Call all of these functions with z_mm_lock held.
  *
  * Overall virtual memory map: When the kernel starts, it resides in
- * virtual memory in the region Z_BOOT_KERNEL_VIRT_START to
- * Z_BOOT_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
+ * virtual memory in the region Z_KERNEL_VIRT_START to
+ * Z_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
  * noted by CONFIG_KERNEL_VM_SIZE may be used for runtime memory mappings.
  *
+ * If CONFIG_ARCH_MAPS_ALL_RAM is set, we do not just map the kernel image,
+ * but have a mapping for all RAM in place. This is for special architectural
+ * purposes and does not otherwise affect page frame accounting or flags;
+ * the only guarantee is that such RAM mapping outside of the Zephyr image
+ * won't be disturbed by subsequent memory mapping calls.
+ *
  * +--------------+ <- Z_VIRT_ADDR_START
  * | Undefined VM | <- May contain ancillary regions like x86_64's locore
- * +--------------+ <- Z_BOOT_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
+ * +--------------+ <- Z_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
  * | Mapping for  |
  * | main kernel  |
  * | image        |
  * |              |
  * |              |
- * +--------------+ <- Z_BOOT_KERNEL_VIRT_END
+ * +--------------+ <- Z_FREE_VM_START
  * |              |
  * | Unused,      |
  * | Available VM |

@@ -175,7 +182,7 @@ static void *virt_region_get(size_t size)
 {
 	uint8_t *dest_addr;

-	if ((mapping_pos - size) < Z_KERNEL_VIRT_END) {
+	if ((mapping_pos - size) < Z_FREE_VM_START) {
 		LOG_ERR("insufficient virtual address space (requested %zu)",
 			size);
 		return NULL;
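
[Editor's note] virt_region_get() hands out virtual addresses downward from
the top of the address space; with this change it refuses to grow past
Z_FREE_VM_START, so runtime mappings can never land on top of an all-RAM
boot mapping. A minimal usage sketch, assuming the z_phys_map() API from
include/sys/mem_manage.h (the wrapper itself is hypothetical):

	#include <sys/mem_manage.h>

	/* Hypothetical: map one page of device or firmware memory at
	 * runtime. The virtual address is chosen by virt_region_get()
	 * internally, growing down and stopping at Z_FREE_VM_START.
	 */
	static uint8_t *map_one_page(uintptr_t phys)
	{
		uint8_t *virt;

		z_phys_map(&virt, phys, CONFIG_MMU_PAGE_SIZE,
			   K_MEM_PERM_RW | K_MEM_CACHE_NONE);
		return virt;
	}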
@@ -474,14 +481,6 @@ size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
 	return addr_offset;
 }

-#define VM_OFFSET ((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
-		   CONFIG_SRAM_BASE_ADDRESS)
-
-/* Only applies to boot RAM mappings within the Zephyr image that have never
- * been remapped or paged out. Never use this unless you know exactly what you
- * are doing.
- */
-#define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + VM_OFFSET))

 #ifdef CONFIG_USERSPACE
 void z_kernel_map_fixup(void)

@@ -500,7 +499,7 @@ void z_kernel_map_fixup(void)

 	if (kobject_size != 0) {
 		arch_mem_map(kobject_page_begin,
-			     BOOT_VIRT_TO_PHYS(kobject_page_begin),
+			     Z_BOOT_VIRT_TO_PHYS(kobject_page_begin),
 			     kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
 	}
 }

@@ -527,7 +526,7 @@ void z_mem_manage_init(void)
 	 */
 	VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
 	{
-		pf = z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));
+		pf = z_phys_to_page_frame(Z_BOOT_VIRT_TO_PHYS(addr));
 		frame_mapped_set(pf, addr);

 		/* TODO: for now we pin the whole Zephyr image. Demand paging
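
[Editor's note] z_mem_manage_init() marks only the kernel image's own frames
as mapped, which is what the Kconfig help means by the page frame ontology
ignoring the all-RAM mapping. A sketch of the consequence for page-frame
consumers, assuming the Z_PAGE_FRAME_FOREACH iterator and flag names from
kernel/include/mmu.h (the counting helper is invented):

	#include <mmu.h>

	/* Illustrative only: under CONFIG_ARCH_MAPS_ALL_RAM, frames outside
	 * the Zephyr image are still mapped at boot, yet they carry neither
	 * Z_PAGE_FRAME_MAPPED nor (unless the arch marked them) the reserved
	 * flag, so allocators treat them as free.
	 */
	static void count_free_frames(size_t *n_free)
	{
		uintptr_t phys;
		struct z_page_frame *pf;

		*n_free = 0;
		Z_PAGE_FRAME_FOREACH(phys, pf) {
			if ((pf->flags & (Z_PAGE_FRAME_MAPPED |
					  Z_PAGE_FRAME_RESERVED)) == 0) {
				(*n_free)++;
			}
		}
	}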

tests/kernel/mem_protect/syscalls/CMakeLists.txt

Lines changed: 5 additions & 0 deletions

@@ -4,5 +4,10 @@ cmake_minimum_required(VERSION 3.13.1)
 find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
 project(syscalls)

+target_include_directories(app PRIVATE
+  ${ZEPHYR_BASE}/kernel/include
+  ${ZEPHYR_BASE}/arch/${ARCH}/include
+  )
+
 FILE(GLOB app_sources src/*.c)
 target_sources(app PRIVATE ${app_sources})

tests/kernel/mem_protect/syscalls/src/main.c

Lines changed: 2 additions & 1 deletion

@@ -9,6 +9,7 @@
 #include <ztest.h>
 #include <linker/linker-defs.h>
 #include "test_syscalls.h"
+#include <mmu.h>

 #define BUF_SIZE 32
 #define SLEEP_MS_LONG 15000

@@ -18,7 +19,7 @@
 #define FAULTY_ADDRESS 0x0FFFFFFF
 #elif CONFIG_MMU
 /* Just past the zephyr image mapping should be a non-present page */
-#define FAULTY_ADDRESS ((uint8_t *)(&z_mapped_end))
+#define FAULTY_ADDRESS Z_FREE_VM_START
 #else
 #define FAULTY_ADDRESS 0xFFFFFFF0
 #endif
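
[Editor's note] Without this change, a board with ARCH_MAPS_ALL_RAM would
have real memory mapped at z_mapped_end and the negative test would stop
faulting; Z_FREE_VM_START restores the guarantee of a non-present page. A
hypothetical check of that expectation (not part of this commit; it assumes
the z_user_string_nlen() validation helper from syscall_handler.h):

	/* Hypothetical: validation of a user-supplied string at
	 * FAULTY_ADDRESS must report a fault, since no page is present
	 * there in either the default or the ARCH_MAPS_ALL_RAM layout.
	 */
	static void check_faulty_address_is_unmapped(void)
	{
		int err = 0;

		(void)z_user_string_nlen((const char *)FAULTY_ADDRESS,
					 BUF_SIZE, &err);
		zassert_not_equal(err, 0, "expected a fault on unmapped address");
	}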
