@@ -125,25 +125,32 @@ void z_page_frames_dump(void)
 	for (_pos = _base; \
 	     _pos < ((uintptr_t)_base + _size); _pos += CONFIG_MMU_PAGE_SIZE)
 
+
 /*
  * Virtual address space management
  *
  * Call all of these functions with z_mm_lock held.
  *
  * Overall virtual memory map: When the kernel starts, it resides in
- * virtual memory in the region Z_BOOT_KERNEL_VIRT_START to
- * Z_BOOT_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
+ * virtual memory in the region Z_KERNEL_VIRT_START to
+ * Z_KERNEL_VIRT_END. Unused virtual memory past this, up to the limit
  * noted by CONFIG_KERNEL_VM_SIZE may be used for runtime memory mappings.
  *
+ * If CONFIG_ARCH_MAPS_ALL_RAM is set, we do not just map the kernel image,
+ * but have a mapping for all RAM in place. This is for special architectural
+ * purposes and does not otherwise affect page frame accounting or flags;
+ * the only guarantee is that such RAM mapping outside of the Zephyr image
+ * won't be disturbed by subsequent memory mapping calls.
+ *
  * +--------------+ <- Z_VIRT_ADDR_START
  * | Undefined VM | <- May contain ancillary regions like x86_64's locore
- * +--------------+ <- Z_BOOT_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
+ * +--------------+ <- Z_KERNEL_VIRT_START (often == Z_VIRT_ADDR_START)
  * | Mapping for  |
  * | main kernel  |
  * | image        |
  * |              |
  * |              |
- * +--------------+ <- Z_BOOT_KERNEL_VIRT_END
+ * +--------------+ <- Z_FREE_VM_START
  * |              |
  * | Unused,      |
  * | Available VM |
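
The FOREACH-style macro whose tail appears at the top of this hunk walks a
region one CONFIG_MMU_PAGE_SIZE step at a time. A standalone sketch of the
same page-granular walk, using an illustrative page size and region rather
than the Zephyr configuration symbols:

/* Sketch only: PAGE_SIZE, region_base and region_size are made-up values. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
	uintptr_t region_base = 0x40000000U;
	size_t region_size = 4U * PAGE_SIZE;

	for (uintptr_t pos = region_base;
	     pos < region_base + region_size; pos += PAGE_SIZE) {
		printf("page at 0x%lx\n", (unsigned long)pos);
	}

	return 0;
}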
@@ -175,7 +182,7 @@ static void *virt_region_get(size_t size)
 {
 	uint8_t *dest_addr;
 
-	if ((mapping_pos - size) < Z_KERNEL_VIRT_END) {
+	if ((mapping_pos - size) < Z_FREE_VM_START) {
 		LOG_ERR("insufficient virtual address space (requested %zu)",
 			size);
 		return NULL;
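
virt_region_get() hands out virtual address space from the top down, so the
updated check refuses a request once the allocation cursor would drop below
Z_FREE_VM_START instead of Z_KERNEL_VIRT_END. A minimal sketch of that
descending reservation with a floor check; vm_floor, alloc_pos and
reserve_virt_region() are hypothetical names, not Zephyr internals:

#include <stddef.h>
#include <stdint.h>

static uintptr_t vm_floor = 0x40100000U;  /* lowest address we may hand out */
static uintptr_t alloc_pos = 0x40800000U; /* cursor, grows downward from the VM top */

static void *reserve_virt_region(size_t size)
{
	if ((alloc_pos - size) < vm_floor) {
		return NULL; /* insufficient virtual address space */
	}

	alloc_pos -= size;
	return (void *)alloc_pos;
}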
@@ -474,14 +481,6 @@ size_t k_mem_region_align(uintptr_t *aligned_phys, size_t *aligned_size,
 	return addr_offset;
 }
 
-#define VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
-			 CONFIG_SRAM_BASE_ADDRESS)
-
-/* Only applies to boot RAM mappings within the Zephyr image that have never
- * been remapped or paged out. Never use this unless you know exactly what you
- * are doing.
- */
-#define BOOT_VIRT_TO_PHYS(virt) ((uintptr_t)(((uint8_t *)virt) + VM_OFFSET))
 
 #ifdef CONFIG_USERSPACE
 void z_kernel_map_fixup(void)
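
The removed VM_OFFSET/BOOT_VIRT_TO_PHYS pair converted a boot-mapped virtual
address inside the Zephyr image to its physical counterpart using the fixed
delta between the kernel's virtual base (CONFIG_KERNEL_VM_BASE +
CONFIG_KERNEL_VM_OFFSET) and CONFIG_SRAM_BASE_ADDRESS; callers below now use
the shared Z_BOOT_VIRT_TO_PHYS instead. A sketch of the same constant-offset
idea with made-up bases and its own sign convention; this is not the Zephyr
definition of Z_BOOT_VIRT_TO_PHYS:

#include <stdint.h>

#define EXAMPLE_PHYS_BASE 0x00100000UL /* where RAM actually lives */
#define EXAMPLE_VIRT_BASE 0xC0100000UL /* where the boot mapping places the image */
#define EXAMPLE_VM_DELTA  (EXAMPLE_VIRT_BASE - EXAMPLE_PHYS_BASE)

/* Only meaningful for addresses inside the original boot mapping that have
 * never been remapped or paged out.
 */
static inline uintptr_t example_boot_virt_to_phys(void *virt)
{
	return (uintptr_t)virt - EXAMPLE_VM_DELTA;
}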
@@ -500,7 +499,7 @@ void z_kernel_map_fixup(void)
 
 	if (kobject_size != 0) {
 		arch_mem_map(kobject_page_begin,
-			     BOOT_VIRT_TO_PHYS(kobject_page_begin),
+			     Z_BOOT_VIRT_TO_PHYS(kobject_page_begin),
 			     kobject_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
 	}
 }
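
arch_mem_map() works at page granularity, which is why the kobject region is
handed in with a begin address and size that are presumably page-aligned, as
the name kobject_page_begin suggests. A generic sketch of rounding an
arbitrary region to page boundaries, in the spirit of k_mem_region_align()
from the previous hunk; the helper name and page size are illustrative, not
the Zephyr implementation:

#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL

static size_t example_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
				   uintptr_t addr, size_t size)
{
	size_t offset = addr % EXAMPLE_PAGE_SIZE;

	/* Round the start down to a page boundary... */
	*aligned_addr = addr - offset;

	/* ...and round the length up so the aligned region still covers
	 * [addr, addr + size).
	 */
	*aligned_size = ((size + offset + EXAMPLE_PAGE_SIZE - 1) /
			 EXAMPLE_PAGE_SIZE) * EXAMPLE_PAGE_SIZE;

	return offset; /* how far addr sits into its page */
}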
@@ -527,7 +526,7 @@ void z_mem_manage_init(void)
 	 */
 	VIRT_FOREACH(Z_KERNEL_VIRT_START, Z_KERNEL_VIRT_SIZE, addr)
 	{
-		pf = z_phys_to_page_frame(BOOT_VIRT_TO_PHYS(addr));
+		pf = z_phys_to_page_frame(Z_BOOT_VIRT_TO_PHYS(addr));
 		frame_mapped_set(pf, addr);
 
 		/* TODO: for now we pin the whole Zephyr image. Demand paging
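
z_mem_manage_init() walks the boot mapping of the kernel image and marks the
corresponding page frames as mapped (and, per the TODO, pinned for now). A
simplified model of the phys-to-frame lookup that z_phys_to_page_frame()
performs; the struct layout, array and base address are illustrative, not the
Zephyr data structures:

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096UL
#define EXAMPLE_RAM_BASE  0x00100000UL
#define EXAMPLE_NUM_PAGES 256U

struct example_page_frame {
	void *mapped_addr; /* virtual address this frame is currently mapped at */
	bool pinned;       /* never evict (e.g. the kernel image at boot) */
};

static struct example_page_frame example_frames[EXAMPLE_NUM_PAGES];

static inline struct example_page_frame *example_phys_to_frame(uintptr_t phys)
{
	/* Index the frame array by physical page number relative to RAM base */
	return &example_frames[(phys - EXAMPLE_RAM_BASE) / EXAMPLE_PAGE_SIZE];
}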