Skip to content

Commit c7be5dd

Browse files
Andrew Boie authored and nashif committed
mmu: backing stores reserve page fault room
If we evict enough pages to completely fill the backing store, through
APIs like k_mem_map(), z_page_frame_evict(), or z_mem_page_out(), this
will produce a crash the next time we try to handle a page fault.

The backing store now always reserves a free storage location for
actual page faults.

Signed-off-by: Andrew Boie <[email protected]>
1 parent cad944e commit c7be5dd

File tree

4 files changed

+62
-11
lines changed
  • kernel
  • subsys/demand_paging/backing_store
  • tests/kernel/mem_protect/demand_paging/src

4 files changed

+62
-11
lines changed

kernel/include/mmu.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -268,14 +268,21 @@ void z_eviction_init(void);
268268
* may simply generate location tokens purely as a function of pf->addr with no
269269
* other management necessary.
270270
*
271+
* This function distinguishes whether it was called on behalf of a page
272+
* fault. A free backing store location must always be reserved in order for
273+
* page faults to succeed. If the page_fault parameter is not set, this
274+
* function should return -ENOMEM even if one location is available.
275+
*
271276
* This function is invoked with interrupts locked.
272277
*
273278
* @param addr Virtual address to obtain a storage location
274279
* @param [out] location storage location token
280+
* @param page_fault Whether this request was for a page fault
275281
* @return 0 Success
276282
* @return -ENOMEM Backing store is full
277283
*/
278-
int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location);
284+
int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
285+
bool page_fault);
279286

280287
/**
281288
* Free a backing store location

kernel/mmu.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -607,7 +607,7 @@ static void page_frame_free_locked(struct z_page_frame *pf)
607607
* Returns -ENOMEM if the backing store is full
608608
*/
609609
static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
610-
bool page_in, uintptr_t *location_ptr)
610+
bool page_fault, uintptr_t *location_ptr)
611611
{
612612
uintptr_t phys;
613613
int ret;
@@ -632,12 +632,13 @@ static int page_frame_prepare_locked(struct z_page_frame *pf, bool *dirty_ptr,
632632
dirty = dirty || !z_page_frame_is_backed(pf);
633633
}
634634

635-
if (dirty || page_in) {
635+
if (dirty || page_fault) {
636636
arch_mem_scratch(phys);
637637
}
638638

639639
if (z_page_frame_is_mapped(pf)) {
640-
ret = z_backing_store_location_get(pf, location_ptr);
640+
ret = z_backing_store_location_get(pf, location_ptr,
641+
page_fault);
641642
if (ret != 0) {
642643
LOG_ERR("out of backing store memory");
643644
return -ENOMEM;

subsys/demand_paging/backing_store/ram.c

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@
5151
static char backing_store[CONFIG_MMU_PAGE_SIZE *
5252
CONFIG_BACKING_STORE_RAM_PAGES];
5353
static struct k_mem_slab backing_slabs;
54+
static unsigned int free_slabs;
5455

5556
static void *location_to_slab(uintptr_t location)
5657
{
@@ -78,17 +79,21 @@ static uintptr_t slab_to_location(void *slab)
7879
return offset;
7980
}
8081

81-
int z_backing_store_location_get(struct z_page_frame *pf,
82-
uintptr_t *location)
82+
int z_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location,
83+
bool page_fault)
8384
{
8485
int ret;
8586
void *slab;
8687

87-
ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
88-
if (ret != 0) {
88+
if ((!page_fault && free_slabs == 1) || free_slabs == 0) {
8989
return -ENOMEM;
9090
}
91+
92+
ret = k_mem_slab_alloc(&backing_slabs, &slab, K_NO_WAIT);
93+
__ASSERT(ret == 0, "slab count mismatch");
94+
(void)ret;
9195
*location = slab_to_location(slab);
96+
free_slabs--;
9297

9398
return 0;
9499
}
@@ -98,6 +103,7 @@ void z_backing_store_location_free(uintptr_t location)
98103
void *slab = location_to_slab(location);
99104

100105
k_mem_slab_free(&backing_slabs, &slab);
106+
free_slabs++;
101107
}
102108

103109
void z_backing_store_page_out(uintptr_t location)
@@ -121,4 +127,5 @@ void z_backing_store_init(void)
121127
{
122128
k_mem_slab_init(&backing_slabs, backing_store, CONFIG_MMU_PAGE_SIZE,
123129
CONFIG_BACKING_STORE_RAM_PAGES);
130+
free_slabs = CONFIG_BACKING_STORE_RAM_PAGES;
124131
}

tests/kernel/mem_protect/demand_paging/src/main.c

Lines changed: 39 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
#include <mmu.h>
1010

1111
#ifdef CONFIG_BACKING_STORE_RAM_PAGES
12-
#define EXTRA_PAGES CONFIG_BACKING_STORE_RAM_PAGES
12+
#define EXTRA_PAGES (CONFIG_BACKING_STORE_RAM_PAGES - 1)
1313
#else
1414
#error "Unsupported configuration"
1515
#endif
@@ -182,6 +182,41 @@ void test_z_mem_unpin(void)
182182
test_z_mem_page_out();
183183
}
184184

185+
/* Show that even if we map enough anonymous memory to fill the backing
186+
* store, we can still handle pagefaults.
187+
* This eats up memory so should be last in the suite.
188+
*/
189+
void test_backing_store_capacity(void)
190+
{
191+
char *mem, *ret;
192+
int key;
193+
unsigned long faults;
194+
size_t size = (((CONFIG_BACKING_STORE_RAM_PAGES - 1) - HALF_PAGES) *
195+
CONFIG_MMU_PAGE_SIZE);
196+
197+
/* Consume the rest of memory */
198+
mem = k_mem_map(size, K_MEM_PERM_RW);
199+
zassert_not_null(mem, "k_mem_map failed");
200+
201+
/* Show no memory is left */
202+
ret = k_mem_map(CONFIG_MMU_PAGE_SIZE, K_MEM_PERM_RW);
203+
zassert_is_null(ret, "k_mem_map shouldn't have succeeded");
204+
205+
key = irq_lock();
206+
faults = z_num_pagefaults_get();
207+
/* Poke all anonymous memory */
208+
for (size_t i = 0; i < HALF_BYTES; i++) {
209+
arena[i] = nums[i % 10];
210+
}
211+
for (size_t i = 0; i < size; i++) {
212+
mem[i] = nums[i % 10];
213+
}
214+
faults = z_num_pagefaults_get() - faults;
215+
irq_unlock(key);
216+
217+
zassert_not_equal(faults, 0, "should have had some pagefaults");
218+
}
219+
185220
/* ztest main entry*/
186221
void test_main(void)
187222
{
@@ -191,7 +226,8 @@ void test_main(void)
191226
ztest_unit_test(test_z_mem_page_out),
192227
ztest_unit_test(test_z_mem_page_in),
193228
ztest_unit_test(test_z_mem_pin),
194-
ztest_unit_test(test_z_mem_unpin)
195-
);
229+
ztest_unit_test(test_z_mem_unpin),
230+
ztest_unit_test(test_backing_store_capacity));
231+
196232
ztest_run_test_suite(test_demand_paging);
197233
}

0 commit comments

Comments (0)