diff --git a/arch/x86/core/fatal.c b/arch/x86/core/fatal.c index 07bd2315b1579..9e38c6a335b82 100644 --- a/arch/x86/core/fatal.c +++ b/arch/x86/core/fatal.c @@ -13,6 +13,10 @@ #include <zephyr/logging/log.h> LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL); +#ifdef CONFIG_DEMAND_PAGING +#include <zephyr/kernel/mm/demand_paging.h> +#endif + #if defined(CONFIG_BOARD_QEMU_X86) || defined(CONFIG_BOARD_QEMU_X86_64) FUNC_NORETURN void arch_system_halt(unsigned int reason) { @@ -476,6 +480,20 @@ void z_x86_page_fault_handler(struct arch_esf *esf) #endif /* CONFIG_X86_KPTI */ if (was_valid_access) { /* Page fault handled, re-try */ + +#ifdef CONFIG_EVICTION_LRU + /* Currently only LRU eviction algorithm needs to be marked. + * So for now, skip it for others to avoid unnecessary + * processing. + */ + uintptr_t phys, ret; + + ret = arch_page_info_get(virt, &phys, false); + if ((ret & ARCH_DATA_PAGE_NOT_MAPPED) != ARCH_DATA_PAGE_NOT_MAPPED) { + k_mem_paging_eviction_accessed(phys); + } +#endif + return; } } diff --git a/subsys/demand_paging/eviction/Kconfig b/subsys/demand_paging/eviction/Kconfig index 2c582be201a36..7cd2983cba7a0 100644 --- a/subsys/demand_paging/eviction/Kconfig +++ b/subsys/demand_paging/eviction/Kconfig @@ -48,3 +48,16 @@ config EVICTION_NRU_PERIOD pages that are capable of being paged out. At eviction time, if a page still has the accessed property, it will be considered as recently used. endif # EVICTION_NRU + +if EVICTION_LRU + +config EVICTION_LRU_SW_SIMULATED + bool + default y if X86 + help + This is for architectures where hardware does not provide a native + means to figure out least recently used pages. Enabling this changes + the eviction selection to clear the access flag if it is set and + requeues the page frame for later consideration. 
+ +endif # EVICTION_LRU diff --git a/subsys/demand_paging/eviction/lru.c b/subsys/demand_paging/eviction/lru.c index 1156fa1a549ad..da4bab9eee520 100644 --- a/subsys/demand_paging/eviction/lru.c +++ b/subsys/demand_paging/eviction/lru.c @@ -160,14 +160,48 @@ void k_mem_paging_eviction_accessed(uintptr_t phys) struct k_mem_page_frame *k_mem_paging_eviction_select(bool *dirty_ptr) { - uint32_t head_pf_idx = LRU_PF_HEAD; + uint32_t pf_idx = LRU_PF_HEAD; + struct k_mem_page_frame *pf; + uintptr_t flags; - if (head_pf_idx == 0) { + if (pf_idx == 0) { return NULL; } - struct k_mem_page_frame *pf = idx_to_pf(head_pf_idx); - uintptr_t flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false); +#ifndef CONFIG_EVICTION_LRU_SW_SIMULATED + pf = idx_to_pf(pf_idx); + flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, false); +#else + do { + /* + * During page selection for eviction, if a page has access bit set, + * clear the access bit and put it back at the end of queue. + * + * A subtle fail safe to avoid infinite loop is that the head of queue + * when this function is entered will have its access bit cleared. + * So when we have exhausted every page and looped back to that page, + * it will be selected for eviction. + */ + pf = idx_to_pf(pf_idx); + flags = arch_page_info_get(k_mem_page_frame_to_virt(pf), NULL, true); + + if ((flags & ARCH_DATA_PAGE_ACCESSED) != ARCH_DATA_PAGE_ACCESSED) { + /* Page has not been accessed recently, so select this. */ + break; + } + + /* + * Page has been accessed recently. + * Put the page frame back to the end of queue. + * Use unlink() here since we do not need the extra logic inside remove(). + */ + lru_pf_unlink(pf_idx); + lru_pf_append(pf_idx); + + /* Grab the new head of queue. */ + pf_idx = LRU_PF_HEAD; + } while (pf_idx != 0); +#endif /* !CONFIG_EVICTION_LRU_SW_SIMULATED */ __ASSERT(k_mem_page_frame_is_evictable(pf), ""); *dirty_ptr = ((flags & ARCH_DATA_PAGE_DIRTY) != 0);