94 changes: 69 additions & 25 deletions arch/arm64/core/mmu.c
@@ -28,9 +28,13 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

static uint64_t xlat_tables[CONFIG_MAX_XLAT_TABLES * Ln_XLAT_NUM_ENTRIES]
__aligned(Ln_XLAT_NUM_ENTRIES * sizeof(uint64_t));
static uint16_t xlat_use_count[CONFIG_MAX_XLAT_TABLES];
static int xlat_use_count[CONFIG_MAX_XLAT_TABLES];
static struct k_spinlock xlat_lock;

/* Usage count value range */
#define XLAT_PTE_COUNT_MASK GENMASK(15, 0)
#define XLAT_REF_COUNT_UNIT BIT(16)

/* Returns a reference to a free table */
static uint64_t *new_table(void)
{
@@ -39,9 +43,9 @@ static uint64_t *new_table(void)

/* Look for a free table. */
for (i = 0U; i < CONFIG_MAX_XLAT_TABLES; i++) {
if (xlat_use_count[i] == 0U) {
if (xlat_use_count[i] == 0) {
table = &xlat_tables[i * Ln_XLAT_NUM_ENTRIES];
xlat_use_count[i] = 1U;
xlat_use_count[i] = XLAT_REF_COUNT_UNIT;
MMU_DEBUG("allocating table [%d]%p\n", i, table);
return table;
}
@@ -59,31 +63,74 @@ static inline unsigned int table_index(uint64_t *pte)
return i;
}

/* Makes a table free for reuse. */
static void free_table(uint64_t *table)
/* Adjusts usage count and returns current count. */
static int table_usage(uint64_t *table, int adjustment)
{
unsigned int i = table_index(table);
int prev_count = xlat_use_count[i];
int new_count = prev_count + adjustment;

MMU_DEBUG("freeing table [%d]%p\n", i, table);
__ASSERT(xlat_use_count[i] == 1U, "table still in use");
xlat_use_count[i] = 0U;
if (IS_ENABLED(DUMP_PTE) || new_count == 0) {
MMU_DEBUG("table [%d]%p: usage %#x -> %#x\n", i, table, prev_count, new_count);
}

__ASSERT(new_count >= 0,
"table use count underflow");
__ASSERT(new_count == 0 || new_count >= XLAT_REF_COUNT_UNIT,
"table in use with no reference to it");
__ASSERT((new_count & XLAT_PTE_COUNT_MASK) <= Ln_XLAT_NUM_ENTRIES,
"table PTE count overflow");

xlat_use_count[i] = new_count;
return new_count;
}

/* Adjusts usage count and returns current count. */
static int table_usage(uint64_t *table, int adjustment)
static inline void inc_table_ref(uint64_t *table)
{
unsigned int i = table_index(table);
table_usage(table, XLAT_REF_COUNT_UNIT);
}

xlat_use_count[i] += adjustment;
__ASSERT(xlat_use_count[i] > 0, "usage count underflow");
return xlat_use_count[i];
static inline void dec_table_ref(uint64_t *table)
{
int ref_unit = XLAT_REF_COUNT_UNIT;

table_usage(table, -ref_unit);
}

static inline bool is_table_unused(uint64_t *table)
{
return table_usage(table, 0) == 1;
return (table_usage(table, 0) & XLAT_PTE_COUNT_MASK) == 0;
}

#ifdef CONFIG_TEST
/* Hooks to let test code peek at table states */

int arm64_mmu_nb_free_tables(void)
{
int count = 0;

for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
if (xlat_use_count[i] == 0) {
count++;
}
}

return count;
}

int arm64_mmu_tables_total_usage(void)
{
int count = 0;

for (int i = 0; i < CONFIG_MAX_XLAT_TABLES; i++) {
count += xlat_use_count[i];
}

return count;
}

#endif /* CONFIG_TEST */

static inline bool is_free_desc(uint64_t desc)
{
return (desc & PTE_DESC_TYPE_MASK) == PTE_INVALID_DESC;
@@ -225,7 +272,6 @@ static uint64_t *expand_to_table(uint64_t *pte, unsigned int level)

/* Link the new table in place of the pte it replaces */
set_pte_table_desc(pte, table, level);
table_usage(table, 1);

return table;
}
@@ -300,7 +346,7 @@ static int set_mapping(struct arm_mmu_ptables *ptables,
/* recursively free unused tables if any */
while (level != BASE_XLAT_LEVEL &&
is_table_unused(pte)) {
free_table(pte);
dec_table_ref(pte);
pte = ptes[--level];
set_pte_block_desc(pte, 0, level);
table_usage(pte, -1);
@@ -347,8 +393,8 @@ static uint64_t *dup_table(uint64_t *src_table, unsigned int level)
}

dst_table[i] = src_table[i];
if (is_table_desc(src_table[i], level)) {
table_usage(pte_desc_table(src_table[i]), 1);
if (is_table_desc(dst_table[i], level)) {
inc_table_ref(pte_desc_table(dst_table[i]));
}
if (!is_free_desc(dst_table[i])) {
table_usage(dst_table, 1);
@@ -388,8 +434,7 @@ static int privatize_table(uint64_t *dst_table, uint64_t *src_table,
return -ENOMEM;
}
set_pte_table_desc(&dst_table[i], dst_subtable, level);
table_usage(dst_subtable, 1);
table_usage(src_subtable, -1);
dec_table_ref(src_subtable);
}

ret = privatize_table(dst_subtable, src_subtable,
@@ -436,15 +481,14 @@ static void discard_table(uint64_t *table, unsigned int level)

for (i = 0U; i < Ln_XLAT_NUM_ENTRIES; i++) {
if (is_table_desc(table[i], level)) {
table_usage(pte_desc_table(table[i]), -1);
discard_table(pte_desc_table(table[i]), level + 1);
dec_table_ref(pte_desc_table(table[i]));
}
if (!is_free_desc(table[i])) {
table[i] = 0U;
table_usage(table, -1);
}
}
free_table(table);
}

static int globalize_table(uint64_t *dst_table, uint64_t *src_table,
@@ -497,15 +541,15 @@ static int globalize_table(uint64_t *dst_table, uint64_t *src_table,
table_usage(dst_table, -1);
}
if (is_table_desc(src_table[i], level)) {
table_usage(pte_desc_table(src_table[i]), 1);
inc_table_ref(pte_desc_table(src_table[i]));
}
dst_table[i] = src_table[i];
debug_show_pte(&dst_table[i], level);

if (old_table) {
/* we can discard the whole branch */
table_usage(old_table, -1);
discard_table(old_table, level + 1);
dec_table_ref(old_table);
}
}

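A note on the counter encoding introduced above: each xlat_use_count[] entry now packs two values into a single int. The low 16 bits (XLAT_PTE_COUNT_MASK) count the non-free PTEs held in the table, while every table descriptor pointing at the table adds XLAT_REF_COUNT_UNIT on top, so new_table() hands out tables holding exactly one reference and zero PTEs. A table only becomes allocatable again once the whole counter drops back to zero, which is why the old free_table() disappears in favor of dec_table_ref(). Here is a minimal standalone sketch of the arithmetic; the pte_count()/ref_count() helpers are illustrative, not part of the patch:

#include <stdio.h>

#define XLAT_PTE_COUNT_MASK 0xffff     /* GENMASK(15, 0) */
#define XLAT_REF_COUNT_UNIT (1 << 16)  /* BIT(16) */

/* Low 16 bits: non-free PTEs stored in the table. */
static int pte_count(int usage)
{
	return usage & XLAT_PTE_COUNT_MASK;
}

/* High bits: table descriptors referencing the table. */
static int ref_count(int usage)
{
	return usage / XLAT_REF_COUNT_UNIT;
}

int main(void)
{
	int usage = XLAT_REF_COUNT_UNIT; /* as returned by new_table() */

	usage += 3;                      /* three PTEs populated */
	usage += XLAT_REF_COUNT_UNIT;    /* a second parent links the table */

	printf("refs=%d ptes=%d\n", ref_count(usage), pte_count(usage));
	/* prints: refs=2 ptes=3 */

	usage -= XLAT_REF_COUNT_UNIT;    /* one parent drops its reference */
	printf("ptes left=%d\n", pte_count(usage));
	/* prints: ptes left=3 -- still in use despite the dropped reference */

	return 0;
}

This split is also what the reworked is_table_unused() expresses: masking with XLAT_PTE_COUNT_MASK makes "unused" mean "holds no PTEs", independent of how many parents still reference the table.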
13 changes: 13 additions & 0 deletions tests/arch/arm64/arm64_mmu/CMakeLists.txt
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(arm64_mmu)

target_include_directories(app PRIVATE
${ZEPHYR_BASE}/kernel/include
${ZEPHYR_BASE}/arch/${ARCH}/include
)

target_sources(app PRIVATE src/main.c)
9 changes: 9 additions & 0 deletions tests/arch/arm64/arm64_mmu/prj.conf
@@ -0,0 +1,9 @@
CONFIG_ASSERT=y
CONFIG_LOG=y
CONFIG_LOG_MODE_MINIMAL=y
CONFIG_ZTEST=y
CONFIG_ARM_MMU=y
CONFIG_MMU_PAGE_SIZE=0x1000
CONFIG_ARM64_VA_BITS_36=y
CONFIG_ARM64_PA_BITS_36=y
CONFIG_MAX_XLAT_TABLES=8
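
These options pin down the translation geometry the tests depend on: 4 KB pages with 36-bit virtual and physical addresses, which in the standard ARMv8-A 4 KB-granule layout means a three-level walk starting at level 1. The sketch below (plain arithmetic, no Zephyr APIs) spells out the numbers the tests assert against:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_size = 0x1000; /* CONFIG_MMU_PAGE_SIZE */
	const uint64_t entries = page_size / sizeof(uint64_t);
	const uint64_t block_size = entries * page_size;

	printf("entries per table: %llu\n", (unsigned long long)entries);
	printf("level-2 block size: %#llx\n", (unsigned long long)block_size);
	/* prints: 512 entries, 0x200000 (2 MB) */

	/*
	 * A 36-bit VA splits into a 12-bit page offset plus 9 bits per
	 * level: level 3 covers bits 20:12, level 2 bits 29:21, level 1
	 * bits 35:30. Mapping a lone 4 KB page therefore allocates an L2
	 * and an L3 table beyond the static base table, while a 2 MB
	 * block stops at L2 -- matching the tables_used values of 2 and
	 * 1 asserted by tests 01 and 02 below.
	 */
	return 0;
}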
154 changes: 154 additions & 0 deletions tests/arch/arm64/arm64_mmu/src/main.c
@@ -0,0 +1,154 @@
/*
* Copyright (c) 2024 BayLibre, SAS
*
* SPDX-License-Identifier: Apache-2.0
*/

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <kernel_arch_interface.h>

/*
* Virtual and physical addresses used to exercise MMU page table recycling.
* These are arbitrary addresses, well away from anything already mapped
* (don't worry, the test will fail otherwise). They don't have to be
* valid, as we never access the mapped memory.
*/
#define TEST_VIRT_ADDR 0x456560000
#define TEST_PHYS_ADDR 0x123230000

/* Special test hooks in arch/arm64/core/mmu.c, compiled in under CONFIG_TEST */
extern int arm64_mmu_nb_free_tables(void);
extern int arm64_mmu_tables_total_usage(void);

/* initial states to compare against */
static int initial_nb_free_tables;
static int initial_tables_usage;

static void *arm64_mmu_test_init(void)
{
/* get initial states */
initial_nb_free_tables = arm64_mmu_nb_free_tables();
initial_tables_usage = arm64_mmu_tables_total_usage();

TC_PRINT(" Total page tables: %d\n", CONFIG_MAX_XLAT_TABLES);
TC_PRINT(" Initial free tables: %d\n", initial_nb_free_tables);
TC_PRINT(" Initial total table usage: %#x\n", initial_tables_usage);

zassert_true(initial_nb_free_tables > 1,
"initial_nb_free_tables = %d", initial_nb_free_tables);
zassert_true(initial_tables_usage > 1,
"initial_tables_usage = %d", initial_tables_usage);

return NULL;
}

static int mem_map_test(uintptr_t virt_addr, uintptr_t phys_addr, size_t size)
{
/*
* arch_mem_map() is not defined to return an error, but the
* implementation will call k_panic() if one occurs.
*/
arch_mem_map((void *)virt_addr, phys_addr, size, K_MEM_ARM_NORMAL_NC);

int mapped_nb_free_tables = arm64_mmu_nb_free_tables();
int mapped_tables_usage = arm64_mmu_tables_total_usage();

TC_PRINT(" After arch_mem_map:\n");
TC_PRINT(" current free tables: %d\n", mapped_nb_free_tables);
TC_PRINT(" current total table usage: %#x\n", mapped_tables_usage);

zassert_true(mapped_nb_free_tables < initial_nb_free_tables,
"%d vs %d", mapped_nb_free_tables, initial_nb_free_tables);
zassert_true(mapped_tables_usage > initial_tables_usage,
"%#x vs %#x", mapped_tables_usage > initial_tables_usage);

arch_mem_unmap((void *)virt_addr, size);

int unmapped_nb_free_tables = arm64_mmu_nb_free_tables();
int unmapped_tables_usage = arm64_mmu_tables_total_usage();

TC_PRINT(" After arch_mem_unmap:\n");
TC_PRINT(" current free tables: %d\n", unmapped_nb_free_tables);
TC_PRINT(" current total table usage: %#x\n", unmapped_tables_usage);

zassert_true(unmapped_nb_free_tables == initial_nb_free_tables,
"%d vs %d", unmapped_nb_free_tables, initial_nb_free_tables);
zassert_true(unmapped_tables_usage == initial_tables_usage,
"%#x vs %#x", unmapped_tables_usage > initial_tables_usage);

int tables_used = unmapped_nb_free_tables - mapped_nb_free_tables;
return tables_used;
}

ZTEST(arm64_mmu, test_arm64_mmu_01_single_page)
{
/*
* Let's map a single page to start with. This will allocate
* multiple tables to reach the deepest level.
*/
uintptr_t virt = TEST_VIRT_ADDR;
uintptr_t phys = TEST_PHYS_ADDR;
size_t size = CONFIG_MMU_PAGE_SIZE;

int tables_used = mem_map_test(virt, phys, size);

zassert_true(tables_used == 2, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_02_single_block)
{
/*
* Same thing as above, except that we expect a block mapping
* this time. Both addresses and the size must be properly aligned.
* Table allocation won't go as deep as for a page.
*/
int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
uintptr_t virt = TEST_VIRT_ADDR & ~(block_size - 1);
uintptr_t phys = TEST_PHYS_ADDR & ~(block_size - 1);

int tables_used = mem_map_test(virt, phys, block_size);

zassert_true(tables_used == 1, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_03_block_and_page)
{
/*
* Same thing as above, except that we expect a block mapping
* followed by a page mapping to exercise range splitting.
* To achieve that we simply increase the size by one page and keep
* starting addresses aligned to a block.
*/
int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
uintptr_t virt = TEST_VIRT_ADDR & ~(block_size - 1);
uintptr_t phys = TEST_PHYS_ADDR & ~(block_size - 1);
size_t size = block_size + CONFIG_MMU_PAGE_SIZE;

int tables_used = mem_map_test(virt, phys, size);

zassert_true(tables_used == 2, "used %d tables", tables_used);
}

ZTEST(arm64_mmu, test_arm64_mmu_04_page_and_block)
{
/*
* Same thing as above, except that we expect a page mapping
* followed by a block mapping to exercise range splitting.
* To achieve that we increase the size by one page and decrease
* starting addresses by one page below block alignment.
*/
int table_entries = CONFIG_MMU_PAGE_SIZE / sizeof(uint64_t);
size_t block_size = table_entries * CONFIG_MMU_PAGE_SIZE;
uintptr_t virt = (TEST_VIRT_ADDR & ~(block_size - 1)) - CONFIG_MMU_PAGE_SIZE;
uintptr_t phys = (TEST_PHYS_ADDR & ~(block_size - 1)) - CONFIG_MMU_PAGE_SIZE;
size_t size = block_size + CONFIG_MMU_PAGE_SIZE;

int tables_used = mem_map_test(virt, phys, size);

zassert_true(tables_used == 2, "used %d tables", tables_used);
}

ZTEST_SUITE(arm64_mmu, NULL, arm64_mmu_test_init, NULL, NULL, NULL);
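
For reference, the block-alignment arithmetic shared by tests 02 through 04 resolves to these concrete addresses. This is a standalone sketch: the constants are copied from the test, and the values in comments are what the expressions evaluate to:

#include <stdio.h>
#include <stdint.h>

#define TEST_VIRT_ADDR 0x456560000ULL
#define TEST_PHYS_ADDR 0x123230000ULL

int main(void)
{
	const uint64_t page = 0x1000;
	const uint64_t block = (page / sizeof(uint64_t)) * page; /* 0x200000 */

	uint64_t virt = TEST_VIRT_ADDR & ~(block - 1); /* 0x456400000 */
	uint64_t phys = TEST_PHYS_ADDR & ~(block - 1); /* 0x123200000 */

	printf("block-aligned: virt=%#llx phys=%#llx\n",
	       (unsigned long long)virt, (unsigned long long)phys);

	/* test 03: block then page, mapping [virt, virt + 0x201000) */
	/* test 04: page then block, starting one page early */
	printf("test 04 start: %#llx\n", (unsigned long long)(virt - page));
	/* prints: 0x4563ff000 */

	return 0;
}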
3 changes: 3 additions & 0 deletions tests/arch/arm64/arm64_mmu/testcase.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
tests:
arch.arm64.mmu:
platform_allow: qemu_cortex_a53