
Commit e0f1bcb

riscv: pmp: Configure entries from Device Tree via memattr
This commit enables the custom PMP entry feature to source region information from the Device Tree using the Zephyr `memattr` API, instead of direct Kconfig definitions. When `CONFIG_CUSTOM_PMP_ENTRY` is enabled, the system now iterates through memory regions tagged with `zephyr,memattr` in the Device Tree during PMP initialization in `z_riscv_pmp_init()`. For each region found, a corresponding PMP entry is set up.

This approach allows for more flexible and hardware-specific memory protection schemes defined within the Device Tree. The region's base address, size, and access permissions (R/W/X) are read from the Device Tree node attributes.

This feature is designed for applications requiring runtime control of memory permissions for critical areas, such as firmware rollback segments or sensitive configuration data. This control is established early in the boot process.

Signed-off-by: Firas Sammoura <[email protected]>
1 parent 16f4d6c commit e0f1bcb
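For context on how such a region would be described, a board or application devicetree overlay tags the memory node so that the mem_attr API picks it up at boot (the application must also enable RISCV_PMP, MEM_ATTR and the new CUSTOM_PMP_ENTRY option). The fragment below is a hypothetical sketch, not part of this commit: the node name, address, size, and attribute value are made up, and the exact property spelling and permission encoding come from the Zephyr mem_attr binding (the commit message refers to the property as `zephyr,memattr`). Whatever value the attribute cell carries is what z_riscv_pmp_init() passes to set_pmp_entry() as the PMP permission bits.

/ {
	soc {
		/* Hypothetical node: a 4 KiB region to be covered by a custom
		 * PMP entry at boot. All names and values are illustrative.
		 */
		rollback_sram: memory@80040000 {
			compatible = "mmio-sram";
			reg = <0x80040000 0x1000>;
			/* Consumed as region[idx].dt_attr, i.e. the PMP R/W/X
			 * permission bits; the value here is a placeholder.
			 */
			zephyr,memory-attr = <0x3>;
		};
	};
};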

4 files changed: 55 additions, 1 deletion


arch/riscv/Kconfig

Lines changed: 8 additions & 0 deletions
@@ -432,6 +432,14 @@ config PMP_STACK_GUARD_MIN_SIZE
 	  wiggle room to accommodate the eventual overflow exception
 	  stack usage.
 
+config CUSTOM_PMP_ENTRY
+	bool "Use PMP for custom protection region"
+	depends on RISCV_PMP && MEM_ATTR
+	help
+	  Enable custom Physical Memory Protection (PMP) entries to protect
+	  user-defined memory regions. This is typically used for critical
+	  data or firmware rollback protection.
+
 # Implement the null pointer detection using the Physical Memory Protection
 # (PMP) Unit.
 config NULL_POINTER_EXCEPTION_DETECTION_PMP

arch/riscv/core/pmp.c

Lines changed: 40 additions & 1 deletion
@@ -31,6 +31,7 @@
 #include <pmp.h>
 #include <zephyr/arch/arch_interface.h>
 #include <zephyr/arch/riscv/csr.h>
+#include <zephyr/mem_mgmt/mem_attr.h>
 
 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
 #include <zephyr/logging/log.h>
@@ -204,7 +205,7 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
 	return ok;
 }
 
-#ifdef CONFIG_PMP_STACK_GUARD
+#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_CUSTOM_PMP_ENTRY)
 static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
 					 unsigned long *pmp_addr, unsigned long *pmp_cfg,
 					 unsigned int index_limit)
@@ -354,6 +355,19 @@ void z_riscv_pmp_init(void)
 	unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
 	unsigned int index = 0;
 
+#ifdef CONFIG_CUSTOM_PMP_ENTRY
+	const struct mem_attr_region_t *region;
+	size_t num_regions;
+
+	num_regions = mem_attr_get_regions(&region);
+
+	for (size_t idx = 0; idx < num_regions; ++idx) {
+		set_pmp_entry(&index, region[idx].dt_attr, (uintptr_t)(region[idx].dt_addr),
+			      (size_t)(region[idx].dt_size), pmp_addr, pmp_cfg,
+			      ARRAY_SIZE(pmp_addr));
+	}
+#endif
+
 	/* The read-only area is always there for every mode */
 	set_pmp_entry(&index, PMP_R | PMP_X | PMP_L,
 		      (uintptr_t)__rom_region_start,
@@ -371,6 +385,17 @@ void z_riscv_pmp_init(void)
 		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 #endif
 
+#ifdef CONFIG_CUSTOM_PMP_ENTRY
+#ifndef CONFIG_PMP_STACK_GUARD
+	/*
+	 * This early, the kernel init code uses the custom entry and we want to
+	 * safeguard it as soon as possible. But we need a temporary default
+	 * "catch all" PMP entry for MPRV to work.
+	 */
+	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+#endif
+#endif
+
 #ifdef CONFIG_PMP_STACK_GUARD
 #ifdef CONFIG_MULTITHREADING
 	/*
@@ -446,6 +471,20 @@ void z_riscv_pmp_init(void)
 	}
 }
 
+#if defined(CONFIG_CUSTOM_PMP_ENTRY)
+/**
+ * @brief Prepare M-mode for custom PMP entry handling.
+ *
+ * Configures the Machine Status Register (mstatus) by clearing MPP and setting MPRV to control
+ * the memory privilege context for PMP access or configuration.
+ */
+void z_riscv_custom_pmp_entry_enable(void)
+{
+	csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP);
+	csr_set(mstatus, MSTATUS_MPRV);
+}
+#endif
+
 /**
  * @Brief Initialize the per-thread PMP register copy with global values.
  */

arch/riscv/core/switch.S

Lines changed: 6 additions & 0 deletions
@@ -61,6 +61,12 @@ SECTION_FUNC(TEXT, z_riscv_switch)
 	mv a0, s0
 #endif
 
+#if defined(CONFIG_CUSTOM_PMP_ENTRY) && !defined(CONFIG_PMP_STACK_GUARD)
+	mv s0, a0
+	call z_riscv_custom_pmp_entry_enable
+	mv a0, s0
+#endif
+
 #if defined(CONFIG_PMP_STACK_GUARD)
 	/* Stack guard has priority over user space for PMP usage. */
 	mv s0, a0

arch/riscv/include/pmp.h

Lines changed: 1 addition & 0 deletions
@@ -14,5 +14,6 @@ void z_riscv_pmp_stackguard_disable(void);
 void z_riscv_pmp_usermode_init(struct k_thread *thread);
 void z_riscv_pmp_usermode_prepare(struct k_thread *thread);
 void z_riscv_pmp_usermode_enable(struct k_thread *thread);
+void z_riscv_custom_pmp_entry_enable(void);
 
 #endif /* PMP_H_ */
