Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions arch/riscv/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,15 @@ config PMP_STACK_GUARD_MIN_SIZE
wiggle room to accommodate the eventual overflow exception
stack usage.

config CUSTOM_PMP_ENTRY
bool "Use PMP for custom protection region"
depends on RISCV_PMP && MEM_ATTR
help
Enable custom Physical Memory Protection (PMP) entries to protect
user-defined memory regions. These regions are defined in the Device
Tree using the "zephyr,memory-attr" property. This allows for hardware-
specific protection of critical data or firmware rollback segments.

# Implement the null pointer detection using the Physical Memory Protection
# (PMP) Unit.
config NULL_POINTER_EXCEPTION_DETECTION_PMP
Expand Down
158 changes: 157 additions & 1 deletion arch/riscv/core/pmp.c
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@
#include <pmp.h>
#include <zephyr/arch/arch_interface.h>
#include <zephyr/arch/riscv/csr.h>
#include <zephyr/mem_mgmt/mem_attr.h>

#include <errno.h>
#include <stdint.h>

#define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
#include <zephyr/logging/log.h>
Expand All @@ -56,6 +60,8 @@ LOG_MODULE_REGISTER(mpu);

#define PMP_NONE 0

#define PMP_CFG_W_BIT 1 /* Write permission bit in the PMP config byte */

static void print_pmp_entries(unsigned int pmp_start, unsigned int pmp_end,
unsigned long *pmp_addr, unsigned long *pmp_cfg,
const char *banner)
Expand Down Expand Up @@ -204,7 +210,7 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
return ok;
}

#ifdef CONFIG_PMP_STACK_GUARD
#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_CUSTOM_PMP_ENTRY)
static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
unsigned long *pmp_addr, unsigned long *pmp_cfg,
unsigned int index_limit)
Expand Down Expand Up @@ -354,12 +360,29 @@ void z_riscv_pmp_init(void)
unsigned long pmp_cfg[CONFIG_PMP_SLOTS / PMPCFG_STRIDE];
unsigned int index = 0;

#ifdef CONFIG_CUSTOM_PMP_ENTRY
const struct mem_attr_region_t *region;
size_t num_regions;

num_regions = mem_attr_get_regions(&region);

for (size_t idx = 0; idx < num_regions; ++idx) {
set_pmp_entry(&index, region[idx].dt_attr, (uintptr_t)(region[idx].dt_addr),
(size_t)(region[idx].dt_size), pmp_addr, pmp_cfg,
ARRAY_SIZE(pmp_addr));
}
#endif

#ifndef CONFIG_CUSTOM_PMP_ENTRY

/* The read-only area is always there for every mode */
set_pmp_entry(&index, PMP_R | PMP_X | PMP_L,
(uintptr_t)__rom_region_start,
(size_t)__rom_region_size,
pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));

#endif

#ifdef CONFIG_NULL_POINTER_EXCEPTION_DETECTION_PMP
/*
* Use a PMP slot to make region (starting at address 0x0) inaccessible
Expand All @@ -371,6 +394,17 @@ void z_riscv_pmp_init(void)
pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif

#ifdef CONFIG_CUSTOM_PMP_ENTRY
#ifndef CONFIG_PMP_STACK_GUARD
/*
* This early, the kernel init code uses the custom entry and we want to
* safeguard it as soon as possible. But we need a temporary default
* "catch all" PMP entry for MPRV to work.
*/
set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
#endif
#endif

#ifdef CONFIG_PMP_STACK_GUARD
#ifdef CONFIG_MULTITHREADING
/*
Expand Down Expand Up @@ -446,6 +480,128 @@ void z_riscv_pmp_init(void)
}
}

#if defined(CONFIG_CUSTOM_PMP_ENTRY)
/**
 * @brief Prepare M-mode for custom PMP entry handling.
 *
 * Clears both MSTATUS.MPRV and MSTATUS.MPP, then sets MPRV. With
 * MPP == 0 (U-mode) and MPRV == 1, M-mode loads and stores are checked
 * against the PMP as if they were executed in U-mode, so the custom PMP
 * entries are enforced for kernel accesses as well. Note this requires a
 * catch-all PMP entry to be in place for unrelated accesses to succeed
 * (see the MPRV catch-all set up in z_riscv_pmp_init()).
 */
void z_riscv_custom_pmp_entry_enable(void)
{
	/* Clear both bits first so MPP ends up 0 regardless of prior state */
	csr_clear(mstatus, MSTATUS_MPRV | MSTATUS_MPP);
	csr_set(mstatus, MSTATUS_MPRV);
}

/**
 * @brief Set or clear the Write (W) permission bit of a custom PMP entry.
 *
 * Looks up the Device Tree memory region at @p region_idx (as returned by
 * mem_attr_get_regions()), scans the hardware PMP address registers for the
 * entry covering that region (NAPOT encoding, or a TOR start/end pair), and
 * flips the W bit in the matching pmpcfg byte.
 *
 * @note Only supports up to 8 PMP slots (CONFIG_PMP_SLOTS <= 8), i.e. the
 *       entries held in pmpcfg0 (RV64) or pmpcfg0/pmpcfg1 (RV32).
 *
 * @param write_enable true to allow writes (set W), false to forbid (clear W).
 * @param region_idx   Index into the mem_attr region array.
 *
 * @return 0 on success, -ENOTSUP if CONFIG_PMP_SLOTS > 8, -EINVAL if
 *         region_idx is out of bounds or the config register index is
 *         unexpected, -ENOENT if no PMP entry matches the region.
 */
int riscv_pmp_set_write_permission(bool write_enable, size_t region_idx)
{
	if (CONFIG_PMP_SLOTS > 8) {
		LOG_ERR("This function only supports up to 8 PMP slots.");
		return -ENOTSUP;
	}

	const struct mem_attr_region_t *region;
	size_t num_regions = mem_attr_get_regions(&region);

	if (region_idx >= num_regions) {
		LOG_ERR("region_idx %zu is out of bounds (num_regions: %zu)", region_idx,
			num_regions);
		return -EINVAL;
	}

	uintptr_t region_start_address = region[region_idx].dt_addr;
	size_t region_size = region[region_idx].dt_size;

	/*
	 * Locate the PMP entry covering the region: either a single NAPOT
	 * entry, or a TOR pair where entry i holds the start address and
	 * entry i+1 holds the end address. For TOR, the permission bits
	 * live in the "end" entry (i + 1).
	 *
	 * NOTE(review): only pmpaddr values are compared, not the A-field of
	 * the config byte — confirm the configured regions cannot produce a
	 * TOR/NAPOT address-encoding collision.
	 */
	int entry_index = -1;
	uintptr_t target_pmpaddr_start = PMP_ADDR(region_start_address);
	uintptr_t target_pmpaddr_end = PMP_ADDR(region_start_address + region_size);
	uintptr_t target_pmpaddr_napot = PMP_ADDR_NAPOT(region_start_address, region_size);

	for (unsigned int i = 0; i < CONFIG_PMP_SLOTS; ++i) {
		uintptr_t current_pmpaddr = PMPADDR_READ(i);

		if (current_pmpaddr == target_pmpaddr_napot) {
			entry_index = i;
			break;
		}

		if (current_pmpaddr == target_pmpaddr_start &&
		    i < (CONFIG_PMP_SLOTS - 1) &&
		    PMPADDR_READ(i + 1) == target_pmpaddr_end) {
			entry_index = i + 1;
			break;
		}
	}

	if (entry_index == -1) {
		/* uintptr_t width differs between RV32/RV64: cast for a portable format */
		LOG_ERR("PMP entry for address 0x%lx not found",
			(unsigned long)region_start_address);
		return -ENOENT;
	}

	uint8_t cfg_reg_idx = entry_index / PMPCFG_STRIDE;
	uint8_t entry_in_reg = entry_index % PMPCFG_STRIDE;
	/* Each PMP entry occupies one byte of pmpcfgN; W is bit PMP_CFG_W_BIT of it */
	int bit_position = (entry_in_reg * 8) + PMP_CFG_W_BIT;
	unsigned long mask = 1UL << bit_position;

	unsigned long pmpcfg_val;

#if defined(CONFIG_64BIT)
	/*
	 * RV64: PMP configuration registers are even-numbered (pmpcfg0,
	 * pmpcfg2, ...); pmpcfg0 holds entries 0-7, which covers every slot
	 * supported by this function.
	 */
	if (cfg_reg_idx == 0) { /* Entries 0-7 are in pmpcfg0 */
		pmpcfg_val = csr_read(pmpcfg0);
		pmpcfg_val = write_enable ? (pmpcfg_val | mask) : (pmpcfg_val & ~mask);
		csr_write(pmpcfg0, pmpcfg_val);
	} else {
		LOG_ERR("cfg_reg_idx %d unexpected for <= 8 slots on RV64", cfg_reg_idx);
		return -EINVAL;
	}
#else
	/*
	 * RV32: all pmpcfg registers are valid; pmpcfg0 holds entries 0-3
	 * and pmpcfg1 holds entries 4-7.
	 */
	if (cfg_reg_idx == 0) { /* Entries 0-3 */
		pmpcfg_val = csr_read(pmpcfg0);
		pmpcfg_val = write_enable ? (pmpcfg_val | mask) : (pmpcfg_val & ~mask);
		csr_write(pmpcfg0, pmpcfg_val);
	} else if (cfg_reg_idx == 1) { /* Entries 4-7 */
		pmpcfg_val = csr_read(pmpcfg1);
		pmpcfg_val = write_enable ? (pmpcfg_val | mask) : (pmpcfg_val & ~mask);
		csr_write(pmpcfg1, pmpcfg_val);
	} else {
		LOG_ERR("cfg_reg_idx %d unexpected for <= 8 slots on RV32", cfg_reg_idx);
		return -EINVAL;
	}
#endif

	return 0;
}

#endif

/**
 * @brief Initialize the per-thread PMP register copy with global values.
*/
Expand Down
6 changes: 6 additions & 0 deletions arch/riscv/core/switch.S
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,12 @@ SECTION_FUNC(TEXT, z_riscv_switch)
mv a0, s0
#endif

#if defined(CONFIG_CUSTOM_PMP_ENTRY) && !defined(CONFIG_PMP_STACK_GUARD)
mv s0, a0
call z_riscv_custom_pmp_entry_enable
mv a0, s0
#endif

#if defined(CONFIG_PMP_STACK_GUARD)
/* Stack guard has priority over user space for PMP usage. */
mv s0, a0
Expand Down
27 changes: 27 additions & 0 deletions arch/riscv/include/pmp.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,32 @@ void z_riscv_pmp_stackguard_disable(void);
void z_riscv_pmp_usermode_init(struct k_thread *thread);
void z_riscv_pmp_usermode_prepare(struct k_thread *thread);
void z_riscv_pmp_usermode_enable(struct k_thread *thread);
#ifdef CONFIG_CUSTOM_PMP_ENTRY

/**
 * @brief Prepare M-mode so custom PMP entries are enforced.
 *
 * Clears MSTATUS.MPP and sets MSTATUS.MPRV so that M-mode loads/stores are
 * checked against the PMP. Only defined when CONFIG_CUSTOM_PMP_ENTRY is
 * enabled, hence declared under the same guard.
 */
void z_riscv_custom_pmp_entry_enable(void);

/**
 * @brief Sets the write permission for a specific PMP entry.
 *
 * Searches for the PMP entry corresponding to the Device Tree memory
 * region at the given index. Modifies the Write (W) bit in this
 * entry's PMP configuration.
 *
 * @note This function currently supports up to 8 PMP slots (CONFIG_PMP_SLOTS <= 8).
 *
 * @param write_enable If true, enables writes to the region (sets W bit).
 *                     If false, disables writes (clears W bit).
 * @param region_idx   The index of the region in the array returned
 *                     by mem_attr_get_regions(), for which to modify PMP settings.
 *
 * @return 0 on success.
 *         -EINVAL if region_idx is out of bounds.
 *         -ENOENT if the matching PMP entry is not found.
 *         -ENOTSUP if CONFIG_PMP_SLOTS > 8.
 */
int riscv_pmp_set_write_permission(bool write_enable, size_t region_idx);

#endif

#endif /* PMP_H_ */