Commit 9b4861c
riscv: pmp: Support custom entries from device tree
When CONFIG_MEM_ATTR is enabled, the PMP initialization in z_riscv_pmp_init() now scans the devicetree for memory regions tagged with the 'zephyr,memory-attr' property. For each such region, a PMP entry is programmed with the base address, size, and permissions (R/W/X) specified in the devicetree node. This is facilitated by the mem_attr API. It allows more flexible, hardware-specific memory protection schemes, for example safeguarding critical areas such as firmware rollback segments or sensitive configuration data early in the boot process.

Additionally, various checks for PMP usage throughout the RISC-V port, previously conditional only on CONFIG_PMP_STACK_GUARD, have been updated to also include CONFIG_MEM_ATTR. This ensures that PMP-aware code paths (e.g. in ISRs and context switching) are correctly activated when PMP is configured via devicetree attributes, even if the stack guard feature is not enabled.

Signed-off-by: Firas Sammoura <[email protected]>
1 parent 226038a commit 9b4861c
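As a concrete illustration of the mechanism described above, the sketch below shows what a tagged region and its resulting PMP entry might look like. It is written as a C comment to match the sources in this commit; the node name, addresses, and attribute choice are hypothetical, not taken from the commit.

    /*
     * Hypothetical devicetree fragment:
     *
     *     rollback: memory@80100000 {
     *         compatible = "zephyr,memory-region";
     *         reg = <0x80100000 0x10000>;
     *         zephyr,memory-region = "ROLLBACK";
     *         zephyr,memory-attr = <( DT_MEM_RISCV_TYPE_IO_R )>;
     *     };
     *
     * With CONFIG_MEM_ATTR=y, z_riscv_pmp_init() would pick this region up
     * via mem_attr_get_regions() and program one PMP entry covering
     * 0x80100000..0x8010ffff with read-only (PMP_R) permission, before the
     * per-thread PMP machinery runs.
     */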

7 files changed: +132 -41 lines

arch/riscv/core/fatal.c
Lines changed: 1 addition & 1 deletion

@@ -224,7 +224,7 @@ void z_riscv_fault(struct arch_esf *esf)
         unsigned int reason = K_ERR_CPU_EXCEPTION;
 
         if (bad_stack_pointer(esf)) {
-#ifdef CONFIG_PMP_STACK_GUARD
+#if (defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR))
                 /*
                  * Remove the thread's PMP setting to prevent triggering a stack
                  * overflow error again due to the previous configuration.

arch/riscv/core/isr.S
Lines changed: 6 additions & 6 deletions

@@ -366,7 +366,7 @@ no_fp: /* increment _current->arch.exception_depth */
        li t1, RISCV_EXC_ECALLU
        beq t0, t1, is_user_syscall
 
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_MEM_ATTR)
        /*
         * Determine if we come from user space. If so, reconfigure the PMP for
         * kernel mode stack guard.

@@ -378,7 +378,7 @@ no_fp: /* increment _current->arch.exception_depth */
        lr a0, ___cpu_t_current_OFFSET(s0)
        call z_riscv_pmp_kernelmode_enable
 1:
-#endif /* CONFIG_PMP_STACK_GUARD */
+#endif /* CONFIG_PMP_STACK_GUARD, CONFIG_MEM_ATTR */
 
 #endif /* CONFIG_USERSPACE */

@@ -407,7 +407,7 @@ is_kernel_syscall:
        addi t0, t0, 4
        sr t0, __struct_arch_esf_mepc_OFFSET(sp)
 
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_MEM_ATTR)
        /* Re-activate PMP for m-mode */
        li t1, MSTATUS_MPP
        csrc mstatus, t1

@@ -518,7 +518,7 @@ do_irq_offload:
 #ifdef CONFIG_USERSPACE
 is_user_syscall:
 
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_MEM_ATTR)
        /*
         * We came from userspace and need to reconfigure the
         * PMP for kernel mode stack guard.

@@ -588,7 +588,7 @@ valid_syscall_id:
 
 is_interrupt:
 
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_MEM_ATTR)
 #ifdef CONFIG_USERSPACE
        /*
         * If we came from userspace then we need to reconfigure the

@@ -772,7 +772,7 @@ fp_trap_exit:
        and t0, t2, t1
        bnez t0, 1f
 
-#if defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_MEM_ATTR)
        /* Remove kernel stack guard and Reconfigure PMP for user mode */
        lr a0, ___cpu_t_current_OFFSET(s0)
        call z_riscv_pmp_usermode_enable

arch/riscv/core/pmp.c
Lines changed: 114 additions & 30 deletions

@@ -31,6 +31,7 @@
 #include <pmp.h>
 #include <zephyr/arch/arch_interface.h>
 #include <zephyr/arch/riscv/csr.h>
+#include <zephyr/mem_mgmt/mem_attr.h>
 
 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
 #include <zephyr/logging/log.h>

@@ -270,7 +271,7 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
        return ok;
 }
 
-#ifdef CONFIG_PMP_STACK_GUARD
+#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR)
 static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
                                          unsigned long *pmp_addr, unsigned long *pmp_cfg,
                                          unsigned int index_limit)

@@ -382,6 +383,49 @@ static void write_pmp_entries(unsigned int start, unsigned int end,
                           pmp_addr, pmp_cfg);
 }
 
+#ifdef CONFIG_MEM_ATTR
+/**
+ * @brief Install PMP entries from devicetree mem-attr regions.
+ *
+ * Iterates over devicetree-provided memory-attr regions and programs PMP
+ * via set_pmp_entry(). Ordering matters because PMP checks entries from lowest
+ * to highest index and uses the first entry that matches the address.
+ *
+ * @param index_p Location of the current PMP slot index to use. This index
+ *                will be updated according to the number of slots used.
+ * @param pmp_addr Array of pmpaddr values (starting at entry 0).
+ * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
+ * @param index_limit Index value representing the size of the provided arrays.
+ * @return Number of PMP slots consumed by installed mem-attr regions.
+ */
+static unsigned int set_pmp_mem_attr(unsigned int *index_p,
+                                     unsigned long *pmp_addr, unsigned long *pmp_cfg,
+                                     unsigned int index_limit)
+{
+        const struct mem_attr_region_t *region;
+        unsigned int entry_cnt = *index_p;
+        size_t num_regions;
+
+        num_regions = mem_attr_get_regions(&region);
+
+        for (size_t idx = 0; idx < num_regions; ++idx) {
+                uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+
+                if (perm) {
+                        set_pmp_entry(index_p, perm,
+                                      (uintptr_t)(region[idx].dt_addr),
+                                      (size_t)(region[idx].dt_size),
+                                      pmp_addr, pmp_cfg, index_limit);
+                }
+        }
+
+        entry_cnt = *index_p - entry_cnt;
+
+        return entry_cnt;
+}
+#endif /* CONFIG_MEM_ATTR */
+
 /**
  * @brief Abstract the last 3 arguments to set_pmp_entry() and
  *        write_pmp_entries( for m-mode.
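The doc comment on set_pmp_mem_attr() above stresses that ordering matters: the PMP consults entries from index 0 upward, and the first address match wins. Here is a minimal toy model of that first-match rule, with illustrative names and addresses (this is not code from the commit):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of PMP first-match priority: the lowest-index entry whose
     * range contains the address decides the permission. */
    struct toy_pmp_entry {
            uintptr_t start, end; /* [start, end) */
            uint8_t perm;
    };

    static uint8_t toy_pmp_lookup(const struct toy_pmp_entry *e, int n, uintptr_t addr)
    {
            for (int i = 0; i < n; i++) {
                    if (addr >= e[i].start && addr < e[i].end) {
                            return e[i].perm; /* first match wins */
                    }
            }
            return 0; /* no match: no S/U-mode access */
    }

    int main(void)
    {
            enum { R = 0x1, W = 0x2 };
            const struct toy_pmp_entry pmp[] = {
                    { 0x80000000, 0x80001000, R },     /* mem-attr region, installed early */
                    { 0x80000000, 0x80010000, R | W }, /* broader entry, installed later */
            };

            /* Prints 0x1, not 0x3: the read-only entry matches first, which is
             * why set_pmp_mem_attr() runs early in z_riscv_pmp_init(). */
            printf("perm = %#x\n", toy_pmp_lookup(pmp, 2, (uintptr_t)0x80000800));
            return 0;
    }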
@@ -441,6 +485,9 @@ void z_riscv_pmp_init(void)
 
 #ifdef CONFIG_PMP_STACK_GUARD
 #ifdef CONFIG_MULTITHREADING
+#ifdef CONFIG_SMP
+        unsigned int irq_stack_pmp_index = index;
+#endif
        /*
         * Set the stack guard for this CPU's IRQ stack by making the bottom
         * addresses inaccessible. This will never change so we do it here

@@ -450,23 +497,6 @@ void z_riscv_pmp_init(void)
                       (uintptr_t)z_interrupt_stacks[_current_cpu->id],
                       Z_RISCV_STACK_GUARD_SIZE,
                       pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-        /*
-         * This early, the kernel init code uses the IRQ stack and we want to
-         * safeguard it as soon as possible. But we need a temporary default
-         * "catch all" PMP entry for MPRV to work. Later on, this entry will
-         * be set for each thread by z_riscv_pmp_kernelmode_prepare().
-         */
-        set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-        /* Write those entries to PMP regs. */
-        write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-        /* Activate our non-locked PMP entries for m-mode */
-        csr_set(mstatus, MSTATUS_MPRV);
-
-        /* And forget about that last entry as we won't need it later */
-        index--;
 #else
        /* Without multithreading setup stack guards for IRQ and main stacks */
        set_pmp_entry(&index, PMP_NONE | PMP_L,

@@ -479,12 +509,31 @@ void z_riscv_pmp_init(void)
                       Z_RISCV_STACK_GUARD_SIZE,
                       pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 
+#endif /* CONFIG_MULTITHREADING */
+#endif
+
+#ifdef CONFIG_MEM_ATTR
+        unsigned int attr_cnt = set_pmp_mem_attr(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+        /*
+         * This early, we want the unlocked PMP entries to take effect as
+         * soon as possible. But we need a temporary default "catch all"
+         * PMP entry for MPRV to work. Later on, this entry will be set for
+         * each thread by z_riscv_pmp_kernelmode_prepare().
+         */
+        set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
        /* Write those entries to PMP regs. */
        write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-#endif /* CONFIG_MULTITHREADING */
+
+        /* Activate our non-locked PMP entries for m-mode */
+        csr_set(mstatus, MSTATUS_MPRV);
+
+        /* And forget about those temporary entries as we won't need them later */
+        index -= attr_cnt + 1;
 #else
-        /* Write those entries to PMP regs. */
-        write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+        /* Write those entries to PMP regs. */
+        write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 #endif
 
 #ifdef CONFIG_SMP

@@ -494,7 +543,7 @@ void z_riscv_pmp_init(void)
         * Make sure TOR entry sharing won't be attempted with it by
         * remembering a bogus address for those entries.
         */
-        pmp_addr[index - 1] = -1L;
+        pmp_addr[irq_stack_pmp_index] = -1L;
 #endif
 
        /* Make sure secondary CPUs produced the same values */

@@ -518,7 +567,8 @@ void z_riscv_pmp_init(void)
 /**
  * @brief Initialize the per-thread PMP register copy with global values.
  */
-#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_USERSPACE)
+#if ((defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || \
+     defined(CONFIG_MEM_ATTR)) || defined(CONFIG_USERSPACE)
 static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
                                                    unsigned long *pmp_cfg,
                                                    unsigned int index_limit)

@@ -540,9 +590,8 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
 }
 #endif
 
-#ifdef CONFIG_PMP_STACK_GUARD
-
-#ifdef CONFIG_MULTITHREADING
+#if ((defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || \
+     defined(CONFIG_MEM_ATTR))
 /**
  * @brief Prepare the PMP stackguard content for given thread.
  *

@@ -551,6 +600,8 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
 void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread)
 {
        unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread));
+
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING))
        uintptr_t stack_bottom;
 
        /* make the bottom addresses of our stack inaccessible */

@@ -565,6 +616,12 @@ void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread)
        set_pmp_entry(&index, PMP_NONE,
                      stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
                      PMP_M_MODE(thread));
+#endif /* CONFIG_PMP_STACK_GUARD */
+
+#ifdef CONFIG_MEM_ATTR
+        set_pmp_mem_attr(&index, PMP_M_MODE(thread));
+#endif /* CONFIG_MEM_ATTR */
+
        set_pmp_mprv_catchall(&index, PMP_M_MODE(thread));
 
        /* remember how many entries we use */

@@ -600,8 +657,9 @@ void z_riscv_pmp_kernelmode_enable(struct k_thread *thread)
        csr_set(mstatus, MSTATUS_MPRV);
 }
 
-#endif /* CONFIG_MULTITHREADING */
+#endif /* (CONFIG_PMP_STACK_GUARD && CONFIG_MULTITHREADING) || CONFIG_MEM_ATTR */
 
+#if (defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR))
 /**
  * @brief Remove PMP stackguard content to actual PMP registers
  */

@@ -633,7 +691,7 @@ void z_riscv_pmp_kernelmode_disable(void)
        }
 }
 
-#endif /* CONFIG_PMP_STACK_GUARD */
+#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_MEM_ATTR */
 
 #ifdef CONFIG_USERSPACE
 
@@ -699,8 +757,34 @@ static void resync_pmp_domain(struct k_thread *thread,
                        continue;
                }
 
-                ok = set_pmp_entry(&index, part->attr.pmp_attr,
-                                   part->start, part->size, PMP_U_MODE(thread));
+#ifdef CONFIG_MEM_ATTR
+                const struct mem_attr_region_t *region;
+                uint8_t attr_mask = PMP_R | PMP_W | PMP_X;
+
+                for (int idx = 0; idx < mem_attr_get_regions(&region); idx++) {
+                        uintptr_t dt_start = (uintptr_t)(region[idx].dt_addr);
+                        uintptr_t dt_end = dt_start + (size_t)(region[idx].dt_size);
+                        bool covered = false;
+
+                        /* No overlap at all, skip this memory region */
+                        if ((part->start + part->size) <= dt_start || part->start >= dt_end) {
+                                continue;
+                        }
+
+                        covered = part->start >= dt_start && (part->start + part->size) <= dt_end;
+                        __ASSERT(covered, "No allowed partition partially overlaps memory region");
+
+                        attr_mask = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+                        break;
+                }
+
+                ok = set_pmp_entry(&index, part->attr.pmp_attr & attr_mask, part->start, part->size,
+                                   PMP_U_MODE(thread));
+#else
+                ok = set_pmp_entry(&index, part->attr.pmp_attr, part->start, part->size,
+                                   PMP_U_MODE(thread));
+#endif
 
                __ASSERT(ok,
                         "no PMP slot left for %d remaining partitions in domain %p",
                         remaining_partitions + 1, domain);
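The CONFIG_MEM_ATTR branch above enforces a containment rule: a user partition must either lie entirely inside a mem-attr region (the region's permissions then mask the partition's) or not touch it at all; a partial overlap trips the __ASSERT. A standalone sketch of that rule, with illustrative names and values (not code from the commit):

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* True when [p_start, p_start + p_size) overlaps [r_start, r_end).
     * Partial overlap trips the assert, mirroring resync_pmp_domain(). */
    static bool partition_in_region(uintptr_t p_start, size_t p_size,
                                    uintptr_t r_start, uintptr_t r_end)
    {
            uintptr_t p_end = p_start + p_size;

            if (p_end <= r_start || p_start >= r_end) {
                    return false; /* disjoint: region imposes no mask */
            }
            /* overlapping: must be fully contained */
            assert(p_start >= r_start && p_end <= r_end);
            return true;
    }

    int main(void)
    {
            /* fully inside 0x1000..0x3fff: the region's permission mask applies */
            assert(partition_in_region(0x2000, 0x1000, 0x1000, 0x4000));
            /* disjoint: the default R|W|X mask is kept */
            assert(!partition_in_region(0x5000, 0x1000, 0x1000, 0x4000));
            return 0;
    }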

arch/riscv/core/switch.S
Lines changed: 1 addition & 1 deletion

@@ -61,7 +61,7 @@ SECTION_FUNC(TEXT, z_riscv_switch)
        mv a0, s0
 #endif
 
-#if defined(CONFIG_PMP_STACK_GUARD)
+#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR)
        /* Stack guard has priority over user space for PMP usage. */
        mv s0, a0
        call z_riscv_pmp_kernelmode_enable

arch/riscv/core/thread.c
Lines changed: 2 additions & 2 deletions

@@ -89,15 +89,15 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
                /* Supervisor thread */
                stack_init->mepc = (unsigned long)z_thread_entry;
 
-#if defined(CONFIG_PMP_STACK_GUARD)
+#if (defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR))
                /* Enable PMP in mstatus.MPRV mode for RISC-V machine mode
                 * if thread is supervisor thread.
                 */
                stack_init->mstatus |= MSTATUS_MPRV;
 #endif /* CONFIG_PMP_STACK_GUARD */
        }
 
-#if defined(CONFIG_PMP_STACK_GUARD)
+#if (defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR))
        /* Setup PMP regions of PMP stack guard of thread. */
        z_riscv_pmp_kernelmode_prepare(thread);
 #endif /* CONFIG_PMP_STACK_GUARD */

arch/riscv/include/pmp.h
Lines changed: 7 additions & 0 deletions

@@ -7,8 +7,15 @@
 #ifndef PMP_H_
 #define PMP_H_
 
+#include <zephyr/dt-bindings/memory-attr/memory-attr-riscv.h>
+
 #define PMPCFG_STRIDE (__riscv_xlen / 8)
 
+#define DT_MEM_RISCV_TO_PMP_PERM(dt_attr) ( \
+        (((dt_attr) & DT_MEM_RISCV_TYPE_IO_R) ? PMP_R : 0) | \
+        (((dt_attr) & DT_MEM_RISCV_TYPE_IO_W) ? PMP_W : 0) | \
+        (((dt_attr) & DT_MEM_RISCV_TYPE_IO_X) ? PMP_X : 0))
+
 void z_riscv_pmp_init(void);
 void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread);
 void z_riscv_pmp_kernelmode_enable(struct k_thread *thread);
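To see what DT_MEM_RISCV_TO_PMP_PERM computes, here is a small self-contained example. The PMP_R/PMP_W/PMP_X values follow the RISC-V pmpcfg R/W/X bit layout; the DT_MEM_RISCV_TYPE_IO_* values are illustrative stand-ins, not the real binding constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real definitions live in pmp.h and
     * zephyr/dt-bindings/memory-attr/memory-attr-riscv.h. */
    #define PMP_R 0x01 /* pmpcfg R bit */
    #define PMP_W 0x02 /* pmpcfg W bit */
    #define PMP_X 0x04 /* pmpcfg X bit */
    #define DT_MEM_RISCV_TYPE_IO_R 0x1000
    #define DT_MEM_RISCV_TYPE_IO_W 0x2000
    #define DT_MEM_RISCV_TYPE_IO_X 0x4000

    /* Same shape as the macro added above */
    #define DT_MEM_RISCV_TO_PMP_PERM(dt_attr) ( \
            (((dt_attr) & DT_MEM_RISCV_TYPE_IO_R) ? PMP_R : 0) | \
            (((dt_attr) & DT_MEM_RISCV_TYPE_IO_W) ? PMP_W : 0) | \
            (((dt_attr) & DT_MEM_RISCV_TYPE_IO_X) ? PMP_X : 0))

    int main(void)
    {
            /* read + execute, e.g. code that must never be written */
            uint32_t attr = DT_MEM_RISCV_TYPE_IO_R | DT_MEM_RISCV_TYPE_IO_X;

            printf("perm = %#x\n", DT_MEM_RISCV_TO_PMP_PERM(attr)); /* 0x5 */
            return 0;
    }

A dt_attr with none of the IO_R/W/X bits maps to 0, which is why set_pmp_mem_attr() skips such regions via its if (perm) check.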

include/zephyr/arch/riscv/thread.h
Lines changed: 1 addition & 1 deletion

@@ -77,7 +77,7 @@ struct _thread_arch {
        unsigned int u_mode_pmp_end_index;
        unsigned int u_mode_pmp_update_nr;
 #endif
-#ifdef CONFIG_PMP_STACK_GUARD
+#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR)
        unsigned int m_mode_pmp_end_index;
        unsigned long m_mode_pmpaddr_regs[CONFIG_PMP_SLOTS];
        unsigned long m_mode_pmpcfg_regs[CONFIG_PMP_SLOTS / (__riscv_xlen / 8)];
