 #include <pmp.h>
 #include <zephyr/arch/arch_interface.h>
 #include <zephyr/arch/riscv/csr.h>
+#include <zephyr/mem_mgmt/mem_attr.h>
 
 #define LOG_LEVEL CONFIG_MPU_LOG_LEVEL
 #include <zephyr/logging/log.h>
@@ -270,7 +271,7 @@ static bool set_pmp_entry(unsigned int *index_p, uint8_t perm,
 	return ok;
 }
 
-#ifdef CONFIG_PMP_STACK_GUARD
+#if defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR)
 static inline bool set_pmp_mprv_catchall(unsigned int *index_p,
 					 unsigned long *pmp_addr, unsigned long *pmp_cfg,
 					 unsigned int index_limit)
@@ -382,6 +383,49 @@ static void write_pmp_entries(unsigned int start, unsigned int end,
 			   pmp_addr, pmp_cfg);
 }
 
+#ifdef CONFIG_MEM_ATTR
+/**
+ * @brief Install PMP entries from devicetree mem-attr regions.
+ *
+ * Iterates over devicetree-provided memory-attr regions and programs PMP
+ * via set_pmp_entry(). Ordering matters because PMP checks entries from lowest
+ * to highest index and uses the first entry that matches the address.
+ *
+ * @param index_p Location of the current PMP slot index to use. This index
+ *                will be updated according to the number of slots used.
+ * @param pmp_addr Array of pmpaddr values (starting at entry 0).
+ * @param pmp_cfg Array of pmpcfg values (starting at entry 0).
+ * @param index_limit Index value representing the size of the provided arrays.
+ * @return Number of PMP slots consumed by installed mem-attr regions.
+ */
+static unsigned int set_pmp_mem_attr(unsigned int *index_p,
+				     unsigned long *pmp_addr, unsigned long *pmp_cfg,
+				     unsigned int index_limit)
+{
+	const struct mem_attr_region_t *region;
+	unsigned int entry_cnt = *index_p;
+	size_t num_regions;
+
+	num_regions = mem_attr_get_regions(&region);
+
+	for (size_t idx = 0; idx < num_regions; ++idx) {
+		uint8_t perm = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+
+		if (perm) {
+			set_pmp_entry(index_p, perm,
+				      (uintptr_t)(region[idx].dt_addr),
+				      (size_t)(region[idx].dt_size),
+				      pmp_addr, pmp_cfg, index_limit);
+		}
+	}
+
+	entry_cnt = *index_p - entry_cnt;
+
+	return entry_cnt;
+}
+#endif /* CONFIG_MEM_ATTR */
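+
+/*
+ * Illustrative sketch (assumption, not part of this change): the regions
+ * consumed above typically come from devicetree nodes carrying the
+ * zephyr,memory-attr property, along the lines of:
+ *
+ *     mem_ro: memory@a0000000 {
+ *         compatible = "zephyr,memory-region";
+ *         reg = <0xa0000000 0x1000>;
+ *         zephyr,memory-region = "MEM_RO";
+ *         zephyr,memory-attr = <...>;  (a RISC-V attribute value that
+ *                                       DT_MEM_RISCV_TO_PMP_PERM() maps
+ *                                       to PMP_R etc.)
+ *     };
+ *
+ * Node name, address and size here are made up for illustration.
+ */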
428+
 /**
  * @brief Abstract the last 3 arguments to set_pmp_entry() and
387431 * write_pmp_entries( for m-mode.
@@ -441,6 +485,9 @@ void z_riscv_pmp_init(void)
 
 #ifdef CONFIG_PMP_STACK_GUARD
 #ifdef CONFIG_MULTITHREADING
+#ifdef CONFIG_SMP
+	unsigned int irq_stack_pmp_index = index;
+#endif
 	/*
 	 * Set the stack guard for this CPU's IRQ stack by making the bottom
 	 * addresses inaccessible. This will never change so we do it here
@@ -450,23 +497,6 @@ void z_riscv_pmp_init(void)
 		      (uintptr_t)z_interrupt_stacks[_current_cpu->id],
 		      Z_RISCV_STACK_GUARD_SIZE,
 		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/*
-	 * This early, the kernel init code uses the IRQ stack and we want to
-	 * safeguard it as soon as possible. But we need a temporary default
-	 * "catch all" PMP entry for MPRV to work. Later on, this entry will
-	 * be set for each thread by z_riscv_pmp_kernelmode_prepare().
-	 */
-	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/* Write those entries to PMP regs. */
-	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-
-	/* Activate our non-locked PMP entries for m-mode */
-	csr_set(mstatus, MSTATUS_MPRV);
-
-	/* And forget about that last entry as we won't need it later */
-	index--;
 #else
 	/* Without multithreading setup stack guards for IRQ and main stacks */
 	set_pmp_entry(&index, PMP_NONE | PMP_L,
@@ -479,12 +509,31 @@ void z_riscv_pmp_init(void)
 		      Z_RISCV_STACK_GUARD_SIZE,
 		      pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 
+#endif /* CONFIG_MULTITHREADING */
+#endif /* CONFIG_PMP_STACK_GUARD */
+
+#ifdef CONFIG_MEM_ATTR
+	unsigned int attr_cnt = set_pmp_mem_attr(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
+	/*
+	 * This early, we want our non-locked PMP entries to take effect as
+	 * soon as possible. But we need a temporary default "catch all" PMP
+	 * entry for MPRV to work. Later on, this entry will be set for each
+	 * thread by z_riscv_pmp_kernelmode_prepare().
+	 */
+	set_pmp_mprv_catchall(&index, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+
 	/* Write those entries to PMP regs. */
 	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
-#endif /* CONFIG_MULTITHREADING */
+
+	/* Activate our non-locked PMP entries for m-mode */
+	csr_set(mstatus, MSTATUS_MPRV);
+
+	/*
+	 * Forget about those temporary entries as we won't need them later:
+	 * z_riscv_pmp_kernelmode_prepare() re-installs the mem-attr entries
+	 * and the catch-all for each thread.
+	 */
+	index -= attr_cnt + 1;
 #else
-	/* Write those entries to PMP regs. */
-	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
+	/* Write those entries to PMP regs. */
+	write_pmp_entries(0, index, true, pmp_addr, pmp_cfg, ARRAY_SIZE(pmp_addr));
 #endif
 
 #ifdef CONFIG_SMP
@@ -494,7 +543,7 @@ void z_riscv_pmp_init(void)
 	 * Make sure TOR entry sharing won't be attempted with it by
 	 * remembering a bogus address for those entries.
 	 */
-	pmp_addr[index - 1] = -1L;
+	pmp_addr[irq_stack_pmp_index] = -1L;
 #endif
 
 	/* Make sure secondary CPUs produced the same values */
@@ -518,7 +567,8 @@ void z_riscv_pmp_init(void)
 /**
  * @brief Initialize the per-thread PMP register copy with global values.
  */
-#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || defined(CONFIG_USERSPACE)
+#if ((defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || \
+	defined(CONFIG_MEM_ATTR)) || defined(CONFIG_USERSPACE)
 static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
 						   unsigned long *pmp_cfg,
 						   unsigned int index_limit)
@@ -540,9 +590,8 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
 }
 #endif
 
-#ifdef CONFIG_PMP_STACK_GUARD
-
-#ifdef CONFIG_MULTITHREADING
+#if ((defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING)) || \
+	defined(CONFIG_MEM_ATTR))
 /**
  * @brief Prepare the PMP stackguard content for given thread.
  *
@@ -551,6 +600,8 @@ static inline unsigned int z_riscv_pmp_thread_init(unsigned long *pmp_addr,
 void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread)
 {
 	unsigned int index = z_riscv_pmp_thread_init(PMP_M_MODE(thread));
+
+#if (defined(CONFIG_PMP_STACK_GUARD) && defined(CONFIG_MULTITHREADING))
 	uintptr_t stack_bottom;
 
 	/* make the bottom addresses of our stack inaccessible */
@@ -565,6 +616,12 @@ void z_riscv_pmp_kernelmode_prepare(struct k_thread *thread)
 	set_pmp_entry(&index, PMP_NONE,
 		      stack_bottom, Z_RISCV_STACK_GUARD_SIZE,
 		      PMP_M_MODE(thread));
+#endif /* CONFIG_PMP_STACK_GUARD && CONFIG_MULTITHREADING */
+
+#ifdef CONFIG_MEM_ATTR
+	set_pmp_mem_attr(&index, PMP_M_MODE(thread));
+#endif /* CONFIG_MEM_ATTR */
+
 	set_pmp_mprv_catchall(&index, PMP_M_MODE(thread));
 
 	/* remember how many entries we use */
@@ -600,8 +657,9 @@ void z_riscv_pmp_kernelmode_enable(struct k_thread *thread)
 	csr_set(mstatus, MSTATUS_MPRV);
 }
 
-#endif /* CONFIG_MULTITHREADING */
+#endif /* (CONFIG_PMP_STACK_GUARD && CONFIG_MULTITHREADING) || CONFIG_MEM_ATTR */
 
+#if (defined(CONFIG_PMP_STACK_GUARD) || defined(CONFIG_MEM_ATTR))
 /**
  * @brief Remove PMP stackguard content from actual PMP registers
  */
@@ -633,7 +691,7 @@ void z_riscv_pmp_kernelmode_disable(void)
 	}
 }
 
-#endif /* CONFIG_PMP_STACK_GUARD */
+#endif /* CONFIG_PMP_STACK_GUARD || CONFIG_MEM_ATTR */
 
 #ifdef CONFIG_USERSPACE
 
@@ -699,8 +757,34 @@ static void resync_pmp_domain(struct k_thread *thread,
 			continue;
 		}
 
-		ok = set_pmp_entry(&index, part->attr.pmp_attr,
-				   part->start, part->size, PMP_U_MODE(thread));
+#ifdef CONFIG_MEM_ATTR
+		const struct mem_attr_region_t *region;
+		uint8_t attr_mask = PMP_R | PMP_W | PMP_X;
+
+		for (int idx = 0; idx < mem_attr_get_regions(&region); idx++) {
+			uintptr_t dt_start = (uintptr_t)(region[idx].dt_addr);
+			uintptr_t dt_end = dt_start + (size_t)(region[idx].dt_size);
+			bool covered = false;
+
+			/* No overlap at all, skip this memory region */
+			if ((part->start + part->size) <= dt_start || part->start >= dt_end) {
+				continue;
+			}
+
+			covered = part->start >= dt_start && (part->start + part->size) <= dt_end;
+			__ASSERT(covered, "partition must not partially overlap a mem-attr region");
+
+			attr_mask = DT_MEM_RISCV_TO_PMP_PERM(region[idx].dt_attr);
+			break;
+		}
+
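+		/*
+		 * Worked example (illustrative, not from this change): a
+		 * partition requesting PMP_R | PMP_W that lies entirely
+		 * inside a mem-attr region whose DT_MEM_RISCV_TO_PMP_PERM()
+		 * value is PMP_R ends up with the W bit masked off below,
+		 * i.e. it is programmed read-only for u-mode.
+		 */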
+		ok = set_pmp_entry(&index, part->attr.pmp_attr & attr_mask, part->start, part->size,
+				   PMP_U_MODE(thread));
+#else
+		ok = set_pmp_entry(&index, part->attr.pmp_attr, part->start, part->size,
+				   PMP_U_MODE(thread));
+#endif
+
 		__ASSERT(ok,
 			 "no PMP slot left for %d remaining partitions in domain %p",
 			 remaining_partitions + 1, domain);