 /*
- * Copyright (c) 2006-2021, RT-Thread Development Team
+ * Copyright (c) 2006-2025 RT-Thread Development Team
  *
  * SPDX-License-Identifier: Apache-2.0
  *
@@ -43,6 +43,20 @@ static void *current_mmu_table = RT_NULL;
 volatile __attribute__((aligned(4 * 1024)))
 rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];
 
+/**
+ * @brief Switch the current address space to the specified one.
+ *
+ * This function switches the address space by updating the page table and
+ * related hardware state. The behavior depends on whether the architecture
+ * supports Address Space Identifiers (ASIDs), selected by the ARCH_USING_ASID
+ * macro definition.
+ *
+ * @param aspace Pointer to the address space structure containing the new page table.
+ *
+ * @note If ASID is supported (`ARCH_USING_ASID` is defined), the function calls
+ *       `rt_hw_asid_switch_pgtbl` to switch the page table and update the ASID.
+ *       Otherwise, it writes the `satp` CSR directly to switch the page table
+ *       and invalidates the TLB.
+ */
 #ifdef ARCH_USING_ASID
 void rt_hw_aspace_switch(rt_aspace_t aspace)
 {
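
For context, a minimal sketch of the non-ASID switch path described in the new comment, assuming Sv39 translation; the helper and its names are illustrative, not part of this patch:

```c
/* Minimal sketch (assumed Sv39): install a new root page table and flush
 * the TLB. satp holds MODE in bits 63:60 and the root-table PPN in 43:0. */
#define SATP_MODE_SV39 8UL

static inline void aspace_switch_sketch(rt_ubase_t pgtbl_pa)
{
    rt_ubase_t satp = (SATP_MODE_SV39 << 60) | (pgtbl_pa >> 12);

    __asm__ volatile("csrw satp, %0" : : "r"(satp));
    __asm__ volatile("sfence.vma" : : : "memory"); /* drop stale translations */
}
```
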
@@ -68,11 +82,13 @@ void rt_hw_asid_init(void)
 }
 #endif /* ARCH_USING_ASID */
 
+/* Get the current page table. */
 void *rt_hw_mmu_tbl_get()
 {
     return current_mmu_table;
 }
 
+/* Map a single virtual address page to a physical address page in the page table. */
 static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                          size_t attr)
 {
@@ -125,7 +141,7 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                      COMBINEPTE((rt_ubase_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                                 PAGE_DEFAULT_ATTR_NEXT);
         rt_hw_cpu_dcache_clean(mmu_l2, sizeof(*mmu_l2));
-        // declares a reference to parent page table
+        /* declares a reference to parent page table */
         rt_page_ref_inc((void *)mmu_l2, 0);
     }
     else
@@ -135,22 +151,42 @@ static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
     }
 
     RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
-    // declares a reference to parent page table
+    /* declares a reference to parent page table */
     rt_page_ref_inc((void *)mmu_l3, 0);
     *(mmu_l3 + l3_off) = COMBINEPTE((rt_ubase_t)pa, attr);
     rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
     return 0;
 }
 
-/** rt_hw_mmu_map will never override existed page table entry */
+/**
+ * @brief Map a virtual address range to a physical address range.
+ *
+ * This function maps a specified range of virtual addresses to a range of physical addresses
+ * and sets the attributes of the page table entries (PTEs). If an error occurs during the
+ * mapping process, the function automatically rolls back any partially completed mappings.
+ *
+ * @param aspace Pointer to the address space structure containing the page table information.
+ * @param v_addr The starting virtual address to be mapped.
+ * @param p_addr The starting physical address to be mapped.
+ * @param size The size of the memory to be mapped (in bytes).
+ * @param attr The attributes of the page table entries (e.g., read/write permissions, cache policies).
+ *
+ * @return On success, returns the starting virtual address `v_addr`;
+ *         on failure, returns `NULL`.
+ *
+ * @note This function will not override existing page table entries.
+ * @warning The caller must ensure that `v_addr` and `p_addr` are page-aligned
+ *          and `size` is a multiple of the page size.
+ */
 void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                     size_t size, size_t attr)
 {
     int ret = -1;
     void *unmap_va = v_addr;
     size_t npages = size >> ARCH_PAGE_SHIFT;
 
-    // TODO trying with HUGEPAGE here
+    /* TODO: try with HUGEPAGE here */
     while (npages--)
     {
         MM_PGTBL_LOCK(aspace);
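
A hypothetical call matching the contract documented above; the addresses and the `MMU_MAP_K_RWCB` attribute macro are illustrative assumptions, not taken from this patch:

```c
/* Hypothetical usage: map four pages kernel read/write, cached. */
void *va = rt_hw_mmu_map(aspace, (void *)0x90000000, (void *)0x80000000,
                         4 * ARCH_PAGE_SIZE, MMU_MAP_K_RWCB);
if (va == RT_NULL)
{
    /* mapping failed; partial mappings were already rolled back */
}
```
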
@@ -180,6 +216,7 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     return NULL;
 }
 
+/* Unmap a page table entry. */
 static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
 {
     int loop_flag = 1;
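
Background for the walks in `_unmap_pte` and `_unmap_area`: on RISC-V a PTE is a leaf exactly when one of its R/W/X permission bits is set, which is presumably what `PAGE_IS_LEAF` tests. A self-contained sketch of that rule:

```c
/* Per the RISC-V privileged spec: V=1 with R=W=X=0 marks a pointer to the
 * next-level table; any of R/W/X set makes the entry a leaf. */
#define PTE_V (1UL << 0)
#define PTE_R (1UL << 1)
#define PTE_W (1UL << 2)
#define PTE_X (1UL << 3)

static inline int pte_is_leaf_sketch(rt_ubase_t pte)
{
    return (pte & PTE_V) && (pte & (PTE_R | PTE_W | PTE_X));
}
```
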
@@ -189,12 +226,12 @@ static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
         *pentry = 0;
         rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));
 
-        // we don't handle level 0, which is maintained by caller
+        /* we don't handle level 0, which is maintained by caller */
         if (level > 0)
         {
             void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);
 
-            // decrease reference from child page to parent
+            /* decrease reference from child page to parent */
             rt_pages_free(page, 0);
 
             int free = rt_page_ref_get(page, 0);
@@ -208,6 +245,7 @@ static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
     }
 }
 
+/* Unmap a virtual address range from the page table. */
 static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
 {
     rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
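
The `unmapped >>= ARCH_INDEX_WIDTH` scaling in the walk below follows from how much address space a single PTE covers at each level; assuming Sv39 with 9-bit indices:

```c
/* Illustration (assumed Sv39): bytes covered by one PTE per level, which is
 * why `unmapped` shrinks by ARCH_INDEX_WIDTH bits for every level descended. */
#define COVERED_L1 ((size_t)ARCH_PAGE_SIZE << 18) /* 1 GiB per VPN[2] entry */
#define COVERED_L2 ((size_t)ARCH_PAGE_SIZE << 9)  /* 2 MiB per VPN[1] entry */
#define COVERED_L3 ((size_t)ARCH_PAGE_SIZE)       /* 4 KiB leaf page        */
```
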
@@ -225,7 +263,7 @@ static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
     lvl_entry[i] = ((rt_ubase_t *)aspace->page_table + lvl_off[i]);
     pentry = lvl_entry[i];
 
-    // find leaf page table entry
+    /* find leaf page table entry */
     while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
     {
         i += 1;
@@ -235,7 +273,7 @@ static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
         unmapped >>= ARCH_INDEX_WIDTH;
     }
 
-    // clear PTE & setup its
+    /* clear the PTE and free any page tables that become empty */
     if (PTE_USED(*pentry))
     {
         _unmap_pte(pentry, lvl_entry, i);
@@ -244,10 +282,30 @@ static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
     return unmapped;
 }
 
-/** unmap is different from map that it can handle multiple pages */
+/**
+ * @brief Unmap a range of virtual memory addresses from the specified address space.
+ *
+ * This function unmaps a contiguous region of virtual memory from the given
+ * address space. It handles multiple pages and ensures thread safety by
+ * locking the page table during the unmapping operation.
+ *
+ * @param aspace Pointer to the address space structure from which the memory will be unmapped.
+ * @param v_addr Starting virtual address to unmap. Must be page-aligned.
+ * @param size Size of the memory region to unmap. Must be page-aligned.
+ *
+ * @note The caller must ensure that both `v_addr` and `size` are page-aligned.
+ *
+ * @details The function operates in a loop, unmapping memory in chunks. It uses
+ * `_unmap_area` to perform the actual unmapping, called within a locked section
+ * to ensure thread safety. The loop continues until the entire region is unmapped.
+ *
+ * @see _unmap_area
+ * @note Unmap differs from map in that a single call can handle multiple pages.
+ */
 void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
 {
-    // caller guarantee that v_addr & size are page aligned
+    /* the caller guarantees that v_addr and size are page-aligned */
     if (!aspace->page_table)
     {
         return;
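
A hypothetical call mirroring the map example earlier; the region is illustrative:

```c
/* Hypothetical usage: release the four-page region mapped above. */
rt_hw_mmu_unmap(aspace, (void *)0x90000000, 4 * ARCH_PAGE_SIZE);
```
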
@@ -260,7 +318,7 @@ void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
         unmapped = _unmap_area(aspace, v_addr, size);
         MM_PGTBL_UNLOCK(aspace);
 
-        // when unmapped == 0, region not exist in pgtbl
+        /* when unmapped == 0, the region does not exist in the page table */
         if (!unmapped || unmapped > size) break;
 
         size -= unmapped;
@@ -292,11 +350,31 @@ static inline void _init_region(void *vaddr, size_t size)
 #define KERN_SPACE_SIZE ((size_t)USER_VADDR_START - 0x1000)
 #endif
 
+/**
+ * @brief Initialize the MMU (Memory Management Unit) mapping.
+ *
+ * This function initializes the MMU mapping in the following steps:
+ * 1. Check the validity of the input parameters.
+ * 2. Calculate the start and end virtual addresses from the input virtual address and size.
+ * 3. Convert the virtual addresses to PPN2 indices (see the sketch below).
+ * 4. Check the initialization of the page table: if any entry in the page table within
+ *    the specified range is non-zero, return -1.
+ * 5. Initialize the kernel address space using rt_aspace_init() and the specified
+ *    region using _init_region().
+ *
+ * @param aspace Pointer to the address space. Must not be NULL.
+ * @param v_address The starting virtual address.
+ * @param size The size of the virtual address space.
+ * @param vtable Pointer to the page table. Must not be NULL.
+ * @param pv_off The offset between virtual and physical addresses.
+ *
+ * @return Returns 0 on success. Returns -1 if any input parameter is invalid
+ *         or the page table initialization check fails.
+ */
 int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
                        rt_ubase_t *vtable, rt_ubase_t pv_off)
 {
     size_t l1_off, va_s, va_e;
-    rt_base_t level;
 
     if ((!aspace) || (!vtable))
     {
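
Step 3 above ("convert to PPN2 indices") amounts to taking VPN[2] of each boundary address. A sketch of the assumed semantics of `GET_L1` under Sv39 (the macro body here is an assumption, not the port's definition):

```c
/* Assumed behavior of GET_L1() under Sv39: VPN[2] = VA bits 38:30,
 * i.e. one index per 1 GiB of virtual address space. */
#define GET_L1_SKETCH(va) ((((rt_ubase_t)(va)) >> 30) & 0x1ff)
```
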
@@ -311,7 +389,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
         return -1;
     }
 
-    // convert address to PPN2 index
+    /* convert address to PPN2 index */
     va_s = GET_L1(va_s);
     va_e = GET_L1(va_e);
 
@@ -320,7 +398,7 @@ int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_ubase_t size,
         return -1;
     }
 
-    // vtable initialization check
+    /* vtable initialization check */
     for (l1_off = va_s; l1_off <= va_e; l1_off++)
     {
         size_t v = vtable[l1_off];
@@ -395,6 +473,22 @@ static rt_ubase_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
     return RT_NULL;
 }
 
+/**
+ * @brief Translate a virtual address to a physical address.
+ *
+ * This function translates a given virtual address (`vaddr`) to its corresponding
+ * physical address (`paddr`) using the page table in the specified address space (`aspace`).
+ *
+ * @param aspace Pointer to the address space structure containing the page table.
+ * @param vaddr The virtual address to be translated.
+ *
+ * @return The translated physical address. If the translation fails, `ARCH_MAP_FAILED` is returned.
+ *
+ * @note The function queries the page table entry (PTE) for the virtual address using `_query`.
+ *       If a valid PTE is found, the physical address is extracted and combined with the page
+ *       offset from the virtual address. If no valid PTE is found, a debug log is recorded and
+ *       `ARCH_MAP_FAILED` is returned.
+ */
 void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
 {
     int level;
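
A hypothetical lookup against the contract documented above; the address is illustrative:

```c
/* Hypothetical usage: the low 12 bits (page offset) pass through. */
void *pa = rt_hw_mmu_v2p(aspace, (void *)0x90001234);
if (pa == ARCH_MAP_FAILED)
{
    /* no valid mapping exists for this virtual address */
}
```
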
@@ -424,11 +518,29 @@ static int _cache(rt_base_t *pte)
     return 0;
 }
 
-static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) = {
+static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_base_t *pte) =
+{
     [MMU_CNTL_CACHE] = _cache,
     [MMU_CNTL_NONCACHE] = _noncache,
 };
 
+/**
+ * @brief Control the page table entries (PTEs) for a specified virtual address range.
+ *
+ * This function applies a control command (e.g., cache control) to the page table entries
+ * (PTEs) corresponding to the specified virtual address range (`vaddr` to `vaddr + size`).
+ *
+ * @param aspace Pointer to the address space structure containing the page table.
+ * @param vaddr The starting virtual address of the range.
+ * @param size The size of the virtual address range.
+ * @param cmd The control command to apply (e.g., `MMU_CNTL_CACHE`, `MMU_CNTL_NONCACHE`, etc.).
+ *
+ * @return `RT_EOK` on success, or an error code (`-RT_EINVAL` or `-RT_ENOSYS`) on failure.
+ *
+ * @note The function uses the `control_handler` array to map the command to a handler function.
+ *       It iterates over the virtual address range, queries the PTEs, and applies the handler
+ *       to each valid PTE. If the command is invalid, `-RT_ENOSYS` is returned.
+ */
 int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                       enum rt_mmu_cntl cmd)
 {
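
A hypothetical call marking a buffer region uncacheable; `dma_buf` and `dma_buf_size` are illustrative names:

```c
/* Hypothetical usage: make a DMA buffer region non-cacheable. */
int err = rt_hw_mmu_control(aspace, dma_buf, dma_buf_size, MMU_CNTL_NONCACHE);
if (err != RT_EOK)
{
    /* per the doc above: -RT_ENOSYS for an unknown command,
     * -RT_EINVAL for an invalid range */
}
```
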
@@ -471,9 +583,9 @@ int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
  * otherwise is a failure and no report will be
  * returned.
  *
- * @param aspace
- * @param mdesc
- * @param desc_nr
+ * @param aspace Pointer to the address space structure.
+ * @param mdesc Pointer to the array of memory descriptors.
+ * @param desc_nr Number of memory descriptors in the array.
  */
 void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
 {
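
A hypothetical descriptor table for this function; the `mem_desc` field order (vaddr_start, vaddr_end, paddr_start, attr), the `NORMAL_MEM` attribute, and the `rt_kernel_space` global are assumed from other RT-Thread ports and may differ here:

```c
/* Hypothetical setup of the kernel's linear mapping (values illustrative). */
struct mem_desc platform_mem_desc[] = {
    {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0FFFFFFF, /* 256 MiB of VA */
     KERNEL_VADDR_START + PV_OFFSET,                      /* backing PA    */
     NORMAL_MEM},
};

rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
                sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
```
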
@@ -575,6 +687,16 @@ void rt_hw_mem_setup_early(void)
     /* return to lower text section */
 }
 
+/**
+ * @brief Creates and initializes a new MMU page table.
+ *
+ * This function allocates a new MMU page table, copies the kernel space
+ * page table into it, and flushes the data cache to ensure consistency.
+ *
+ * @return
+ * - A pointer to the newly allocated MMU page table on success.
+ * - RT_NULL if the allocation fails.
+ */
 void *rt_hw_mmu_pgtbl_create(void)
 {
     rt_ubase_t *mmu_table;
@@ -589,7 +711,14 @@ void *rt_hw_mmu_pgtbl_create(void)
     return mmu_table;
 }
 
+/**
+ * @brief Deletes an MMU page table.
+ *
+ * This function frees the memory allocated for the given MMU page table.
+ *
+ * @param pgtbl Pointer to the MMU page table to be deleted.
+ */
 void rt_hw_mmu_pgtbl_delete(void *pgtbl)
 {
     rt_pages_free(pgtbl, 0);
-}
+}
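
Taken together, a hypothetical lifecycle for a per-process page table using the pair documented above:

```c
/* Hypothetical usage of the create/delete pair. */
void *pgtbl = rt_hw_mmu_pgtbl_create();
if (pgtbl != RT_NULL)
{
    /* ... install into an aspace and run with it ... */
    rt_hw_mmu_pgtbl_delete(pgtbl);
}
```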