@@ -36,7 +36,7 @@
 #define USER_VADDR_START 0
 #endif
 
-static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);
+static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr);
 
 static void *current_mmu_table = RT_NULL;
 
@@ -198,7 +198,7 @@ void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
     while (unmap_va != v_addr)
     {
         MM_PGTBL_LOCK(aspace);
-        _unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
+        _unmap_area(aspace, unmap_va);
         MM_PGTBL_UNLOCK(aspace);
         unmap_va += ARCH_PAGE_SIZE;
     }
@@ -245,8 +245,8 @@ static void _unmap_pte(rt_ubase_t *pentry, rt_ubase_t *lvl_entry[], int level)
     }
 }
 
-/* Unmaps a virtual address range from the page table. */
-static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
+/* Unmaps one region (1GB/2MB/4KB according to the actual page level) from the page table and returns the unmapped size. */
+static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr)
 {
     rt_ubase_t loop_va = __UMASKVALUE((rt_ubase_t)v_addr, PAGE_OFFSET_MASK);
     size_t unmapped = 0;
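
The new contract drops the `size` parameter because the unmap granularity now falls out of the page-table walk itself: the entry covering `v_addr` may sit at any of the three levels, and the level alone determines how much address space a single entry maps. A minimal sketch of that size computation, assuming Sv39-style constants (the names below are illustrative, not taken from RT-Thread):

#include <stddef.h>

#define PAGE_SHIFT  12   /* 4KB leaf pages                        */
#define LEVEL_BITS  9    /* 512 entries per table                 */
#define MMU_LEVELS  3    /* Sv39: level 0=1GB, 1=2MB, 2=4KB       */

/* Size covered by a single entry at a given level (0 = root). */
static inline size_t level_size(int level)
{
    return (size_t)1 << (PAGE_SHIFT + LEVEL_BITS * (MMU_LEVELS - 1 - level));
}
/* level_size(0) == 1GB, level_size(1) == 2MB, level_size(2) == 4KB */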

@@ -315,7 +315,7 @@ void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
     while (size > 0)
     {
         MM_PGTBL_LOCK(aspace);
-        unmapped = _unmap_area(aspace, v_addr, size);
+        unmapped = _unmap_area(aspace, v_addr);
         MM_PGTBL_UNLOCK(aspace);
 
         /* when unmapped == 0, region not exist in pgtbl */
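
For callers, the returned size is what keeps the loop correct: `rt_hw_mmu_unmap` can no longer step by a fixed `ARCH_PAGE_SIZE`, since one call may release a 2MB or 1GB entry in a single step. A hedged sketch of the full caller loop implied by the hunk above; the handling of `unmapped == 0` and the clamping at the end are assumptions for illustration, not taken from the patch:

/* Sketch only, not the actual RT-Thread implementation. */
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    size_t unmapped;

    while (size > 0)
    {
        MM_PGTBL_LOCK(aspace);
        unmapped = _unmap_area(aspace, v_addr);
        MM_PGTBL_UNLOCK(aspace);

        /* when unmapped == 0, region not exist in pgtbl */
        if (unmapped == 0)
            unmapped = ARCH_PAGE_SIZE;  /* assumption: skip one page and continue */

        /* a huge entry may cover more than the remaining request */
        if (unmapped > size)
            unmapped = size;

        v_addr = (char *)v_addr + unmapped;
        size -= unmapped;
    }
}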