@@ -6,46 +6,80 @@
 #include <linux/mm.h>
 #include <asm/cache.h>

-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+	pte_t *pte)
 {
-	unsigned long start;
+	unsigned long addr;
+	struct page *page;

-	start = (unsigned long) kmap_atomic(page);
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
+		return;

-	cache_wbinv_range(start, start + PAGE_SIZE);
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
+		return;

-	kunmap_atomic((void *)start);
-}
+	addr = (unsigned long) kmap_atomic(page);

-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-	unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
+	dcache_wb_range(addr, addr + PAGE_SIZE);

-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
+
+	kunmap_atomic((void *) addr);
+}

-	cache_wbinv_range(kaddr, kaddr + len);
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;

-	kunmap_atomic((void *)kaddr);
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm_range.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
 }

-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t *pte)
+void flush_icache_mm_range(struct mm_struct *mm,
+	unsigned long start, unsigned long end)
 {
-	unsigned long addr, pfn;
-	struct page *page;
+	unsigned int cpu;
+	cpumask_t others, *mask;

-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
-		return;
+	preempt_disable();

-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
 		return;
+	}
+#endif

-	addr = (unsigned long) kmap_atomic(page);
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);

-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);

-	kunmap_atomic((void *) addr);
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
+
+	preempt_enable();
 }
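
Both flush_icache_deferred() and flush_icache_mm_range() read mm->context.icache_stale_mask, so the change assumes the architecture's per-mm context has grown a cpumask recording which harts may still hold stale instruction-cache lines for this address space. That declaration is not part of this hunk; a minimal sketch of the field the code relies on (illustrative layout, not the exact arch header):

/*
 * Illustrative sketch only: the real definition would live in the
 * arch's asm/mmu.h.  Only icache_stale_mask is implied by this diff;
 * the other fields are placeholders for whatever the port already has.
 */
typedef struct {
	atomic64_t	asid;			/* placeholder */
	void		*vdso;			/* placeholder */
	/* Harts whose I-cache may still be stale for this mm. */
	cpumask_t	icache_stale_mask;
} mm_context_t;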
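
Deferring the invalidate is only safe if every hart consumes its pending flush before it can run user code from that mm again, which is what flush_icache_deferred() provides. A hedged sketch of how a context-switch path could consume it; install_mmu_context() is a placeholder for whatever the port already does to switch page tables and ASID, not a real csky API:

/*
 * Hedged sketch: consume a deferred I-cache flush at context-switch
 * time, before user code from `next` executes on this CPU.
 * install_mmu_context() is a placeholder, not a real csky function.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (prev != next) {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		install_mmu_context(next);	/* placeholder */
	}

	/* Invalidate this hart's I$ now if a remote flush marked it stale. */
	flush_icache_deferred(next);
}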
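
One likely consumer of flush_icache_mm_range() is the copy_to_user_page() hook, which the kernel uses when code such as ptrace patches another task's text through a kernel mapping: the dirty D-cache lines must be written back and the I-cache of every hart running that mm invalidated (or marked stale). A hedged sketch of such a wrapper, not the exact csky header:

/*
 * Hedged sketch of routing copy_to_user_page() through the new helper:
 * write back the patched bytes from the D-cache, then invalidate (or
 * defer-invalidate) the I-cache only for executable mappings.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
do {									\
	memcpy(dst, src, len);						\
	if ((vma)->vm_flags & VM_EXEC) {				\
		dcache_wb_range((unsigned long)(dst),			\
				(unsigned long)(dst) + (len));		\
		flush_icache_mm_range((vma)->vm_mm,			\
				(unsigned long)(dst),			\
				(unsigned long)(dst) + (len));		\
	}								\
} while (0)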