 #include <linux/sched.h>
 #include <linux/page_table_check.h>
 
+static inline void emit_pte_barriers(void)
+{
+	/*
+	 * These barriers are emitted under certain conditions after a pte entry
+	 * was modified (see e.g. __set_pte_complete()). The dsb makes the store
+	 * visible to the table walker. The isb ensures that any previous
+	 * speculative "invalid translation" marker that is in the CPU's
+	 * pipeline gets cleared, so that any access to that address after
+	 * setting the pte to valid won't cause a spurious fault. If the thread
+	 * gets preempted after storing to the pgtable but before emitting these
+	 * barriers, __switch_to() emits a dsb which ensures the walker gets to
+	 * see the store. There is no guarantee of an isb being issued though.
+	 * This is safe because it will still get issued (albeit on a
+	 * potentially different CPU) when the thread starts running again,
+	 * before any access to the address.
+	 */
+	dsb(ishst);
+	isb();
+}
+
+static inline void queue_pte_barriers(void)
+{
+	unsigned long flags;
+
+	VM_WARN_ON(in_interrupt());
+	flags = read_thread_flags();
+
+	if (flags & BIT(TIF_LAZY_MMU)) {
+		/* Avoid the atomic op if already set. */
+		if (!(flags & BIT(TIF_LAZY_MMU_PENDING)))
+			set_thread_flag(TIF_LAZY_MMU_PENDING);
+	} else {
+		emit_pte_barriers();
+	}
+}
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	VM_WARN_ON(in_interrupt());
+	VM_WARN_ON(test_thread_flag(TIF_LAZY_MMU));
+
+	set_thread_flag(TIF_LAZY_MMU);
+}
+
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	if (test_and_clear_thread_flag(TIF_LAZY_MMU_PENDING))
+		emit_pte_barriers();
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
+{
+	arch_flush_lazy_mmu_mode();
+	clear_thread_flag(TIF_LAZY_MMU);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 
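For context, the hooks added above plug into the generic lazy MMU mode API, so a caller that updates a batch of kernel PTEs pays for a single barrier pair at the end instead of one per entry. Below is a minimal sketch of that usage pattern; the helper name and loop are hypothetical and only illustrate the intent, while set_pte(), pfn_pte(), arch_enter_lazy_mmu_mode() and arch_leave_lazy_mmu_mode() are the existing interfaces the patch targets.

```c
/*
 * Illustrative sketch only: a hypothetical batch update of kernel PTEs
 * showing how it interacts with the hooks added in this hunk.
 */
static void map_kernel_range_sketch(pte_t *ptep, unsigned long nr,
				    unsigned long pfn, pgprot_t prot)
{
	unsigned long i;

	arch_enter_lazy_mmu_mode();	/* sets TIF_LAZY_MMU */

	for (i = 0; i < nr; i++)
		/* each store queues the barriers instead of emitting them */
		set_pte(ptep + i, pfn_pte(pfn + i, prot));

	/* one dsb(ishst) + isb for the whole batch, then TIF_LAZY_MMU cleared */
	arch_leave_lazy_mmu_mode();
}
```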
|
@@ -326,10 +383,8 @@ static inline void __set_pte_complete(pte_t pte)
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * has the necessary barriers.
	 */
-	if (pte_valid_not_user(pte)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pte_valid_not_user(pte))
+		queue_pte_barriers();
 }
 
 static inline void __set_pte(pte_t *ptep, pte_t pte)
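To make the deferral in __set_pte_complete() concrete, the sequence below traces the thread-flag state for two back-to-back kernel PTE writes inside lazy MMU mode. It is a walkthrough of the code in this patch, not additional kernel code; ptep0/ptep1 and pte0/pte1 are placeholder names.

```c
/* Walkthrough of the flag transitions driven by the hunks above. */
arch_enter_lazy_mmu_mode();	/* TIF_LAZY_MMU set, nothing pending */

__set_pte(ptep0, pte0);		/* pte_valid_not_user() -> queue_pte_barriers():
				 * TIF_LAZY_MMU is set, so TIF_LAZY_MMU_PENDING
				 * gets set; no dsb/isb emitted yet */

__set_pte(ptep1, pte1);		/* both flags already set: no atomic op,
				 * still no barriers */

arch_leave_lazy_mmu_mode();	/* PENDING was set -> emit_pte_barriers():
				 * a single dsb(ishst); isb() covers both
				 * stores; both flags are cleared */
```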
|
@@ -801,10 +856,8 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 
 	WRITE_ONCE(*pmdp, pmd);
 
-	if (pmd_valid(pmd)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pmd_valid(pmd))
+		queue_pte_barriers();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
|
@@ -869,10 +922,8 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 
 	WRITE_ONCE(*pudp, pud);
 
-	if (pud_valid(pud)) {
-		dsb(ishst);
-		isb();
-	}
+	if (pud_valid(pud))
+		queue_pte_barriers();
 }
 
 static inline void pud_clear(pud_t *pudp)
|
@@ -951,8 +1002,7 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 	}
 
 	WRITE_ONCE(*p4dp, p4d);
-	dsb(ishst);
-	isb();
+	queue_pte_barriers();
 }
 
 static inline void p4d_clear(p4d_t *p4dp)
|
@@ -1080,8 +1130,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 	}
 
 	WRITE_ONCE(*pgdp, pgd);
-	dsb(ishst);
-	isb();
+	queue_pte_barriers();
 }
 
 static inline void pgd_clear(pgd_t *pgdp)
|
|