 #include <asm/fixmap.h>

 #define _PAGE_PRESENT	0x001
-#define _PAGE_NEWPAGE	0x002
+#define _PAGE_NEEDSYNC	0x002
 #define _PAGE_RW	0x020
 #define _PAGE_USER	0x040
 #define _PAGE_ACCESSED	0x080
@@ -79,22 +79,22 @@ extern unsigned long end_iomem;
  */
 #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

-#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
+#define pte_clear(mm, addr, xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEEDSYNC))

-#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
+#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
 #define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
+#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEEDSYNC; } while (0)

-#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
-#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
+#define pmd_needsync(x)  (pmd_val(x) & _PAGE_NEEDSYNC)
+#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEEDSYNC)

-#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
-#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
+#define pud_needsync(x)  (pud_val(x) & _PAGE_NEEDSYNC)
+#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEEDSYNC)

-#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
-#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)
+#define p4d_needsync(x)  (p4d_val(x) & _PAGE_NEEDSYNC)
+#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEEDSYNC)

 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
 #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
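Worth noting in this hunk: pmd_clear() writes _PAGE_NEEDSYNC rather than zero, and pmd_none() masks that bit back out, so a cleared-but-not-yet-synced entry still counts as empty while staying flagged for the host-side sync. A minimal userspace sketch of that interplay (toy pmd_t and pmd_val(), not the kernel's types):

#include <assert.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002

/* toy stand-ins for the kernel's pmd_t / pmd_val() */
typedef unsigned long pmd_t;
#define pmd_val(x)	(x)

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEEDSYNC))
#define pmd_needsync(x)	(pmd_val(x) & _PAGE_NEEDSYNC)

int main(void)
{
	pmd_t pmd = _PAGE_NEEDSYNC;	/* what pmd_clear() leaves behind */

	assert(pmd_none(pmd));		/* still "empty" despite the flag ... */
	assert(pmd_needsync(pmd));	/* ... but flagged for a host-side sync */

	pmd &= ~_PAGE_NEEDSYNC;		/* pmd_mkuptodate() */
	assert(pmd_none(pmd) && !pmd_needsync(pmd));

	puts("cleared entries stay 'none' while awaiting sync");
	return 0;
}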
@@ -145,9 +145,9 @@ static inline int pte_young(pte_t pte)
 	return pte_get_bits(pte, _PAGE_ACCESSED);
 }

-static inline int pte_newpage(pte_t pte)
+static inline int pte_needsync(pte_t pte)
 {
-	return pte_get_bits(pte, _PAGE_NEWPAGE);
+	return pte_get_bits(pte, _PAGE_NEEDSYNC);
 }

 /*
@@ -200,25 +200,25 @@ static inline pte_t pte_mkwrite_novma(pte_t pte)

 static inline pte_t pte_mkuptodate(pte_t pte)
 {
-	pte_clear_bits(pte, _PAGE_NEWPAGE);
+	pte_clear_bits(pte, _PAGE_NEEDSYNC);
 	return pte;
 }

-static inline pte_t pte_mknewpage(pte_t pte)
+static inline pte_t pte_mkneedsync(pte_t pte)
 {
-	pte_set_bits(pte, _PAGE_NEWPAGE);
+	pte_set_bits(pte, _PAGE_NEEDSYNC);
 	return(pte);
 }

 static inline void set_pte(pte_t *pteptr, pte_t pteval)
 {
 	pte_copy(*pteptr, pteval);

-	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
+	/* If it's a swap entry, it needs to be marked _PAGE_NEEDSYNC so
 	 * update_pte_range knows to unmap it.
 	 */

-	*pteptr = pte_mknewpage(*pteptr);
+	*pteptr = pte_mkneedsync(*pteptr);
 }

 #define PFN_PTE_SHIFT		PAGE_SHIFT
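The comment in set_pte() gives the contract behind the rename: every set_pte() tags the entry, and the later sync pass (update_pte_range(), which the comment refers to) uses the flag to decide which entries to push to the host before clearing it again. A rough userspace sketch of that consumer pattern, with invented names (sync_one(), host_map(), host_unmap()) standing in for the real host calls:

#include <stdio.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002

typedef unsigned long pte_t;	/* toy pte: just a bit mask */

/* stand-ins for the map/unmap requests the real sync code issues to the host */
static void host_map(int idx)   { printf("map   pte[%d]\n", idx); }
static void host_unmap(int idx) { printf("unmap pte[%d]\n", idx); }

/* hypothetical per-entry step in the spirit of update_pte_range() */
static void sync_one(pte_t *pte, int idx)
{
	if (!(*pte & _PAGE_NEEDSYNC))
		return;			/* already up to date, nothing to do */

	if (*pte & _PAGE_PRESENT)
		host_map(idx);		/* new or changed mapping */
	else
		host_unmap(idx);	/* cleared or swapped out: drop it */

	*pte &= ~_PAGE_NEEDSYNC;	/* pte_mkuptodate() */
}

int main(void)
{
	pte_t ptes[3] = {
		_PAGE_PRESENT | _PAGE_NEEDSYNC,	/* freshly set_pte()'d */
		_PAGE_NEEDSYNC,			/* swap entry / cleared */
		_PAGE_PRESENT,			/* untouched since last sync */
	};

	for (int i = 0; i < 3; i++)
		sync_one(&ptes[i], i);
	return 0;
}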
@@ -258,7 +258,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 #define __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
-	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
+	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEEDSYNC);
 }

 /*
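pte_same() deliberately masks the flag out, so two entries that differ only in whether they still need a host sync compare equal. A quick standalone check of that property (plain unsigned longs instead of pte_t):

#include <assert.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_NEEDSYNC	0x002
#define _PAGE_RW	0x020

/* same comparison as the macro above, on plain values */
static int pte_same(unsigned long a, unsigned long b)
{
	return !((a ^ b) & ~_PAGE_NEEDSYNC);
}

int main(void)
{
	unsigned long synced  = _PAGE_PRESENT | _PAGE_RW;
	unsigned long pending = synced | _PAGE_NEEDSYNC;

	assert(pte_same(synced, pending));		/* sync flag is ignored */
	assert(!pte_same(synced, _PAGE_PRESENT));	/* real bits still matter */
	return 0;
}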
@@ -308,7 +308,7 @@ extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
  * <--------------- offset ----------------> E < type -> 0 0 0 1 0
  *
  * E is the exclusive marker that is not stored in swap entries.
- * _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
+ * _PAGE_NEEDSYNC (bit 1) is always set to 1 in set_pte().
  */
 #define __swp_type(x)		(((x).val >> 5) & 0x1f)
 #define __swp_offset(x)		((x).val >> 11)
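The layout comment maps directly onto the decode macros below it: the swap type lives in bits 5-9 and the offset starts at bit 11, with bit 1 reserved for _PAGE_NEEDSYNC and bit 0 kept clear so the entry never looks present. A small round-trip sketch, assuming an encode helper of the usual shape (type shifted left by 5, offset by 11):

#include <assert.h>

/* toy encode/decode mirroring the macros: type in bits 5-9, offset from bit 11 */
static unsigned long swp_entry(unsigned long type, unsigned long offset)
{
	return (type << 5) | (offset << 11);	/* assumed __swp_entry() shape */
}

static unsigned long swp_type(unsigned long val)   { return (val >> 5) & 0x1f; }
static unsigned long swp_offset(unsigned long val) { return val >> 11; }

int main(void)
{
	unsigned long e = swp_entry(7, 12345);

	assert(swp_type(e) == 7);
	assert(swp_offset(e) == 12345);
	assert(!(e & 0x1));	/* bit 0 (present) stays clear for swap entries */
	return 0;
}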