
Commit 2717c6b

btw616 authored and jmberg-intel committed
um: Abandon the _PAGE_NEWPROT bit
When a PTE is updated in the page table, the _PAGE_NEWPAGE bit will always
be set. And the corresponding page will always be mapped or unmapped
depending on whether the PTE is present or not. The check on the
_PAGE_NEWPROT bit is not really reachable. Abandoning it will allow us to
simplify the code and remove the unreachable code.

Reviewed-by: Benjamin Berg <[email protected]>
Signed-off-by: Tiwei Bie <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Johannes Berg <[email protected]>
1 parent 90daca7 commit 2717c6b
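The sketch below is not part of the commit; it is a minimal, self-contained
user-space model of the reasoning in the commit message. The names set_pte()
and classify() are simplified stand-ins for the kernel's set_pte() and the
branch structure of update_pte_range(), and the bit values are copied from
the pre-patch pgtable.h. It illustrates why the _PAGE_NEWPROT branch can
never be taken once every PTE update marks the PTE _PAGE_NEWPAGE.

/*
 * Minimal sketch (not kernel code): every PTE update sets _PAGE_NEWPAGE,
 * so the "else if (pte_newprot(...))" branch in update_pte_range() is
 * never reached.
 */
#include <stdio.h>

#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
#define _PAGE_NEWPROT 0x004

typedef unsigned long pte_t;

/* Pre-patch behaviour modelled: set_pte() always marks the PTE
 * _PAGE_NEWPAGE, and only additionally marks present PTEs _PAGE_NEWPROT. */
static void set_pte(pte_t *pteptr, pte_t pteval)
{
        *pteptr = pteval | _PAGE_NEWPAGE;
        if (*pteptr & _PAGE_PRESENT)
                *pteptr |= _PAGE_NEWPROT;
}

/* The pre-patch dispatch in update_pte_range(), reduced to its branches. */
static const char *classify(pte_t pte)
{
        if (pte & _PAGE_NEWPAGE)
                return (pte & _PAGE_PRESENT) ? "mmap" : "munmap";
        else if (pte & _PAGE_NEWPROT)
                return "mprotect"; /* unreachable after set_pte() */
        return "no-op";
}

int main(void)
{
        pte_t present = 0, absent = 0;

        set_pte(&present, _PAGE_PRESENT);
        set_pte(&absent, 0);

        /* Both updated PTEs carry _PAGE_NEWPAGE, so the mprotect branch
         * never runs -- which is why the patch removes it. */
        printf("present PTE -> %s\n", classify(present)); /* mmap */
        printf("absent  PTE -> %s\n", classify(absent));  /* munmap */
        return 0;
}

Compiled with any C compiler, this prints "mmap" and "munmap" and never
"mprotect", matching the claim that the _PAGE_NEWPROT check is dead code.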

File tree: 7 files changed, +36 −105 lines changed

arch/um/include/asm/pgtable.h

Lines changed: 6 additions & 31 deletions
@@ -12,7 +12,6 @@
 
 #define _PAGE_PRESENT	0x001
 #define _PAGE_NEWPAGE	0x002
-#define _PAGE_NEWPROT	0x004
 #define _PAGE_RW	0x020
 #define _PAGE_USER	0x040
 #define _PAGE_ACCESSED	0x080
@@ -151,23 +150,12 @@ static inline int pte_newpage(pte_t pte)
 	return pte_get_bits(pte, _PAGE_NEWPAGE);
 }
 
-static inline int pte_newprot(pte_t pte)
-{
-	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
-}
-
 /*
  * =================================
  * Flags setting section.
  * =================================
  */
 
-static inline pte_t pte_mknewprot(pte_t pte)
-{
-	pte_set_bits(pte, _PAGE_NEWPROT);
-	return(pte);
-}
-
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte_clear_bits(pte, _PAGE_DIRTY);
@@ -182,19 +170,14 @@ static inline pte_t pte_mkold(pte_t pte)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	if (likely(pte_get_bits(pte, _PAGE_RW)))
-		pte_clear_bits(pte, _PAGE_RW);
-	else
-		return pte;
-	return(pte_mknewprot(pte));
+	pte_clear_bits(pte, _PAGE_RW);
+	return pte;
 }
 
 static inline pte_t pte_mkread(pte_t pte)
 {
-	if (unlikely(pte_get_bits(pte, _PAGE_USER)))
-		return pte;
 	pte_set_bits(pte, _PAGE_USER);
-	return(pte_mknewprot(pte));
+	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -211,18 +194,14 @@ static inline pte_t pte_mkyoung(pte_t pte)
 
 static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
-	if (unlikely(pte_get_bits(pte, _PAGE_RW)))
-		return pte;
 	pte_set_bits(pte, _PAGE_RW);
-	return(pte_mknewprot(pte));
+	return pte;
 }
 
 static inline pte_t pte_mkuptodate(pte_t pte)
 {
 	pte_clear_bits(pte, _PAGE_NEWPAGE);
-	if(pte_present(pte))
-		pte_clear_bits(pte, _PAGE_NEWPROT);
-	return(pte);
+	return pte;
 }
 
 static inline pte_t pte_mknewpage(pte_t pte)
@@ -236,12 +215,10 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 	pte_copy(*pteptr, pteval);
 
 	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
-	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
-	 * mapped pages.
+	 * update_pte_range knows to unmap it.
 	 */
 
 	*pteptr = pte_mknewpage(*pteptr);
-	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
 }
 
 #define PFN_PTE_SHIFT	PAGE_SHIFT
@@ -298,8 +275,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 	({ pte_t pte;					\
 							\
 	pte_set_val(pte, page_to_phys(page), (pgprot));	\
-	if (pte_present(pte))				\
-		pte_mknewprot(pte_mknewpage(pte));	\
 	pte;})
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)

arch/um/include/asm/tlbflush.h

Lines changed: 2 additions & 2 deletions
@@ -9,8 +9,8 @@
 #include <linux/mm.h>
 
 /*
- * In UML, we need to sync the TLB over by using mmap/munmap/mprotect syscalls
- * from the process handling the MM (which can be the kernel itself).
+ * In UML, we need to sync the TLB over by using mmap/munmap syscalls from
+ * the process handling the MM (which can be the kernel itself).
  *
  * To track updates, we can hook into set_ptes and flush_tlb_*. With set_ptes
  * we catch all PTE transitions where memory that was unusable becomes usable.

arch/um/include/shared/os.h

Lines changed: 0 additions & 2 deletions
@@ -279,8 +279,6 @@ int map(struct mm_id *mm_idp, unsigned long virt,
 	unsigned long len, int prot, int phys_fd,
 	unsigned long long offset);
 int unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len);
-int protect(struct mm_id *mm_idp, unsigned long addr,
-	    unsigned long len, unsigned int prot);
 
 /* skas/process.c */
 extern int is_skas_winch(int pid, int fd, void *data);

arch/um/include/shared/skas/stub-data.h

Lines changed: 0 additions & 1 deletion
@@ -30,7 +30,6 @@ enum stub_syscall_type {
 	STUB_SYSCALL_UNSET = 0,
 	STUB_SYSCALL_MMAP,
 	STUB_SYSCALL_MUNMAP,
-	STUB_SYSCALL_MPROTECT,
 };
 
 struct stub_syscall {

arch/um/kernel/skas/stub.c

Lines changed: 0 additions & 10 deletions
@@ -35,16 +35,6 @@ static __always_inline int syscall_handler(struct stub_data *d)
 				return -1;
 			}
 			break;
-		case STUB_SYSCALL_MPROTECT:
-			res = stub_syscall3(__NR_mprotect,
-					    sc->mem.addr, sc->mem.length,
-					    sc->mem.prot);
-			if (res) {
-				d->err = res;
-				d->syscall_data_len = i;
-				return -1;
-			}
-			break;
 		default:
 			d->err = -95; /* EOPNOTSUPP */
 			d->syscall_data_len = i;

arch/um/kernel/tlb.c

Lines changed: 28 additions & 38 deletions
@@ -23,9 +23,6 @@ struct vm_ops {
 		    int phys_fd, unsigned long long offset);
 	int (*unmap)(struct mm_id *mm_idp,
 		     unsigned long virt, unsigned long len);
-	int (*mprotect)(struct mm_id *mm_idp,
-			unsigned long virt, unsigned long len,
-			unsigned int prot);
 };
 
 static int kern_map(struct mm_id *mm_idp,
@@ -44,15 +41,6 @@ static int kern_unmap(struct mm_id *mm_idp,
 	return os_unmap_memory((void *)virt, len);
 }
 
-static int kern_mprotect(struct mm_id *mm_idp,
-			 unsigned long virt, unsigned long len,
-			 unsigned int prot)
-{
-	return os_protect_memory((void *)virt, len,
-				 prot & UM_PROT_READ, prot & UM_PROT_WRITE,
-				 1);
-}
-
 void report_enomem(void)
 {
 	printk(KERN_ERR "UML ran out of memory on the host side! "
@@ -65,33 +53,37 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
 				   struct vm_ops *ops)
 {
 	pte_t *pte;
-	int r, w, x, prot, ret = 0;
+	int ret = 0;
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		r = pte_read(*pte);
-		w = pte_write(*pte);
-		x = pte_exec(*pte);
-		if (!pte_young(*pte)) {
-			r = 0;
-			w = 0;
-		} else if (!pte_dirty(*pte))
-			w = 0;
-
-		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
-			(x ? UM_PROT_EXEC : 0));
-		if (pte_newpage(*pte)) {
-			if (pte_present(*pte)) {
-				__u64 offset;
-				unsigned long phys = pte_val(*pte) & PAGE_MASK;
-				int fd = phys_mapping(phys, &offset);
-
-				ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
-						prot, fd, offset);
-			} else
-				ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
-		} else if (pte_newprot(*pte))
-			ret = ops->mprotect(ops->mm_idp, addr, PAGE_SIZE, prot);
+		if (!pte_newpage(*pte))
+			continue;
+
+		if (pte_present(*pte)) {
+			__u64 offset;
+			unsigned long phys = pte_val(*pte) & PAGE_MASK;
+			int fd = phys_mapping(phys, &offset);
+			int r, w, x, prot;
+
+			r = pte_read(*pte);
+			w = pte_write(*pte);
+			x = pte_exec(*pte);
+			if (!pte_young(*pte)) {
+				r = 0;
+				w = 0;
+			} else if (!pte_dirty(*pte))
+				w = 0;
+
+			prot = (r ? UM_PROT_READ : 0) |
+			       (w ? UM_PROT_WRITE : 0) |
+			       (x ? UM_PROT_EXEC : 0);
+
+			ret = ops->mmap(ops->mm_idp, addr, PAGE_SIZE,
+					prot, fd, offset);
+		} else
+			ret = ops->unmap(ops->mm_idp, addr, PAGE_SIZE);
+
 		*pte = pte_mkuptodate(*pte);
 	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
 	return ret;
@@ -180,11 +172,9 @@ int um_tlb_sync(struct mm_struct *mm)
 	if (mm == &init_mm) {
 		ops.mmap = kern_map;
 		ops.unmap = kern_unmap;
-		ops.mprotect = kern_mprotect;
 	} else {
 		ops.mmap = map;
 		ops.unmap = unmap;
-		ops.mprotect = protect;
 	}
 
 	pgd = pgd_offset(mm, addr);

arch/um/os-Linux/skas/mem.c

Lines changed: 0 additions & 21 deletions
@@ -217,24 +217,3 @@ int unmap(struct mm_id *mm_idp, unsigned long addr, unsigned long len)
 
 	return 0;
 }
-
-int protect(struct mm_id *mm_idp, unsigned long addr, unsigned long len,
-	    unsigned int prot)
-{
-	struct stub_syscall *sc;
-
-	/* Compress with previous syscall if that is possible */
-	sc = syscall_stub_get_previous(mm_idp, STUB_SYSCALL_MPROTECT, addr);
-	if (sc && sc->mem.prot == prot) {
-		sc->mem.length += len;
-		return 0;
-	}
-
-	sc = syscall_stub_alloc(mm_idp);
-	sc->syscall = STUB_SYSCALL_MPROTECT;
-	sc->mem.addr = addr;
-	sc->mem.length = len;
-	sc->mem.prot = prot;
-
-	return 0;
-}
