Skip to content

Commit 3e0d3e4

Browse files
davidhildenbrand authored and hca committed
s390/vmem: consolidate vmem_add_range() and vmem_remove_range()
We want to have only a single pagetable walker and reuse the same functionality for vmemmap handling. Let's start by consolidating vmem_add_range() and vmem_remove_range(), converting it into a recursive implementation. A recursive implementation makes it easier to expand individual cases without harming readability. In addition, we minimize traversing the whole hierarchy over and over again. One change is that we don't unmap large PMDs/PUDs when not completely covered by the request, something that should never happen with direct mappings, unless one would be removing in other granularity than added, which would be broken already. Cc: Vasily Gorbik <[email protected]> Cc: Christian Borntraeger <[email protected]> Cc: Gerald Schaefer <[email protected]> Signed-off-by: David Hildenbrand <[email protected]> Message-Id: <[email protected]> Signed-off-by: Heiko Carstens <[email protected]>
1 parent 8398b22 commit 3e0d3e4

File tree

1 file changed

+198
-119
lines changed

1 file changed

+198
-119
lines changed

arch/s390/mm/vmem.c

Lines changed: 198 additions & 119 deletions
Original file line numberDiff line numberDiff line change
@@ -54,148 +54,227 @@ pte_t __ref *vmem_pte_alloc(void)
5454
return pte;
5555
}
5656

57-
/*
58-
* Add a physical memory range to the 1:1 mapping.
59-
*/
60-
static int vmem_add_range(unsigned long start, unsigned long size)
57+
static void modify_pte_table(pmd_t *pmd, unsigned long addr, unsigned long end,
58+
bool add)
6159
{
62-
unsigned long pgt_prot, sgt_prot, r3_prot;
63-
unsigned long pages4k, pages1m, pages2g;
64-
unsigned long end = start + size;
65-
unsigned long address = start;
66-
pgd_t *pg_dir;
67-
p4d_t *p4_dir;
68-
pud_t *pu_dir;
69-
pmd_t *pm_dir;
70-
pte_t *pt_dir;
71-
int ret = -ENOMEM;
60+
unsigned long prot, pages = 0;
61+
pte_t *pte;
7262

73-
pgt_prot = pgprot_val(PAGE_KERNEL);
74-
sgt_prot = pgprot_val(SEGMENT_KERNEL);
75-
r3_prot = pgprot_val(REGION3_KERNEL);
76-
if (!MACHINE_HAS_NX) {
77-
pgt_prot &= ~_PAGE_NOEXEC;
78-
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
79-
r3_prot &= ~_REGION_ENTRY_NOEXEC;
63+
prot = pgprot_val(PAGE_KERNEL);
64+
if (!MACHINE_HAS_NX)
65+
prot &= ~_PAGE_NOEXEC;
66+
67+
pte = pte_offset_kernel(pmd, addr);
68+
for (; addr < end; addr += PAGE_SIZE, pte++) {
69+
if (!add) {
70+
if (pte_none(*pte))
71+
continue;
72+
pte_clear(&init_mm, addr, pte);
73+
} else if (pte_none(*pte)) {
74+
pte_val(*pte) = addr | prot;
75+
} else
76+
continue;
77+
78+
pages++;
8079
}
81-
pages4k = pages1m = pages2g = 0;
82-
while (address < end) {
83-
pg_dir = pgd_offset_k(address);
84-
if (pgd_none(*pg_dir)) {
85-
p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
86-
if (!p4_dir)
87-
goto out;
88-
pgd_populate(&init_mm, pg_dir, p4_dir);
89-
}
90-
p4_dir = p4d_offset(pg_dir, address);
91-
if (p4d_none(*p4_dir)) {
92-
pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
93-
if (!pu_dir)
80+
81+
update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
82+
}
83+
84+
static int modify_pmd_table(pud_t *pud, unsigned long addr, unsigned long end,
85+
bool add)
86+
{
87+
unsigned long next, prot, pages = 0;
88+
int ret = -ENOMEM;
89+
pmd_t *pmd;
90+
pte_t *pte;
91+
92+
prot = pgprot_val(SEGMENT_KERNEL);
93+
if (!MACHINE_HAS_NX)
94+
prot &= ~_SEGMENT_ENTRY_NOEXEC;
95+
96+
pmd = pmd_offset(pud, addr);
97+
for (; addr < end; addr = next, pmd++) {
98+
next = pmd_addr_end(addr, end);
99+
100+
if (!add) {
101+
if (pmd_none(*pmd))
102+
continue;
103+
if (pmd_large(*pmd) && !add) {
104+
if (IS_ALIGNED(addr, PMD_SIZE) &&
105+
IS_ALIGNED(next, PMD_SIZE)) {
106+
pmd_clear(pmd);
107+
pages++;
108+
}
109+
continue;
110+
}
111+
} else if (pmd_none(*pmd)) {
112+
if (IS_ALIGNED(addr, PMD_SIZE) &&
113+
IS_ALIGNED(next, PMD_SIZE) &&
114+
MACHINE_HAS_EDAT1 && addr &&
115+
!debug_pagealloc_enabled()) {
116+
pmd_val(*pmd) = addr | prot;
117+
pages++;
118+
continue;
119+
}
120+
pte = vmem_pte_alloc();
121+
if (!pte)
94122
goto out;
95-
p4d_populate(&init_mm, p4_dir, pu_dir);
96-
}
97-
pu_dir = pud_offset(p4_dir, address);
98-
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
99-
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
100-
!debug_pagealloc_enabled()) {
101-
pud_val(*pu_dir) = address | r3_prot;
102-
address += PUD_SIZE;
103-
pages2g++;
123+
pmd_populate(&init_mm, pmd, pte);
124+
} else if (pmd_large(*pmd))
104125
continue;
105-
}
106-
if (pud_none(*pu_dir)) {
107-
pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
108-
if (!pm_dir)
126+
127+
modify_pte_table(pmd, addr, next, add);
128+
}
129+
ret = 0;
130+
out:
131+
update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
132+
return ret;
133+
}
134+
135+
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
136+
bool add)
137+
{
138+
unsigned long next, prot, pages = 0;
139+
int ret = -ENOMEM;
140+
pud_t *pud;
141+
pmd_t *pmd;
142+
143+
prot = pgprot_val(REGION3_KERNEL);
144+
if (!MACHINE_HAS_NX)
145+
prot &= ~_REGION_ENTRY_NOEXEC;
146+
147+
pud = pud_offset(p4d, addr);
148+
for (; addr < end; addr = next, pud++) {
149+
next = pud_addr_end(addr, end);
150+
151+
if (!add) {
152+
if (pud_none(*pud))
153+
continue;
154+
if (pud_large(*pud)) {
155+
if (IS_ALIGNED(addr, PUD_SIZE) &&
156+
IS_ALIGNED(next, PUD_SIZE)) {
157+
pud_clear(pud);
158+
pages++;
159+
}
160+
continue;
161+
}
162+
} else if (pud_none(*pud)) {
163+
if (IS_ALIGNED(addr, PUD_SIZE) &&
164+
IS_ALIGNED(next, PUD_SIZE) &&
165+
MACHINE_HAS_EDAT2 && addr &&
166+
!debug_pagealloc_enabled()) {
167+
pud_val(*pud) = addr | prot;
168+
pages++;
169+
continue;
170+
}
171+
pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
172+
if (!pmd)
109173
goto out;
110-
pud_populate(&init_mm, pu_dir, pm_dir);
111-
}
112-
pm_dir = pmd_offset(pu_dir, address);
113-
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
114-
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
115-
!debug_pagealloc_enabled()) {
116-
pmd_val(*pm_dir) = address | sgt_prot;
117-
address += PMD_SIZE;
118-
pages1m++;
174+
pud_populate(&init_mm, pud, pmd);
175+
} else if (pud_large(*pud))
119176
continue;
177+
178+
ret = modify_pmd_table(pud, addr, next, add);
179+
if (ret)
180+
goto out;
181+
}
182+
ret = 0;
183+
out:
184+
update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
185+
return ret;
186+
}
187+
188+
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
189+
bool add)
190+
{
191+
unsigned long next;
192+
int ret = -ENOMEM;
193+
p4d_t *p4d;
194+
pud_t *pud;
195+
196+
p4d = p4d_offset(pgd, addr);
197+
for (; addr < end; addr = next, p4d++) {
198+
next = p4d_addr_end(addr, end);
199+
200+
if (!add) {
201+
if (p4d_none(*p4d))
202+
continue;
203+
} else if (p4d_none(*p4d)) {
204+
pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
205+
if (!pud)
206+
goto out;
120207
}
121-
if (pmd_none(*pm_dir)) {
122-
pt_dir = vmem_pte_alloc();
123-
if (!pt_dir)
208+
209+
ret = modify_pud_table(p4d, addr, next, add);
210+
if (ret)
211+
goto out;
212+
}
213+
ret = 0;
214+
out:
215+
return ret;
216+
}
217+
218+
/*
 * Add (add == true) or remove a range of the kernel 1:1 direct mapping.
 * @start and @end must be page aligned. On removal the TLB is flushed
 * for the whole range once the page tables have been modified.
 *
 * Returns 0 on success, -EINVAL for misaligned input, -ENOMEM on
 * allocation failure.
 */
static int modify_pagetable(unsigned long start, unsigned long end, bool add)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			/* Nothing mapped below this PGD entry - skip it. */
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add);
		if (ret)
			goto out;
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
140252

253+
static int add_pagetable(unsigned long start, unsigned long end)
254+
{
255+
return modify_pagetable(start, end, true);
256+
}
257+
258+
static int remove_pagetable(unsigned long start, unsigned long end)
259+
{
260+
return modify_pagetable(start, end, false);
261+
}
262+
263+
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size);
}
270+
141271
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size);
}
200279

201280
/*

0 commit comments

Comments
 (0)