Skip to content

Commit 9ec8fa8

Browse files
davidhildenbrand authored and hca committed
s390/vmemmap: extend modify_pagetable() to handle vmemmap
Extend our shiny new modify_pagetable() to handle !direct (vmemmap) mappings. Convert vmemmap_populate() and implement vmemmap_free(). Cc: Vasily Gorbik <[email protected]> Cc: Christian Borntraeger <[email protected]> Cc: Gerald Schaefer <[email protected]> Signed-off-by: David Hildenbrand <[email protected]> Message-Id: <[email protected]> Signed-off-by: Heiko Carstens <[email protected]>
1 parent 3e0d3e4 commit 9ec8fa8

File tree

1 file changed

+76
-105
lines changed

1 file changed

+76
-105
lines changed

arch/s390/mm/vmem.c

Lines changed: 76 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,15 @@ static void __ref *vmem_alloc_pages(unsigned int order)
2929
return (void *) memblock_phys_alloc(size, size);
3030
}
3131

32+
static void vmem_free_pages(unsigned long addr, int order)
33+
{
34+
/* We don't expect boot memory to be removed ever. */
35+
if (!slab_is_available() ||
36+
WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
37+
return;
38+
free_pages(addr, order);
39+
}
40+
3241
void *vmem_crst_alloc(unsigned long val)
3342
{
3443
unsigned long *table;
@@ -54,10 +63,12 @@ pte_t __ref *vmem_pte_alloc(void)
5463
return pte;
5564
}
5665

57-
static void modify_pte_table(pmd_t *pmd, unsigned long addr, unsigned long end,
58-
bool add)
66+
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
67+
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
68+
unsigned long end, bool add, bool direct)
5969
{
6070
unsigned long prot, pages = 0;
71+
int ret = -ENOMEM;
6172
pte_t *pte;
6273

6374
prot = pgprot_val(PAGE_KERNEL);
@@ -69,20 +80,34 @@ static void modify_pte_table(pmd_t *pmd, unsigned long addr, unsigned long end,
6980
if (!add) {
7081
if (pte_none(*pte))
7182
continue;
83+
if (!direct)
84+
vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
7285
pte_clear(&init_mm, addr, pte);
7386
} else if (pte_none(*pte)) {
74-
pte_val(*pte) = addr | prot;
87+
if (!direct) {
88+
void *new_page = vmemmap_alloc_block(PAGE_SIZE,
89+
NUMA_NO_NODE);
90+
91+
if (!new_page)
92+
goto out;
93+
pte_val(*pte) = __pa(new_page) | prot;
94+
} else
95+
pte_val(*pte) = addr | prot;
7596
} else
7697
continue;
7798

7899
pages++;
79100
}
80-
81-
update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
101+
ret = 0;
102+
out:
103+
if (direct)
104+
update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
105+
return ret;
82106
}
83107

84-
static int modify_pmd_table(pud_t *pud, unsigned long addr, unsigned long end,
85-
bool add)
108+
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
109+
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
110+
unsigned long end, bool add, bool direct)
86111
{
87112
unsigned long next, prot, pages = 0;
88113
int ret = -ENOMEM;
@@ -103,6 +128,9 @@ static int modify_pmd_table(pud_t *pud, unsigned long addr, unsigned long end,
103128
if (pmd_large(*pmd) && !add) {
104129
if (IS_ALIGNED(addr, PMD_SIZE) &&
105130
IS_ALIGNED(next, PMD_SIZE)) {
131+
if (!direct)
132+
vmem_free_pages(pmd_deref(*pmd),
133+
get_order(PMD_SIZE));
106134
pmd_clear(pmd);
107135
pages++;
108136
}
@@ -111,11 +139,27 @@ static int modify_pmd_table(pud_t *pud, unsigned long addr, unsigned long end,
111139
} else if (pmd_none(*pmd)) {
112140
if (IS_ALIGNED(addr, PMD_SIZE) &&
113141
IS_ALIGNED(next, PMD_SIZE) &&
114-
MACHINE_HAS_EDAT1 && addr &&
142+
MACHINE_HAS_EDAT1 && addr && direct &&
115143
!debug_pagealloc_enabled()) {
116144
pmd_val(*pmd) = addr | prot;
117145
pages++;
118146
continue;
147+
} else if (!direct && MACHINE_HAS_EDAT1) {
148+
void *new_page;
149+
150+
/*
151+
* Use 1MB frames for vmemmap if available. We
152+
* always use large frames even if they are only
153+
* partially used. Otherwise we would have also
154+
* page tables since vmemmap_populate gets
155+
* called for each section separately.
156+
*/
157+
new_page = vmemmap_alloc_block(PMD_SIZE,
158+
NUMA_NO_NODE);
159+
if (!new_page)
160+
goto out;
161+
pmd_val(*pmd) = __pa(new_page) | prot;
162+
continue;
119163
}
120164
pte = vmem_pte_alloc();
121165
if (!pte)
@@ -124,16 +168,19 @@ static int modify_pmd_table(pud_t *pud, unsigned long addr, unsigned long end,
124168
} else if (pmd_large(*pmd))
125169
continue;
126170

127-
modify_pte_table(pmd, addr, next, add);
171+
ret = modify_pte_table(pmd, addr, next, add, direct);
172+
if (ret)
173+
goto out;
128174
}
129175
ret = 0;
130176
out:
131-
update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
177+
if (direct)
178+
update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
132179
return ret;
133180
}
134181

135182
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
136-
bool add)
183+
bool add, bool direct)
137184
{
138185
unsigned long next, prot, pages = 0;
139186
int ret = -ENOMEM;
@@ -162,7 +209,7 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
162209
} else if (pud_none(*pud)) {
163210
if (IS_ALIGNED(addr, PUD_SIZE) &&
164211
IS_ALIGNED(next, PUD_SIZE) &&
165-
MACHINE_HAS_EDAT2 && addr &&
212+
MACHINE_HAS_EDAT2 && addr && direct &&
166213
!debug_pagealloc_enabled()) {
167214
pud_val(*pud) = addr | prot;
168215
pages++;
@@ -175,18 +222,19 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
175222
} else if (pud_large(*pud))
176223
continue;
177224

178-
ret = modify_pmd_table(pud, addr, next, add);
225+
ret = modify_pmd_table(pud, addr, next, add, direct);
179226
if (ret)
180227
goto out;
181228
}
182229
ret = 0;
183230
out:
184-
update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
231+
if (direct)
232+
update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
185233
return ret;
186234
}
187235

188236
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
189-
bool add)
237+
bool add, bool direct)
190238
{
191239
unsigned long next;
192240
int ret = -ENOMEM;
@@ -206,7 +254,7 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
206254
goto out;
207255
}
208256

209-
ret = modify_pud_table(p4d, addr, next, add);
257+
ret = modify_pud_table(p4d, addr, next, add, direct);
210258
if (ret)
211259
goto out;
212260
}
@@ -215,7 +263,8 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
215263
return ret;
216264
}
217265

218-
static int modify_pagetable(unsigned long start, unsigned long end, bool add)
266+
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
267+
bool direct)
219268
{
220269
unsigned long addr, next;
221270
int ret = -ENOMEM;
@@ -239,7 +288,7 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add)
239288
pgd_populate(&init_mm, pgd, p4d);
240289
}
241290

242-
ret = modify_p4d_table(pgd, addr, next, add);
291+
ret = modify_p4d_table(pgd, addr, next, add, direct);
243292
if (ret)
244293
goto out;
245294
}
@@ -250,22 +299,22 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add)
250299
return ret;
251300
}
252301

253-
static int add_pagetable(unsigned long start, unsigned long end)
302+
static int add_pagetable(unsigned long start, unsigned long end, bool direct)
254303
{
255-
return modify_pagetable(start, end, true);
304+
return modify_pagetable(start, end, true, direct);
256305
}
257306

258-
static int remove_pagetable(unsigned long start, unsigned long end)
307+
static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
259308
{
260-
return modify_pagetable(start, end, false);
309+
return modify_pagetable(start, end, false, direct);
261310
}
262311

263312
/*
264313
* Add a physical memory range to the 1:1 mapping.
265314
*/
266315
static int vmem_add_range(unsigned long start, unsigned long size)
267316
{
268-
return add_pagetable(start, start + size);
317+
return add_pagetable(start, start + size, true);
269318
}
270319

271320
/*
@@ -274,7 +323,7 @@ static int vmem_add_range(unsigned long start, unsigned long size)
274323
*/
275324
static void vmem_remove_range(unsigned long start, unsigned long size)
276325
{
277-
remove_pagetable(start, start + size);
326+
remove_pagetable(start, start + size, true);
278327
}
279328

280329
/*
@@ -283,92 +332,14 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
283332
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
284333
struct vmem_altmap *altmap)
285334
{
286-
unsigned long pgt_prot, sgt_prot;
287-
unsigned long address = start;
288-
pgd_t *pg_dir;
289-
p4d_t *p4_dir;
290-
pud_t *pu_dir;
291-
pmd_t *pm_dir;
292-
pte_t *pt_dir;
293-
int ret = -ENOMEM;
294-
295-
pgt_prot = pgprot_val(PAGE_KERNEL);
296-
sgt_prot = pgprot_val(SEGMENT_KERNEL);
297-
if (!MACHINE_HAS_NX) {
298-
pgt_prot &= ~_PAGE_NOEXEC;
299-
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
300-
}
301-
for (address = start; address < end;) {
302-
pg_dir = pgd_offset_k(address);
303-
if (pgd_none(*pg_dir)) {
304-
p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
305-
if (!p4_dir)
306-
goto out;
307-
pgd_populate(&init_mm, pg_dir, p4_dir);
308-
}
309-
310-
p4_dir = p4d_offset(pg_dir, address);
311-
if (p4d_none(*p4_dir)) {
312-
pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
313-
if (!pu_dir)
314-
goto out;
315-
p4d_populate(&init_mm, p4_dir, pu_dir);
316-
}
317-
318-
pu_dir = pud_offset(p4_dir, address);
319-
if (pud_none(*pu_dir)) {
320-
pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
321-
if (!pm_dir)
322-
goto out;
323-
pud_populate(&init_mm, pu_dir, pm_dir);
324-
}
325-
326-
pm_dir = pmd_offset(pu_dir, address);
327-
if (pmd_none(*pm_dir)) {
328-
/* Use 1MB frames for vmemmap if available. We always
329-
* use large frames even if they are only partially
330-
* used.
331-
* Otherwise we would have also page tables since
332-
* vmemmap_populate gets called for each section
333-
* separately. */
334-
if (MACHINE_HAS_EDAT1) {
335-
void *new_page;
336-
337-
new_page = vmemmap_alloc_block(PMD_SIZE, node);
338-
if (!new_page)
339-
goto out;
340-
pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
341-
address = (address + PMD_SIZE) & PMD_MASK;
342-
continue;
343-
}
344-
pt_dir = vmem_pte_alloc();
345-
if (!pt_dir)
346-
goto out;
347-
pmd_populate(&init_mm, pm_dir, pt_dir);
348-
} else if (pmd_large(*pm_dir)) {
349-
address = (address + PMD_SIZE) & PMD_MASK;
350-
continue;
351-
}
352-
353-
pt_dir = pte_offset_kernel(pm_dir, address);
354-
if (pte_none(*pt_dir)) {
355-
void *new_page;
356-
357-
new_page = vmemmap_alloc_block(PAGE_SIZE, node);
358-
if (!new_page)
359-
goto out;
360-
pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
361-
}
362-
address += PAGE_SIZE;
363-
}
364-
ret = 0;
365-
out:
366-
return ret;
335+
/* We don't care about the node, just use NUMA_NO_NODE on allocations */
336+
return add_pagetable(start, end, false);
367337
}
368338

369339
void vmemmap_free(unsigned long start, unsigned long end,
370340
struct vmem_altmap *altmap)
371341
{
342+
remove_pagetable(start, end, false);
372343
}
373344

374345
void vmem_remove_mapping(unsigned long start, unsigned long size)

0 commit comments

Comments
 (0)