@@ -63,6 +63,15 @@ pte_t __ref *vmem_pte_alloc(void)
 	return pte;
 }
 
+static void vmem_pte_free(unsigned long *table)
+{
+	/* We don't expect boot memory to be removed ever. */
+	if (!slab_is_available() ||
+	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
+		return;
+	page_table_free(&init_mm, table);
+}
+
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, bool add, bool direct)
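The new vmem_pte_free() deliberately leaks any table that predates the slab allocator: early-boot page tables are carved out of memblock and marked PG_reserved, so handing one to page_table_free() would corrupt the allocator's bookkeeping. A minimal sketch of the same guard pulled out as a predicate (the helper name is ours, not the patch's):

    /* Hypothetical helper: may this pte table go back to the allocator? */
    static bool vmem_pte_table_freeable(unsigned long *table)
    {
            /* No slab yet: the table came from memblock during early boot. */
            if (!slab_is_available())
                    return false;
            /* Reserved page: boot memory, not expected to be removed ever. */
            if (WARN_ON_ONCE(PageReserved(virt_to_page(table))))
                    return false;
            return true;
    }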
@@ -105,6 +114,21 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 	return ret;
 }
 
+static void try_free_pte_table(pmd_t *pmd, unsigned long start)
+{
+	pte_t *pte;
+	int i;
+
+	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
+	pte = pte_offset_kernel(pmd, start);
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++)
+		if (!pte_none(*pte))
+			return;
+
+	vmem_pte_free(__va(pmd_deref(*pmd)));
+	pmd_clear(pmd);
+}
+
 /* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
 static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 				  unsigned long end, bool add, bool direct)
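try_free_pte_table() is a plain scan-and-reap: a single live entry among the PTRS_PER_PTE (256 on s390) slots keeps the table alive. The table's address is recovered from the pmd entry itself, and since s390 pte tables are 2 KiB fragments (two per 4 KiB page), the table must go back through page_table_free()'s fragment bookkeeping rather than a bare page free. A sketch of that round-trip, assuming a populated entry:

    unsigned long phys = pmd_deref(*pmd);   /* table origin stored in the entry */
    unsigned long *table = __va(phys);      /* back to the kernel virtual address */
    vmem_pte_free(table);                   /* a 2 KiB fragment, not a full page */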
@@ -171,6 +195,8 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 		ret = modify_pte_table(pmd, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pte_table(pmd, addr & PMD_MASK);
 	}
 	ret = 0;
out:
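The removal path now reaps as it unwinds: once modify_pte_table() has invalidated entries in a subrange, the pte table covering the enclosing segment is probed for emptiness. addr & PMD_MASK rounds down to the start of that segment, since a pte table maps PMD_SIZE = 1 MiB on s390; the same two-line wiring is repeated below at the pud, p4d and pgd levels. A worked example of the rounding (addresses are illustrative):

    /* PMD_MASK clears the low 20 bits on s390 (PMD_SIZE = 1 MiB): */
    unsigned long addr = 0x0000000012345678UL;
    unsigned long base = addr & PMD_MASK;   /* 0x0000000012300000 */
    /* try_free_pte_table() then scans the table mapping [base, base + PMD_SIZE) */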
@@ -179,6 +205,29 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 	return ret;
 }
 
+static void try_free_pmd_table(pud_t *pud, unsigned long start)
+{
+	const unsigned long end = start + PUD_SIZE;
+	pmd_t *pmd;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
+		return;
+#endif
+
+	pmd = pmd_offset(pud, start);
+	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
+		if (!pmd_none(*pmd))
+			return;
+
+	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
+	pud_clear(pud);
+}
+
 static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			    bool add, bool direct)
 {
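try_free_pmd_table() and its pud/p4d twins below only reap tables whose entire covered range lies in the 1:1 mapping and vmemmap area, i.e. below VMALLOC_START, and, under KASAN, outside the shadow mapping. Note that the #ifdef'd test as written bails out when the range sits entirely below the shadow (KASAN_SHADOW_START > end); a symmetric interval-overlap test would read as follows (a sketch of the alternative, not what the patch does):

    #ifdef CONFIG_KASAN
            /* Leave the table alone if [start, end) intersects the shadow. */
            if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
                    return;
    #endif

The object freed here is a CRST (region/segment) table, which on s390 is 16 KiB, i.e. four pages, hence vmem_free_pages() with CRST_ALLOC_ORDER instead of page_table_free().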
@@ -225,6 +274,8 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 		ret = modify_pmd_table(pud, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pmd_table(pud, addr & PUD_MASK);
 	}
 	ret = 0;
out:
@@ -233,6 +284,29 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 	return ret;
 }
 
+static void try_free_pud_table(p4d_t *p4d, unsigned long start)
+{
+	const unsigned long end = start + P4D_SIZE;
+	pud_t *pud;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
+		return;
+#endif
+
+	pud = pud_offset(p4d, start);
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++)
+		if (!pud_none(*pud))
+			return;
+
+	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
+	p4d_clear(p4d);
+}
+
 static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 			    bool add, bool direct)
 {
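Because the scans test only pmd_none()/pud_none()/p4d_none(), an entry backing a large mapping, such as a 1 MiB segment pmd or a 2 GiB region pud, counts as live and keeps its table in place, so huge mappings need no special-casing. The reaping also cascades naturally from the bottom up, for example when a fully mapped segment is removed:

    /*
     * modify_pte_table()    invalidates all 256 PTEs of the segment
     * try_free_pte_table()  finds the pte table empty
     *                       -> vmem_pte_free() + pmd_clear()
     * try_free_pmd_table()  may now find all 2048 pmd entries none
     *                       -> vmem_free_pages() + pud_clear()
     * ... and likewise up through the pud and p4d tables.
     */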
@@ -257,12 +331,37 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 		ret = modify_pud_table(p4d, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_pud_table(p4d, addr & P4D_MASK);
 	}
 	ret = 0;
out:
 	return ret;
 }
 
+static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
+{
+	const unsigned long end = start + PGDIR_SIZE;
+	p4d_t *p4d;
+	int i;
+
+	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+	if (end > VMALLOC_START)
+		return;
+#ifdef CONFIG_KASAN
+	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
+		return;
+#endif
+
+	p4d = p4d_offset(pgd, start);
+	for (i = 0; i < PTRS_PER_P4D; i++, p4d++)
+		if (!p4d_none(*p4d))
+			return;
+
+	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
+	pgd_clear(pgd);
+}
+
 static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 			    bool direct)
 {
@@ -291,6 +390,8 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 		ret = modify_p4d_table(pgd, addr, next, add, direct);
 		if (ret)
 			goto out;
+		if (!add)
+			try_free_p4d_table(pgd, addr & PGDIR_MASK);
 	}
 	ret = 0;
out:
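With the pgd level wired up, the removal path reads roughly as sketched below, assuming vmem_remove_range() reaches modify_pagetable() with add=false (the exact entry wrapper is outside these hunks). There is deliberately no try_free_pgd_table(): the top-level kernel pgd is a fixed allocation, so try_free_p4d_table() only clears its entries and never frees the table itself.

    /*
     * vmem_remove_range()
     *   modify_pagetable(start, end, add=false, ...)
     *     modify_p4d_table()
     *       modify_pud_table()
     *         modify_pmd_table()
     *           modify_pte_table()     invalidates PTEs
     *           try_free_pte_table()   reaps empty pte tables
     *         try_free_pmd_table()     reaps empty pmd tables
     *       try_free_pud_table()       reaps empty pud tables
     *     try_free_p4d_table()         reaps empty p4d tables
     */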
@@ -319,7 +420,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 
 /*
  * Remove a physical memory range from the 1:1 mapping.
- * Currently only invalidates page table entries.
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {