@@ -169,17 +169,17 @@ static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
 			pte_clear(&init_mm, addr, pte);
 		} else if (pte_none(*pte)) {
 			if (!direct) {
-				void *new_page = vmemmap_alloc_block(PAGE_SIZE,
-								     NUMA_NO_NODE);
+				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
 
 				if (!new_page)
 					goto out;
 				pte_val(*pte) = __pa(new_page) | prot;
-			} else
+			} else {
 				pte_val(*pte) = addr | prot;
-		} else
+			}
+		} else {
 			continue;
-
+		}
 		pages++;
 	}
 	ret = 0;
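Note: after this hunk, the whole if/else chain in modify_pte_table() is brace-balanced. For readability, here is a sketch of the resulting loop; the loop header and the elided !add branch are reconstructed from surrounding context and are assumptions, not part of this diff:

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {	/* assumed loop header */
		if (!add) {
			/* removal path elided here */
		} else if (pte_none(*pte)) {
			if (!direct) {
				/* vmemmap page: back it with a freshly allocated page */
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
				/* direct (1:1) mapping: addr doubles as the physical address */
				pte_val(*pte) = addr | prot;
			}
		} else {
			/* already populated: skip without counting it */
			continue;
		}
		pages++;
	}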
@@ -196,10 +196,10 @@ static void try_free_pte_table(pmd_t *pmd, unsigned long start)
 
 	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
 	pte = pte_offset_kernel(pmd, start);
-	for (i = 0; i < PTRS_PER_PTE; i++, pte++)
+	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
 		if (!pte_none(*pte))
 			return;
-
+	}
 	vmem_pte_free(__va(pmd_deref(*pmd)));
 	pmd_clear(pmd);
 }
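With the braces added above, try_free_pte_table() reads as below; the function prologue and declarations are reconstructed from context and are an assumption:

	static void try_free_pte_table(pmd_t *pmd, unsigned long start)
	{
		pte_t *pte;	/* assumed declarations */
		int i;

		/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
		pte = pte_offset_kernel(pmd, start);
		for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
			/* any entry still in use means the table cannot be freed */
			if (!pte_none(*pte))
				return;
		}
		vmem_pte_free(__va(pmd_deref(*pmd)));
		pmd_clear(pmd);
	}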
@@ -220,22 +220,18 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 	pmd = pmd_offset(pud, addr);
 	for (; addr < end; addr = next, pmd++) {
 		next = pmd_addr_end(addr, end);
-
 		if (!add) {
 			if (pmd_none(*pmd))
 				continue;
 			if (pmd_large(*pmd) && !add) {
 				if (IS_ALIGNED(addr, PMD_SIZE) &&
 				    IS_ALIGNED(next, PMD_SIZE)) {
 					if (!direct)
-						vmem_free_pages(pmd_deref(*pmd),
-								get_order(PMD_SIZE));
+						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
 					pmd_clear(pmd);
 					pages++;
-				} else if (!direct &&
-					   vmemmap_unuse_sub_pmd(addr, next)) {
-					vmem_free_pages(pmd_deref(*pmd),
-							get_order(PMD_SIZE));
+				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
+					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
 					pmd_clear(pmd);
 				}
 				continue;
@@ -258,14 +254,12 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 				 * page tables since vmemmap_populate gets
 				 * called for each section separately.
 				 */
-				new_page = vmemmap_alloc_block(PMD_SIZE,
-							       NUMA_NO_NODE);
+				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
 				if (new_page) {
 					pmd_val(*pmd) = __pa(new_page) | prot;
 					if (!IS_ALIGNED(addr, PMD_SIZE) ||
 					    !IS_ALIGNED(next, PMD_SIZE)) {
-						vmemmap_use_new_sub_pmd(addr,
-									next);
+						vmemmap_use_new_sub_pmd(addr, next);
 					}
 					continue;
 				}
@@ -279,7 +273,6 @@ static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
 				vmemmap_use_sub_pmd(addr, next);
 			continue;
 		}
-
 		ret = modify_pte_table(pmd, addr, next, add, direct);
 		if (ret)
 			goto out;
@@ -306,12 +299,10 @@ static void try_free_pmd_table(pud_t *pud, unsigned long start)
 	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
 		return;
 #endif
-
 	pmd = pmd_offset(pud, start);
 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
 		if (!pmd_none(*pmd))
 			return;
-
 	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
 	pud_clear(pud);
 }
@@ -327,11 +318,9 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 	prot = pgprot_val(REGION3_KERNEL);
 	if (!MACHINE_HAS_NX)
 		prot &= ~_REGION_ENTRY_NOEXEC;
-
 	pud = pud_offset(p4d, addr);
 	for (; addr < end; addr = next, pud++) {
 		next = pud_addr_end(addr, end);
-
 		if (!add) {
 			if (pud_none(*pud))
 				continue;
@@ -356,9 +345,9 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
 			if (!pmd)
 				goto out;
 			pud_populate(&init_mm, pud, pmd);
-		} else if (pud_large(*pud))
+		} else if (pud_large(*pud)) {
 			continue;
-
+		}
 		ret = modify_pmd_table(pud, addr, next, add, direct);
 		if (ret)
 			goto out;
@@ -387,10 +376,10 @@ static void try_free_pud_table(p4d_t *p4d, unsigned long start)
 #endif
 
 	pud = pud_offset(p4d, start);
-	for (i = 0; i < PTRS_PER_PUD; i++, pud++)
+	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
 		if (!pud_none(*pud))
 			return;
-
+	}
 	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
 	p4d_clear(p4d);
 }
@@ -406,7 +395,6 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 	p4d = p4d_offset(pgd, addr);
 	for (; addr < end; addr = next, p4d++) {
 		next = p4d_addr_end(addr, end);
-
 		if (!add) {
 			if (p4d_none(*p4d))
 				continue;
@@ -415,7 +403,6 @@ static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
 			if (!pud)
 				goto out;
 		}
-
 		ret = modify_pud_table(p4d, addr, next, add, direct);
 		if (ret)
 			goto out;
@@ -442,10 +429,10 @@ static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
 #endif
 
 	p4d = p4d_offset(pgd, start);
-	for (i = 0; i < PTRS_PER_P4D; i++, p4d++)
+	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
 		if (!p4d_none(*p4d))
 			return;
-
+	}
 	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
 	pgd_clear(pgd);
 }
@@ -460,7 +447,6 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 
 	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
 		return -EINVAL;
-
 	for (addr = start; addr < end; addr = next) {
 		next = pgd_addr_end(addr, end);
 		pgd = pgd_offset_k(addr);
@@ -474,7 +460,6 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
 				goto out;
 			pgd_populate(&init_mm, pgd, p4d);
 		}
-
 		ret = modify_p4d_table(pgd, addr, next, add, direct);
 		if (ret)
 			goto out;
@@ -518,7 +503,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
  * Add a backed mem_map array to the virtual mem_map array.
  */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
-		struct vmem_altmap *altmap)
+			       struct vmem_altmap *altmap)
 {
 	int ret;
 
@@ -532,7 +517,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 }
 
 void vmemmap_free(unsigned long start, unsigned long end,
-		struct vmem_altmap *altmap)
+		  struct vmem_altmap *altmap)
 {
 	mutex_lock(&vmem_mutex);
 	remove_pagetable(start, end, false);
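Most hunks above apply two coding-style rules: wrapped calls are joined onto one line (checkpatch has accepted lines up to 100 columns since 2020), and once one branch of an if/else chain has braces, every branch gets them. A minimal illustration of the brace rule, using hypothetical helpers:

	/* before: checkpatch flags the unbraced else arm */
	if (pte_none(*pte)) {
		populate_one(pte);	/* hypothetical helper */
	} else
		skip_one(pte);		/* hypothetical helper */

	/* after: all arms braced */
	if (pte_none(*pte)) {
		populate_one(pte);
	} else {
		skip_one(pte);
	}

The last two hunks only re-align the continuation lines of the vmemmap_populate() and vmemmap_free() prototypes with their opening parentheses; no tokens change.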