Skip to content

Commit c198a11

Browse files
Vasily Gorbik authored and gregkh committed
s390/mm: Allow large pages for KASAN shadow mapping
[ Upstream commit e70452c ]

Commit c98d2ec ("s390/mm: Uncouple physical vs virtual address spaces") introduced a large_allowed() helper that restricts which mapping modes can use large pages. This change unintentionally prevented KASAN shadow mappings from using large pages, despite there being no reason to avoid them. In fact, large pages are preferred for performance.

Since commit d8073dc ("s390/mm: Allow large pages only for aligned physical addresses"), both can_large_pud() and can_large_pmd() call _pa() to check if large page physical addresses are aligned. However, _pa() has a side effect: it allocates memory in POPULATE_KASAN_MAP_SHADOW mode.

Rename large_allowed() to large_page_mapping_allowed() and add POPULATE_KASAN_MAP_SHADOW to the allowed list to restore large page mappings for KASAN shadows. While large_page_mapping_allowed() isn't strictly necessary with current mapping modes since disallowed modes either don't map anything or fail alignment and size checks, keep it for clarity.

Rename _pa() to resolve_pa_may_alloc() for clarity and to emphasize existing side effect. Rework can_large_pud()/can_large_pmd() to take the side effect into consideration and actually return physical address instead of just checking conditions.

Fixes: c98d2ec ("s390/mm: Uncouple physical vs virtual address spaces")
Reviewed-by: Alexander Gordeev <[email protected]>
Signed-off-by: Vasily Gorbik <[email protected]>
Signed-off-by: Alexander Gordeev <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
1 parent 501d989 commit c198a11

File tree

1 file changed

+50
-24
lines changed

1 file changed

+50
-24
lines changed

arch/s390/boot/vmem.c

Lines changed: 50 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#include "decompressor.h"
1414
#include "boot.h"
1515

16+
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
1617
struct ctlreg __bootdata_preserved(s390_invalid_asce);
1718

1819
#ifdef CONFIG_PROC_FS
@@ -236,11 +237,12 @@ static pte_t *boot_pte_alloc(void)
236237
return pte;
237238
}
238239

239-
static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_mode mode)
240+
static unsigned long resolve_pa_may_alloc(unsigned long addr, unsigned long size,
241+
enum populate_mode mode)
240242
{
241243
switch (mode) {
242244
case POPULATE_NONE:
243-
return -1;
245+
return INVALID_PHYS_ADDR;
244246
case POPULATE_DIRECT:
245247
return addr;
246248
case POPULATE_LOWCORE:
@@ -258,33 +260,55 @@ static unsigned long _pa(unsigned long addr, unsigned long size, enum populate_m
258260
return addr;
259261
#endif
260262
default:
261-
return -1;
263+
return INVALID_PHYS_ADDR;
262264
}
263265
}
264266

265-
static bool large_allowed(enum populate_mode mode)
267+
static bool large_page_mapping_allowed(enum populate_mode mode)
266268
{
267-
return (mode == POPULATE_DIRECT) || (mode == POPULATE_IDENTITY) || (mode == POPULATE_KERNEL);
269+
switch (mode) {
270+
case POPULATE_DIRECT:
271+
case POPULATE_IDENTITY:
272+
case POPULATE_KERNEL:
273+
#ifdef CONFIG_KASAN
274+
case POPULATE_KASAN_MAP_SHADOW:
275+
#endif
276+
return true;
277+
default:
278+
return false;
279+
}
268280
}
269281

270-
static bool can_large_pud(pud_t *pu_dir, unsigned long addr, unsigned long end,
271-
enum populate_mode mode)
282+
static unsigned long try_get_large_pud_pa(pud_t *pu_dir, unsigned long addr, unsigned long end,
283+
enum populate_mode mode)
272284
{
273-
unsigned long size = end - addr;
285+
unsigned long pa, size = end - addr;
286+
287+
if (!machine.has_edat2 || !large_page_mapping_allowed(mode) ||
288+
!IS_ALIGNED(addr, PUD_SIZE) || (size < PUD_SIZE))
289+
return INVALID_PHYS_ADDR;
274290

275-
return machine.has_edat2 && large_allowed(mode) &&
276-
IS_ALIGNED(addr, PUD_SIZE) && (size >= PUD_SIZE) &&
277-
IS_ALIGNED(_pa(addr, size, mode), PUD_SIZE);
291+
pa = resolve_pa_may_alloc(addr, size, mode);
292+
if (!IS_ALIGNED(pa, PUD_SIZE))
293+
return INVALID_PHYS_ADDR;
294+
295+
return pa;
278296
}
279297

280-
static bool can_large_pmd(pmd_t *pm_dir, unsigned long addr, unsigned long end,
281-
enum populate_mode mode)
298+
static unsigned long try_get_large_pmd_pa(pmd_t *pm_dir, unsigned long addr, unsigned long end,
299+
enum populate_mode mode)
282300
{
283-
unsigned long size = end - addr;
301+
unsigned long pa, size = end - addr;
302+
303+
if (!machine.has_edat1 || !large_page_mapping_allowed(mode) ||
304+
!IS_ALIGNED(addr, PMD_SIZE) || (size < PMD_SIZE))
305+
return INVALID_PHYS_ADDR;
306+
307+
pa = resolve_pa_may_alloc(addr, size, mode);
308+
if (!IS_ALIGNED(pa, PMD_SIZE))
309+
return INVALID_PHYS_ADDR;
284310

285-
return machine.has_edat1 && large_allowed(mode) &&
286-
IS_ALIGNED(addr, PMD_SIZE) && (size >= PMD_SIZE) &&
287-
IS_ALIGNED(_pa(addr, size, mode), PMD_SIZE);
311+
return pa;
288312
}
289313

290314
static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -298,7 +322,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
298322
if (pte_none(*pte)) {
299323
if (kasan_pte_populate_zero_shadow(pte, mode))
300324
continue;
301-
entry = __pte(_pa(addr, PAGE_SIZE, mode));
325+
entry = __pte(resolve_pa_may_alloc(addr, PAGE_SIZE, mode));
302326
entry = set_pte_bit(entry, PAGE_KERNEL);
303327
if (!machine.has_nx)
304328
entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
@@ -313,7 +337,7 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
313337
static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end,
314338
enum populate_mode mode)
315339
{
316-
unsigned long next, pages = 0;
340+
unsigned long pa, next, pages = 0;
317341
pmd_t *pmd, entry;
318342
pte_t *pte;
319343

@@ -323,8 +347,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
323347
if (pmd_none(*pmd)) {
324348
if (kasan_pmd_populate_zero_shadow(pmd, addr, next, mode))
325349
continue;
326-
if (can_large_pmd(pmd, addr, next, mode)) {
327-
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
350+
pa = try_get_large_pmd_pa(pmd, addr, next, mode);
351+
if (pa != INVALID_PHYS_ADDR) {
352+
entry = __pmd(pa);
328353
entry = set_pmd_bit(entry, SEGMENT_KERNEL);
329354
if (!machine.has_nx)
330355
entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
@@ -346,7 +371,7 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
346371
static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long end,
347372
enum populate_mode mode)
348373
{
349-
unsigned long next, pages = 0;
374+
unsigned long pa, next, pages = 0;
350375
pud_t *pud, entry;
351376
pmd_t *pmd;
352377

@@ -356,8 +381,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
356381
if (pud_none(*pud)) {
357382
if (kasan_pud_populate_zero_shadow(pud, addr, next, mode))
358383
continue;
359-
if (can_large_pud(pud, addr, next, mode)) {
360-
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
384+
pa = try_get_large_pud_pa(pud, addr, next, mode);
385+
if (pa != INVALID_PHYS_ADDR) {
386+
entry = __pud(pa);
361387
entry = set_pud_bit(entry, REGION3_KERNEL);
362388
if (!machine.has_nx)
363389
entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));

0 commit comments

Comments
 (0)