Commit 3e82610

Author: Alexander Gordeev (committed)
s390/kasan: avoid short by one page shadow memory
Kernel Address Sanitizer encodes every 8 bytes of memory with one shadow byte, so the start and end addresses of a memory range are shifted right by 3 bits when the corresponding shadow memory is created for that range.

The memory mapping routine used expects page-aligned addresses, while the 3-bit shift described above can leave the start and end boundaries of the shadow memory range non-page-aligned when the size of the original memory range is less than (PAGE_SIZE << 3). As a result, the shadow memory range could end up short by one page.

Align the start and end addresses to page boundaries when mapping a shadow memory range, avoiding this issue in the future. Note that this does not fix a real problem, since currently no virtual regions smaller than (PAGE_SIZE << 3) exist.

Reviewed-by: Vasily Gorbik <[email protected]>
Signed-off-by: Alexander Gordeev <[email protected]>
1 parent 2ed8b50 commit 3e82610
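
To make the arithmetic concrete, here is a minimal user-space sketch (an editor's illustration, not kernel code): PAGE_SIZE, the alignment macros, and the 3-bit shadow shift are redefined locally, and the stand-in SHA() omits the KASAN_SHADOW_OFFSET term that the kernel's __sha() adds. It shows how a range smaller than (PAGE_SIZE << 3) yields non-page-aligned shadow boundaries, and how rounding outward restores full page coverage:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_ALIGN_DOWN(x)	((x) & PAGE_MASK)

/* Simplified stand-in for the kernel's __sha(): a plain 3-bit shift. */
#define SHA(x)			((x) >> 3)

int main(void)
{
	/* An 8K range: smaller than (PAGE_SIZE << 3) = 32K. */
	unsigned long start = 0x100800UL;
	unsigned long end = start + 0x2000UL;

	/* Raw shadow boundaries are not page-aligned: 0x20100 - 0x20500. */
	printf("raw shadow:     %#lx - %#lx\n", SHA(start), SHA(end));

	/* Rounding outward maps the full covering page: 0x20000 - 0x21000. */
	printf("aligned shadow: %#lx - %#lx\n",
	       PAGE_ALIGN_DOWN(SHA(start)), PAGE_ALIGN(SHA(end)));
	return 0;
}

A page-granular mapper handed the raw boundaries would fall short of the page that the 1K shadow range actually touches; the rounded boundaries guarantee the whole covering page is mapped.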

File tree

1 file changed: +11 -4 lines changed


arch/s390/boot/vmem.c

Lines changed: 11 additions & 4 deletions
@@ -45,6 +45,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 
 static pte_t pte_z;
 
+static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
+{
+	start = PAGE_ALIGN_DOWN(__sha(start));
+	end = PAGE_ALIGN(__sha(end));
+	pgtable_populate(start, end, mode);
+}
+
 static void kasan_populate_shadow(void)
 {
 	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -95,17 +102,17 @@ static void kasan_populate_shadow(void)
 	 */
 
 	for_each_physmem_usable_range(i, &start, &end)
-		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
+		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		untracked_end = VMALLOC_START;
 		/* shallowly populate kasan shadow for vmalloc and modules */
-		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
+		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
 	} else {
 		untracked_end = MODULES_VADDR;
 	}
 	/* populate kasan shadow for untracked memory */
-	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
-	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
+	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
 }
 
 static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,
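
Design note: centralizing the __sha() conversion and the page rounding in the new kasan_populate() helper keeps the alignment invariant in one place, so the call sites stay readable and any future caller passing a region smaller than (PAGE_SIZE << 3) automatically gets page-aligned shadow boundaries.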
