Skip to content

Commit d6e6a74

Browse files
linusw authored and Russell King (Oracle) committed
ARM: 9429/1: ioremap: Sync PGDs for VMALLOC shadow
When sync:ing the VMALLOC area to other CPUs, make sure to also sync the KASAN shadow memory for the VMALLOC area, so that we don't get stale entries for the shadow memory in the top level PGD. Since we are now copying PGDs in two instances, create a helper function named memcpy_pgd() to do the actual copying, and create a helper to map the addresses of VMALLOC_START and VMALLOC_END into the corresponding shadow memory. Co-developed-by: Melon Liu <[email protected]> Cc: [email protected] Fixes: 565cbaa ("ARM: 9202/1: kasan: support CONFIG_KASAN_VMALLOC") Link: https://lore.kernel.org/linux-arm-kernel/[email protected]/ Reported-by: Clement LE GOFFIC <[email protected]> Suggested-by: Mark Rutland <[email protected]> Suggested-by: Russell King (Oracle) <[email protected]> Acked-by: Mark Rutland <[email protected]> Signed-off-by: Linus Walleij <[email protected]> Signed-off-by: Russell King (Oracle) <[email protected]>
1 parent c0b5195 commit d6e6a74

File tree

1 file changed

+29
-4
lines changed

1 file changed

+29
-4
lines changed

arch/arm/mm/ioremap.c

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
*/
2424
#include <linux/module.h>
2525
#include <linux/errno.h>
26+
#include <linux/kasan.h>
2627
#include <linux/mm.h>
2728
#include <linux/vmalloc.h>
2829
#include <linux/io.h>
@@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys,
115116
}
116117
EXPORT_SYMBOL(ioremap_page);
117118

119+
#ifdef CONFIG_KASAN
120+
static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
121+
{
122+
return (unsigned long)kasan_mem_to_shadow((void *)addr);
123+
}
124+
#else
125+
static unsigned long arm_kasan_mem_to_shadow(unsigned long addr)
126+
{
127+
return 0;
128+
}
129+
#endif
130+
131+
static void memcpy_pgd(struct mm_struct *mm, unsigned long start,
132+
unsigned long end)
133+
{
134+
end = ALIGN(end, PGDIR_SIZE);
135+
memcpy(pgd_offset(mm, start), pgd_offset_k(start),
136+
sizeof(pgd_t) * (pgd_index(end) - pgd_index(start)));
137+
}
138+
118139
void __check_vmalloc_seq(struct mm_struct *mm)
119140
{
120141
int seq;
121142

122143
do {
123144
seq = atomic_read(&init_mm.context.vmalloc_seq);
124-
memcpy(pgd_offset(mm, VMALLOC_START),
125-
pgd_offset_k(VMALLOC_START),
126-
sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
127-
pgd_index(VMALLOC_START)));
145+
memcpy_pgd(mm, VMALLOC_START, VMALLOC_END);
146+
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
147+
unsigned long start =
148+
arm_kasan_mem_to_shadow(VMALLOC_START);
149+
unsigned long end =
150+
arm_kasan_mem_to_shadow(VMALLOC_END);
151+
memcpy_pgd(mm, start, end);
152+
}
128153
/*
129154
* Use a store-release so that other CPUs that observe the
130155
* counter's new value are guaranteed to see the results of the

0 commit comments

Comments
 (0)