Commit c436500

Merge branch 'for-next/mte' into for-next/core

* for-next/mte:
  arm64: kasan: Revert "arm64: mte: reset the page tag in page->flags"
  mm: kasan: Skip page unpoisoning only if __GFP_SKIP_KASAN_UNPOISON
  mm: kasan: Skip unpoisoning of user pages
  mm: kasan: Ensure the tags are visible before the tag in page->flags

2 parents 03939cf + 2079454

8 files changed, 13 insertions(+), 44 deletions(-)
arch/arm64/kernel/hibernate.c
Lines changed: 0 additions & 5 deletions

@@ -300,11 +300,6 @@ static void swsusp_mte_restore_tags(void)
 		unsigned long pfn = xa_state.xa_index;
 		struct page *page = pfn_to_online_page(pfn);
 
-		/*
-		 * It is not required to invoke page_kasan_tag_reset(page)
-		 * at this point since the tags stored in page->flags are
-		 * already restored.
-		 */
 		mte_restore_page_tags(page_address(page), tags);
 
 		mte_free_tag_storage(tags);

arch/arm64/kernel/mte.c
Lines changed: 0 additions & 9 deletions

@@ -48,15 +48,6 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
 	if (!pte_is_tagged)
 		return;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_clear_page_tags(page_address(page));
 }
 
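The same reset-and-barrier boilerplate is removed from arch/arm64/mm/copypage.c and arch/arm64/mm/mteswap.c below; the tag in page->flags is now reset centrally in post_alloc_hook() when KASAN unpoisoning is skipped (see the mm/page_alloc.c hunk), which is what makes these reverts safe. As a minimal sketch of the ordering the deleted lines enforced (the function name is illustrative; the calls are the ones the deleted code used):

	/* Sketch only: the old per-callsite pattern (writer side). */
	static void old_tag_writer(struct page *page)
	{
		page_kasan_tag_reset(page);	/* match-all tag into page->flags */
		smp_wmb();			/* flags visible before tags change */
		mte_clear_page_tags(page_address(page));
	}

	/*
	 * Reader side: another thread may read page->flags, fold the tag
	 * into a pointer via page_address() and dereference it. That access
	 * carries an address dependency on the flags read, so only the
	 * writer needed an explicit barrier.
	 */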

arch/arm64/mm/copypage.c
Lines changed: 0 additions & 9 deletions

@@ -23,15 +23,6 @@ void copy_highpage(struct page *to, struct page *from)
 
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
-		page_kasan_tag_reset(to);
-		/*
-		 * We need smp_wmb() in between setting the flags and clearing the
-		 * tags because if another thread reads page->flags and builds a
-		 * tagged address out of it, there is an actual dependency to the
-		 * memory access, but on the current thread we do not guarantee that
-		 * the new page->flags are visible before the tags were updated.
-		 */
-		smp_wmb();
 		mte_copy_page_tags(kto, kfrom);
 	}
 }

arch/arm64/mm/fault.c
Lines changed: 0 additions & 1 deletion

@@ -927,6 +927,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 void tag_clear_highpage(struct page *page)
 {
 	mte_zero_clear_page_tags(page_address(page));
-	page_kasan_tag_reset(page);
 	set_bit(PG_mte_tagged, &page->flags);
 }

arch/arm64/mm/mteswap.c
Lines changed: 0 additions & 9 deletions

@@ -53,15 +53,6 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return false;
 
-	page_kasan_tag_reset(page);
-	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
-	 */
-	smp_wmb();
 	mte_restore_page_tags(page_address(page), tags);
 
 	return true;

include/linux/gfp.h
Lines changed: 1 addition & 1 deletion

@@ -348,7 +348,7 @@ struct vm_area_struct;
 #define GFP_DMA32	__GFP_DMA32
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE | \
-			 __GFP_SKIP_KASAN_POISON)
+			 __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
 #define GFP_TRANSHUGE_LIGHT	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
 			 __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
 #define GFP_TRANSHUGE	(GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
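GFP_HIGHUSER_MOVABLE is the allocation mask for user address-space pages, so user pages now skip KASAN unpoisoning at allocation time as well as poisoning at free time; their tags are handled by the MTE paths above instead. An illustrative sketch of an allocation site (not a line from this series; vma and vaddr are assumed to be in scope):

	/*
	 * With this change the mask carries both skip flags, so
	 * post_alloc_hook() bypasses kasan_unpoison_pages() and only
	 * resets the KASAN tag in page->flags (see mm/page_alloc.c below).
	 */
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO,
					   vma, vaddr);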

mm/kasan/common.c
Lines changed: 2 additions & 1 deletion

@@ -108,9 +108,10 @@ void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 		return;
 
 	tag = kasan_random_tag();
+	kasan_unpoison(set_tag(page_address(page), tag),
+		       PAGE_SIZE << order, init);
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
 }
 
 void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
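This is the "Ensure the tags are visible before the tag in page->flags" patch from the merge: the memory tags are now written through a pointer that already carries the new tag, and only then is the tag published in page->flags. Schematically (comments only, not kernel code):

	/*
	 * Before:  page_kasan_tag_set(page + i, tag);    (publish tag first)
	 *          kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
	 *
	 * After:   kasan_unpoison(set_tag(page_address(page), tag),
	 *                         PAGE_SIZE << order, init);
	 *          page_kasan_tag_set(page + i, tag);    (publish tag last)
	 *
	 * A thread that reads the new tag from page->flags and builds a
	 * tagged pointer from it should therefore find the matching tags
	 * already in memory.
	 */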

mm/page_alloc.c
Lines changed: 10 additions & 9 deletions

@@ -2361,7 +2361,7 @@ static inline bool check_new_pcp(struct page *page, unsigned int order)
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
+static inline bool should_skip_kasan_unpoison(gfp_t flags)
 {
 	/* Don't skip if a software KASAN mode is enabled. */
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
@@ -2373,12 +2373,10 @@ static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
 		return true;
 
 	/*
-	 * With hardware tag-based KASAN enabled, skip if either:
-	 *
-	 * 1. Memory tags have already been cleared via tag_clear_highpage().
-	 * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON.
+	 * With hardware tag-based KASAN enabled, skip if this has been
+	 * requested via __GFP_SKIP_KASAN_UNPOISON.
 	 */
-	return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON);
+	return flags & __GFP_SKIP_KASAN_UNPOISON;
 }
 
 static inline bool should_skip_init(gfp_t flags)
@@ -2397,6 +2395,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
 			!should_skip_init(gfp_flags);
 	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+	int i;
 
 	set_page_private(page, 0);
 	set_page_refcounted(page);
@@ -2422,22 +2421,24 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * should be initialized as well).
 	 */
 	if (init_tags) {
-		int i;
-
 		/* Initialize both memory and tags. */
 		for (i = 0; i != 1 << order; ++i)
 			tag_clear_highpage(page + i);
 
 		/* Note that memory is already initialized by the loop above. */
 		init = false;
 	}
-	if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) {
+	if (!should_skip_kasan_unpoison(gfp_flags)) {
 		/* Unpoison shadow memory or set memory tags. */
 		kasan_unpoison_pages(page, order, init);
 
 		/* Note that memory is already initialized by KASAN. */
 		if (kasan_has_integrated_init())
			init = false;
+	} else {
+		/* Ensure page_address() dereferencing does not fault. */
+		for (i = 0; i != 1 << order; ++i)
+			page_kasan_tag_reset(page + i);
 	}
 	/* If memory is still not initialized, do it now. */
 	if (init)
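With init_tags dropped from the check, the decision now depends only on the gfp mask: user allocations that use __GFP_ZEROTAGS also carry __GFP_SKIP_KASAN_UNPOISON via GFP_HIGHUSER_MOVABLE, so the flag alone suffices. A sketch of the resulting helper; the kasan_hw_tags_enabled() test in the middle is not shown in the hunks above and is assumed from the surrounding kernel source:

	static inline bool should_skip_kasan_unpoison(gfp_t flags)
	{
		/* Don't skip if a software KASAN mode is enabled. */
		if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
		    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			return false;

		/* Skip, if hardware tag-based KASAN is not enabled. */
		if (!kasan_hw_tags_enabled())
			return true;

		/*
		 * With hardware tag-based KASAN enabled, skip if this has
		 * been requested via __GFP_SKIP_KASAN_UNPOISON.
		 */
		return flags & __GFP_SKIP_KASAN_UNPOISON;
	}

When the skip branch in post_alloc_hook() is taken, the new else clause resets the KASAN tag in page->flags to the match-all value, so pointers built via page_address() dereference without faulting even though no fresh tags were assigned.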
