Skip to content

Commit d77e59a

Browse files
ctmarinas authored and Marc Zyngier committed
arm64: mte: Lock a page for MTE tag initialisation

Initialising the tags and setting PG_mte_tagged flag for a page can race
between multiple set_pte_at() on shared pages or setting the stage 2 pte
via user_mem_abort(). Introduce a new PG_mte_lock flag as PG_arch_3 and
set it before attempting page initialisation. Given that PG_mte_tagged is
never cleared for a page, consider setting this flag to mean page unlocked
and wait on this bit with acquire semantics if the page is locked:

- try_page_mte_tagging() - lock the page for tagging, return true if it
  can be tagged, false if already tagged. No acquire semantics if it
  returns true (PG_mte_tagged not set) as there is no serialisation with
  a previous set_page_mte_tagged().

- set_page_mte_tagged() - set PG_mte_tagged with release semantics.

The two-bit locking is based on Peter Collingbourne's idea.

Signed-off-by: Catalin Marinas <[email protected]>
Signed-off-by: Peter Collingbourne <[email protected]>
Reviewed-by: Steven Price <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Peter Collingbourne <[email protected]>
Reviewed-by: Cornelia Huck <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent ef6458b commit d77e59a

File tree

9 files changed

+60
-29
lines changed

9 files changed

+60
-29
lines changed

arch/arm64/include/asm/mte.h

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ unsigned long mte_copy_tags_to_user(void __user *to, void *from,
2525
unsigned long n);
2626
int mte_save_tags(struct page *page);
2727
void mte_save_page_tags(const void *page_addr, void *tag_storage);
28-
bool mte_restore_tags(swp_entry_t entry, struct page *page);
28+
void mte_restore_tags(swp_entry_t entry, struct page *page);
2929
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
3030
void mte_invalidate_tags(int type, pgoff_t offset);
3131
void mte_invalidate_tags_area(int type);
@@ -36,6 +36,8 @@ void mte_free_tag_storage(char *storage);
3636

3737
/* track which pages have valid allocation tags */
3838
#define PG_mte_tagged PG_arch_2
39+
/* simple lock to avoid multiple threads tagging the same page */
40+
#define PG_mte_lock PG_arch_3
3941

4042
static inline void set_page_mte_tagged(struct page *page)
4143
{
@@ -60,6 +62,33 @@ static inline bool page_mte_tagged(struct page *page)
6062
return ret;
6163
}
6264

65+
/*
66+
* Lock the page for tagging and return 'true' if the page can be tagged,
67+
* 'false' if already tagged. PG_mte_tagged is never cleared and therefore the
68+
* locking only happens once for page initialisation.
69+
*
70+
* The page MTE lock state:
71+
*
72+
* Locked: PG_mte_lock && !PG_mte_tagged
73+
* Unlocked: !PG_mte_lock || PG_mte_tagged
74+
*
75+
* Acquire semantics only if the page is tagged (returning 'false').
76+
*/
77+
static inline bool try_page_mte_tagging(struct page *page)
78+
{
79+
if (!test_and_set_bit(PG_mte_lock, &page->flags))
80+
return true;
81+
82+
/*
83+
* The tags are either being initialised or may have been initialised
84+
* already. Check if the PG_mte_tagged flag has been set or wait
85+
* otherwise.
86+
*/
87+
smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));
88+
89+
return false;
90+
}
91+
6392
void mte_zero_clear_page_tags(void *addr);
6493
void mte_sync_tags(pte_t old_pte, pte_t pte);
6594
void mte_copy_page_tags(void *kto, const void *kfrom);
@@ -86,6 +115,10 @@ static inline bool page_mte_tagged(struct page *page)
86115
{
87116
return false;
88117
}
118+
static inline bool try_page_mte_tagging(struct page *page)
119+
{
120+
return false;
121+
}
89122
static inline void mte_zero_clear_page_tags(void *addr)
90123
{
91124
}

arch/arm64/include/asm/pgtable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1049,8 +1049,8 @@ static inline void arch_swap_invalidate_area(int type)
10491049
#define __HAVE_ARCH_SWAP_RESTORE
10501050
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
10511051
{
1052-
if (system_supports_mte() && mte_restore_tags(entry, &folio->page))
1053-
set_page_mte_tagged(&folio->page);
1052+
if (system_supports_mte())
1053+
mte_restore_tags(entry, &folio->page);
10541054
}
10551055

10561056
#endif /* CONFIG_ARM64_MTE */

arch/arm64/kernel/cpufeature.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2050,7 +2050,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
20502050
* Clear the tags in the zero page. This needs to be done via the
20512051
* linear map which has the Tagged attribute.
20522052
*/
2053-
if (!page_mte_tagged(ZERO_PAGE(0))) {
2053+
if (try_page_mte_tagging(ZERO_PAGE(0))) {
20542054
mte_clear_page_tags(lm_alias(empty_zero_page));
20552055
set_page_mte_tagged(ZERO_PAGE(0));
20562056
}

arch/arm64/kernel/mte.c

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -41,20 +41,14 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte,
4141
if (check_swap && is_swap_pte(old_pte)) {
4242
swp_entry_t entry = pte_to_swp_entry(old_pte);
4343

44-
if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) {
45-
set_page_mte_tagged(page);
46-
return;
47-
}
44+
if (!non_swap_entry(entry))
45+
mte_restore_tags(entry, page);
4846
}
4947

5048
if (!pte_is_tagged)
5149
return;
5250

53-
/*
54-
* Test PG_mte_tagged again in case it was racing with another
55-
* set_pte_at().
56-
*/
57-
if (!page_mte_tagged(page)) {
51+
if (try_page_mte_tagging(page)) {
5852
mte_clear_page_tags(page_address(page));
5953
set_page_mte_tagged(page);
6054
}

arch/arm64/kvm/guest.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1068,15 +1068,19 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
10681068
clear_user(tags, MTE_GRANULES_PER_PAGE);
10691069
kvm_release_pfn_clean(pfn);
10701070
} else {
1071+
/*
1072+
* Only locking to serialise with a concurrent
1073+
* set_pte_at() in the VMM but still overriding the
1074+
* tags, hence ignoring the return value.
1075+
*/
1076+
try_page_mte_tagging(page);
10711077
num_tags = mte_copy_tags_from_user(maddr, tags,
10721078
MTE_GRANULES_PER_PAGE);
10731079

1074-
/*
1075-
* Set the flag after checking the write
1076-
* completed fully
1077-
*/
1078-
if (num_tags == MTE_GRANULES_PER_PAGE)
1079-
set_page_mte_tagged(page);
1080+
/* uaccess failed, don't leave stale tags */
1081+
if (num_tags != MTE_GRANULES_PER_PAGE)
1082+
mte_clear_page_tags(page);
1083+
set_page_mte_tagged(page);
10801084

10811085
kvm_release_pfn_dirty(pfn);
10821086
}

arch/arm64/kvm/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1101,7 +1101,7 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
11011101
return;
11021102

11031103
for (i = 0; i < nr_pages; i++, page++) {
1104-
if (!page_mte_tagged(page)) {
1104+
if (try_page_mte_tagging(page)) {
11051105
mte_clear_page_tags(page_address(page));
11061106
set_page_mte_tagged(page);
11071107
}

arch/arm64/mm/copypage.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,8 @@ void copy_highpage(struct page *to, struct page *from)
2323

2424
if (system_supports_mte() && page_mte_tagged(from)) {
2525
page_kasan_tag_reset(to);
26+
/* It's a new page, shouldn't have been tagged yet */
27+
WARN_ON_ONCE(!try_page_mte_tagging(to));
2628
mte_copy_page_tags(kto, kfrom);
2729
set_page_mte_tagged(to);
2830
}

arch/arm64/mm/fault.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -933,6 +933,8 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
933933

934934
void tag_clear_highpage(struct page *page)
935935
{
936+
/* Newly allocated page, shouldn't have been tagged yet */
937+
WARN_ON_ONCE(!try_page_mte_tagging(page));
936938
mte_zero_clear_page_tags(page_address(page));
937939
set_page_mte_tagged(page);
938940
}

arch/arm64/mm/mteswap.c

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -46,21 +46,17 @@ int mte_save_tags(struct page *page)
4646
return 0;
4747
}
4848

49-
bool mte_restore_tags(swp_entry_t entry, struct page *page)
49+
void mte_restore_tags(swp_entry_t entry, struct page *page)
5050
{
5151
void *tags = xa_load(&mte_pages, entry.val);
5252

5353
if (!tags)
54-
return false;
54+
return;
5555

56-
/*
57-
* Test PG_mte_tagged again in case it was racing with another
58-
* set_pte_at().
59-
*/
60-
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
56+
if (try_page_mte_tagging(page)) {
6157
mte_restore_page_tags(page_address(page), tags);
62-
63-
return true;
58+
set_page_mte_tagged(page);
59+
}
6460
}
6561

6662
void mte_invalidate_tags(int type, pgoff_t offset)

0 commit comments

Comments (0)