
Commit 21d9bb4

suryasaimadhu authored and Ingo Molnar committed
x86/mm: Make the SME mask a u64
The SME encryption mask is for masking 64-bit pagetable entries. It being an unsigned long works fine on X86_64, but on 32-bit builds it truncates bits, leading to Xen guests crashing very early.

And regardless, the whole SME mask handling shouldn't have leaked into 32-bit, because SME is an X86_64-only feature. So, first make the mask u64. And then, add trivial 32-bit versions of the __sme_* macros so that nothing happens there.

Reported-and-tested-by: Boris Ostrovsky <[email protected]>
Tested-by: Brijesh Singh <[email protected]>
Signed-off-by: Borislav Petkov <[email protected]>
Acked-by: Tom Lendacky <[email protected]>
Acked-by: Thomas Gleixner <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas <[email protected]>
Fixes: 21729f8 ("x86/mm: Provide general kernel support for memory encryption")
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
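The truncation is visible in the old macro body, ((unsigned long)(x) & ~sme_me_mask): on a 32-bit build, both the cast and the mask are 32 bits wide, so the top half of a 64-bit PAE pagetable entry is silently discarded. A minimal userspace sketch of the effect (the PTE value is made up, and uint32_t stands in for a 32-bit build's unsigned long):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pte = 0x8000000012345067ULL;	/* made-up 64-bit PAE pagetable entry */
	uint32_t sme_me_mask = 0;		/* models the old unsigned long mask on 32-bit */

	/* Old macro body: ((unsigned long)(x) & ~sme_me_mask) -- the cast drops the high word */
	uint32_t old_clr = (uint32_t)pte & ~sme_me_mask;

	/* New macro body: ((x) & ~sme_me_mask) with a u64 mask -- the PTE survives intact */
	uint64_t new_clr = pte & ~(uint64_t)0;

	printf("old __sme_clr(): %#llx\n", (unsigned long long)old_clr);	/* 0x12345067 */
	printf("new __sme_clr(): %#llx\n", (unsigned long long)new_clr);	/* 0x8000000012345067 */
	return 0;
}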
1 parent 1c9fe44 commit 21d9bb4

File tree

3 files changed (+12 lines, -7 lines)

arch/x86/include/asm/mem_encrypt.h

Lines changed: 2 additions & 2 deletions
@@ -21,7 +21,7 @@

 #ifdef CONFIG_AMD_MEM_ENCRYPT

-extern unsigned long sme_me_mask;
+extern u64 sme_me_mask;

 void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
                          unsigned long decrypted_kernel_vaddr,
@@ -49,7 +49,7 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

 #else	/* !CONFIG_AMD_MEM_ENCRYPT */

-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL

 static inline void __init sme_early_encrypt(resource_size_t paddr,
                                             unsigned long size) { }
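The 0UL → 0ULL change in the stub above is not cosmetic: even without the explicit cast the old __sme_* macros had, a 32-bit mask next to a 64-bit value truncates it, because ~0UL on a 32-bit build is 32-bit all-ones that zero-extends before the AND. A quick sketch (made-up PTE value, uint32_t modeling a 32-bit unsigned long):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pte = 0x8000000012345067ULL;	/* made-up 64-bit PAE pagetable entry */
	uint32_t mask_ul  = 0;			/* 0UL on a 32-bit build: 32 bits wide */
	uint64_t mask_ull = 0;			/* 0ULL: 64 bits wide everywhere */

	/* ~mask_ul is 32-bit all-ones; zero-extension wipes the PTE's top half */
	printf("pte & ~0UL : %#llx\n", (unsigned long long)(pte & ~mask_ul));
	/* ~mask_ull is 64-bit all-ones; the PTE is preserved */
	printf("pte & ~0ULL: %#llx\n", (unsigned long long)(pte & ~mask_ull));
	return 0;
}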

arch/x86/mm/mem_encrypt.c

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ static char sme_cmdline_off[] __initdata = "off";
  * reside in the .data section so as not to be zeroed out when the .bss
  * section is later cleared.
  */
-unsigned long sme_me_mask __section(.data) = 0;
+u64 sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);

 /* Buffer used for early in-place encryption by BSP, no locking needed */

include/linux/mem_encrypt.h

Lines changed: 9 additions & 4 deletions
@@ -21,7 +21,7 @@

 #else	/* !CONFIG_ARCH_HAS_MEM_ENCRYPT */

-#define sme_me_mask	0UL
+#define sme_me_mask	0ULL

 #endif	/* CONFIG_ARCH_HAS_MEM_ENCRYPT */

@@ -30,18 +30,23 @@ static inline bool sme_active(void)
 	return !!sme_me_mask;
 }

-static inline unsigned long sme_get_me_mask(void)
+static inline u64 sme_get_me_mask(void)
 {
 	return sme_me_mask;
 }

+#ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
  * the encryption mask from a value (e.g. when dealing with pagetable
  * entries).
  */
-#define __sme_set(x)	((unsigned long)(x) | sme_me_mask)
-#define __sme_clr(x)	((unsigned long)(x) & ~sme_me_mask)
+#define __sme_set(x)	((x) | sme_me_mask)
+#define __sme_clr(x)	((x) & ~sme_me_mask)
+#else
+#define __sme_set(x)	(x)
+#define __sme_clr(x)	(x)
+#endif

 #endif	/* __ASSEMBLY__ */
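For context, a minimal userspace sketch of how the fixed macros behave under each configuration. The PTE value and the C-bit position are made up (the real bit position comes from CPUID), and u64 here is a stand-in typedef, not the kernel's; compile with -DCONFIG_AMD_MEM_ENCRYPT to exercise the 64-bit variants:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;		/* stand-in for the kernel's u64 */

static u64 sme_me_mask;		/* 0 until SME is detected and enabled */

#ifdef CONFIG_AMD_MEM_ENCRYPT
/* 64-bit builds: plain u64 OR/AND-NOT, no truncating cast */
#define __sme_set(x)	((x) | sme_me_mask)
#define __sme_clr(x)	((x) & ~sme_me_mask)
#else
/* 32-bit builds never set CONFIG_AMD_MEM_ENCRYPT, so these are no-ops */
#define __sme_set(x)	(x)
#define __sme_clr(x)	(x)
#endif

int main(void)
{
	u64 pte = 0x12345067ULL;	/* made-up PTE: pfn plus prot bits */

	sme_me_mask = 1ULL << 47;	/* hypothetical C-bit position */
	printf("encrypted PTE: %#llx\n",
	       (unsigned long long)__sme_set(pte));
	printf("decrypted PTE: %#llx\n",
	       (unsigned long long)__sme_clr(__sme_set(pte)));
	return 0;
}

With the stubs selected, both lines print the original PTE unchanged, which is exactly the "nothing happens there" behavior the commit message describes for 32-bit.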
