
Commit 7b306df

tlendacky authored and bp3tk0v committed
x86/sev: Evict cache lines during SNP memory validation
An SNP cache coherency vulnerability requires a cache line eviction mitigation when validating memory after a page state change to private. The specific mitigation is to touch the first and last byte of each 4K page that is being validated. There is no need to perform the mitigation when performing a page state change to shared and rescinding validation.

CPUID Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO bit that, when set, indicates that the software mitigation for this vulnerability is not needed.

Implement the mitigation and invoke it when validating memory (making it private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP guest is vulnerable.

Co-developed-by: Michael Roth <[email protected]>
Signed-off-by: Michael Roth <[email protected]>
Signed-off-by: Tom Lendacky <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Acked-by: Thomas Gleixner <[email protected]>
1 parent 98e8f2c commit 7b306df
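As an illustrative sketch only (not part of the patch): the two pieces the commit wires together are a check of CPUID Fn8000001F_EBX[31] and, when that bit is clear, a read of the first and last byte of every just-validated 4K page. The helper names below are hypothetical, and the snippet assumes a GCC/Clang environment with the <cpuid.h> wrapper; the real kernel paths are in the diffs that follow.

#include <stdint.h>
#include <cpuid.h>              /* __get_cpuid() wrapper provided by GCC/Clang */

#define PAGE_SIZE 4096

/* Hypothetical helper: does this SNP guest need the software mitigation? */
static int needs_coherency_mitigation(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Fn8000001F_EBX[31] set (COHERENCY_SFW_NO) means the mitigation is not needed. */
    if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
        return 1;               /* leaf unavailable: assume the guest is affected */

    return !(ebx & (1u << 31));
}

/*
 * Hypothetical helper mirroring sev_evict_cache(): touch the first and last
 * byte of each 4K page so the cache lines for that page are evicted.
 */
static void evict_cache_lines(void *va, int npages)
{
    volatile uint8_t sink;
    uint8_t *bytes = va;

    for (int i = 0; i < npages; i++) {
        sink = bytes[i * PAGE_SIZE];
        sink = bytes[i * PAGE_SIZE + PAGE_SIZE - 1];
    }
}

In the patch itself, the CPUID check is cached as the synthetic feature bit X86_FEATURE_COHERENCY_SFW_NO (set in arch/x86/boot/cpuflags.c and arch/x86/kernel/cpu/scattered.c), and the page-touching loop is sev_evict_cache() in arch/x86/include/asm/sev.h.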

6 files changed: +62 −0 lines changed


arch/x86/boot/cpuflags.c

Lines changed: 13 additions & 0 deletions
@@ -106,5 +106,18 @@ void get_cpuflags(void)
 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
 			      &cpu.flags[1]);
 		}
+
+		if (max_amd_level >= 0x8000001f) {
+			u32 ebx;
+
+			/*
+			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+			 * the virtualization flags entry (word 8) and set by
+			 * scattered.c, so the bit needs to be explicitly set.
+			 */
+			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+			if (ebx & BIT(31))
+				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+		}
 	}
 }

arch/x86/boot/startup/sev-shared.c

Lines changed: 7 additions & 0 deletions
@@ -810,6 +810,13 @@ static void __head pvalidate_4k_page(unsigned long vaddr, unsigned long paddr,
 		if (ret)
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 	}
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)vaddr, 1);
 }
 
 /*

arch/x86/coco/sev/core.c

Lines changed: 21 additions & 0 deletions
@@ -358,10 +358,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)
 
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
+	struct psc_entry *e;
+	unsigned int i;
+
 	if (snp_vmpl)
 		svsm_pval_pages(desc);
 	else
 		pval_pages(desc);
+
+	/*
+	 * If not affected by the cache-coherency vulnerability there is no need
+	 * to perform the cache eviction mitigation.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		return;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		/*
+		 * If validating memory (making it private) perform the cache
+		 * eviction mitigation.
+		 */
+		if (e->operation == SNP_PAGE_STATE_PRIVATE)
+			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+	}
 }
 
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
@@ -218,6 +218,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 2) /* "ept" Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* Xen paravirtual guest */

arch/x86/include/asm/sev.h

Lines changed: 19 additions & 0 deletions
@@ -619,6 +619,24 @@ int rmp_make_shared(u64 pfn, enum pg_level level);
 void snp_leak_pages(u64 pfn, unsigned int npages);
 void kdump_sev_callback(void);
 void snp_fixup_e820_tables(void);
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
 #else
 static inline bool snp_probe_rmptable_info(void) { return false; }
 static inline int snp_rmptable_init(void) { return -ENOSYS; }
@@ -634,6 +652,7 @@ static inline int rmp_make_shared(u64 pfn, enum pg_level level) { return -ENODEV
 static inline void snp_leak_pages(u64 pfn, unsigned int npages) {}
 static inline void kdump_sev_callback(void) { }
 static inline void snp_fixup_e820_tables(void) {}
+static inline void sev_evict_cache(void *va, int npages) {}
 #endif
 
 #endif
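A minimal usage illustration, mirroring the callers shown above (the pfn variable here is hypothetical): evict only when the COHERENCY_SFW_NO feature bit is clear, passing 1 page for a 4K entry or 512 for a 2M entry.

	/* Illustrative only: after validating a single 4K page at 'pfn' (hypothetical). */
	if (!cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
		sev_evict_cache(pfn_to_kaddr(pfn), 1);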

arch/x86/kernel/cpu/scattered.c

Lines changed: 1 addition & 0 deletions
@@ -48,6 +48,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_PROC_FEEDBACK,		CPUID_EDX,	11, 0x80000007, 0 },
 	{ X86_FEATURE_AMD_FAST_CPPC,		CPUID_EDX,	15, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,			CPUID_EBX,	 6, 0x80000008, 0 },
+	{ X86_FEATURE_COHERENCY_SFW_NO,		CPUID_EBX,	31, 0x8000001f, 0 },
 	{ X86_FEATURE_SMBA,			CPUID_EBX,	 2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,			CPUID_EBX,	 3, 0x80000020, 0 },
 	{ X86_FEATURE_TSA_SQ_NO,		CPUID_ECX,	 1, 0x80000021, 0 },
