
Commit be1bd4c

pgonda authored and sean-jc committed
KVM: selftests: Allow tagging protected memory in guest page tables
Add support for tagging and untagging guest physical addresses, e.g. to allow x86's SEV and TDX guests to embed shared vs. private information in the GPA. SEV (encryption, a.k.a. C-bit) and TDX (shared, a.k.a. S-bit) steal bits from the guest's physical address space for CPU metadata, i.e. effectively alias the "real" GPA.

Implement generic "tagging" so that the shared vs. private metadata can be managed by x86 without bleeding too many details into common code.

Cc: Paolo Bonzini <[email protected]>
Cc: Sean Christopherson <[email protected]>
Cc: Vishal Annapurve <[email protected]>
Cc: Ackerly Tng <[email protected]>
Cc: Andrew Jones <[email protected]>
Cc: Tom Lendacky <[email protected]>
Cc: Michael Roth <[email protected]>
Tested-by: Carlos Bilbao <[email protected]>
Originally-by: Michael Roth <[email protected]>
Signed-off-by: Peter Gonda <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 31e00da commit be1bd4c

File tree

7 files changed: +86 -1 lines changed

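To make the aliasing concrete (this example is not part of the commit), the standalone C program below models a hypothetical C-bit at GPA bit 51: tagging a "real" GPA produces the private alias, and a mask-based untag, mirroring the vm_untag_gpa() helper added below, recovers the original address.

#include <assert.h>
#include <stdint.h>

/* Hypothetical tag position; real hardware reports the C-bit via CPUID. */
#define C_BIT        (1ULL << 51)
#define GPA_TAG_MASK C_BIT

/* Mirrors the vm_untag_gpa() helper added in kvm_util_base.h below. */
static uint64_t untag_gpa(uint64_t gpa)
{
        return gpa & ~GPA_TAG_MASK;
}

int main(void)
{
        uint64_t real_gpa = 0x100000;
        uint64_t private_alias = real_gpa | C_BIT;

        /* Both views alias the same "real" GPA once the tag is stripped. */
        assert(untag_gpa(private_alias) == real_gpa);
        assert(untag_gpa(real_gpa) == real_gpa);
        return 0;
}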

tools/testing/selftests/kvm/include/aarch64/kvm_util_arch.h

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H

tools/testing/selftests/kvm/include/kvm_util_base.h

Lines changed: 13 additions & 0 deletions
@@ -18,9 +18,11 @@
 #include <linux/types.h>
 
 #include <asm/atomic.h>
+#include <asm/kvm.h>
 
 #include <sys/ioctl.h>
 
+#include "kvm_util_arch.h"
 #include "sparsebit.h"
 
 /*
@@ -113,6 +115,9 @@ struct kvm_vm {
         vm_vaddr_t idt;
         vm_vaddr_t handlers;
         uint32_t dirty_ring_size;
+        uint64_t gpa_tag_mask;
+
+        struct kvm_vm_arch arch;
 
         /* Cache of information for binary stats interface */
         int stats_fd;
@@ -601,6 +606,12 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
 
+
+static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+        return gpa & ~vm->gpa_tag_mask;
+}
+
 void vcpu_run(struct kvm_vcpu *vcpu);
 int _vcpu_run(struct kvm_vcpu *vcpu);
 
@@ -1113,4 +1124,6 @@ void kvm_selftest_arch_init(void);
 
 void kvm_arch_vm_post_create(struct kvm_vm *vm);
 
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);
+
 #endif /* SELFTEST_KVM_UTIL_BASE_H */
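A hedged sketch of how an x86 VM-type setup path might populate the new fields so that vm_untag_gpa() strips the C-bit; the helper name and the CPUID plumbing are hypothetical, since this commit only adds the storage and the untag helper.

/* Hypothetical setup, assuming the kvm_util_base.h declarations above. */
static void sev_vm_init_gpa_tagging(struct kvm_vm *vm, int cbit_position)
{
        /* On AMD hardware, the C-bit position comes from CPUID 0x8000001F.EBX[5:0]. */
        vm->arch.c_bit = 1ULL << cbit_position;
        vm->gpa_tag_mask = vm->arch.c_bit;
}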

tools/testing/selftests/kvm/include/riscv/kvm_util_arch.h

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H

tools/testing/selftests/kvm/include/s390x/kvm_util_arch.h

Lines changed: 7 additions & 0 deletions
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+struct kvm_vm_arch {};
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H

tools/testing/selftests/kvm/include/x86_64/kvm_util_arch.h

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UTIL_ARCH_H
+#define SELFTEST_KVM_UTIL_ARCH_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+struct kvm_vm_arch {
+        uint64_t c_bit;
+        uint64_t s_bit;
+};
+
+static inline bool __vm_arch_has_protected_memory(struct kvm_vm_arch *arch)
+{
+        return arch->c_bit || arch->s_bit;
+}
+
+#define vm_arch_has_protected_memory(vm) \
+        __vm_arch_has_protected_memory(&(vm)->arch)
+
+#endif // SELFTEST_KVM_UTIL_ARCH_H
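Because common code only sees the opaque struct kvm_vm_arch, the vm_arch_has_protected_memory() macro is what lets generic helpers branch on protection without x86 details leaking out. A minimal, hypothetical common-code usage (the function below is illustrative, not from this commit):

/* Hypothetical common-code helper: skip byte-level validation of
 * protected pages, since their host-visible contents are ciphertext.
 */
static bool can_validate_page(struct kvm_vm *vm, vm_paddr_t paddr)
{
        if (!vm_arch_has_protected_memory(vm))
                return true;
        return !vm_is_gpa_protected(vm, paddr);
}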

tools/testing/selftests/kvm/lib/kvm_util.c

Lines changed: 17 additions & 0 deletions
@@ -1546,6 +1546,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
         struct userspace_mem_region *region;
 
+        gpa = vm_untag_gpa(vm, gpa);
+
         region = userspace_mem_region_find(vm, gpa, gpa);
         if (!region) {
                 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
@@ -2254,3 +2256,18 @@ void __attribute((constructor)) kvm_selftest_init(void)
 
         kvm_selftest_arch_init();
 }
+
+bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
+{
+        sparsebit_idx_t pg = 0;
+        struct userspace_mem_region *region;
+
+        if (!vm_arch_has_protected_memory(vm))
+                return false;
+
+        region = userspace_mem_region_find(vm, paddr, paddr);
+        TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);
+
+        pg = paddr >> vm->page_shift;
+        return sparsebit_is_set(region->protected_phy_pages, pg);
+}
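vm_is_gpa_protected() is the consumer of each region's protected_phy_pages sparsebit; the producer side (marking pages at allocation time) lands elsewhere in this series. A hedged sketch of what that marking could look like, with a hypothetical helper name:

/* Hypothetical: record that an allocated guest page is private/protected.
 * Assumes it lives in kvm_util.c, where userspace_mem_region_find() is visible.
 */
static void vm_mark_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr)
{
        struct userspace_mem_region *region;

        region = userspace_mem_region_find(vm, paddr, paddr);
        TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr);

        sparsebit_set(region->protected_phy_pages, paddr >> vm->page_shift);
}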

tools/testing/selftests/kvm/lib/x86_64/processor.c

Lines changed: 14 additions & 1 deletion
@@ -157,6 +157,8 @@ static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
 {
         uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level);
 
+        paddr = vm_untag_gpa(vm, paddr);
+
         if (!(*pte & PTE_PRESENT_MASK)) {
                 *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
                 if (current_level == target_level)
@@ -200,6 +202,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
                     "Physical address beyond maximum supported,\n"
                     "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                     paddr, vm->max_gfn, vm->page_size);
+        TEST_ASSERT(vm_untag_gpa(vm, paddr) == paddr,
+                    "Unexpected bits in paddr: %lx", paddr);
 
         /*
          * Allocate upper level page tables, if not already present.  Return
@@ -222,6 +226,15 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
         TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
                     "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
         *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
+
+        /*
+         * Neither SEV nor TDX supports shared page tables, so only the final
+         * leaf PTE needs to manually set the C/S-bit.
+         */
+        if (vm_is_gpa_protected(vm, paddr))
+                *pte |= vm->arch.c_bit;
+        else
+                *pte |= vm->arch.s_bit;
 }
 
 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -496,7 +509,7 @@ vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
          * No need for a hugepage mask on the PTE, x86-64 requires the "unused"
          * address bits to be zero.
          */
-        return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level));
+        return vm_untag_gpa(vm, PTE_GET_PA(*pte)) | (gva & ~HUGEPAGE_MASK(level));
 }
 
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
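A standalone numeric check of the leaf-PTE tagging in __virt_pg_map() above, assuming an illustrative C-bit at bit 51 and a 4 KiB-aligned page (positions and values are not from the commit):

#include <assert.h>
#include <stdint.h>

#define PTE_PRESENT_MASK  (1ULL << 0)
#define PTE_WRITABLE_MASK (1ULL << 1)

int main(void)
{
        uint64_t c_bit = 1ULL << 51;    /* illustrative position */
        uint64_t paddr = 0x100000;      /* 4 KiB-aligned guest page */
        uint64_t pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | paddr;

        pte |= c_bit;   /* page is protected, so the leaf PTE carries the C-bit */

        /* Stripping tag and flag bits recovers the "real" GPA, as
         * addr_arch_gva2gpa() does via vm_untag_gpa().
         */
        assert((pte & ~(c_bit | PTE_PRESENT_MASK | PTE_WRITABLE_MASK)) == paddr);
        return 0;
}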
