Commit 4f2e7aa

mdroth authored and bonzini committed
KVM: SEV: Implement gmem hook for initializing private pages
This will handle the RMP table updates needed to put a page into a
private state before mapping it into an SEV-SNP guest.

Reviewed-by: Paolo Bonzini <[email protected]>
Signed-off-by: Michael Roth <[email protected]>
Message-ID: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent e366f92 commit 4f2e7aa
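
As a reading aid, here is a simplified sketch of the call path these hunks
wire up; the function names are taken from the diffs below, but the comment
itself is ours, not kernel source:

/*
 * kvm_gmem_prepare_folio()                  virt/kvm/guest_memfd.c
 *   -> kvm_arch_gmem_prepare()              arch/x86/kvm/x86.c
 *      -> static_call(kvm_x86_gmem_prepare) dispatch to the vendor op
 *         -> sev_gmem_prepare()             arch/x86/kvm/svm/sev.c
 *            -> snp_lookup_rmpentry()       read the current RMP state
 *            -> rmp_make_private()          mark the page private in the RMP
 */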

File tree

6 files changed (+113, -2 lines)

arch/x86/kvm/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -137,6 +137,7 @@ config KVM_AMD_SEV
 	depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
 	select ARCH_HAS_CC_PLATFORM
 	select KVM_GENERIC_PRIVATE_MEM
+	select HAVE_KVM_GMEM_PREPARE
 	help
 	  Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
 	  with Encrypted State (SEV-ES) on AMD processors.
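
Since Kconfig's "select" forces the selected symbol on, any configuration
with KVM_AMD_SEV enabled now also pulls in the gmem-prepare hook support;
an illustrative .config excerpt (not part of the commit):

CONFIG_KVM_AMD_SEV=y
CONFIG_KVM_GENERIC_PRIVATE_MEM=y
CONFIG_HAVE_KVM_GMEM_PREPARE=y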

arch/x86/kvm/svm/sev.c

Lines changed: 98 additions & 0 deletions
@@ -4565,3 +4565,101 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out_no_trace:
 	put_page(pfn_to_page(pfn));
 }
+
+static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
+{
+	kvm_pfn_t pfn = start;
+
+	while (pfn < end) {
+		int ret, rmp_level;
+		bool assigned;
+
+		ret = snp_lookup_rmpentry(pfn, &assigned, &rmp_level);
+		if (ret) {
+			pr_warn_ratelimited("SEV: Failed to retrieve RMP entry: PFN 0x%llx GFN start 0x%llx GFN end 0x%llx RMP level %d error %d\n",
+					    pfn, start, end, rmp_level, ret);
+			return false;
+		}
+
+		if (assigned) {
+			pr_debug("%s: overlap detected, PFN 0x%llx start 0x%llx end 0x%llx RMP level %d\n",
+				 __func__, pfn, start, end, rmp_level);
+			return false;
+		}
+
+		pfn++;
+	}
+
+	return true;
+}
+
+static u8 max_level_for_order(int order)
+{
+	if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+		return PG_LEVEL_2M;
+
+	return PG_LEVEL_4K;
+}
+
+static bool is_large_rmp_possible(struct kvm *kvm, kvm_pfn_t pfn, int order)
+{
+	kvm_pfn_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+
+	/*
+	 * If this is a large folio, and the entire 2M range containing the
+	 * PFN is currently shared, then the entire 2M-aligned range can be
+	 * set to private via a single 2M RMP entry.
+	 */
+	if (max_level_for_order(order) > PG_LEVEL_4K &&
+	    is_pfn_range_shared(pfn_aligned, pfn_aligned + PTRS_PER_PMD))
+		return true;
+
+	return false;
+}
+
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	kvm_pfn_t pfn_aligned;
+	gfn_t gfn_aligned;
+	int level, rc;
+	bool assigned;
+
+	if (!sev_snp_guest(kvm))
+		return 0;
+
+	rc = snp_lookup_rmpentry(pfn, &assigned, &level);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to look up RMP entry: GFN %llx PFN %llx error %d\n",
+				   gfn, pfn, rc);
+		return -ENOENT;
+	}
+
+	if (assigned) {
+		pr_debug("%s: already assigned: gfn %llx pfn %llx max_order %d level %d\n",
+			 __func__, gfn, pfn, max_order, level);
+		return 0;
+	}
+
+	if (is_large_rmp_possible(kvm, pfn, max_order)) {
+		level = PG_LEVEL_2M;
+		pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);
+		gfn_aligned = ALIGN_DOWN(gfn, PTRS_PER_PMD);
+	} else {
+		level = PG_LEVEL_4K;
+		pfn_aligned = pfn;
+		gfn_aligned = gfn;
+	}
+
+	rc = rmp_make_private(pfn_aligned, gfn_to_gpa(gfn_aligned), level, sev->asid, false);
+	if (rc) {
+		pr_err_ratelimited("SEV: Failed to update RMP entry: GFN %llx PFN %llx level %d error %d\n",
+				   gfn, pfn, level, rc);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: updated: gfn %llx pfn %llx pfn_aligned %llx max_order %d level %d\n",
+		 __func__, gfn, pfn, pfn_aligned, max_order, level);
+
+	return 0;
+}
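
To make the 2M-promotion arithmetic concrete, here is a small standalone
sketch of the alignment step; PTRS_PER_PMD, the ALIGN_DOWN macro, and the
sample PFN are illustrative stand-ins for the kernel definitions:

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PMD 512		/* 4K pages per 2M region on x86-64 */
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t pfn = 0x12345;		/* sample PFN inside some 2M region */
	uint64_t pfn_aligned = ALIGN_DOWN(pfn, PTRS_PER_PMD);

	/* sev_gmem_prepare() would scan [pfn_aligned, pfn_aligned + 512)
	 * via is_pfn_range_shared() before using one 2M RMP entry. */
	printf("pfn 0x%llx -> 2M-aligned 0x%llx, range end 0x%llx\n",
	       (unsigned long long)pfn,
	       (unsigned long long)pfn_aligned,
	       (unsigned long long)(pfn_aligned + PTRS_PER_PMD));
	return 0;
}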

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 0 deletions
@@ -5081,6 +5081,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
+
+	.gmem_prepare = sev_gmem_prepare,
 };
 
 /*

arch/x86/kvm/svm/svm.h

Lines changed: 5 additions & 0 deletions
@@ -736,6 +736,7 @@ extern unsigned int max_sev_asid;
 void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_vcpu_unblocking(struct kvm_vcpu *vcpu);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
+int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 #else
 static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) {
 	return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
@@ -752,6 +753,10 @@ static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXI
 static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
 static inline void sev_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
 static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
+static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
+{
+	return 0;
+}
 
 #endif

arch/x86/kvm/x86.c

Lines changed: 5 additions & 0 deletions
@@ -13611,6 +13611,11 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
 
 #ifdef CONFIG_HAVE_KVM_GMEM_PREPARE
+bool kvm_arch_gmem_prepare_needed(struct kvm *kvm)
+{
+	return kvm->arch.vm_type == KVM_X86_SNP_VM;
+}
+
 int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order)
 {
 	return static_call(kvm_x86_gmem_prepare)(kvm, pfn, gfn, max_order);
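
The static_call() above resolves to whatever the active vendor module
installed in kvm_x86_ops (here, svm.c sets .gmem_prepare = sev_gmem_prepare).
A minimal sketch of that indirection, using a plain function pointer in
place of the kernel's static-call machinery (types simplified, names ours):

struct kvm;
typedef unsigned long long gfn_t;
typedef unsigned long long kvm_pfn_t;

struct x86_ops_sketch {
	int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn,
			    int max_order);
};

static struct x86_ops_sketch ops;	/* vendor module fills this in */

int arch_gmem_prepare_sketch(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
			     int max_order)
{
	/* Note the (gfn, pfn) vs. (pfn, gfn) argument swap, mirroring
	 * kvm_arch_gmem_prepare() vs. the vendor op in the hunk above. */
	return ops.gmem_prepare ? ops.gmem_prepare(kvm, pfn, gfn, max_order) : 0;
}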

virt/kvm/guest_memfd.c

Lines changed: 2 additions & 2 deletions
@@ -39,8 +39,8 @@ static int kvm_gmem_prepare_folio(struct inode *inode, pgoff_t index, struct fol
 	gfn = slot->base_gfn + index - slot->gmem.pgoff;
 	rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, compound_order(compound_head(page)));
 	if (rc) {
-		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx, error %d.\n",
-				    index, rc);
+		pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
+				    index, gfn, pfn, rc);
 		return rc;
 	}
 }
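
The max_order forwarded here is compound_order() of the backing folio,
which sev_gmem_prepare() ultimately feeds into max_level_for_order(). A
standalone sketch of that mapping, with the constant 9 standing in for
KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) on x86-64 (512 4K pages per 2M page):

#include <stdio.h>

#define PG_LEVEL_4K 1
#define PG_LEVEL_2M 2
#define HPAGE_2M_GFN_SHIFT 9	/* illustrative stand-in for the kernel macro */

static int max_level_for_order(int order)
{
	return order >= HPAGE_2M_GFN_SHIFT ? PG_LEVEL_2M : PG_LEVEL_4K;
}

int main(void)
{
	/* compound_order() is 0 for a 4K folio and 9 for a 2M folio. */
	printf("order 0 -> level %d\n", max_level_for_order(0));	/* 4K */
	printf("order 9 -> level %d\n", max_level_for_order(9));	/* 2M */
	return 0;
}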
