Skip to content

Commit d45111e

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull x86 kvm fixes from Paolo Bonzini:
 "Many small fixes that accumulated while I was on vacation...

  - Fixup missed comments from the REMOVED_SPTE => FROZEN_SPTE rename

  - Ensure a root is successfully loaded when pre-faulting SPTEs

  - Grab kvm->srcu when handling KVM_SET_VCPU_EVENTS to guard against
    accessing memslots if toggling SMM happens to force a VM-Exit

  - Emulate MSR_{FS,GS}_BASE on SVM even though interception is always
    disabled, so that KVM does the right thing if KVM's emulator
    encounters {RD,WR}MSR

  - Explicitly clear BUS_LOCK_DETECT from KVM's caps on AMD, as KVM
    doesn't yet virtualize BUS_LOCK_DETECT on AMD

  - Cleanup the help message for CONFIG_KVM_AMD_SEV, and call out that
    KVM now supports SEV-SNP too

  - Specialize return value of KVM_CHECK_EXTENSION(KVM_CAP_READONLY_MEM),
    based on VM type

  - Remove unnecessary dependency on CONFIG_HIGH_RES_TIMERS

  - Note an RCU quiescent state on guest exit. This avoids a call to
    rcu_core() if there was a grace period request while guest was
    running"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Remove HIGH_RES_TIMERS dependency
  kvm: Note an RCU quiescent state on guest exit
  KVM: x86: Only advertise KVM_CAP_READONLY_MEM when supported by VM
  KVM: SEV: Update KVM_AMD_SEV Kconfig entry and mention SEV-SNP
  KVM: SVM: Don't advertise Bus Lock Detect to guest if SVM support is missing
  KVM: SVM: fix emulation of msr reads/writes of MSR_FS_BASE and MSR_GS_BASE
  KVM: x86: Acquire kvm->srcu when handling KVM_SET_VCPU_EVENTS
  KVM: x86/mmu: Check that root is valid/loaded when pre-faulting SPTEs
  KVM: x86/mmu: Fixup comments missed by the REMOVED_SPTE=>FROZEN_SPTE rename
2 parents 788220e + 59cbd4e commit d45111e

File tree

9 files changed

+47
-16
lines changed

9 files changed

+47
-16
lines changed

arch/x86/kvm/Kconfig

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@ if VIRTUALIZATION
1919

2020
config KVM
2121
tristate "Kernel-based Virtual Machine (KVM) support"
22-
depends on HIGH_RES_TIMERS
2322
depends on X86_LOCAL_APIC
2423
select KVM_COMMON
2524
select KVM_GENERIC_MMU_NOTIFIER
@@ -144,8 +143,10 @@ config KVM_AMD_SEV
144143
select HAVE_KVM_ARCH_GMEM_PREPARE
145144
select HAVE_KVM_ARCH_GMEM_INVALIDATE
146145
help
147-
Provides support for launching Encrypted VMs (SEV) and Encrypted VMs
148-
with Encrypted State (SEV-ES) on AMD processors.
146+
Provides support for launching encrypted VMs which use Secure
147+
Encrypted Virtualization (SEV), Secure Encrypted Virtualization with
148+
Encrypted State (SEV-ES), and Secure Encrypted Virtualization with
149+
Secure Nested Paging (SEV-SNP) technologies on AMD processors.
149150

150151
config KVM_SMM
151152
bool "System Management Mode emulation"

arch/x86/kvm/mmu/mmu.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4750,7 +4750,9 @@ long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
47504750
* reload is efficient when called repeatedly, so we can do it on
47514751
* every iteration.
47524752
*/
4753-
kvm_mmu_reload(vcpu);
4753+
r = kvm_mmu_reload(vcpu);
4754+
if (r)
4755+
return r;
47544756

47554757
if (kvm_arch_has_private_mem(vcpu->kvm) &&
47564758
kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(range->gpa)))

arch/x86/kvm/mmu/spte.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -391,9 +391,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
391391
mmio_value = 0;
392392

393393
/*
394-
* The masked MMIO value must obviously match itself and a removed SPTE
395-
* must not get a false positive. Removed SPTEs and MMIO SPTEs should
396-
* never collide as MMIO must set some RWX bits, and removed SPTEs must
394+
* The masked MMIO value must obviously match itself and a frozen SPTE
395+
* must not get a false positive. Frozen SPTEs and MMIO SPTEs should
396+
* never collide as MMIO must set some RWX bits, and frozen SPTEs must
397397
* not set any RWX bits.
398398
*/
399399
if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||

arch/x86/kvm/mmu/spte.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
214214
*/
215215
#define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
216216

217-
/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
217+
/* Frozen SPTEs must not be misconstrued as shadow present PTEs. */
218218
static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK));
219219

220220
static inline bool is_frozen_spte(u64 spte)

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -359,10 +359,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
359359
/*
360360
* Set the SPTE to a nonpresent value that other
361361
* threads will not overwrite. If the SPTE was
362-
* already marked as removed then another thread
362+
* already marked as frozen then another thread
363363
* handling a page fault could overwrite it, so
364364
* set the SPTE until it is set from some other
365-
* value to the removed SPTE value.
365+
* value to the frozen SPTE value.
366366
*/
367367
for (;;) {
368368
old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
@@ -536,8 +536,8 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
536536
u64 *sptep = rcu_dereference(iter->sptep);
537537

538538
/*
539-
* The caller is responsible for ensuring the old SPTE is not a REMOVED
540-
* SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
539+
* The caller is responsible for ensuring the old SPTE is not a FROZEN
540+
* SPTE. KVM should never attempt to zap or manipulate a FROZEN SPTE,
541541
* and pre-checking before inserting a new SPTE is advantageous as it
542542
* avoids unnecessary work.
543543
*/

arch/x86/kvm/svm/svm.c

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2876,6 +2876,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
28762876
case MSR_CSTAR:
28772877
msr_info->data = svm->vmcb01.ptr->save.cstar;
28782878
break;
2879+
case MSR_GS_BASE:
2880+
msr_info->data = svm->vmcb01.ptr->save.gs.base;
2881+
break;
2882+
case MSR_FS_BASE:
2883+
msr_info->data = svm->vmcb01.ptr->save.fs.base;
2884+
break;
28792885
case MSR_KERNEL_GS_BASE:
28802886
msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
28812887
break;
@@ -3101,6 +3107,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
31013107
case MSR_CSTAR:
31023108
svm->vmcb01.ptr->save.cstar = data;
31033109
break;
3110+
case MSR_GS_BASE:
3111+
svm->vmcb01.ptr->save.gs.base = data;
3112+
break;
3113+
case MSR_FS_BASE:
3114+
svm->vmcb01.ptr->save.fs.base = data;
3115+
break;
31043116
case MSR_KERNEL_GS_BASE:
31053117
svm->vmcb01.ptr->save.kernel_gs_base = data;
31063118
break;
@@ -5224,6 +5236,9 @@ static __init void svm_set_cpu_caps(void)
52245236

52255237
/* CPUID 0x8000001F (SME/SEV features) */
52265238
sev_set_cpu_caps();
5239+
5240+
/* Don't advertise Bus Lock Detect to guest if SVM support is absent */
5241+
kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
52275242
}
52285243

52295244
static __init int svm_hardware_setup(void)

arch/x86/kvm/x86.c

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4656,7 +4656,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
46564656
case KVM_CAP_ASYNC_PF_INT:
46574657
case KVM_CAP_GET_TSC_KHZ:
46584658
case KVM_CAP_KVMCLOCK_CTRL:
4659-
case KVM_CAP_READONLY_MEM:
46604659
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
46614660
case KVM_CAP_TSC_DEADLINE_TIMER:
46624661
case KVM_CAP_DISABLE_QUIRKS:
@@ -4815,6 +4814,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
48154814
case KVM_CAP_VM_TYPES:
48164815
r = kvm_caps.supported_vm_types;
48174816
break;
4817+
case KVM_CAP_READONLY_MEM:
4818+
r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1;
48184819
default:
48194820
break;
48204821
}
@@ -6040,7 +6041,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
60406041
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
60416042
break;
60426043

6044+
kvm_vcpu_srcu_read_lock(vcpu);
60436045
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
6046+
kvm_vcpu_srcu_read_unlock(vcpu);
60446047
break;
60456048
}
60466049
case KVM_GET_DEBUGREGS: {

include/linux/context_tracking.h

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -80,10 +80,12 @@ static __always_inline bool context_tracking_guest_enter(void)
8080
return context_tracking_enabled_this_cpu();
8181
}
8282

83-
static __always_inline void context_tracking_guest_exit(void)
83+
static __always_inline bool context_tracking_guest_exit(void)
8484
{
8585
if (context_tracking_enabled())
8686
__ct_user_exit(CONTEXT_GUEST);
87+
88+
return context_tracking_enabled_this_cpu();
8789
}
8890

8991
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
@@ -98,7 +100,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
98100
static inline int ct_state(void) { return -1; }
99101
static inline int __ct_state(void) { return -1; }
100102
static __always_inline bool context_tracking_guest_enter(void) { return false; }
101-
static __always_inline void context_tracking_guest_exit(void) { }
103+
static __always_inline bool context_tracking_guest_exit(void) { return false; }
102104
#define CT_WARN_ON(cond) do { } while (0)
103105
#endif /* !CONFIG_CONTEXT_TRACKING_USER */
104106

include/linux/kvm_host.h

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -485,7 +485,15 @@ static __always_inline void guest_state_enter_irqoff(void)
485485
*/
486486
static __always_inline void guest_context_exit_irqoff(void)
487487
{
488-
context_tracking_guest_exit();
488+
/*
489+
* Guest mode is treated as a quiescent state, see
490+
* guest_context_enter_irqoff() for more details.
491+
*/
492+
if (!context_tracking_guest_exit()) {
493+
instrumentation_begin();
494+
rcu_virt_note_context_switch();
495+
instrumentation_end();
496+
}
489497
}
490498

491499
/*

0 commit comments

Comments
 (0)