
Commit 73cd107

sean-jc authored and Peter Zijlstra committed
KVM: x86: Drop current_vcpu for kvm_running_vcpu + kvm_arch_vcpu variable
Use the generic kvm_running_vcpu plus a new 'handling_intr_from_guest' variable in kvm_arch_vcpu instead of the semi-redundant current_vcpu. kvm_before/after_interrupt() must be called while the vCPU is loaded (which protects against preemption), thus kvm_running_vcpu is guaranteed to be non-NULL when handling_intr_from_guest is non-zero. Switching to kvm_get_running_vcpu() will allow moving KVM's perf callbacks to generic code, and the new flag will be used in a future patch to more precisely identify the "NMI from guest" case.

Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 87b940a commit 73cd107
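
For context, a minimal sketch of the call-site pattern the changelog relies on is shown below. The function name handle_guest_interrupt_irqoff and its body are illustrative only and are not part of this diff (the real bracketing happens in the vendor exit/NMI handling paths); the point is that kvm_before_interrupt() and kvm_after_interrupt() are only called while the vCPU is loaded, so handling_intr_from_guest can only be non-zero while kvm_get_running_vcpu() is non-NULL.

/*
 * Illustrative sketch, not taken from this commit: kvm_before_interrupt()
 * and kvm_after_interrupt() bracket the window in which the loaded vCPU
 * is handling an interrupt/NMI that arrived while running the guest.
 */
static void handle_guest_interrupt_irqoff(struct kvm_vcpu *vcpu)  /* hypothetical */
{
        kvm_before_interrupt(vcpu);     /* handling_intr_from_guest = 1 */

        /*
         * Forward the interrupt/NMI to the host handler here; a perf PMI
         * landing in this window is attributed to the guest.
         */

        kvm_after_interrupt(vcpu);      /* handling_intr_from_guest = 0 */
}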

4 files changed: 20 additions & 16 deletions


arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 2 deletions
@@ -773,6 +773,7 @@ struct kvm_vcpu_arch {
 	unsigned nmi_pending; /* NMI queued after currently running handler */
 	bool nmi_injected; /* Trying to inject an NMI this entry */
 	bool smi_pending; /* SMI queued after currently running handler */
+	u8 handling_intr_from_guest;

 	struct kvm_mtrr mtrr_state;
 	u64 pat;
@@ -1895,8 +1896,6 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);

-unsigned int kvm_guest_state(void);
-
 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 				     u32 size);
 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);

arch/x86/kvm/pmu.c

Lines changed: 1 addition & 1 deletion
@@ -87,7 +87,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
	 * woken up. So we should wake it, but this is impossible from
	 * NMI context. Do it from irq work instead.
	 */
-	if (!kvm_guest_state())
+	if (!kvm_handling_nmi_from_guest(pmc->vcpu))
 		irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
 	else
 		kvm_make_request(KVM_REQ_PMI, pmc->vcpu);

arch/x86/kvm/x86.c

Lines changed: 12 additions & 9 deletions
@@ -8469,15 +8469,17 @@ static void kvm_timer_init(void)
 			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }

-DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
+static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+	return vcpu && vcpu->arch.handling_intr_from_guest;
+}

-unsigned int kvm_guest_state(void)
+static unsigned int kvm_guest_state(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	unsigned int state;

-	if (!vcpu)
+	if (!kvm_pmi_in_guest(vcpu))
 		return 0;

 	state = PERF_GUEST_ACTIVE;
@@ -8489,20 +8491,21 @@ unsigned int kvm_guest_state(void)

 static unsigned long kvm_guest_get_ip(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

-	if (WARN_ON_ONCE(!vcpu))
+	/* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+	if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
 		return 0;

 	return kvm_rip_read(vcpu);
 }

 static unsigned int kvm_handle_intel_pt_intr(void)
 {
-	struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

 	/* '0' on failure so that the !PT case can use a RET0 static call. */
-	if (!vcpu)
+	if (!kvm_pmi_in_guest(vcpu))
 		return 0;

 	kvm_make_request(KVM_REQ_PMI, vcpu);

arch/x86/kvm/x86.h

Lines changed: 6 additions & 4 deletions
@@ -385,18 +385,20 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
 	return kvm->arch.cstate_in_guest;
 }

-DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-
 static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
 {
-	__this_cpu_write(current_vcpu, vcpu);
+	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 1);
 }

 static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
 {
-	__this_cpu_write(current_vcpu, NULL);
+	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
 }

+static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
+{
+	return !!vcpu->arch.handling_intr_from_guest;
+}

 static inline bool kvm_pat_valid(u64 data)
 {
