
Commit e1bfc24

sean-jc authored and Peter Zijlstra committed
KVM: Move x86's perf guest info callbacks to generic KVM
Move x86's perf guest callbacks into common KVM, as they are semantically identical to arm64's callbacks (the only other such KVM callbacks). arm64 will convert to the common versions in a future patch.

Implement the necessary arm64 arch hooks now to avoid having to provide stubs or a temporary #define (from x86) to avoid arm64 compilation errors when CONFIG_GUEST_PERF_EVENTS=y.

Signed-off-by: Sean Christopherson <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Acked-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent db21575 commit e1bfc24

6 files changed (+83, -42 lines)

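For orientation before the per-file hunks: the sketch below shows, under stated assumptions, how an architecture plugs into the common callbacks this commit introduces. The hook and registration names (kvm_arch_pmi_in_guest, kvm_arch_vcpu_get_ip, kvm_register_perf_callbacks, kvm_unregister_perf_callbacks, kvm_handle_intel_pt_intr) are taken from the diff; the example_arch_* wrappers and exact call sites are illustrative only, not part of this commit.

/* Illustrative sketch only -- arch-side wiring for the common perf callbacks. */

/* In the arch's kvm_host.h: tell generic code whether a PMI arrived in guest context. */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
        return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;  /* arm64's definition, per the hunk below */
}

/* In arch code: report the guest instruction pointer so perf can attribute samples. */
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
        return *vcpu_pc(vcpu);          /* x86 returns kvm_rip_read(vcpu) instead */
}

/* At hardware setup/teardown (hypothetical wrappers): hand the common callbacks to perf. */
static int example_arch_hardware_setup(void)
{
        kvm_register_perf_callbacks(NULL);      /* x86 may pass kvm_handle_intel_pt_intr */
        return 0;
}

static void example_arch_hardware_unsetup(void)
{
        kvm_unregister_perf_callbacks();
}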

arch/arm64/include/asm/kvm_host.h

Lines changed: 10 additions & 0 deletions
@@ -678,6 +678,16 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 void kvm_perf_init(void);
 void kvm_perf_teardown(void);
 
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For arm64, any event that arrives while a vCPU is
+ * loaded is considered to be "in guest".
+ */
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+        return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
+
 long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
 gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

arch/arm64/kvm/arm.c

Lines changed: 5 additions & 0 deletions
@@ -496,6 +496,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
         return vcpu_mode_priv(vcpu);
 }
 
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+        return *vcpu_pc(vcpu);
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -1567,6 +1567,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
         return -ENOTSUPP;
 }
 
+#define kvm_arch_pmi_in_guest(vcpu) \
+        ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);

arch/x86/kvm/x86.c

Lines changed: 11 additions & 42 deletions
@@ -8469,43 +8469,12 @@ static void kvm_timer_init(void)
                           kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
-static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
-{
-        return vcpu && vcpu->arch.handling_intr_from_guest;
-}
-
-static unsigned int kvm_guest_state(void)
-{
-        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-        unsigned int state;
-
-        if (!kvm_pmi_in_guest(vcpu))
-                return 0;
-
-        state = PERF_GUEST_ACTIVE;
-        if (static_call(kvm_x86_get_cpl)(vcpu))
-                state |= PERF_GUEST_USER;
-
-        return state;
-}
-
-static unsigned long kvm_guest_get_ip(void)
-{
-        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
-        /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
-        if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
-                return 0;
-
-        return kvm_rip_read(vcpu);
-}
-
 static unsigned int kvm_handle_intel_pt_intr(void)
 {
         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 
         /* '0' on failure so that the !PT case can use a RET0 static call. */
-        if (!kvm_pmi_in_guest(vcpu))
+        if (!kvm_arch_pmi_in_guest(vcpu))
                 return 0;
 
         kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -8514,12 +8483,6 @@ static unsigned int kvm_handle_intel_pt_intr(void)
         return 1;
 }
 
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
-        .state = kvm_guest_state,
-        .get_ip = kvm_guest_get_ip,
-        .handle_intel_pt_intr = NULL,
-};
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -11229,9 +11192,11 @@ int kvm_arch_hardware_setup(void *opaque)
         memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
         kvm_ops_static_call_update();
 
+        /* Temporary ugliness. */
         if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
-                kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
-        perf_register_guest_info_callbacks(&kvm_guest_cbs);
+                kvm_register_perf_callbacks(kvm_handle_intel_pt_intr);
+        else
+                kvm_register_perf_callbacks(NULL);
 
         if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
                 supported_xss = 0;
@@ -11260,8 +11225,7 @@ int kvm_arch_hardware_setup(void *opaque)
 
 void kvm_arch_hardware_unsetup(void)
 {
-        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-        kvm_guest_cbs.handle_intel_pt_intr = NULL;
+        kvm_unregister_perf_callbacks();
 
         static_call(kvm_x86_hardware_unsetup)();
 }
@@ -11852,6 +11816,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
         return vcpu->arch.preempted_in_kernel;
 }
 
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+        return kvm_rip_read(vcpu);
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;

include/linux/kvm_host.h

Lines changed: 10 additions & 0 deletions
@@ -1170,6 +1170,16 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
 }
 #endif
 
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);

virt/kvm/kvm_main.c

Lines changed: 44 additions & 0 deletions
@@ -5479,6 +5479,50 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
         return &kvm_running_vcpu;
 }
 
+#ifdef CONFIG_GUEST_PERF_EVENTS
+static unsigned int kvm_guest_state(void)
+{
+        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+        unsigned int state;
+
+        if (!kvm_arch_pmi_in_guest(vcpu))
+                return 0;
+
+        state = PERF_GUEST_ACTIVE;
+        if (!kvm_arch_vcpu_in_kernel(vcpu))
+                state |= PERF_GUEST_USER;
+
+        return state;
+}
+
+static unsigned long kvm_guest_get_ip(void)
+{
+        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+        /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+        if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
+                return 0;
+
+        return kvm_arch_vcpu_get_ip(vcpu);
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+        .state = kvm_guest_state,
+        .get_ip = kvm_guest_get_ip,
+        .handle_intel_pt_intr = NULL,
+};
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
+{
+        kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
+        perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+void kvm_unregister_perf_callbacks(void)
+{
+        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+}
+#endif
+
 struct kvm_cpu_compat_check {
         void *opaque;
         int *ret;
