Commit 8fbb696
KVM: x86: Fold kvm_arch_sched_in() into kvm_arch_vcpu_load()
Fold the guts of kvm_arch_sched_in() into kvm_arch_vcpu_load(), keying
off the recently added kvm_vcpu.scheduled_out as appropriate.

Note, there is a very slight functional change, as PLE shrink updates
will now happen after blasting WBINVD, but that is quite uninteresting
as the two operations do not interact in any way.

Acked-by: Kai Huang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
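For context, a rough sketch of the generic preempt-notifier path in virt/kvm/kvm_main.c that this change keys off of. These hooks are not part of this diff; the bodies below are reconstructed from memory and from the commit message's reference to the earlier patch in this series that added kvm_vcpu.scheduled_out, so treat them as illustrative rather than literal:

/*
 * Illustrative only, not part of this commit: scheduled_out is set on
 * sched-out and cleared after sched-in, so kvm_arch_vcpu_load() can
 * tell a preemption reload apart from the initial vcpu_load().
 */
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->preempted, false);
	WRITE_ONCE(vcpu->ready, false);

	__this_cpu_write(kvm_running_vcpu, vcpu);
	kvm_arch_sched_in(vcpu, cpu);	/* an empty stub on x86 after this commit */
	kvm_arch_vcpu_load(vcpu, cpu);
	WRITE_ONCE(vcpu->scheduled_out, false);
}

static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	WRITE_ONCE(vcpu->scheduled_out, true);

	if (current->on_rq) {
		WRITE_ONCE(vcpu->preempted, true);
		WRITE_ONCE(vcpu->ready, true);
	}
	kvm_arch_vcpu_put(vcpu);
	__this_cpu_write(kvm_running_vcpu, NULL);
}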
Parent: 5d9c07f

7 files changed (+16, -27)
arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 0 additions & 1 deletion
@@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP(sched_in)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 2 deletions
@@ -1749,8 +1749,6 @@ struct kvm_x86_ops {
 			       struct x86_exception *exception);
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-	void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
-
 	/*
 	 * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer. A zero
 	 * value indicates CPU dirty logging is unsupported or disabled.

arch/x86/kvm/svm/svm.c

Lines changed: 3 additions & 8 deletions
@@ -1545,6 +1545,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
 
+	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+		shrink_ple_window(vcpu);
+
 	if (sd->current_vmcb != svm->vmcb) {
 		sd->current_vmcb = svm->vmcb;
 
@@ -4560,12 +4563,6 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 	vcpu->arch.at_instruction_boundary = true;
 }
 
-static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-	if (!kvm_pause_in_guest(vcpu->kvm))
-		shrink_ple_window(vcpu);
-}
-
 static void svm_setup_mce(struct kvm_vcpu *vcpu)
 {
 	/* [63:9] are reserved. */
@@ -5025,8 +5022,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.check_intercept = svm_check_intercept,
 	.handle_exit_irqoff = svm_handle_exit_irqoff,
 
-	.sched_in = svm_sched_in,
-
 	.nested_ops = &svm_nested_ops,
 
 	.deliver_interrupt = svm_deliver_interrupt,

arch/x86/kvm/vmx/main.c

Lines changed: 0 additions & 2 deletions
@@ -122,8 +122,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-	.sched_in = vmx_sched_in,
-
 	.cpu_dirty_log_size = PML_ENTITY_NUM,
 	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 

arch/x86/kvm/vmx/vmx.c

Lines changed: 3 additions & 6 deletions
@@ -1518,6 +1518,9 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+	if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+		shrink_ple_window(vcpu);
+
 	vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
 
 	vmx_vcpu_pi_load(vcpu, cpu);
@@ -8172,12 +8175,6 @@ void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 }
 #endif
 
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-	if (!kvm_pause_in_guest(vcpu->kvm))
-		shrink_ple_window(vcpu);
-}
-
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);

arch/x86/kvm/vmx/x86_ops.h

Lines changed: 0 additions & 1 deletion
@@ -112,7 +112,6 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_request_immediate_exit(struct kvm_vcpu *vcpu);
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu);
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_X86_64
 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,

arch/x86/kvm/x86.c

Lines changed: 10 additions & 7 deletions
@@ -5004,6 +5004,16 @@ static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+	if (vcpu->scheduled_out) {
+		vcpu->arch.l1tf_flush_l1d = true;
+		if (pmu->version && unlikely(pmu->event_count)) {
+			pmu->need_cleanup = true;
+			kvm_make_request(KVM_REQ_PMU, vcpu);
+		}
+	}
+
 	/* Address WBINVD may be executed by guest */
 	if (need_emulate_wbinvd(vcpu)) {
 		if (static_call(kvm_x86_has_wbinvd_exit)())
@@ -12567,14 +12577,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
-	vcpu->arch.l1tf_flush_l1d = true;
-	if (pmu->version && unlikely(pmu->event_count)) {
-		pmu->need_cleanup = true;
-		kvm_make_request(KVM_REQ_PMU, vcpu);
-	}
-
-	static_call(kvm_x86_sched_in)(vcpu, cpu);
 }
 
 void kvm_arch_free_vm(struct kvm *kvm)
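To make the ordering note in the commit message concrete: the PLE shrink now runs from the vendor .vcpu_load hook, which kvm_arch_vcpu_load() invokes only after its WBINVD handling. A condensed, non-literal sketch of the resulting flow, assembled from the hunks above (the elided middle is paraphrased, not quoted from the source):

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* New: work that previously ran from kvm_arch_sched_in(). */
	if (vcpu->scheduled_out) {
		vcpu->arch.l1tf_flush_l1d = true;
		/* ... PMU cleanup request, per the hunk above ... */
	}

	/* WBINVD emulation for vCPU migration. */
	if (need_emulate_wbinvd(vcpu)) {
		/* ... */
	}

	/* ... */

	/*
	 * Vendor hook; on VMX and SVM this now also shrinks the PLE
	 * window when vcpu->scheduled_out is set, i.e. after the WBINVD
	 * logic above -- the "slight functional change" the commit
	 * message calls out.
	 */
	static_call(kvm_x86_vcpu_load)(vcpu, cpu);
	/* ... */
}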
