Skip to content

Commit 2d63699

Browse files
committed
KVM: x86: Always write vCPU's current TSC offset/ratio in vendor hooks
Drop the @offset and @multiplier params from the kvm_x86_ops hooks for propagating TSC offsets/multipliers into hardware, and instead have the vendor implementations pull the information directly from the vCPU structure. The respective vCPU fields _must_ be written at the same time in order to maintain consistent state, i.e. it's not random luck that the value passed in by all callers is grabbed from the vCPU.

Explicitly grabbing the value from the vCPU field in SVM's implementation in particular will allow for additional cleanup without introducing even more subtle dependencies. Specifically, SVM can skip the WRMSR if guest state isn't loaded, i.e. svm_prepare_switch_to_guest() will load the correct value for the vCPU prior to entering the guest.

This also reconciles KVM's handling of related values that are stored in the vCPU, as svm_write_tsc_offset() already assumes/requires the caller to have updated l1_tsc_offset.

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 229725a commit 2d63699

File tree

6 files changed: 15 additions (+), 16 deletions (−)

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 2 deletions
@@ -1654,8 +1654,8 @@ struct kvm_x86_ops {

 	u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
 	u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
-	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
-	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
+	void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
+	void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);

 	/*
 	 * Retrieve somewhat arbitrary exit information. Intended to

arch/x86/kvm/svm/nested.c

Lines changed: 2 additions & 2 deletions
@@ -1103,7 +1103,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (kvm_caps.has_tsc_control &&
 	    vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
 		vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
-		svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+		svm_write_tsc_multiplier(vcpu);
 	}

 	svm->nested.ctl.nested_cr3 = 0;
@@ -1536,7 +1536,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
 	vcpu->arch.tsc_scaling_ratio =
 		kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
 					       svm->tsc_ratio_msr);
-	svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
+	svm_write_tsc_multiplier(vcpu);
 }

 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */

arch/x86/kvm/svm/svm.c

Lines changed: 4 additions & 4 deletions
@@ -1137,19 +1137,19 @@ static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
 	return svm->tsc_ratio_msr;
 }

-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);

 	svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
-	svm->vmcb->control.tsc_offset = offset;
+	svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
 	vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }

-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
-	__svm_write_tsc_multiplier(multiplier);
+	__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
 	preempt_enable();
 }


arch/x86/kvm/svm/svm.h

Lines changed: 1 addition & 1 deletion
@@ -658,7 +658,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 			       bool has_error_code, u32 error_code);
 int nested_svm_exit_special(struct vcpu_svm *svm);
 void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
 				       struct vmcb_control_area *control);
 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,

arch/x86/kvm/vmx/vmx.c

Lines changed: 4 additions & 4 deletions
@@ -1898,14 +1898,14 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
 	return kvm_caps.default_tsc_scaling_ratio;
 }

-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
 {
-	vmcs_write64(TSC_OFFSET, offset);
+	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 }

-static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
 {
-	vmcs_write64(TSC_MULTIPLIER, multiplier);
+	vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
 }

 /*

arch/x86/kvm/x86.c

Lines changed: 2 additions & 3 deletions
@@ -2613,7 +2613,7 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 	else
 		vcpu->arch.tsc_offset = l1_offset;

-	static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+	static_call(kvm_x86_write_tsc_offset)(vcpu);
 }

 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
@@ -2629,8 +2629,7 @@ static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multipli
 		vcpu->arch.tsc_scaling_ratio = l1_multiplier;

 	if (kvm_caps.has_tsc_control)
-		static_call(kvm_x86_write_tsc_multiplier)(
-			vcpu, vcpu->arch.tsc_scaling_ratio);
+		static_call(kvm_x86_write_tsc_multiplier)(vcpu);
 }

 static inline bool kvm_check_tsc_unstable(void)

Comments: 0