
Commit 53f9855

Andrew Jones authored and Marc Zyngier committed
KVM: arm64: pvtime: Fix stolen time accounting across migration
When updating the stolen time we should always read the current stolen
time from the user provided memory, not from a kernel cache. If we use
a cache then we'll end up resetting stolen time to zero on the first
update after migration.

Signed-off-by: Andrew Jones <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 4d2d4ce commit 53f9855
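To see why the cache is wrong, consider what migration preserves: guest memory travels with the VM, but per-vCPU state in the host kernel starts out fresh on the destination. The stand-alone sketch below is illustrative only; the variable and function names are invented for the demo and are not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Demo state: guest memory is migrated with the VM; the host kernel's
 * per-vCPU cache is not. */
static uint64_t guest_stolen_time;  /* in guest memory: survives migration */
static uint64_t kernel_cache;       /* in host kernel: lost on migration   */

/* Old scheme: accumulate in the kernel cache, then overwrite guest memory. */
static void update_cached(uint64_t delta)
{
	kernel_cache += delta;
	guest_stolen_time = kernel_cache;
}

/* New scheme: read-modify-write the running total in guest memory itself. */
static void update_readback(uint64_t delta)
{
	guest_stolen_time += delta;
}

int main(void)
{
	update_cached(100);              /* source host accumulates 100 */
	kernel_cache = 0;                /* migrate: destination cache is zero,
					    guest memory is copied as-is */
	update_cached(50);
	printf("cached:    %llu\n",      /* prints 50: history clobbered */
	       (unsigned long long)guest_stolen_time);

	guest_stolen_time = 100;         /* replay the same scenario */
	update_readback(50);
	printf("read-back: %llu\n",      /* prints 150: history preserved */
	       (unsigned long long)guest_stolen_time);
	return 0;
}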

File tree

3 files changed: +29 additions, -15 deletions


arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 1 deletion
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
 
 	/* Guest PV state */
 	struct {
-		u64 steal;
 		u64 last_steal;
 		gpa_t base;
 	} steal;

arch/arm64/kvm/pvtime.c

Lines changed: 9 additions & 14 deletions
@@ -13,26 +13,22 @@
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
+	u64 base = vcpu->arch.steal.base;
 	u64 last_steal = vcpu->arch.steal.last_steal;
-	u64 steal;
-	__le64 steal_le;
-	u64 offset;
+	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+	u64 steal = 0;
 	int idx;
-	u64 base = vcpu->arch.steal.base;
 
 	if (base == GPA_INVALID)
 		return;
 
-	/* Let's do the local bookkeeping */
-	steal = vcpu->arch.steal.steal;
-	vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
-	steal += vcpu->arch.steal.last_steal - last_steal;
-	vcpu->arch.steal.steal = steal;
-
-	steal_le = cpu_to_le64(steal);
 	idx = srcu_read_lock(&kvm->srcu);
-	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
-	kvm_put_guest(kvm, base + offset, steal_le);
+	if (!kvm_get_guest(kvm, base + offset, steal)) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.steal.last_steal - last_steal;
+		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+	}
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -66,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	 * Start counting stolen time from the time the guest requests
 	 * the feature enabled.
 	 */
-	vcpu->arch.steal.steal = 0;
 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
 
 	idx = srcu_read_lock(&kvm->srcu);
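Reassembled from the hunk above, the post-patch kvm_update_stolen_time() becomes a single read-modify-write against guest memory:

void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	u64 last_steal = vcpu->arch.steal.last_steal;
	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	u64 steal = 0;
	int idx;

	if (base == GPA_INVALID)
		return;

	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_get_guest(kvm, base + offset, steal)) {
		steal = le64_to_cpu(steal);
		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
		steal += vcpu->arch.steal.last_steal - last_steal;
		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
	}
	srcu_read_unlock(&kvm->srcu, idx);
}

Note that vcpu->arch.steal.last_steal now only advances inside the success branch of kvm_get_guest(), so an unreadable record leaves the bookkeeping untouched instead of silently dropping the accumulated delta.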

include/linux/kvm_host.h

Lines changed: 20 additions & 0 deletions
@@ -749,6 +749,26 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len);
 
+#define __kvm_get_guest(kvm, gfn, offset, v)				\
+({									\
+	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
+	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
+	int __ret = -EFAULT;						\
+									\
+	if (!kvm_is_error_hva(__addr))					\
+		__ret = get_user(v, __uaddr);				\
+	__ret;								\
+})
+
+#define kvm_get_guest(kvm, gpa, v)					\
+({									\
+	gpa_t __gpa = gpa;						\
+	struct kvm *__kvm = kvm;					\
+									\
+	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
+			offset_in_page(__gpa), v);			\
+})
+
 #define __kvm_put_guest(kvm, gfn, offset, v)				\
 ({									\
 	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
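The new kvm_get_guest() deliberately mirrors the existing kvm_put_guest(): the access width comes from typeof(v), and the expression evaluates to 0 on success or -EFAULT when the gpa has no valid host mapping. A minimal caller in the pvtime.c style; the helper name and its counter semantics here are invented for illustration:

/* Hypothetical helper: bump a little-endian counter the guest
 * published at @gpa.  Like the pvtime code above, callers should
 * hold the kvm->srcu read lock around the guest-memory accesses. */
static int bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
	__le64 val;

	if (kvm_get_guest(kvm, gpa, val))	 /* fills @val via get_user() */
		return -EFAULT;
	val = cpu_to_le64(le64_to_cpu(val) + 1); /* arithmetic in CPU order */
	return kvm_put_guest(kvm, gpa, val);	 /* write the result back */
}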
