Skip to content

Commit 8564d63

Browse files
Steven Price authored and Marc Zyngier committed
KVM: arm64: Support stolen time reporting via shared structure
Implement the service call for configuring a shared structure between a VCPU and the hypervisor in which the hypervisor can write the time stolen from the VCPU's execution time by other tasks on the host.

User space allocates memory which is placed at an IPA also chosen by user space. The hypervisor then updates the shared structure using kvm_put_guest() to ensure single copy atomicity of the 64-bit value reporting the stolen time in nanoseconds.

Whenever stolen time is enabled by the guest, the stolen time counter is reset.

The stolen time itself is retrieved from the sched_info structure maintained by the Linux scheduler code. We enable SCHEDSTATS when selecting KVM Kconfig to ensure this value is meaningful.

Signed-off-by: Steven Price <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
1 parent cac0f1b commit 8564d63

File tree

7 files changed

+111
-0
lines changed

7 files changed

+111
-0
lines changed

arch/arm/include/asm/kvm_host.h

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4040
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
4141
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
42+
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
4243

4344
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
4445

@@ -329,6 +330,24 @@ static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
329330
return SMCCC_RET_NOT_SUPPORTED;
330331
}
331332

333+
/* 32-bit arm stub: stolen time is not implemented, so report that no
 * shared structure exists by returning the invalid IPA sentinel. */
static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
334+
{
335+
return GPA_INVALID;
336+
}
337+
338+
/* 32-bit arm stub: nothing to update, stolen time is unsupported here. */
static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
339+
{
340+
}
341+
342+
/* 32-bit arm stub: no per-VCPU pvtime state exists, so nothing to init. */
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
343+
{
344+
}
345+
346+
/* 32-bit arm stub: stolen time can never be enabled on this architecture. */
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
347+
{
348+
return false;
349+
}
350+
332351
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
333352

334353
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

arch/arm64/include/asm/kvm_host.h

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,7 @@
4444
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
4545
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
4646
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
47+
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
4748

4849
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
4950

@@ -338,6 +339,13 @@ struct kvm_vcpu_arch {
338339
/* True when deferrable sysregs are loaded on the physical CPU,
339340
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
340341
bool sysregs_loaded_on_cpu;
342+
343+
/* Guest PV state */
344+
struct {
345+
u64 steal;
346+
u64 last_steal;
347+
gpa_t base;
348+
} steal;
341349
};
342350

343351
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -479,6 +487,18 @@ int kvm_perf_init(void);
479487
int kvm_perf_teardown(void);
480488

481489
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
490+
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
491+
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
492+
493+
/* Reset the stolen-time base IPA so the feature starts disabled; the
 * guest must issue ARM_SMCCC_HV_PV_TIME_ST after user space sets a base. */
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
494+
{
495+
vcpu_arch->steal.base = GPA_INVALID;
496+
}
497+
498+
/* Stolen time reporting is enabled iff a shared-structure IPA has been
 * configured (i.e. steal.base is no longer the GPA_INVALID sentinel). */
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
499+
{
500+
return (vcpu_arch->steal.base != GPA_INVALID);
501+
}
482502

483503
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
484504

arch/arm64/kvm/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ config KVM
3939
select IRQ_BYPASS_MANAGER
4040
select HAVE_KVM_IRQ_BYPASS
4141
select HAVE_KVM_VCPU_RUN_PID_CHANGE
42+
select SCHEDSTATS
4243
---help---
4344
Support hosting virtualized guest machines.
4445
We don't support KVM with 16K page tables yet, due to the multiple

include/linux/kvm_types.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ typedef unsigned long gva_t;
3535
typedef u64 gpa_t;
3636
typedef u64 gfn_t;
3737

38+
#define GPA_INVALID (~(gpa_t)0)
39+
3840
typedef unsigned long hva_t;
3941
typedef u64 hpa_t;
4042
typedef u64 hfn_t;

virt/kvm/arm/arm.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,10 @@
4040
#include <asm/kvm_coproc.h>
4141
#include <asm/sections.h>
4242

43+
#include <kvm/arm_hypercalls.h>
44+
#include <kvm/arm_pmu.h>
45+
#include <kvm/arm_psci.h>
46+
4347
#ifdef REQUIRES_VIRT
4448
__asm__(".arch_extension virt");
4549
#endif
@@ -351,6 +355,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
351355

352356
kvm_arm_reset_debug_ptr(vcpu);
353357

358+
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
359+
354360
return kvm_vgic_vcpu_init(vcpu);
355361
}
356362

@@ -380,6 +386,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
380386
kvm_vcpu_load_sysregs(vcpu);
381387
kvm_arch_vcpu_load_fp(vcpu);
382388
kvm_vcpu_pmu_restore_guest(vcpu);
389+
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
390+
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
383391

384392
if (single_task_running())
385393
vcpu_clear_wfe_traps(vcpu);
@@ -645,6 +653,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
645653
* that a VCPU sees new virtual interrupts.
646654
*/
647655
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
656+
657+
if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
658+
kvm_update_stolen_time(vcpu);
648659
}
649660
}
650661

virt/kvm/arm/hypercalls.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
1414
u32 func_id = smccc_get_function(vcpu);
1515
long val = SMCCC_RET_NOT_SUPPORTED;
1616
u32 feature;
17+
gpa_t gpa;
1718

1819
switch (func_id) {
1920
case ARM_SMCCC_VERSION_FUNC_ID:
@@ -56,6 +57,11 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
5657
case ARM_SMCCC_HV_PV_TIME_FEATURES:
5758
val = kvm_hypercall_pv_features(vcpu);
5859
break;
60+
case ARM_SMCCC_HV_PV_TIME_ST:
61+
gpa = kvm_init_stolen_time(vcpu);
62+
if (gpa != GPA_INVALID)
63+
val = gpa;
64+
break;
5965
default:
6066
return kvm_psci_call(vcpu);
6167
}

virt/kvm/arm/pvtime.c

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,18 +3,70 @@
33

44
#include <linux/arm-smccc.h>
55

6+
#include <asm/pvclock-abi.h>
7+
68
#include <kvm/arm_hypercalls.h>
79

10+
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
11+
{
12+
struct kvm *kvm = vcpu->kvm;
13+
u64 steal;
14+
__le64 steal_le;
15+
u64 offset;
16+
int idx;
17+
u64 base = vcpu->arch.steal.base;
18+
19+
if (base == GPA_INVALID)
20+
return;
21+
22+
/* Let's do the local bookkeeping */
23+
steal = vcpu->arch.steal.steal;
24+
steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
25+
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
26+
vcpu->arch.steal.steal = steal;
27+
28+
steal_le = cpu_to_le64(steal);
29+
idx = srcu_read_lock(&kvm->srcu);
30+
offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
31+
kvm_put_guest(kvm, base + offset, steal_le, u64);
32+
srcu_read_unlock(&kvm->srcu, idx);
33+
}
34+
835
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
936
{
1037
u32 feature = smccc_get_arg1(vcpu);
1138
long val = SMCCC_RET_NOT_SUPPORTED;
1239

1340
switch (feature) {
1441
case ARM_SMCCC_HV_PV_TIME_FEATURES:
42+
case ARM_SMCCC_HV_PV_TIME_ST:
1543
val = SMCCC_RET_SUCCESS;
1644
break;
1745
}
1846

1947
return val;
2048
}
49+
50+
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
51+
{
52+
struct pvclock_vcpu_stolen_time init_values = {};
53+
struct kvm *kvm = vcpu->kvm;
54+
u64 base = vcpu->arch.steal.base;
55+
int idx;
56+
57+
if (base == GPA_INVALID)
58+
return base;
59+
60+
/*
61+
* Start counting stolen time from the time the guest requests
62+
* the feature enabled.
63+
*/
64+
vcpu->arch.steal.steal = 0;
65+
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
66+
67+
idx = srcu_read_lock(&kvm->srcu);
68+
kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
69+
srcu_read_unlock(&kvm->srcu, idx);
70+
71+
return base;
72+
}

0 commit comments

Comments
 (0)