
Commit b2626f1

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm fixes from Paolo Bonzini:
 "Small x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Ensure all migrations are performed when test is affined
  KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
  ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
  x86/kvmclock: Move this_cpu_pvti into kvmclock.h
  selftests: KVM: Don't clobber XMM register when read
  KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue

2 parents 24f67d8 + 7b0035e

File tree: 7 files changed, +81 -32 lines


arch/x86/include/asm/kvmclock.h

Lines changed: 14 additions & 0 deletions
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */

arch/x86/kernel/kvmclock.c

Lines changed: 2 additions & 11 deletions
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may

arch/x86/kvm/cpuid.c

Lines changed: 2 additions & 2 deletions
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 

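For a well-formed CPUID table the two orderings return the same entry, but the new order consults KVM_CPUID_FLAG_SIGNIFCANT_INDEX (the UAPI spelling) before comparing indices, so the index field is only examined when it is actually meaningful. A minimal userspace sketch of the corrected lookup; the struct and flag value here are simplified stand-ins for the real UAPI definitions:

#include <stdio.h>

/* Illustrative stand-ins; the real definitions live in the KVM UAPI headers. */
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)

struct kvm_cpuid_entry2 {
	unsigned int function;
	unsigned int index;
	unsigned int flags;
};

static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_cpuid_entry2 *entries,
						  int nent, unsigned int function,
						  unsigned int index)
{
	int i;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *e = &entries[i];

		/* Check the flag first: the index only matters when it is set. */
		if (e->function == function &&
		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) ||
		     e->index == index))
			return e;
	}
	return NULL;
}

int main(void)
{
	struct kvm_cpuid_entry2 entries[] = {
		{ .function = 7, .index = 0,
		  .flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX },
		{ .function = 0x80000001, .index = 0, .flags = 0 },
	};

	/* The second entry matches any index because its flag is clear. */
	printf("%p\n", (void *)cpuid_entry2_find(entries, 2, 0x80000001, 5));
	return 0;
}
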
arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		 */
 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (tsx_ctrl)
-			vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);

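This one-liner fixes a classic stale-index bug: the mask update went through vmx->guest_uret_msrs[i] with an `i` left over from an earlier loop, rather than through the pointer vmx_find_uret_msr() had just returned. A self-contained sketch of the pattern, with illustrative names:

#include <stdio.h>

struct uret_msr {
	unsigned int slot;
	unsigned long mask;
};

/* Return a pointer to the matching entry, or NULL (mirrors the find helper). */
static struct uret_msr *find_uret_msr(struct uret_msr *msrs, int n,
				      unsigned int slot)
{
	int i;

	for (i = 0; i < n; i++) {
		if (msrs[i].slot == slot)
			return &msrs[i];
	}
	return NULL;
}

int main(void)
{
	struct uret_msr msrs[] = { { 10, 0 }, { 20, 0 }, { 30, 0 } };
	struct uret_msr *tsx_ctrl = find_uret_msr(msrs, 3, 20);

	/* Update through the pointer that was found, never a stale index. */
	if (tsx_ctrl)
		tsx_ctrl->mask = ~0x2UL;

	printf("slot 20 mask: %#lx\n", msrs[1].mask);
	return 0;
}
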
drivers/ptp/ptp_kvm_x86.c

Lines changed: 2 additions & 7 deletions
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
 		return -ENODEV;
 
 	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-	hv_clock = pvclock_get_pvti_cpu0_va();
-	if (!hv_clock)
+	if (!pvclock_get_pvti_cpu0_va())
 		return -ENODEV;
 
 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
 	struct pvclock_vcpu_time_info *src;
 	unsigned int version;
 	long ret;
-	int cpu;
 
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
+	src = this_cpu_pvti();
 
 	do {
 		/*

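Background on the fix: the old hv_clock pointer referenced the boot-time pvti array, which only covers the first HVC_BOOT_ARRAY_SIZE CPUs, so on larger systems indexing hv_clock[cpu] could resolve the wrong per-CPU data. Going through this_cpu_pvti(), now exported from kvmclock.h, always yields the calling CPU's own entry. As a userspace analogy (illustrative only, assuming nothing about the kernel internals), thread-local storage gives the same "always my own slot" guarantee that the per-CPU accessor provides:

#include <pthread.h>
#include <stdio.h>

static __thread int pvti_slot;		/* stand-in for this CPU's pvti */

/* Analog of this_cpu_pvti(): always the calling context's own slot. */
static int *this_context_pvti(void)
{
	return &pvti_slot;
}

static void *worker(void *arg)
{
	*this_context_pvti() = (int)(long)arg;
	printf("worker %ld sees its own slot: %d\n",
	       (long)arg, *this_context_pvti());
	return NULL;
}

int main(void)			/* build with: cc -pthread demo.c */
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)1L);
	pthread_create(&t2, NULL, worker, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
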
tools/testing/selftests/kvm/include/x86_64/processor.h

Lines changed: 1 addition & 1 deletion
@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
 #define GET_XMM(__xmm) \
 ({ \
 	unsigned long __val; \
-	asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm); \
+	asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
 	__val; \
 })
 

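Naming #__xmm in the clobber list claimed the asm overwrites the register, when this movq only reads it; a false clobber lets the compiler assume the register's contents are destroyed across the statement. A runnable sketch of the corrected macro, assuming GCC or Clang on an x86-64 target:

#include <stdio.h>

/*
 * Userspace sketch of the corrected GET_XMM(): the asm only reads the
 * XMM register, so no clobber is needed (or correct) for it.
 */
#define GET_XMM(__xmm)						\
({								\
	unsigned long __val;					\
	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));	\
	__val;							\
})

int main(void)
{
	unsigned long in = 0x1234abcdUL;

	/* Load a known value into xmm0 so the read is observable;
	 * here xmm0 really is written, so the clobber is correct. */
	asm volatile("movq %0, %%xmm0" : : "r"(in) : "xmm0");

	printf("xmm0 = %#lx\n", GET_XMM(xmm0));	/* prints 0x1234abcd */
	return 0;
}
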
tools/testing/selftests/kvm/rseq_test.c

Lines changed: 59 additions & 10 deletions
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
 	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered opaque.  Note, if this task is pinned
+	 * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop
+	 * will burn a lot of cycles and the test will take longer than normal
+	 * to complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
 	cpu_set_t allowed_mask;
-	int r, i, nr_cpus, cpu;
+	int r, i, cpu;
 
 	CPU_ZERO(&allowed_mask);
 
-	nr_cpus = CPU_COUNT(&possible_mask);
-
-	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-		cpu = i % nr_cpus;
-		if (!CPU_ISSET(cpu, &possible_mask))
-			continue;
-
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
 		CPU_SET(cpu, &allowed_mask);
 
 		/*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (CPU_COUNT(&possible_mask) < 2) {
-		print_skip("Only one CPU, task migration not possible\n");
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
 		exit(KSFT_SKIP);
 	}
 

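Why the rework was needed: the old loop picked `cpu = i % nr_cpus` and simply skipped iterations whose CPU wasn't in the affinity mask, so a sparse mask silently dropped migrations; in the worst case none were performed at all. A standalone demonstration of that failure mode, under an assumed pinning to CPUs 2 and 5:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;
	int i, nr_cpus, performed = 0;

	/* Assume the task is pinned to CPUs 2 and 5 only. */
	CPU_ZERO(&mask);
	CPU_SET(2, &mask);
	CPU_SET(5, &mask);
	nr_cpus = CPU_COUNT(&mask);		/* 2 */

	for (i = 0; i < 100; i++) {
		int cpu = i % nr_cpus;		/* only 0 or 1, never in the mask */

		if (!CPU_ISSET(cpu, &mask))
			continue;		/* iteration lost, no retry */
		performed++;
	}

	/* Prints "0 of 100": every migration was skipped. */
	printf("migrations performed: %d of 100\n", performed);
	return 0;
}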