Skip to content

Commit 78bc117

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/psci/cpu_on into kvmarm-master/next
PSCI fixes from Oliver Upton:

- Plug race on reset
- Ensure that a pending reset is applied before userspace accesses
- Reject PSCI requests with illegal affinity bits

* kvm-arm64/psci/cpu_on:
  selftests: KVM: Introduce psci_cpu_on_test
  KVM: arm64: Enforce reserved bits for PSCI target affinities
  KVM: arm64: Handle PSCI resets before userspace touches vCPU state
  KVM: arm64: Fix read-side race on updates to vcpu reset state

Signed-off-by: Marc Zyngier <[email protected]>
2 parents cf0c712 + cb97cf9 commit 78bc117

File tree

7 files changed

+156
-9
lines changed

7 files changed

+156
-9
lines changed

arch/arm64/kvm/arm.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1216,6 +1216,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
12161216
if (copy_from_user(&reg, argp, sizeof(reg)))
12171217
break;
12181218

1219+
/*
1220+
* We could owe a reset due to PSCI. Handle the pending reset
1221+
* here to ensure userspace register accesses are ordered after
1222+
* the reset.
1223+
*/
1224+
if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1225+
kvm_reset_vcpu(vcpu);
1226+
12191227
if (ioctl == KVM_SET_ONE_REG)
12201228
r = kvm_arm_set_reg(vcpu, &reg);
12211229
else

arch/arm64/kvm/psci.c

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -59,16 +59,22 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
5959
kvm_vcpu_kick(vcpu);
6060
}
6161

62+
static inline bool kvm_psci_valid_affinity(struct kvm_vcpu *vcpu,
63+
unsigned long affinity)
64+
{
65+
return !(affinity & ~MPIDR_HWID_BITMASK);
66+
}
67+
6268
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
6369
{
6470
struct vcpu_reset_state *reset_state;
6571
struct kvm *kvm = source_vcpu->kvm;
6672
struct kvm_vcpu *vcpu = NULL;
6773
unsigned long cpu_id;
6874

69-
cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
70-
if (vcpu_mode_is_32bit(source_vcpu))
71-
cpu_id &= ~((u32) 0);
75+
cpu_id = smccc_get_arg1(source_vcpu);
76+
if (!kvm_psci_valid_affinity(source_vcpu, cpu_id))
77+
return PSCI_RET_INVALID_PARAMS;
7278

7379
vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);
7480

@@ -126,6 +132,9 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
126132
target_affinity = smccc_get_arg1(vcpu);
127133
lowest_affinity_level = smccc_get_arg2(vcpu);
128134

135+
if (!kvm_psci_valid_affinity(vcpu, target_affinity))
136+
return PSCI_RET_INVALID_PARAMS;
137+
129138
/* Determine target affinity mask */
130139
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
131140
if (!target_affinity_mask)

arch/arm64/kvm/reset.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -210,10 +210,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
210210
*/
211211
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
212212
{
213+
struct vcpu_reset_state reset_state;
213214
int ret;
214215
bool loaded;
215216
u32 pstate;
216217

218+
mutex_lock(&vcpu->kvm->lock);
219+
reset_state = vcpu->arch.reset_state;
220+
WRITE_ONCE(vcpu->arch.reset_state.reset, false);
221+
mutex_unlock(&vcpu->kvm->lock);
222+
217223
/* Reset PMU outside of the non-preemptible section */
218224
kvm_pmu_vcpu_reset(vcpu);
219225

@@ -276,8 +282,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
276282
* Additional reset state handling that PSCI may have imposed on us.
277283
* Must be done after all the sys_reg reset.
278284
*/
279-
if (vcpu->arch.reset_state.reset) {
280-
unsigned long target_pc = vcpu->arch.reset_state.pc;
285+
if (reset_state.reset) {
286+
unsigned long target_pc = reset_state.pc;
281287

282288
/* Gracefully handle Thumb2 entry point */
283289
if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -286,13 +292,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
286292
}
287293

288294
/* Propagate caller endianness */
289-
if (vcpu->arch.reset_state.be)
295+
if (reset_state.be)
290296
kvm_vcpu_set_be(vcpu);
291297

292298
*vcpu_pc(vcpu) = target_pc;
293-
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
294-
295-
vcpu->arch.reset_state.reset = false;
299+
vcpu_set_reg(vcpu, 0, reset_state.r0);
296300
}
297301

298302
/* Reset timer */

tools/testing/selftests/kvm/.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# SPDX-License-Identifier: GPL-2.0-only
22
/aarch64/debug-exceptions
33
/aarch64/get-reg-list
4+
/aarch64/psci_cpu_on_test
45
/aarch64/vgic_init
56
/s390x/memop
67
/s390x/resets

tools/testing/selftests/kvm/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -86,6 +86,7 @@ TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
8686

8787
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
8888
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
89+
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
8990
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
9091
TEST_GEN_PROGS_aarch64 += demand_paging_test
9192
TEST_GEN_PROGS_aarch64 += dirty_log_test
Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,121 @@
1+
// SPDX-License-Identifier: GPL-2.0-only
2+
/*
3+
* psci_cpu_on_test - Test that the observable state of a vCPU targeted by the
4+
* CPU_ON PSCI call matches what the caller requested.
5+
*
6+
* Copyright (c) 2021 Google LLC.
7+
*
8+
* This is a regression test for a race between KVM servicing the PSCI call and
9+
* userspace reading the vCPUs registers.
10+
*/
11+
12+
#define _GNU_SOURCE
13+
14+
#include <linux/psci.h>
15+
16+
#include "kvm_util.h"
17+
#include "processor.h"
18+
#include "test_util.h"
19+
20+
#define VCPU_ID_SOURCE 0
21+
#define VCPU_ID_TARGET 1
22+
23+
#define CPU_ON_ENTRY_ADDR 0xfeedf00dul
24+
#define CPU_ON_CONTEXT_ID 0xdeadc0deul
25+
26+
/*
 * Issue a PSCI CPU_ON (SMC64 calling convention) hypercall to power on
 * @target_cpu, with execution starting at @entry_addr and @context_id
 * delivered in the target's x0.  Returns the PSCI status from x0.
 */
static uint64_t psci_cpu_on(uint64_t target_cpu, uint64_t entry_addr,
			    uint64_t context_id)
{
	/* PSCI arguments are passed in x0-x3; the result comes back in x0. */
	register uint64_t fn asm("x0") = PSCI_0_2_FN64_CPU_ON;
	register uint64_t arg1 asm("x1") = target_cpu;
	register uint64_t arg2 asm("x2") = entry_addr;
	register uint64_t arg3 asm("x3") = context_id;

	asm("hvc #0"
	    : "=r"(fn)
	    : "r"(fn), "r"(arg1), "r"(arg2), "r"(arg3)
	    : "memory");

	return fn;
}
41+
42+
/*
 * Issue a PSCI AFFINITY_INFO (SMC64) hypercall to query the power state
 * of @target_affinity at @lowest_affinity_level.  Returns the state (or
 * an error code) reported by the hypervisor in x0.
 */
static uint64_t psci_affinity_info(uint64_t target_affinity,
				   uint64_t lowest_affinity_level)
{
	/* Arguments in x0-x2 per the SMC64 convention; result in x0. */
	register uint64_t fn asm("x0") = PSCI_0_2_FN64_AFFINITY_INFO;
	register uint64_t arg1 asm("x1") = target_affinity;
	register uint64_t arg2 asm("x2") = lowest_affinity_level;

	asm("hvc #0"
	    : "=r"(fn)
	    : "r"(fn), "r"(arg1), "r"(arg2)
	    : "memory");

	return fn;
}
56+
57+
/*
 * Guest code for the source vCPU: power on @target_cpu via CPU_ON, then
 * poll AFFINITY_INFO until the target reports ON.  Any state other than
 * ON/OFF during the poll is a test failure.
 */
static void guest_main(uint64_t target_cpu)
{
	uint64_t state;

	GUEST_ASSERT(!psci_cpu_on(target_cpu, CPU_ON_ENTRY_ADDR, CPU_ON_CONTEXT_ID));

	for (;;) {
		state = psci_affinity_info(target_cpu, 0);

		GUEST_ASSERT((state == PSCI_0_2_AFFINITY_LEVEL_ON) ||
			     (state == PSCI_0_2_AFFINITY_LEVEL_OFF));
		if (state == PSCI_0_2_AFFINITY_LEVEL_ON)
			break;
	}

	GUEST_DONE();
}
71+
72+
int main(void)
73+
{
74+
uint64_t target_mpidr, obs_pc, obs_x0;
75+
struct kvm_vcpu_init init;
76+
struct kvm_vm *vm;
77+
struct ucall uc;
78+
79+
vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
80+
kvm_vm_elf_load(vm, program_invocation_name);
81+
ucall_init(vm, NULL);
82+
83+
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
84+
init.features[0] |= (1 << KVM_ARM_VCPU_PSCI_0_2);
85+
86+
aarch64_vcpu_add_default(vm, VCPU_ID_SOURCE, &init, guest_main);
87+
88+
/*
89+
* make sure the target is already off when executing the test.
90+
*/
91+
init.features[0] |= (1 << KVM_ARM_VCPU_POWER_OFF);
92+
aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_main);
93+
94+
get_reg(vm, VCPU_ID_TARGET, ARM64_SYS_REG(MPIDR_EL1), &target_mpidr);
95+
vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK);
96+
vcpu_run(vm, VCPU_ID_SOURCE);
97+
98+
switch (get_ucall(vm, VCPU_ID_SOURCE, &uc)) {
99+
case UCALL_DONE:
100+
break;
101+
case UCALL_ABORT:
102+
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0], __FILE__,
103+
uc.args[1]);
104+
break;
105+
default:
106+
TEST_FAIL("Unhandled ucall: %lu", uc.cmd);
107+
}
108+
109+
get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.pc), &obs_pc);
110+
get_reg(vm, VCPU_ID_TARGET, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
111+
112+
TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
113+
"unexpected target cpu pc: %lx (expected: %lx)",
114+
obs_pc, CPU_ON_ENTRY_ADDR);
115+
TEST_ASSERT(obs_x0 == CPU_ON_CONTEXT_ID,
116+
"unexpected target context id: %lx (expected: %lx)",
117+
obs_x0, CPU_ON_CONTEXT_ID);
118+
119+
kvm_vm_free(vm);
120+
return 0;
121+
}

tools/testing/selftests/kvm/include/aarch64/processor.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717
#define CPACR_EL1 3, 0, 1, 0, 2
1818
#define TCR_EL1 3, 0, 2, 0, 2
1919
#define MAIR_EL1 3, 0, 10, 2, 0
20+
#define MPIDR_EL1 3, 0, 0, 0, 5
2021
#define TTBR0_EL1 3, 0, 2, 0, 0
2122
#define SCTLR_EL1 3, 0, 1, 0, 0
2223
#define VBAR_EL1 3, 0, 12, 0, 0
@@ -40,6 +41,8 @@
4041
(0xfful << (4 * 8)) | \
4142
(0xbbul << (5 * 8)))
4243

44+
#define MPIDR_HWID_BITMASK (0xff00fffffful)
45+
4346
static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
4447
{
4548
struct kvm_one_reg reg;

0 commit comments

Comments
 (0)