Skip to content

Commit bfe91da

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "Bugfixes and a one-liner patch to silence a sparse warning"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: arm64: Stop clobbering x0 for HVC_SOFT_RESTART
  KVM: arm64: PMU: Fix per-CPU access in preemptible context
  KVM: VMX: Use KVM_POSSIBLE_CR*_GUEST_BITS to initialize guest/host masks
  KVM: x86: Mark CR4.TSD as being possibly owned by the guest
  KVM: x86: Inject #GP if guest attempts to toggle CR4.LA57 in 64-bit mode
  kvm: use more precise cast and do not drop __user
  KVM: x86: bit 8 of non-leaf PDPEs is not reserved
  KVM: X86: Fix async pf caused null-ptr-deref
  KVM: arm64: vgic-v4: Plug race between non-residency and v4.1 doorbell
  KVM: arm64: pvtime: Ensure task delay accounting is enabled
  KVM: arm64: Fix kvm_reset_vcpu() return code being incorrect with SVE
  KVM: arm64: Annotate hyp NMI-related functions as __always_inline
  KVM: s390: reduce number of IO pins to 1
2 parents 5c82ec0 + 8038a92 commit bfe91da

File tree

15 files changed

+70
-30
lines changed

15 files changed

+70
-30
lines changed

arch/arm64/include/asm/arch_gicv3.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
109109
return read_sysreg_s(SYS_ICC_PMR_EL1);
110110
}
111111

112-
static inline void gic_write_pmr(u32 val)
112+
static __always_inline void gic_write_pmr(u32 val)
113113
{
114114
write_sysreg_s(val, SYS_ICC_PMR_EL1);
115115
}

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
675675
cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
676676
}
677677

678-
static inline bool system_uses_irq_prio_masking(void)
678+
static __always_inline bool system_uses_irq_prio_masking(void)
679679
{
680680
return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
681681
cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);

arch/arm64/kvm/hyp-init.S

Lines changed: 7 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
136136

137137
1: cmp x0, #HVC_RESET_VECTORS
138138
b.ne 1f
139-
reset:
139+
140140
/*
141-
* Reset kvm back to the hyp stub. Do not clobber x0-x4 in
142-
* case we coming via HVC_SOFT_RESTART.
141+
* Set the HVC_RESET_VECTORS return code before entering the common
142+
* path so that we do not clobber x0-x2 in case we are coming via
143+
* HVC_SOFT_RESTART.
143144
*/
145+
mov x0, xzr
146+
reset:
147+
/* Reset kvm back to the hyp stub. */
144148
mrs x5, sctlr_el2
145149
mov_q x6, SCTLR_ELx_FLAGS
146150
bic x5, x5, x6 // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
151155
/* Install stub vectors */
152156
adr_l x5, __hyp_stub_vectors
153157
msr vbar_el2, x5
154-
mov x0, xzr
155158
eret
156159

157160
1: /* Bad stub call */

arch/arm64/kvm/pmu.c

Lines changed: 6 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
159159
}
160160

161161
/*
162-
* On VHE ensure that only guest events have EL0 counting enabled
162+
* On VHE ensure that only guest events have EL0 counting enabled.
163+
* This is called from both vcpu_{load,put} and the sysreg handling.
164+
* Since the latter is preemptible, special care must be taken to
165+
* disable preemption.
163166
*/
164167
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
165168
{
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
169172
if (!has_vhe())
170173
return;
171174

175+
preempt_disable();
172176
host = this_cpu_ptr(&kvm_host_data);
173177
events_guest = host->pmu_events.events_guest;
174178
events_host = host->pmu_events.events_host;
175179

176180
kvm_vcpu_pmu_enable_el0(events_guest);
177181
kvm_vcpu_pmu_disable_el0(events_host);
182+
preempt_enable();
178183
}
179184

180185
/*

arch/arm64/kvm/pvtime.c

Lines changed: 12 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -3,6 +3,7 @@
33

44
#include <linux/arm-smccc.h>
55
#include <linux/kvm_host.h>
6+
#include <linux/sched/stat.h>
67

78
#include <asm/kvm_mmu.h>
89
#include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
7374
return base;
7475
}
7576

77+
static bool kvm_arm_pvtime_supported(void)
78+
{
79+
return !!sched_info_on();
80+
}
81+
7682
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
7783
struct kvm_device_attr *attr)
7884
{
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
8288
int ret = 0;
8389
int idx;
8490

85-
if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
91+
if (!kvm_arm_pvtime_supported() ||
92+
attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
8693
return -ENXIO;
8794

8895
if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
110117
u64 __user *user = (u64 __user *)attr->addr;
111118
u64 ipa;
112119

113-
if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
120+
if (!kvm_arm_pvtime_supported() ||
121+
attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
114122
return -ENXIO;
115123

116124
ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
125133
{
126134
switch (attr->attr) {
127135
case KVM_ARM_VCPU_PVTIME_IPA:
128-
return 0;
136+
if (kvm_arm_pvtime_supported())
137+
return 0;
129138
}
130139
return -ENXIO;
131140
}

arch/arm64/kvm/reset.c

Lines changed: 7 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
245245
*/
246246
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
247247
{
248-
int ret = -EINVAL;
248+
int ret;
249249
bool loaded;
250250
u32 pstate;
251251

@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
269269

270270
if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
271271
test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
272-
if (kvm_vcpu_enable_ptrauth(vcpu))
272+
if (kvm_vcpu_enable_ptrauth(vcpu)) {
273+
ret = -EINVAL;
273274
goto out;
275+
}
274276
}
275277

276278
switch (vcpu->arch.target) {
277279
default:
278280
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
279-
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
281+
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
282+
ret = -EINVAL;
280283
goto out;
284+
}
281285
pstate = VCPU_RESET_PSTATE_SVC;
282286
} else {
283287
pstate = VCPU_RESET_PSTATE_EL1;

arch/arm64/kvm/vgic/vgic-v4.c

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
9090
!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
9191
disable_irq_nosync(irq);
9292

93+
/*
94+
* The v4.1 doorbell can fire concurrently with the vPE being
95+
* made non-resident. Ensure we only update pending_last
96+
* *after* the non-residency sequence has completed.
97+
*/
98+
raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
9399
vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
100+
raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
101+
94102
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
95103
kvm_vcpu_kick(vcpu);
96104

arch/s390/include/asm/kvm_host.h

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -31,12 +31,12 @@
3131
#define KVM_USER_MEM_SLOTS 32
3232

3333
/*
34-
* These seem to be used for allocating ->chip in the routing table,
35-
* which we don't use. 4096 is an out-of-thin-air value. If we need
36-
* to look at ->chip later on, we'll need to revisit this.
34+
* These seem to be used for allocating ->chip in the routing table, which we
35+
* don't use. 1 is as small as we can get to reduce the needed memory. If we
36+
* need to look at ->chip later on, we'll need to revisit this.
3737
*/
3838
#define KVM_NR_IRQCHIPS 1
39-
#define KVM_IRQCHIP_NUM_PINS 4096
39+
#define KVM_IRQCHIP_NUM_PINS 1
4040
#define KVM_HALT_POLL_NS_DEFAULT 50000
4141

4242
/* s390-specific vcpu->requests bit members */

arch/x86/kvm/kvm_cache_regs.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,7 @@
77
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
88
#define KVM_POSSIBLE_CR4_GUEST_BITS \
99
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
10-
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
10+
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
1111

1212
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
1313
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\

arch/x86/kvm/mmu/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
44494449
nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
44504450
rsvd_bits(maxphyaddr, 51);
44514451
rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
4452-
nonleaf_bit8_rsvd | gbpages_bit_rsvd |
4452+
gbpages_bit_rsvd |
44534453
rsvd_bits(maxphyaddr, 51);
44544454
rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
44554455
rsvd_bits(maxphyaddr, 51);

0 commit comments

Comments (0)