
Commit 921d259

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "s390:
   - implement diag318

  x86:
   - Report last CPU for debugging
   - Emulate smaller MAXPHYADDR in the guest than in the host
   - .noinstr and tracing fixes from Thomas
   - nested SVM page table switching optimization and fixes

  Generic:
   - Unify shadow MMU cache data structures across architectures"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (127 commits)
  KVM: SVM: Fix sev_pin_memory() error handling
  KVM: LAPIC: Set the TDCR settable bits
  KVM: x86: Specify max TDP level via kvm_configure_mmu()
  KVM: x86/mmu: Rename max_page_level to max_huge_page_level
  KVM: x86: Dynamically calculate TDP level from max level and MAXPHYADDR
  KVM: VXM: Remove temporary WARN on expected vs. actual EPTP level mismatch
  KVM: x86: Pull the PGD's level from the MMU instead of recalculating it
  KVM: VMX: Make vmx_load_mmu_pgd() static
  KVM: x86/mmu: Add separate helper for shadow NPT root page role calc
  KVM: VMX: Drop a duplicate declaration of construct_eptp()
  KVM: nSVM: Correctly set the shadow NPT root level in its MMU role
  KVM: Using macros instead of magic values
  MIPS: KVM: Fix build error caused by 'kvm_run' cleanup
  KVM: nSVM: remove nonsensical EXITINFO1 adjustment on nested NPF
  KVM: x86: Add a capability for GUEST_MAXPHYADDR < HOST_MAXPHYADDR support
  KVM: VMX: optimize #PF injection when MAXPHYADDR does not match
  KVM: VMX: Add guest physical address check in EPT violation and misconfig
  KVM: VMX: introduce vmx_need_pf_intercept
  KVM: x86: update exception bitmap on CPUID changes
  KVM: x86: rename update_bp_intercept to update_exception_bitmap
  ...
2 parents: 7b4ea94 + f3633c2


71 files changed: 1633 additions, 1212 deletions

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 8 additions & 2 deletions
@@ -5804,8 +5804,9 @@
 			panic() code such as dumping handler.
 
 	xen_nopvspin	[X86,XEN]
-			Disables the ticketlock slowpath using Xen PV
-			optimizations.
+			Disables the qspinlock slowpath using Xen PV optimizations.
+			This parameter is obsoleted by "nopvspin" parameter, which
+			has equivalent effect for XEN platform.
 
 	xen_nopv	[X86]
 			Disables the PV optimizations forcing the HVM guest to
@@ -5831,6 +5832,11 @@
 			as generic guest with no PV drivers. Currently support
 			XEN HVM, KVM, HYPER_V and VMWARE guest.
 
+	nopvspin	[X86,XEN,KVM]
+			Disables the qspinlock slow path using PV optimizations
+			which allow the hypervisor to 'idle' the guest on lock
+			contention.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]

Documentation/virt/kvm/api.rst

Lines changed: 5 additions & 0 deletions
@@ -669,6 +669,10 @@ MSRs that have been set successfully.
 Defines the vcpu responses to the cpuid instruction. Applications
 should use the KVM_SET_CPUID2 ioctl if available.
 
+Note, when this IOCTL fails, KVM gives no guarantees that previous valid CPUID
+configuration (if there is) is not corrupted. Userspace can get a copy of the
+resulting CPUID configuration through KVM_GET_CPUID2 in case.
+
 ::
 
   struct kvm_cpuid_entry {
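The note added above says a failed KVM_SET_CPUID2 may leave previously-set CPUID state corrupted, and that userspace can read back whatever KVM ended up with via KVM_GET_CPUID2. A minimal userspace sketch of that recovery pattern (illustrative only; the function name and the entry limit are assumptions, not part of the patch):

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define NENT_MAX 256	/* illustrative upper bound on CPUID entries */

/* Try to program the desired CPUID; on failure, dump what KVM actually kept. */
static int set_cpuid_or_report(int vcpu_fd, struct kvm_cpuid2 *desired)
{
	struct kvm_cpuid2 *cur;

	if (ioctl(vcpu_fd, KVM_SET_CPUID2, desired) == 0)
		return 0;

	cur = calloc(1, sizeof(*cur) + NENT_MAX * sizeof(struct kvm_cpuid_entry2));
	if (!cur)
		return -1;
	cur->nent = NENT_MAX;
	if (ioctl(vcpu_fd, KVM_GET_CPUID2, cur) == 0)
		fprintf(stderr, "set failed; vcpu still holds %u CPUID entries\n",
			cur->nent);
	free(cur);
	return -1;
}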
@@ -4795,6 +4799,7 @@ hardware_exit_reason.
 		/* KVM_EXIT_FAIL_ENTRY */
 		struct {
 			__u64 hardware_entry_failure_reason;
+			__u32 cpu; /* if KVM_LAST_CPU */
 		} fail_entry;
 
 If exit_reason is KVM_EXIT_FAIL_ENTRY, the vcpu could not be run due
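The new field reports which host CPU the failed VM-entry happened on, part of the "Report last CPU for debugging" work called out in the merge message. A hedged sketch of how a VMM might surface it, assuming the struct layout above and that the advertising capability has been checked first:

#include <linux/kvm.h>
#include <stdio.h>

/* Called with the vcpu's mmap'ed struct kvm_run after KVM_RUN returns. */
static void report_fail_entry(const struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_FAIL_ENTRY)
		return;

	fprintf(stderr, "VM-entry failed: reason 0x%llx on host cpu %u\n",
		(unsigned long long)run->fail_entry.hardware_entry_failure_reason,
		run->fail_entry.cpu);
}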

arch/arm64/include/asm/kvm_coproc.h

Lines changed: 6 additions & 6 deletions
@@ -27,12 +27,12 @@ struct kvm_sys_reg_target_table {
 void kvm_register_target_sys_reg_table(unsigned int target,
 				       struct kvm_sys_reg_target_table *table);
 
-int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
+int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init
 void kvm_sys_reg_table_init(void);
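This is the first of several files touched by the 'kvm_run' cleanup mentioned in the shortlog: handlers stop taking struct kvm_run explicitly because it is always reachable as vcpu->run. A minimal sketch of the resulting handler shape (the function name and exit reason are illustrative, not from the patch):

#include <linux/kvm_host.h>

/* Hypothetical exit handler following the new single-argument convention. */
static int example_exit_handler(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;	/* formerly a second parameter */

	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return 0;				/* 0 == proper exit to userspace */
}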

arch/arm64/include/asm/kvm_host.h

Lines changed: 4 additions & 18 deletions
@@ -97,17 +97,6 @@ struct kvm_arch {
 	bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS     40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-	int nobjs;
-	void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
@@ -486,18 +475,15 @@ u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index);
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index);
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
 /* MMIO helpers */
 void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
 unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa);
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
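The arm64-private cache definition removed above is replaced by the unified shadow-MMU cache (the "Generic" item in the merge message). The preallocate-then-consume idea the removed comment describes still applies; a hedged sketch of the pattern, assuming the generic helpers kvm_mmu_topup_memory_cache()/kvm_mmu_memory_cache_alloc() this series moves to common code (the wrapper function and top-up count are illustrative):

#include <linux/kvm_host.h>

/* Hypothetical wrapper showing the preallocate-then-consume flow. */
static void *example_alloc_for_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_page_cache;

	/* Fill the cache outside the mmu lock, where allocations may sleep. */
	if (kvm_mmu_topup_memory_cache(cache, 1))
		return NULL;

	/* Under the mmu lock this draws from the cache and cannot fail. */
	return kvm_mmu_memory_cache_alloc(cache);
}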

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 1 addition & 1 deletion
@@ -139,7 +139,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
 
-int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_types.h

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_KVM_TYPES_H
+#define _ASM_ARM64_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif	/* _ASM_ARM64_KVM_TYPES_H */
+
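Each architecture now only provides this sizing constant; the cache structure itself lives in common code. Roughly (a best-effort sketch of the common definition in include/linux/kvm_types.h as of this series; consult the tree for the authoritative layout):

struct kvm_mmu_memory_cache {
	int nobjs;				/* objects currently cached */
	gfp_t gfp_zero;				/* e.g. __GFP_ZERO, see arm.c below */
	struct kmem_cache *kmem_cache;		/* optional dedicated slab */
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
};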

arch/arm64/kvm/arm.c

Lines changed: 5 additions & 3 deletions
@@ -270,6 +270,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	vcpu->arch.target = -1;
 	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 
+	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
+
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
 
@@ -658,7 +660,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 			return ret;
 
 		if (run->exit_reason == KVM_EXIT_MMIO) {
-			ret = kvm_handle_mmio_return(vcpu, run);
+			ret = kvm_handle_mmio_return(vcpu);
 			if (ret)
 				return ret;
 		}
@@ -810,11 +812,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
 		/* Exit types that need handling before we can be preempted */
-		handle_exit_early(vcpu, run, ret);
+		handle_exit_early(vcpu, ret);
 
 		preempt_enable();
 
-		ret = handle_exit(vcpu, run, ret);
+		ret = handle_exit(vcpu, ret);
 	}
 
 	/* Tell userspace about in-kernel device output levels */

arch/arm64/kvm/handle_exit.c

Lines changed: 18 additions & 18 deletions
@@ -25,15 +25,15 @@
 #define CREATE_TRACE_POINTS
 #include "trace_handle_exit.h"
 
-typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
+typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
 {
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
 		kvm_inject_vabt(vcpu);
 }
 
-static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_hvc(struct kvm_vcpu *vcpu)
 {
 	int ret;
 
@@ -50,7 +50,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
-static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_smc(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * "If an SMC instruction executed at Non-secure EL1 is
@@ -69,7 +69,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Guest access to FP/ASIMD registers are routed to this handler only
  * when the system doesn't support FP/ASIMD.
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -87,7 +87,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * world-switches and schedule other host processes until there is an
  * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
@@ -109,16 +109,16 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * kvm_handle_guest_debug - handle a debug exception instruction
  *
  * @vcpu:	the vcpu pointer
- * @run:	access to the kvm_run structure for results
  *
  * We route all debug exceptions through the same handler. If both the
  * guest and host are using the same debug facilities it will be up to
  * userspace to re-inject the correct exception for guest delivery.
  *
- * @return: 0 (while setting run->exit_reason), -1 for error
+ * @return: 0 (while setting vcpu->run->exit_reason), -1 for error
  */
-static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *run = vcpu->run;
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 	int ret = 0;
 
@@ -144,7 +144,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return ret;
 }
 
-static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
 {
 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
 
@@ -155,7 +155,7 @@ static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 1;
 }
 
-static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_sve(struct kvm_vcpu *vcpu)
 {
 	/* Until SVE is supported for guests: */
 	kvm_inject_undefined(vcpu);
@@ -167,7 +167,7 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
  * that we can do is give the guest an UNDEF.
  */
-static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -212,7 +212,7 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
  * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
  * emulation first.
  */
-static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
 {
 	int handled;
 
@@ -227,7 +227,7 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		exit_handle_fn exit_handler;
 
 		exit_handler = kvm_get_exit_handler(vcpu);
-		handled = exit_handler(vcpu, run);
+		handled = exit_handler(vcpu);
 	}
 
 	return handled;
@@ -237,9 +237,10 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
  */
-int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		int exception_index)
+int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
 {
+	struct kvm_run *run = vcpu->run;
+
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
 
@@ -265,7 +266,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_EL1_SERROR:
 		return 1;
 	case ARM_EXCEPTION_TRAP:
-		return handle_trap_exceptions(vcpu, run);
+		return handle_trap_exceptions(vcpu);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
@@ -289,8 +290,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 }
 
 /* For exit types that need handling before we can be preempted */
-void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		       int exception_index)
+void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
 		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {

arch/arm64/kvm/mmio.c

Lines changed: 6 additions & 5 deletions
@@ -77,9 +77,8 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
  * or in-kernel IO emulation
  *
  * @vcpu: The VCPU pointer
- * @run:  The VCPU run struct containing the mmio data
  */
-int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
 {
 	unsigned long data;
 	unsigned int len;
@@ -92,6 +91,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	vcpu->mmio_needed = 0;
 
 	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
+		struct kvm_run *run = vcpu->run;
+
 		len = kvm_vcpu_dabt_get_as(vcpu);
 		data = kvm_mmio_read_buf(run->mmio.data, len);
 
@@ -119,9 +120,9 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 0;
 }
 
-int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		 phys_addr_t fault_ipa)
+int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
+	struct kvm_run *run = vcpu->run;
 	unsigned long data;
 	unsigned long rt;
 	int ret;
@@ -188,7 +189,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	if (!is_write)
 		memcpy(run->mmio.data, data_buf, len);
 	vcpu->stat.mmio_exit_kernel++;
-	kvm_handle_mmio_return(vcpu, run);
+	kvm_handle_mmio_return(vcpu);
 	return 1;
 }
