Skip to content

Commit 5d438e0

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
 "A new testcase for guest debugging (gdbstub) that exposed a bunch of
  bugs, mostly for AMD processors. And a few other x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix off-by-one error in kvm_vcpu_ioctl_x86_setup_mce
  KVM: x86: Fix pkru save/restore when guest CR4.PKE=0, move it to x86.c
  KVM: SVM: Disable AVIC before setting V_IRQ
  KVM: Introduce kvm_make_all_cpus_request_except()
  KVM: VMX: pass correct DR6 for GD userspace exit
  KVM: x86, SVM: isolate vcpu->arch.dr6 from vmcb->save.dr6
  KVM: SVM: keep DR6 synchronized with vcpu->arch.dr6
  KVM: nSVM: trap #DB and #BP to userspace if guest debugging is on
  KVM: selftests: Add KVM_SET_GUEST_DEBUG test
  KVM: X86: Fix single-step with KVM_SET_GUEST_DEBUG
  KVM: X86: Set RTM for DB_VECTOR too for KVM_EXIT_DEBUG
  KVM: x86: fix DR6 delivery for various cases of #DB injection
  KVM: X86: Declare KVM_CAP_SET_GUEST_DEBUG properly
2 parents befc42e + c4e0e4a commit 5d438e0

File tree

12 files changed

+325
-88
lines changed

12 files changed

+325
-88
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -578,6 +578,7 @@ struct kvm_vcpu_arch {
578578
unsigned long cr4;
579579
unsigned long cr4_guest_owned_bits;
580580
unsigned long cr8;
581+
u32 host_pkru;
581582
u32 pkru;
582583
u32 hflags;
583584
u64 efer;
@@ -1093,8 +1094,6 @@ struct kvm_x86_ops {
10931094
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10941095
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10951096
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1096-
u64 (*get_dr6)(struct kvm_vcpu *vcpu);
1097-
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
10981097
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
10991098
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
11001099
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1449,6 +1448,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu);
14491448

14501449
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
14511450
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1451+
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
14521452
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
14531453
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
14541454
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);

arch/x86/kvm/hyperv.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1427,7 +1427,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
14271427
*/
14281428
kvm_make_vcpus_request_mask(kvm,
14291429
KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
1430-
vcpu_mask, &hv_vcpu->tlb_flush);
1430+
NULL, vcpu_mask, &hv_vcpu->tlb_flush);
14311431

14321432
ret_success:
14331433
/* We always do full TLB flush, set rep_done = rep_cnt. */

arch/x86/kvm/svm/nested.c

Lines changed: 31 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/kernel.h>
2020

2121
#include <asm/msr-index.h>
22+
#include <asm/debugreg.h>
2223

2324
#include "kvm_emulate.h"
2425
#include "trace.h"
@@ -267,7 +268,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
267268
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
268269
svm->vmcb->save.rip = nested_vmcb->save.rip;
269270
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
270-
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
271+
svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
271272
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
272273

273274
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +483,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
482483
nested_vmcb->save.rsp = vmcb->save.rsp;
483484
nested_vmcb->save.rax = vmcb->save.rax;
484485
nested_vmcb->save.dr7 = vmcb->save.dr7;
485-
nested_vmcb->save.dr6 = vmcb->save.dr6;
486+
nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
486487
nested_vmcb->save.cpl = vmcb->save.cpl;
487488

488489
nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
@@ -606,26 +607,45 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
606607
/* DB exceptions for our internal use must not cause vmexit */
607608
static int nested_svm_intercept_db(struct vcpu_svm *svm)
608609
{
609-
unsigned long dr6;
610+
unsigned long dr6 = svm->vmcb->save.dr6;
611+
612+
/* Always catch it and pass it to userspace if debugging. */
613+
if (svm->vcpu.guest_debug &
614+
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
615+
return NESTED_EXIT_HOST;
610616

611617
/* if we're not singlestepping, it's not ours */
612618
if (!svm->nmi_singlestep)
613-
return NESTED_EXIT_DONE;
619+
goto reflected_db;
614620

615621
/* if it's not a singlestep exception, it's not ours */
616-
if (kvm_get_dr(&svm->vcpu, 6, &dr6))
617-
return NESTED_EXIT_DONE;
618622
if (!(dr6 & DR6_BS))
619-
return NESTED_EXIT_DONE;
623+
goto reflected_db;
620624

621625
/* if the guest is singlestepping, it should get the vmexit */
622626
if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
623627
disable_nmi_singlestep(svm);
624-
return NESTED_EXIT_DONE;
628+
goto reflected_db;
625629
}
626630

627631
/* it's ours, the nested hypervisor must not see this one */
628632
return NESTED_EXIT_HOST;
633+
634+
reflected_db:
635+
/*
636+
* Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
637+
* it will be moved into the nested VMCB by nested_svm_vmexit. Once
638+
* exceptions will be moved to svm_check_nested_events, all this stuff
639+
* will just go away and we could just return NESTED_EXIT_HOST
640+
* unconditionally. db_interception will queue the exception, which
641+
* will be processed by svm_check_nested_events if a nested vmexit is
642+
* required, and we will just use kvm_deliver_exception_payload to copy
643+
* the payload to DR6 before vmexit.
644+
*/
645+
WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
646+
svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
647+
svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
648+
return NESTED_EXIT_DONE;
629649
}
630650

631651
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
@@ -682,6 +702,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
682702
if (svm->nested.intercept_exceptions & excp_bits) {
683703
if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
684704
vmexit = nested_svm_intercept_db(svm);
705+
else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
706+
svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
707+
vmexit = NESTED_EXIT_HOST;
685708
else
686709
vmexit = NESTED_EXIT_DONE;
687710
}

arch/x86/kvm/svm/svm.c

Lines changed: 22 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1672,17 +1672,14 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
16721672
mark_dirty(svm->vmcb, VMCB_ASID);
16731673
}
16741674

1675-
static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1675+
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
16761676
{
1677-
return to_svm(vcpu)->vmcb->save.dr6;
1678-
}
1679-
1680-
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1681-
{
1682-
struct vcpu_svm *svm = to_svm(vcpu);
1677+
struct vmcb *vmcb = svm->vmcb;
16831678

1684-
svm->vmcb->save.dr6 = value;
1685-
mark_dirty(svm->vmcb, VMCB_DR);
1679+
if (unlikely(value != vmcb->save.dr6)) {
1680+
vmcb->save.dr6 = value;
1681+
mark_dirty(vmcb, VMCB_DR);
1682+
}
16861683
}
16871684

16881685
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -1693,9 +1690,12 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
16931690
get_debugreg(vcpu->arch.db[1], 1);
16941691
get_debugreg(vcpu->arch.db[2], 2);
16951692
get_debugreg(vcpu->arch.db[3], 3);
1696-
vcpu->arch.dr6 = svm_get_dr6(vcpu);
1693+
/*
1694+
* We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
1695+
* because db_interception might need it. We can do it before vmentry.
1696+
*/
1697+
vcpu->arch.dr6 = svm->vmcb->save.dr6;
16971698
vcpu->arch.dr7 = svm->vmcb->save.dr7;
1698-
16991699
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
17001700
set_dr_intercepts(svm);
17011701
}
@@ -1739,7 +1739,8 @@ static int db_interception(struct vcpu_svm *svm)
17391739
if (!(svm->vcpu.guest_debug &
17401740
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
17411741
!svm->nmi_singlestep) {
1742-
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1742+
u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
1743+
kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
17431744
return 1;
17441745
}
17451746

@@ -3317,6 +3318,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
33173318

33183319
svm->vmcb->save.cr2 = vcpu->arch.cr2;
33193320

3321+
/*
3322+
* Run with all-zero DR6 unless needed, so that we can get the exact cause
3323+
* of a #DB.
3324+
*/
3325+
if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3326+
svm_set_dr6(svm, vcpu->arch.dr6);
3327+
else
3328+
svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
3329+
33203330
clgi();
33213331
kvm_load_guest_xsave_state(vcpu);
33223332

@@ -3931,8 +3941,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
39313941
.set_idt = svm_set_idt,
39323942
.get_gdt = svm_get_gdt,
39333943
.set_gdt = svm_set_gdt,
3934-
.get_dr6 = svm_get_dr6,
3935-
.set_dr6 = svm_set_dr6,
39363944
.set_dr7 = svm_set_dr7,
39373945
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
39383946
.cache_reg = svm_cache_reg,

arch/x86/kvm/vmx/vmx.c

Lines changed: 4 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -1372,7 +1372,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
13721372

13731373
vmx_vcpu_pi_load(vcpu, cpu);
13741374

1375-
vmx->host_pkru = read_pkru();
13761375
vmx->host_debugctlmsr = get_debugctlmsr();
13771376
}
13781377

@@ -4677,15 +4676,13 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
46774676
dr6 = vmcs_readl(EXIT_QUALIFICATION);
46784677
if (!(vcpu->guest_debug &
46794678
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4680-
vcpu->arch.dr6 &= ~DR_TRAP_BITS;
4681-
vcpu->arch.dr6 |= dr6 | DR6_RTM;
46824679
if (is_icebp(intr_info))
46834680
WARN_ON(!skip_emulated_instruction(vcpu));
46844681

4685-
kvm_queue_exception(vcpu, DB_VECTOR);
4682+
kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
46864683
return 1;
46874684
}
4688-
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
4685+
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
46894686
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
46904687
/* fall through */
46914688
case BP_VECTOR:
@@ -4929,16 +4926,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
49294926
* guest debugging itself.
49304927
*/
49314928
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
4932-
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
4929+
vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
49334930
vcpu->run->debug.arch.dr7 = dr7;
49344931
vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
49354932
vcpu->run->debug.arch.exception = DB_VECTOR;
49364933
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
49374934
return 0;
49384935
} else {
4939-
vcpu->arch.dr6 &= ~DR_TRAP_BITS;
4940-
vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
4941-
kvm_queue_exception(vcpu, DB_VECTOR);
4936+
kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
49424937
return 1;
49434938
}
49444939
}
@@ -4969,15 +4964,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
49694964
return kvm_skip_emulated_instruction(vcpu);
49704965
}
49714966

4972-
static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
4973-
{
4974-
return vcpu->arch.dr6;
4975-
}
4976-
4977-
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
4978-
{
4979-
}
4980-
49814967
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
49824968
{
49834969
get_debugreg(vcpu->arch.db[0], 0);
@@ -6577,11 +6563,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
65776563

65786564
kvm_load_guest_xsave_state(vcpu);
65796565

6580-
if (static_cpu_has(X86_FEATURE_PKU) &&
6581-
kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
6582-
vcpu->arch.pkru != vmx->host_pkru)
6583-
__write_pkru(vcpu->arch.pkru);
6584-
65856566
pt_guest_enter(vmx);
65866567

65876568
if (vcpu_to_pmu(vcpu)->version)
@@ -6671,18 +6652,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
66716652

66726653
pt_guest_exit(vmx);
66736654

6674-
/*
6675-
* eager fpu is enabled if PKEY is supported and CR4 is switched
6676-
* back on host, so it is safe to read guest PKRU from current
6677-
* XSAVE.
6678-
*/
6679-
if (static_cpu_has(X86_FEATURE_PKU) &&
6680-
kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
6681-
vcpu->arch.pkru = rdpkru();
6682-
if (vcpu->arch.pkru != vmx->host_pkru)
6683-
__write_pkru(vmx->host_pkru);
6684-
}
6685-
66866655
kvm_load_host_xsave_state(vcpu);
66876656

66886657
vmx->nested.nested_run_pending = 0;
@@ -7740,8 +7709,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
77407709
.set_idt = vmx_set_idt,
77417710
.get_gdt = vmx_get_gdt,
77427711
.set_gdt = vmx_set_gdt,
7743-
.get_dr6 = vmx_get_dr6,
7744-
.set_dr6 = vmx_set_dr6,
77457712
.set_dr7 = vmx_set_dr7,
77467713
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
77477714
.cache_reg = vmx_cache_reg,

0 commit comments

Comments
 (0)