Skip to content

Commit 5679b80

Browse files
committed
KVM: SVM: keep DR6 synchronized with vcpu->arch.dr6
kvm_x86_ops.set_dr6 is only ever called with vcpu->arch.dr6 as the second argument. Ensure that the VMCB value is synchronized to vcpu->arch.dr6 on #DB (both "normal" and nested) and on nested vmentry, so that the current value of DR6 is always available in vcpu->arch.dr6. The get_dr6 callback can just access vcpu->arch.dr6 and becomes redundant. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 2c19dba commit 5679b80

File tree

5 files changed

+21
-28
lines changed

5 files changed

+21
-28
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1093,7 +1093,6 @@ struct kvm_x86_ops {
10931093
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10941094
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10951095
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1096-
u64 (*get_dr6)(struct kvm_vcpu *vcpu);
10971096
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
10981097
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
10991098
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
@@ -1624,6 +1623,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
16241623

16251624
void kvm_define_shared_msr(unsigned index, u32 msr);
16261625
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1626+
void kvm_update_dr6(struct kvm_vcpu *vcpu);
16271627

16281628
u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
16291629
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

arch/x86/kvm/svm/nested.c

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/kernel.h>
2020

2121
#include <asm/msr-index.h>
22+
#include <asm/debugreg.h>
2223

2324
#include "kvm_emulate.h"
2425
#include "trace.h"
@@ -267,7 +268,8 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
267268
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
268269
svm->vmcb->save.rip = nested_vmcb->save.rip;
269270
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
270-
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
271+
svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
272+
kvm_update_dr6(&svm->vcpu);
271273
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
272274

273275
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +484,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
482484
nested_vmcb->save.rsp = vmcb->save.rsp;
483485
nested_vmcb->save.rax = vmcb->save.rax;
484486
nested_vmcb->save.dr7 = vmcb->save.dr7;
485-
nested_vmcb->save.dr6 = vmcb->save.dr6;
487+
nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
486488
nested_vmcb->save.cpl = vmcb->save.cpl;
487489

488490
nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
@@ -606,7 +608,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
606608
/* DB exceptions for our internal use must not cause vmexit */
607609
static int nested_svm_intercept_db(struct vcpu_svm *svm)
608610
{
609-
unsigned long dr6;
611+
unsigned long dr6 = svm->vmcb->save.dr6;
610612

611613
/* Always catch it and pass it to userspace if debugging. */
612614
if (svm->vcpu.guest_debug &
@@ -615,22 +617,28 @@ static int nested_svm_intercept_db(struct vcpu_svm *svm)
615617

616618
/* if we're not singlestepping, it's not ours */
617619
if (!svm->nmi_singlestep)
618-
return NESTED_EXIT_DONE;
620+
goto reflected_db;
619621

620622
/* if it's not a singlestep exception, it's not ours */
621-
if (kvm_get_dr(&svm->vcpu, 6, &dr6))
622-
return NESTED_EXIT_DONE;
623623
if (!(dr6 & DR6_BS))
624-
return NESTED_EXIT_DONE;
624+
goto reflected_db;
625625

626626
/* if the guest is singlestepping, it should get the vmexit */
627627
if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
628628
disable_nmi_singlestep(svm);
629-
return NESTED_EXIT_DONE;
629+
goto reflected_db;
630630
}
631631

632632
/* it's ours, the nested hypervisor must not see this one */
633633
return NESTED_EXIT_HOST;
634+
635+
reflected_db:
636+
/*
637+
* Synchronize guest DR6 here just like in db_interception; it will
638+
* be moved into the nested VMCB by nested_svm_vmexit.
639+
*/
640+
svm->vcpu.arch.dr6 = dr6;
641+
return NESTED_EXIT_DONE;
634642
}
635643

636644
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)

arch/x86/kvm/svm/svm.c

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1672,11 +1672,6 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
16721672
mark_dirty(svm->vmcb, VMCB_ASID);
16731673
}
16741674

1675-
static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
1676-
{
1677-
return to_svm(vcpu)->vmcb->save.dr6;
1678-
}
1679-
16801675
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
16811676
{
16821677
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1693,7 +1688,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
16931688
get_debugreg(vcpu->arch.db[1], 1);
16941689
get_debugreg(vcpu->arch.db[2], 2);
16951690
get_debugreg(vcpu->arch.db[3], 3);
1696-
vcpu->arch.dr6 = svm_get_dr6(vcpu);
1691+
vcpu->arch.dr6 = svm->vmcb->save.dr6;
16971692
vcpu->arch.dr7 = svm->vmcb->save.dr7;
16981693

16991694
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
@@ -1739,6 +1734,7 @@ static int db_interception(struct vcpu_svm *svm)
17391734
if (!(svm->vcpu.guest_debug &
17401735
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
17411736
!svm->nmi_singlestep) {
1737+
vcpu->arch.dr6 = svm->vmcb->save.dr6;
17421738
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
17431739
return 1;
17441740
}
@@ -3931,7 +3927,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
39313927
.set_idt = svm_set_idt,
39323928
.get_gdt = svm_get_gdt,
39333929
.set_gdt = svm_set_gdt,
3934-
.get_dr6 = svm_get_dr6,
39353930
.set_dr6 = svm_set_dr6,
39363931
.set_dr7 = svm_set_dr7,
39373932
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,

arch/x86/kvm/vmx/vmx.c

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4965,11 +4965,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
49654965
return kvm_skip_emulated_instruction(vcpu);
49664966
}
49674967

4968-
static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
4969-
{
4970-
return vcpu->arch.dr6;
4971-
}
4972-
49734968
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
49744969
{
49754970
}
@@ -7736,7 +7731,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
77367731
.set_idt = vmx_set_idt,
77377732
.get_gdt = vmx_get_gdt,
77387733
.set_gdt = vmx_set_gdt,
7739-
.get_dr6 = vmx_get_dr6,
77407734
.set_dr6 = vmx_set_dr6,
77417735
.set_dr7 = vmx_set_dr7,
77427736
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,

arch/x86/kvm/x86.c

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
104104
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
105105

106106
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
107-
static void kvm_update_dr6(struct kvm_vcpu *vcpu);
108107
static void process_nmi(struct kvm_vcpu *vcpu);
109108
static void enter_smm(struct kvm_vcpu *vcpu);
110109
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -1048,7 +1047,7 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
10481047
}
10491048
}
10501049

1051-
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
1050+
void kvm_update_dr6(struct kvm_vcpu *vcpu)
10521051
{
10531052
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
10541053
kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
@@ -1129,10 +1128,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
11291128
case 4:
11301129
/* fall through */
11311130
case 6:
1132-
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1133-
*val = vcpu->arch.dr6;
1134-
else
1135-
*val = kvm_x86_ops.get_dr6(vcpu);
1131+
*val = vcpu->arch.dr6;
11361132
break;
11371133
case 5:
11381134
/* fall through */

0 commit comments

Comments (0)