Skip to content

Commit d67668e

Browse files
committed
KVM: x86, SVM: isolate vcpu->arch.dr6 from vmcb->save.dr6
There are two issues with KVM_EXIT_DEBUG on AMD, whose root cause is the different handling of DR6 on intercepted #DB exceptions on Intel and AMD. On Intel, #DB exceptions transmit the DR6 value via the exit qualification field of the VMCS, and the exit qualification only contains the description of the precise event that caused a vmexit. On AMD, instead the DR6 field of the VMCB is filled in as if the #DB exception was to be injected into the guest. This has two effects when guest debugging is in use: * the guest DR6 is clobbered * the kvm_run->debug.arch.dr6 field can accumulate more debug events, rather than just the last one that happened (the testcase in the next patch covers this issue). This patch fixes both issues by emulating, so to speak, the Intel behavior on AMD processors. The important observation is that (after the previous patches) the VMCB value of DR6 is only ever observable from the guest if KVM_DEBUGREG_WONT_EXIT is set. Therefore we can actually set vmcb->save.dr6 to any value we want as long as KVM_DEBUGREG_WONT_EXIT is clear, which it will be if guest debugging is enabled. Therefore it is possible to enter the guest with an all-zero DR6, reconstruct the #DB payload from the DR6 we get at exit time, and let kvm_deliver_exception_payload move the newly set bits into vcpu->arch.dr6. Some extra bits may be included in the payload if KVM_DEBUGREG_WONT_EXIT is set, but this is harmless. This may not be the most optimized way to deal with this, but it is simple and, being confined within SVM code, it gets rid of the set_dr6 callback and kvm_update_dr6. Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 5679b80 commit d67668e

File tree

5 files changed

+32
-30
lines changed

5 files changed

+32
-30
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1093,7 +1093,6 @@ struct kvm_x86_ops {
10931093
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10941094
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
10951095
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1096-
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
10971096
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
10981097
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
10991098
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1623,7 +1622,6 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
16231622

16241623
void kvm_define_shared_msr(unsigned index, u32 msr);
16251624
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1626-
void kvm_update_dr6(struct kvm_vcpu *vcpu);
16271625

16281626
u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
16291627
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);

arch/x86/kvm/svm/nested.c

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -269,7 +269,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
269269
svm->vmcb->save.rip = nested_vmcb->save.rip;
270270
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
271271
svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
272-
kvm_update_dr6(&svm->vcpu);
273272
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
274273

275274
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -634,10 +633,18 @@ static int nested_svm_intercept_db(struct vcpu_svm *svm)
634633

635634
reflected_db:
636635
/*
637-
* Synchronize guest DR6 here just like in db_interception; it will
638-
* be moved into the nested VMCB by nested_svm_vmexit.
636+
* Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
637+
* it will be moved into the nested VMCB by nested_svm_vmexit. Once
638+
* exceptions will be moved to svm_check_nested_events, all this stuff
639+
* will just go away and we could just return NESTED_EXIT_HOST
640+
* unconditionally. db_interception will queue the exception, which
641+
* will be processed by svm_check_nested_events if a nested vmexit is
642+
* required, and we will just use kvm_deliver_exception_payload to copy
643+
* the payload to DR6 before vmexit.
639644
*/
640-
svm->vcpu.arch.dr6 = dr6;
645+
WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
646+
svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
647+
svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
641648
return NESTED_EXIT_DONE;
642649
}
643650

arch/x86/kvm/svm/svm.c

Lines changed: 21 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1672,12 +1672,14 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
16721672
mark_dirty(svm->vmcb, VMCB_ASID);
16731673
}
16741674

1675-
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
1675+
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
16761676
{
1677-
struct vcpu_svm *svm = to_svm(vcpu);
1677+
struct vmcb *vmcb = svm->vmcb;
16781678

1679-
svm->vmcb->save.dr6 = value;
1680-
mark_dirty(svm->vmcb, VMCB_DR);
1679+
if (unlikely(value != vmcb->save.dr6)) {
1680+
vmcb->save.dr6 = value;
1681+
mark_dirty(vmcb, VMCB_DR);
1682+
}
16811683
}
16821684

16831685
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -1688,9 +1690,12 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
16881690
get_debugreg(vcpu->arch.db[1], 1);
16891691
get_debugreg(vcpu->arch.db[2], 2);
16901692
get_debugreg(vcpu->arch.db[3], 3);
1693+
/*
1694+
* We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
1695+
* because db_interception might need it. We can do it before vmentry.
1696+
*/
16911697
vcpu->arch.dr6 = svm->vmcb->save.dr6;
16921698
vcpu->arch.dr7 = svm->vmcb->save.dr7;
1693-
16941699
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
16951700
set_dr_intercepts(svm);
16961701
}
@@ -1734,8 +1739,8 @@ static int db_interception(struct vcpu_svm *svm)
17341739
if (!(svm->vcpu.guest_debug &
17351740
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
17361741
!svm->nmi_singlestep) {
1737-
vcpu->arch.dr6 = svm->vmcb->save.dr6;
1738-
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
1742+
u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
1743+
kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
17391744
return 1;
17401745
}
17411746

@@ -3313,6 +3318,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
33133318

33143319
svm->vmcb->save.cr2 = vcpu->arch.cr2;
33153320

3321+
/*
3322+
* Run with all-zero DR6 unless needed, so that we can get the exact cause
3323+
* of a #DB.
3324+
*/
3325+
if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
3326+
svm_set_dr6(svm, vcpu->arch.dr6);
3327+
else
3328+
svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
3329+
33163330
clgi();
33173331
kvm_load_guest_xsave_state(vcpu);
33183332

@@ -3927,7 +3941,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
39273941
.set_idt = svm_set_idt,
39283942
.get_gdt = svm_get_gdt,
39293943
.set_gdt = svm_set_gdt,
3930-
.set_dr6 = svm_set_dr6,
39313944
.set_dr7 = svm_set_dr7,
39323945
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
39333946
.cache_reg = svm_cache_reg,

arch/x86/kvm/vmx/vmx.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4965,10 +4965,6 @@ static int handle_dr(struct kvm_vcpu *vcpu)
49654965
return kvm_skip_emulated_instruction(vcpu);
49664966
}
49674967

4968-
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
4969-
{
4970-
}
4971-
49724968
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
49734969
{
49744970
get_debugreg(vcpu->arch.db[0], 0);
@@ -7731,7 +7727,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
77317727
.set_idt = vmx_set_idt,
77327728
.get_gdt = vmx_get_gdt,
77337729
.set_gdt = vmx_set_gdt,
7734-
.set_dr6 = vmx_set_dr6,
77357730
.set_dr7 = vmx_set_dr7,
77367731
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
77377732
.cache_reg = vmx_cache_reg,

arch/x86/kvm/x86.c

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -473,7 +473,6 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
473473
* breakpoint), it is reserved and must be zero in DR6.
474474
*/
475475
vcpu->arch.dr6 &= ~BIT(12);
476-
kvm_update_dr6(vcpu);
477476
break;
478477
case PF_VECTOR:
479478
vcpu->arch.cr2 = payload;
@@ -1047,12 +1046,6 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
10471046
}
10481047
}
10491048

1050-
void kvm_update_dr6(struct kvm_vcpu *vcpu)
1051-
{
1052-
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1053-
kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
1054-
}
1055-
10561049
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
10571050
{
10581051
unsigned long dr7;
@@ -1092,7 +1085,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
10921085
if (val & 0xffffffff00000000ULL)
10931086
return -1; /* #GP */
10941087
vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1095-
kvm_update_dr6(vcpu);
10961088
break;
10971089
case 5:
10981090
/* fall through */
@@ -4008,7 +4000,6 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
40084000
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
40094001
kvm_update_dr0123(vcpu);
40104002
vcpu->arch.dr6 = dbgregs->dr6;
4011-
kvm_update_dr6(vcpu);
40124003
vcpu->arch.dr7 = dbgregs->dr7;
40134004
kvm_update_dr7(vcpu);
40144005

@@ -8417,7 +8408,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
84178408
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
84188409
kvm_x86_ops.sync_dirty_debug_regs(vcpu);
84198410
kvm_update_dr0123(vcpu);
8420-
kvm_update_dr6(vcpu);
84218411
kvm_update_dr7(vcpu);
84228412
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
84238413
}
@@ -9478,7 +9468,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
94789468
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
94799469
kvm_update_dr0123(vcpu);
94809470
vcpu->arch.dr6 = DR6_INIT;
9481-
kvm_update_dr6(vcpu);
94829471
vcpu->arch.dr7 = DR7_FIXED_1;
94839472
kvm_update_dr7(vcpu);
94849473

0 commit comments

Comments (0)