
Commit cf87ac7

Gavin Shan authored and Marc Zyngier committed
KVM: x86: Introduce KVM_REQ_DIRTY_RING_SOFT_FULL
The VCPU isn't expected to be runnable when the dirty ring becomes soft full, until the dirty pages are harvested and the dirty ring is reset from userspace. So there is a check in each guest entrance to see whether the dirty ring is soft full or not, and the VCPU is stopped from running if it is. A similar check will be needed when the feature is supported on ARM64. As Marc Zyngier suggested, a new event avoids the pointless overhead of checking the size of the dirty ring ('vcpu->kvm->dirty_ring_size') on every guest entrance.

Add KVM_REQ_DIRTY_RING_SOFT_FULL. The event is raised when the dirty ring becomes soft full in kvm_dirty_ring_push(), and is only cleared in the check, done in the newly added helper kvm_dirty_ring_check_request(). Since the VCPU is not runnable when the dirty ring becomes soft full, the KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent the VCPU from running until the dirty pages are harvested and the dirty ring is reset by userspace.

kvm_dirty_ring_soft_full() becomes a private function with the newly added helper kvm_dirty_ring_check_request(). The alignment of the various event definitions in kvm_host.h is changed to tab characters along the way.

In order to avoid using container_of(), the argument @ring is replaced by @vcpu in kvm_dirty_ring_push().

Link: https://lore.kernel.org/kvmarm/[email protected]
Suggested-by: Marc Zyngier <[email protected]>
Signed-off-by: Gavin Shan <[email protected]>
Reviewed-by: Peter Xu <[email protected]>
Reviewed-by: Sean Christopherson <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 590925a commit cf87ac7

File tree

5 files changed: +46 additions, -25 deletions

arch/x86/kvm/x86.c
include/linux/kvm_dirty_ring.h
include/linux/kvm_host.h
virt/kvm/dirty_ring.c
virt/kvm/kvm_main.c

arch/x86/kvm/x86.c

Lines changed: 6 additions & 9 deletions
@@ -10499,20 +10499,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
         bool req_immediate_exit = false;
 
-        /* Forbid vmenter if vcpu dirty ring is soft-full */
-        if (unlikely(vcpu->kvm->dirty_ring_size &&
-                     kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
-                vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
-                trace_kvm_dirty_ring_exit(vcpu);
-                r = 0;
-                goto out;
-        }
-
         if (kvm_request_pending(vcpu)) {
                 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
                         r = -EIO;
                         goto out;
                 }
+
+                if (kvm_dirty_ring_check_request(vcpu)) {
+                        r = 0;
+                        goto out;
+                }
+
                 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
                         if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                 r = 0;
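
Editor's note: the win here is that the soft-full condition now rides on the existing vcpu->requests word, which vcpu_enter_guest() already loads once per entry via kvm_request_pending(), instead of a separate dirty_ring_size check on every entry. A simplified sketch of the request-API semantics this hunk relies on (the real helpers in include/linux/kvm_host.h additionally mask off the KVM_REQUEST_WAIT / KVM_REQUEST_NO_WAKEUP flag bits and add memory barriers; the example_* names below are illustrative, not kernel API):

/* One load per guest entry; any pending request makes this true. */
static inline bool example_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

/* Test-and-clear: a request is consumed by whoever checks it. */
static inline bool example_check_request(int req, struct kvm_vcpu *vcpu)
{
        return test_and_clear_bit(req, (void *)&vcpu->requests);
}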

include/linux/kvm_dirty_ring.h

Lines changed: 4 additions & 8 deletions
@@ -49,7 +49,7 @@ static inline int kvm_dirty_ring_reset(struct kvm *kvm,
         return 0;
 }
 
-static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring,
+static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
                                        u32 slot, u64 offset)
 {
 }
@@ -64,11 +64,6 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
 {
 }
 
-static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
-{
-        return true;
-}
-
 #else /* CONFIG_HAVE_KVM_DIRTY_RING */
 
 u32 kvm_dirty_ring_get_rsvd_entries(void);
@@ -84,13 +79,14 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
  * returns =0: successfully pushed
  *         <0: unable to push, need to wait
  */
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset);
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
 
 /* for use in vm_operations_struct */
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
 
 void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
 
 #endif /* CONFIG_HAVE_KVM_DIRTY_RING */
 

include/linux/kvm_host.h

Lines changed: 5 additions & 4 deletions
@@ -153,10 +153,11 @@ static inline bool is_error_page(struct page *page)
  * Architecture-independent vcpu->requests bit members
  * Bits 3-7 are reserved for more arch-independent bits.
  */
-#define KVM_REQ_TLB_FLUSH            (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_VM_DEAD              (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_UNBLOCK              2
-#define KVM_REQUEST_ARCH_BASE        8
+#define KVM_REQ_TLB_FLUSH               (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VM_DEAD                 (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_UNBLOCK                 2
+#define KVM_REQ_DIRTY_RING_SOFT_FULL    3
+#define KVM_REQUEST_ARCH_BASE           8
 
 /*
  * KVM_REQ_OUTSIDE_GUEST_MODE exists is purely as way to force the vCPU to
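
Editor's note, not part of the commit: in these definitions the low byte is the request's bit number in vcpu->requests, while KVM_REQUEST_WAIT / KVM_REQUEST_NO_WAKEUP are high flag bits OR'd in. The new request takes plain bit 3 with no flags, which fits it being raised and consumed in the vCPU's own context. Bits below KVM_REQUEST_ARCH_BASE (8) are the arch-independent space; architectures build theirs on top, roughly like the hypothetical example below:

/* Hypothetical arch-side request, shown only to illustrate the
 * KVM_REQUEST_ARCH_BASE split; not part of this commit. */
#define KVM_REQ_EXAMPLE_ARCH    KVM_ARCH_REQ(0) /* bit KVM_REQUEST_ARCH_BASE + 0 */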

virt/kvm/dirty_ring.c

Lines changed: 30 additions & 2 deletions
@@ -26,7 +26,7 @@ static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
         return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
 }
 
-bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
+static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
 {
         return kvm_dirty_ring_used(ring) >= ring->soft_limit;
 }
@@ -142,13 +142,19 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
 
         kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
 
+        /*
+         * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
+         * by the VCPU thread next time when it enters the guest.
+         */
+
         trace_kvm_dirty_ring_reset(ring);
 
         return count;
 }
 
-void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
+void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
 {
+        struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
         struct kvm_dirty_gfn *entry;
 
         /* It should never get full */
@@ -166,6 +172,28 @@ void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset)
         kvm_dirty_gfn_set_dirtied(entry);
         ring->dirty_index++;
         trace_kvm_dirty_ring_push(ring, slot, offset);
+
+        if (kvm_dirty_ring_soft_full(ring))
+                kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+}
+
+bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
+{
+        /*
+         * The VCPU isn't runnable when the dirty ring becomes soft full.
+         * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
+         * the VCPU from running until the dirty pages are harvested and
+         * the dirty ring is reset by userspace.
+         */
+        if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
+            kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
+                kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
+                vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
+                trace_kvm_dirty_ring_exit(vcpu);
+                return true;
+        }
+
+        return false;
 }
 
 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
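
Editor's note on the double check in kvm_dirty_ring_check_request(): kvm_check_request() clears the bit, so if userspace has already harvested and reset the ring by the time the vCPU sees the request, the helper returns false and the vCPU keeps running; if the ring is still soft full, the request is re-armed before exiting to userspace. The userspace contract is unchanged: handle KVM_EXIT_DIRTY_RING_FULL by harvesting the per-vCPU rings and issuing KVM_RESET_DIRTY_RINGS. A minimal sketch, assuming the ring was already mmap'ed from the vCPU fd; 'ring', 'ring_size' and the 'fetch' cursor are this sketch's own bookkeeping, not kernel API, and real users also need acquire/release ordering on the flags field (see Documentation/virt/kvm/api.rst):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Harvest one vCPU's dirty ring after a KVM_EXIT_DIRTY_RING_FULL exit,
 * then re-arm it via KVM_RESET_DIRTY_RINGS. */
static void harvest_dirty_ring(int vm_fd, struct kvm_dirty_gfn *ring,
                               uint32_t ring_size, uint32_t *fetch)
{
        while (ring[*fetch % ring_size].flags & KVM_DIRTY_GFN_F_DIRTY) {
                struct kvm_dirty_gfn *e = &ring[*fetch % ring_size];

                /* record e->slot and e->offset (e.g. for migration)... */

                /* hand the entry back to KVM as harvested */
                e->flags = KVM_DIRTY_GFN_F_RESET;
                (*fetch)++;
        }

        /* Collect the harvested entries; with this commit, the vCPU's
         * re-check in kvm_dirty_ring_check_request() then sees the ring
         * as no longer soft full and lets the guest run again. */
        ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
}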

virt/kvm/kvm_main.c

Lines changed: 1 addition & 2 deletions
@@ -3314,8 +3314,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
         u32 slot = (memslot->as_id << 16) | memslot->id;
 
         if (kvm->dirty_ring_size)
-                kvm_dirty_ring_push(&vcpu->dirty_ring,
-                                    slot, rel_gfn);
+                kvm_dirty_ring_push(vcpu, slot, rel_gfn);
         else
                 set_bit_le(rel_gfn, memslot->dirty_bitmap);
 }
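
This caller-side change is the other half of the @ring to @vcpu switch described in the commit message. Without it, kvm_dirty_ring_push() would have had to recover the vCPU from the embedded ring itself, roughly as in the sketch below of the alternative the commit avoids:

/* Inside kvm_dirty_ring_push(ring, ...), the vCPU could instead have
 * been derived from the ring embedded in struct kvm_vcpu: */
struct kvm_vcpu *vcpu = container_of(ring, struct kvm_vcpu, dirty_ring);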
