@@ -311,8 +311,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 	return called;
 }
 
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
-				      struct kvm_vcpu *except)
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 {
 	struct kvm_vcpu *vcpu;
 	struct cpumask *cpus;
@@ -325,22 +324,14 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
 	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
 	cpumask_clear(cpus);
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu == except)
-			continue;
+	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_vcpu_request(vcpu, req, cpus, me);
-	}
 
 	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
 	put_cpu();
 
 	return called;
 }
-
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
-{
-	return kvm_make_all_cpus_request_except(kvm, req, NULL);
-}
 EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
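With the "except" variant folded away, every broadcast goes through kvm_make_all_cpus_request(). A minimal caller sketch, assuming the stock KVM request machinery (KVM_REQ_TLB_FLUSH carries KVM_REQUEST_WAIT, and the bool return means "at least one vCPU was kicked", as in kvm_flush_remote_tlbs()); the function below is hypothetical:

	/* Hypothetical caller: ask every vCPU to flush its TLB. */
	static void example_flush_all(struct kvm *kvm)
	{
		/*
		 * kvm_make_all_cpus_request() sets the request bit on each
		 * vCPU and kicks only those whose bit was newly set; since
		 * KVM_REQ_TLB_FLUSH includes KVM_REQUEST_WAIT, the call also
		 * waits for running vCPUs to acknowledge the kick.
		 */
		if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
			++kvm->stat.generic.remote_tlb_flush_requests;
	}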
@@ -2932,7 +2923,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
 /*
  * Pin guest page in memory and return its pfn.
  * @addr: host virtual address which maps memory to the guest
- * @atomic: whether this function can sleep
+ * @atomic: whether this function is forbidden from sleeping
  * @interruptible: whether the process can be interrupted by non-fatal signals
  * @async: whether this function need to wait IO complete if the
  *         host page is not in the memory
@@ -3004,16 +2995,12 @@ kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
 	if (hva)
 		*hva = addr;
 
-	if (addr == KVM_HVA_ERR_RO_BAD) {
-		if (writable)
-			*writable = false;
-		return KVM_PFN_ERR_RO_FAULT;
-	}
-
 	if (kvm_is_error_hva(addr)) {
 		if (writable)
 			*writable = false;
-		return KVM_PFN_NOSLOT;
+
+		return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
+						    KVM_PFN_NOSLOT;
 	}
 
 	/* Do not map writable pfn in the readonly memslot. */
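The consolidation works because kvm_is_error_hva() is true for both KVM_HVA_ERR_BAD and KVM_HVA_ERR_RO_BAD, so one branch can clear *writable and then pick the precise error pfn. An equivalent standalone sketch of the new error path (illustrative only; the helper name is hypothetical):

	static kvm_pfn_t example_error_pfn(unsigned long addr, bool *writable)
	{
		/* Both bad-HVA sentinels are caught by kvm_is_error_hva(). */
		if (kvm_is_error_hva(addr)) {
			if (writable)
				*writable = false;
			/* Distinguish a read-only fault from no memslot at all. */
			return addr == KVM_HVA_ERR_RO_BAD ? KVM_PFN_ERR_RO_FAULT :
							    KVM_PFN_NOSLOT;
		}
		return 0; /* not an error; the real code continues to hva_to_pfn() */
	}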
@@ -3277,6 +3264,7 @@ static int next_segment(unsigned long len, int offset)
 	return len;
 }
 
+/* Copy @len bytes from guest memory at '(@gfn * PAGE_SIZE) + @offset' to @data */
 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
 				 void *data, int offset, int len)
 {
@@ -3378,6 +3366,7 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
 
+/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
 static int __kvm_write_guest_page(struct kvm *kvm,
 				  struct kvm_memory_slot *memslot, gfn_t gfn,
 				  const void *data, int offset, int len)
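Both new comments describe the same addressing convention: a guest physical address decomposes into a frame number plus an in-page offset, and a single copy never crosses a page boundary (next_segment() caps @len). A tiny worked sketch with hypothetical values, using standard kernel helpers:

	gpa_t gpa    = 0x1234;              /* example guest physical address */
	gfn_t gfn    = gpa >> PAGE_SHIFT;   /* 0x1 with 4KiB pages */
	int   offset = offset_in_page(gpa); /* 0x234 */
	/*
	 * __kvm_read_guest_page() then copies @len bytes starting at
	 * (gfn * PAGE_SIZE) + offset == gpa into @data.
	 */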