@@ -211,10 +211,79 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
 	return false;
 }
 
+static int kvmppc_memslot_page_merge(struct kvm *kvm,
+		const struct kvm_memory_slot *memslot, bool merge)
+{
+	unsigned long gfn = memslot->base_gfn;
+	unsigned long end, start = gfn_to_hva(kvm, gfn);
+	int ret = 0;
+	struct vm_area_struct *vma;
+	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
+
+	if (kvm_is_error_hva(start))
+		return H_STATE;
+
+	end = start + (memslot->npages << PAGE_SHIFT);
+
+	mmap_write_lock(kvm->mm);
+	do {
+		vma = find_vma_intersection(kvm->mm, start, end);
+		if (!vma) {
+			ret = H_STATE;
+			break;
+		}
+		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+				  merge_flag, &vma->vm_flags);
+		if (ret) {
+			ret = H_STATE;
+			break;
+		}
+		start = vma->vm_end;
+	} while (end > vma->vm_end);
+
+	mmap_write_unlock(kvm->mm);
+	return ret;
+}
+
+static void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+		const struct kvm_memory_slot *memslot)
+{
+	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
+	kvmppc_uvmem_slot_free(kvm, memslot);
+	kvmppc_memslot_page_merge(kvm, memslot, true);
+}
+
+static int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+		const struct kvm_memory_slot *memslot)
+{
+	int ret = H_PARAMETER;
+
+	if (kvmppc_memslot_page_merge(kvm, memslot, false))
+		return ret;
+
+	if (kvmppc_uvmem_slot_init(kvm, memslot))
+		goto out1;
+
+	ret = uv_register_mem_slot(kvm->arch.lpid,
+				   memslot->base_gfn << PAGE_SHIFT,
+				   memslot->npages * PAGE_SIZE,
+				   0, memslot->id);
+	if (ret < 0) {
+		ret = H_PARAMETER;
+		goto out;
+	}
+	return 0;
+out:
+	kvmppc_uvmem_slot_free(kvm, memslot);
+out1:
+	kvmppc_memslot_page_merge(kvm, memslot, true);
+	return ret;
+}
+
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
-	struct kvm_memory_slot *memslot;
+	struct kvm_memory_slot *memslot, *m;
 	int ret = H_SUCCESS;
 	int srcu_idx;
 
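A note on how the two new helpers are meant to pair up: kvmppc_uvmem_memslot_create() undoes its own partial work on failure, so a caller only ever has to call kvmppc_uvmem_memslot_delete() for slots whose create succeeded. A minimal sketch of a hypothetical caller (the wrapper name kvmppc_uvmem_replace_memslot is illustrative only and not part of this patch):

/* Illustrative sketch only: swap one secure memslot for another. */
static int kvmppc_uvmem_replace_memslot(struct kvm *kvm,
		const struct kvm_memory_slot *old,
		const struct kvm_memory_slot *new)
{
	int ret;

	/* create() cleans up after itself if it fails */
	ret = kvmppc_uvmem_memslot_create(kvm, new);
	if (ret)
		return ret;

	/* tear down the old slot only once the new one is registered */
	kvmppc_uvmem_memslot_delete(kvm, old);
	return 0;
}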
@@ -232,23 +301,24 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 		return H_AUTHORITY;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
+
+	/* register the memslot */
 	slots = kvm_memslots(kvm);
 	kvm_for_each_memslot(memslot, slots) {
-		if (kvmppc_uvmem_slot_init(kvm, memslot)) {
-			ret = H_PARAMETER;
-			goto out;
-		}
-		ret = uv_register_mem_slot(kvm->arch.lpid,
-					   memslot->base_gfn << PAGE_SHIFT,
-					   memslot->npages * PAGE_SIZE,
-					   0, memslot->id);
-		if (ret < 0) {
-			kvmppc_uvmem_slot_free(kvm, memslot);
-			ret = H_PARAMETER;
-			goto out;
+		ret = kvmppc_uvmem_memslot_create(kvm, memslot);
+		if (ret)
+			break;
+	}
+
+	if (ret) {
+		slots = kvm_memslots(kvm);
+		kvm_for_each_memslot(m, slots) {
+			if (m == memslot)
+				break;
+			kvmppc_uvmem_memslot_delete(kvm, m);
 		}
 	}
-out:
+
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return ret;
 }
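To make the error unwind above explicit: when kvmppc_uvmem_memslot_create() fails, the first loop breaks with memslot still pointing at the failing slot (which has already undone its own work), so the second walk only deletes the slots created before it. A worked trace, assuming three memslots A, B and C with C failing:

/*
 * Sketch of the unwind with three memslots A, B, C and
 * kvmppc_uvmem_memslot_create() failing on C:
 *
 *   create(A) -> 0
 *   create(B) -> 0
 *   create(C) -> error, first loop breaks with memslot == C
 *
 * cleanup walk:
 *   m == A: A != C, kvmppc_uvmem_memslot_delete(kvm, A)
 *   m == B: B != C, kvmppc_uvmem_memslot_delete(kvm, B)
 *   m == C: m == memslot, stop (C never completed its create)
 */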
@@ -384,7 +454,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  */
 static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, unsigned long gpa, struct kvm *kvm,
-		unsigned long page_shift, bool *downgrade)
+		unsigned long page_shift)
 {
 	unsigned long src_pfn, dst_pfn = 0;
 	struct migrate_vma mig;
@@ -400,18 +470,6 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 	mig.src = &src_pfn;
 	mig.dst = &dst_pfn;
 
-	/*
-	 * We come here with mmap_lock write lock held just for
-	 * ksm_madvise(), otherwise we only need read mmap_lock.
-	 * Hence downgrade to read lock once ksm_madvise() is done.
-	 */
-	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
-			  MADV_UNMERGEABLE, &vma->vm_flags);
-	mmap_write_downgrade(kvm->mm);
-	*downgrade = true;
-	if (ret)
-		return ret;
-
 	ret = migrate_vma_setup(&mig);
 	if (ret)
 		return ret;
@@ -503,7 +561,6 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 		unsigned long flags,
 		unsigned long page_shift)
 {
-	bool downgrade = false;
 	unsigned long start, end;
 	struct vm_area_struct *vma;
 	int srcu_idx;
@@ -524,7 +581,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 
 	ret = H_PARAMETER;
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	mmap_write_lock(kvm->mm);
+	mmap_read_lock(kvm->mm);
 
 	start = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(start))
@@ -540,16 +597,12 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 	if (!vma || vma->vm_start > start || vma->vm_end < end)
 		goto out_unlock;
 
-	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
-				&downgrade))
+	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
 		ret = H_SUCCESS;
 out_unlock:
 	mutex_unlock(&kvm->arch.uvmem_lock);
 out:
-	if (downgrade)
-		mmap_read_unlock(kvm->mm);
-	else
-		mmap_write_unlock(kvm->mm);
+	mmap_read_unlock(kvm->mm);
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return ret;
 }
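The net effect on the H_SVM_PAGE_IN hcall path: page merging is now disabled once per memslot at H_SVM_INIT_START time (under mmap_write_lock() inside kvmppc_memslot_page_merge()), so the page-in path no longer needs the write lock or the downgrade dance. A condensed view of the resulting lock scope in kvmppc_h_svm_page_in(), as a sketch that elides the parameter checks shown above:

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);	/* read lock is now sufficient */
	/* ... resolve gfn to hva, take kvm->arch.uvmem_lock ... */
	vma = find_vma_intersection(kvm->mm, start, end);
	if (vma && vma->vm_start <= start && vma->vm_end >= end &&
	    !kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift))
		ret = H_SUCCESS;
	/* ... drop uvmem_lock ... */
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);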