@@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvmppc_find_table);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
-		unsigned long *ua, unsigned long **prmap)
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
+		unsigned long tce, unsigned long *ua)
 {
 	unsigned long gfn = tce >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot;
@@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
 	*ua = __gfn_to_hva_memslot(memslot, gfn) |
 		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
 
-	if (prmap)
-		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-
 	return 0;
 }
 
@@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
 	if (iommu_tce_check_gpa(stt->page_shift, gpa))
 		return H_PARAMETER;
 
-	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
+	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
 		return H_TOO_HARD;
 
 	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 		return ret;
 
 	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
 		return H_PARAMETER;
 
 	entry = ioba >> stt->page_shift;
@@ -488,7 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 	struct kvmppc_spapr_tce_table *stt;
 	long i, ret = H_SUCCESS;
 	unsigned long tces, entry, ua = 0;
-	unsigned long *rmap = NULL;
 	unsigned long mmu_seq;
 	bool prereg = false;
 	struct kvmppc_spapr_tce_iommu_table *stit;
@@ -530,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 */
 		struct mm_iommu_table_group_mem_t *mem;
 
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
 			return H_TOO_HARD;
 
 		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -546,23 +542,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		 * We do not require memory to be preregistered in this case
 		 * so lock rmap and do __find_linux_pte_or_hugepte().
 		 */
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
-			return H_TOO_HARD;
-
-		rmap = (void *) vmalloc_to_phys(rmap);
-		if (WARN_ON_ONCE_RM(!rmap))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
 			return H_TOO_HARD;
 
-		/*
-		 * Synchronize with the MMU notifier callbacks in
-		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
-		 * While we have the rmap lock, code running on other CPUs
-		 * cannot finish unmapping the host real page that backs
-		 * this guest real page, so we are OK to access the host
-		 * real page.
-		 */
-		lock_rmap(rmap);
-
 		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
 		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
 			ret = H_TOO_HARD;
@@ -582,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
 
 		ua = 0;
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
 			ret = H_PARAMETER;
 			goto invalidate_exit;
 		}
@@ -607,10 +589,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 		iommu_tce_kill_rm(stit->tbl, entry, npages);
 
 unlock_exit:
-	if (rmap)
-		unlock_rmap(rmap);
-
-	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	if (!prereg)
+		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 
 	return ret;
 }
 