@@ -365,8 +365,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * value to the removed SPTE value.
 			 */
 			for (;;) {
-				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
-				if (!is_removed_spte(old_spte))
+				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
+				if (!is_frozen_spte(old_spte))
 					break;
 				cpu_relax();
 			}
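For context (not part of this commit's hunks): the marker this loop installs and the predicate it checks live on the spte.h side of the rename. A minimal sketch of what those definitions presumably look like after the rename; only the names FROZEN_SPTE and is_frozen_spte() appear in this diff, the exact magic value below is an assumption carried over from the old REMOVED_SPTE definition.

/* Sketch only: roughly what arch/x86/kvm/mmu/spte.h provides after the rename. */

/*
 * A frozen SPTE is a special non-present value that concurrent walkers will
 * not install over; the 0x5a0 magic mirrors the old REMOVED_SPTE encoding and
 * is an assumption here.
 */
#define FROZEN_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

static inline bool is_frozen_spte(u64 spte)
{
	return spte == FROZEN_SPTE;
}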
@@ -397,11 +397,11 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * No retry is needed in the atomic update path as the
 			 * sole concern is dropping a Dirty bit, i.e. no other
 			 * task can zap/remove the SPTE as mmu_lock is held for
-			 * write. Marking the SPTE as a removed SPTE is not
+			 * write. Marking the SPTE as a frozen SPTE is not
 			 * strictly necessary for the same reason, but using
-			 * the remove SPTE value keeps the shared/exclusive
+			 * the frozen SPTE value keeps the shared/exclusive
 			 * paths consistent and allows the handle_changed_spte()
-			 * call below to hardcode the new value to REMOVED_SPTE.
+			 * call below to hardcode the new value to FROZEN_SPTE.
 			 *
 			 * Note, even though dropping a Dirty bit is the only
 			 * scenario where a non-atomic update could result in a
@@ -413,10 +413,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
 			 * it here.
 			 */
 			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
-							  REMOVED_SPTE, level);
+							  FROZEN_SPTE, level);
 		}
 		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
-				    old_spte, REMOVED_SPTE, level, shared);
+				    old_spte, FROZEN_SPTE, level, shared);
 	}
 
 	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
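For context (not part of this commit's hunks): handle_removed_pt() uses two different write helpers from tdp_iter.h. The shared path above must loop on kvm_tdp_mmu_write_spte_atomic() because a concurrent fault handler may already have frozen the entry, while the write-locked path can use the plain helper, whose only concern is not losing hardware-set Accessed/Dirty bits. A condensed sketch of that non-atomic helper, with the volatile-bit check abbreviated (the exact predicate used upstream may differ):

static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
					 u64 new_spte, int level)
{
	/*
	 * If hardware may still be updating Accessed/Dirty bits in old_spte,
	 * fall back to an atomic exchange so those updates are not dropped;
	 * this check stands in for the real helper's predicate.
	 */
	if (spte_has_volatile_bits(old_spte))
		return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);

	__kvm_tdp_mmu_write_spte(sptep, new_spte);
	return old_spte;
}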
@@ -490,19 +490,19 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 	 */
 	if (!was_present && !is_present) {
 		/*
-		 * If this change does not involve a MMIO SPTE or removed SPTE,
+		 * If this change does not involve a MMIO SPTE or frozen SPTE,
 		 * it is unexpected. Log the change, though it should not
 		 * impact the guest since both the former and current SPTEs
 		 * are nonpresent.
 		 */
 		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
 				 !is_mmio_spte(kvm, new_spte) &&
-				 !is_removed_spte(new_spte)))
+				 !is_frozen_spte(new_spte)))
 			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
 			       "should not be replaced with another,\n"
 			       "different nonpresent SPTE, unless one or both\n"
 			       "are MMIO SPTEs, or the new SPTE is\n"
-			       "a temporary removed SPTE.\n"
+			       "a temporary frozen SPTE.\n"
 			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
 			       as_id, gfn, old_spte, new_spte, level);
 		return;
@@ -541,7 +541,7 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
 	 * and pre-checking before inserting a new SPTE is advantageous as it
 	 * avoids unnecessary work.
 	 */
-	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
+	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
 
 	/*
 	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
@@ -604,26 +604,26 @@ static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm,
 	 * in its place before the TLBs are flushed.
 	 *
 	 * Delay processing of the zapped SPTE until after TLBs are flushed and
-	 * the REMOVED_SPTE is replaced (see below).
+	 * the FROZEN_SPTE is replaced (see below).
 	 */
-	ret = __tdp_mmu_set_spte_atomic(iter, REMOVED_SPTE);
+	ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE);
 	if (ret)
 		return ret;
 
 	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
 
 	/*
-	 * No other thread can overwrite the removed SPTE as they must either
+	 * No other thread can overwrite the frozen SPTE as they must either
 	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
-	 * overwrite the special removed SPTE value. Use the raw write helper to
+	 * overwrite the special frozen SPTE value. Use the raw write helper to
 	 * avoid an unnecessary check on volatile bits.
 	 */
 	__kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
 
 	/*
 	 * Process the zapped SPTE after flushing TLBs, and after replacing
-	 * REMOVED_SPTE with 0. This minimizes the amount of time vCPUs are
-	 * blocked by the REMOVED_SPTE and reduces contention on the child
+	 * FROZEN_SPTE with 0. This minimizes the amount of time vCPUs are
+	 * blocked by the FROZEN_SPTE and reduces contention on the child
 	 * SPTEs.
 	 */
 	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
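For context (not part of this commit's hunks): the "no other thread can overwrite the frozen SPTE" guarantee above rests on the lock-for-read update path being a compare-and-exchange, where a racing writer supplies the SPTE value it last read as the expected value; once FROZEN_SPTE has been installed that expectation fails and the writer retries against fresh data. A condensed sketch of what __tdp_mmu_set_spte_atomic() boils down to; the real function's surrounding checks and any additional parameters are omitted here.

static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
							  u64 new_spte)
{
	u64 *sptep = rcu_dereference(iter->sptep);

	/*
	 * On failure, try_cmpxchg64() refreshes iter->old_spte with the value
	 * another CPU just wrote (e.g. FROZEN_SPTE), so the caller retries
	 * against current data instead of clobbering the freeze marker.
	 */
	if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
		return -EBUSY;

	return 0;
}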
@@ -653,12 +653,12 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
 	/*
 	 * No thread should be using this function to set SPTEs to or from the
-	 * temporary removed SPTE value.
+	 * temporary frozen SPTE value.
 	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
 	 * should be used. If operating under the MMU lock in write mode, the
-	 * use of the removed SPTE should not be necessary.
+	 * use of the frozen SPTE should not be necessary.
 	 */
-	WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
+	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));
 
 	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
@@ -1127,7 +1127,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * If SPTE has been frozen by another thread, just give up and
 		 * retry, avoiding unnecessary page table allocation and free.
 		 */
-		if (is_removed_spte(iter.old_spte))
+		if (is_frozen_spte(iter.old_spte))
 			goto retry;
 
 		if (iter.level == fault->goal_level)
@@ -1802,12 +1802,11 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
  *
  * WARNING: This function is only intended to be called during fast_page_fault.
  */
-u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
 					u64 *spte)
 {
 	struct tdp_iter iter;
 	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	gfn_t gfn = addr >> PAGE_SHIFT;
 	tdp_ptep_t sptep = NULL;
 
 	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
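For context (not part of this commit's hunks): with the signature change above, the caller presumably hands over the gfn it has already computed instead of the raw faulting address, while the shadow-MMU helper keeps taking the address. An approximate sketch of the call site in fast_page_fault() in arch/x86/kvm/mmu/mmu.c; the exact field and helper names here are assumptions based on my reading of upstream, not taken from this diff.

	/* Inside fast_page_fault()'s retry loop (approximate sketch): */
	if (tdp_mmu_enabled)
		sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
	else
		sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);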