Commit eb162c9

Merge branch 'kvm-tdx-prep-1-truncated' into HEAD
A rename and refactoring extracted from the preparatory series for Intel TDX support in KVM's MMU.
2 parents: 27e6a24 + c2f38f7

5 files changed: +31 additions, -32 deletions

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 2 deletions
@@ -3448,7 +3448,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		u64 new_spte;
 
 		if (tdp_mmu_enabled)
-			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
+			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->gfn, &spte);
 		else
 			sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
 
@@ -3458,7 +3458,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		 * available as the vCPU holds a reference to its root(s).
 		 */
 		if (WARN_ON_ONCE(!sptep))
-			spte = REMOVED_SPTE;
+			spte = FROZEN_SPTE;
 
 		if (!is_shadow_present_pte(spte))
			break;
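
A note on the mmu.c change above: fast_page_fault() now hands the TDP MMU helper the fault's guest frame number (fault->gfn) rather than the guest-physical address, matching the conversion the helper previously did internally (the `gfn_t gfn = addr >> PAGE_SHIFT;` line deleted from tdp_mmu.c further down). A minimal sketch of that relationship, using standalone stand-in types rather than the kernel's:

/*
 * Illustration only: the gpa -> gfn conversion that the callee used to
 * perform and that the caller now supplies directly via fault->gfn.
 */
#include <stdint.h>

#define PAGE_SHIFT 12               /* x86 base pages are 4 KiB */

typedef uint64_t gpa_t;             /* guest-physical address */
typedef uint64_t gfn_t;             /* guest frame number */

static inline gfn_t gpa_to_gfn_sketch(gpa_t gpa)
{
	return gpa >> PAGE_SHIFT;   /* drop the in-page offset bits */
}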

arch/x86/kvm/mmu/spte.c

Lines changed: 1 addition & 1 deletion
@@ -383,7 +383,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
-	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
+	    WARN_ON(mmio_value && (FROZEN_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;
 
	if (!mmio_value)

arch/x86/kvm/mmu/spte.h

Lines changed: 5 additions & 5 deletions
@@ -202,7 +202,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
 
 /*
  * If a thread running without exclusive control of the MMU lock must perform a
- * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
+ * multi-part operation on an SPTE, it can set the SPTE to FROZEN_SPTE as a
  * non-present intermediate value. Other threads which encounter this value
  * should not modify the SPTE.
  *
@@ -212,14 +212,14 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  *
  * Only used by the TDP MMU.
  */
-#define REMOVED_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
+#define FROZEN_SPTE	(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)
 
 /* Removed SPTEs must not be misconstrued as shadow present PTEs. */
-static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
+static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK));
 
-static inline bool is_removed_spte(u64 spte)
+static inline bool is_frozen_spte(u64 spte)
 {
-	return spte == REMOVED_SPTE;
+	return spte == FROZEN_SPTE;
 }
 
 /* Get an SPTE's index into its parent's page table (and the spt array). */
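
The spte.h comment above describes the contract behind FROZEN_SPTE: a thread running without exclusive control of mmu_lock parks the SPTE at the frozen value while it performs a multi-part update, and any other thread that observes the frozen value must leave it alone, typically backing off and retrying. A minimal, self-contained sketch of that pattern follows; the helpers try_freeze_spte() and unfreeze_spte() and the userspace atomics are stand-ins for illustration, not KVM's actual interfaces:

/* Illustrative freeze-then-update pattern; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SHADOW_NONPRESENT_VALUE	0ULL
#define FROZEN_SPTE		(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)	/* same encoding as spte.h */

static bool is_frozen_spte(uint64_t spte)
{
	return spte == FROZEN_SPTE;
}

/* Freeze @sptep if it still holds @old_spte; false means another thread won the race. */
static bool try_freeze_spte(_Atomic uint64_t *sptep, uint64_t old_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, FROZEN_SPTE);
}

/* Publish the final value once the multi-part operation is finished. */
static void unfreeze_spte(_Atomic uint64_t *sptep, uint64_t new_spte)
{
	atomic_store(sptep, new_spte);
}

A concurrent walker that sees is_frozen_spte() return true simply retries or bails out, which is what the kvm_tdp_mmu_map() hunk further down does with its goto retry.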

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 22 additions & 23 deletions
@@ -365,8 +365,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
			 * value to the removed SPTE value.
			 */
			for (;;) {
-				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
-				if (!is_removed_spte(old_spte))
+				old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, FROZEN_SPTE);
+				if (!is_frozen_spte(old_spte))
					break;
				cpu_relax();
			}
@@ -397,11 +397,11 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
			 * No retry is needed in the atomic update path as the
			 * sole concern is dropping a Dirty bit, i.e. no other
			 * task can zap/remove the SPTE as mmu_lock is held for
-			 * write. Marking the SPTE as a removed SPTE is not
+			 * write. Marking the SPTE as a frozen SPTE is not
			 * strictly necessary for the same reason, but using
-			 * the remove SPTE value keeps the shared/exclusive
+			 * the frozen SPTE value keeps the shared/exclusive
			 * paths consistent and allows the handle_changed_spte()
-			 * call below to hardcode the new value to REMOVED_SPTE.
+			 * call below to hardcode the new value to FROZEN_SPTE.
			 *
			 * Note, even though dropping a Dirty bit is the only
			 * scenario where a non-atomic update could result in a
@@ -413,10 +413,10 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
			 * it here.
			 */
			old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
-							  REMOVED_SPTE, level);
+							  FROZEN_SPTE, level);
		}
		handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
-				    old_spte, REMOVED_SPTE, level, shared);
+				    old_spte, FROZEN_SPTE, level, shared);
	}
 
	call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -490,19 +490,19 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
	 */
	if (!was_present && !is_present) {
		/*
-		 * If this change does not involve a MMIO SPTE or removed SPTE,
+		 * If this change does not involve a MMIO SPTE or frozen SPTE,
		 * it is unexpected. Log the change, though it should not
		 * impact the guest since both the former and current SPTEs
		 * are nonpresent.
		 */
		if (WARN_ON_ONCE(!is_mmio_spte(kvm, old_spte) &&
				 !is_mmio_spte(kvm, new_spte) &&
-				 !is_removed_spte(new_spte)))
+				 !is_frozen_spte(new_spte)))
			pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
			       "should not be replaced with another,\n"
			       "different nonpresent SPTE, unless one or both\n"
			       "are MMIO SPTEs, or the new SPTE is\n"
-			       "a temporary removed SPTE.\n"
+			       "a temporary frozen SPTE.\n"
			       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
			       as_id, gfn, old_spte, new_spte, level);
		return;
@@ -541,7 +541,7 @@ static inline int __must_check __tdp_mmu_set_spte_atomic(struct tdp_iter *iter,
	 * and pre-checking before inserting a new SPTE is advantageous as it
	 * avoids unnecessary work.
	 */
-	WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
+	WARN_ON_ONCE(iter->yielded || is_frozen_spte(iter->old_spte));
 
	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
@@ -604,26 +604,26 @@ static inline int __must_check tdp_mmu_zap_spte_atomic(struct kvm *kvm,
	 * in its place before the TLBs are flushed.
	 *
	 * Delay processing of the zapped SPTE until after TLBs are flushed and
-	 * the REMOVED_SPTE is replaced (see below).
+	 * the FROZEN_SPTE is replaced (see below).
	 */
-	ret = __tdp_mmu_set_spte_atomic(iter, REMOVED_SPTE);
+	ret = __tdp_mmu_set_spte_atomic(iter, FROZEN_SPTE);
	if (ret)
		return ret;
 
	kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
 
	/*
-	 * No other thread can overwrite the removed SPTE as they must either
+	 * No other thread can overwrite the frozen SPTE as they must either
	 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
-	 * overwrite the special removed SPTE value. Use the raw write helper to
+	 * overwrite the special frozen SPTE value. Use the raw write helper to
	 * avoid an unnecessary check on volatile bits.
	 */
	__kvm_tdp_mmu_write_spte(iter->sptep, SHADOW_NONPRESENT_VALUE);
 
	/*
	 * Process the zapped SPTE after flushing TLBs, and after replacing
-	 * REMOVED_SPTE with 0. This minimizes the amount of time vCPUs are
-	 * blocked by the REMOVED_SPTE and reduces contention on the child
+	 * FROZEN_SPTE with 0. This minimizes the amount of time vCPUs are
+	 * blocked by the FROZEN_SPTE and reduces contention on the child
	 * SPTEs.
	 */
	handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
@@ -653,12 +653,12 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
 
	/*
	 * No thread should be using this function to set SPTEs to or from the
-	 * temporary removed SPTE value.
+	 * temporary frozen SPTE value.
	 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
	 * should be used. If operating under the MMU lock in write mode, the
-	 * use of the removed SPTE should not be necessary.
+	 * use of the frozen SPTE should not be necessary.
	 */
-	WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
+	WARN_ON_ONCE(is_frozen_spte(old_spte) || is_frozen_spte(new_spte));
 
	old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
 
@@ -1127,7 +1127,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		 * If SPTE has been frozen by another thread, just give up and
		 * retry, avoiding unnecessary page table allocation and free.
		 */
-		if (is_removed_spte(iter.old_spte))
+		if (is_frozen_spte(iter.old_spte))
			goto retry;
 
		if (iter.level == fault->goal_level)
@@ -1802,12 +1802,11 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
  *
  * WARNING: This function is only intended to be called during fast_page_fault.
  */
-u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte)
 {
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
-	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;
 
	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
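
The tdp_mmu_zap_spte_atomic() hunk above spells out an explicit ordering: freeze the SPTE, flush TLBs, overwrite the frozen value with a plain nonpresent SPTE, and only then process the change. A condensed, self-contained sketch of that ordering follows; zap_spte_sketch() and its stubbed-out helpers are illustrative stand-ins, not the real kernel routines:

/* Illustrative zap ordering; stubs stand in for KVM's real helpers. */
#include <stdatomic.h>
#include <stdint.h>

#define SHADOW_NONPRESENT_VALUE	0ULL
#define FROZEN_SPTE		(SHADOW_NONPRESENT_VALUE | 0x5a0ULL)

/* Stand-ins for kvm_flush_remote_tlbs_gfn() and handle_changed_spte(). */
static void flush_remote_tlbs_stub(uint64_t gfn, int level) { (void)gfn; (void)level; }
static void note_spte_change_stub(uint64_t old_spte, uint64_t new_spte) { (void)old_spte; (void)new_spte; }

static int zap_spte_sketch(_Atomic uint64_t *sptep, uint64_t old_spte,
			   uint64_t gfn, int level)
{
	/* 1. Freeze: no other thread may install a new translation here. */
	if (!atomic_compare_exchange_strong(sptep, &old_spte, FROZEN_SPTE))
		return -1;	/* lost the race; caller retries */

	/* 2. Flush TLBs while the frozen value blocks concurrent writers. */
	flush_remote_tlbs_stub(gfn, level);

	/* 3. Replace the frozen value with an ordinary nonpresent SPTE. */
	atomic_store(sptep, SHADOW_NONPRESENT_VALUE);

	/* 4. Account for the change last, keeping the frozen window short. */
	note_spte_change_stub(old_spte, SHADOW_NONPRESENT_VALUE);
	return 0;
}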

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ static inline void kvm_tdp_mmu_walk_lockless_end(void)
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
-u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
+u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte);
 
 #ifdef CONFIG_X86_64
