Commit 3d4a5a4

dmatlack authored and sean-jc committed
KVM: x86/mmu: Unnest TDP MMU helpers that allocate SPs for eager splitting
Move the implementation of tdp_mmu_alloc_sp_for_split() to its one and
only caller to reduce unnecessary nesting and make it more clear why the
eager split loop continues after allocating a new SP.

Opportunistically drop the double-underscores from
__tdp_mmu_alloc_sp_for_split() now that its parent is gone.

No functional change intended.

Suggested-by: Sean Christopherson <[email protected]>
Signed-off-by: David Matlack <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent e1c04f7 commit 3d4a5a4

File tree

1 file changed: +18 -30 lines

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 18 additions & 30 deletions
@@ -1339,7 +1339,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 	return spte_set;
 }
 
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
 {
 	struct kvm_mmu_page *sp;
 
@@ -1356,34 +1356,6 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
 	return sp;
 }
 
-static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
-						       struct tdp_iter *iter,
-						       bool shared)
-{
-	struct kvm_mmu_page *sp;
-
-	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-	rcu_read_unlock();
-
-	if (shared)
-		read_unlock(&kvm->mmu_lock);
-	else
-		write_unlock(&kvm->mmu_lock);
-
-	iter->yielded = true;
-	sp = __tdp_mmu_alloc_sp_for_split();
-
-	if (shared)
-		read_lock(&kvm->mmu_lock);
-	else
-		write_lock(&kvm->mmu_lock);
-
-	rcu_read_lock();
-
-	return sp;
-}
-
 /* Note, the caller is responsible for initializing @sp. */
 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 				   struct kvm_mmu_page *sp, bool shared)
@@ -1454,7 +1426,22 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 			continue;
 
 		if (!sp) {
-			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
+			rcu_read_unlock();
+
+			if (shared)
+				read_unlock(&kvm->mmu_lock);
+			else
+				write_unlock(&kvm->mmu_lock);
+
+			sp = tdp_mmu_alloc_sp_for_split();
+
+			if (shared)
+				read_lock(&kvm->mmu_lock);
+			else
+				write_lock(&kvm->mmu_lock);
+
+			rcu_read_lock();
+
 			if (!sp) {
 				ret = -ENOMEM;
 				trace_kvm_mmu_split_huge_page(iter.gfn,
@@ -1463,6 +1450,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 				break;
 			}
 
+			iter.yielded = true;
 			continue;
 		}
 
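For context, the path this commit inlines into tdp_mmu_split_huge_pages_root() is the classic "drop the locks, do a sleepable allocation, retake the locks, restart the current iteration" pattern. Below is a minimal, self-contained userspace sketch of that control flow, not the kernel code itself: the pthread rwlock stands in for kvm->mmu_lock, RCU is omitted, and split_huge_pages()/alloc_sp_for_split() are hypothetical names; only the shape of the loop mirrors the diff above, with "i--; continue" playing the role of "iter.yielded = true; continue;".

/* Simplified sketch of the eager-split allocation flow (stand-in types/locks). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct sp { int dummy; };	/* stand-in for struct kvm_mmu_page */

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct sp *alloc_sp_for_split(void)
{
	/* May block/reclaim in the real kernel, hence the unlocked window. */
	return calloc(1, sizeof(struct sp));
}

static int split_huge_pages(int nr_entries, bool shared)
{
	struct sp *sp = NULL;
	int i;

	if (shared)
		pthread_rwlock_rdlock(&mmu_lock);
	else
		pthread_rwlock_wrlock(&mmu_lock);

	for (i = 0; i < nr_entries; i++) {
		if (!sp) {
			/* Drop the lock so the allocation is allowed to sleep. */
			pthread_rwlock_unlock(&mmu_lock);

			sp = alloc_sp_for_split();

			if (shared)
				pthread_rwlock_rdlock(&mmu_lock);
			else
				pthread_rwlock_wrlock(&mmu_lock);

			if (!sp) {
				pthread_rwlock_unlock(&mmu_lock);
				return -1;	/* -ENOMEM in the real code */
			}

			/*
			 * The lock was dropped, so the current entry must be
			 * revisited; this mirrors iter.yielded = true; continue;
			 */
			i--;
			continue;
		}

		/* Consume the pre-allocated sp to split the current entry. */
		printf("split entry %d\n", i);
		free(sp);
		sp = NULL;
	}

	pthread_rwlock_unlock(&mmu_lock);
	free(sp);
	return 0;
}

int main(void)
{
	return split_huge_pages(4, true) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The design point the sketch illustrates is why the loop "continues" after a successful allocation: because the lock was dropped, the iterator state may be stale, so the walk restarts at the same position instead of immediately using the new SP.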