
Commit faf35ce

Maxim Levitsky authored and afranjir committed
KVM: x86: move sev_lock/unlock_vcpus_for_migration to kvm_main.c
Move sev_lock/unlock_vcpus_for_migration to kvm_main.c and call the new functions kvm_lock_all_vcpus/kvm_unlock_all_vcpus and kvm_lock_all_vcpus_nested.

This code allows locking all vCPUs without triggering a lockdep warning about reaching MAX_LOCK_DEPTH, by coercing lockdep into thinking that we release all the locks other than vcpu 0's immediately after we take them.

No functional change intended.

Suggested-by: Paolo Bonzini <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
1 parent 9449dac commit faf35ce
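
For context, both new helpers expect kvm->lock to already be held (each asserts this with lockdep_assert_held()). A minimal caller-side sketch of the single-VM case, using a hypothetical helper name purely for illustration (not part of this commit):

/* Hypothetical caller, for illustration only. */
static int do_work_with_all_vcpus_held(struct kvm *kvm)
{
        int ret;

        mutex_lock(&kvm->lock);

        /* Take every vcpu->mutex; returns -EINTR if interrupted by a fatal signal. */
        ret = kvm_lock_all_vcpus(kvm, false);
        if (ret)
                goto out;

        /* ... operate on the VM while every vCPU mutex is held ... */

        kvm_unlock_all_vcpus(kvm);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}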

File tree: 3 files changed (+81 / -61 lines)


arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 61 deletions
@@ -1845,63 +1845,6 @@ enum sev_migration_role {
 	SEV_NR_MIGRATION_ROLES,
 };
 
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-					enum sev_migration_role role)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i, j;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (mutex_lock_killable_nested(&vcpu->mutex, role))
-			goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (!i)
-			/*
-			 * Reset the role to one that avoids colliding with
-			 * the role used for the first vcpu mutex.
-			 */
-			role = SEV_NR_MIGRATION_ROLES;
-		else
-			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-	}
-
-	return 0;
-
-out_unlock:
-
-	kvm_for_each_vcpu(j, vcpu, kvm) {
-		if (i == j)
-			break;
-
-#ifdef CONFIG_PROVE_LOCKING
-		if (j)
-			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-		mutex_unlock(&vcpu->mutex);
-	}
-	return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-	struct kvm_vcpu *vcpu;
-	unsigned long i;
-	bool first = true;
-
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (first)
-			first = false;
-		else
-			mutex_acquire(&vcpu->mutex.dep_map,
-				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-		mutex_unlock(&vcpu->mutex);
-	}
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2023,10 +1966,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, struct kvm *source_kvm)
 		charged = true;
 	}
 
-	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+	ret = kvm_lock_all_vcpus_nested(kvm, false, SEV_MIGRATION_SOURCE);
 	if (ret)
 		goto out_dst_cgroup;
-	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+	ret = kvm_lock_all_vcpus_nested(source_kvm, false, SEV_MIGRATION_TARGET);
 	if (ret)
 		goto out_dst_vcpu;
 
@@ -2040,9 +1983,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, struct kvm *source_kvm)
 	ret = 0;
 
 out_source_vcpu:
-	sev_unlock_vcpus_for_migration(source_kvm);
+	kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-	sev_unlock_vcpus_for_migration(kvm);
+	kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
 	/* Operates on the source on success, on the destination on failure. */
 	if (charged)
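
The role values passed above come from enum sev_migration_role, defined just before the removed functions (its tail is visible in the first hunk). Roughly, from the existing source (not part of this diff):

enum sev_migration_role {
        SEV_MIGRATION_SOURCE = 0,
        SEV_MIGRATION_TARGET,

        SEV_NR_MIGRATION_ROLES,
};

Passing distinct roles to kvm_lock_all_vcpus_nested() gives the two VMs' vCPU mutexes different lockdep subclasses, so taking the vCPU mutexes of the second VM while the first VM's are already held does not look like recursive locking of the same lock class.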

include/linux/kvm_host.h

Lines changed: 6 additions & 0 deletions
@@ -1016,6 +1016,12 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
 
 void kvm_destroy_vcpus(struct kvm *kvm);
 
+int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
+
+#define kvm_lock_all_vcpus(kvm, trylock) \
+	kvm_lock_all_vcpus_nested(kvm, trylock, 0)
+
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);
 

virt/kvm/kvm_main.c

Lines changed: 71 additions & 0 deletions
@@ -1368,6 +1368,77 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+
+/*
+ * Lock all VM vCPUs.
+ * Can be used nested (to lock vCPUS of two VMs for example)
+ */
+int kvm_lock_all_vcpus_nested(struct kvm *kvm, bool trylock, unsigned int role)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long i, j;
+
+	lockdep_assert_held(&kvm->lock);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+
+		if (trylock && !mutex_trylock_nested(&vcpu->mutex, role))
+			goto out_unlock;
+		else if (!trylock && mutex_lock_killable_nested(&vcpu->mutex, role))
+			goto out_unlock;
+
+#ifdef CONFIG_PROVE_LOCKING
+		if (!i)
+			/*
+			 * Reset the role to one that avoids colliding with
+			 * the role used for the first vcpu mutex.
+			 */
+			role = MAX_LOCK_DEPTH - 1;
+		else
+			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
+#endif
+	}
+
+	return 0;
+
+out_unlock:
+
+	kvm_for_each_vcpu(j, vcpu, kvm) {
+		if (i == j)
+			break;
+
+#ifdef CONFIG_PROVE_LOCKING
+		if (j)
+			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
+#endif
+
+		mutex_unlock(&vcpu->mutex);
+	}
+	return -EINTR;
+}
+EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus_nested);
+
+void kvm_unlock_all_vcpus(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long i;
+	bool first = true;
+
+	lockdep_assert_held(&kvm->lock);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (first)
+			first = false;
+		else
+			mutex_acquire(&vcpu->mutex.dep_map,
+				      MAX_LOCK_DEPTH - 1, 0, _THIS_IP_);
+
+		mutex_unlock(&vcpu->mutex);
+	}
+}
+EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);
+
+
 /*
  * Allocation size is twice as large as the actual dirty bitmap size.
  * See kvm_vm_ioctl_get_dirty_log() why this is needed.
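
Because, from lockdep's point of view, only the first vCPU's mutex stays in the held-lock list (the rest are released with mutex_release() right after being taken and re-acquired with mutex_acquire() just before the real unlock), a task can hold the vCPU locks of an arbitrarily large VM, or of two VMs at once, without exceeding lockdep's MAX_LOCK_DEPTH limit on tracked held locks. A rough sketch of the nested two-VM pattern the comment above alludes to, with a hypothetical function name and role values chosen only for illustration:

/* Illustration only: lock the vCPUs of two VMs, e.g. for a migration-style operation.
 * Assumes the caller already holds both a->lock and b->lock.
 */
static int lock_vcpus_of_both_vms(struct kvm *a, struct kvm *b)
{
        int ret;

        ret = kvm_lock_all_vcpus_nested(a, false, 0);   /* role 0 for the first VM */
        if (ret)
                return ret;

        ret = kvm_lock_all_vcpus_nested(b, false, 1);   /* distinct role for the second VM */
        if (ret)
                kvm_unlock_all_vcpus(a);

        return ret;
}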
