Skip to content

Commit 8e86e73

Browse files
committed
Merge branch 'kvm-lockdep-common' into HEAD
Introduce new mutex locking functions mutex_trylock_nest_lock() and mutex_lock_killable_nest_lock() and use them to clean up locking of all vCPUs for a VM. For x86, this removes some complex code that was used instead of lockdep's "nest_lock" feature. For ARM and RISC-V, this removes a lockdep warning when the VM is configured to have more than MAX_LOCK_DEPTH vCPUs, and removes a fair amount of duplicate code by sharing the logic across all architectures. Signed-off-by: Paolo Bonzini <[email protected]>
2 parents 4e02d4f + 4dbe28c commit 8e86e73

File tree

13 files changed

+131
-170
lines changed

13 files changed

+131
-170
lines changed

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1320,9 +1320,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
13201320
unsigned int idx);
13211321
int __init populate_nv_trap_config(void);
13221322

1323-
bool lock_all_vcpus(struct kvm *kvm);
1324-
void unlock_all_vcpus(struct kvm *kvm);
1325-
13261323
void kvm_calculate_traps(struct kvm_vcpu *vcpu);
13271324

13281325
/* MMIO helpers */

arch/arm64/kvm/arch_timer.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
17661766

17671767
mutex_lock(&kvm->lock);
17681768

1769-
if (lock_all_vcpus(kvm)) {
1769+
if (!kvm_trylock_all_vcpus(kvm)) {
17701770
set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);
17711771

17721772
/*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
17781778
kvm->arch.timer_data.voffset = offset->counter_offset;
17791779
kvm->arch.timer_data.poffset = offset->counter_offset;
17801780

1781-
unlock_all_vcpus(kvm);
1781+
kvm_unlock_all_vcpus(kvm);
17821782
} else {
17831783
ret = -EBUSY;
17841784
}

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -1924,49 +1924,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
19241924
}
19251925
}
19261926

1927-
/* unlocks vcpus from @vcpu_lock_idx and smaller */
1928-
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
1929-
{
1930-
struct kvm_vcpu *tmp_vcpu;
1931-
1932-
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
1933-
tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
1934-
mutex_unlock(&tmp_vcpu->mutex);
1935-
}
1936-
}
1937-
1938-
void unlock_all_vcpus(struct kvm *kvm)
1939-
{
1940-
lockdep_assert_held(&kvm->lock);
1941-
1942-
unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
1943-
}
1944-
1945-
/* Returns true if all vcpus were locked, false otherwise */
1946-
bool lock_all_vcpus(struct kvm *kvm)
1947-
{
1948-
struct kvm_vcpu *tmp_vcpu;
1949-
unsigned long c;
1950-
1951-
lockdep_assert_held(&kvm->lock);
1952-
1953-
/*
1954-
* Any time a vcpu is in an ioctl (including running), the
1955-
* core KVM code tries to grab the vcpu->mutex.
1956-
*
1957-
* By grabbing the vcpu->mutex of all VCPUs we ensure that no
1958-
* other VCPUs can fiddle with the state while we access it.
1959-
*/
1960-
kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
1961-
if (!mutex_trylock(&tmp_vcpu->mutex)) {
1962-
unlock_vcpus(kvm, c - 1);
1963-
return false;
1964-
}
1965-
}
1966-
1967-
return true;
1968-
}
1969-
19701927
static unsigned long nvhe_percpu_size(void)
19711928
{
19721929
return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
8888
lockdep_assert_held(&kvm->lock);
8989

9090
ret = -EBUSY;
91-
if (!lock_all_vcpus(kvm))
91+
if (kvm_trylock_all_vcpus(kvm))
9292
return ret;
9393

9494
mutex_lock(&kvm->arch.config_lock);
@@ -142,7 +142,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
142142

143143
out_unlock:
144144
mutex_unlock(&kvm->arch.config_lock);
145-
unlock_all_vcpus(kvm);
145+
kvm_unlock_all_vcpus(kvm);
146146
return ret;
147147
}
148148

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1971,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
19711971

19721972
mutex_lock(&dev->kvm->lock);
19731973

1974-
if (!lock_all_vcpus(dev->kvm)) {
1974+
if (kvm_trylock_all_vcpus(dev->kvm)) {
19751975
mutex_unlock(&dev->kvm->lock);
19761976
return -EBUSY;
19771977
}
@@ -2006,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
20062006
}
20072007
out:
20082008
mutex_unlock(&dev->kvm->arch.config_lock);
2009-
unlock_all_vcpus(dev->kvm);
2009+
kvm_unlock_all_vcpus(dev->kvm);
20102010
mutex_unlock(&dev->kvm->lock);
20112011
return ret;
20122012
}
@@ -2676,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
26762676

26772677
mutex_lock(&kvm->lock);
26782678

2679-
if (!lock_all_vcpus(kvm)) {
2679+
if (kvm_trylock_all_vcpus(kvm)) {
26802680
mutex_unlock(&kvm->lock);
26812681
return -EBUSY;
26822682
}
@@ -2698,7 +2698,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
26982698

26992699
mutex_unlock(&its->its_lock);
27002700
mutex_unlock(&kvm->arch.config_lock);
2701-
unlock_all_vcpus(kvm);
2701+
kvm_unlock_all_vcpus(kvm);
27022702
mutex_unlock(&kvm->lock);
27032703
return ret;
27042704
}

arch/arm64/kvm/vgic/vgic-kvm-device.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -268,15 +268,15 @@ static int vgic_set_common_attr(struct kvm_device *dev,
268268
return -ENXIO;
269269
mutex_lock(&dev->kvm->lock);
270270

271-
if (!lock_all_vcpus(dev->kvm)) {
271+
if (kvm_trylock_all_vcpus(dev->kvm)) {
272272
mutex_unlock(&dev->kvm->lock);
273273
return -EBUSY;
274274
}
275275

276276
mutex_lock(&dev->kvm->arch.config_lock);
277277
r = vgic_v3_save_pending_tables(dev->kvm);
278278
mutex_unlock(&dev->kvm->arch.config_lock);
279-
unlock_all_vcpus(dev->kvm);
279+
kvm_unlock_all_vcpus(dev->kvm);
280280
mutex_unlock(&dev->kvm->lock);
281281
return r;
282282
}
@@ -390,7 +390,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
390390

391391
mutex_lock(&dev->kvm->lock);
392392

393-
if (!lock_all_vcpus(dev->kvm)) {
393+
if (kvm_trylock_all_vcpus(dev->kvm)) {
394394
mutex_unlock(&dev->kvm->lock);
395395
return -EBUSY;
396396
}
@@ -415,7 +415,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,
415415

416416
out:
417417
mutex_unlock(&dev->kvm->arch.config_lock);
418-
unlock_all_vcpus(dev->kvm);
418+
kvm_unlock_all_vcpus(dev->kvm);
419419
mutex_unlock(&dev->kvm->lock);
420420

421421
if (!ret && !is_write)
@@ -554,7 +554,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
554554

555555
mutex_lock(&dev->kvm->lock);
556556

557-
if (!lock_all_vcpus(dev->kvm)) {
557+
if (kvm_trylock_all_vcpus(dev->kvm)) {
558558
mutex_unlock(&dev->kvm->lock);
559559
return -EBUSY;
560560
}
@@ -611,7 +611,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,
611611

612612
out:
613613
mutex_unlock(&dev->kvm->arch.config_lock);
614-
unlock_all_vcpus(dev->kvm);
614+
kvm_unlock_all_vcpus(dev->kvm);
615615
mutex_unlock(&dev->kvm->lock);
616616

617617
if (!ret && uaccess && !is_write) {

arch/riscv/kvm/aia_device.c

Lines changed: 2 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -12,36 +12,6 @@
1212
#include <linux/kvm_host.h>
1313
#include <linux/uaccess.h>
1414

15-
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
16-
{
17-
struct kvm_vcpu *tmp_vcpu;
18-
19-
for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
20-
tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
21-
mutex_unlock(&tmp_vcpu->mutex);
22-
}
23-
}
24-
25-
static void unlock_all_vcpus(struct kvm *kvm)
26-
{
27-
unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
28-
}
29-
30-
static bool lock_all_vcpus(struct kvm *kvm)
31-
{
32-
struct kvm_vcpu *tmp_vcpu;
33-
unsigned long c;
34-
35-
kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
36-
if (!mutex_trylock(&tmp_vcpu->mutex)) {
37-
unlock_vcpus(kvm, c - 1);
38-
return false;
39-
}
40-
}
41-
42-
return true;
43-
}
44-
4515
static int aia_create(struct kvm_device *dev, u32 type)
4616
{
4717
int ret;
@@ -53,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
5323
return -EEXIST;
5424

5525
ret = -EBUSY;
56-
if (!lock_all_vcpus(kvm))
26+
if (kvm_trylock_all_vcpus(kvm))
5727
return ret;
5828

5929
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -65,7 +35,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
6535
kvm->arch.aia.in_kernel = true;
6636

6737
out_unlock:
68-
unlock_all_vcpus(kvm);
38+
kvm_unlock_all_vcpus(kvm);
6939
return ret;
7040
}
7141

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 68 deletions
Original file line numberDiff line numberDiff line change
@@ -1884,70 +1884,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
18841884
atomic_set_release(&src_sev->migration_in_progress, 0);
18851885
}
18861886

1887-
/* vCPU mutex subclasses. */
1888-
enum sev_migration_role {
1889-
SEV_MIGRATION_SOURCE = 0,
1890-
SEV_MIGRATION_TARGET,
1891-
SEV_NR_MIGRATION_ROLES,
1892-
};
1893-
1894-
static int sev_lock_vcpus_for_migration(struct kvm *kvm,
1895-
enum sev_migration_role role)
1896-
{
1897-
struct kvm_vcpu *vcpu;
1898-
unsigned long i, j;
1899-
1900-
kvm_for_each_vcpu(i, vcpu, kvm) {
1901-
if (mutex_lock_killable_nested(&vcpu->mutex, role))
1902-
goto out_unlock;
1903-
1904-
#ifdef CONFIG_PROVE_LOCKING
1905-
if (!i)
1906-
/*
1907-
* Reset the role to one that avoids colliding with
1908-
* the role used for the first vcpu mutex.
1909-
*/
1910-
role = SEV_NR_MIGRATION_ROLES;
1911-
else
1912-
mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
1913-
#endif
1914-
}
1915-
1916-
return 0;
1917-
1918-
out_unlock:
1919-
1920-
kvm_for_each_vcpu(j, vcpu, kvm) {
1921-
if (i == j)
1922-
break;
1923-
1924-
#ifdef CONFIG_PROVE_LOCKING
1925-
if (j)
1926-
mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
1927-
#endif
1928-
1929-
mutex_unlock(&vcpu->mutex);
1930-
}
1931-
return -EINTR;
1932-
}
1933-
1934-
static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
1935-
{
1936-
struct kvm_vcpu *vcpu;
1937-
unsigned long i;
1938-
bool first = true;
1939-
1940-
kvm_for_each_vcpu(i, vcpu, kvm) {
1941-
if (first)
1942-
first = false;
1943-
else
1944-
mutex_acquire(&vcpu->mutex.dep_map,
1945-
SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
1946-
1947-
mutex_unlock(&vcpu->mutex);
1948-
}
1949-
}
1950-
19511887
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
19521888
{
19531889
struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2085,10 +2021,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
20852021
charged = true;
20862022
}
20872023

2088-
ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
2024+
ret = kvm_lock_all_vcpus(kvm);
20892025
if (ret)
20902026
goto out_dst_cgroup;
2091-
ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
2027+
ret = kvm_lock_all_vcpus(source_kvm);
20922028
if (ret)
20932029
goto out_dst_vcpu;
20942030

@@ -2102,9 +2038,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
21022038
ret = 0;
21032039

21042040
out_source_vcpu:
2105-
sev_unlock_vcpus_for_migration(source_kvm);
2041+
kvm_unlock_all_vcpus(source_kvm);
21062042
out_dst_vcpu:
2107-
sev_unlock_vcpus_for_migration(kvm);
2043+
kvm_unlock_all_vcpus(kvm);
21082044
out_dst_cgroup:
21092045
/* Operates on the source on success, on the destination on failure. */
21102046
if (charged)

include/linux/kvm_host.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1015,6 +1015,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
10151015

10161016
void kvm_destroy_vcpus(struct kvm *kvm);
10171017

1018+
int kvm_trylock_all_vcpus(struct kvm *kvm);
1019+
int kvm_lock_all_vcpus(struct kvm *kvm);
1020+
void kvm_unlock_all_vcpus(struct kvm *kvm);
1021+
10181022
void vcpu_load(struct kvm_vcpu *vcpu);
10191023
void vcpu_put(struct kvm_vcpu *vcpu);
10201024

0 commit comments

Comments
 (0)