Skip to content

Commit e5ba90a

Browse files
bibo-maochenhuacai
authored and committed
LoongArch: Revert qspinlock to test-and-set simple lock on VM
Similar with x86, when VM is detected, revert to a simple test-and-set lock to avoid the horrors of queue preemption. Tested on 3C5000 Dual-way machine with 32 cores and 2 numa nodes, test case is kcbench on kernel mainline 6.10, the detailed command is "kcbench --src /root/src/linux" Performance on host machine kernel compile time performance impact Original 150.29 seconds With patch 150.19 seconds almost no impact Performance on virtual machine: 1. 1 VM with 32 vCPUs and 2 numa node, numa node pinned kernel compile time performance impact Original 170.87 seconds With patch 171.73 seconds almost no impact 2. 2 VMs, each VM with 32 vCPUs and 2 numa node, numa node pinned kernel compile time performance impact Original 2362.04 seconds With patch 354.73 seconds +565% Signed-off-by: Bibo Mao <[email protected]> Signed-off-by: Huacai Chen <[email protected]>
1 parent da3ea35 commit e5ba90a

File tree

6 files changed

+64
-2
lines changed

6 files changed

+64
-2
lines changed

arch/loongarch/include/asm/Kbuild

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ generic-y += mcs_spinlock.h
66
generic-y += parport.h
77
generic-y += early_ioremap.h
88
generic-y += qrwlock.h
9-
generic-y += qspinlock.h
109
generic-y += user.h
1110
generic-y += ioctl.h
1211
generic-y += statfs.h

arch/loongarch/include/asm/paravirt.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ static inline u64 paravirt_steal_clock(int cpu)
1919

2020
int __init pv_ipi_init(void);
2121
int __init pv_time_init(void);
22+
int __init pv_spinlock_init(void);
2223

2324
#else
2425

@@ -31,5 +32,11 @@ static inline int pv_time_init(void)
3132
{
3233
return 0;
3334
}
35+
36+
/* !CONFIG_PARAVIRT stub: nothing to enable, always reports success (0). */
static inline int pv_spinlock_init(void)
37+
{
38+
return 0;
39+
}
40+
3441
#endif // CONFIG_PARAVIRT
3542
#endif
Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef _ASM_LOONGARCH_QSPINLOCK_H
3+
#define _ASM_LOONGARCH_QSPINLOCK_H
4+
5+
#include <linux/jump_label.h>
6+
7+
#ifdef CONFIG_PARAVIRT
8+
9+
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
10+
11+
#define virt_spin_lock virt_spin_lock
12+
13+
/*
 * Take @lock with a simple test-and-set loop instead of the queued
 * slowpath when virt_spin_lock_key is enabled (i.e. when running on a
 * hypervisor — see pv_spinlock_init()). Returns false when the key is
 * off so callers fall through to the normal qspinlock path; returns
 * true once the lock has been acquired here.
 */
static inline bool virt_spin_lock(struct qspinlock *lock)
14+
{
15+
int val;
16+
17+
if (!static_branch_unlikely(&virt_spin_lock_key))
18+
return false;
19+
20+
/*
21+
* On hypervisors without PARAVIRT_SPINLOCKS support we fall
22+
* back to a Test-and-Set spinlock, because fair locks have
23+
* horrible lock 'holder' preemption issues.
24+
*/
25+
26+
__retry:
27+
/* Re-read so the cmpxchg below starts from the current value. */
val = atomic_read(&lock->val);
28+
29+
if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
30+
cpu_relax();
31+
goto __retry;
32+
}
33+
34+
return true;
35+
}
36+
37+
#endif /* CONFIG_PARAVIRT */
38+
39+
#include <asm-generic/qspinlock.h>
40+
41+
#endif // _ASM_LOONGARCH_QSPINLOCK_H

arch/loongarch/kernel/paravirt.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ static int has_steal_clock;
1313
struct static_key paravirt_steal_enabled;
1414
struct static_key paravirt_steal_rq_enabled;
1515
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
16+
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
1617

1718
static u64 native_steal_clock(int cpu)
1819
{
@@ -300,3 +301,13 @@ int __init pv_time_init(void)
300301

301302
return 0;
302303
}
304+
305+
/*
 * Enable the test-and-set spinlock fallback (virt_spin_lock_key) when
 * running under a hypervisor; a no-op on bare metal. Always returns 0.
 */
int __init pv_spinlock_init(void)
306+
{
307+
if (!cpu_has_hypervisor)
308+
return 0;
309+
310+
static_branch_enable(&virt_spin_lock_key);
311+
312+
return 0;
313+
}

arch/loongarch/kernel/setup.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -603,6 +603,8 @@ void __init setup_arch(char **cmdline_p)
603603
arch_mem_init(cmdline_p);
604604

605605
resource_init();
606+
jump_label_init(); /* Initialise the static keys for paravirtualization */
607+
606608
#ifdef CONFIG_SMP
607609
plat_smp_setup();
608610
prefill_possible_map();

arch/loongarch/kernel/smp.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -476,7 +476,7 @@ core_initcall(ipi_pm_init);
476476
#endif
477477

478478
/* Preload SMP state for boot cpu */
479-
void smp_prepare_boot_cpu(void)
479+
void __init smp_prepare_boot_cpu(void)
480480
{
481481
unsigned int cpu, node, rr_node;
482482

@@ -509,6 +509,8 @@ void smp_prepare_boot_cpu(void)
509509
rr_node = next_node_in(rr_node, node_online_map);
510510
}
511511
}
512+
513+
pv_spinlock_init();
512514
}
513515

514516
/* called from main before smp_init() */

0 commit comments

Comments
 (0)