
Commit f1a033c

jgross1 authored and Peter Zijlstra committed
x86/paravirt: Use common macro for creating simple asm paravirt functions
There are some paravirt assembler functions which share a common pattern.
Introduce a macro DEFINE_PARAVIRT_ASM() for creating them.

Note that this macro includes explicit alignment of the generated functions,
so __raw_callee_save___kvm_vcpu_is_preempted(), _paravirt_nop() and
paravirt_ret0() are now aligned at 4 byte boundaries.

The explicit _paravirt_nop() prototype in paravirt.c isn't needed, as it is
already included in paravirt_types.h.

Signed-off-by: Juergen Gross <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Srivatsa S. Bhat (VMware) <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 5736b1b commit f1a033c

File tree: 4 files changed (+40, -61 lines)

arch/x86/include/asm/paravirt.h

Lines changed: 12 additions & 0 deletions
@@ -731,6 +731,18 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #undef PVOP_VCALL4
 #undef PVOP_CALL4
 
+#define DEFINE_PARAVIRT_ASM(func, instr, sec)		\
+	asm (".pushsection " #sec ", \"ax\"\n"		\
+	     ".global " #func "\n\t"			\
+	     ".type " #func ", @function\n\t"		\
+	     ASM_FUNC_ALIGN "\n"			\
+	     #func ":\n\t"				\
+	     ASM_ENDBR					\
+	     instr "\n\t"				\
+	     ASM_RET					\
+	     ".size " #func ", . - " #func "\n\t"	\
+	     ".popsection")
+
 extern void default_banner(void);
 
 #else	/* __ASSEMBLY__ */
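To make the stringizing concrete, here is roughly what an invocation used later in this commit, DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text), expands to. This is an illustration rather than the preprocessor's exact output; ASM_FUNC_ALIGN, ASM_ENDBR and ASM_RET are expanded further by their own x86 headers.

/* Rough expansion, for illustration only. */
asm (".pushsection .entry.text, \"ax\"\n"
     ".global paravirt_ret0\n\t"
     ".type paravirt_ret0, @function\n\t"
     ASM_FUNC_ALIGN "\n"
     "paravirt_ret0:\n\t"
     ASM_ENDBR
     "xor %eax,%eax\n\t"
     ASM_RET
     ".size paravirt_ret0, . - paravirt_ret0\n\t"
     ".popsection");

Everything a hand-written variant previously had to repeat (section directives, .global, .type, alignment, ENDBR, RET and .size) now comes from a single place.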

arch/x86/include/asm/qspinlock_paravirt.h

Lines changed: 20 additions & 27 deletions
@@ -14,8 +14,6 @@
 
 __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
 #define __pv_queued_spin_unlock	__pv_queued_spin_unlock
-#define PV_UNLOCK "__raw_callee_save___pv_queued_spin_unlock"
-#define PV_UNLOCK_SLOWPATH "__raw_callee_save___pv_queued_spin_unlock_slowpath"
 
 /*
  * Optimized assembly version of __raw_callee_save___pv_queued_spin_unlock
@@ -37,32 +35,27 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
  * rsi = lockval (second argument)
  * rdx = internal variable (set to 0)
  */
-asm    (".pushsection .spinlock.text, \"ax\";"
-	".global " PV_UNLOCK ";"
-	".type " PV_UNLOCK ", @function;"
-	ASM_FUNC_ALIGN
-	PV_UNLOCK ": "
-	ASM_ENDBR
-	FRAME_BEGIN
-	"push %rdx;"
-	"mov $0x1,%eax;"
-	"xor %edx,%edx;"
-	LOCK_PREFIX "cmpxchg %dl,(%rdi);"
-	"cmp $0x1,%al;"
-	"jne .slowpath;"
-	"pop %rdx;"
+#define PV_UNLOCK_ASM							\
+	FRAME_BEGIN							\
+	"push %rdx\n\t"							\
+	"mov $0x1,%eax\n\t"						\
+	"xor %edx,%edx\n\t"						\
+	LOCK_PREFIX "cmpxchg %dl,(%rdi)\n\t"				\
+	"cmp $0x1,%al\n\t"						\
+	"jne .slowpath\n\t"						\
+	"pop %rdx\n\t"							\
+	FRAME_END							\
+	ASM_RET								\
+	".slowpath:\n\t"						\
+	"push %rsi\n\t"							\
+	"movzbl %al,%esi\n\t"						\
+	"call __raw_callee_save___pv_queued_spin_unlock_slowpath\n\t"	\
+	"pop %rsi\n\t"							\
+	"pop %rdx\n\t"							\
 	FRAME_END
-	ASM_RET
-	".slowpath: "
-	"push %rsi;"
-	"movzbl %al,%esi;"
-	"call " PV_UNLOCK_SLOWPATH ";"
-	"pop %rsi;"
-	"pop %rdx;"
-	FRAME_END
-	ASM_RET
-	".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
-	".popsection");
+
+DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
+		    PV_UNLOCK_ASM, .spinlock.text);
 
 #else /* CONFIG_64BIT */
 
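To make the register discipline above easier to follow, below is a hypothetical C-level sketch of the fast path / slow path split that PV_UNLOCK_ASM implements. It assumes the usual qspinlock helpers (struct qspinlock with a byte-sized locked field, _Q_LOCKED_VAL, try_cmpxchg()); the real function has to stay in hand-written assembly because, as a callee-save thunk, it may only touch the few registers listed in the comment above.

/* Hypothetical sketch, not the kernel's implementation. */
static inline void pv_unlock_sketch(struct qspinlock *lock)
{
	u8 locked = _Q_LOCKED_VAL;		/* mov $0x1,%eax */

	/*
	 * Fast path: if the lock byte is still _Q_LOCKED_VAL, clear it and
	 * return (lock cmpxchg %dl,(%rdi) with %dl zeroed beforehand).
	 */
	if (try_cmpxchg(&lock->locked, &locked, 0))
		return;

	/*
	 * Slow path: a waiter is queued. 'locked' now holds the old lock
	 * value (movzbl %al,%esi) and is passed on as the second argument.
	 */
	__pv_queued_spin_unlock_slowpath(lock, locked);
}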
arch/x86/kernel/kvm.c

Lines changed: 6 additions & 13 deletions
@@ -798,20 +798,13 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
  * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
  * restoring to/from the stack.
  */
-asm(
-".pushsection .text;"
-".global __raw_callee_save___kvm_vcpu_is_preempted;"
-".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
-ASM_FUNC_ALIGN
-"__raw_callee_save___kvm_vcpu_is_preempted:"
-ASM_ENDBR
-"movq __per_cpu_offset(,%rdi,8), %rax;"
-"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
-"setne %al;"
-ASM_RET
-".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
-".popsection");
+#define PV_VCPU_PREEMPTED_ASM						      \
+ "movq __per_cpu_offset(,%rdi,8), %rax\n\t"				      \
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t"   \
+ "setne %al\n\t"
 
+DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
+		    PV_VCPU_PREEMPTED_ASM, .text);
 #endif
 
 static void __init kvm_guest_init(void)
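As a reading aid, the three instructions in PV_VCPU_PREEMPTED_ASM map roughly onto the C sketch below; it assumes the per-CPU steal_time variable that kvm.c declares elsewhere. The hand-written assembly exists precisely to avoid the eight 64-bit register saves/restores a plain C callee-save thunk would cost.

/*
 * Sketch only: read 'preempted' from the target CPU's steal_time area via
 * __per_cpu_offset[cpu] and return whether it is non-zero.
 */
static bool kvm_vcpu_is_preempted_sketch(long cpu)
{
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	return READ_ONCE(st->preempted) != 0;	/* cmpb $0, ...; setne %al */
}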

arch/x86/kernel/paravirt.c

Lines changed: 2 additions & 21 deletions
@@ -37,29 +37,10 @@
  * nop stub, which must not clobber anything *including the stack* to
  * avoid confusing the entry prologues.
  */
-extern void _paravirt_nop(void);
-asm (".pushsection .entry.text, \"ax\"\n"
-     ".global _paravirt_nop\n"
-     ASM_FUNC_ALIGN
-     "_paravirt_nop:\n\t"
-     ASM_ENDBR
-     ASM_RET
-     ".size _paravirt_nop, . - _paravirt_nop\n\t"
-     ".type _paravirt_nop, @function\n\t"
-     ".popsection");
+DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
 
 /* stub always returning 0. */
-asm (".pushsection .entry.text, \"ax\"\n"
-     ".global paravirt_ret0\n"
-     ASM_FUNC_ALIGN
-     "paravirt_ret0:\n\t"
-     ASM_ENDBR
-     "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
-     ASM_RET
-     ".size paravirt_ret0, . - paravirt_ret0\n\t"
-     ".type paravirt_ret0, @function\n\t"
-     ".popsection");
-
+DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
 
 void __init default_banner(void)
 {
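For intuition, the observable behaviour of the two regenerated stubs is sketched in C below. This is illustration only: the point of emitting them through DEFINE_PARAVIRT_ASM is that no compiler-generated prologue or epilogue can touch the stack. The void prototype of _paravirt_nop() is the one the commit message says already lives in paravirt_types.h; the unsigned long return type for paravirt_ret0() is an assumption made here.

/* Behavioural sketch only, not how the stubs are actually built. */
void _paravirt_nop_sketch(void)
{
	/* instr is "", so the generated body is just ENDBR + RET */
}

unsigned long paravirt_ret0_sketch(void)	/* return type assumed */
{
	return 0;	/* "xor %eax,%eax" + RET */
}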
