|
 * Function calls can clobber anything except the callee-saved
 * registers. Tell the compiler.
 */
-#define call_on_irqstack(func, asm_call, argconstr...) \
+#define call_on_stack(stack, func, asm_call, argconstr...) \
{ \
	register void *tos asm("r11"); \
\
-	tos = ((void *)__this_cpu_read(hardirq_stack_ptr)); \
+	tos = ((void *)(stack)); \
\
	asm_inline volatile( \
	"movq %%rsp, (%[tos]) \n" \

	); \
}

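To make the generalized interface concrete, a minimal usage sketch. It assumes the elided asm body saves and restores %rsp around the call and splices argconstr into the asm input constraints, as the SYSVEC/IRQ users further down suggest; my_func, my_stack_top and MY_CONSTRAINTS are invented names, not part of the patch.

	/* Hypothetical caller: run my_func(arg) on the stack ending at
	 * my_stack_top. The constraints macro carries its leading comma,
	 * mirroring SYSVEC_CONSTRAINTS below. */
	#define MY_CONSTRAINTS , [arg1] "r" (arg)

	static void my_func(void *arg) { /* ... */ }

	static void run_elsewhere(void *my_stack_top, void *arg)
	{
		call_on_stack(my_stack_top, my_func, ASM_CALL_ARG1,
			      MY_CONSTRAINTS);
	}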
|
+#define ASM_CALL_ARG0 \
+	"call %P[__func] \n"
+
+#define ASM_CALL_ARG1 \
+	"movq %[arg1], %%rdi \n" \
+	ASM_CALL_ARG0
+
+#define ASM_CALL_ARG2 \
+	"movq %[arg2], %%rsi \n" \
+	ASM_CALL_ARG1
+
+#define ASM_CALL_ARG3 \
+	"movq %[arg3], %%rdx \n" \
+	ASM_CALL_ARG2
+
+#define call_on_irqstack(func, asm_call, argconstr...) \
+	call_on_stack(__this_cpu_read(hardirq_stack_ptr), \
+		      func, asm_call, argconstr)
+
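Each ASM_CALL_ARGn simply prepends one register move to the next-smaller variant, so the arguments are loaded in reverse order and the call comes last. After the compiler concatenates the string literals, ASM_CALL_ARG2 reads:

	"movq %[arg2], %%rsi \n"
	"movq %[arg1], %%rdi \n"
	"call %P[__func] \n"

The rdi/rsi/rdx sequence follows the x86-64 SysV calling convention for the first three integer arguments, and call_on_irqstack is now just call_on_stack pinned to the per-CPU hardirq stack.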
/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto) \
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))
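For illustration, one way the assertion is meant to be used; my_sysvec_handler is an invented name and the prototype is assumed from the sysvec regs constraint below:

	/* Compiles only if my_sysvec_handler has the expected signature;
	 * otherwise the static_assert fires at build time. */
	assert_function_type(my_sysvec_handler, void (*)(struct pt_regs *));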
|
 */
#define ASM_CALL_SYSVEC \
	"call irq_enter_rcu \n" \
-	"movq %[arg1], %%rdi \n" \
-	"call %P[__func] \n" \
+	ASM_CALL_ARG1 \
	"call irq_exit_rcu \n"

#define SYSVEC_CONSTRAINTS , [arg1] "r" (regs)
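With ASM_CALL_ARG1 substituted, ASM_CALL_SYSVEC concatenates to exactly the hand-written sequence it replaces:

	"call irq_enter_rcu \n"
	"movq %[arg1], %%rdi \n"
	"call %P[__func] \n"
	"call irq_exit_rcu \n"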
|
 */
#define ASM_CALL_IRQ \
	"call irq_enter_rcu \n" \
-	"movq %[arg1], %%rdi \n" \
-	"movl %[arg2], %%esi \n" \
-	"call %P[__func] \n" \
+	ASM_CALL_ARG2 \
	"call irq_exit_rcu \n"

-#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" (vector)
+#define IRQ_CONSTRAINTS , [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)
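The new cast is the one behavioral subtlety here: the removed code moved the 32-bit vector with movl into %esi, while the shared ASM_CALL_ARG2 emits movq, which requires a 64-bit operand. Casting vector to unsigned long widens the "r" operand so the compiler hands movq a full 64-bit register:

	/* Old, hand-written:          New, via ASM_CALL_ARG2: */
	/*   movl %[arg2], %%esi        movq %[arg2], %%rsi    */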
|
#define run_irq_on_irqstack_cond(func, regs, vector) \
{ \

}
|
#ifndef CONFIG_PREEMPT_RT
-#define ASM_CALL_SOFTIRQ \
-	"call %P[__func] \n"
-
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft

#define do_softirq_own_stack() \
{ \
	__this_cpu_write(hardirq_stack_inuse, true); \
-	call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ); \
+	call_on_irqstack(__do_softirq, ASM_CALL_ARG0); \
	__this_cpu_write(hardirq_stack_inuse, false); \
}
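__do_softirq() takes no arguments, so the bare ASM_CALL_ARG0 body is all the wrapper needs; on the hardirq stack the injected asm_call reduces to:

	call %P[__func]		/* __func == __do_softirq */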
|
|