Skip to content

Commit 28f6bf9

Browse files
Frederic Weisbecker authored and KAGA-KOKO committed
arm64: Prepare arch_nmi_enter() for recursion
When using nmi_enter() recursively, arch_nmi_enter() must also be recursion safe. In particular, it must be ensured that HCR_TGE is always set while in NMI context when in HYP mode, and be restored to its former state when done. The current code fails this when NMIs are interleaved in the wrong order. Notably it overwrites the original hcr state on nesting. Introduce a nesting counter to make sure to store the original value. Signed-off-by: Frederic Weisbecker <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Alexandre Chartre <[email protected]> Cc: Will Deacon <[email protected]> Cc: Catalin Marinas <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent b0f5188 commit 28f6bf9

File tree

1 file changed

+59
-19
lines changed

1 file changed

+59
-19
lines changed

arch/arm64/include/asm/hardirq.h

Lines changed: 59 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -32,30 +32,70 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
3232

3333
struct nmi_ctx {
3434
u64 hcr;
35+
unsigned int cnt;
3536
};
3637

3738
DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);
3839

/*
 * On entry to an NMI while in HYP mode, force HCR_TGE so host interrupts
 * are routed correctly. Only the OUTERMOST NMI saves the original HCR_EL2
 * value; nested entries merely bump the counter. The previous version
 * saved hcr unconditionally, so a nested NMI overwrote the outer NMI's
 * saved state and the wrong value was restored on exit.
 */
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		/* Nested NMI: outer entry already secured HCR_TGE. */	\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)
5072

/*
 * On NMI exit in HYP mode, restore the HCR_EL2 value saved by the
 * outermost arch_nmi_enter() — but only when this is the outermost exit
 * (cnt drops to 0) and TGE was not already set beforehand. The previous
 * version restored unconditionally with no nesting counter, so a nested
 * exit could restore HCR_EL2 while an outer NMI was still running.
 */
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.		\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)
5999

60100
static inline void ack_bad_irq(unsigned int irq)
61101
{

0 commit comments

Comments
 (0)