@@ -21,113 +21,186 @@
  * exceptions should be unmasked.
  */

-/*
- * CPU interrupt mask handling.
- */
-static inline void arch_local_irq_enable(void)
+static __always_inline bool __irqflags_uses_pmr(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
+}

+static __always_inline void __daif_local_irq_enable(void)
+{
+	barrier();
+	asm volatile("msr daifclr, #3");
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_enable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}

-	asm volatile(ALTERNATIVE(
-		"msr daifclr, #3 // arch_local_irq_enable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_GIC_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQON)
-		: "memory");
-
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
 	pmr_sync();
+	barrier();
 }

-static inline void arch_local_irq_disable(void)
+static inline void arch_local_irq_enable(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_enable();
+	} else {
+		__daif_local_irq_enable();
+	}
+}

+static __always_inline void __daif_local_irq_disable(void)
+{
+	barrier();
+	asm volatile("msr daifset, #3");
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_disable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}

-	asm volatile(ALTERNATIVE(
-		"msr daifset, #3 // arch_local_irq_disable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_GIC_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
-		: "memory");
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
+	barrier();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_disable();
+	} else {
+		__daif_local_irq_disable();
+	}
+}
+
+static __always_inline unsigned long __daif_local_save_flags(void)
+{
+	return read_sysreg(daif);
+}
+
+static __always_inline unsigned long __pmr_local_save_flags(void)
+{
+	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }

 /*
  * Save the current interrupt enable state.
  */
 static inline unsigned long arch_local_save_flags(void)
 {
-	unsigned long flags;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_save_flags();
+	} else {
+		return __daif_local_save_flags();
+	}
+}

-	asm volatile(ALTERNATIVE(
-		"mrs %0, daif",
-		__mrs_s("%0", SYS_ICC_PMR_EL1),
-		ARM64_HAS_GIC_PRIO_MASKING)
-		: "=&r" (flags)
-		:
-		: "memory");
+static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & PSR_I_BIT;
+}

-	return flags;
+static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
+{
+	return flags != GIC_PRIO_IRQON;
 }

-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	int res;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled_flags(flags);
+	} else {
+		return __daif_irqs_disabled_flags(flags);
+	}
+}

-	asm volatile(ALTERNATIVE(
-		"and %w0, %w1, #" __stringify(PSR_I_BIT),
-		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
-		ARM64_HAS_GIC_PRIO_MASKING)
-		: "=&r" (res)
-		: "r" ((int) flags)
-		: "memory");
+static __always_inline bool __daif_irqs_disabled(void)
+{
+	return __daif_irqs_disabled_flags(__daif_local_save_flags());
+}

-	return res;
+static __always_inline bool __pmr_irqs_disabled(void)
+{
+	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
 }

-static inline int arch_irqs_disabled(void)
+static inline bool arch_irqs_disabled(void)
 {
-	return arch_irqs_disabled_flags(arch_local_save_flags());
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled();
+	} else {
+		return __daif_irqs_disabled();
+	}
 }

-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long __daif_local_irq_save(void)
 {
-	unsigned long flags;
+	unsigned long flags = __daif_local_save_flags();
+
+	__daif_local_irq_disable();
+
+	return flags;
+}

-	flags = arch_local_save_flags();
+static __always_inline unsigned long __pmr_local_irq_save(void)
+{
+	unsigned long flags = __pmr_local_save_flags();

 	/*
 	 * There are too many states with IRQs disabled, just keep the current
 	 * state if interrupts are already disabled/masked.
 	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!__pmr_irqs_disabled_flags(flags))
+		__pmr_local_irq_disable();

 	return flags;
 }

+static inline unsigned long arch_local_irq_save(void)
+{
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_irq_save();
+	} else {
+		return __daif_local_irq_save();
+	}
+}
+
+static __always_inline void __daif_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg(flags, daif);
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
+	pmr_sync();
+	barrier();
+}
+
 /*
  * restore saved IRQ state
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(ALTERNATIVE(
-		"msr daif, %0",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_GIC_PRIO_MASKING)
-		:
-		: "r" (flags)
-		: "memory");
-
-	pmr_sync();
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_restore(flags);
+	} else {
+		__daif_local_irq_restore(flags);
+	}
 }

 #endif /* __ASM_IRQFLAGS_H */
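The shape of the change is the same for every accessor: a single predicate, __irqflags_uses_pmr(), chooses between a DAIF-based and a PMR-based __always_inline backend, replacing the per-callsite ALTERNATIVE() asm. The standalone C sketch below mirrors that dispatch pattern outside the kernel; the names (backend_uses_pmr, pmr_disable, daif_disable, local_irq_disable_sketch) are invented for illustration, and the capability check is reduced to a plain boolean, whereas the real predicate combines IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) with a patched-in alternative_has_feature_unlikely() branch.

	/* Illustrative sketch only -- not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Stand-in for the boot-time capability decision; in the kernel this is
	 * IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	 * alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING).
	 */
	static bool have_pmr;

	static inline bool backend_uses_pmr(void)
	{
		return have_pmr;
	}

	/* Two backends, analogous to __pmr_local_irq_disable()/__daif_local_irq_disable(). */
	static inline void pmr_disable(void)  { puts("mask IRQs by raising the PMR"); }
	static inline void daif_disable(void) { puts("mask IRQs by setting DAIF.IF"); }

	/* The outer wrapper only dispatches, as arch_local_irq_disable() does above. */
	static inline void local_irq_disable_sketch(void)
	{
		if (backend_uses_pmr())
			pmr_disable();
		else
			daif_disable();
	}

	int main(void)
	{
		local_irq_disable_sketch();	/* DAIF-style path */
		have_pmr = true;
		local_irq_disable_sketch();	/* PMR-style path */
		return 0;
	}

With the selection hoisted into one predicate, each backend in the patch stays a straight-line function bounded by barrier() calls, rather than a per-callsite ALTERNATIVE() carrying its own operands and clobber list.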