|
24 | 24 | #ifndef LINUXKM_WC_PORT_H |
25 | 25 | #define LINUXKM_WC_PORT_H |
26 | 26 |
|
| 27 | + /* |
| 28 | + * CRITICAL: Disable ARM64 LSE atomics for out-of-tree modules. |
| 29 | + * |
| 30 | + * When CONFIG_ARM64_LSE_ATOMICS is enabled, the kernel uses static keys |
| 31 | + * (jump labels) in system_uses_lse_atomics() to choose between LSE and |
| 32 | + * LL/SC atomic implementations at runtime. These static keys generate |
| 33 | + * asm goto statements that reference .jump_table section symbols which |
| 34 | + * cannot be resolved in out-of-tree modules, causing: |
| 35 | + * "error: impossible constraint in 'asm'" |
| 36 | + * |
| 37 | + * By undefining CONFIG_ARM64_LSE_ATOMICS here (before any kernel headers |
| 38 | + * that use atomics are included), we force use of the LL/SC fallback path |
| 39 | + * which works correctly in out-of-tree modules. |
| 40 | + * |
| 41 | + * This must appear BEFORE #include <linux/version.h> because that header |
| 42 | + * may transitively include headers that use atomics. |
| 43 | + */ |
| 44 | + #ifdef CONFIG_ARM64_LSE_ATOMICS |
| 45 | + #undef CONFIG_ARM64_LSE_ATOMICS |
| 46 | + #endif |
| 47 | + |
27 | 48 | #include <linux/version.h> |
28 | 49 |
|
29 | 50 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) |
|
916 | 937 |
|
917 | 938 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
918 | 939 |
|
919 | | - typeof(__mutex_init) *__mutex_init; |
| 940 | + #ifndef CONFIG_PREEMPT_RT |
| 941 | + typeof(__mutex_init) *__mutex_init; |
| 942 | + #else |
| 943 | + typeof(__rt_mutex_init) *__rt_mutex_init; |
| 944 | + typeof(rt_mutex_base_init) *rt_mutex_base_init; |
| 945 | + typeof(rt_spin_lock) *rt_spin_lock; |
| 946 | + typeof(rt_spin_unlock) *rt_spin_unlock; |
| 947 | + #endif |
| 948 | + |
920 | 949 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
921 | 950 | typeof(mutex_lock_nested) *mutex_lock_nested; |
922 | 951 | #else |
|
1023 | 1052 | #endif |
1024 | 1053 |
|
1025 | 1054 | #ifdef CONFIG_ARM64 |
| 1055 | + #ifndef CONFIG_ARCH_TEGRA |
1026 | 1056 | #ifdef WC_CONTAINERIZE_THIS |
1027 | 1057 | /* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early |
1028 | 1058 | * to allow shimming in system headers, but now we need the native |
|
1037 | 1067 | typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath; |
1038 | 1068 | #endif |
1039 | 1069 | #endif |
| 1070 | + #endif |
1040 | 1071 |
|
1041 | 1072 | typeof(preempt_count) *preempt_count; |
1042 | 1073 | #ifndef _raw_spin_lock_irqsave |
|
1235 | 1266 | #error WOLFSSL_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture. |
1236 | 1267 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
1237 | 1268 |
|
1238 | | - #define __mutex_init WC_PIE_INDIRECT_SYM(__mutex_init) |
| 1269 | + #ifndef CONFIG_PREEMPT_RT |
| 1270 | + #define __mutex_init WC_PIE_INDIRECT_SYM(__mutex_init) |
| 1271 | + #else |
| 1272 | + /* On RT kernels, __mutex_init is a macro pointing to __rt_mutex_init */ |
| 1273 | + #undef __mutex_init |
| 1274 | + #define __rt_mutex_init WC_PIE_INDIRECT_SYM(__rt_mutex_init) |
| 1275 | + #define __mutex_init(mutex, name, key) __rt_mutex_init(mutex, name, key) |
| 1276 | + #define rt_mutex_base_init WC_PIE_INDIRECT_SYM(rt_mutex_base_init) |
| 1277 | + #define rt_spin_lock WC_PIE_INDIRECT_SYM(rt_spin_lock) |
| 1278 | + #define rt_spin_unlock WC_PIE_INDIRECT_SYM(rt_spin_unlock) |
| 1279 | + #endif |
1239 | 1280 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
1240 | 1281 | #define mutex_lock_nested WC_PIE_INDIRECT_SYM(mutex_lock_nested) |
1241 | 1282 | #else |
|
1298 | 1339 |
|
1299 | 1340 | /* this is defined in linux/spinlock.h as an inline that calls the unshimmed |
1300 | 1341 | * raw_spin_unlock_irqrestore(). use a macro here to supersede it. |
 | 1342 | + * Note: On PREEMPT_RT kernels, spinlock_t doesn't have an rlock member |
 | 1343 | + * (it is rtmutex-based), so we skip this redefinition and use the kernel's |
1301 | 1344 | */ |
1302 | | - #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 1345 | + #ifndef CONFIG_PREEMPT_RT |
| 1346 | + #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 1347 | + #else |
| 1348 | + /* Undo internal wolfSSL PIE macro rewriting */ |
| 1349 | + #ifdef rt_spin_unlock |
| 1350 | + #undef rt_spin_unlock |
| 1351 | + #endif |
| 1352 | + #ifdef rt_spin_lock |
| 1353 | + #undef rt_spin_lock |
| 1354 | + #endif |
| 1355 | + static inline void wolfssl_spin_unlock_irqrestore_rt(spinlock_t *lock, |
| 1356 | + unsigned long flags) |
| 1357 | + { |
| 1358 | + (void)flags; /* rt_spin_unlock ignores flags */ |
| 1359 | + wolfssl_linuxkm_pie_redirect_table.rt_spin_unlock(lock); |
| 1360 | + } |
| 1361 | + |
| 1362 | + #undef spin_unlock_irqrestore |
| 1363 | + #define spin_unlock_irqrestore(lock, flags) \ |
| 1364 | + wolfssl_spin_unlock_irqrestore_rt((lock), (flags)) |
| 1365 | + #endif |
1303 | 1366 |
|
1304 | 1367 | #define wc_linuxkm_sig_ignore_begin WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_begin); |
1305 | 1368 | #define wc_linuxkm_sig_ignore_end WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_end); |
|
0 commit comments