|
24 | 24 | #ifndef LINUXKM_WC_PORT_H |
25 | 25 | #define LINUXKM_WC_PORT_H |
26 | 26 |
|
| 27 | + /* |
| 28 | + * CRITICAL: Disable ARM64 LSE atomics for out-of-tree modules. |
| 29 | + * |
| 30 | + * When CONFIG_ARM64_LSE_ATOMICS is enabled, the kernel uses static keys |
| 31 | + * (jump labels) in system_uses_lse_atomics() to choose between LSE and |
| 32 | + * LL/SC atomic implementations at runtime. These static keys generate |
| 33 | + * asm goto statements that reference __jump_table section symbols which |
| 34 | + * cannot be resolved in out-of-tree modules, causing: |
| 35 | + * "error: impossible constraint in 'asm'" |
| 36 | + * |
| 37 | + * By undefining CONFIG_ARM64_LSE_ATOMICS here (before any kernel headers |
| 38 | + * that use atomics are included), we force use of the LL/SC fallback path |
| 39 | + * which works correctly in out-of-tree modules. |
| 40 | + * |
| 41 | + * This must appear BEFORE #include <linux/version.h> because that header |
| 42 | + * may transitively include headers that use atomics. |
| 43 | + */ |
| 44 | + #ifdef CONFIG_ARM64_LSE_ATOMICS |
| 45 | + #undef CONFIG_ARM64_LSE_ATOMICS |
| 46 | + #endif |
| 47 | + |
27 | 48 | #include <linux/version.h> |
28 | 49 |
|
29 | 50 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) |
|
916 | 937 |
|
917 | 938 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
918 | 939 |
|
| 940 | + #ifndef CONFIG_PREEMPT_RT |
919 | 941 | typeof(__mutex_init) *__mutex_init; |
| 942 | + #else |
| 943 | + void (*__mutex_init)(struct mutex *lock, const char *name, struct lock_class_key *key); |
| 944 | + #endif |
| 945 | + |
920 | 946 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
921 | 947 | typeof(mutex_lock_nested) *mutex_lock_nested; |
922 | 948 | #else |
|
1023 | 1049 | #endif |
1024 | 1050 |
|
1025 | 1051 | #ifdef CONFIG_ARM64 |
| 1052 | + #ifndef CONFIG_ARCH_TEGRA |
1026 | 1053 | #ifdef WC_CONTAINERIZE_THIS |
1027 | 1054 | /* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early |
1028 | 1055 | * to allow shimming in system headers, but now we need the native |
|
1037 | 1064 | typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath; |
1038 | 1065 | #endif |
1039 | 1066 | #endif |
| 1067 | + #endif |
1040 | 1068 |
|
1041 | 1069 | typeof(preempt_count) *preempt_count; |
1042 | 1070 | #ifndef _raw_spin_lock_irqsave |
|
1235 | 1263 | #error WOLFSSL_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture. |
1236 | 1264 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
1237 | 1265 |
|
| 1266 | + #ifndef CONFIG_PREEMPT_RT |
1238 | 1267 | #define __mutex_init WC_PIE_INDIRECT_SYM(__mutex_init) |
| 1268 | + #endif |
1239 | 1269 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
1240 | 1270 | #define mutex_lock_nested WC_PIE_INDIRECT_SYM(mutex_lock_nested) |
1241 | 1271 | #else |
|
1298 | 1328 |
|
1299 | 1329 | /* this is defined in linux/spinlock.h as an inline that calls the unshimmed |
1300 | 1330 | * raw_spin_unlock_irqrestore(). use a macro here to supersede it. |
| 1331 | + * Note: on PREEMPT_RT kernels, spinlock_t has no rlock member (it is |
| 1332 | + * rt_mutex-based), so we keep the kernel's native implementation here. |
1301 | 1333 | */ |
| 1334 | + #ifndef CONFIG_PREEMPT_RT |
1302 | 1335 | #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 1336 | + #endif |
1303 | 1337 |
|
1304 | 1338 | #define wc_linuxkm_sig_ignore_begin WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_begin); |
1305 | 1339 | #define wc_linuxkm_sig_ignore_end WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_end); |
|
0 commit comments