|
24 | 24 | #ifndef LINUXKM_WC_PORT_H |
25 | 25 | #define LINUXKM_WC_PORT_H |
26 | 26 |
|
| 27 | + /* |
| 28 | + * CRITICAL: Disable ARM64 LSE atomics for out-of-tree modules. |
| 29 | + * |
| 30 | + * When CONFIG_ARM64_LSE_ATOMICS is enabled, the kernel uses static keys |
| 31 | + * (jump labels) in system_uses_lse_atomics() to choose between LSE and |
| 32 | + * LL/SC atomic implementations at runtime. These static keys generate |
| 33 | + * asm goto statements that reference .jump_table section symbols which |
| 34 | + * cannot be resolved in out-of-tree modules, causing: |
| 35 | + * "error: impossible constraint in 'asm'" |
| 36 | + * |
| 37 | + * By undefining CONFIG_ARM64_LSE_ATOMICS here (before any kernel headers |
| 38 | + * that use atomics are included), we force use of the LL/SC fallback path |
| 39 | + * which works correctly in out-of-tree modules. |
| 40 | + * |
| 41 | + * This must appear BEFORE #include <linux/version.h> because that header |
| 42 | + * may transitively include headers that use atomics. |
| 43 | + */ |
| 44 | + #ifdef CONFIG_ARM64_LSE_ATOMICS |
| 45 | + #undef CONFIG_ARM64_LSE_ATOMICS |
| 46 | + #endif |
| 47 | + |
27 | 48 | #include <linux/version.h> |
28 | 49 |
|
29 | 50 | #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) |
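For context, a minimal sketch of the selection pattern the comment above describes; this is not kernel source, and the names have_lse_atomics / sketch_atomic_add are made up for illustration. With CONFIG_ARM64_LSE_ATOMICS defined, each atomic op branches on a static key, and the asm goto behind static_branch_likely() is the construct an out-of-tree module can fail to compile; with the symbol undefined, only the LL/SC body remains.

```c
#include <linux/jump_label.h>
#include <linux/atomic.h>

static DEFINE_STATIC_KEY_FALSE(have_lse_atomics);

static inline void sketch_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

#ifdef CONFIG_ARM64_LSE_ATOMICS
	/* static_branch_likely() expands to an asm goto whose jump target is
	 * recorded in the .jump_table section -- the reference that breaks in
	 * out-of-tree modules. */
	if (static_branch_likely(&have_lse_atomics)) {
		/* LSE form: a single STADD, no retry loop */
		asm volatile(".arch_extension lse\n"
			     "	stadd	%w[i], %[v]\n"
			     : [v] "+Q" (v->counter)
			     : [i] "r" (i));
		return;
	}
#endif
	/* LL/SC form: load-exclusive/store-exclusive retry loop; this is the
	 * path forced by the #undef above */
	asm volatile(
	"1:	ldxr	%w0, %2\n"
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "r" (i));
}
```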
|
941 | 962 |
|
942 | 963 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
943 | 964 |
|
944 | | - typeof(__mutex_init) *__mutex_init; |
| 965 | + #ifndef CONFIG_PREEMPT_RT |
| 966 | + typeof(__mutex_init) *__mutex_init; |
| 967 | + #else |
| 968 | + typeof(__rt_mutex_init) *__rt_mutex_init; |
| 969 | + typeof(rt_mutex_base_init) *rt_mutex_base_init; |
| 970 | + typeof(rt_spin_lock) *rt_spin_lock; |
| 971 | + typeof(rt_spin_unlock) *rt_spin_unlock; |
| 972 | + #endif |
| 973 | + |
945 | 974 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
946 | 975 | typeof(mutex_lock_nested) *mutex_lock_nested; |
947 | 976 | #else |
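To show where these typeof() members live, a hedged sketch of the declaration idiom (the struct name below is illustrative, not wolfSSL's actual redirect-table type; prototypes are paraphrased from the kernel headers). On PREEMPT_RT, __mutex_init is a function-like macro rather than a real function, so typeof() cannot be applied to it; the rt_mutex primitives remain ordinary exported functions and are captured instead.

```c
#include <linux/mutex.h>
#include <linux/rtmutex.h>
#include <linux/spinlock.h>

/* Illustrative only -- not wolfSSL's actual table layout. */
struct demo_redirect_table {
#ifndef CONFIG_PREEMPT_RT
	/* void __mutex_init(struct mutex *, const char *, struct lock_class_key *) */
	typeof(__mutex_init) *__mutex_init;
#else
	/* On RT, struct mutex is built on rt_mutex, so the table captures the
	 * rt_mutex init/lock/unlock primitives instead. */
	typeof(__rt_mutex_init) *__rt_mutex_init;
	typeof(rt_mutex_base_init) *rt_mutex_base_init;
	typeof(rt_spin_lock) *rt_spin_lock;
	typeof(rt_spin_unlock) *rt_spin_unlock;
#endif
};
```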
|
1048 | 1077 | #endif |
1049 | 1078 |
|
1050 | 1079 | #ifdef CONFIG_ARM64 |
| 1080 | + #ifndef CONFIG_ARCH_TEGRA |
1051 | 1081 | #ifdef WC_CONTAINERIZE_THIS |
1052 | 1082 | /* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early |
1053 | 1083 | * to allow shimming in system headers, but now we need the native |
|
1062 | 1092 | typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath; |
1063 | 1093 | #endif |
1064 | 1094 | #endif |
| 1095 | + #endif |
1065 | 1096 |
|
1066 | 1097 | typeof(preempt_count) *preempt_count; |
1067 | 1098 | #ifndef _raw_spin_lock_irqsave |
|
1260 | 1291 | #error WOLFSSL_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture. |
1261 | 1292 | #endif /* WOLFSSL_USE_SAVE_VECTOR_REGISTERS */ |
1262 | 1293 |
|
1263 | | - #define __mutex_init WC_PIE_INDIRECT_SYM(__mutex_init) |
| 1294 | + #ifndef CONFIG_PREEMPT_RT |
| 1295 | + #define __mutex_init WC_PIE_INDIRECT_SYM(__mutex_init) |
| 1296 | + #else |
| 1297 | + /* On RT kernels, __mutex_init is a macro built on the rt_mutex API, not a function; redirect through __rt_mutex_init instead */ |
| 1298 | + #undef __mutex_init |
| 1299 | + #define __rt_mutex_init WC_PIE_INDIRECT_SYM(__rt_mutex_init) |
| 1300 | + #define __mutex_init(mutex, name, key) __rt_mutex_init(mutex, name, key) |
| 1301 | + #define rt_mutex_base_init WC_PIE_INDIRECT_SYM(rt_mutex_base_init) |
| 1302 | + #define rt_spin_lock WC_PIE_INDIRECT_SYM(rt_spin_lock) |
| 1303 | + #define rt_spin_unlock WC_PIE_INDIRECT_SYM(rt_spin_unlock) |
| 1304 | + #endif |
1264 | 1305 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) |
1265 | 1306 | #define mutex_lock_nested WC_PIE_INDIRECT_SYM(mutex_lock_nested) |
1266 | 1307 | #else |
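A hedged sketch of the call-site effect of this remapping; demo_mutex and demo_key are made-up names, and the expansion shown in the comment is hypothetical (the real WC_PIE_INDIRECT_SYM() definition lives elsewhere in the wolfSSL linuxkm port).

```c
#include <linux/mutex.h>

static struct mutex demo_mutex;
static struct lock_class_key demo_key;

static void demo_init_lock(void)
{
	/* With the remapping above in effect, this ordinary-looking call no
	 * longer takes a direct relocation to a kernel symbol.  On !PREEMPT_RT
	 * it expands to something like (redirect_table->__mutex_init)(...);
	 * on PREEMPT_RT the same source is rewritten to go through the
	 * __rt_mutex_init table entry, so one call site builds against either
	 * kernel flavor. */
	__mutex_init(&demo_mutex, "demo_mutex", &demo_key);
}
```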
|
1327 | 1368 |
|
1328 | 1369 | /* this is defined in linux/spinlock.h as an inline that calls the unshimmed |
1329 | 1370 | * raw_spin_unlock_irqrestore(). use a macro here to supersede it. |
| 1371 | + * Note: On PREEMPT_RT kernels, spinlock_t has no rlock member, so instead |
| 1372 | + * this macro is redirected to rt_spin_unlock() via the helper below. |
1330 | 1373 | */ |
1331 | | - #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 1374 | + #ifndef CONFIG_PREEMPT_RT |
| 1375 | + #define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags) |
| 1376 | + #else |
| 1377 | + /* Undo internal wolfSSL PIE macro rewriting */ |
| 1378 | + #ifdef rt_spin_unlock |
| 1379 | + #undef rt_spin_unlock |
| 1380 | + #endif |
| 1381 | + #ifdef rt_spin_lock |
| 1382 | + #undef rt_spin_lock |
| 1383 | + #endif |
| 1384 | + static inline int wolfssl_spin_unlock_irqrestore_rt(spinlock_t *lock, |
| 1385 | + unsigned long flags) |
| 1386 | + { |
| 1387 | + (void)flags; /* rt_spin_unlock ignores flags */ |
| 1388 | + WC_PIE_INDIRECT_SYM(rt_spin_unlock)(lock); |
| 1389 | + return 0; |
| 1390 | + } |
| 1391 | + |
| 1392 | + #undef spin_unlock_irqrestore |
| 1393 | + #define spin_unlock_irqrestore(lock, flags) \ |
| 1394 | + wolfssl_spin_unlock_irqrestore_rt((lock), (flags)) |
| 1395 | + #endif |
1332 | 1396 |
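As a usage note, callers keep the ordinary irqsave/irqrestore pairing; on PREEMPT_RT the flags value is never consumed because spinlock_t is a sleeping lock there and interrupts are not actually disabled. A minimal caller sketch, with made-up names (demo_lock, demo_count):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_count;

static void demo_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	/* On !PREEMPT_RT this expands to raw_spin_unlock_irqrestore() on
	 * &demo_lock.rlock per the macro above; on PREEMPT_RT it calls
	 * wolfssl_spin_unlock_irqrestore_rt(), which unlocks via
	 * rt_spin_unlock() and ignores flags. */
	spin_unlock_irqrestore(&demo_lock, flags);
}
```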
|
1333 | 1397 | #define wc_linuxkm_sig_ignore_begin WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_begin); |
1334 | 1398 | #define wc_linuxkm_sig_ignore_end WC_PIE_INDIRECT_SYM(wc_linuxkm_sig_ignore_end); |
|