|
126 | 126 |
|
127 | 127 | #if defined(__PIE__) && defined(CONFIG_ARM64) |
128 | 128 | #define alt_cb_patch_nops my__alt_cb_patch_nops |
| 129 | + #define queued_spin_lock_slowpath my__queued_spin_lock_slowpath |
129 | 130 | #endif |
130 | 131 |
|
131 | 132 | #include <linux/kernel.h> |
|
705 | 706 |
|
706 | 707 | #ifdef CONFIG_ARM64 |
707 | 708 | #ifdef __PIE__ |
708 | | - /* alt_cb_patch_nops defined early to allow shimming in system |
709 | | - * headers, but now we need the native one. |
| 709 | + /* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early |
| 710 | + * to allow shimming in system headers, but now we need the native |
| 711 | + * ones. |
710 | 712 | */ |
711 | 713 | #undef alt_cb_patch_nops |
712 | 714 | typeof(my__alt_cb_patch_nops) *alt_cb_patch_nops; |
| 715 | + #undef queued_spin_lock_slowpath |
| 716 | + typeof(my__queued_spin_lock_slowpath) *queued_spin_lock_slowpath; |
713 | 717 | #else |
714 | 718 | typeof(alt_cb_patch_nops) *alt_cb_patch_nops; |
| 719 | + typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath; |
715 | 720 | #endif |
716 | 721 | #endif |
717 | 722 |
|
718 | 723 | typeof(preempt_count) *preempt_count; |
719 | | - typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave; |
720 | | - typeof(_raw_spin_trylock) *_raw_spin_trylock; |
721 | | - typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore; |
| 724 | + #ifndef _raw_spin_lock_irqsave |
| 725 | + typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave; |
| 726 | + #endif |
| 727 | + #ifndef _raw_spin_trylock |
| 728 | + typeof(_raw_spin_trylock) *_raw_spin_trylock; |
| 729 | + #endif |
| 730 | + #ifndef _raw_spin_unlock_irqrestore |
| 731 | + typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore; |
| 732 | + #endif |
722 | 733 | typeof(_cond_resched) *_cond_resched; |
723 | 734 |
|
724 | 735 | const void *_last_slot; |
|
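A rough sketch of the shim-then-redirect pattern the hunks above extend, assuming a hypothetical symbol some_kernel_func and a one-member table (the real table has many more members and a different layout): the early #define makes the kernel headers declare, and their inlines call, a wrapper name; the table then records a pointer to the native function under its original name; and the PIE-built objects resolve the wrapper through that table so they carry no direct relocation against the kernel symbol.

    /* Illustrative sketch only; some_kernel_func and the one-member table
     * are hypothetical, simplified stand-ins. */

    /* 1. Shim the name before the kernel headers are pulled in, so the
     *    header's declaration and any header-inline call sites end up
     *    naming the wrapper my__some_kernel_func: */
    #define some_kernel_func my__some_kernel_func
    #include <linux/kernel.h>

    /* 2. Drop the shim and declare a table slot for the native function;
     *    typeof(my__some_kernel_func) reuses the prototype that the
     *    (renamed) header declaration supplied: */
    #undef some_kernel_func
    struct pie_redirect_table {
        typeof(my__some_kernel_func) *some_kernel_func;
    };
    extern const struct pie_redirect_table *wolfssl_linuxkm_get_pie_redirect_table(void);

    /* 3. In the PIE-built objects, route the wrapper through the table
     *    (one possible resolution; shown here as a forwarding macro).
     *    The non-PIE side of the module fills the slot at init time. */
    #define my__some_kernel_func \
        (wolfssl_linuxkm_get_pie_redirect_table()->some_kernel_func)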
885 | 896 |
|
886 | 897 | #undef preempt_count /* just in case -- not a macro on x86. */ |
887 | 898 | #define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count) |
888 | | - #define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave) |
889 | | - #define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock) |
890 | | - #define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore) |
| 899 | + |
| 900 | + #ifndef WOLFSSL_LINUXKM_USE_MUTEXES |
| 901 | + #ifndef _raw_spin_lock_irqsave |
| 902 | + #define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave) |
| 903 | + #endif |
| 904 | + #ifndef _raw_spin_trylock |
| 905 | + #define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock) |
| 906 | + #endif |
| 907 | + #ifndef _raw_spin_unlock_irqrestore |
| 908 | + #define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore) |
| 909 | + #endif |
| 910 | + #endif |
| 911 | + |
891 | 912 | #define _cond_resched (wolfssl_linuxkm_get_pie_redirect_table()->_cond_resched) |
892 | 913 |
|
893 | 914 | /* this is defined in linux/spinlock.h as an inline that calls the unshimmed |
|
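For orientation, a hedged sketch of how a PIE-built caller consumes the redirect macros above (the caller is hypothetical; <linux/spinlock.h> is assumed to have been included earlier in the translation unit, before the redirects were installed). The #ifndef guards in the hunk presumably cover configurations where these names are already macros rather than addressable functions (for example uniprocessor kernels, where spinlock_api_up.h supplies them), and the whole group is skipped when WOLFSSL_LINUXKM_USE_MUTEXES selects mutex-backed locking instead.

    /* Hypothetical PIE-side caller (SMP case).  With the redirect macro in
     * effect, _raw_spin_lock_irqsave below expands to a lookup through the
     * redirect table, so this object carries no relocation against the
     * kernel's out-of-line spinlock symbols. */
    static void pie_side_example(raw_spinlock_t *lk)
    {
        unsigned long flags;

        /* expands (roughly) to:
         *   flags = (wolfssl_linuxkm_get_pie_redirect_table()
         *                ->_raw_spin_lock_irqsave)(lk);
         */
        flags = _raw_spin_lock_irqsave(lk);

        /* ... critical section ... */

        _raw_spin_unlock_irqrestore(lk, flags);
    }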
991 | 1012 |
|
992 | 1013 | static inline int wc_LockMutex(wolfSSL_Mutex* m) |
993 | 1014 | { |
994 | | - if (in_nmi() || in_hardirq() || in_softirq()) |
995 | | - return BAD_STATE_E; |
| 1015 | + if (in_nmi() || hardirq_count() || in_softirq()) |
| 1016 | + return -1; |
996 | 1017 | mutex_lock(m); |
997 | 1018 | return 0; |
998 | 1019 | } |
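A hedged usage sketch of the wrapper as changed above: the caller and counter are hypothetical, while wolfSSL_Mutex, wc_LockMutex() and wc_UnLockMutex() are the real wolfSSL names. Since the underlying mutex may sleep, the wrapper now declines to lock from NMI, hard-IRQ, or soft-IRQ context, and callers are expected to check its return value.

    /* Hypothetical caller -- everything except the wc_* API is illustrative. */
    static int update_shared_counter(wolfSSL_Mutex *lock, int *counter)
    {
        int ret = wc_LockMutex(lock);
        if (ret != 0)
            return ret; /* refused: atomic (NMI/hard-IRQ/soft-IRQ) context */

        ++*counter;     /* state guarded by the mutex */

        wc_UnLockMutex(lock);
        return 0;
    }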
|