 #include <linux/percpu-defs.h>
 #include <linux/lockdep.h>
 
+#ifndef CONFIG_PREEMPT_RT
+
 typedef struct {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -95,3 +97,45 @@ do { \
 	local_lock_release(this_cpu_ptr(lock));	\
 	local_irq_restore(flags);		\
 } while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l)					\
+	do {							\
+		local_spin_lock_init((l));			\
+	} while (0)
+
+#define __local_lock(__lock)					\
+	do {							\
+		migrate_disable();				\
+		spin_lock(this_cpu_ptr((__lock)));		\
+	} while (0)
+
+#define __local_lock_irq(lock)			__local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags)			\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		flags = 0;					\
+		__local_lock(lock);				\
+	} while (0)
+
+#define __local_unlock(__lock)					\
+	do {							\
+		spin_unlock(this_cpu_ptr((__lock)));		\
+		migrate_enable();				\
+	} while (0)
+
+#define __local_unlock_irq(lock)		__local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
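
Usage sketch (illustrative only, not part of this patch): how a caller of the local_lock API declares and takes one of these locks. The names example_pcpu and example_inc are made up for this example; the point is that the same call site compiles to preempt/IRQ disabling plus lockdep annotations on !PREEMPT_RT, and to the migrate_disable() + per CPU spin_lock() path added above on PREEMPT_RT.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU data protected by a local_lock. */
struct example_pcpu {
	local_lock_t	lock;
	unsigned long	count;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_inc(void)
{
	/*
	 * On !PREEMPT_RT this disables preemption and acquires the lockdep
	 * map; on PREEMPT_RT it disables migration and takes the per CPU
	 * spinlock, so the critical section stays preemptible.
	 */
	local_lock(&example_pcpu.lock);
	this_cpu_inc(example_pcpu.count);
	local_unlock(&example_pcpu.lock);
}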