@@ -122,22 +122,6 @@ static void get_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	atomic_inc(&rcpu->refcnt);
 }
 
-/* called from workqueue, to workaround syscall using preempt_disable */
-static void cpu_map_kthread_stop(struct work_struct *work)
-{
-	struct bpf_cpu_map_entry *rcpu;
-
-	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
-
-	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
-	 * as it waits until all in-flight call_rcu() callbacks complete.
-	 */
-	rcu_barrier();
-
-	/* kthread_stop will wake_up_process and wait for it to complete */
-	kthread_stop(rcpu->kthread);
-}
-
 static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 {
 	/* The tear-down procedure should have made sure that queue is
@@ -165,6 +149,30 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 	}
 }
 
+/* called from workqueue, to workaround syscall using preempt_disable */
+static void cpu_map_kthread_stop(struct work_struct *work)
+{
+	struct bpf_cpu_map_entry *rcpu;
+	int err;
+
+	rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq);
+
+	/* Wait for flush in __cpu_map_entry_free(), via full RCU barrier,
+	 * as it waits until all in-flight call_rcu() callbacks complete.
+	 */
+	rcu_barrier();
+
+	/* kthread_stop will wake_up_process and wait for it to complete */
+	err = kthread_stop(rcpu->kthread);
+	if (err) {
+		/* kthread_stop may be called before cpu_map_kthread_run
+		 * is executed, so we need to release the memory related
+		 * to rcpu.
+		 */
+		put_cpu_map_entry(rcpu);
+	}
+}
+
 static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
 				     struct list_head *listp,
 				     struct xdp_cpumap_stats *stats)
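
Context for the added error handling: the in-diff comment says the stop work item may run before cpu_map_kthread_run() ever executes, in which case the kthread never drops the reference it was handed, so the teardown path now checks kthread_stop()'s return value and calls put_cpu_map_entry() itself. The sketch below is only a minimal userspace illustration of that reference hand-off pattern, written with pthreads and C11 atomics rather than the kernel API; struct entry, entry_get(), entry_put() and worker_fn() are made-up names for this example, not anything from cpumap.c.

/*
 * Minimal userspace analogue of the refcount hand-off the patch fixes:
 * the creator takes one extra reference for the worker; if the worker
 * never runs, teardown must drop that reference itself.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	atomic_int refcnt;
	pthread_t worker;
};

static void entry_get(struct entry *e)
{
	atomic_fetch_add(&e->refcnt, 1);
}

static void entry_put(struct entry *e)
{
	/* atomic_fetch_sub() returns the old value; 1 means we were last */
	if (atomic_fetch_sub(&e->refcnt, 1) == 1) {
		printf("freeing entry\n");
		free(e);
	}
}

/* The worker owns one reference and drops it when it exits. */
static void *worker_fn(void *arg)
{
	struct entry *e = arg;

	/* ... process queued work ... */
	entry_put(e);
	return NULL;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));
	bool worker_started = false;	/* simulate the failure window */

	if (!e)
		return 1;

	atomic_init(&e->refcnt, 1);	/* creator's reference */
	entry_get(e);			/* reference handed to the worker */

	if (worker_started) {
		pthread_create(&e->worker, NULL, worker_fn, e);
		pthread_join(e->worker, NULL);
	} else {
		/* The worker never ran, so nobody else will drop its
		 * reference; drop it here, mirroring the added
		 * put_cpu_map_entry() call in the patch.
		 */
		entry_put(e);
	}

	entry_put(e);			/* creator's reference */
	return 0;
}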