@@ -33,6 +33,28 @@ struct bpf_thp_ops {
 };
 
 static DEFINE_SPINLOCK(thp_ops_lock);
+static struct bpf_thp_ops __rcu *bpf_thp_global;	/* global mode */
+
+static unsigned long
+bpf_hook_thp_get_orders_global(struct vm_area_struct *vma,
+			       enum tva_type type,
+			       unsigned long orders)
+{
+	struct bpf_thp_ops *bpf_thp;
+	int bpf_order;
+
+	rcu_read_lock();
+	bpf_thp = rcu_dereference(bpf_thp_global);
+	if (!bpf_thp || !bpf_thp->thp_get_order)
+		goto out;
+
+	bpf_order = bpf_thp->thp_get_order(vma, type, orders);
+	orders &= BIT(bpf_order);
+
+out:
+	rcu_read_unlock();
+	return orders;
+}
 
 unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
 				       enum tva_type type,
@@ -45,6 +67,10 @@ unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
 	if (!mm)
 		return orders;
 
+	/* Global BPF-THP takes precedence over per-process BPF-THP. */
+	if (rcu_access_pointer(bpf_thp_global))
+		return bpf_hook_thp_get_orders_global(vma, type, orders);
+
 	rcu_read_lock();
 	bpf_thp = rcu_dereference(mm->bpf_mm.bpf_thp);
 	if (!bpf_thp || !bpf_thp->thp_get_order)
@@ -177,6 +203,23 @@ static int bpf_thp_init_member(const struct btf_type *t,
 	return 0;
 }
 
+static int bpf_thp_reg_global(void *kdata, struct bpf_link *link)
+{
+	struct bpf_thp_ops *ops = kdata;
+
+	/* Protect the global pointer bpf_thp_global from concurrent writes. */
+	spin_lock(&thp_ops_lock);
+	/* Only one global instance is allowed. */
+	if (rcu_access_pointer(bpf_thp_global)) {
+		spin_unlock(&thp_ops_lock);
+		return -EBUSY;
+	}
+
+	rcu_assign_pointer(bpf_thp_global, ops);
+	spin_unlock(&thp_ops_lock);
+	return 0;
+}
+
 static int bpf_thp_reg(void *kdata, struct bpf_link *link)
 {
 	struct bpf_thp_ops *bpf_thp = kdata;
@@ -187,6 +230,11 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
 	pid_t pid;
 
 	pid = bpf_thp->pid;
+
+	/* Fall back to global mode if pid is not set. */
+	if (!pid)
+		return bpf_thp_reg_global(kdata, link);
+
 	p = find_get_task_by_vpid(pid);
 	if (!p)
 		return -ESRCH;
@@ -207,8 +255,10 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
 	 * might register this task simultaneously.
 	 */
 	spin_lock(&thp_ops_lock);
-	/* Each process is exclusively managed by a single BPF-THP. */
-	if (rcu_access_pointer(mm->bpf_mm.bpf_thp))
+	/* Each process is exclusively managed by a single BPF-THP.
+	 * Global mode disables per-process instances.
+	 */
+	if (rcu_access_pointer(mm->bpf_mm.bpf_thp) || rcu_access_pointer(bpf_thp_global))
 		goto out_lock;
 	err = 0;
 	rcu_assign_pointer(mm->bpf_mm.bpf_thp, bpf_thp);
@@ -224,12 +274,33 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
 	return err;
 }
 
+static void bpf_thp_unreg_global(void *kdata, struct bpf_link *link)
+{
+	struct bpf_thp_ops *bpf_thp;
+
+	spin_lock(&thp_ops_lock);
+	if (!rcu_access_pointer(bpf_thp_global)) {
+		spin_unlock(&thp_ops_lock);
+		return;
+	}
+
+	bpf_thp = rcu_replace_pointer(bpf_thp_global, NULL,
+				      lockdep_is_held(&thp_ops_lock));
+	WARN_ON_ONCE(!bpf_thp);
+	spin_unlock(&thp_ops_lock);
+
+	synchronize_rcu();
+}
+
 static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
 {
 	struct bpf_thp_ops *bpf_thp = kdata;
 	struct bpf_mm_ops *bpf_mm;
 	struct list_head *pos, *n;
 
+	if (!bpf_thp->pid)
+		return bpf_thp_unreg_global(kdata, link);
+
 	spin_lock(&thp_ops_lock);
 	list_for_each_safe(pos, n, &bpf_thp->mm_list) {
 		bpf_mm = list_entry(pos, struct bpf_mm_ops, bpf_thp_list);
@@ -242,13 +313,47 @@ static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
 	synchronize_rcu();
 }
 
+static int bpf_thp_update_global(void *kdata, void *old_kdata, struct bpf_link *link)
+{
+	struct bpf_thp_ops *old_bpf_thp = old_kdata;
+	struct bpf_thp_ops *bpf_thp = kdata;
+	struct bpf_thp_ops *old_global;
+
+	if (!old_bpf_thp || !bpf_thp)
+		return -EINVAL;
+
+	spin_lock(&thp_ops_lock);
+	/* The global BPF-THP instance has already been removed. */
+	if (!rcu_access_pointer(bpf_thp_global)) {
+		spin_unlock(&thp_ops_lock);
+		return -ENOENT;
+	}
+
+	old_global = rcu_replace_pointer(bpf_thp_global, bpf_thp,
+					 lockdep_is_held(&thp_ops_lock));
+	WARN_ON_ONCE(!old_global);
+	spin_unlock(&thp_ops_lock);
+
+	synchronize_rcu();
+	return 0;
+}
+
 static int bpf_thp_update(void *kdata, void *old_kdata, struct bpf_link *link)
 {
 	struct bpf_thp_ops *old_bpf_thp = old_kdata;
 	struct bpf_thp_ops *bpf_thp = kdata;
 	struct bpf_mm_ops *bpf_mm;
 	struct list_head *pos, *n;
 
+	/* Updates are confined to instances of the same scope:
+	 * global to global, process-local to process-local.
+	 */
+	if (!!old_bpf_thp->pid != !!bpf_thp->pid)
+		return -EINVAL;
+
+	if (!old_bpf_thp->pid)
+		return bpf_thp_update_global(kdata, old_kdata, link);
+
 	INIT_LIST_HEAD(&bpf_thp->mm_list);
 
 	/* Could be optimized to a per-instance lock if this lock becomes a bottleneck. */
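For reference, a global-mode instance is selected simply by leaving the struct's pid field at 0 when the struct_ops map is loaded. The following is a minimal user-space sketch, not part of this commit: it assumes the usual libbpf struct_ops conventions, a vmlinux.h that carries struct bpf_thp_ops and enum tva_type, and a hypothetical map name thp_global.

/* SPDX-License-Identifier: GPL-2.0 */
/* Sketch only: suppress every THP order system-wide via the global BPF-THP hook. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Invoked from bpf_hook_thp_get_orders_global(); returning 0 restricts the
 * allowed orders to BIT(0), i.e. no THP for any task.
 */
SEC("struct_ops/thp_get_order")
int BPF_PROG(thp_get_order, struct vm_area_struct *vma,
	     enum tva_type type, unsigned long orders)
{
	return 0;
}

SEC(".struct_ops.link")
struct bpf_thp_ops thp_global = {
	.thp_get_order = (void *)thp_get_order,
	/* .pid left at 0: register the global instance via bpf_thp_reg_global(). */
};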