@@ -307,9 +307,10 @@ static void bpf_lru_list_push_free(struct bpf_lru_list *l,
 	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
 		return;
 
-	raw_spin_lock_irqsave(&l->lock, flags);
+	if (raw_res_spin_lock_irqsave(&l->lock, flags))
+		return;
 	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
-	raw_spin_unlock_irqrestore(&l->lock, flags);
+	raw_res_spin_unlock_irqrestore(&l->lock, flags);
 }
 
 static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
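The conversion is mechanical at every site: each raw_spin_lock*() call on an LRU lock becomes its raw_res_spin_lock*() counterpart, and the return value is checked. Unlike a plain raw spinlock, a resilient spinlock acquisition can fail: it returns 0 on success and a negative errno when its deadlock or timeout detection trips, so every call site needs a bail-out path. A minimal sketch of the pattern, assuming the rqspinlock API (include/asm-generic/rqspinlock.h in recent kernels); the function and parameter names here are illustrative, not from the patch:

#include <linux/list.h>
#include <asm-generic/rqspinlock.h>

static void demo_push_free(rqspinlock_t *lock, struct list_head *entry,
			   struct list_head *free_list)
{
	unsigned long flags;

	/*
	 * Resilient acquisition can fail (negative errno on deadlock or
	 * timeout detection) instead of spinning indefinitely, so abort
	 * the operation rather than risk a lockup.
	 */
	if (raw_res_spin_lock_irqsave(lock, flags))
		return;

	list_move(entry, free_list);
	raw_res_spin_unlock_irqrestore(lock, flags);
}
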
@@ -319,7 +320,8 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
 	struct bpf_lru_node *node, *tmp_node;
 	unsigned int nfree = 0;
 
-	raw_spin_lock(&l->lock);
+	if (raw_res_spin_lock(&l->lock))
+		return;
 
 	__local_list_flush(l, loc_l);
 
@@ -338,7 +340,7 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
 				      local_free_list(loc_l),
 				      BPF_LRU_LOCAL_LIST_T_FREE);
 
-	raw_spin_unlock(&l->lock);
+	raw_res_spin_unlock(&l->lock);
 }
 
 static void __local_list_add_pending(struct bpf_lru *lru,
@@ -404,7 +406,8 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
 
 	l = per_cpu_ptr(lru->percpu_lru, cpu);
 
-	raw_spin_lock_irqsave(&l->lock, flags);
+	if (raw_res_spin_lock_irqsave(&l->lock, flags))
+		return NULL;
 
 	__bpf_lru_list_rotate(lru, l);
 
@@ -420,7 +423,7 @@ static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
 		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
 	}
 
-	raw_spin_unlock_irqrestore(&l->lock, flags);
+	raw_res_spin_unlock_irqrestore(&l->lock, flags);
 
 	return node;
 }
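With this change bpf_percpu_lru_pop_free() gains a second way to return NULL: not only when the LRU is exhausted, but also when the per-CPU list lock cannot be taken. Callers already treat NULL as allocation failure, so contention folds into the existing error path. A hypothetical caller sketch (the helper name and error code are illustrative; bpf_lru_pop_free() is the real entry point declared in kernel/bpf/bpf_lru_list.h):

static int demo_alloc_elem(struct bpf_lru *lru, u32 hash)
{
	struct bpf_lru_node *node;

	node = bpf_lru_pop_free(lru, hash);
	if (!node)
		return -ENOMEM;	/* exhausted or lock contention: same failure path */

	/* ... initialize the element embedding this node ... */
	return 0;
}
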
@@ -437,7 +440,8 @@ static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
 
 	loc_l = per_cpu_ptr(clru->local_list, cpu);
 
-	raw_spin_lock_irqsave(&loc_l->lock, flags);
+	if (raw_res_spin_lock_irqsave(&loc_l->lock, flags))
+		return NULL;
 
 	node = __local_list_pop_free(loc_l);
 	if (!node) {
@@ -448,7 +452,7 @@ static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
 	if (node)
 		__local_list_add_pending(lru, loc_l, cpu, node, hash);
 
-	raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+	raw_res_spin_unlock_irqrestore(&loc_l->lock, flags);
 
 	if (node)
 		return node;
@@ -466,23 +470,26 @@ static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
 	do {
 		steal_loc_l = per_cpu_ptr(clru->local_list, steal);
 
-		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);
+		if (raw_res_spin_lock_irqsave(&steal_loc_l->lock, flags))
+			goto out_next;
 
 		node = __local_list_pop_free(steal_loc_l);
 		if (!node)
 			node = __local_list_pop_pending(lru, steal_loc_l);
 
-		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);
+		raw_res_spin_unlock_irqrestore(&steal_loc_l->lock, flags);
 
+out_next:
 		steal = cpumask_next_wrap(steal, cpu_possible_mask);
 	} while (!node && steal != first_steal);
 
 	loc_l->next_steal = steal;
 
 	if (node) {
-		raw_spin_lock_irqsave(&loc_l->lock, flags);
+		if (raw_res_spin_lock_irqsave(&loc_l->lock, flags))
+			return NULL;
 		__local_list_add_pending(lru, loc_l, cpu, node, hash);
-		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+		raw_res_spin_unlock_irqrestore(&loc_l->lock, flags);
 	}
 
 	return node;
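The steal loop needs slightly more care than the straight-line sites: a remote CPU whose local list lock cannot be taken is skipped via out_next rather than aborting the whole allocation, so one contended CPU only costs a missed steal opportunity, while the final re-acquisition of loc_l->lock fails the allocation outright with NULL. A condensed sketch of the skip-on-contention iteration, reusing this file's internal helpers and omitting the pending-list fallback:

static struct bpf_lru_node *demo_steal(struct bpf_common_lru *clru,
				       int first_steal)
{
	struct bpf_lru_node *node = NULL;
	unsigned long flags;
	int steal = first_steal;

	do {
		struct bpf_lru_locallist *l = per_cpu_ptr(clru->local_list, steal);

		/* Contended remote CPU: skip it instead of waiting on it. */
		if (raw_res_spin_lock_irqsave(&l->lock, flags))
			goto next_cpu;

		node = __local_list_pop_free(l);
		raw_res_spin_unlock_irqrestore(&l->lock, flags);
next_cpu:
		steal = cpumask_next_wrap(steal, cpu_possible_mask);
	} while (!node && steal != first_steal);

	return node;
}
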
@@ -511,18 +518,19 @@ static void bpf_common_lru_push_free(struct bpf_lru *lru,
 
 		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
 
-		raw_spin_lock_irqsave(&loc_l->lock, flags);
+		if (raw_res_spin_lock_irqsave(&loc_l->lock, flags))
+			return;
 
 		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
-			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+			raw_res_spin_unlock_irqrestore(&loc_l->lock, flags);
 			goto check_lru_list;
 		}
 
 		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
 		bpf_lru_node_clear_ref(node);
 		list_move(&node->list, local_free_list(loc_l));
 
-		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
+		raw_res_spin_unlock_irqrestore(&loc_l->lock, flags);
 		return;
 	}
 
@@ -538,11 +546,12 @@ static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
 
 	l = per_cpu_ptr(lru->percpu_lru, node->cpu);
 
-	raw_spin_lock_irqsave(&l->lock, flags);
+	if (raw_res_spin_lock_irqsave(&l->lock, flags))
+		return;
 
 	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
 
-	raw_spin_unlock_irqrestore(&l->lock, flags);
+	raw_res_spin_unlock_irqrestore(&l->lock, flags);
 }
 
 void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
@@ -625,7 +634,7 @@ static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
 
 	loc_l->next_steal = cpu;
 
-	raw_spin_lock_init(&loc_l->lock);
+	raw_res_spin_lock_init(&loc_l->lock);
 }
 
 static void bpf_lru_list_init(struct bpf_lru_list *l)
@@ -640,7 +649,7 @@ static void bpf_lru_list_init(struct bpf_lru_list *l)
 
 	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];
 
-	raw_spin_lock_init(&l->lock);
+	raw_res_spin_lock_init(&l->lock);
 }
 
 int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
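The two init-site changes imply a matching type change in bpf_lru_list.h: the lock fields of struct bpf_lru_list and struct bpf_lru_locallist must switch from raw_spinlock_t to rqspinlock_t for the raw_res_* API to apply. A sketch of what the adjusted structure and its init look like, assuming that header change (the struct name and abbreviated layout here are illustrative):

#include <linux/list.h>
#include <asm-generic/rqspinlock.h>

#define NR_DEMO_LRU_LIST_T	3	/* active, inactive, free (abbreviated) */

struct demo_lru_list {
	struct list_head lists[NR_DEMO_LRU_LIST_T];
	rqspinlock_t lock;		/* was: raw_spinlock_t lock */
};

static void demo_lru_list_init(struct demo_lru_list *l)
{
	int i;

	for (i = 0; i < NR_DEMO_LRU_LIST_T; i++)
		INIT_LIST_HEAD(&l->lists[i]);

	raw_res_spin_lock_init(&l->lock);
}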