@@ -584,29 +584,42 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 	return sk_psock_skb_ingress(psock, skb);
 }
 
-static void sock_drop(struct sock *sk, struct sk_buff *skb)
+static void sk_psock_skb_state(struct sk_psock *psock,
+			       struct sk_psock_work_state *state,
+			       struct sk_buff *skb,
+			       int len, int off)
 {
-	sk_drops_add(sk, skb);
-	kfree_skb(skb);
+	spin_lock_bh(&psock->ingress_lock);
+	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+		state->skb = skb;
+		state->len = len;
+		state->off = off;
+	} else {
+		sock_drop(psock->sk, skb);
+	}
+	spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
 {
 	struct sk_psock *psock = container_of(work, struct sk_psock, work);
 	struct sk_psock_work_state *state = &psock->work_state;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	bool ingress;
 	u32 len, off;
 	int ret;
 
 	mutex_lock(&psock->work_mutex);
-	if (state->skb) {
+	if (unlikely(state->skb)) {
+		spin_lock_bh(&psock->ingress_lock);
 		skb = state->skb;
 		len = state->len;
 		off = state->off;
 		state->skb = NULL;
-		goto start;
+		spin_unlock_bh(&psock->ingress_lock);
 	}
+	if (skb)
+		goto start;
 
 	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 		len = skb->len;
@@ -621,9 +634,8 @@ static void sk_psock_backlog(struct work_struct *work)
 							  len, ingress);
 			if (ret <= 0) {
 				if (ret == -EAGAIN) {
-					state->skb = skb;
-					state->len = len;
-					state->off = off;
+					sk_psock_skb_state(psock, state, skb,
+							   len, off);
 					goto end;
 				}
 				/* Hard errors break pipe and stop xmit. */
@@ -722,6 +734,11 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
 		skb_bpf_redirect_clear(skb);
 		sock_drop(psock->sk, skb);
 	}
+	kfree_skb(psock->work_state.skb);
+	/* We null the skb here to ensure that calls to sk_psock_backlog
+	 * do not pick up the free'd skb.
+	 */
+	psock->work_state.skb = NULL;
 	__sk_psock_purge_ingress_msg(psock);
 }
 
@@ -773,8 +790,6 @@ static void sk_psock_destroy(struct work_struct *work)
 
 void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 {
-	sk_psock_stop(psock, false);
-
 	write_lock_bh(&sk->sk_callback_lock);
 	sk_psock_restore_proto(sk, psock);
 	rcu_assign_sk_user_data(sk, NULL);
@@ -784,6 +799,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 	sk_psock_stop_verdict(sk, psock);
 	write_unlock_bh(&sk->sk_callback_lock);
 
+	sk_psock_stop(psock, false);
+
 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 	queue_rcu_work(system_wq, &psock->rwork);
 }