@@ -494,6 +494,7 @@ static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
 }
 
 static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
+					u32 off, u32 len,
 					struct sk_psock *psock,
 					struct sock *sk,
 					struct sk_msg *msg)
@@ -507,11 +508,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 	 */
 	if (skb_linearize(skb))
 		return -EAGAIN;
-	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
+	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
 	if (unlikely(num_sge < 0))
 		return num_sge;
 
-	copied = skb->len;
+	copied = len;
 	msg->sg.start = 0;
 	msg->sg.size = copied;
 	msg->sg.end = num_sge;
@@ -522,9 +523,11 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
 	return copied;
 }
 
-static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);
+static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
+				     u32 off, u32 len);
 
-static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
+static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
+				u32 off, u32 len)
 {
 	struct sock *sk = psock->sk;
 	struct sk_msg *msg;
@@ -535,7 +538,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 	 * correctly.
 	 */
 	if (unlikely(skb->sk == sk))
-		return sk_psock_skb_ingress_self(psock, skb);
+		return sk_psock_skb_ingress_self(psock, skb, off, len);
 	msg = sk_psock_create_ingress_msg(sk, skb);
 	if (!msg)
 		return -EAGAIN;
@@ -547,7 +550,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
 	 * into user buffers.
 	 */
 	skb_set_owner_r(skb, sk);
-	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 	if (err < 0)
 		kfree(msg);
 	return err;
@@ -557,7 +560,8 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
  * skb. In this case we do not need to check memory limits or skb_set_owner_r
  * because the skb is already accounted for here.
  */
-static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
+static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
+				     u32 off, u32 len)
 {
 	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
 	struct sock *sk = psock->sk;
@@ -567,7 +571,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
 		return -EAGAIN;
 	sk_msg_init(msg);
 	skb_set_owner_r(skb, sk);
-	err = sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
+	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
 	if (err < 0)
 		kfree(msg);
 	return err;
@@ -581,7 +585,7 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 			return -EAGAIN;
 		return skb_send_sock(psock->sk, skb, off, len);
 	}
-	return sk_psock_skb_ingress(psock, skb);
+	return sk_psock_skb_ingress(psock, skb, off, len);
 }
 
 static void sk_psock_skb_state(struct sk_psock *psock,
@@ -624,6 +628,12 @@ static void sk_psock_backlog(struct work_struct *work)
 	while ((skb = skb_dequeue(&psock->ingress_skb))) {
 		len = skb->len;
 		off = 0;
+		if (skb_bpf_strparser(skb)) {
+			struct strp_msg *stm = strp_msg(skb);
+
+			off = stm->offset;
+			len = stm->full_len;
+		}
 start:
 		ingress = skb_bpf_ingress(skb);
 		skb_bpf_redirect_clear(skb);
@@ -863,6 +873,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 	 * return code, but then didn't set a redirect interface.
 	 */
 	if (unlikely(!sk_other)) {
+		skb_bpf_redirect_clear(skb);
 		sock_drop(from->sk, skb);
 		return -EIO;
 	}
@@ -930,13 +941,15 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 {
 	struct sock *sk_other;
 	int err = 0;
+	u32 len, off;
 
 	switch (verdict) {
 	case __SK_PASS:
 		err = -EIO;
 		sk_other = psock->sk;
 		if (sock_flag(sk_other, SOCK_DEAD) ||
 		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+			skb_bpf_redirect_clear(skb);
 			goto out_free;
 		}
@@ -949,7 +962,15 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
 		 * retrying later from workqueue.
 		 */
 		if (skb_queue_empty(&psock->ingress_skb)) {
-			err = sk_psock_skb_ingress_self(psock, skb);
+			len = skb->len;
+			off = 0;
+			if (skb_bpf_strparser(skb)) {
+				struct strp_msg *stm = strp_msg(skb);
+
+				off = stm->offset;
+				len = stm->full_len;
+			}
+			err = sk_psock_skb_ingress_self(psock, skb, off, len);
 		}
 		if (err < 0) {
 			spin_lock_bh(&psock->ingress_lock);
@@ -1015,6 +1036,8 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 		skb_dst_drop(skb);
 		skb_bpf_redirect_clear(skb);
 		ret = bpf_prog_run_pin_on_cpu(prog, skb);
+		if (ret == SK_PASS)
+			skb_bpf_set_strparser(skb);
 		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
 		skb->sk = NULL;
 	}