@@ -498,6 +498,12 @@ struct virtio_net_common_hdr {
 };
 
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+			       struct net_device *dev,
+			       unsigned int *xdp_xmit,
+			       struct virtnet_rq_stats *stats);
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+				 struct sk_buff *skb, u8 flags);
 
 static bool is_xdp_frame(void *ptr)
 {
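The two forward declarations are needed because the XSK receive helpers added below call virtnet_xdp_handler() and virtnet_receive_done(), both of which are defined later in the file. For orientation, a handler of this kind typically wraps bpf_prog_run_xdp() and maps its verdict for the caller. The following is a minimal sketch of that shape only, not the driver's actual virtnet_xdp_handler(), whose body is outside this diff:

/*
 * Illustrative only: the real virtnet_xdp_handler() also performs the
 * XDP_TX/XDP_REDIRECT transmit work and stats accounting. This sketch
 * just shows the usual run-program-then-map-verdict structure.
 */
static u32 example_xdp_verdict(struct bpf_prog *prog, struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
		return act;		/* caller acts on each verdict */
	default:
		return XDP_DROP;	/* unknown or aborted: drop the frame */
	}
}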
@@ -1062,6 +1068,124 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
 	sg->length = len;
 }
 
+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+				   struct receive_queue *rq, void *buf, u32 len)
+{
+	struct xdp_buff *xdp;
+	u32 bufsize;
+
+	xdp = (struct xdp_buff *)buf;
+
+	bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+
+	if (unlikely(len > bufsize)) {
+		pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+			 vi->dev->name, len, bufsize);
+		DEV_STATS_INC(vi->dev, rx_length_errors);
+		xsk_buff_free(xdp);
+		return NULL;
+	}
+
+	xsk_buff_set_size(xdp, len);
+	xsk_buff_dma_sync_for_cpu(xdp);
+
+	return xdp;
+}
+
+static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
+					 struct xdp_buff *xdp)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+	struct sk_buff *skb;
+	unsigned int size;
+
+	size = xdp->data_end - xdp->data_hard_start;
+	skb = napi_alloc_skb(&rq->napi, size);
+	if (unlikely(!skb)) {
+		xsk_buff_free(xdp);
+		return NULL;
+	}
+
+	skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+	size = xdp->data_end - xdp->data_meta;
+	memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+	if (metasize) {
+		__skb_pull(skb, metasize);
+		skb_metadata_set(skb, metasize);
+	}
+
+	xsk_buff_free(xdp);
+
+	return skb;
+}
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+						 struct receive_queue *rq, struct xdp_buff *xdp,
+						 unsigned int *xdp_xmit,
+						 struct virtnet_rq_stats *stats)
+{
+	struct bpf_prog *prog;
+	u32 ret;
+
+	ret = XDP_PASS;
+	rcu_read_lock();
+	prog = rcu_dereference(rq->xdp_prog);
+	if (prog)
+		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+	rcu_read_unlock();
+
+	switch (ret) {
+	case XDP_PASS:
+		return xsk_construct_skb(rq, xdp);
+
+	case XDP_TX:
+	case XDP_REDIRECT:
+		return NULL;
+
+	default:
+		/* drop packet */
+		xsk_buff_free(xdp);
+		u64_stats_inc(&stats->drops);
+		return NULL;
+	}
+}
+
+static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+				    void *buf, u32 len,
+				    unsigned int *xdp_xmit,
+				    struct virtnet_rq_stats *stats)
+{
+	struct net_device *dev = vi->dev;
+	struct sk_buff *skb = NULL;
+	struct xdp_buff *xdp;
+	u8 flags;
+
+	len -= vi->hdr_len;
+
+	u64_stats_add(&stats->bytes, len);
+
+	xdp = buf_to_xdp(vi, rq, buf, len);
+	if (!xdp)
+		return;
+
+	if (unlikely(len < ETH_HLEN)) {
+		pr_debug("%s: short packet %i\n", dev->name, len);
+		DEV_STATS_INC(dev, rx_length_errors);
+		xsk_buff_free(xdp);
+		return;
+	}
+
+	flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
+
+	if (!vi->mergeable_rx_bufs)
+		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+
+	if (skb)
+		virtnet_receive_done(vi, rq, skb, flags);
+}
+
 static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
 				   struct xsk_buff_pool *pool, gfp_t gfp)
 {
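The subtle part of xsk_construct_skb() above is the metadata arithmetic: the copy starts at xdp->data_meta, so any metadata an XDP program wrote ends up directly in front of the packet bytes, and the __skb_pull()/skb_metadata_set() pair then hides it from the protocol stack while recording its length. Below is a self-contained userspace sketch of the same pointer arithmetic; every name in it is hypothetical, made up purely for illustration:

#include <string.h>

/* Userspace stand-in for the handful of sk_buff fields involved. */
struct fake_skb {
	unsigned char buf[2048];
	unsigned char *data;	/* where the Ethernet header starts */
	size_t len;		/* payload bytes from data onward */
	size_t meta_len;	/* XDP metadata bytes in front of data */
};

/* Assumes data_end - data_meta <= sizeof(skb->buf). */
static void copy_like_xsk_construct_skb(struct fake_skb *skb,
					const unsigned char *data_meta,
					const unsigned char *data,
					const unsigned char *data_end)
{
	size_t metasize = (size_t)(data - data_meta);
	size_t total = (size_t)(data_end - data_meta);

	/* One copy starting at data_meta, mirroring the kernel's
	 * memcpy(__skb_put(skb, size), xdp->data_meta, size): the
	 * metadata lands directly in front of the packet. */
	memcpy(skb->buf, data_meta, total);

	/* The __skb_pull(skb, metasize) step: advance data past the
	 * metadata so the stack sees the packet itself, while the
	 * skb_metadata_set(skb, metasize) step keeps its length. */
	skb->data = skb->buf + metasize;
	skb->len = total - metasize;
	skb->meta_len = metasize;
}

Note also that in XSK mode the buffer dequeued from the virtqueue is the xdp_buff itself, which is why buf_to_xdp() can simply cast it rather than build one.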
@@ -2392,32 +2516,68 @@ static void refill_work(struct work_struct *work)
 	}
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget,
-			   unsigned int *xdp_xmit)
+static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
+				    struct receive_queue *rq,
+				    int budget,
+				    unsigned int *xdp_xmit,
+				    struct virtnet_rq_stats *stats)
+{
+	unsigned int len;
+	int packets = 0;
+	void *buf;
+
+	while (packets < budget) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (!buf)
+			break;
+
+		virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
+		packets++;
+	}
+
+	return packets;
+}
+
+static int virtnet_receive_packets(struct virtnet_info *vi,
+				   struct receive_queue *rq,
+				   int budget,
+				   unsigned int *xdp_xmit,
+				   struct virtnet_rq_stats *stats)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
-	struct virtnet_rq_stats stats = {};
 	unsigned int len;
 	int packets = 0;
 	void *buf;
-	int i;
 
 	if (!vi->big_packets || vi->mergeable_rx_bufs) {
 		void *ctx;
-
 		while (packets < budget &&
 		       (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
-			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
 			packets++;
 		}
 	} else {
 		while (packets < budget &&
 		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
 			packets++;
 		}
 	}
 
+	return packets;
+}
+
+static int virtnet_receive(struct receive_queue *rq, int budget,
+			   unsigned int *xdp_xmit)
+{
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	struct virtnet_rq_stats stats = {};
+	int i, packets;
+
+	if (rq->xsk_pool)
+		packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
+	else
+		packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
+
 	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
 		if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
 			spin_lock(&vi->refill_lock);
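Both new receive loops preserve the NAPI contract: consume at most budget buffers, stop early once the queue runs dry, and return the number actually processed so the caller can decide whether polling may stop. Reduced to its essentials, the shared pattern looks like this (every name here is hypothetical, standing in for the virtqueue calls above):

struct demo_ring;				/* stand-in for the virtqueue */
void *demo_ring_pop(struct demo_ring *r);	/* stand-in for virtqueue_get_buf() */
void demo_process(void *buf);			/* stand-in for the per-buffer handler */

static int demo_drain(struct demo_ring *r, int budget)
{
	int packets = 0;

	while (packets < budget) {
		void *buf = demo_ring_pop(r);
		if (!buf)
			break;		/* queue empty: finish early */
		demo_process(buf);
		packets++;
	}

	return packets;	/* packets < budget => safe to stop polling */
}

The refactor also threads a caller-owned virtnet_rq_stats pointer through the helpers instead of a local struct in each, which is why the receive_buf() calls now pass stats rather than &stats.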