@@ -504,6 +504,10 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
 			       struct virtnet_rq_stats *stats);
 static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
 				 struct sk_buff *skb, u8 flags);
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+					       struct sk_buff *curr_skb,
+					       struct page *page, void *buf,
+					       int len, int truesize);
 
 static bool is_xdp_frame(void *ptr)
 {
@@ -984,6 +988,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
 
 	rq = &vi->rq[i];
 
+	if (rq->xsk_pool) {
+		xsk_buff_free((struct xdp_buff *)buf);
+		return;
+	}
+
 	if (!vi->big_packets || vi->mergeable_rx_bufs)
 		virtnet_rq_unmap(rq, buf, 0);
 
@@ -1152,6 +1161,139 @@ static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct
 	}
 }
 
+static void xsk_drop_follow_bufs(struct net_device *dev,
+				 struct receive_queue *rq,
+				 u32 num_buf,
+				 struct virtnet_rq_stats *stats)
+{
+	struct xdp_buff *xdp;
+	u32 len;
+
+	while (num_buf-- > 1) {
+		xdp = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!xdp)) {
+			pr_debug("%s: rx error: %d buffers missing\n",
+				 dev->name, num_buf);
+			DEV_STATS_INC(dev, rx_length_errors);
+			break;
+		}
+		u64_stats_add(&stats->bytes, len);
+		xsk_buff_free(xdp);
+	}
+}
+
+static int xsk_append_merge_buffer(struct virtnet_info *vi,
+				   struct receive_queue *rq,
+				   struct sk_buff *head_skb,
+				   u32 num_buf,
+				   struct virtio_net_hdr_mrg_rxbuf *hdr,
+				   struct virtnet_rq_stats *stats)
+{
+	struct sk_buff *curr_skb;
+	struct xdp_buff *xdp;
+	u32 len, truesize;
+	struct page *page;
+	void *buf;
+
+	curr_skb = head_skb;
+
+	while (--num_buf) {
+		buf = virtqueue_get_buf(rq->vq, &len);
+		if (unlikely(!buf)) {
+			pr_debug("%s: rx error: %d buffers out of %d missing\n",
+				 vi->dev->name, num_buf,
+				 virtio16_to_cpu(vi->vdev,
+						 hdr->num_buffers));
+			DEV_STATS_INC(vi->dev, rx_length_errors);
+			return -EINVAL;
+		}
+
+		u64_stats_add(&stats->bytes, len);
+
+		xdp = buf_to_xdp(vi, rq, buf, len);
+		if (!xdp)
+			goto err;
+
+		buf = napi_alloc_frag(len);
+		if (!buf) {
+			xsk_buff_free(xdp);
+			goto err;
+		}
+
+		memcpy(buf, xdp->data - vi->hdr_len, len);
+
+		xsk_buff_free(xdp);
+
+		page = virt_to_page(buf);
+
+		truesize = len;
+
+		curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+						   buf, len, truesize);
+		if (!curr_skb) {
+			put_page(page);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
+	return -EINVAL;
+}
+
+static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
+						 struct receive_queue *rq, struct xdp_buff *xdp,
+						 unsigned int *xdp_xmit,
+						 struct virtnet_rq_stats *stats)
+{
+	struct virtio_net_hdr_mrg_rxbuf *hdr;
+	struct bpf_prog *prog;
+	struct sk_buff *skb;
+	u32 ret, num_buf;
+
+	hdr = xdp->data - vi->hdr_len;
+	num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+
+	ret = XDP_PASS;
+	rcu_read_lock();
+	prog = rcu_dereference(rq->xdp_prog);
+	/* TODO: support multi buffer. */
+	if (prog && num_buf == 1)
+		ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+	rcu_read_unlock();
+
+	switch (ret) {
+	case XDP_PASS:
+		skb = xsk_construct_skb(rq, xdp);
+		if (!skb)
+			goto drop_bufs;
+
+		if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
+			dev_kfree_skb(skb);
+			goto drop;
+		}
+
+		return skb;
+
+	case XDP_TX:
+	case XDP_REDIRECT:
+		return NULL;
+
+	default:
+		/* drop packet */
+		xsk_buff_free(xdp);
+	}
+
+drop_bufs:
+	xsk_drop_follow_bufs(dev, rq, num_buf, stats);
+
+drop:
+	u64_stats_inc(&stats->drops);
+	return NULL;
+}
+
 static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
 				    void *buf, u32 len,
 				    unsigned int *xdp_xmit,
@@ -1181,6 +1323,8 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
 
 	if (!vi->mergeable_rx_bufs)
 		skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+	else
+		skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
 
 	if (skb)
 		virtnet_receive_done(vi, rq, skb, flags);
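
The error paths in this patch all funnel into one pattern: when building the head skb or appending a follow-up fragment fails partway through a multi-buffer packet, the remaining descriptors are still popped from the virtqueue and freed (xsk_drop_follow_bufs()), so the ring stays in sync with the device and no buffers leak. Below is a minimal user-space sketch of that drain-on-error pattern, not part of the patch; the toy ring and ring_pop()/free() are hypothetical stand-ins for the vring and virtqueue_get_buf()/xsk_buff_free().

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 16

struct ring {
	void *bufs[RING_SIZE];
	unsigned int head;
	unsigned int tail;
};

/* Stand-in for virtqueue_get_buf(): NULL means a descriptor is missing. */
static void *ring_pop(struct ring *r)
{
	if (r->head == r->tail)
		return NULL;
	return r->bufs[r->head++ % RING_SIZE];
}

/*
 * Mirror of the kernel loop: the packet header advertised num_buf
 * buffers, the head buffer is already consumed, so pop and free the
 * num_buf - 1 follow-ups, bailing out if the ring runs dry.
 */
static void drop_follow_bufs(struct ring *r, unsigned int num_buf)
{
	void *buf;

	while (num_buf-- > 1) {
		buf = ring_pop(r);
		if (!buf) {
			fprintf(stderr, "rx error: %u buffers missing\n", num_buf);
			break;
		}
		free(buf);	/* stand-in for xsk_buff_free() */
	}
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 3 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		r.bufs[i] = malloc(64);

	/* Header said 5 buffers but only 3 follow-ups arrived: free 3, report 1 missing. */
	drop_follow_bufs(&r, 5);
	return 0;
}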