@@ -332,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 
 struct xenvif_tx_cb {
-        u16 pending_idx;
+        u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+        u8 copy_count;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                            u16 pending_idx,
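
Note on the control block above (not part of the patch): struct xenvif_tx_cb lives in skb->cb, so the enlarged structure must still fit in the 48-byte cb area, and the two new macros are plain lvalue accessors. A minimal sketch of how they are meant to be used, assuming the definitions in this hunk; the helper name record_copy_slot() is hypothetical, not driver code:

/* Hypothetical helper, illustrative only: records one grant-copy slot
 * in the skb control block using the accessors defined above.
 */
static void record_copy_slot(struct sk_buff *skb, u16 pending_idx)
{
        BUILD_BUG_ON(sizeof(struct xenvif_tx_cb) > sizeof(skb->cb));
        copy_pending_idx(skb, copy_count(skb)) = pending_idx;
        copy_count(skb)++;
}
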
@@ -370,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
         return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-                                                        struct sk_buff *skb,
-                                                        struct xen_netif_tx_request *txp,
-                                                        struct gnttab_map_grant_ref *gop,
-                                                        unsigned int frag_overflow,
-                                                        struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+                                struct sk_buff *skb,
+                                struct xen_netif_tx_request *first,
+                                struct xen_netif_tx_request *txfrags,
+                                unsigned *copy_ops,
+                                unsigned *map_ops,
+                                unsigned int frag_overflow,
+                                struct sk_buff *nskb,
+                                unsigned int extra_count,
+                                unsigned int data_len)
 {
         struct skb_shared_info *shinfo = skb_shinfo(skb);
         skb_frag_t *frags = shinfo->frags;
-        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-        int start;
+        u16 pending_idx;
         pending_ring_idx_t index;
         unsigned int nr_slots;
+        struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+        struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+        struct xen_netif_tx_request *txp = first;
+
+        nr_slots = shinfo->nr_frags + 1;
+
+        copy_count(skb) = 0;
+
+        /* Create copy ops for exactly data_len bytes into the skb head. */
+        __skb_put(skb, data_len);
+        while (data_len > 0) {
+                int amount = data_len > txp->size ? txp->size : data_len;
+
+                cop->source.u.ref = txp->gref;
+                cop->source.domid = queue->vif->domid;
+                cop->source.offset = txp->offset;
+
+                cop->dest.domid = DOMID_SELF;
+                cop->dest.offset = (offset_in_page(skb->data +
+                                                   skb_headlen(skb) -
+                                                   data_len)) & ~XEN_PAGE_MASK;
+                cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+                                               - data_len);
+
+                cop->len = amount;
+                cop->flags = GNTCOPY_source_gref;
 
-        nr_slots = shinfo->nr_frags;
+                index = pending_index(queue->pending_cons);
+                pending_idx = queue->pending_ring[index];
+                callback_param(queue, pending_idx).ctx = NULL;
+                copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+                copy_count(skb)++;
+
+                cop++;
+                data_len -= amount;
 
-        /* Skip first skb fragment if it is on same page as header fragment. */
-        start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+                if (amount == txp->size) {
+                        /* The copy op covered the full tx_request */
+
+                        memcpy(&queue->pending_tx_info[pending_idx].req,
+                               txp, sizeof(*txp));
+                        queue->pending_tx_info[pending_idx].extra_count =
+                                (txp == first) ? extra_count : 0;
+
+                        if (txp == first)
+                                txp = txfrags;
+                        else
+                                txp++;
+                        queue->pending_cons++;
+                        nr_slots--;
+                } else {
+                        /* The copy op partially covered the tx_request.
+                         * The remainder will be mapped.
+                         */
+                        txp->offset += amount;
+                        txp->size -= amount;
+                }
+        }
 
-        for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-             shinfo->nr_frags++, txp++, gop++) {
+        for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+             shinfo->nr_frags++, gop++) {
                 index = pending_index(queue->pending_cons++);
                 pending_idx = queue->pending_ring[index];
-                xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+                xenvif_tx_create_map_op(queue, pending_idx, txp,
+                                        txp == first ? extra_count : 0, gop);
                 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+                if (txp == first)
+                        txp = txfrags;
+                else
+                        txp++;
         }
 
         if (frag_overflow) {
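
A stand-alone sketch of the head/frag split performed by the rewritten xenvif_get_requests() above (illustrative only, plain user-space C; split_head() and the example slot sizes are hypothetical, not driver code): the copy loop consumes whole tx requests until data_len is exhausted, and a request that is only partially copied keeps its remainder for the grant-map pass.

/* Given the requested head length and the per-slot sizes, report how many
 * grant-copy ops would be emitted and how many bytes of the last touched
 * slot are left over for the grant-map pass.
 */
#include <stdio.h>

static unsigned int split_head(unsigned int data_len,
                               const unsigned int *slot_size,
                               unsigned int nr_slots,
                               unsigned int *leftover)
{
        unsigned int i = 0, copies = 0;

        *leftover = 0;
        while (data_len > 0 && i < nr_slots) {
                unsigned int amount = data_len > slot_size[i] ?
                                      slot_size[i] : data_len;

                copies++;                 /* one gnttab_copy per iteration */
                data_len -= amount;
                if (amount == slot_size[i])
                        i++;              /* slot fully consumed by the copy */
                else
                        *leftover = slot_size[i] - amount; /* rest is mapped */
        }
        return copies;
}

int main(void)
{
        unsigned int sizes[] = { 96, 1000, 1000 };
        unsigned int leftover;
        unsigned int copies = split_head(128, sizes, 3, &leftover);

        /* With a 128-byte head: slot 0 (96 bytes) is copied whole, slot 1
         * contributes 32 bytes and keeps 968 bytes for the map pass.
         */
        printf("%u copy ops, %u bytes left to map\n", copies, leftover);
        return 0;
}
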
@@ -415,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
                 skb_shinfo(skb)->frag_list = nskb;
         }
 
-        return gop;
+        (*copy_ops) = cop - queue->tx_copy_ops;
+        (*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -451,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                                struct gnttab_copy **gopp_copy)
 {
         struct gnttab_map_grant_ref *gop_map = *gopp_map;
-        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+        u16 pending_idx;
         /* This always points to the shinfo of the skb being checked, which
          * could be either the first or the one on the frag_list
          */
@@ -462,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
         struct skb_shared_info *first_shinfo = NULL;
         int nr_frags = shinfo->nr_frags;
         const bool sharedslot = nr_frags &&
-                                frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
+                                frag_get_pending_idx(&shinfo->frags[0]) ==
+                                 copy_pending_idx(skb, copy_count(skb) - 1);
         int i, err;
 
-        /* Check status of header. */
-        err = (*gopp_copy)->status;
-        if (unlikely(err)) {
-                if (net_ratelimit())
-                        netdev_dbg(queue->vif->dev,
-                                   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-                                   (*gopp_copy)->status,
-                                   pending_idx,
-                                   (*gopp_copy)->source.u.ref);
-                /* The first frag might still have this slot mapped */
-                if (!sharedslot)
-                        xenvif_idx_release(queue, pending_idx,
-                                           XEN_NETIF_RSP_ERROR);
+        for (i = 0; i < copy_count(skb); i++) {
+                int newerr;
+
+                /* Check status of header. */
+                pending_idx = copy_pending_idx(skb, i);
+
+                newerr = (*gopp_copy)->status;
+                if (likely(!newerr)) {
+                        /* The first frag might still have this slot mapped */
+                        if (i < copy_count(skb) - 1 || !sharedslot)
+                                xenvif_idx_release(queue, pending_idx,
+                                                   XEN_NETIF_RSP_OKAY);
+                } else {
+                        err = newerr;
+                        if (net_ratelimit())
+                                netdev_dbg(queue->vif->dev,
+                                           "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+                                           (*gopp_copy)->status,
+                                           pending_idx,
+                                           (*gopp_copy)->source.u.ref);
+                        /* The first frag might still have this slot mapped */
+                        if (i < copy_count(skb) - 1 || !sharedslot)
+                                xenvif_idx_release(queue, pending_idx,
+                                                   XEN_NETIF_RSP_ERROR);
+                }
+                (*gopp_copy)++;
         }
-        (*gopp_copy)++;
 
 check_frags:
         for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -526,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                 if (err)
                         continue;
 
-                /* First error: if the header haven't shared a slot with the
-                 * first frag, release it as well.
-                 */
-                if (!sharedslot)
-                        xenvif_idx_release(queue,
-                                           XENVIF_TX_CB(skb)->pending_idx,
-                                           XEN_NETIF_RSP_OKAY);
-
                 /* Invalidate preceding fragments of this skb. */
                 for (j = 0; j < i; j++) {
                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -803,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                  unsigned *copy_ops,
                                  unsigned *map_ops)
 {
-        struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
         struct sk_buff *skb, *nskb;
         int ret;
         unsigned int frag_overflow;
@@ -885,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                         continue;
                 }
 
+                data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+                        XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
                 ret = xenvif_count_requests(queue, &txreq, extra_count,
                                             txfrags, work_to_do);
+
                 if (unlikely(ret < 0))
                         break;
 
@@ -912,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 index = pending_index(queue->pending_cons);
                 pending_idx = queue->pending_ring[index];
 
-                data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-                            ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-                        XEN_NETBACK_TX_COPY_LEN : txreq.size;
+                if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+                        data_len = txreq.size;
 
                 skb = xenvif_alloc_skb(data_len);
                 if (unlikely(skb == NULL)) {
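
A worked example of the new data_len selection in the two hunks above (illustrative only; it assumes XEN_NETBACK_TX_COPY_LEN and XEN_NETBK_LEGACY_SLOTS_MAX are 128 and 18, their values in this driver, and the comment on the slot-limit guard is an interpretation, not text from the patch):

#include <stdio.h>

#define XEN_NETBACK_TX_COPY_LEN    128
#define XEN_NETBK_LEGACY_SLOTS_MAX  18

static unsigned int pick_data_len(unsigned int first_size, int extra_slots)
{
        /* Cap the copied head at the copy threshold... */
        unsigned int data_len = first_size > XEN_NETBACK_TX_COPY_LEN ?
                                XEN_NETBACK_TX_COPY_LEN : first_size;

        /* ...but near the slot limit copy the whole first request, so no
         * partial remainder of it is left needing its own map slot.
         */
        if (extra_slots >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 &&
            data_len < first_size)
                data_len = first_size;
        return data_len;
}

int main(void)
{
        /* 1000-byte first request, 3 extra slots -> copy only 128 bytes. */
        printf("%u\n", pick_data_len(1000, 3));
        /* Same request but 17 extra slots -> copy all 1000 bytes. */
        printf("%u\n", pick_data_len(1000, 17));
        return 0;
}
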
@@ -925,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                 }
 
                 skb_shinfo(skb)->nr_frags = ret;
-                if (data_len < txreq.size)
-                        skb_shinfo(skb)->nr_frags++;
                 /* At this point shinfo->nr_frags is in fact the number of
                  * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
                  */
@@ -988,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                              type);
                 }
 
-                XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-                __skb_put(skb, data_len);
-                queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-                queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-                queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-                queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-                        virt_to_gfn(skb->data);
-                queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-                queue->tx_copy_ops[*copy_ops].dest.offset =
-                        offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-                queue->tx_copy_ops[*copy_ops].len = data_len;
-                queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-                (*copy_ops)++;
-
-                if (data_len < txreq.size) {
-                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-                                             pending_idx);
-                        xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-                                                extra_count, gop);
-                        gop++;
-                } else {
-                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-                                             INVALID_PENDING_IDX);
-                        memcpy(&queue->pending_tx_info[pending_idx].req,
-                               &txreq, sizeof(txreq));
-                        queue->pending_tx_info[pending_idx].extra_count =
-                                extra_count;
-                }
-
-                queue->pending_cons++;
-
-                gop = xenvif_get_requests(queue, skb, txfrags, gop,
-                                          frag_overflow, nskb);
+                xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+                                    map_ops, frag_overflow, nskb, extra_count,
+                                    data_len);
 
                 __skb_queue_tail(&queue->tx_queue, skb);
 
                 queue->tx.req_cons = idx;
 
-                if (((gop - queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+                if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
                     (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
                         break;
         }
 
-        (*map_ops) = gop - queue->tx_map_ops;
         return;
 }
 
@@ -1114,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
         while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
                 struct xen_netif_tx_request *txp;
                 u16 pending_idx;
-                unsigned data_len;
 
-                pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+                pending_idx = copy_pending_idx(skb, 0);
                 txp = &queue->pending_tx_info[pending_idx].req;
 
                 /* Check the remap error code. */
@@ -1135,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                         continue;
                 }
 
-                data_len = skb->len;
-                callback_param(queue, pending_idx).ctx = NULL;
-                if (data_len < txp->size) {
-                        /* Append the packet payload as a fragment. */
-                        txp->offset += data_len;
-                        txp->size -= data_len;
-                } else {
-                        /* Schedule a response immediately. */
-                        xenvif_idx_release(queue, pending_idx,
-                                           XEN_NETIF_RSP_OKAY);
-                }
-
                 if (txp->flags & XEN_NETTXF_csum_blank)
                         skb->ip_summed = CHECKSUM_PARTIAL;
                 else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1332,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-        unsigned nr_mops, nr_cops = 0;
+        unsigned nr_mops = 0, nr_cops = 0;
         int work_done, ret;
 
         if (unlikely(!tx_work_todo(queue)))