@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	nr_slots = shinfo->nr_frags + 1;
 
 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -413,14 +418,22 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 					       - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;
 
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;
 
 		cop++;
 		data_len -= amount;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		pending_idx = copy_pending_idx(skb, i);
 
 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)
@@ -1061,10 +1082,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
-
-		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
-		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
-			break;
 	}
 
 	return;
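
For reference, a minimal stand-alone sketch (user-space C, not part of the patch; the variable names dest_offset and n_ops and the example values are illustrative) of the splitting idea the hunks above implement: a copy whose destination would cross a XEN_PAGE_SIZE boundary is broken into two grant-copy operations, the first of which is flagged in a bitmask so that both status fields can be checked together later, while only the op that completes a slot advances the slot count.

/* Illustrative sketch of the page-boundary split bookkeeping. */
#include <stdio.h>
#include <stdint.h>

#define XEN_PAGE_SIZE 4096u

int main(void)
{
	unsigned int dest_offset = 4000;	/* copy destination offset in its page */
	unsigned int data_len = 300;		/* bytes left to copy into the skb head */
	unsigned int n_ops = 0, copy_count = 0;
	uint32_t split_mask = 0;

	while (data_len > 0) {
		unsigned int amount = data_len;
		int split = 0;

		/* Don't cross the local page boundary: truncate the op and
		 * remember that this slot is made up of two copy ops.
		 */
		if (dest_offset + amount > XEN_PAGE_SIZE) {
			amount = XEN_PAGE_SIZE - dest_offset;
			split_mask |= 1U << copy_count;
			split = 1;
		}

		printf("op %u (slot %u): %u bytes at page offset %u%s\n",
		       n_ops, copy_count, amount, dest_offset,
		       split ? ", split" : "");

		n_ops++;
		/* Only the op that finishes a slot advances the slot count,
		 * mirroring the "if (!split) copy_count(skb)++;" change above.
		 */
		if (!split)
			copy_count++;

		dest_offset = (dest_offset + amount) % XEN_PAGE_SIZE;
		data_len -= amount;
	}

	printf("%u copy ops for %u slot(s), split_mask=0x%x\n",
	       n_ops, copy_count, split_mask);
	return 0;
}

With these example values (destination offset 4000 in a 4096-byte page, 300 bytes to copy), the first op covers the 96 bytes up to the boundary and the second the remaining 204 bytes, and both are accounted to the same pending slot with bit 0 set in the split mask.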