Skip to content

Commit 05310f3

Browse files
jgross1 (authored) and Paolo Abeni (committed)
xen/netback: don't do grant copy across page boundary
Fix xenvif_get_requests() not to do grant copy operations across local page boundaries. This requires doubling the maximum number of copy operations per queue, as each copy could now be split into 2.

Make sure that struct xenvif_tx_cb doesn't grow too large.

Cc: [email protected]
Fixes: ad7f402 ("xen/netback: Ensure protocol headers don't fall in the non-linear area")
Signed-off-by: Juergen Gross <[email protected]>
Reviewed-by: Paul Durrant <[email protected]>
Signed-off-by: Paolo Abeni <[email protected]>
1 parent f22c993 commit 05310f3

File tree

2 files changed

+24
-3
lines changed

2 files changed

+24
-3
lines changed

drivers/net/xen-netback/common.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
166166
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
167167
grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
168168

169-
struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
169+
struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
170170
struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
171171
struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
172172
/* passed to gnttab_[un]map_refs with pages under (un)mapping */

drivers/net/xen-netback/netback.c

Lines changed: 23 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
334334
struct xenvif_tx_cb {
335335
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
336336
u8 copy_count;
337+
u32 split_mask;
337338
};
338339

339340
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
361362
struct sk_buff *skb =
362363
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
363364
GFP_ATOMIC | __GFP_NOWARN);
365+
366+
BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
364367
if (unlikely(skb == NULL))
365368
return NULL;
366369

@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
396399
nr_slots = shinfo->nr_frags + 1;
397400

398401
copy_count(skb) = 0;
402+
XENVIF_TX_CB(skb)->split_mask = 0;
399403

400404
/* Create copy ops for exactly data_len bytes into the skb head. */
401405
__skb_put(skb, data_len);
402406
while (data_len > 0) {
403407
int amount = data_len > txp->size ? txp->size : data_len;
408+
bool split = false;
404409

405410
cop->source.u.ref = txp->gref;
406411
cop->source.domid = queue->vif->domid;
@@ -413,14 +418,22 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
413418
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
414419
- data_len);
415420

421+
/* Don't cross local page boundary! */
422+
if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
423+
amount = XEN_PAGE_SIZE - cop->dest.offset;
424+
XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
425+
split = true;
426+
}
427+
416428
cop->len = amount;
417429
cop->flags = GNTCOPY_source_gref;
418430

419431
index = pending_index(queue->pending_cons);
420432
pending_idx = queue->pending_ring[index];
421433
callback_param(queue, pending_idx).ctx = NULL;
422434
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
423-
copy_count(skb)++;
435+
if (!split)
436+
copy_count(skb)++;
424437

425438
cop++;
426439
data_len -= amount;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
441454
nr_slots--;
442455
} else {
443456
/* The copy op partially covered the tx_request.
444-
* The remainder will be mapped.
457+
* The remainder will be mapped or copied in the next
458+
* iteration.
445459
*/
446460
txp->offset += amount;
447461
txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
539553
pending_idx = copy_pending_idx(skb, i);
540554

541555
newerr = (*gopp_copy)->status;
556+
557+
/* Split copies need to be handled together. */
558+
if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
559+
(*gopp_copy)++;
560+
if (!newerr)
561+
newerr = (*gopp_copy)->status;
562+
}
542563
if (likely(!newerr)) {
543564
/* The first frag might still have this slot mapped */
544565
if (i < copy_count(skb) - 1 || !sharedslot)

0 commit comments

Comments (0)