Commit b71101d

Merge tag 'for-linus-xsa-6.1-rc9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen fixes from Juergen Gross:
 "Two zero-day fixes for the xen-netback driver (XSA-423 and XSA-424)"

* tag 'for-linus-xsa-6.1-rc9-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/netback: don't call kfree_skb() with interrupts disabled
  xen/netback: Ensure protocol headers don't fall in the non-linear area
2 parents bce9332 + 74e7e1e commit b71101d
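Background for the first fix (XSA-424): kfree_skb() and dev_kfree_skb() must not be called while interrupts are disabled, for example under a lock taken with spin_lock_irqsave(). In such contexts dev_kfree_skb_irq(), or the context-agnostic dev_kfree_skb_any(), has to be used instead. The sketch below is illustrative only and not part of the commit (the function name is made up); it shows roughly how dev_kfree_skb_any() chooses the safe variant.

/* Illustrative sketch, not from the commit: picking a safe skb-free helper
 * depending on context.  This mirrors, roughly, what dev_kfree_skb_any()
 * does internally.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_drop_skb(struct sk_buff *skb)
{
	if (in_hardirq() || irqs_disabled())
		dev_kfree_skb_irq(skb);		/* defers the free to softirq context */
	else
		dev_kfree_skb(skb);		/* process context with IRQs enabled */
}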

4 files changed: +133 −106 lines

drivers/net/xen-netback/common.h

Lines changed: 1 addition & 1 deletion
@@ -386,7 +386,7 @@ int xenvif_dealloc_kthread(void *data);
 irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
-void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
+bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

 void xenvif_carrier_on(struct xenvif *vif);

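The prototype change above (void to bool) lets xenvif_rx_queue_tail() report a failed enqueue instead of freeing the skb itself while it still holds the RX queue lock taken with spin_lock_irqsave(); the caller then drops the packet after the lock is released, as the interface.c hunk below shows. The rx.c side of this commit is not included in this excerpt, so the following is only a simplified, hypothetical sketch of that shape (names and fields are made up), not the driver's actual code.

/* Hypothetical sketch only -- the real change is in
 * drivers/net/xen-netback/rx.c, which is not shown in this excerpt.
 */
static bool demo_rx_queue_tail(struct demo_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;
	bool queued = false;

	spin_lock_irqsave(&queue->rx_lock, flags);
	if (!demo_rx_queue_full(queue)) {
		__skb_queue_tail(&queue->rx_list, skb);
		queued = true;
	}
	/* Note: no kfree_skb() here -- interrupts are disabled. */
	spin_unlock_irqrestore(&queue->rx_lock, flags);

	return queued;	/* on false, the caller frees with dev_kfree_skb_any() */
}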
drivers/net/xen-netback/interface.c

Lines changed: 4 additions & 2 deletions
@@ -254,14 +254,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
 		skb_clear_hash(skb);

-	xenvif_rx_queue_tail(queue, skb);
+	if (!xenvif_rx_queue_tail(queue, skb))
+		goto drop;
+
 	xenvif_kick_thread(queue);

 	return NETDEV_TX_OK;

  drop:
 	vif->dev->stats.tx_dropped++;
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }

drivers/net/xen-netback/netback.c

Lines changed: 123 additions & 100 deletions
@@ -332,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 
 
 struct xenvif_tx_cb {
-	u16 pending_idx;
+	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
+	u8 copy_count;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
+#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
+#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
 
 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
 					   u16 pending_idx,
@@ -370,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	return skb;
 }
 
-static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
-							struct sk_buff *skb,
-							struct xen_netif_tx_request *txp,
-							struct gnttab_map_grant_ref *gop,
-							unsigned int frag_overflow,
-							struct sk_buff *nskb)
+static void xenvif_get_requests(struct xenvif_queue *queue,
+				struct sk_buff *skb,
+				struct xen_netif_tx_request *first,
+				struct xen_netif_tx_request *txfrags,
+				unsigned *copy_ops,
+				unsigned *map_ops,
+				unsigned int frag_overflow,
+				struct sk_buff *nskb,
+				unsigned int extra_count,
+				unsigned int data_len)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	skb_frag_t *frags = shinfo->frags;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
-	int start;
+	u16 pending_idx;
 	pending_ring_idx_t index;
 	unsigned int nr_slots;
+	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
+	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
+	struct xen_netif_tx_request *txp = first;
+
+	nr_slots = shinfo->nr_frags + 1;
+
+	copy_count(skb) = 0;
+
+	/* Create copy ops for exactly data_len bytes into the skb head. */
+	__skb_put(skb, data_len);
+	while (data_len > 0) {
+		int amount = data_len > txp->size ? txp->size : data_len;
+
+		cop->source.u.ref = txp->gref;
+		cop->source.domid = queue->vif->domid;
+		cop->source.offset = txp->offset;
+
+		cop->dest.domid = DOMID_SELF;
+		cop->dest.offset = (offset_in_page(skb->data +
+						   skb_headlen(skb) -
+						   data_len)) & ~XEN_PAGE_MASK;
+		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
+					       - data_len);
+
+		cop->len = amount;
+		cop->flags = GNTCOPY_source_gref;
 
-	nr_slots = shinfo->nr_frags;
+		index = pending_index(queue->pending_cons);
+		pending_idx = queue->pending_ring[index];
+		callback_param(queue, pending_idx).ctx = NULL;
+		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
+		copy_count(skb)++;
+
+		cop++;
+		data_len -= amount;
 
-	/* Skip first skb fragment if it is on same page as header fragment. */
-	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
+		if (amount == txp->size) {
+			/* The copy op covered the full tx_request */
+
+			memcpy(&queue->pending_tx_info[pending_idx].req,
+			       txp, sizeof(*txp));
+			queue->pending_tx_info[pending_idx].extra_count =
+				(txp == first) ? extra_count : 0;
+
+			if (txp == first)
+				txp = txfrags;
+			else
+				txp++;
+			queue->pending_cons++;
+			nr_slots--;
+		} else {
+			/* The copy op partially covered the tx_request.
+			 * The remainder will be mapped.
+			 */
+			txp->offset += amount;
+			txp->size -= amount;
+		}
+	}
 
-	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
-	     shinfo->nr_frags++, txp++, gop++) {
+	for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
+	     shinfo->nr_frags++, gop++) {
 		index = pending_index(queue->pending_cons++);
 		pending_idx = queue->pending_ring[index];
-		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
+		xenvif_tx_create_map_op(queue, pending_idx, txp,
+					txp == first ? extra_count : 0, gop);
 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+
+		if (txp == first)
+			txp = txfrags;
+		else
+			txp++;
 	}
 
 	if (frag_overflow) {
@@ -415,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
 		skb_shinfo(skb)->frag_list = nskb;
 	}
 
-	return gop;
+	(*copy_ops) = cop - queue->tx_copy_ops;
+	(*map_ops) = gop - queue->tx_map_ops;
 }
 
 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@@ -451,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 			       struct gnttab_copy **gopp_copy)
 {
 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
-	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+	u16 pending_idx;
 	/* This always points to the shinfo of the skb being checked, which
 	 * could be either the first or the one on the frag_list
 	 */
@@ -462,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 	struct skb_shared_info *first_shinfo = NULL;
 	int nr_frags = shinfo->nr_frags;
 	const bool sharedslot = nr_frags &&
-				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
+				frag_get_pending_idx(&shinfo->frags[0]) ==
+				    copy_pending_idx(skb, copy_count(skb) - 1);
 	int i, err;
 
-	/* Check status of header. */
-	err = (*gopp_copy)->status;
-	if (unlikely(err)) {
-		if (net_ratelimit())
-			netdev_dbg(queue->vif->dev,
-				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
-				   (*gopp_copy)->status,
-				   pending_idx,
-				   (*gopp_copy)->source.u.ref);
-		/* The first frag might still have this slot mapped */
-		if (!sharedslot)
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_ERROR);
+	for (i = 0; i < copy_count(skb); i++) {
+		int newerr;
+
+		/* Check status of header. */
+		pending_idx = copy_pending_idx(skb, i);
+
+		newerr = (*gopp_copy)->status;
+		if (likely(!newerr)) {
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_OKAY);
+		} else {
+			err = newerr;
+			if (net_ratelimit())
+				netdev_dbg(queue->vif->dev,
+					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
+					   (*gopp_copy)->status,
+					   pending_idx,
+					   (*gopp_copy)->source.u.ref);
+			/* The first frag might still have this slot mapped */
+			if (i < copy_count(skb) - 1 || !sharedslot)
+				xenvif_idx_release(queue, pending_idx,
+						   XEN_NETIF_RSP_ERROR);
+		}
+		(*gopp_copy)++;
 	}
-	(*gopp_copy)++;
 
 check_frags:
 	for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -526,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		if (err)
 			continue;
 
-		/* First error: if the header haven't shared a slot with the
-		 * first frag, release it as well.
-		 */
-		if (!sharedslot)
-			xenvif_idx_release(queue,
-					   XENVIF_TX_CB(skb)->pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-
 		/* Invalidate preceding fragments of this skb. */
 		for (j = 0; j < i; j++) {
 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@@ -803,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				       unsigned *copy_ops,
 				       unsigned *map_ops)
 {
-	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
 	struct sk_buff *skb, *nskb;
 	int ret;
 	unsigned int frag_overflow;
@@ -885,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 			continue;
 		}
 
+		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
+			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+
 		ret = xenvif_count_requests(queue, &txreq, extra_count,
 					    txfrags, work_to_do);
+
 		if (unlikely(ret < 0))
 			break;
 
@@ -912,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		index = pending_index(queue->pending_cons);
 		pending_idx = queue->pending_ring[index];
 
-		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
-			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
-			XEN_NETBACK_TX_COPY_LEN : txreq.size;
+		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
+			data_len = txreq.size;
 
 		skb = xenvif_alloc_skb(data_len);
 		if (unlikely(skb == NULL)) {
@@ -925,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 		}
 
 		skb_shinfo(skb)->nr_frags = ret;
-		if (data_len < txreq.size)
-			skb_shinfo(skb)->nr_frags++;
 		/* At this point shinfo->nr_frags is in fact the number of
 		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
 		 */
@@ -988,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     type);
 		}
 
-		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
-
-		__skb_put(skb, data_len);
-		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
-		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
-		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
-
-		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-			virt_to_gfn(skb->data);
-		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
-		queue->tx_copy_ops[*copy_ops].dest.offset =
-			offset_in_page(skb->data) & ~XEN_PAGE_MASK;
-
-		queue->tx_copy_ops[*copy_ops].len = data_len;
-		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
-
-		(*copy_ops)++;
-
-		if (data_len < txreq.size) {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     pending_idx);
-			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
-						extra_count, gop);
-			gop++;
-		} else {
-			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
-					     INVALID_PENDING_IDX);
-			memcpy(&queue->pending_tx_info[pending_idx].req,
-			       &txreq, sizeof(txreq));
-			queue->pending_tx_info[pending_idx].extra_count =
-				extra_count;
-		}
-
-		queue->pending_cons++;
-
-		gop = xenvif_get_requests(queue, skb, txfrags, gop,
-					  frag_overflow, nskb);
+		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
+				    map_ops, frag_overflow, nskb, extra_count,
+				    data_len);
 
 		__skb_queue_tail(&queue->tx_queue, skb);
 
 		queue->tx.req_cons = idx;
 
-		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
+		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
 			break;
 	}
 
-	(*map_ops) = gop - queue->tx_map_ops;
 	return;
 }
 
@@ -1114,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
 		struct xen_netif_tx_request *txp;
 		u16 pending_idx;
-		unsigned data_len;
 
-		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+		pending_idx = copy_pending_idx(skb, 0);
 		txp = &queue->pending_tx_info[pending_idx].req;
 
 		/* Check the remap error code. */
@@ -1135,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 			continue;
 		}
 
-		data_len = skb->len;
-		callback_param(queue, pending_idx).ctx = NULL;
-		if (data_len < txp->size) {
-			/* Append the packet payload as a fragment. */
-			txp->offset += data_len;
-			txp->size -= data_len;
-		} else {
-			/* Schedule a response immediately. */
-			xenvif_idx_release(queue, pending_idx,
-					   XEN_NETIF_RSP_OKAY);
-		}
-
 		if (txp->flags & XEN_NETTXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (txp->flags & XEN_NETTXF_data_validated)
@@ -1332,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
 /* Called after netfront has transmitted */
 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
 {
-	unsigned nr_mops, nr_cops = 0;
+	unsigned nr_mops = 0, nr_cops = 0;
 	int work_done, ret;
 
 	if (unlikely(!tx_work_todo(queue)))
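The netback.c rework above implements the XSA-423 fix: grant-copy the first data_len bytes of every packet into the skb's linear area, capped at XEN_NETBACK_TX_COPY_LEN (and raised to the full first-request size when the packet already needs the maximum number of slots), and grant-map only the remainder as fragments, so protocol headers can no longer end up in the non-linear area. The split arithmetic itself is simple; the following is a standalone, hedged illustration of it, not driver code, and the 128-byte cap is only an assumed value for the example.

/* Illustrative user-space sketch of the head-copy / tail-map split used by
 * xenvif_get_requests() above.  Names, types and values are simplified.
 */
#include <stdio.h>

#define DEMO_TX_COPY_LEN 128		/* stands in for XEN_NETBACK_TX_COPY_LEN */

struct demo_txreq {
	unsigned int offset;
	unsigned int size;
};

int main(void)
{
	struct demo_txreq req = { .offset = 0, .size = 1000 };

	/* The head of the packet is always copied into the linear area... */
	unsigned int data_len = req.size > DEMO_TX_COPY_LEN ?
				DEMO_TX_COPY_LEN : req.size;
	printf("grant-copy %u bytes into the skb head\n", data_len);

	/* ...and whatever is left of the slot is grant-mapped as a frag. */
	req.offset += data_len;
	req.size -= data_len;
	if (req.size)
		printf("grant-map the remaining %u bytes at offset %u\n",
		       req.size, req.offset);
	return 0;
}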
