
Commit 499919d

mfijalko authored and Kernel Patches Daemon committed
xsk: avoid double checking against rx queue being full
Currently the non-zc xsk rx path for the multi-buffer case checks twice whether the xsk rx queue has enough space for producing descriptors:

1.	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

2.	__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
	  -> err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	    -> if (xskq_prod_is_full(q))

The second check is redundant: in 1. we already peeked into the rx queue and verified that there is enough space to produce the given number of descriptors. Provide helper functions that skip it and therefore optimize the code.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
1 parent 291a805 commit 499919d
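The idea, in isolation: check free ring space once for the whole multi-buffer frame, then let every per-fragment write go through an unchecked helper. Below is a minimal, self-contained userspace sketch of that pattern; every name in it (toy_ring, toy_prod_nb_free, __toy_prod_reserve, ...) is invented for illustration, and none of it is the actual xsk code this commit touches.

/*
 * Illustrative sketch only -- NOT the kernel code changed by this commit.
 * It models the same "check once, then produce without re-checking" idea
 * on a toy single-producer descriptor ring.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8			/* power of two, like xsk rings */
#define RING_MASK (RING_SIZE - 1)

struct toy_desc { uint64_t addr; uint32_t len; uint32_t options; };

struct toy_ring {
	uint32_t prod;			/* producer cursor */
	uint32_t cons;			/* consumer cursor */
	struct toy_desc desc[RING_SIZE];
};

/* How many free slots are left for the producer? */
static uint32_t toy_prod_nb_free(const struct toy_ring *r)
{
	return RING_SIZE - (r->prod - r->cons);
}

/* Unchecked variant: the caller must have verified free space already. */
static void __toy_prod_reserve(struct toy_ring *r, uint64_t addr,
			       uint32_t len, uint32_t options)
{
	uint32_t idx = r->prod++ & RING_MASK;

	r->desc[idx] = (struct toy_desc){ .addr = addr, .len = len,
					  .options = options };
}

/* Checked variant for callers that have not peeked at the queue. */
static int toy_prod_reserve(struct toy_ring *r, uint64_t addr,
			    uint32_t len, uint32_t options)
{
	if (toy_prod_nb_free(r) == 0)
		return -1;		/* queue full */

	__toy_prod_reserve(r, addr, len, options);
	return 0;
}

/* Multi-fragment producer: one up-front check, then unchecked fills. */
static int toy_produce_frags(struct toy_ring *r, uint32_t num_desc)
{
	if (toy_prod_nb_free(r) < num_desc)
		return -1;		/* not enough room for the whole frame */

	for (uint32_t i = 0; i < num_desc; i++)
		__toy_prod_reserve(r, 0x1000 * i, 256, i + 1 < num_desc);

	return 0;
}

int main(void)
{
	struct toy_ring ring;

	memset(&ring, 0, sizeof(ring));
	printf("3 frags: %s\n", toy_produce_frags(&ring, 3) ? "full" : "ok");
	printf("single: %s\n", toy_prod_reserve(&ring, 0x9000, 64, 0) ? "full" : "ok");
	return 0;
}

The split between toy_prod_reserve() and __toy_prod_reserve() mirrors the checked/unchecked pair the patch introduces in net/xdp/xsk_queue.h below.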

File tree: 2 files changed (+24, -6 lines)


net/xdp/xsk.c

Lines changed: 13 additions & 1 deletion
@@ -160,6 +160,17 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
 	return 0;
 }
 
+static void __xsk_rcv_zc_safe(struct xdp_sock *xs, struct xdp_buff_xsk *xskb,
+			      u32 len, u32 flags)
+{
+	u64 addr;
+
+	addr = xp_get_handle(xskb, xskb->pool);
+	__xskq_prod_reserve_desc(xs->rx, addr, len, flags);
+
+	xp_release(xskb);
+}
+
 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -292,7 +303,8 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 		rem -= copied;
 
 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
-		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
+		__xsk_rcv_zc_safe(xs, xskb, copied - meta_len,
+				  rem ? XDP_PKT_CONTD : 0);
 		meta_len = 0;
 	} while (rem);
 
net/xdp/xsk_queue.h

Lines changed: 11 additions & 5 deletions
@@ -445,20 +445,26 @@ static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_de
 	q->cached_prod = cached_prod;
 }
 
-static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-					 u64 addr, u32 len, u32 flags)
+static inline void __xskq_prod_reserve_desc(struct xsk_queue *q,
+					    u64 addr, u32 len, u32 flags)
 {
 	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 	u32 idx;
 
-	if (xskq_prod_is_full(q))
-		return -ENOBUFS;
-
 	/* A, matches D */
 	idx = q->cached_prod++ & q->ring_mask;
 	ring->desc[idx].addr = addr;
 	ring->desc[idx].len = len;
 	ring->desc[idx].options = flags;
+}
+
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+					 u64 addr, u32 len, u32 flags)
+{
+	if (xskq_prod_is_full(q))
+		return -ENOBUFS;
+
+	__xskq_prod_reserve_desc(q, addr, len, flags);
 
 	return 0;
 }
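Taken together with the xsk.c hunk above, the split gives two entry points: xskq_prod_reserve_desc() keeps the xskq_prod_is_full() check for callers that have not inspected the queue, while __xskq_prod_reserve_desc() relies on the caller having already peeked at free space — here, the xskq_prod_nb_free() check quoted in the commit message, which verifies room for all descriptors of a multi-buffer frame before the per-fragment loop runs. The double-underscore prefix follows the usual kernel convention for variants whose checking is the caller's responsibility.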
