
Commit 9fefc78

n132 authored and Paolo Abeni committed
net/sched: sch_cake: Fix incorrect qlen reduction in cake_drop
In cake_drop(), qdisc_tree_reduce_backlog() is used to update the qlen and backlog of the qdisc hierarchy. Its caller, cake_enqueue(), assumes that the parent qdisc will enqueue the current packet. However, this assumption breaks when cake_enqueue() returns NET_XMIT_CN: the parent qdisc stops enqueuing the current packet, leaving the tree's qlen/backlog accounting inconsistent. This mismatch can lead to a NULL dereference (e.g., when the parent qdisc is qfq_qdisc).

This patch computes the qlen/backlog delta in a more robust way, by observing the difference before and after the series of cake_drop() calls, and then compensates the qdisc tree accounting if cake_enqueue() returns NET_XMIT_CN. To ensure correct compensation when ACK thinning is enabled, a new variable is introduced to keep qlen unchanged.

Fixes: 15de71d ("net/sched: Make cake_enqueue return NET_XMIT_CN when past buffer_limit")
Signed-off-by: Xiang Mei <[email protected]>
Reviewed-by: Toke Høiland-Jørgensen <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Paolo Abeni <[email protected]>
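For context on the assumption the commit message describes: a classful parent qdisc typically adds a packet to its own qlen/backlog only after the child's enqueue returns NET_XMIT_SUCCESS, so when the child has already reduced the tree's accounting via qdisc_tree_reduce_backlog() and then returns NET_XMIT_CN, the ancestors' counters end up over-reduced. The sketch below is a simplified illustration of that pattern, loosely modelled on classful qdiscs such as drr or qfq; the function name parent_enqueue_sketch and the elided class lookup are illustrative, not the actual qfq code.

static int parent_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
                                 struct sk_buff **to_free)
{
        unsigned int len = qdisc_pkt_len(skb);
        struct Qdisc *child = /* class lookup elided */ NULL;
        int err;

        err = qdisc_enqueue(skb, child, to_free);
        if (err != NET_XMIT_SUCCESS) {
                /* NET_XMIT_CN lands here: the parent records a drop but
                 * never adds this packet to its own qlen/backlog.
                 */
                if (net_xmit_drop_count(err))
                        qdisc_qstats_drop(sch);
                return err;
        }

        /* Only on success does the parent account for the packet. */
        sch->qstats.backlog += len;
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}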
1 parent e5235eb commit 9fefc78

File tree

1 file changed: +32 -26 lines changed


net/sched/sch_cake.c

Lines changed: 32 additions & 26 deletions
@@ -1597,7 +1597,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
         qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
         sch->q.qlen--;
-        qdisc_tree_reduce_backlog(sch, 1, len);
 
         cake_heapify(q, 0);
 
@@ -1743,14 +1742,14 @@ static void cake_reconfigure(struct Qdisc *sch);
 static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
+        u32 idx, tin, prev_qlen, prev_backlog, drop_id;
         struct cake_sched_data *q = qdisc_priv(sch);
-        int len = qdisc_pkt_len(skb);
-        int ret;
+        int len = qdisc_pkt_len(skb), ret;
         struct sk_buff *ack = NULL;
         ktime_t now = ktime_get();
         struct cake_tin_data *b;
         struct cake_flow *flow;
-        u32 idx, tin;
+        bool same_flow = false;
 
         /* choose flow to insert into */
         idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@@ -1823,6 +1822,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 consume_skb(skb);
         } else {
                 /* not splitting */
+                int ack_pkt_len = 0;
+
                 cobalt_set_enqueue_time(skb, now);
                 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
                 flow_queue_add(flow, skb);
@@ -1833,13 +1834,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                 if (ack) {
                         b->ack_drops++;
                         sch->qstats.drops++;
-                        b->bytes += qdisc_pkt_len(ack);
-                        len -= qdisc_pkt_len(ack);
+                        ack_pkt_len = qdisc_pkt_len(ack);
+                        b->bytes += ack_pkt_len;
                         q->buffer_used += skb->truesize - ack->truesize;
                         if (q->rate_flags & CAKE_FLAG_INGRESS)
                                 cake_advance_shaper(q, b, ack, now, true);
 
-                        qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
+                        qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
                         consume_skb(ack);
                 } else {
                         sch->q.qlen++;
@@ -1848,11 +1849,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
                 /* stats */
                 b->packets++;
-                b->bytes += len;
-                b->backlogs[idx] += len;
-                b->tin_backlog += len;
-                sch->qstats.backlog += len;
-                q->avg_window_bytes += len;
+                b->bytes += len - ack_pkt_len;
+                b->backlogs[idx] += len - ack_pkt_len;
+                b->tin_backlog += len - ack_pkt_len;
+                sch->qstats.backlog += len - ack_pkt_len;
+                q->avg_window_bytes += len - ack_pkt_len;
         }
 
         if (q->overflow_timeout)
@@ -1927,24 +1928,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         if (q->buffer_used > q->buffer_max_used)
                 q->buffer_max_used = q->buffer_used;
 
-        if (q->buffer_used > q->buffer_limit) {
-                bool same_flow = false;
-                u32 dropped = 0;
-                u32 drop_id;
+        if (q->buffer_used <= q->buffer_limit)
+                return NET_XMIT_SUCCESS;
 
-                while (q->buffer_used > q->buffer_limit) {
-                        dropped++;
-                        drop_id = cake_drop(sch, to_free);
+        prev_qlen = sch->q.qlen;
+        prev_backlog = sch->qstats.backlog;
 
-                        if ((drop_id >> 16) == tin &&
-                            (drop_id & 0xFFFF) == idx)
-                                same_flow = true;
-                }
-                b->drop_overlimit += dropped;
+        while (q->buffer_used > q->buffer_limit) {
+                drop_id = cake_drop(sch, to_free);
+                if ((drop_id >> 16) == tin &&
+                    (drop_id & 0xFFFF) == idx)
+                        same_flow = true;
+        }
+
+        prev_qlen -= sch->q.qlen;
+        prev_backlog -= sch->qstats.backlog;
+        b->drop_overlimit += prev_qlen;
 
-                if (same_flow)
-                        return NET_XMIT_CN;
+        if (same_flow) {
+                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - len);
+                return NET_XMIT_CN;
         }
+        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
         return NET_XMIT_SUCCESS;
 }
 
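A quick way to sanity-check the compensation in the new NET_XMIT_CN path is to plug in some numbers. The user-space sketch below (hypothetical values, accounting arithmetic only, not kernel code) mirrors the prev_qlen/prev_backlog bookkeeping in the last hunk: because the parent never accounts the packet whose enqueue returns NET_XMIT_CN, one packet and len bytes are excluded from the amount propagated up the tree.

#include <stdio.h>

int main(void)
{
        /* Hypothetical state just before the overlimit drop loop: cake has
         * already accounted the new packet (len = 1500) in its own counters,
         * but the parent qdisc has not yet done so.
         */
        unsigned int len = 1500;
        unsigned int prev_qlen = 10, prev_backlog = 15000;

        /* Suppose cake_drop() runs three times, freeing 4500 bytes, and one
         * of those drops hits the same tin/flow, so NET_XMIT_CN is returned.
         */
        unsigned int qlen_after = 7, backlog_after = 10500;

        unsigned int dropped_pkts  = prev_qlen - qlen_after;       /* 3 */
        unsigned int dropped_bytes = prev_backlog - backlog_after; /* 4500 */

        /* NET_XMIT_CN path: the parent never added this packet, so only the
         * remaining drops are pushed up via qdisc_tree_reduce_backlog().
         */
        printf("reduce ancestors by %u packets and %u bytes\n",
               dropped_pkts - 1, dropped_bytes - len); /* 2 packets, 3000 bytes */
        return 0;
}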