Skip to content

Commit f4252ea

Browse files
Paolo Abeni authored and intel-lab-lkp committed
mptcp: cleanup mem accounting.
After the previous patch, updating sk_forward_memory is cheap and we can drop a lot of complexity from the MPTCP memory accounting, removing the custom fwd mem allocations for rmem. Signed-off-by: Paolo Abeni <[email protected]>
1 parent edf6b4b commit f4252ea

File tree

3 files changed

+10
-111
lines changed

3 files changed

+10
-111
lines changed

net/mptcp/fastopen.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf
5151
mptcp_data_lock(sk);
5252
DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk));
5353

54-
mptcp_set_owner_r(skb, sk);
54+
skb_set_owner_r(skb, sk);
5555
__skb_queue_tail(&sk->sk_receive_queue, skb);
5656
mptcp_sk(sk)->bytes_received += skb->len;
5757

net/mptcp/protocol.c

Lines changed: 8 additions & 107 deletions
Original file line numberDiff line numberDiff line change
@@ -118,17 +118,6 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
118118
__kfree_skb(skb);
119119
}
120120

121-
static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
122-
{
123-
WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
124-
mptcp_sk(sk)->rmem_fwd_alloc + size);
125-
}
126-
127-
static void mptcp_rmem_charge(struct sock *sk, int size)
128-
{
129-
mptcp_rmem_fwd_alloc_add(sk, -size);
130-
}
131-
132121
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
133122
struct sk_buff *from)
134123
{
@@ -150,7 +139,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
150139
* negative one
151140
*/
152141
atomic_add(delta, &sk->sk_rmem_alloc);
153-
mptcp_rmem_charge(sk, delta);
142+
sk_mem_charge(sk, delta);
154143
kfree_skb_partial(from, fragstolen);
155144

156145
return true;
@@ -165,44 +154,6 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
165154
return mptcp_try_coalesce((struct sock *)msk, to, from);
166155
}
167156

168-
static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
169-
{
170-
amount >>= PAGE_SHIFT;
171-
mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
172-
__sk_mem_reduce_allocated(sk, amount);
173-
}
174-
175-
static void mptcp_rmem_uncharge(struct sock *sk, int size)
176-
{
177-
struct mptcp_sock *msk = mptcp_sk(sk);
178-
int reclaimable;
179-
180-
mptcp_rmem_fwd_alloc_add(sk, size);
181-
reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
182-
183-
/* see sk_mem_uncharge() for the rationale behind the following schema */
184-
if (unlikely(reclaimable >= PAGE_SIZE))
185-
__mptcp_rmem_reclaim(sk, reclaimable);
186-
}
187-
188-
static void mptcp_rfree(struct sk_buff *skb)
189-
{
190-
unsigned int len = skb->truesize;
191-
struct sock *sk = skb->sk;
192-
193-
atomic_sub(len, &sk->sk_rmem_alloc);
194-
mptcp_rmem_uncharge(sk, len);
195-
}
196-
197-
void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
198-
{
199-
skb_orphan(skb);
200-
skb->sk = sk;
201-
skb->destructor = mptcp_rfree;
202-
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
203-
mptcp_rmem_charge(sk, skb->truesize);
204-
}
205-
206157
/* "inspired" by tcp_data_queue_ofo(), main differences:
207158
* - use mptcp seqs
208159
* - don't cope with sacks
@@ -315,25 +266,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
315266

316267
end:
317268
skb_condense(skb);
318-
mptcp_set_owner_r(skb, sk);
319-
}
320-
321-
static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
322-
{
323-
struct mptcp_sock *msk = mptcp_sk(sk);
324-
int amt, amount;
325-
326-
if (size <= msk->rmem_fwd_alloc)
327-
return true;
328-
329-
size -= msk->rmem_fwd_alloc;
330-
amt = sk_mem_pages(size);
331-
amount = amt << PAGE_SHIFT;
332-
if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
333-
return false;
334-
335-
mptcp_rmem_fwd_alloc_add(sk, amount);
336-
return true;
269+
skb_set_owner_r(skb, sk);
337270
}
338271

339272
static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
@@ -351,7 +284,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
351284
skb_orphan(skb);
352285

353286
/* try to fetch required memory from subflow */
354-
if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
287+
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
355288
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
356289
goto drop;
357290
}
@@ -375,7 +308,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
375308
if (tail && mptcp_try_coalesce(sk, tail, skb))
376309
return true;
377310

378-
mptcp_set_owner_r(skb, sk);
311+
skb_set_owner_r(skb, sk);
379312
__skb_queue_tail(&sk->sk_receive_queue, skb);
380313
return true;
381314
} else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
@@ -1983,9 +1916,10 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
19831916
}
19841917

19851918
if (!(flags & MSG_PEEK)) {
1986-
/* we will bulk release the skb memory later */
1919+
/* avoid the indirect call, we know the destructor is sock_wfree */
19871920
skb->destructor = NULL;
1988-
WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
1921+
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
1922+
sk_mem_uncharge(sk, skb->truesize);
19891923
__skb_unlink(skb, &sk->sk_receive_queue);
19901924
__kfree_skb(skb);
19911925
msk->bytes_consumed += count;
@@ -2099,18 +2033,6 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
20992033
msk->rcvq_space.time = mstamp;
21002034
}
21012035

2102-
static void __mptcp_update_rmem(struct sock *sk)
2103-
{
2104-
struct mptcp_sock *msk = mptcp_sk(sk);
2105-
2106-
if (!msk->rmem_released)
2107-
return;
2108-
2109-
atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
2110-
mptcp_rmem_uncharge(sk, msk->rmem_released);
2111-
WRITE_ONCE(msk->rmem_released, 0);
2112-
}
2113-
21142036
static bool __mptcp_move_skbs(struct sock *sk)
21152037
{
21162038
struct mptcp_subflow_context *subflow;
@@ -2134,20 +2056,14 @@ static bool __mptcp_move_skbs(struct sock *sk)
21342056
break;
21352057

21362058
slowpath = lock_sock_fast(ssk);
2137-
__mptcp_update_rmem(sk);
21382059
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
21392060

21402061
if (unlikely(ssk->sk_err))
21412062
__mptcp_error_report(sk);
21422063
unlock_sock_fast(ssk, slowpath);
21432064
} while (!done);
21442065

2145-
ret = moved > 0;
2146-
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
2147-
!skb_queue_empty(&sk->sk_receive_queue)) {
2148-
__mptcp_update_rmem(sk);
2149-
ret |= __mptcp_ofo_queue(msk);
2150-
}
2066+
ret = moved > 0 || __mptcp_ofo_queue(msk);
21512067
if (ret)
21522068
mptcp_check_data_fin((struct sock *)msk);
21532069
return ret;
@@ -2813,8 +2729,6 @@ static void __mptcp_init_sock(struct sock *sk)
28132729
INIT_WORK(&msk->work, mptcp_worker);
28142730
msk->out_of_order_queue = RB_ROOT;
28152731
msk->first_pending = NULL;
2816-
WRITE_ONCE(msk->rmem_fwd_alloc, 0);
2817-
WRITE_ONCE(msk->rmem_released, 0);
28182732
msk->timer_ival = TCP_RTO_MIN;
28192733
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
28202734

@@ -3040,8 +2954,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
30402954

30412955
sk->sk_prot->destroy(sk);
30422956

3043-
WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc));
3044-
WARN_ON_ONCE(msk->rmem_released);
30452957
sk_stream_kill_queues(sk);
30462958
xfrm_sk_free_policy(sk);
30472959

@@ -3399,8 +3311,6 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
33993311
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
34003312
* inet_sock_destruct() will dispose it
34013313
*/
3402-
sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
3403-
WRITE_ONCE(msk->rmem_fwd_alloc, 0);
34043314
mptcp_token_destroy(msk);
34053315
mptcp_pm_free_anno_list(msk);
34063316
mptcp_free_local_addr_list(msk);
@@ -3496,8 +3406,6 @@ static void mptcp_release_cb(struct sock *sk)
34963406
if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
34973407
__mptcp_sync_sndbuf(sk);
34983408
}
3499-
3500-
__mptcp_update_rmem(sk);
35013409
}
35023410

35033411
/* MP_JOIN client subflow must wait for 4th ack before sending any data:
@@ -3668,12 +3576,6 @@ static void mptcp_shutdown(struct sock *sk, int how)
36683576
__mptcp_wr_shutdown(sk);
36693577
}
36703578

3671-
static int mptcp_forward_alloc_get(const struct sock *sk)
3672-
{
3673-
return READ_ONCE(sk->sk_forward_alloc) +
3674-
READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
3675-
}
3676-
36773579
static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
36783580
{
36793581
const struct sock *sk = (void *)msk;
@@ -3832,7 +3734,6 @@ static struct proto mptcp_prot = {
38323734
.hash = mptcp_hash,
38333735
.unhash = mptcp_unhash,
38343736
.get_port = mptcp_get_port,
3835-
.forward_alloc_get = mptcp_forward_alloc_get,
38363737
.stream_memory_free = mptcp_stream_memory_free,
38373738
.sockets_allocated = &mptcp_sockets_allocated,
38383739

net/mptcp/protocol.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -278,7 +278,6 @@ struct mptcp_sock {
278278
u64 rcv_data_fin_seq;
279279
u64 bytes_retrans;
280280
u64 bytes_consumed;
281-
int rmem_fwd_alloc;
282281
int snd_burst;
283282
int old_wspace;
284283
u64 recovery_snd_nxt; /* in recovery mode accept up to this seq;
@@ -293,7 +292,6 @@ struct mptcp_sock {
293292
u32 last_ack_recv;
294293
unsigned long timer_ival;
295294
u32 token;
296-
int rmem_released;
297295
unsigned long flags;
298296
unsigned long cb_flags;
299297
bool recovery; /* closing subflow write queue reinjected */
@@ -384,7 +382,7 @@ static inline void msk_owned_by_me(const struct mptcp_sock *msk)
384382
*/
385383
static inline int __mptcp_rmem(const struct sock *sk)
386384
{
387-
return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released);
385+
return atomic_read(&sk->sk_rmem_alloc);
388386
}
389387

390388
static inline int mptcp_win_from_space(const struct sock *sk, int space)

0 commit comments

Comments
 (0)