Commit b20a7ca

Merge branch 'sysctl-races-part-5'
Kuniyuki Iwashima says:

====================
sysctl: Fix data-races around ipv4_net_table (Round 5).

This series fixes data-races around 15 knobs after tcp_dsack in
ipv4_net_table.

tcp_tso_win_divisor was skipped because it already uses READ_ONCE().

So, the final round for ipv4_net_table will start with
tcp_pacing_ss_ratio.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents ebbbe23 + 2afdbe7 commit b20a7ca
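
For readers coming to this series cold, the pattern being applied is small: each sysctl knob is an int that the writer (the sysctl proc handler) can change at any time while readers on the TCP fast path load it without any lock, so the lockless reads are annotated with READ_ONCE() to keep the compiler from tearing, fusing, or re-fetching the access. The sketch below is a self-contained userspace illustration of that idea, not kernel code: the READ_ONCE()/WRITE_ONCE() macros are simplified stand-ins for the kernel's volatile-access helpers, and struct fake_netns_ipv4 with its helper functions is invented for the example.

/*
 * Minimal sketch of the lockless-sysctl pattern (illustrative only,
 * not the kernel implementation). READ_ONCE()/WRITE_ONCE() below are
 * simplified volatile-access macros in the spirit of the kernel's.
 */
#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

/* Stand-in for the per-netns sysctl storage (hypothetical struct). */
struct fake_netns_ipv4 {
	int sysctl_tcp_dsack;
};

static struct fake_netns_ipv4 net_ipv4 = { .sysctl_tcp_dsack = 1 };

/* Writer side: what a sysctl proc handler would do when the knob changes. */
static void sysctl_set_tcp_dsack(int val)
{
	WRITE_ONCE(net_ipv4.sysctl_tcp_dsack, val);
}

/* Reader side: a lockless fast-path read, loaded exactly once. */
static int tcp_dsack_enabled(void)
{
	return READ_ONCE(net_ipv4.sysctl_tcp_dsack);
}

int main(void)
{
	sysctl_set_tcp_dsack(0);
	printf("tcp_dsack = %d\n", tcp_dsack_enabled());
	return 0;
}

Flipping the knob from another thread while the reader runs would be the userspace analogue of `sysctl -w net.ipv4.tcp_dsack=0` racing with the receive path; the diffs below annotate exactly those reader sides (typeof is a GCC/Clang extension, so build the sketch with gcc or clang).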

File tree

7 files changed, +23 −22 lines:

include/net/tcp.h
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_output.c
net/mptcp/options.c
net/mptcp/protocol.c

include/net/tcp.h

Lines changed: 1 addition & 1 deletion
@@ -1419,7 +1419,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
 
 static inline int tcp_win_from_space(const struct sock *sk, int space)
 {
-	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
+	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
 
 	return tcp_adv_win_scale <= 0 ?
 		(space>>(-tcp_adv_win_scale)) :

net/ipv4/tcp.c

Lines changed: 1 addition & 1 deletion
@@ -686,7 +686,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 			       int size_goal)
 {
 	return skb->len < size_goal &&
-	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
+	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
 	       !tcp_rtx_queue_empty(sk) &&
 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
 	       tcp_skb_can_collapse_to(skb);

net/ipv4/tcp_input.c

Lines changed: 9 additions & 8 deletions
@@ -534,7 +534,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
  */
 static void tcp_init_buffer_space(struct sock *sk)
 {
-	int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
+	int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int maxwin;
 
@@ -724,7 +724,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 	 * <prev RTT . ><current RTT .. ><next RTT .... >
 	 */
 
-	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 		int rcvmem, rcvbuf;
 		u64 rcvwin, grow;
@@ -2175,7 +2175,7 @@ void tcp_enter_loss(struct sock *sk)
 	 * loss recovery is underway except recurring timeout(s) on
 	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
 	 */
-	tp->frto = net->ipv4.sysctl_tcp_frto &&
+	tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
 		   (new_recovery || icsk->icsk_retransmits) &&
 		   !inet_csk(sk)->icsk_mtup.probe_size;
 }
@@ -3058,7 +3058,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
 {
-	u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
+	u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
@@ -3581,7 +3581,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
 	if (*last_oow_ack_time) {
 		s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 
-		if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
+		if (0 <= elapsed &&
+		    elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
 			NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
@@ -3629,7 +3630,7 @@ static void tcp_send_challenge_ack(struct sock *sk)
 	/* Then check host-wide RFC 5961 rate limit. */
 	now = jiffies / HZ;
 	if (now != challenge_timestamp) {
-		u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
+		u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
 		u32 half = (ack_limit + 1) >> 1;
 
 		challenge_timestamp = now;
@@ -4426,7 +4427,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+	if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
 		int mib_idx;
 
 		if (before(seq, tp->rcv_nxt))
@@ -4473,7 +4474,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
 
-		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
+		if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 			tcp_rcv_spurious_retrans(sk, skb);

net/ipv4/tcp_metrics.c

Lines changed: 5 additions & 5 deletions
@@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
 	int m;
 
 	sk_dst_confirm(sk);
-	if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
+	if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
 		return;
 
 	rcu_read_lock();
@@ -385,7 +385,7 @@ void tcp_update_metrics(struct sock *sk)
 
 	if (tcp_in_initial_slowstart(tp)) {
 		/* Slow start still did not finish. */
-		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
@@ -401,7 +401,7 @@ void tcp_update_metrics(struct sock *sk)
 	} else if (!tcp_in_slow_start(tp) &&
 		   icsk->icsk_ca_state == TCP_CA_Open) {
 		/* Cong. avoidance phase, cwnd is reliable. */
-		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
 				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
@@ -418,7 +418,7 @@ void tcp_update_metrics(struct sock *sk)
 			tcp_metric_set(tm, TCP_METRIC_CWND,
 				       (val + tp->snd_ssthresh) >> 1);
 		}
-		if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save &&
+		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 			if (val && tp->snd_ssthresh > val)
@@ -463,7 +463,7 @@ void tcp_init_metrics(struct sock *sk)
 	if (tcp_metric_locked(tm, TCP_METRIC_CWND))
 		tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
 
-	val = net->ipv4.sysctl_tcp_no_ssthresh_metrics_save ?
+	val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ?
 	      0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
 	if (val) {
 		tp->snd_ssthresh = val;

net/ipv4/tcp_output.c

Lines changed: 5 additions & 5 deletions
@@ -230,7 +230,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 	 * which we interpret as a sign the remote TCP is not
 	 * misinterpreting the window field as a signed quantity.
 	 */
-	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 	else
 		(*rcv_wnd) = min_t(u32, space, U16_MAX);
@@ -285,7 +285,7 @@ static u16 tcp_select_window(struct sock *sk)
 	 * scaled window.
 	 */
 	if (!tp->rx_opt.rcv_wscale &&
-	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 		new_win = min(new_win, MAX_TCP_WINDOW);
 	else
 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
@@ -1976,7 +1976,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 
 	bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
 
-	r = tcp_min_rtt(tcp_sk(sk)) >> sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log;
+	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
 	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
 		bytes += sk->sk_gso_max_size >> r;
 
@@ -1995,7 +1995,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 
 	min_tso = ca_ops->min_tso_segs ?
 		  ca_ops->min_tso_segs(sk) :
-		  sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
+		  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
 
 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
@@ -2507,7 +2507,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
 	if (sk->sk_pacing_status == SK_PACING_NONE)
 		limit = min_t(unsigned long, limit,
-			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
 	limit <<= factor;
 
 	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&

net/mptcp/options.c

Lines changed: 1 addition & 1 deletion
@@ -1271,7 +1271,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th)
 	if (unlikely(th->syn))
 		new_win = min(new_win, 65535U) << tp->rx_opt.rcv_wscale;
 	if (!tp->rx_opt.rcv_wscale &&
-	    sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows)
+	    READ_ONCE(sock_net(ssk)->ipv4.sysctl_tcp_workaround_signed_windows))
 		new_win = min(new_win, MAX_TCP_WINDOW);
 	else
 		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

net/mptcp/protocol.c

Lines changed: 1 addition & 1 deletion
@@ -1908,7 +1908,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
 		goto new_measure;
 
-	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 		int rcvmem, rcvbuf;
 		u64 rcvwin, grow;
