@@ -113,7 +113,6 @@ struct inet_connection_sock {
 			  lrcv_flowlabel:20, /* last received ipv6 flowlabel */
 			  dst_quick_ack:1,  /* cache dst RTAX_QUICKACK */
 			  unused:3;
-		unsigned long	  timeout;	 /* Currently scheduled timeout */
 		__u32		  lrcvtime;	 /* timestamp of last received data packet */
 		__u16		  last_seg_size; /* Size of last incoming segment */
 		__u16		  rcv_mss;	 /* MSS used for delayed ACK decisions */
@@ -191,6 +190,12 @@ icsk_timeout(const struct inet_connection_sock *icsk)
 	return READ_ONCE(icsk->icsk_retransmit_timer.expires);
 }
 
+static inline unsigned long
+icsk_delack_timeout(const struct inet_connection_sock *icsk)
+{
+	return READ_ONCE(icsk->icsk_delack_timer.expires);
+}
+
 static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
@@ -226,16 +231,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 		when = max_when;
 	}
 
+	when += jiffies;
 	if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
 	    what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) {
 		smp_store_release(&icsk->icsk_pending, what);
-		when += jiffies;
 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, when);
 	} else if (what == ICSK_TIME_DACK) {
 		smp_store_release(&icsk->icsk_ack.pending,
 				  icsk->icsk_ack.pending | ICSK_ACK_TIMER);
-		icsk->icsk_ack.timeout = jiffies + when;
-		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
+		sk_reset_timer(sk, &icsk->icsk_delack_timer, when);
 	} else {
 		pr_debug("inet_csk BUG: unknown timer value\n");
 	}
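
With icsk_ack.timeout removed, the only copy of the delayed-ACK deadline is the timer itself, exposed through the new icsk_delack_timeout() accessor (the delayed-ACK counterpart of icsk_timeout()). A minimal sketch of how a reader of that deadline might use the accessor; the helper name delack_remaining_jiffies() is hypothetical and not part of this commit:

/* Hypothetical usage sketch, not part of this commit. */
#include <linux/jiffies.h>
#include <net/inet_connection_sock.h>

static unsigned long delack_remaining_jiffies(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long expires = icsk_delack_timeout(icsk);

	/* The deadline is only meaningful while ICSK_ACK_TIMER is armed. */
	if (!(READ_ONCE(icsk->icsk_ack.pending) & ICSK_ACK_TIMER))
		return 0;

	return time_after(expires, jiffies) ? expires - jiffies : 0;
}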