Blame view
net/dccp/timer.c
7.08 KB
2874c5fd2
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
7c657876b
|
2 3 |
/* * net/dccp/timer.c |
8109b02b5
|
4 |
* |
7c657876b
|
5 6 |
* An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
7c657876b
|
7 |
*/ |
7c657876b
|
8 9 |
#include <linux/dccp.h> #include <linux/skbuff.h> |
bc3b2d7fb
|
10 |
#include <linux/export.h> |
7c657876b
|
11 12 |
#include "dccp.h" |
2e2e9e92b
|
/* sysctl variables governing numbers of retransmission attempts */
/* Retries for the initial Request (handshake); seeded from TCP's default. */
int  sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;
/* Threshold after which the cached route is re-evaluated (cf. tcp_retries1). */
int  sysctl_dccp_retries1	 __read_mostly = TCP_RETR1;
/* Hard limit on retransmissions for an established connection. */
int  sysctl_dccp_retries2	 __read_mostly = TCP_RETR2;
7c657876b
|
17 18 19 20 |
static void dccp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); |
017487d7d
|
21 |
dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); |
7c657876b
|
22 |
dccp_done(sk); |
aa62d76b6
|
23 |
__DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT); |
7c657876b
|
24 25 26 27 28 29 30 31 32 33 |
} /* A write timeout has occurred. Process the after effects. */ static int dccp_write_timeout(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { if (icsk->icsk_retransmits != 0) |
b6c6712a4
|
34 |
dst_negative_advice(sk); |
2e2e9e92b
|
35 36 |
retry_until = icsk->icsk_syn_retries ? : sysctl_dccp_request_retries; |
7c657876b
|
37 |
} else { |
2e2e9e92b
|
38 |
if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { |
7690af3ff
|
39 40 |
/* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black hole detection. :-( |
7c657876b
|
41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
It is place to make it. It is not made. I do not want to make it. It is disguisting. It does not work in any case. Let me to cite the same draft, which requires for us to implement this: "The one security concern raised by this memo is that ICMP black holes are often caused by over-zealous security administrators who block all ICMP messages. It is vitally important that those who design and deploy security systems understand the impact of strict filtering on upper-layer protocols. The safest web site in the world is worthless if most TCP implementations cannot transfer data from it. It would be far nicer to have all of the black holes fixed rather than fixing all of the TCP implementations." |
c9eaf1734
|
55 |
Golden words :-). |
7c657876b
|
56 |
*/ |
b6c6712a4
|
57 |
dst_negative_advice(sk); |
7c657876b
|
58 |
} |
2e2e9e92b
|
59 |
retry_until = sysctl_dccp_retries2; |
7c657876b
|
60 61 62 63 64 65 66 67 68 69 70 71 |
/* * FIXME: see tcp_write_timout and tcp_out_of_resources */ } if (icsk->icsk_retransmits >= retry_until) { /* Has it gone just too far? */ dccp_write_err(sk); return 1; } return 0; } |
7c657876b
|
/*
 * The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
	 * sent, no need to retransmit, this sock is dead.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		/* Undo the increment done by dccp_retransmit_skb(), but never
		 * let the counter drop to 0 — that would reset the "first
		 * retransmission" accounting above.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		/* Re-arm quickly (capped probe interval) instead of backing off. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	/* Successful (re)queue: exponential backoff of the RTO, capped at
	 * DCCP_RTO_MAX, then re-arm the retransmit timer.
	 */
	icsk->icsk_backoff++;
	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	/* Past retries1: distrust the cached route (cf. tcp_retransmit_timer). */
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
59f379f90
|
115 |
static void dccp_write_timer(struct timer_list *t) |
7c657876b
|
116 |
{ |
59f379f90
|
117 118 119 |
struct inet_connection_sock *icsk = from_timer(icsk, t, icsk_retransmit_timer); struct sock *sk = &icsk->icsk_inet.sk; |
7c657876b
|
120 121 122 123 124 |
int event = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later */ |
7690af3ff
|
125 126 |
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); |
7c657876b
|
127 128 129 130 131 132 133 |
goto out; } if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { |
7690af3ff
|
134 135 |
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); |
7c657876b
|
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 |
goto out; } event = icsk->icsk_pending; icsk->icsk_pending = 0; switch (event) { case ICSK_TIME_RETRANS: dccp_retransmit_timer(sk); break; } out: bh_unlock_sock(sk); sock_put(sk); } |
59f379f90
|
151 |
static void dccp_keepalive_timer(struct timer_list *t) |
7c657876b
|
152 |
{ |
59f379f90
|
153 |
struct sock *sk = from_timer(sk, t, sk_timer); |
7c657876b
|
154 |
|
fa76ce732
|
155 156 |
pr_err("dccp should not use a keepalive timer ! "); |
7c657876b
|
157 158 |
sock_put(sk); } |
4ed800d02
|
/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
/*
 * Delayed-ACK timer callback (BH context).  Either defers itself (socket
 * busy or deadline not yet reached) or sends the overdue ACK, adjusting
 * the ACK-timeout estimate (ato) and pingpong mode on the way.
 */
static void dccp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	/* Nothing to do if closed or no delayed-ACK timer is armed. */
	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		/* Fired early — re-arm to the actual deadline. */
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
dc841e30e
|
206 207 |
/** * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface |
d0b1101bb
|
208 209 |
* @data: Socket to act on * |
dc841e30e
|
210 211 212 |
* See the comments above %ccid_dequeueing_decision for supported modes. */ static void dccp_write_xmitlet(unsigned long data) |
aabb601b0
|
213 214 |
{ struct sock *sk = (struct sock *)data; |
aabb601b0
|
215 216 217 |
bh_lock_sock(sk); if (sock_owned_by_user(sk)) |
dc841e30e
|
218 |
sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); |
aabb601b0
|
219 |
else |
b1fcf55ee
|
220 |
dccp_write_xmit(sk); |
aabb601b0
|
221 |
bh_unlock_sock(sk); |
a8d7aa17b
|
222 |
sock_put(sk); |
aabb601b0
|
223 |
} |
839a60941
|
224 |
static void dccp_write_xmit_timer(struct timer_list *t) |
aabb601b0
|
225 |
{ |
839a60941
|
226 227 228 229 |
struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer); struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; dccp_write_xmitlet((unsigned long)sk); |
aabb601b0
|
230 |
} |
4ed800d02
|
231 232 |
void dccp_init_xmit_timers(struct sock *sk) { |
dc841e30e
|
233 234 235 |
struct dccp_sock *dp = dccp_sk(sk); tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); |
839a60941
|
236 |
timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0); |
4ed800d02
|
237 238 239 |
inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } |
4c70f383e
|
/* Reference point for dccp_timestamp(); set once at module init. */
static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is native
 * DCCP time difference format (RFC 4340, sec. 13).
 * Please note: This will wrap around about circa every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	/* Microseconds elapsed since the seed, as an unsigned 64-bit value. */
	u64 delta = (u64)ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	/* do_div() divides in place: convert us -> units of 10 us. */
	do_div(delta, 10);
	/* Truncation to u32 provides the documented ~11.9 h wraparound. */
	return delta;
}

EXPORT_SYMBOL_GPL(dccp_timestamp);

/* Record the epoch used by dccp_timestamp(); called once at boot/init. */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}