net/ipv4/inet_timewait_sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 * inet_twsk_unhash - unhash a timewait socket from the established hash
 * @tw: timewait socket
 *
 * Unhashes a timewait socket from the established hash, if hashed.
 * The ehash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
        if (hlist_nulls_unhashed(&tw->tw_node))
                return 0;

        hlist_nulls_del_rcu(&tw->tw_node);
        sk_nulls_node_init(&tw->tw_node);
        /*
         * We cannot call inet_twsk_put() ourself under lock,
         * caller must call it for us.
         */
        return 1;
}

/**
 * inet_twsk_bind_unhash - unhash a timewait socket from the bind hash
 * @tw: timewait socket
 * @hashinfo: hashinfo pointer
 *
 * Unhashes a timewait socket from the bind hash, if hashed.
 * The bind hash lock must be held by the caller.
 * Returns 1 if the caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
{
        struct inet_bind_bucket *tb = tw->tw_tb;

        if (!tb)
                return 0;

        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
        /*
         * We cannot call inet_twsk_put() ourself under lock,
         * caller must call it for us.
         */
        return 1;
}
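
/*
 * Unlink @tw from both the established and bind hash chains and drop the
 * reference that each link held; tw_refcnt can reach zero here, in which
 * case the socket is freed via inet_twsk_put().
 */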
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                             struct inet_hashinfo *hashinfo)
{
        struct inet_bind_hashbucket *bhead;
        int refcnt;
        /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

        spin_lock(lock);
        refcnt = inet_twsk_unhash(tw);
        spin_unlock(lock);

        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];

        spin_lock(&bhead->lock);
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
        if (atomic_read(&tw->tw_refcnt) != 1) {
                printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
                       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
        }
#endif
        while (refcnt) {
                inet_twsk_put(tw);
                refcnt--;
        }
}
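
/*
 * Final destruction of a timewait socket, reached once tw_refcnt drops to
 * zero: runs the protocol's twsk destructor, releases the netns reference
 * taken in inet_twsk_alloc(), frees the slab object and drops the module
 * reference pinned by __module_get() there.
 */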
static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
        pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
        release_net(twsk_net(tw));
        kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
        module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
        if (atomic_dec_and_test(&tw->tw_refcnt))
                inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
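
/*
 * Reference counting, as set up below: inet_twsk_alloc() returns a tw with
 * tw_refcnt == 0, and __inet_twsk_hashdance() then takes all three
 * references in one go - one for the bhash link, one for the ehash link,
 * and (reading the 1 + 1 + 1 below) one that belongs to the caller, which
 * normally drops it with inet_twsk_put() once scheduling is done.
 */
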
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
        struct inet_bind_hashbucket *bhead;

        /* Step 1: Put TW into bind hash. Original socket stays there too.
         * Note that any socket with inet->inet_num != 0 MUST be bound in
         * the binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
        spin_lock(&bhead->lock);
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
        spin_unlock(&bhead->lock);

        spin_lock(lock);

        /*
         * Step 2: Hash TW into the TIMEWAIT chain.
         * Should be done before removing sk from the established chain
         * because readers are lockless and search the established chain
         * first.
         */
        inet_twsk_add_node_rcu(tw, &ehead->twchain);

        /* Step 3: Remove SK from the established hash. */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

        /*
         * Notes:
         * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
         * - We add one reference for the bhash link
         * - We add one reference for the ehash link
         * - We want this refcnt update done before allowing other
         *   threads to find this tw in the ehash chain.
         */
        atomic_add(1 + 1 + 1, &tw->tw_refcnt);

        spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
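
/*
 * inet_twsk_alloc - allocate a timewait socket and copy the identity of
 * @sk into it. The returned socket deliberately starts with
 * tw_refcnt == 0 (see the comment at the atomic_set() call); it only
 * becomes visible to other CPUs once __inet_twsk_hashdance() publishes it
 * and bumps the refcount.
 *
 * A typical TIME_WAIT entry sequence (a sketch - in mainline this is done
 * by tcp_time_wait() in net/ipv4/tcp_minisocks.c):
 *
 *      tw = inet_twsk_alloc(sk, state);
 *      if (tw != NULL) {
 *              ... fill in protocol specific fields of tw ...
 *              __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *              inet_twsk_schedule(tw, &tcp_death_row, timeo,
 *                                 TCP_TIMEWAIT_LEN);
 *              inet_twsk_put(tw);
 *      }
 */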
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
        struct inet_timewait_sock *tw =
                kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
                                 GFP_ATOMIC);
        if (tw != NULL) {
                const struct inet_sock *inet = inet_sk(sk);

                kmemcheck_annotate_bitfield(tw, flags);

                /* Give us an identity. */
                tw->tw_daddr        = inet->inet_daddr;
                tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
                tw->tw_bound_dev_if = sk->sk_bound_dev_if;
                tw->tw_tos          = inet->tos;
                tw->tw_num          = inet->inet_num;
                tw->tw_state        = TCP_TIME_WAIT;
                tw->tw_substate     = state;
                tw->tw_sport        = inet->inet_sport;
                tw->tw_dport        = inet->inet_dport;
                tw->tw_family       = sk->sk_family;
                tw->tw_reuse        = sk->sk_reuse;
                tw->tw_hash         = sk->sk_hash;
                tw->tw_ipv6only     = 0;
                tw->tw_transparent  = inet->transparent;
                tw->tw_prot         = sk->sk_prot_creator;
                twsk_net_set(tw, hold_net(sock_net(sk)));
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non-null value before everything is set up for this
                 * timewait socket.
                 */
                atomic_set(&tw->tw_refcnt, 0);
                inet_twsk_dead_node_init(tw);
                __module_get(tw->tw_prot->owner);
        }

        return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
                                    const int slot)
{
        struct inet_timewait_sock *tw;
        struct hlist_node *node;
        unsigned int killed;
        int ret;

        /* NOTE: compare this to the previous version where the lock was
         * released after detaching the chain. It was racy, because tw
         * buckets are scheduled in a non-serialized context in 2.3 (with
         * netfilter), and with softnet it is common, because soft irqs
         * are not sequenced.
         */
        killed = 0;
        ret = 0;
rescan:
        inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
                __inet_twsk_del_dead_node(tw);
                spin_unlock(&twdr->death_lock);
                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
                inet_twsk_put(tw);
                killed++;
                spin_lock(&twdr->death_lock);
                if (killed > INET_TWDR_TWKILL_QUOTA) {
                        ret = 1;
                        break;
                }

                /* While we dropped twdr->death_lock, another cpu may have
                 * killed off the next TW bucket in the list, therefore
                 * do a fresh re-read of the hlist head node with the
                 * lock reacquired.  We still use the hlist traversal
                 * macro in order to get the prefetches.
                 */
                goto rescan;
        }

        twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
        return ret;
}

void inet_twdr_hangman(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        unsigned int need_timer;

        twdr = (struct inet_timewait_death_row *)data;
        spin_lock(&twdr->death_lock);

        if (twdr->tw_count == 0)
                goto out;

        need_timer = 0;
        if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
                twdr->thread_slots |= (1 << twdr->slot);
                schedule_work(&twdr->twkill_work);
                need_timer = 1;
        } else {
                /* We purged the entire slot, anything left? */
                if (twdr->tw_count)
                        need_timer = 1;
                twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
        }
        if (need_timer)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
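
/*
 * Deferred kill work: inet_twdr_hangman() flags a slot in thread_slots and
 * schedules this work item whenever a single timer run hits the
 * INET_TWDR_TWKILL_QUOTA limit. Every flagged slot is drained here, with
 * the death_lock dropped and the CPU yielded between quota-sized batches.
 */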
void inet_twdr_twkill_work(struct work_struct *work)
{
        struct inet_timewait_death_row *twdr =
                container_of(work, struct inet_timewait_death_row, twkill_work);
        int i;

        BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
                        (sizeof(twdr->thread_slots) * 8));

        while (twdr->thread_slots) {
                spin_lock_bh(&twdr->death_lock);
                for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
                        if (!(twdr->thread_slots & (1 << i)))
                                continue;

                        while (inet_twdr_do_twkill_work(twdr, i) != 0) {
                                if (need_resched()) {
                                        spin_unlock_bh(&twdr->death_lock);
                                        schedule();
                                        spin_lock_bh(&twdr->death_lock);
                                }
                        }

                        twdr->thread_slots &= ~(1 << i);
                }
                spin_unlock_bh(&twdr->death_lock);
        }
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
                          struct inet_timewait_death_row *twdr)
{
        spin_lock(&twdr->death_lock);
        if (inet_twsk_del_dead_node(tw)) {
                inet_twsk_put(tw);
                if (--twdr->tw_count == 0)
                        del_timer(&twdr->tw_timer);
        }
        spin_unlock(&twdr->death_lock);
        __inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        struct inet_timewait_death_row *twdr,
                        const int timeo, const int timewait_len)
{
        struct hlist_head *list;
        int slot;

        /* timeout := RTO * 3.5
         *
         * 3.5 = 1+2+0.5 to wait for two retransmits.
         *
         * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
         * our ACK acking that FIN can be lost. If N subsequent retransmitted
         * FINs (or previous segments) are lost (the probability of such an
         * event is p^(N+1), where p is the probability to lose a single
         * packet and the time to detect the loss is about RTO*(2^N - 1)
         * with exponential backoff). The normal timewait length is
         * calculated so that we wait at least for one retransmitted FIN
         * (the maximal RTO is 120 sec).
         * [ BTW, Linux, following BSD, violates this requirement by waiting
         *   only for 60 sec; we should wait at least 240 secs.
         *   Well, 240 consumes too much of resources 8)
         * ]
         * This interval is not reduced to catch old duplicates and
         * responses to our wandering segments living for two MSLs.
         * However, if we use PAWS to detect
         * old duplicates, we can reduce the interval to bounds required
         * by RTO, rather than MSL. So, if the peer understands PAWS, we
         * kill the tw bucket after 3.5*RTO (it is important that this number
         * is greater than the TS tick!) and detect old duplicates with the
         * help of PAWS.
         */
        slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

        spin_lock(&twdr->death_lock);

        /* Unlink it, if it was scheduled */
        if (inet_twsk_del_dead_node(tw))
                twdr->tw_count--;
        else
                atomic_inc(&tw->tw_refcnt);

        if (slot >= INET_TWDR_RECYCLE_SLOTS) {
                /* Schedule to the slow timer */
                if (timeo >= timewait_len) {
                        slot = INET_TWDR_TWKILL_SLOTS - 1;
                } else {
                        slot = DIV_ROUND_UP(timeo, twdr->period);
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
                tw->tw_ttd = jiffies + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
                tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
                        twdr->twcal_jiffie = jiffies;
                        twdr->twcal_timer.expires = twdr->twcal_jiffie +
                                        (slot << INET_TWDR_RECYCLE_TICK);
                        add_timer(&twdr->twcal_timer);
                } else {
                        if (time_after(twdr->twcal_timer.expires,
                                       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
                                mod_timer(&twdr->twcal_timer,
                                          jiffies + (slot << INET_TWDR_RECYCLE_TICK));
                        slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
                }
                list = &twdr->twcal_row[slot];
        }

        hlist_add_head(&tw->tw_death_node, list);

        if (twdr->tw_count++ == 0)
                mod_timer(&twdr->tw_timer, jiffies + twdr->period);
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
        struct inet_timewait_death_row *twdr;
        int n, slot;
        unsigned long j;
        unsigned long now = jiffies;
        int killed = 0;
        int adv = 0;

        twdr = (struct inet_timewait_death_row *)data;

        spin_lock(&twdr->death_lock);
        if (twdr->twcal_hand < 0)
                goto out;

        slot = twdr->twcal_hand;
        j = twdr->twcal_jiffie;

        for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
                if (time_before_eq(j, now)) {
                        struct hlist_node *node, *safe;
                        struct inet_timewait_sock *tw;

                        inet_twsk_for_each_inmate_safe(tw, node, safe,
                                                       &twdr->twcal_row[slot]) {
                                __inet_twsk_del_dead_node(tw);
                                __inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
                                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
                                inet_twsk_put(tw);
                                killed++;
                        }
                } else {
                        if (!adv) {
                                adv = 1;
                                twdr->twcal_jiffie = j;
                                twdr->twcal_hand = slot;
                        }

                        if (!hlist_empty(&twdr->twcal_row[slot])) {
                                mod_timer(&twdr->twcal_timer, j);
                                goto out;
                        }
                }
                j += 1 << INET_TWDR_RECYCLE_TICK;
                slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
        }
        twdr->twcal_hand = -1;

out:
        if ((twdr->tw_count -= killed) == 0)
                del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
        NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
        spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
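
/*
 * inet_twsk_purge - kill every remaining timewait socket of @family whose
 * network namespace has already been released (netns refcount is zero).
 * Walks each ehash slot under RCU, pins candidates with
 * atomic_inc_not_zero(), and deschedules them with BHs disabled.
 */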
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family)
{
        struct inet_timewait_sock *tw;
        struct sock *sk;
        struct hlist_nulls_node *node;
        unsigned int slot;

        for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
                struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
                rcu_read_lock();
restart:
                sk_nulls_for_each_rcu(sk, node, &head->twchain) {
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                            atomic_read(&twsk_net(tw)->count))
                                continue;

                        if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
                                continue;

                        if (unlikely((tw->tw_family != family) ||
                                     atomic_read(&twsk_net(tw)->count))) {
                                inet_twsk_put(tw);
                                goto restart;
                        }

                        rcu_read_unlock();
                        local_bh_disable();
                        inet_twsk_deschedule(tw, twdr);
                        local_bh_enable();
                        inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
                 * not the expected one, we must restart the lookup.
                 * We probably met an item that was moved to another chain.
                 */
                if (get_nulls_value(node) != slot)
                        goto restart;
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);