Blame view
net/sctp/associola.c
48.7 KB
47505b8bc
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
60c778b25
|
2 |
/* SCTP kernel implementation |
1da177e4c
|
3 4 5 6 7 8 |
* (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 La Monte H.P. Yarroll * |
60c778b25
|
9 |
* This file is part of the SCTP kernel implementation |
1da177e4c
|
10 11 12 |
* * This module provides the abstraction for an SCTP association. * |
1da177e4c
|
13 14 |
* Please send any bug reports or fixes you make to the * email address(es): |
91705c61b
|
15 |
* lksctp developers <linux-sctp@vger.kernel.org> |
1da177e4c
|
16 |
* |
1da177e4c
|
17 18 19 20 21 22 23 24 25 26 |
* Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Xingang Guo <xingang.guo@intel.com> * Hui Huang <hui.huang@nokia.com> * Sridhar Samudrala <sri@us.ibm.com> * Daisy Chang <daisyc@us.ibm.com> * Ryan Layer <rmlayer@us.ibm.com> * Kevin Gao <kevin.gao@intel.com> |
1da177e4c
|
27 |
*/ |
145ce502e
|
28 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
1da177e4c
|
29 30 31 32 |
#include <linux/types.h> #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> |
1da177e4c
|
33 34 35 36 37 38 39 40 |
#include <linux/slab.h> #include <linux/in.h> #include <net/ipv6.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> /* Forward declarations for internal functions. */ |
b82e8f31a
|
41 |
static void sctp_select_active_and_retran_path(struct sctp_association *asoc); |
c4028958b
|
42 |
static void sctp_assoc_bh_rcv(struct work_struct *work); |
a08de64d0
|
43 |
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); |
8b4472cc1
|
44 |
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); |
1da177e4c
|
45 |
|
1da177e4c
|
46 47 48 |
/* 1st Level Abstractions. */ /* Initialize a new association from provided memory. */ |
1c662018d
|
49 50 51 52 53 |
static struct sctp_association *sctp_association_init( struct sctp_association *asoc, const struct sctp_endpoint *ep, const struct sock *sk, enum sctp_scope scope, gfp_t gfp) |
1da177e4c
|
54 55 |
{ struct sctp_sock *sp; |
3c9187049
|
56 |
struct sctp_paramhdr *p; |
581947787
|
57 |
int i; |
1da177e4c
|
58 59 60 |
/* Retrieve the SCTP per socket area. */ sp = sctp_sk((struct sock *)sk); |
1da177e4c
|
61 62 |
/* Discarding const is appropriate here. */ asoc->ep = (struct sctp_endpoint *)ep; |
1da177e4c
|
63 |
asoc->base.sk = (struct sock *)sk; |
312434617
|
64 |
asoc->base.net = sock_net(sk); |
2e0c9e791
|
65 66 |
sctp_endpoint_hold(asoc->ep); |
1da177e4c
|
67 68 69 70 71 72 |
sock_hold(asoc->base.sk); /* Initialize the common base substructure. */ asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; /* Initialize the object handling fields. */ |
c638457a7
|
73 |
refcount_set(&asoc->base.refcnt, 1); |
1da177e4c
|
74 75 76 |
/* Initialize the bind addr area. */ sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); |
1da177e4c
|
77 78 |
asoc->state = SCTP_STATE_CLOSED; |
52db882f3
|
79 |
asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life); |
f68b2e05f
|
80 |
asoc->user_frag = sp->user_frag; |
1da177e4c
|
81 82 83 84 85 |
/* Set the association max_retrans and RTO values from the * socket values. */ asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; |
8add543e3
|
86 |
asoc->pf_retrans = sp->pf_retrans; |
34515e94c
|
87 |
asoc->ps_retrans = sp->ps_retrans; |
aef587be4
|
88 |
asoc->pf_expose = sp->pf_expose; |
5aa93bcf6
|
89 |
|
1da177e4c
|
90 91 92 |
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); |
52ccb8e90
|
93 94 95 96 97 98 99 |
/* Initialize the association's heartbeat interval based on the * sock configured value. */ asoc->hbinterval = msecs_to_jiffies(sp->hbinterval); /* Initialize path max retrans value. */ asoc->pathmaxrxt = sp->pathmaxrxt; |
8a9c58d28
|
100 101 |
asoc->flowlabel = sp->flowlabel; asoc->dscp = sp->dscp; |
52ccb8e90
|
102 103 |
/* Set association default SACK delay */ asoc->sackdelay = msecs_to_jiffies(sp->sackdelay); |
d364d9276
|
104 |
asoc->sackfreq = sp->sackfreq; |
52ccb8e90
|
105 106 107 108 109 |
/* Set the association default flags controlling * Heartbeat, SACK delay, and Path MTU Discovery. */ asoc->param_flags = sp->param_flags; |
9d2c881af
|
110 |
/* Initialize the maximum number of new data packets that can be sent |
1da177e4c
|
111 112 |
* in a burst. */ |
703315712
|
113 |
asoc->max_burst = sp->max_burst; |
1da177e4c
|
114 |
|
a1e3a0590
|
115 |
asoc->subscribe = sp->subscribe; |
1e7d3d90c
|
116 |
/* initialize association timers */ |
1e7d3d90c
|
117 118 119 |
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial; |
1e7d3d90c
|
120 121 122 123 124 |
/* sctpimpguide Section 2.12.2 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the * recommended value of 5 times 'RTO.Max'. */ |
d808ad9ab
|
125 |
asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] |
1e7d3d90c
|
126 |
= 5 * asoc->rto_max; |
52ccb8e90
|
127 |
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; |
9f70f46bd
|
128 |
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; |
d808ad9ab
|
129 |
|
421f91d21
|
130 |
/* Initializes the timers */ |
b24b8a247
|
131 |
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) |
9c3b57518
|
132 |
timer_setup(&asoc->timers[i], sctp_timer_events[i], 0); |
1da177e4c
|
133 134 135 136 137 138 139 140 141 142 143 |
/* Pull default initialization values from the sock options. * Note: This assumes that the values have already been * validated in the sock. */ asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams; asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams; asoc->max_init_attempts = sp->initmsg.sinit_max_attempts; asoc->max_init_timeo = msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo); |
1da177e4c
|
144 145 146 147 148 |
/* Set the local window size for receive. * This is also the rcvbuf space per association. * RFC 6 - A SCTP receiver MUST be able to receive a minimum of * 1500 bytes in one SCTP packet. */ |
049b3ff5a
|
149 |
if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) |
1da177e4c
|
150 151 |
asoc->rwnd = SCTP_DEFAULT_MINWINDOW; else |
049b3ff5a
|
152 |
asoc->rwnd = sk->sk_rcvbuf/2; |
1da177e4c
|
153 154 |
asoc->a_rwnd = asoc->rwnd; |
1da177e4c
|
155 156 |
/* Use my own max window until I learn something better. */ asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; |
049b3ff5a
|
157 158 |
/* Initialize the receive memory counter */ atomic_set(&asoc->rmem_alloc, 0); |
1da177e4c
|
159 160 161 |
init_waitqueue_head(&asoc->wait); asoc->c.my_vtag = sctp_generate_tag(ep); |
1da177e4c
|
162 163 164 165 166 167 168 169 170 171 |
asoc->c.my_port = ep->base.bind_addr.port; asoc->c.initial_tsn = sctp_generate_tsn(ep); asoc->next_tsn = asoc->c.initial_tsn; asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; asoc->highest_sacked = asoc->ctsn_ack_point; asoc->last_cwr_tsn = asoc->ctsn_ack_point; |
1da177e4c
|
172 |
|
1da177e4c
|
173 174 175 176 177 178 179 180 181 182 183 |
/* ADDIP Section 4.1 Asconf Chunk Procedures * * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do the following: * ... * A2) a serial number should be assigned to the chunk. The serial * number SHOULD be a monotonically increasing number. The serial * numbers SHOULD be initialized at the start of the * association to the same value as the initial TSN. */ asoc->addip_serial = asoc->c.initial_tsn; |
cc16f00f6
|
184 |
asoc->strreset_outseq = asoc->c.initial_tsn; |
1da177e4c
|
185 |
|
79af02c25
|
186 |
INIT_LIST_HEAD(&asoc->addip_chunk_list); |
a08de64d0
|
187 |
INIT_LIST_HEAD(&asoc->asconf_ack_list); |
1da177e4c
|
188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 |
/* Make an empty list of remote transport addresses. */ INIT_LIST_HEAD(&asoc->peer.transport_addr_list); /* RFC 2960 5.1 Normal Establishment of an Association * * After the reception of the first data chunk in an * association the endpoint must immediately respond with a * sack to acknowledge the data chunk. Subsequent * acknowledgements should be done as described in Section * 6.2. * * [We implement this by telling a new association that it * already received one packet.] */ asoc->peer.sack_needed = 1; |
4244854d2
|
204 |
asoc->peer.sack_generation = 1; |
1da177e4c
|
205 |
|
1da177e4c
|
206 207 |
/* Create an input queue. */ sctp_inq_init(&asoc->base.inqueue); |
c4028958b
|
208 |
sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); |
1da177e4c
|
209 210 211 212 213 214 |
/* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); if (!sctp_ulpq_init(&asoc->ulpq, asoc)) goto fail_init; |
ff356414d
|
215 216 |
if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp)) |
3dbcc105d
|
217 |
goto fail_init; |
4135cce7f
|
218 219 220 |
/* Initialize default path MTU. */ asoc->pathmtu = sp->pathmtu; sctp_assoc_update_frag_point(asoc); |
1da177e4c
|
221 222 223 224 |
/* Assume that peer would support both address types unless we are * told otherwise. */ asoc->peer.ipv4_address = 1; |
a2c395846
|
225 226 |
if (asoc->base.sk->sk_family == PF_INET6) asoc->peer.ipv6_address = 1; |
1da177e4c
|
227 |
INIT_LIST_HEAD(&asoc->asocs); |
1da177e4c
|
228 229 230 231 232 |
asoc->default_stream = sp->default_stream; asoc->default_ppid = sp->default_ppid; asoc->default_flags = sp->default_flags; asoc->default_context = sp->default_context; asoc->default_timetolive = sp->default_timetolive; |
6ab792f57
|
233 |
asoc->default_rcv_context = sp->default_rcv_context; |
1da177e4c
|
234 |
|
a29a5bd4f
|
235 236 |
/* AUTH related initializations */ INIT_LIST_HEAD(&asoc->endpoint_shared_keys); |
581947787
|
237 |
if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) |
3dbcc105d
|
238 |
goto stream_free; |
a29a5bd4f
|
239 240 |
asoc->active_key_id = ep->active_key_id; |
9fb657aec
|
241 |
asoc->strreset_enable = ep->strreset_enable; |
a29a5bd4f
|
242 |
|
a29a5bd4f
|
243 244 245 246 247 248 249 250 251 |
/* Save the hmacs and chunks list into this association */ if (ep->auth_hmacs_list) memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list, ntohs(ep->auth_hmacs_list->param_hdr.length)); if (ep->auth_chunk_list) memcpy(asoc->c.auth_chunks, ep->auth_chunk_list, ntohs(ep->auth_chunk_list->param_hdr.length)); /* Get the AUTH random number for this association */ |
3c9187049
|
252 |
p = (struct sctp_paramhdr *)asoc->c.auth_random; |
a29a5bd4f
|
253 |
p->type = SCTP_PARAM_RANDOM; |
3c9187049
|
254 |
p->length = htons(sizeof(*p) + SCTP_AUTH_RANDOM_LENGTH); |
a29a5bd4f
|
255 |
get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH); |
1da177e4c
|
256 |
return asoc; |
3dbcc105d
|
257 |
stream_free: |
cee360ab4
|
258 |
sctp_stream_free(&asoc->stream); |
1da177e4c
|
259 |
fail_init: |
1da177e4c
|
260 |
sock_put(asoc->base.sk); |
2e0c9e791
|
261 |
sctp_endpoint_put(asoc->ep); |
1da177e4c
|
262 263 264 265 266 |
return NULL; } /* Allocate and initialize a new association */ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, |
1c662018d
|
267 268 |
const struct sock *sk, enum sctp_scope scope, gfp_t gfp) |
1da177e4c
|
269 270 |
{ struct sctp_association *asoc; |
939cfa75a
|
271 |
asoc = kzalloc(sizeof(*asoc), gfp); |
1da177e4c
|
272 273 274 275 276 |
if (!asoc) goto fail; if (!sctp_association_init(asoc, ep, sk, scope, gfp)) goto fail_init; |
1da177e4c
|
277 |
SCTP_DBG_OBJCNT_INC(assoc); |
bb33381d0
|
278 279 280 |
pr_debug("Created asoc %p ", asoc); |
1da177e4c
|
281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 |
return asoc; fail_init: kfree(asoc); fail: return NULL; } /* Free this association if possible. There may still be users, so * the actual deallocation may be delayed. */ void sctp_association_free(struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; struct sctp_transport *transport; struct list_head *pos, *temp; int i; |
de76e695a
|
299 300 301 |
/* Only real associations count against the endpoint, so * don't bother for if this is a temporary association. */ |
d3217b15a
|
302 |
if (!list_empty(&asoc->asocs)) { |
de76e695a
|
303 304 305 306 307 308 |
list_del(&asoc->asocs); /* Decrement the backlog value for a TCP-style listening * socket. */ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) |
7976a11b3
|
309 |
sk_acceptq_removed(sk); |
de76e695a
|
310 |
} |
1da177e4c
|
311 312 313 314 |
/* Mark as dead, so other users can know this structure is * going away. */ |
0022d2dd4
|
315 |
asoc->base.dead = true; |
1da177e4c
|
316 317 318 319 320 321 322 323 324 |
/* Dispose of any data lying around in the outqueue. */ sctp_outq_free(&asoc->outqueue); /* Dispose of any pending messages for the upper layer. */ sctp_ulpq_free(&asoc->ulpq); /* Dispose of any pending chunks on the inqueue. */ sctp_inq_free(&asoc->base.inqueue); |
8e1ee18c3
|
325 |
sctp_tsnmap_free(&asoc->peer.tsn_map); |
a83863174
|
326 |
/* Free stream information. */ |
cee360ab4
|
327 |
sctp_stream_free(&asoc->stream); |
1da177e4c
|
328 |
|
7b9438de0
|
329 330 |
if (asoc->strreset_chunk) sctp_chunk_free(asoc->strreset_chunk); |
1da177e4c
|
331 332 333 334 335 336 337 338 339 |
/* Clean up the bound address list. */ sctp_bind_addr_free(&asoc->base.bind_addr); /* Do we need to go through all of our timers and * delete them? To be safe we will try to delete all, but we * should be able to go through and make a guess based * on our state. */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { |
25cc4ae91
|
340 |
if (del_timer(&asoc->timers[i])) |
1da177e4c
|
341 342 343 344 |
sctp_association_put(asoc); } /* Free peer's cached cookie. */ |
a51482bde
|
345 |
kfree(asoc->peer.cookie); |
730fc3d05
|
346 347 348 |
kfree(asoc->peer.peer_random); kfree(asoc->peer.peer_chunks); kfree(asoc->peer.peer_hmacs); |
1da177e4c
|
349 350 351 352 |
/* Release the transport structures. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); |
45122ca26
|
353 |
list_del_rcu(pos); |
4f0087812
|
354 |
sctp_unhash_transport(transport); |
1da177e4c
|
355 356 |
sctp_transport_free(transport); } |
3f7a87d2f
|
357 |
asoc->peer.transport_count = 0; |
a000c01e6
|
358 |
sctp_asconf_queue_teardown(asoc); |
1da177e4c
|
359 |
|
8a07eb0a5
|
360 |
/* Free pending address space being deleted */ |
7d37d0c15
|
361 |
kfree(asoc->asconf_addr_del_pending); |
8a07eb0a5
|
362 |
|
a29a5bd4f
|
363 364 365 366 367 |
/* AUTH - Free the endpoint shared keys */ sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); /* AUTH - Free the association shared key */ sctp_auth_key_put(asoc->asoc_shared_key); |
1da177e4c
|
368 369 370 371 372 373 |
sctp_association_put(asoc); } /* Cleanup and free up an association. */ static void sctp_association_destroy(struct sctp_association *asoc) { |
bb33381d0
|
374 375 376 377 378 |
if (unlikely(!asoc->base.dead)) { WARN(1, "Attempt to destroy undead association %p! ", asoc); return; } |
1da177e4c
|
379 380 381 382 383 384 385 386 387 |
sctp_endpoint_put(asoc->ep); sock_put(asoc->base.sk); if (asoc->assoc_id != 0) { spin_lock_bh(&sctp_assocs_id_lock); idr_remove(&sctp_assocs_id, asoc->assoc_id); spin_unlock_bh(&sctp_assocs_id_lock); } |
547b792ca
|
388 |
WARN_ON(atomic_read(&asoc->rmem_alloc)); |
049b3ff5a
|
389 |
|
fb6df5a62
|
390 |
kfree_rcu(asoc, rcu); |
ff2266cdd
|
391 |
SCTP_DBG_OBJCNT_DEC(assoc); |
1da177e4c
|
392 393 394 395 396 397 |
} /* Change the primary destination address for the peer. */ void sctp_assoc_set_primary(struct sctp_association *asoc, struct sctp_transport *transport) { |
319fa2a24
|
398 399 400 401 402 403 404 405 |
int changeover = 0; /* it's a changeover only if we already have a primary path * that we are changing */ if (asoc->peer.primary_path != NULL && asoc->peer.primary_path != transport) changeover = 1 ; |
1da177e4c
|
406 |
asoc->peer.primary_path = transport; |
50ce4c099
|
407 |
sctp_ulpevent_notify_peer_addr_change(transport, |
5cd0b9173
|
408 |
SCTP_ADDR_MADE_PRIM, 0); |
1da177e4c
|
409 410 411 412 413 414 415 416 |
/* Set a default msg_name for events. */ memcpy(&asoc->peer.primary_addr, &transport->ipaddr, sizeof(union sctp_addr)); /* If the primary path is changing, assume that the * user wants to use this new path. */ |
ad8fec172
|
417 418 |
if ((transport->state == SCTP_ACTIVE) || (transport->state == SCTP_UNKNOWN)) |
1da177e4c
|
419 420 421 422 423 424 425 426 427 428 429 430 |
asoc->peer.active_path = transport; /* * SFR-CACC algorithm: * Upon the receipt of a request to change the primary * destination address, on the data structure for the new * primary destination, the sender MUST do the following: * * 1) If CHANGEOVER_ACTIVE is set, then there was a switch * to this destination address earlier. The sender MUST set * CYCLING_CHANGEOVER to indicate that this switch is a * double switch to the same destination address. |
e0e9db178
|
431 432 433 |
* * Really, only bother is we have data queued or outstanding on * the association. |
1da177e4c
|
434 |
*/ |
e0e9db178
|
435 436 |
if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen) return; |
1da177e4c
|
437 |
if (transport->cacc.changeover_active) |
319fa2a24
|
438 |
transport->cacc.cycling_changeover = changeover; |
1da177e4c
|
439 440 441 442 |
/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that * a changeover has occurred. */ |
319fa2a24
|
443 |
transport->cacc.changeover_active = changeover; |
1da177e4c
|
444 445 446 447 448 449 |
/* 3) The sender MUST store the next TSN to be sent in * next_tsn_at_change. */ transport->cacc.next_tsn_at_change = asoc->next_tsn; } |
3f7a87d2f
|
450 451 452 453 |
/* Remove a transport from an association. */ void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer) { |
df132eff4
|
454 455 456 |
struct sctp_transport *transport; struct list_head *pos; struct sctp_chunk *ch; |
3f7a87d2f
|
457 |
|
bb33381d0
|
458 459 460 |
pr_debug("%s: association:%p addr:%pISpc ", __func__, asoc, &peer->ipaddr.sa); |
3f7a87d2f
|
461 462 463 464 465 466 467 468 |
/* If we are to remove the current retran_path, update it * to the next peer before removing this peer from the list. */ if (asoc->peer.retran_path == peer) sctp_assoc_update_retran_path(asoc); /* Remove this peer from the list. */ |
45122ca26
|
469 |
list_del_rcu(&peer->transports); |
4f0087812
|
470 471 |
/* Remove this peer from the transport hashtable */ sctp_unhash_transport(peer); |
3f7a87d2f
|
472 473 474 475 476 477 478 479 480 481 |
/* Get the first transport of asoc. */ pos = asoc->peer.transport_addr_list.next; transport = list_entry(pos, struct sctp_transport, transports); /* Update any entries that match the peer to be deleted. */ if (asoc->peer.primary_path == peer) sctp_assoc_set_primary(asoc, transport); if (asoc->peer.active_path == peer) asoc->peer.active_path = transport; |
9494c7c57
|
482 483 |
if (asoc->peer.retran_path == peer) asoc->peer.retran_path = transport; |
3f7a87d2f
|
484 485 |
if (asoc->peer.last_data_from == peer) asoc->peer.last_data_from = transport; |
7b9438de0
|
486 487 488 489 490 |
if (asoc->strreset_chunk && asoc->strreset_chunk->transport == peer) { asoc->strreset_chunk->transport = transport; sctp_transport_reset_reconf_timer(transport); } |
3f7a87d2f
|
491 492 493 494 495 496 497 |
/* If we remove the transport an INIT was last sent to, set it to * NULL. Combined with the update of the retran path above, this * will cause the next INIT to be sent to the next available * transport, maintaining the cycle. */ if (asoc->init_last_sent_to == peer) asoc->init_last_sent_to = NULL; |
6345b1998
|
498 499 500 501 502 503 504 |
/* If we remove the transport an SHUTDOWN was last sent to, set it * to NULL. Combined with the update of the retran path above, this * will cause the next SHUTDOWN to be sent to the next available * transport, maintaining the cycle. */ if (asoc->shutdown_last_sent_to == peer) asoc->shutdown_last_sent_to = NULL; |
10a43cea7
|
505 506 507 508 509 510 |
/* If we remove the transport an ASCONF was last sent to, set it to * NULL. */ if (asoc->addip_last_asconf && asoc->addip_last_asconf->transport == peer) asoc->addip_last_asconf->transport = NULL; |
31b02e154
|
511 512 513 514 515 |
/* If we have something on the transmitted list, we have to * save it off. The best place is the active path. */ if (!list_empty(&peer->transmitted)) { struct sctp_transport *active = asoc->peer.active_path; |
31b02e154
|
516 517 518 519 520 521 522 523 524 525 526 527 528 |
/* Reset the transport of each chunk on this list */ list_for_each_entry(ch, &peer->transmitted, transmitted_list) { ch->transport = NULL; ch->rtt_in_progress = 0; } list_splice_tail_init(&peer->transmitted, &active->transmitted); /* Start a T3 timer here in case it wasn't running so * that these migrated packets have a chance to get |
2bccbadf2
|
529 |
* retransmitted. |
31b02e154
|
530 531 532 533 534 535 |
*/ if (!timer_pending(&active->T3_rtx_timer)) if (!mod_timer(&active->T3_rtx_timer, jiffies + active->rto)) sctp_transport_hold(active); } |
df132eff4
|
536 537 538 |
list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) if (ch->transport == peer) ch->transport = NULL; |
3f7a87d2f
|
539 |
asoc->peer.transport_count--; |
50ce4c099
|
540 |
sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0); |
3f7a87d2f
|
541 542 |
sctp_transport_free(peer); } |
1da177e4c
|
543 544 545 |
/* Add a transport address to an association. */ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, |
dd0fc66fb
|
546 |
const gfp_t gfp, |
3f7a87d2f
|
547 |
const int peer_state) |
1da177e4c
|
548 549 550 551 552 553 554 555 |
{ struct sctp_transport *peer; struct sctp_sock *sp; unsigned short port; sp = sctp_sk(asoc->base.sk); /* AF_INET and AF_INET6 share common port field. */ |
4bdf4b5fe
|
556 |
port = ntohs(addr->v4.sin_port); |
1da177e4c
|
557 |
|
bb33381d0
|
558 559 560 |
pr_debug("%s: association:%p addr:%pISpc state:%d ", __func__, asoc, &addr->sa, peer_state); |
3f7a87d2f
|
561 |
|
1da177e4c
|
562 563 564 565 566 567 |
/* Set the port if it has not been set yet. */ if (0 == asoc->peer.port) asoc->peer.port = port; /* Check to see if this is a duplicate. */ peer = sctp_assoc_lookup_paddr(asoc, addr); |
3f7a87d2f
|
568 |
if (peer) { |
add52379d
|
569 570 571 572 |
/* An UNKNOWN state is only set on transports added by * user in sctp_connectx() call. Such transports should be * considered CONFIRMED per RFC 4960, Section 5.4. */ |
ad8fec172
|
573 |
if (peer->state == SCTP_UNKNOWN) { |
add52379d
|
574 |
peer->state = SCTP_ACTIVE; |
ad8fec172
|
575 |
} |
1da177e4c
|
576 |
return peer; |
3f7a87d2f
|
577 |
} |
1da177e4c
|
578 |
|
4e7696d90
|
579 |
peer = sctp_transport_new(asoc->base.net, addr, gfp); |
1da177e4c
|
580 581 582 583 |
if (!peer) return NULL; sctp_transport_set_owner(peer, asoc); |
52ccb8e90
|
584 585 586 587 588 589 590 |
/* Initialize the peer's heartbeat interval based on the * association configured value. */ peer->hbinterval = asoc->hbinterval; /* Set the path max_retrans. */ peer->pathmaxrxt = asoc->pathmaxrxt; |
2bccbadf2
|
591 |
/* And the partial failure retrans threshold */ |
5aa93bcf6
|
592 |
peer->pf_retrans = asoc->pf_retrans; |
34515e94c
|
593 594 |
/* And the primary path switchover retrans threshold */ peer->ps_retrans = asoc->ps_retrans; |
5aa93bcf6
|
595 |
|
52ccb8e90
|
596 597 598 599 |
/* Initialize the peer's SACK delay timeout based on the * association configured value. */ peer->sackdelay = asoc->sackdelay; |
d364d9276
|
600 |
peer->sackfreq = asoc->sackfreq; |
52ccb8e90
|
601 |
|
4be4139f7
|
602 603 604 605 606 607 608 609 610 611 |
if (addr->sa.sa_family == AF_INET6) { __be32 info = addr->v6.sin6_flowinfo; if (info) { peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK); peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK; } else { peer->flowlabel = asoc->flowlabel; } } |
8a9c58d28
|
612 |
peer->dscp = asoc->dscp; |
52ccb8e90
|
613 614 615 616 |
/* Enable/disable heartbeat, SACK delay, and path MTU discovery * based on association setting. */ peer->param_flags = asoc->param_flags; |
1da177e4c
|
617 |
/* Initialize the pmtu of the transport. */ |
800e00c12
|
618 |
sctp_transport_route(peer, NULL, sp); |
1da177e4c
|
619 620 621 622 623 624 |
/* If this is the first transport addr on this association, * initialize the association PMTU to the peer's PMTU. * If not and the current association PMTU is higher than the new * peer's PMTU, reset the association PMTU to the new peer's PMTU. */ |
c4b2893da
|
625 626 627 |
sctp_assoc_set_pmtu(asoc, asoc->pathmtu ? min_t(int, peer->pathmtu, asoc->pathmtu) : peer->pathmtu); |
bb33381d0
|
628 |
|
6d0ccbac6
|
629 |
peer->pmtu_pending = 0; |
1da177e4c
|
630 |
|
1da177e4c
|
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 |
/* The asoc->peer.port might not be meaningful yet, but * initialize the packet structure anyway. */ sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, asoc->peer.port); /* 7.2.1 Slow-Start * * o The initial cwnd before DATA transmission or after a sufficiently * long idle period MUST be set to * min(4*MTU, max(2*MTU, 4380 bytes)) * * o The initial value of ssthresh MAY be arbitrarily high * (for example, implementations MAY use the size of the * receiver advertised window). */ |
52ccb8e90
|
647 |
peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); |
1da177e4c
|
648 649 650 651 652 653 654 655 656 |
/* At this point, we may not have the receiver's advertised window, * so initialize ssthresh to the default value and it will be set * later when we process the INIT. */ peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; peer->partial_bytes_acked = 0; peer->flight_size = 0; |
46d5a8085
|
657 |
peer->burst_limited = 0; |
1da177e4c
|
658 |
|
1da177e4c
|
659 660 |
/* Set the transport's RTO.initial value */ peer->rto = asoc->rto_initial; |
196d67593
|
661 |
sctp_max_rto(asoc, peer); |
1da177e4c
|
662 |
|
3f7a87d2f
|
663 664 |
/* Set the peer's active state. */ peer->state = peer_state; |
7fda702f9
|
665 666 667 668 669 |
/* Add this peer into the transport hashtable */ if (sctp_hash_transport(peer)) { sctp_transport_free(peer); return NULL; } |
1da177e4c
|
670 |
/* Attach the remote transport to our asoc. */ |
45122ca26
|
671 |
list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list); |
3f7a87d2f
|
672 |
asoc->peer.transport_count++; |
1da177e4c
|
673 |
|
50ce4c099
|
674 |
sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0); |
4b7740324
|
675 |
|
1da177e4c
|
676 677 678 679 680 |
/* If we do not yet have a primary path, set one. */ if (!asoc->peer.primary_path) { sctp_assoc_set_primary(asoc, peer); asoc->peer.retran_path = peer; } |
fbdf501c9
|
681 682 |
if (asoc->peer.active_path == asoc->peer.retran_path && peer->state != SCTP_UNCONFIRMED) { |
1da177e4c
|
683 |
asoc->peer.retran_path = peer; |
3f7a87d2f
|
684 |
} |
1da177e4c
|
685 686 687 688 689 690 691 692 693 694 |
return peer; } /* Delete a transport address from an association. */ void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr) { struct list_head *pos; struct list_head *temp; |
1da177e4c
|
695 696 697 698 699 |
struct sctp_transport *transport; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { transport = list_entry(pos, struct sctp_transport, transports); if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { |
3f7a87d2f
|
700 701 |
/* Do book keeping for removing the peer and free it. */ sctp_assoc_rm_peer(asoc, transport); |
1da177e4c
|
702 703 704 |
break; } } |
1da177e4c
|
705 706 707 708 709 710 711 712 |
} /* Lookup a transport by address. */ struct sctp_transport *sctp_assoc_lookup_paddr( const struct sctp_association *asoc, const union sctp_addr *address) { struct sctp_transport *t; |
1da177e4c
|
713 714 |
/* Cycle through all transports searching for a peer address. */ |
9dbc15f05
|
715 716 |
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { |
1da177e4c
|
717 718 719 720 721 722 |
if (sctp_cmp_addr_exact(address, &t->ipaddr)) return t; } return NULL; } |
42e30bf34
|
723 724 725 726 727 728 729 730 731 732 733 734 735 |
/* Remove all transports except a give one */ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, struct sctp_transport *primary) { struct sctp_transport *temp; struct sctp_transport *t; list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list, transports) { /* if the current transport is not the primary one, delete it */ if (t != primary) sctp_assoc_rm_peer(asoc, t); } |
42e30bf34
|
736 |
} |
1da177e4c
|
737 738 739 740 741 742 |
/* Engage in transport control operations. * Mark the transport up or down and send a notification to the user. * Select and update the new active and retran paths. */ void sctp_assoc_control_transport(struct sctp_association *asoc, struct sctp_transport *transport, |
0ceaeebe2
|
743 |
enum sctp_transport_cmd command, |
1da177e4c
|
744 745 |
sctp_sn_error_t error) { |
768e15182
|
746 |
int spc_state = SCTP_ADDR_AVAILABLE; |
5aa93bcf6
|
747 |
bool ulp_notify = true; |
1da177e4c
|
748 749 750 751 |
/* Record the transition on the transport. */ switch (command) { case SCTP_TRANSPORT_UP: |
1ae4114dc
|
752 753 754 755 |
/* If we are moving from UNCONFIRMED state due * to heartbeat success, report the SCTP_ADDR_CONFIRMED * state to the user, otherwise report SCTP_ADDR_AVAILABLE. */ |
768e15182
|
756 757 |
if (transport->state == SCTP_PF && asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE) |
5aa93bcf6
|
758 |
ulp_notify = false; |
768e15182
|
759 760 761 |
else if (transport->state == SCTP_UNCONFIRMED && error == SCTP_HEARTBEAT_SUCCESS) spc_state = SCTP_ADDR_CONFIRMED; |
3f7a87d2f
|
762 |
transport->state = SCTP_ACTIVE; |
1da177e4c
|
763 764 765 |
break; case SCTP_TRANSPORT_DOWN: |
40187886b
|
766 767 768 |
/* If the transport was never confirmed, do not transition it * to inactive state. Also, release the cached route since * there may be a better route next time. |
cc75689a4
|
769 |
*/ |
768e15182
|
770 |
if (transport->state != SCTP_UNCONFIRMED) { |
cc75689a4
|
771 |
transport->state = SCTP_INACTIVE; |
768e15182
|
772 773 |
spc_state = SCTP_ADDR_UNREACHABLE; } else { |
c86a773c7
|
774 |
sctp_transport_dst_release(transport); |
061079ac0
|
775 |
ulp_notify = false; |
40187886b
|
776 |
} |
1da177e4c
|
777 |
break; |
5aa93bcf6
|
778 779 |
case SCTP_TRANSPORT_PF: transport->state = SCTP_PF; |
768e15182
|
780 781 782 783 |
if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE) ulp_notify = false; else spc_state = SCTP_ADDR_POTENTIALLY_FAILED; |
5aa93bcf6
|
784 |
break; |
1da177e4c
|
785 786 |
default: return; |
3ff50b799
|
787 |
} |
1da177e4c
|
788 |
|
b82e8f31a
|
789 790 |
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification * to the user. |
1da177e4c
|
791 |
*/ |
4b7740324
|
792 |
if (ulp_notify) |
50ce4c099
|
793 |
sctp_ulpevent_notify_peer_addr_change(transport, |
4b7740324
|
794 |
spc_state, error); |
1da177e4c
|
795 796 |
/* Select new active and retran paths. */ |
b82e8f31a
|
797 |
sctp_select_active_and_retran_path(asoc); |
1da177e4c
|
798 799 800 801 802 |
} /* Hold a reference to an association. */ void sctp_association_hold(struct sctp_association *asoc) { |
c638457a7
|
803 |
refcount_inc(&asoc->base.refcnt); |
1da177e4c
|
804 805 806 807 808 809 810 |
} /* Release a reference to an association and cleanup * if there are no more references. */ void sctp_association_put(struct sctp_association *asoc) { |
c638457a7
|
811 |
if (refcount_dec_and_test(&asoc->base.refcnt)) |
1da177e4c
|
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 |
sctp_association_destroy(asoc); } /* Allocate the next TSN, Transmission Sequence Number, for the given * association. */ __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) { /* From Section 1.6 Serial Number Arithmetic: * Transmission Sequence Numbers wrap around when they reach * 2**32 - 1. That is, the next TSN a DATA chunk MUST use * after transmitting TSN = 2*32 - 1 is TSN = 0. */ __u32 retval = asoc->next_tsn; asoc->next_tsn++; asoc->unack_data++; return retval; } /* Compare two addresses to see if they match. Wildcard addresses * only match themselves. */ int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2) { struct sctp_af *af; af = sctp_get_af_specific(ss1->sa.sa_family); if (unlikely(!af)) return 0; return af->cmp_addr(ss1, ss2); } /* Return an ecne chunk to get prepended to a packet. * Note: We are sly and return a shared, prealloced chunk. FIXME: * No we don't, but we could/should. */ struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) { |
8b7318d3e
|
853 854 |
if (!asoc->need_ecne) return NULL; |
1da177e4c
|
855 856 857 858 |
/* Send ECNE if needed. * Not being able to allocate a chunk here is not deadly. */ |
8b7318d3e
|
859 |
return sctp_make_ecne(asoc, asoc->last_ecne_tsn); |
1da177e4c
|
860 861 862 863 864 865 866 867 868 869 |
} /* * Find which transport this TSN was sent on. */ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, __u32 tsn) { struct sctp_transport *active; struct sctp_transport *match; |
1da177e4c
|
870 871 |
struct sctp_transport *transport; struct sctp_chunk *chunk; |
dbc16db1e
|
872 |
__be32 key = htonl(tsn); |
1da177e4c
|
873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 |
match = NULL; /* * FIXME: In general, find a more efficient data structure for * searching. */ /* * The general strategy is to search each transport's transmitted * list. Return which transport this TSN lives on. * * Let's be hopeful and check the active_path first. * Another optimization would be to know if there is only one * outbound path and not have to look for the TSN at all. * */ active = asoc->peer.active_path; |
9dbc15f05
|
892 893 |
list_for_each_entry(chunk, &active->transmitted, transmitted_list) { |
1da177e4c
|
894 895 896 897 898 899 900 901 |
if (key == chunk->subh.data_hdr->tsn) { match = active; goto out; } } /* If not found, go search all the other transports. */ |
9dbc15f05
|
902 903 |
list_for_each_entry(transport, &asoc->peer.transport_addr_list, transports) { |
1da177e4c
|
904 905 |
if (transport == active) |
2317f449a
|
906 |
continue; |
9dbc15f05
|
907 908 |
list_for_each_entry(chunk, &transport->transmitted, transmitted_list) { |
1da177e4c
|
909 910 911 912 913 914 915 916 917 |
if (key == chunk->subh.data_hdr->tsn) { match = transport; goto out; } } } out: return match; } |
1da177e4c
|
918 |
/* Do delayed input processing. This is scheduled by sctp_rcv(). */ |
c4028958b
|
919 |
static void sctp_assoc_bh_rcv(struct work_struct *work) |
1da177e4c
|
920 |
{ |
c4028958b
|
921 922 923 |
struct sctp_association *asoc = container_of(work, struct sctp_association, base.inqueue.immediate); |
4e7696d90
|
924 |
struct net *net = asoc->base.net; |
bfc6f8270
|
925 |
union sctp_subtype subtype; |
1da177e4c
|
926 927 |
struct sctp_endpoint *ep; struct sctp_chunk *chunk; |
1da177e4c
|
928 |
struct sctp_inq *inqueue; |
59d8d4434
|
929 |
int first_time = 1; /* is this the first time through the loop */ |
1da177e4c
|
930 |
int error = 0; |
59d8d4434
|
931 |
int state; |
1da177e4c
|
932 933 934 |
/* The association should be held so we should be safe. */ ep = asoc->ep; |
1da177e4c
|
935 936 937 938 939 940 |
inqueue = &asoc->base.inqueue; sctp_association_hold(asoc); while (NULL != (chunk = sctp_inq_pop(inqueue))) { state = asoc->state; subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); |
59d8d4434
|
941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 |
/* If the first chunk in the packet is AUTH, do special * processing specified in Section 6.3 of SCTP-AUTH spec */ if (first_time && subtype.chunk == SCTP_CID_AUTH) { struct sctp_chunkhdr *next_hdr; next_hdr = sctp_inq_peek(inqueue); if (!next_hdr) goto normal; /* If the next chunk is COOKIE-ECHO, skip the AUTH * chunk while saving a pointer to it so we can do * Authentication later (during cookie-echo * processing). */ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { chunk->auth_chunk = skb_clone(chunk->skb, GFP_ATOMIC); chunk->auth = 1; continue; } } normal: |
bbd0d5980
|
965 966 967 968 969 970 971 972 973 |
/* SCTP-AUTH, Section 6.3: * The receiver has a list of chunk types which it expects * to be received only after an AUTH-chunk. This list has * been sent to the peer during the association setup. It * MUST silently discard these chunks if they are not placed * after an AUTH chunk in the packet. */ if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) continue; |
1da177e4c
|
974 975 976 977 978 |
/* Remember where the last DATA chunk came from so we * know where to send the SACK. */ if (sctp_chunk_is_data(chunk)) asoc->peer.last_data_from = chunk->transport; |
196d67593
|
979 |
else { |
55e26eb95
|
980 |
SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS); |
196d67593
|
981 982 983 984 |
asoc->stats.ictrlchunks++; if (chunk->chunk_hdr->type == SCTP_CID_SACK) asoc->stats.isacks++; } |
1da177e4c
|
985 986 |
if (chunk->transport) |
e575235fc
|
987 |
chunk->transport->last_time_heard = ktime_get(); |
1da177e4c
|
988 989 |
/* Run through the state machine. */ |
55e26eb95
|
990 |
error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, |
1da177e4c
|
991 992 993 994 995 996 997 998 999 1000 1001 |
state, ep, asoc, chunk, GFP_ATOMIC); /* Check to see if the association is freed in response to * the incoming chunk. If so, get out of the while loop. */ if (asoc->base.dead) break; /* If there is an error on chunk, discard this packet. */ if (error && chunk) chunk->pdiscard = 1; |
59d8d4434
|
1002 1003 1004 |
if (first_time) first_time = 0; |
1da177e4c
|
1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 |
} sctp_association_put(asoc); } /* This routine moves an association from its old sk to a new sk. */ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk) { struct sctp_sock *newsp = sctp_sk(newsk); struct sock *oldsk = assoc->base.sk; /* Delete the association from the old endpoint's list of * associations. */ list_del_init(&assoc->asocs); /* Decrement the backlog value for a TCP-style socket. */ if (sctp_style(oldsk, TCP)) |
7976a11b3
|
1022 |
sk_acceptq_removed(oldsk); |
1da177e4c
|
1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 |
/* Release references to the old endpoint and the sock. */ sctp_endpoint_put(assoc->ep); sock_put(assoc->base.sk); /* Get a reference to the new endpoint. */ assoc->ep = newsp->ep; sctp_endpoint_hold(assoc->ep); /* Get a reference to the new sock. */ assoc->base.sk = newsk; sock_hold(assoc->base.sk); /* Add the association to the new endpoint's list of associations. */ sctp_endpoint_add_asoc(newsp->ep, assoc); } /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ |
5ee8aa689
|
1041 1042 |
int sctp_assoc_update(struct sctp_association *asoc, struct sctp_association *new) |
1da177e4c
|
1043 1044 1045 1046 1047 1048 1049 1050 |
{ struct sctp_transport *trans; struct list_head *pos, *temp; /* Copy in new parameters of peer. */ asoc->c = new->c; asoc->peer.rwnd = new->peer.rwnd; asoc->peer.sack_needed = new->peer.sack_needed; |
1be9a950c
|
1051 |
asoc->peer.auth_capable = new->peer.auth_capable; |
1da177e4c
|
1052 |
asoc->peer.i = new->peer.i; |
5ee8aa689
|
1053 1054 1055 1056 |
if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, asoc->peer.i.initial_tsn, GFP_ATOMIC)) return -ENOMEM; |
1da177e4c
|
1057 1058 1059 1060 |
/* Remove any peer addresses not present in the new association. */ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { trans = list_entry(pos, struct sctp_transport, transports); |
0c42749cf
|
1061 1062 1063 1064 |
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) { sctp_assoc_rm_peer(asoc, trans); continue; } |
749bf9215
|
1065 1066 1067 |
if (asoc->state >= SCTP_STATE_ESTABLISHED) sctp_transport_reset(trans); |
1da177e4c
|
1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 |
} /* If the case is A (association restart), use * initial_tsn as next_tsn. If the case is B, use * current next_tsn in case data sent to peer * has been discarded and needs retransmission. */ if (asoc->state >= SCTP_STATE_ESTABLISHED) { asoc->next_tsn = new->next_tsn; asoc->ctsn_ack_point = new->ctsn_ack_point; asoc->adv_peer_ack_point = new->adv_peer_ack_point; /* Reinitialize SSN for both local streams * and peer's streams. */ |
cee360ab4
|
1083 |
sctp_stream_clear(&asoc->stream); |
1da177e4c
|
1084 |
|
0b58a8114
|
1085 1086 1087 1088 1089 |
/* Flush the ULP reassembly and ordered queue. * Any data there will now be stale and will * cause problems. */ sctp_ulpq_flush(&asoc->ulpq); |
749bf9215
|
1090 1091 1092 1093 1094 |
/* reset the overall association error count so * that the restarted association doesn't get torn * down on the next retransmission timer. */ asoc->overall_error_count = 0; |
1da177e4c
|
1095 1096 |
} else { /* Add any peer addresses from the new association. */ |
9dbc15f05
|
1097 |
list_for_each_entry(trans, &new->peer.transport_addr_list, |
5ee8aa689
|
1098 1099 1100 1101 1102 |
transports) if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) && !sctp_assoc_add_peer(asoc, &trans->ipaddr, GFP_ATOMIC, trans->state)) return -ENOMEM; |
1da177e4c
|
1103 1104 1105 |
asoc->ctsn_ack_point = asoc->next_tsn - 1; asoc->adv_peer_ack_point = asoc->ctsn_ack_point; |
3ab213791
|
1106 |
|
cee360ab4
|
1107 1108 |
if (sctp_state(asoc, COOKIE_WAIT)) sctp_stream_update(&asoc->stream, &new->stream); |
07d939677
|
1109 |
|
4abf5a653
|
1110 |
/* get a new assoc id if we don't have one yet. */ |
5ee8aa689
|
1111 1112 |
if (sctp_assoc_set_id(asoc, GFP_ATOMIC)) return -ENOMEM; |
1da177e4c
|
1113 |
} |
a29a5bd4f
|
1114 |
|
9d2c881af
|
1115 |
/* SCTP-AUTH: Save the peer parameters from the new associations |
730fc3d05
|
1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 |
* and also move the association shared keys over */ kfree(asoc->peer.peer_random); asoc->peer.peer_random = new->peer.peer_random; new->peer.peer_random = NULL; kfree(asoc->peer.peer_chunks); asoc->peer.peer_chunks = new->peer.peer_chunks; new->peer.peer_chunks = NULL; kfree(asoc->peer.peer_hmacs); asoc->peer.peer_hmacs = new->peer.peer_hmacs; new->peer.peer_hmacs = NULL; |
5ee8aa689
|
1129 |
return sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC); |
1da177e4c
|
1130 1131 1132 |
} /* Update the retran path for sending a retransmitted packet. |
4c47af4d5
|
1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 |
* See also RFC4960, 6.4. Multi-Homed SCTP Endpoints: * * When there is outbound data to send and the primary path * becomes inactive (e.g., due to failures), or where the * SCTP user explicitly requests to send data to an * inactive destination transport address, before reporting * an error to its ULP, the SCTP endpoint should try to send * the data to an alternate active destination transport * address if one exists. * * When retransmitting data that timed out, if the endpoint * is multihomed, it should consider each source-destination * address pair in its retransmission selection policy. * When retransmitting timed-out data, the endpoint should * attempt to pick the most divergent source-destination * pair from the original source-destination pair to which * the packet was transmitted. * * Note: Rules for picking the most divergent source-destination * pair are an implementation decision and are not specified * within this document. * * Our basic strategy is to round-robin transports in priorities |
2103d6b81
|
1156 |
* according to sctp_trans_score() e.g., if no such |
4c47af4d5
|
1157 1158 |
* transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. |
1da177e4c
|
1159 |
*/ |
4c47af4d5
|
1160 |
static u8 sctp_trans_score(const struct sctp_transport *trans) |
1da177e4c
|
1161 |
{ |
2103d6b81
|
1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 |
switch (trans->state) { case SCTP_ACTIVE: return 3; /* best case */ case SCTP_UNKNOWN: return 2; case SCTP_PF: return 1; default: /* case SCTP_INACTIVE */ return 0; /* worst case */ } |
4c47af4d5
|
1172 |
} |
1da177e4c
|
1173 |
|
a7288c4dd
|
1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 |
/* Break a score tie between two transports: prefer the one with the
 * lower error count, and among equals the one heard from more recently.
 */
static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
						   struct sctp_transport *trans2)
{
	if (trans1->error_count > trans2->error_count)
		return trans2;

	if (trans1->error_count == trans2->error_count &&
	    ktime_after(trans2->last_time_heard, trans1->last_time_heard))
		return trans2;

	return trans1;
}
4c47af4d5
|
1187 1188 1189 |
static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, struct sctp_transport *best) { |
a7288c4dd
|
1190 |
u8 score_curr, score_best; |
ea4f19c1f
|
1191 |
if (best == NULL || curr == best) |
4c47af4d5
|
1192 |
return curr; |
4141ddc02
|
1193 |
|
a7288c4dd
|
1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 |
score_curr = sctp_trans_score(curr); score_best = sctp_trans_score(best); /* First, try a score-based selection if both transport states * differ. If we're in a tie, lets try to make a more clever * decision here based on error counts and last time heard. */ if (score_curr > score_best) return curr; else if (score_curr == score_best) |
39d2adebf
|
1204 |
return sctp_trans_elect_tie(best, curr); |
a7288c4dd
|
1205 1206 |
else return best; |
4c47af4d5
|
1207 |
} |
1da177e4c
|
1208 |
|
4c47af4d5
|
1209 1210 1211 1212 |
void sctp_assoc_update_retran_path(struct sctp_association *asoc) { struct sctp_transport *trans = asoc->peer.retran_path; struct sctp_transport *trans_next = NULL; |
1da177e4c
|
1213 |
|
4c47af4d5
|
1214 1215 1216 1217 1218 1219 1220 1221 1222 |
/* We're done as we only have the one and only path. */ if (asoc->peer.transport_count == 1) return; /* If active_path and retran_path are the same and active, * then this is the only active path. Use it. */ if (asoc->peer.active_path == asoc->peer.retran_path && asoc->peer.active_path->state == SCTP_ACTIVE) return; |
1da177e4c
|
1223 |
|
4c47af4d5
|
1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 |
/* Iterate from retran_path's successor back to retran_path. */ for (trans = list_next_entry(trans, transports); 1; trans = list_next_entry(trans, transports)) { /* Manually skip the head element. */ if (&trans->transports == &asoc->peer.transport_addr_list) continue; if (trans->state == SCTP_UNCONFIRMED) continue; trans_next = sctp_trans_elect_best(trans, trans_next); /* Active is good enough for immediate return. */ if (trans_next->state == SCTP_ACTIVE) |
4141ddc02
|
1235 |
break; |
4c47af4d5
|
1236 1237 |
/* We've reached the end, time to update path. */ if (trans == asoc->peer.retran_path) |
1da177e4c
|
1238 |
break; |
1da177e4c
|
1239 |
} |
433131ba0
|
1240 |
asoc->peer.retran_path = trans_next; |
3f7a87d2f
|
1241 |
|
4c47af4d5
|
1242 1243 1244 |
pr_debug("%s: association:%p updated new path to addr:%pISpc ", __func__, asoc, &asoc->peer.retran_path->ipaddr.sa); |
3f7a87d2f
|
1245 |
} |
b82e8f31a
|
1246 1247 1248 |
static void sctp_select_active_and_retran_path(struct sctp_association *asoc) { struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL; |
a7288c4dd
|
1249 |
struct sctp_transport *trans_pf = NULL; |
b82e8f31a
|
1250 1251 1252 1253 |
/* Look for the two most recently used active transports. */ list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { |
a7288c4dd
|
1254 |
/* Skip uninteresting transports. */ |
b82e8f31a
|
1255 |
if (trans->state == SCTP_INACTIVE || |
a7288c4dd
|
1256 |
trans->state == SCTP_UNCONFIRMED) |
b82e8f31a
|
1257 |
continue; |
a7288c4dd
|
1258 1259 1260 1261 1262 1263 1264 1265 |
/* Keep track of the best PF transport from our * list in case we don't find an active one. */ if (trans->state == SCTP_PF) { trans_pf = sctp_trans_elect_best(trans, trans_pf); continue; } /* For active transports, pick the most recent ones. */ |
b82e8f31a
|
1266 |
if (trans_pri == NULL || |
e575235fc
|
1267 1268 |
ktime_after(trans->last_time_heard, trans_pri->last_time_heard)) { |
b82e8f31a
|
1269 1270 1271 |
trans_sec = trans_pri; trans_pri = trans; } else if (trans_sec == NULL || |
e575235fc
|
1272 1273 |
ktime_after(trans->last_time_heard, trans_sec->last_time_heard)) { |
b82e8f31a
|
1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 |
trans_sec = trans; } } /* RFC 2960 6.4 Multi-Homed SCTP Endpoints * * By default, an endpoint should always transmit to the primary * path, unless the SCTP user explicitly specifies the * destination transport address (and possibly source transport * address) to use. [If the primary is active but not most recent, * bump the most recently used transport.] */ if ((asoc->peer.primary_path->state == SCTP_ACTIVE || asoc->peer.primary_path->state == SCTP_UNKNOWN) && asoc->peer.primary_path != trans_pri) { trans_sec = trans_pri; trans_pri = asoc->peer.primary_path; } /* We did not find anything useful for a possible retransmission |
5e80a0ccb
|
1294 |
* path; either primary path that we found is the same as |
b82e8f31a
|
1295 1296 1297 1298 1299 1300 |
* the current one, or we didn't generally find an active one. */ if (trans_sec == NULL) trans_sec = trans_pri; /* If we failed to find a usable transport, just camp on the |
aa4a83ee8
|
1301 |
* active or pick a PF iff it's the better choice. |
b82e8f31a
|
1302 1303 |
*/ if (trans_pri == NULL) { |
aa4a83ee8
|
1304 1305 |
trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf); trans_sec = trans_pri; |
b82e8f31a
|
1306 1307 1308 1309 1310 1311 |
} /* Set the active and retran transports. */ asoc->peer.active_path = trans_pri; asoc->peer.retran_path = trans_sec; } |
4c47af4d5
|
1312 1313 1314 |
struct sctp_transport * sctp_assoc_choose_alter_transport(struct sctp_association *asoc, struct sctp_transport *last_sent_to) |
3f7a87d2f
|
1315 |
{ |
9919b455f
|
1316 1317 |
/* If this is the first time packet is sent, use the active path, * else use the retran path. If the last packet was sent over the |
3f7a87d2f
|
1318 1319 |
* retran path, update the retran path and use it. */ |
4c47af4d5
|
1320 |
if (last_sent_to == NULL) { |
1da177e4c
|
1321 |
return asoc->peer.active_path; |
4c47af4d5
|
1322 |
} else { |
9919b455f
|
1323 |
if (last_sent_to == asoc->peer.retran_path) |
1da177e4c
|
1324 |
sctp_assoc_update_retran_path(asoc); |
4c47af4d5
|
1325 |
|
1da177e4c
|
1326 1327 |
return asoc->peer.retran_path; } |
1da177e4c
|
1328 |
} |
2f5e3c9df
|
1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 |
/* Recompute the association's fragmentation point from its path MTU,
 * the user-requested fragment size, and the maximum chunk length.
 */
void sctp_assoc_update_frag_point(struct sctp_association *asoc)
{
	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
				    sctp_datachk_len(&asoc->stream));

	if (asoc->user_frag)
		frag = min_t(int, frag, asoc->user_frag);

	frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
				sctp_datachk_len(&asoc->stream));

	asoc->frag_point = SCTP_TRUNC4(frag);
}
c4b2893da
|
1342 1343 |
/* Record a new association path MTU and refresh the frag point when it
 * actually changed.
 */
void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
{
	if (asoc->pathmtu != pmtu) {
		asoc->pathmtu = pmtu;
		sctp_assoc_update_frag_point(asoc);
	}

	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
		 asoc->pathmtu, asoc->frag_point);
}
1da177e4c
|
1353 1354 1355 |
/* Update the association's pmtu and frag_point by going through all the * transports. This routine is called when a transport's PMTU has changed. */ |
3ebfdf082
|
1356 |
void sctp_assoc_sync_pmtu(struct sctp_association *asoc) |
1da177e4c
|
1357 1358 |
{ struct sctp_transport *t; |
1da177e4c
|
1359 1360 1361 1362 1363 1364 |
__u32 pmtu = 0; if (!asoc) return; /* Get the lowest pmtu of all the transports. */ |
6ff0f871c
|
1365 |
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { |
8a4794914
|
1366 |
if (t->pmtu_pending && t->dst) { |
d805397c3
|
1367 1368 |
sctp_transport_update_pmtu(t, atomic_read(&t->mtu_info)); |
8a4794914
|
1369 1370 |
t->pmtu_pending = 0; } |
52ccb8e90
|
1371 1372 |
if (!pmtu || (t->pathmtu < pmtu)) pmtu = t->pathmtu; |
1da177e4c
|
1373 |
} |
c4b2893da
|
1374 |
sctp_assoc_set_pmtu(asoc, pmtu); |
1da177e4c
|
1375 1376 1377 |
} /* Should we send a SACK to update our peer? */ |
ce4a03db9
|
1378 |
static inline bool sctp_peer_needs_update(struct sctp_association *asoc) |
1da177e4c
|
1379 |
{ |
4e7696d90
|
1380 |
struct net *net = asoc->base.net; |
1da177e4c
|
1381 1382 1383 1384 1385 1386 |
switch (asoc->state) { case SCTP_STATE_ESTABLISHED: case SCTP_STATE_SHUTDOWN_PENDING: case SCTP_STATE_SHUTDOWN_RECEIVED: case SCTP_STATE_SHUTDOWN_SENT: if ((asoc->rwnd > asoc->a_rwnd) && |
90f2f5318
|
1387 |
((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32, |
e1fc3b14f
|
1388 |
(asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift), |
90f2f5318
|
1389 |
asoc->pathmtu))) |
ce4a03db9
|
1390 |
return true; |
1da177e4c
|
1391 1392 1393 1394 |
break; default: break; } |
ce4a03db9
|
1395 |
return false; |
1da177e4c
|
1396 |
} |
362d52040
|
1397 1398 |
/* Increase asoc's rwnd by len and send any window update SACK if needed. */ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len) |
1da177e4c
|
1399 1400 1401 |
{ struct sctp_chunk *sack; struct timer_list *timer; |
362d52040
|
1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 |
if (asoc->rwnd_over) { if (asoc->rwnd_over >= len) { asoc->rwnd_over -= len; } else { asoc->rwnd += (len - asoc->rwnd_over); asoc->rwnd_over = 0; } } else { asoc->rwnd += len; } |
1da177e4c
|
1412 |
|
362d52040
|
1413 1414 1415 1416 1417 |
/* If we had window pressure, start recovering it * once our rwnd had reached the accumulated pressure * threshold. The idea is to recover slowly, but up * to the initial advertised window. */ |
1636098c4
|
1418 |
if (asoc->rwnd_press) { |
362d52040
|
1419 1420 1421 1422 |
int change = min(asoc->pathmtu, asoc->rwnd_press); asoc->rwnd += change; asoc->rwnd_press -= change; } |
4d3c46e68
|
1423 |
|
362d52040
|
1424 1425 1426 1427 |
pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u ", __func__, asoc, len, asoc->rwnd, asoc->rwnd_over, asoc->a_rwnd); |
1da177e4c
|
1428 1429 1430 1431 1432 1433 |
/* Send a window update SACK if the rwnd has increased by at least the * minimum of the association's PMTU and half of the receive buffer. * The algorithm used is similar to the one described in * Section 4.2.3.3 of RFC 1122. */ |
362d52040
|
1434 |
if (sctp_peer_needs_update(asoc)) { |
1da177e4c
|
1435 |
asoc->a_rwnd = asoc->rwnd; |
bb33381d0
|
1436 1437 1438 1439 1440 |
pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u " "a_rwnd:%u ", __func__, asoc, asoc->rwnd, asoc->a_rwnd); |
1da177e4c
|
1441 1442 1443 1444 1445 |
sack = sctp_make_sack(asoc); if (!sack) return; asoc->peer.sack_needed = 0; |
cea8768f3
|
1446 |
sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC); |
1da177e4c
|
1447 1448 1449 |
/* Stop the SACK timer. */ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; |
25cc4ae91
|
1450 |
if (del_timer(timer)) |
1da177e4c
|
1451 1452 1453 |
sctp_association_put(asoc); } } |
362d52040
|
1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 |
/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over += len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}
1da177e4c
|
1495 1496 1497 1498 |
/* Build the bind address list for the association based on info from the * local endpoint and the remote peer. */ |
3182cd84f
|
1499 |
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, |
1c662018d
|
1500 |
enum sctp_scope scope, gfp_t gfp) |
1da177e4c
|
1501 |
{ |
471e39df9
|
1502 |
struct sock *sk = asoc->base.sk; |
1da177e4c
|
1503 1504 1505 1506 1507 |
int flags; /* Use scoping rules to determine the subset of addresses from * the endpoint. */ |
471e39df9
|
1508 1509 1510 |
flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; if (!inet_v6_ipv6only(sk)) flags |= SCTP_ADDR4_ALLOWED; |
1da177e4c
|
1511 1512 1513 1514 |
if (asoc->peer.ipv4_address) flags |= SCTP_ADDR4_PEERSUPP; if (asoc->peer.ipv6_address) flags |= SCTP_ADDR6_PEERSUPP; |
4e7696d90
|
1515 |
return sctp_bind_addr_copy(asoc->base.net, |
4db67e808
|
1516 |
&asoc->base.bind_addr, |
1da177e4c
|
1517 1518 1519 1520 1521 1522 |
&asoc->ep->base.bind_addr, scope, gfp, flags); } /* Build the association's bind address list from the cookie. */ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, |
3182cd84f
|
1523 |
struct sctp_cookie *cookie, |
dd0fc66fb
|
1524 |
gfp_t gfp) |
1da177e4c
|
1525 1526 1527 1528 1529 1530 1531 1532 |
{ int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); int var_size3 = cookie->raw_addr_list_len; __u8 *raw = (__u8 *)cookie->peer_init + var_size2; return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, asoc->ep->base.bind_addr.port, gfp); } |
d808ad9ab
|
1533 1534 |
/* Lookup laddr in the bind address list of an association. */ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, |
1da177e4c
|
1535 1536 |
const union sctp_addr *laddr) { |
559cf710b
|
1537 |
int found = 0; |
1da177e4c
|
1538 |
|
1da177e4c
|
1539 1540 |
if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && sctp_bind_addr_match(&asoc->base.bind_addr, laddr, |
559cf710b
|
1541 |
sctp_sk(asoc->base.sk))) |
1da177e4c
|
1542 |
found = 1; |
1da177e4c
|
1543 |
|
1da177e4c
|
1544 1545 |
return found; } |
07d939677
|
1546 1547 1548 1549 |
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
	 * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
	 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
			       GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
a08de64d0
|
1574 |
|
8b4472cc1
|
1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 |
/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
	struct sctp_chunk *asconf;
	struct sctp_chunk *tmp;

	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
		list_del_init(&asconf->list);
		sctp_chunk_free(asconf);
	}
}
a08de64d0
|
1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 |
/* Free asconf_ack cache */ static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Clean up the ASCONF_ACK queue */ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc) { struct sctp_chunk *ack; struct sctp_chunk *tmp; |
25985edce
|
1604 |
/* We can remove all the entries from the queue up to |
a08de64d0
|
1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 |
* the "Peer-Sequence-Number". */ list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, transmitted_list) { if (ack->subh.addip_hdr->serial == htonl(asoc->peer.addip_serial)) break; list_del_init(&ack->transmitted_list); sctp_chunk_free(ack); } } /* Find the ASCONF_ACK whose serial number matches ASCONF */ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( const struct sctp_association *asoc, __be32 serial) { |
a86998142
|
1623 |
struct sctp_chunk *ack; |
a08de64d0
|
1624 1625 1626 1627 1628 |
/* Walk through the list of cached ASCONF-ACKs and find the * ack chunk whose serial number matches that of the request. */ list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { |
b69040d8e
|
1629 1630 |
if (sctp_chunk_pending(ack)) continue; |
a08de64d0
|
1631 1632 |
if (ack->subh.addip_hdr->serial == serial) { sctp_chunk_hold(ack); |
a86998142
|
1633 |
return ack; |
a08de64d0
|
1634 1635 |
} } |
a86998142
|
1636 |
return NULL; |
a08de64d0
|
1637 |
} |
a000c01e6
|
1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 |
/* Tear down all ASCONF state held by the association. */
void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}