Commit a29a5bd4f5c3e8ba2e89688feab8b01c44f1654f
Committed by
David S. Miller
1 parent
1f485649f5
[SCTP]: Implement SCTP-AUTH initializations.
This patch initializes the AUTH-related members of the generic SCTP structures and provides a way to enable/disable the AUTH extension. Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 5 changed files with 133 additions and 0 deletions Inline Diff
net/sctp/associola.c
1 | /* SCTP kernel reference Implementation | 1 | /* SCTP kernel reference Implementation |
2 | * (C) Copyright IBM Corp. 2001, 2004 | 2 | * (C) Copyright IBM Corp. 2001, 2004 |
3 | * Copyright (c) 1999-2000 Cisco, Inc. | 3 | * Copyright (c) 1999-2000 Cisco, Inc. |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | 4 | * Copyright (c) 1999-2001 Motorola, Inc. |
5 | * Copyright (c) 2001 Intel Corp. | 5 | * Copyright (c) 2001 Intel Corp. |
6 | * Copyright (c) 2001 La Monte H.P. Yarroll | 6 | * Copyright (c) 2001 La Monte H.P. Yarroll |
7 | * | 7 | * |
8 | * This file is part of the SCTP kernel reference Implementation | 8 | * This file is part of the SCTP kernel reference Implementation |
9 | * | 9 | * |
10 | * This module provides the abstraction for an SCTP association. | 10 | * This module provides the abstraction for an SCTP association. |
11 | * | 11 | * |
12 | * The SCTP reference implementation is free software; | 12 | * The SCTP reference implementation is free software; |
13 | * you can redistribute it and/or modify it under the terms of | 13 | * you can redistribute it and/or modify it under the terms of |
14 | * the GNU General Public License as published by | 14 | * the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2, or (at your option) | 15 | * the Free Software Foundation; either version 2, or (at your option) |
16 | * any later version. | 16 | * any later version. |
17 | * | 17 | * |
18 | * The SCTP reference implementation is distributed in the hope that it | 18 | * The SCTP reference implementation is distributed in the hope that it |
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | 19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
20 | * ************************ | 20 | * ************************ |
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
22 | * See the GNU General Public License for more details. | 22 | * See the GNU General Public License for more details. |
23 | * | 23 | * |
24 | * You should have received a copy of the GNU General Public License | 24 | * You should have received a copy of the GNU General Public License |
25 | * along with GNU CC; see the file COPYING. If not, write to | 25 | * along with GNU CC; see the file COPYING. If not, write to |
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | 26 | * the Free Software Foundation, 59 Temple Place - Suite 330, |
27 | * Boston, MA 02111-1307, USA. | 27 | * Boston, MA 02111-1307, USA. |
28 | * | 28 | * |
29 | * Please send any bug reports or fixes you make to the | 29 | * Please send any bug reports or fixes you make to the |
30 | * email address(es): | 30 | * email address(es): |
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | 31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> |
32 | * | 32 | * |
33 | * Or submit a bug report through the following website: | 33 | * Or submit a bug report through the following website: |
34 | * http://www.sf.net/projects/lksctp | 34 | * http://www.sf.net/projects/lksctp |
35 | * | 35 | * |
36 | * Written or modified by: | 36 | * Written or modified by: |
37 | * La Monte H.P. Yarroll <piggy@acm.org> | 37 | * La Monte H.P. Yarroll <piggy@acm.org> |
38 | * Karl Knutson <karl@athena.chicago.il.us> | 38 | * Karl Knutson <karl@athena.chicago.il.us> |
39 | * Jon Grimm <jgrimm@us.ibm.com> | 39 | * Jon Grimm <jgrimm@us.ibm.com> |
40 | * Xingang Guo <xingang.guo@intel.com> | 40 | * Xingang Guo <xingang.guo@intel.com> |
41 | * Hui Huang <hui.huang@nokia.com> | 41 | * Hui Huang <hui.huang@nokia.com> |
42 | * Sridhar Samudrala <sri@us.ibm.com> | 42 | * Sridhar Samudrala <sri@us.ibm.com> |
43 | * Daisy Chang <daisyc@us.ibm.com> | 43 | * Daisy Chang <daisyc@us.ibm.com> |
44 | * Ryan Layer <rmlayer@us.ibm.com> | 44 | * Ryan Layer <rmlayer@us.ibm.com> |
45 | * Kevin Gao <kevin.gao@intel.com> | 45 | * Kevin Gao <kevin.gao@intel.com> |
46 | * | 46 | * |
47 | * Any bugs reported given to us we will try to fix... any fixes shared will | 47 | * Any bugs reported given to us we will try to fix... any fixes shared will |
48 | * be incorporated into the next SCTP release. | 48 | * be incorporated into the next SCTP release. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | #include <linux/types.h> | 51 | #include <linux/types.h> |
52 | #include <linux/fcntl.h> | 52 | #include <linux/fcntl.h> |
53 | #include <linux/poll.h> | 53 | #include <linux/poll.h> |
54 | #include <linux/init.h> | 54 | #include <linux/init.h> |
55 | 55 | ||
56 | #include <linux/slab.h> | 56 | #include <linux/slab.h> |
57 | #include <linux/in.h> | 57 | #include <linux/in.h> |
58 | #include <net/ipv6.h> | 58 | #include <net/ipv6.h> |
59 | #include <net/sctp/sctp.h> | 59 | #include <net/sctp/sctp.h> |
60 | #include <net/sctp/sm.h> | 60 | #include <net/sctp/sm.h> |
61 | 61 | ||
62 | /* Forward declarations for internal functions. */ | 62 | /* Forward declarations for internal functions. */ |
63 | static void sctp_assoc_bh_rcv(struct work_struct *work); | 63 | static void sctp_assoc_bh_rcv(struct work_struct *work); |
64 | 64 | ||
65 | 65 | ||
66 | /* 1st Level Abstractions. */ | 66 | /* 1st Level Abstractions. */ |
67 | 67 | ||
/* Initialize a new association from provided memory.
 *
 * Zeroes @asoc and fills it in from the endpoint @ep and the per-socket
 * defaults on @sk.  Takes a reference on both the endpoint and the socket;
 * both references are dropped on the failure path.  Returns @asoc on
 * success or NULL on failure.
 *
 * NOTE(review): @scope is not consumed in the visible body — presumably
 * used by callers or reserved for address scoping; confirm.
 */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;	/* walks the AUTH RANDOM parameter header */
	int err;

	/* Retrieve the SCTP per socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Init all variables to a known value. */
	memset(asoc, 0, sizeof(struct sctp_association));

	/* Discarding const is appropriate here. */
	asoc->ep = (struct sctp_endpoint *)ep;
	sctp_endpoint_hold(asoc->ep);

	/* Hold the sock. */
	asoc->base.sk = (struct sock *)sk;
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields. */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = 0;
	asoc->base.malloced = 0;

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;

	/* Set these values from the socket values, a conversion between
	 * milliseconds to seconds/microseconds must also be done.
	 */
	asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
	asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
					* 1000;
	/* 0 means "use the path MTU"; negotiated later. */
	asoc->frag_point = 0;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		sp->autoclose * HZ;

	/* Initializes the timers; each fires sctp_timer_events[i] with the
	 * association as its argument.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		init_timer(&asoc->timers[i]);
		asoc->timers[i].function = sctp_timer_events[i];
		asoc->timers[i].data = (unsigned long) asoc;
	}

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Allocate storage for the ssnmap after the inbound and outbound
	 * streams have been negotiated during Init.
	 */
	asoc->ssnmap = NULL;

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	/* Advertised receive window starts equal to the actual one. */
	asoc->a_rwnd = asoc->rwnd;

	asoc->rwnd_over = 0;

	/* Use my own max window until I learn something better. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Set the sndbuf size for transmit. */
	asoc->sndbuf_used = 0;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
	asoc->c.peer_vtag = 0;
	asoc->c.my_ttag   = 0;
	asoc->c.peer_ttag = 0;
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	/* Nothing has been acked yet: all ack points trail the initial TSN. */
	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
	asoc->unack_data = 0;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);

	/* Make an empty list of remote transport addresses. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
	asoc->peer.transport_count = 0;

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk. Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;

	/* Assume that the peer recognizes ASCONF until reported otherwise
	 * via an ERROR chunk.
	 */
	asoc->peer.asconf_capable = 1;

	/* Create an input queue. */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue. */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* Set up the tsn tracking. */
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, 0);

	asoc->need_ecne = 0;

	asoc->assoc_id = 0;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->autoclose = sp->autoclose;

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations: inherit the endpoint's shared keys,
	 * active key id, and supported hmac/chunk lists into this
	 * association.
	 */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;
	/* NOTE(review): this fail_init jump only drops the ep/sk references;
	 * the in/out/ulp queues set up above are not torn down here — confirm
	 * whether that is handled elsewhere or leaks.
	 */

	asoc->active_key_id = ep->active_key_id;
	/* Computed lazily once the peer's AUTH parameters are known. */
	asoc->asoc_shared_key = NULL;

	asoc->default_hmac_id = 0;
	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association: build a RANDOM
	 * parameter header followed by SCTP_AUTH_RANDOM_LENGTH random bytes.
	 */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);
	return NULL;
}
308 | 334 | ||
309 | /* Allocate and initialize a new association */ | 335 | /* Allocate and initialize a new association */ |
310 | struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, | 336 | struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, |
311 | const struct sock *sk, | 337 | const struct sock *sk, |
312 | sctp_scope_t scope, | 338 | sctp_scope_t scope, |
313 | gfp_t gfp) | 339 | gfp_t gfp) |
314 | { | 340 | { |
315 | struct sctp_association *asoc; | 341 | struct sctp_association *asoc; |
316 | 342 | ||
317 | asoc = t_new(struct sctp_association, gfp); | 343 | asoc = t_new(struct sctp_association, gfp); |
318 | if (!asoc) | 344 | if (!asoc) |
319 | goto fail; | 345 | goto fail; |
320 | 346 | ||
321 | if (!sctp_association_init(asoc, ep, sk, scope, gfp)) | 347 | if (!sctp_association_init(asoc, ep, sk, scope, gfp)) |
322 | goto fail_init; | 348 | goto fail_init; |
323 | 349 | ||
324 | asoc->base.malloced = 1; | 350 | asoc->base.malloced = 1; |
325 | SCTP_DBG_OBJCNT_INC(assoc); | 351 | SCTP_DBG_OBJCNT_INC(assoc); |
326 | SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc); | 352 | SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc); |
327 | 353 | ||
328 | return asoc; | 354 | return asoc; |
329 | 355 | ||
330 | fail_init: | 356 | fail_init: |
331 | kfree(asoc); | 357 | kfree(asoc); |
332 | fail: | 358 | fail: |
333 | return NULL; | 359 | return NULL; |
334 | } | 360 | } |
335 | 361 | ||
/* Free this association if possible.  There may still be users, so
 * the actual deallocation may be delayed: this drops the caller's
 * reference; sctp_association_destroy() runs when the last one is put.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother if this is a temporary association.
	 */
	if (!asoc->temp) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = 1;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?  To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 * Each timer that was actually pending held a reference on the
	 * association, so a successful del_timer() must drop it.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (timer_pending(&asoc->timers[i]) &&
		    del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del(pos);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	/* Free any cached ASCONF_ACK chunk. */
	if (asoc->addip_last_asconf_ack)
		sctp_chunk_free(asoc->addip_last_asconf_ack);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);

	/* AUTH - Free the endpoint shared keys copied at init time. */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key (drops a reference). */
	sctp_auth_key_put(asoc->asoc_shared_key);

	/* Drop the caller's reference; destroy runs when it hits zero. */
	sctp_association_put(asoc);
}
412 | 444 | ||
/* Cleanup and free up an association.
 *
 * Called from sctp_association_put() when the last reference is dropped.
 * The association must already be marked dead (base.dead set) by
 * sctp_association_free().
 */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);

	/* Drop the references taken on the endpoint and owning socket
	 * when the association was created.
	 */
	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	/* Remove the association from the global id lookup table, if it
	 * was ever assigned an id.
	 */
	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	/* All receive memory charged to this association should have
	 * been released by now.
	 */
	BUG_TRAP(!atomic_read(&asoc->rmem_alloc));

	/* Only free the memory if it was dynamically allocated;
	 * embedded associations are freed by their container.
	 */
	if (asoc->base.malloced) {
		kfree(asoc);
		SCTP_DBG_OBJCNT_DEC(assoc);
	}
}
434 | 466 | ||
/* Change the primary destination address for the peer.
 *
 * @asoc:      the association whose primary path is changing
 * @transport: the transport to make primary
 *
 * Also updates the SFR-CACC changeover state on the new primary so that
 * spurious fast retransmissions can be detected after the switch.
 */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 */
	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = 1;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = 1;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}
476 | 508 | ||
/* Remove a transport from an association.
 *
 * @asoc: the association to remove the transport from
 * @peer: the transport to remove; it is freed before returning
 *
 * Any cached path pointers (primary, active, last_data_from, retran,
 * init_last_sent_to) that reference @peer are repointed first so the
 * association never holds a dangling transport pointer.
 *
 * NOTE(review): after list_del() the code takes the list head's ->next
 * unconditionally — this assumes at least one other transport remains
 * on the list; verify callers never remove the last peer this way.
 */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head *pos;
	struct sctp_transport *transport;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&peer->ipaddr),
				 ntohs(peer->ipaddr.v4.sin_port));

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	asoc->peer.transport_count--;

	/* Drop the association's reference; the transport is gone. */
	sctp_transport_free(peer);
}
523 | 555 | ||
/* Add a transport address to an association.
 *
 * @asoc:       association gaining the new peer address
 * @addr:       the peer transport address to add
 * @gfp:        allocation flags for the new transport
 * @peer_state: initial state for the transport (e.g. SCTP_ACTIVE,
 *              SCTP_UNCONFIRMED)
 *
 * Returns the new transport, the existing one if @addr is already known
 * (possibly with its state upgraded from SCTP_UNKNOWN), or NULL if the
 * transport could not be allocated.
 */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
				 " port: %d state:%d\n",
				 asoc,
				 addr,
				 port,
				 peer_state);

	/* Set the port if it has not been set yet. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate; if so, return the existing
	 * transport, promoting its state from SCTP_UNKNOWN if needed.
	 */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		if (peer->state == SCTP_UNKNOWN) {
			if (peer_state == SCTP_ACTIVE)
				peer->state = SCTP_ACTIVE;
			if (peer_state == SCTP_UNCONFIRMED)
				peer->state = SCTP_UNCONFIRMED;
		}
		return peer;
	}

	peer = sctp_transport_new(addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_ENABLE)
		sctp_transport_pmtu(peer);
	else if (asoc->pathmtu)
		peer->pathmtu = asoc->pathmtu;
	else
		peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
			  "%d\n", asoc, asoc->pathmtu);

	/* Recompute the fragmentation point from the (possibly lowered)
	 * association PMTU.
	 */
	asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *	min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc. */
	list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	/* Keep the retransmission path distinct from the active path
	 * when possible.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
658 | 690 | ||
659 | /* Delete a transport address from an association. */ | 691 | /* Delete a transport address from an association. */ |
660 | void sctp_assoc_del_peer(struct sctp_association *asoc, | 692 | void sctp_assoc_del_peer(struct sctp_association *asoc, |
661 | const union sctp_addr *addr) | 693 | const union sctp_addr *addr) |
662 | { | 694 | { |
663 | struct list_head *pos; | 695 | struct list_head *pos; |
664 | struct list_head *temp; | 696 | struct list_head *temp; |
665 | struct sctp_transport *transport; | 697 | struct sctp_transport *transport; |
666 | 698 | ||
667 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | 699 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { |
668 | transport = list_entry(pos, struct sctp_transport, transports); | 700 | transport = list_entry(pos, struct sctp_transport, transports); |
669 | if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { | 701 | if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { |
670 | /* Do book keeping for removing the peer and free it. */ | 702 | /* Do book keeping for removing the peer and free it. */ |
671 | sctp_assoc_rm_peer(asoc, transport); | 703 | sctp_assoc_rm_peer(asoc, transport); |
672 | break; | 704 | break; |
673 | } | 705 | } |
674 | } | 706 | } |
675 | } | 707 | } |
676 | 708 | ||
677 | /* Lookup a transport by address. */ | 709 | /* Lookup a transport by address. */ |
678 | struct sctp_transport *sctp_assoc_lookup_paddr( | 710 | struct sctp_transport *sctp_assoc_lookup_paddr( |
679 | const struct sctp_association *asoc, | 711 | const struct sctp_association *asoc, |
680 | const union sctp_addr *address) | 712 | const union sctp_addr *address) |
681 | { | 713 | { |
682 | struct sctp_transport *t; | 714 | struct sctp_transport *t; |
683 | struct list_head *pos; | 715 | struct list_head *pos; |
684 | 716 | ||
685 | /* Cycle through all transports searching for a peer address. */ | 717 | /* Cycle through all transports searching for a peer address. */ |
686 | 718 | ||
687 | list_for_each(pos, &asoc->peer.transport_addr_list) { | 719 | list_for_each(pos, &asoc->peer.transport_addr_list) { |
688 | t = list_entry(pos, struct sctp_transport, transports); | 720 | t = list_entry(pos, struct sctp_transport, transports); |
689 | if (sctp_cmp_addr_exact(address, &t->ipaddr)) | 721 | if (sctp_cmp_addr_exact(address, &t->ipaddr)) |
690 | return t; | 722 | return t; |
691 | } | 723 | } |
692 | 724 | ||
693 | return NULL; | 725 | return NULL; |
694 | } | 726 | } |
695 | 727 | ||
696 | /* Engage in transport control operations. | 728 | /* Engage in transport control operations. |
697 | * Mark the transport up or down and send a notification to the user. | 729 | * Mark the transport up or down and send a notification to the user. |
698 | * Select and update the new active and retran paths. | 730 | * Select and update the new active and retran paths. |
699 | */ | 731 | */ |
700 | void sctp_assoc_control_transport(struct sctp_association *asoc, | 732 | void sctp_assoc_control_transport(struct sctp_association *asoc, |
701 | struct sctp_transport *transport, | 733 | struct sctp_transport *transport, |
702 | sctp_transport_cmd_t command, | 734 | sctp_transport_cmd_t command, |
703 | sctp_sn_error_t error) | 735 | sctp_sn_error_t error) |
704 | { | 736 | { |
705 | struct sctp_transport *t = NULL; | 737 | struct sctp_transport *t = NULL; |
706 | struct sctp_transport *first; | 738 | struct sctp_transport *first; |
707 | struct sctp_transport *second; | 739 | struct sctp_transport *second; |
708 | struct sctp_ulpevent *event; | 740 | struct sctp_ulpevent *event; |
709 | struct sockaddr_storage addr; | 741 | struct sockaddr_storage addr; |
710 | struct list_head *pos; | 742 | struct list_head *pos; |
711 | int spc_state = 0; | 743 | int spc_state = 0; |
712 | 744 | ||
713 | /* Record the transition on the transport. */ | 745 | /* Record the transition on the transport. */ |
714 | switch (command) { | 746 | switch (command) { |
715 | case SCTP_TRANSPORT_UP: | 747 | case SCTP_TRANSPORT_UP: |
716 | /* If we are moving from UNCONFIRMED state due | 748 | /* If we are moving from UNCONFIRMED state due |
717 | * to heartbeat success, report the SCTP_ADDR_CONFIRMED | 749 | * to heartbeat success, report the SCTP_ADDR_CONFIRMED |
718 | * state to the user, otherwise report SCTP_ADDR_AVAILABLE. | 750 | * state to the user, otherwise report SCTP_ADDR_AVAILABLE. |
719 | */ | 751 | */ |
720 | if (SCTP_UNCONFIRMED == transport->state && | 752 | if (SCTP_UNCONFIRMED == transport->state && |
721 | SCTP_HEARTBEAT_SUCCESS == error) | 753 | SCTP_HEARTBEAT_SUCCESS == error) |
722 | spc_state = SCTP_ADDR_CONFIRMED; | 754 | spc_state = SCTP_ADDR_CONFIRMED; |
723 | else | 755 | else |
724 | spc_state = SCTP_ADDR_AVAILABLE; | 756 | spc_state = SCTP_ADDR_AVAILABLE; |
725 | transport->state = SCTP_ACTIVE; | 757 | transport->state = SCTP_ACTIVE; |
726 | break; | 758 | break; |
727 | 759 | ||
728 | case SCTP_TRANSPORT_DOWN: | 760 | case SCTP_TRANSPORT_DOWN: |
729 | /* if the transort was never confirmed, do not transition it | 761 | /* if the transort was never confirmed, do not transition it |
730 | * to inactive state. | 762 | * to inactive state. |
731 | */ | 763 | */ |
732 | if (transport->state != SCTP_UNCONFIRMED) | 764 | if (transport->state != SCTP_UNCONFIRMED) |
733 | transport->state = SCTP_INACTIVE; | 765 | transport->state = SCTP_INACTIVE; |
734 | 766 | ||
735 | spc_state = SCTP_ADDR_UNREACHABLE; | 767 | spc_state = SCTP_ADDR_UNREACHABLE; |
736 | break; | 768 | break; |
737 | 769 | ||
738 | default: | 770 | default: |
739 | return; | 771 | return; |
740 | } | 772 | } |
741 | 773 | ||
742 | /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the | 774 | /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the |
743 | * user. | 775 | * user. |
744 | */ | 776 | */ |
745 | memset(&addr, 0, sizeof(struct sockaddr_storage)); | 777 | memset(&addr, 0, sizeof(struct sockaddr_storage)); |
746 | memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); | 778 | memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); |
747 | event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, | 779 | event = sctp_ulpevent_make_peer_addr_change(asoc, &addr, |
748 | 0, spc_state, error, GFP_ATOMIC); | 780 | 0, spc_state, error, GFP_ATOMIC); |
749 | if (event) | 781 | if (event) |
750 | sctp_ulpq_tail_event(&asoc->ulpq, event); | 782 | sctp_ulpq_tail_event(&asoc->ulpq, event); |
751 | 783 | ||
752 | /* Select new active and retran paths. */ | 784 | /* Select new active and retran paths. */ |
753 | 785 | ||
754 | /* Look for the two most recently used active transports. | 786 | /* Look for the two most recently used active transports. |
755 | * | 787 | * |
756 | * This code produces the wrong ordering whenever jiffies | 788 | * This code produces the wrong ordering whenever jiffies |
757 | * rolls over, but we still get usable transports, so we don't | 789 | * rolls over, but we still get usable transports, so we don't |
758 | * worry about it. | 790 | * worry about it. |
759 | */ | 791 | */ |
760 | first = NULL; second = NULL; | 792 | first = NULL; second = NULL; |
761 | 793 | ||
762 | list_for_each(pos, &asoc->peer.transport_addr_list) { | 794 | list_for_each(pos, &asoc->peer.transport_addr_list) { |
763 | t = list_entry(pos, struct sctp_transport, transports); | 795 | t = list_entry(pos, struct sctp_transport, transports); |
764 | 796 | ||
765 | if ((t->state == SCTP_INACTIVE) || | 797 | if ((t->state == SCTP_INACTIVE) || |
766 | (t->state == SCTP_UNCONFIRMED)) | 798 | (t->state == SCTP_UNCONFIRMED)) |
767 | continue; | 799 | continue; |
768 | if (!first || t->last_time_heard > first->last_time_heard) { | 800 | if (!first || t->last_time_heard > first->last_time_heard) { |
769 | second = first; | 801 | second = first; |
770 | first = t; | 802 | first = t; |
771 | } | 803 | } |
772 | if (!second || t->last_time_heard > second->last_time_heard) | 804 | if (!second || t->last_time_heard > second->last_time_heard) |
773 | second = t; | 805 | second = t; |
774 | } | 806 | } |
775 | 807 | ||
776 | /* RFC 2960 6.4 Multi-Homed SCTP Endpoints | 808 | /* RFC 2960 6.4 Multi-Homed SCTP Endpoints |
777 | * | 809 | * |
778 | * By default, an endpoint should always transmit to the | 810 | * By default, an endpoint should always transmit to the |
779 | * primary path, unless the SCTP user explicitly specifies the | 811 | * primary path, unless the SCTP user explicitly specifies the |
780 | * destination transport address (and possibly source | 812 | * destination transport address (and possibly source |
781 | * transport address) to use. | 813 | * transport address) to use. |
782 | * | 814 | * |
783 | * [If the primary is active but not most recent, bump the most | 815 | * [If the primary is active but not most recent, bump the most |
784 | * recently used transport.] | 816 | * recently used transport.] |
785 | */ | 817 | */ |
786 | if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || | 818 | if (((asoc->peer.primary_path->state == SCTP_ACTIVE) || |
787 | (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && | 819 | (asoc->peer.primary_path->state == SCTP_UNKNOWN)) && |
788 | first != asoc->peer.primary_path) { | 820 | first != asoc->peer.primary_path) { |
789 | second = first; | 821 | second = first; |
790 | first = asoc->peer.primary_path; | 822 | first = asoc->peer.primary_path; |
791 | } | 823 | } |
792 | 824 | ||
793 | /* If we failed to find a usable transport, just camp on the | 825 | /* If we failed to find a usable transport, just camp on the |
794 | * primary, even if it is inactive. | 826 | * primary, even if it is inactive. |
795 | */ | 827 | */ |
796 | if (!first) { | 828 | if (!first) { |
797 | first = asoc->peer.primary_path; | 829 | first = asoc->peer.primary_path; |
798 | second = asoc->peer.primary_path; | 830 | second = asoc->peer.primary_path; |
799 | } | 831 | } |
800 | 832 | ||
801 | /* Set the active and retran transports. */ | 833 | /* Set the active and retran transports. */ |
802 | asoc->peer.active_path = first; | 834 | asoc->peer.active_path = first; |
803 | asoc->peer.retran_path = second; | 835 | asoc->peer.retran_path = second; |
804 | } | 836 | } |
805 | 837 | ||
/* Hold a reference to an association.
 *
 * Pairs with sctp_association_put(); the association is destroyed when
 * the last reference is released.
 */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}
811 | 843 | ||
812 | /* Release a reference to an association and cleanup | 844 | /* Release a reference to an association and cleanup |
813 | * if there are no more references. | 845 | * if there are no more references. |
814 | */ | 846 | */ |
815 | void sctp_association_put(struct sctp_association *asoc) | 847 | void sctp_association_put(struct sctp_association *asoc) |
816 | { | 848 | { |
817 | if (atomic_dec_and_test(&asoc->base.refcnt)) | 849 | if (atomic_dec_and_test(&asoc->base.refcnt)) |
818 | sctp_association_destroy(asoc); | 850 | sctp_association_destroy(asoc); |
819 | } | 851 | } |
820 | 852 | ||
821 | /* Allocate the next TSN, Transmission Sequence Number, for the given | 853 | /* Allocate the next TSN, Transmission Sequence Number, for the given |
822 | * association. | 854 | * association. |
823 | */ | 855 | */ |
824 | __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) | 856 | __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) |
825 | { | 857 | { |
826 | /* From Section 1.6 Serial Number Arithmetic: | 858 | /* From Section 1.6 Serial Number Arithmetic: |
827 | * Transmission Sequence Numbers wrap around when they reach | 859 | * Transmission Sequence Numbers wrap around when they reach |
828 | * 2**32 - 1. That is, the next TSN a DATA chunk MUST use | 860 | * 2**32 - 1. That is, the next TSN a DATA chunk MUST use |
829 | * after transmitting TSN = 2*32 - 1 is TSN = 0. | 861 | * after transmitting TSN = 2*32 - 1 is TSN = 0. |
830 | */ | 862 | */ |
831 | __u32 retval = asoc->next_tsn; | 863 | __u32 retval = asoc->next_tsn; |
832 | asoc->next_tsn++; | 864 | asoc->next_tsn++; |
833 | asoc->unack_data++; | 865 | asoc->unack_data++; |
834 | 866 | ||
835 | return retval; | 867 | return retval; |
836 | } | 868 | } |
837 | 869 | ||
838 | /* Compare two addresses to see if they match. Wildcard addresses | 870 | /* Compare two addresses to see if they match. Wildcard addresses |
839 | * only match themselves. | 871 | * only match themselves. |
840 | */ | 872 | */ |
841 | int sctp_cmp_addr_exact(const union sctp_addr *ss1, | 873 | int sctp_cmp_addr_exact(const union sctp_addr *ss1, |
842 | const union sctp_addr *ss2) | 874 | const union sctp_addr *ss2) |
843 | { | 875 | { |
844 | struct sctp_af *af; | 876 | struct sctp_af *af; |
845 | 877 | ||
846 | af = sctp_get_af_specific(ss1->sa.sa_family); | 878 | af = sctp_get_af_specific(ss1->sa.sa_family); |
847 | if (unlikely(!af)) | 879 | if (unlikely(!af)) |
848 | return 0; | 880 | return 0; |
849 | 881 | ||
850 | return af->cmp_addr(ss1, ss2); | 882 | return af->cmp_addr(ss1, ss2); |
851 | } | 883 | } |
852 | 884 | ||
853 | /* Return an ecne chunk to get prepended to a packet. | 885 | /* Return an ecne chunk to get prepended to a packet. |
854 | * Note: We are sly and return a shared, prealloced chunk. FIXME: | 886 | * Note: We are sly and return a shared, prealloced chunk. FIXME: |
855 | * No we don't, but we could/should. | 887 | * No we don't, but we could/should. |
856 | */ | 888 | */ |
857 | struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) | 889 | struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) |
858 | { | 890 | { |
859 | struct sctp_chunk *chunk; | 891 | struct sctp_chunk *chunk; |
860 | 892 | ||
861 | /* Send ECNE if needed. | 893 | /* Send ECNE if needed. |
862 | * Not being able to allocate a chunk here is not deadly. | 894 | * Not being able to allocate a chunk here is not deadly. |
863 | */ | 895 | */ |
864 | if (asoc->need_ecne) | 896 | if (asoc->need_ecne) |
865 | chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn); | 897 | chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn); |
866 | else | 898 | else |
867 | chunk = NULL; | 899 | chunk = NULL; |
868 | 900 | ||
869 | return chunk; | 901 | return chunk; |
870 | } | 902 | } |
871 | 903 | ||
872 | /* | 904 | /* |
873 | * Find which transport this TSN was sent on. | 905 | * Find which transport this TSN was sent on. |
874 | */ | 906 | */ |
875 | struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | 907 | struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, |
876 | __u32 tsn) | 908 | __u32 tsn) |
877 | { | 909 | { |
878 | struct sctp_transport *active; | 910 | struct sctp_transport *active; |
879 | struct sctp_transport *match; | 911 | struct sctp_transport *match; |
880 | struct list_head *entry, *pos; | 912 | struct list_head *entry, *pos; |
881 | struct sctp_transport *transport; | 913 | struct sctp_transport *transport; |
882 | struct sctp_chunk *chunk; | 914 | struct sctp_chunk *chunk; |
883 | __be32 key = htonl(tsn); | 915 | __be32 key = htonl(tsn); |
884 | 916 | ||
885 | match = NULL; | 917 | match = NULL; |
886 | 918 | ||
887 | /* | 919 | /* |
888 | * FIXME: In general, find a more efficient data structure for | 920 | * FIXME: In general, find a more efficient data structure for |
889 | * searching. | 921 | * searching. |
890 | */ | 922 | */ |
891 | 923 | ||
892 | /* | 924 | /* |
893 | * The general strategy is to search each transport's transmitted | 925 | * The general strategy is to search each transport's transmitted |
894 | * list. Return which transport this TSN lives on. | 926 | * list. Return which transport this TSN lives on. |
895 | * | 927 | * |
896 | * Let's be hopeful and check the active_path first. | 928 | * Let's be hopeful and check the active_path first. |
897 | * Another optimization would be to know if there is only one | 929 | * Another optimization would be to know if there is only one |
898 | * outbound path and not have to look for the TSN at all. | 930 | * outbound path and not have to look for the TSN at all. |
899 | * | 931 | * |
900 | */ | 932 | */ |
901 | 933 | ||
902 | active = asoc->peer.active_path; | 934 | active = asoc->peer.active_path; |
903 | 935 | ||
904 | list_for_each(entry, &active->transmitted) { | 936 | list_for_each(entry, &active->transmitted) { |
905 | chunk = list_entry(entry, struct sctp_chunk, transmitted_list); | 937 | chunk = list_entry(entry, struct sctp_chunk, transmitted_list); |
906 | 938 | ||
907 | if (key == chunk->subh.data_hdr->tsn) { | 939 | if (key == chunk->subh.data_hdr->tsn) { |
908 | match = active; | 940 | match = active; |
909 | goto out; | 941 | goto out; |
910 | } | 942 | } |
911 | } | 943 | } |
912 | 944 | ||
913 | /* If not found, go search all the other transports. */ | 945 | /* If not found, go search all the other transports. */ |
914 | list_for_each(pos, &asoc->peer.transport_addr_list) { | 946 | list_for_each(pos, &asoc->peer.transport_addr_list) { |
915 | transport = list_entry(pos, struct sctp_transport, transports); | 947 | transport = list_entry(pos, struct sctp_transport, transports); |
916 | 948 | ||
917 | if (transport == active) | 949 | if (transport == active) |
918 | break; | 950 | break; |
919 | list_for_each(entry, &transport->transmitted) { | 951 | list_for_each(entry, &transport->transmitted) { |
920 | chunk = list_entry(entry, struct sctp_chunk, | 952 | chunk = list_entry(entry, struct sctp_chunk, |
921 | transmitted_list); | 953 | transmitted_list); |
922 | if (key == chunk->subh.data_hdr->tsn) { | 954 | if (key == chunk->subh.data_hdr->tsn) { |
923 | match = transport; | 955 | match = transport; |
924 | goto out; | 956 | goto out; |
925 | } | 957 | } |
926 | } | 958 | } |
927 | } | 959 | } |
928 | out: | 960 | out: |
929 | return match; | 961 | return match; |
930 | } | 962 | } |
931 | 963 | ||
932 | /* Is this the association we are looking for? */ | 964 | /* Is this the association we are looking for? */ |
933 | struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, | 965 | struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, |
934 | const union sctp_addr *laddr, | 966 | const union sctp_addr *laddr, |
935 | const union sctp_addr *paddr) | 967 | const union sctp_addr *paddr) |
936 | { | 968 | { |
937 | struct sctp_transport *transport; | 969 | struct sctp_transport *transport; |
938 | 970 | ||
939 | if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && | 971 | if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) && |
940 | (htons(asoc->peer.port) == paddr->v4.sin_port)) { | 972 | (htons(asoc->peer.port) == paddr->v4.sin_port)) { |
941 | transport = sctp_assoc_lookup_paddr(asoc, paddr); | 973 | transport = sctp_assoc_lookup_paddr(asoc, paddr); |
942 | if (!transport) | 974 | if (!transport) |
943 | goto out; | 975 | goto out; |
944 | 976 | ||
945 | if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr, | 977 | if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr, |
946 | sctp_sk(asoc->base.sk))) | 978 | sctp_sk(asoc->base.sk))) |
947 | goto out; | 979 | goto out; |
948 | } | 980 | } |
949 | transport = NULL; | 981 | transport = NULL; |
950 | 982 | ||
951 | out: | 983 | out: |
952 | return transport; | 984 | return transport; |
953 | } | 985 | } |
954 | 986 | ||
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 *
 * Pops every chunk queued on the association's inqueue and runs each one
 * through the SCTP state machine.  Processing stops early if the state
 * machine frees the association (base.dead) in response to a chunk.
 */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	/* Recover the owning association from the embedded work struct. */
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sock *sk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;
	sk = asoc->base.sk;

	inqueue = &asoc->base.inqueue;
	/* Take an extra reference so the association outlives the loop
	 * even if the state machine drops the last caller reference.
	 */
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		/* Snapshot the state before dispatch; sctp_do_sm() may
		 * transition the association.
		 */
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else
			SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);

		/* Any traffic counts as evidence the transport is alive. */
		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine. */
		error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
1006 | 1038 | ||
/* This routine moves an association from its old sk to a new sk.
 *
 * Unlinks the association from the old endpoint, swaps the endpoint and
 * socket references over to the new socket, and relinks the association
 * on the new endpoint.
 *
 * NOTE(review): the old endpoint/socket references are dropped before
 * the new ones are taken; presumably the caller holds its own reference
 * on the association and appropriate socket locks across this window —
 * confirm at the call sites.
 */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}
1037 | 1069 | ||
1038 | /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ | 1070 | /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ |
1039 | void sctp_assoc_update(struct sctp_association *asoc, | 1071 | void sctp_assoc_update(struct sctp_association *asoc, |
1040 | struct sctp_association *new) | 1072 | struct sctp_association *new) |
1041 | { | 1073 | { |
1042 | struct sctp_transport *trans; | 1074 | struct sctp_transport *trans; |
1043 | struct list_head *pos, *temp; | 1075 | struct list_head *pos, *temp; |
1044 | 1076 | ||
1045 | /* Copy in new parameters of peer. */ | 1077 | /* Copy in new parameters of peer. */ |
1046 | asoc->c = new->c; | 1078 | asoc->c = new->c; |
1047 | asoc->peer.rwnd = new->peer.rwnd; | 1079 | asoc->peer.rwnd = new->peer.rwnd; |
1048 | asoc->peer.sack_needed = new->peer.sack_needed; | 1080 | asoc->peer.sack_needed = new->peer.sack_needed; |
1049 | asoc->peer.i = new->peer.i; | 1081 | asoc->peer.i = new->peer.i; |
1050 | sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, | 1082 | sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, |
1051 | asoc->peer.i.initial_tsn); | 1083 | asoc->peer.i.initial_tsn); |
1052 | 1084 | ||
1053 | /* Remove any peer addresses not present in the new association. */ | 1085 | /* Remove any peer addresses not present in the new association. */ |
1054 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | 1086 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { |
1055 | trans = list_entry(pos, struct sctp_transport, transports); | 1087 | trans = list_entry(pos, struct sctp_transport, transports); |
1056 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) | 1088 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) |
1057 | sctp_assoc_del_peer(asoc, &trans->ipaddr); | 1089 | sctp_assoc_del_peer(asoc, &trans->ipaddr); |
1058 | 1090 | ||
1059 | if (asoc->state >= SCTP_STATE_ESTABLISHED) | 1091 | if (asoc->state >= SCTP_STATE_ESTABLISHED) |
1060 | sctp_transport_reset(trans); | 1092 | sctp_transport_reset(trans); |
1061 | } | 1093 | } |
1062 | 1094 | ||
1063 | /* If the case is A (association restart), use | 1095 | /* If the case is A (association restart), use |
1064 | * initial_tsn as next_tsn. If the case is B, use | 1096 | * initial_tsn as next_tsn. If the case is B, use |
1065 | * current next_tsn in case data sent to peer | 1097 | * current next_tsn in case data sent to peer |
1066 | * has been discarded and needs retransmission. | 1098 | * has been discarded and needs retransmission. |
1067 | */ | 1099 | */ |
1068 | if (asoc->state >= SCTP_STATE_ESTABLISHED) { | 1100 | if (asoc->state >= SCTP_STATE_ESTABLISHED) { |
1069 | asoc->next_tsn = new->next_tsn; | 1101 | asoc->next_tsn = new->next_tsn; |
1070 | asoc->ctsn_ack_point = new->ctsn_ack_point; | 1102 | asoc->ctsn_ack_point = new->ctsn_ack_point; |
1071 | asoc->adv_peer_ack_point = new->adv_peer_ack_point; | 1103 | asoc->adv_peer_ack_point = new->adv_peer_ack_point; |
1072 | 1104 | ||
1073 | /* Reinitialize SSN for both local streams | 1105 | /* Reinitialize SSN for both local streams |
1074 | * and peer's streams. | 1106 | * and peer's streams. |
1075 | */ | 1107 | */ |
1076 | sctp_ssnmap_clear(asoc->ssnmap); | 1108 | sctp_ssnmap_clear(asoc->ssnmap); |
1077 | 1109 | ||
1078 | /* Flush the ULP reassembly and ordered queue. | 1110 | /* Flush the ULP reassembly and ordered queue. |
1079 | * Any data there will now be stale and will | 1111 | * Any data there will now be stale and will |
1080 | * cause problems. | 1112 | * cause problems. |
1081 | */ | 1113 | */ |
1082 | sctp_ulpq_flush(&asoc->ulpq); | 1114 | sctp_ulpq_flush(&asoc->ulpq); |
1083 | 1115 | ||
1084 | /* reset the overall association error count so | 1116 | /* reset the overall association error count so |
1085 | * that the restarted association doesn't get torn | 1117 | * that the restarted association doesn't get torn |
1086 | * down on the next retransmission timer. | 1118 | * down on the next retransmission timer. |
1087 | */ | 1119 | */ |
1088 | asoc->overall_error_count = 0; | 1120 | asoc->overall_error_count = 0; |
1089 | 1121 | ||
1090 | } else { | 1122 | } else { |
1091 | /* Add any peer addresses from the new association. */ | 1123 | /* Add any peer addresses from the new association. */ |
1092 | list_for_each(pos, &new->peer.transport_addr_list) { | 1124 | list_for_each(pos, &new->peer.transport_addr_list) { |
1093 | trans = list_entry(pos, struct sctp_transport, | 1125 | trans = list_entry(pos, struct sctp_transport, |
1094 | transports); | 1126 | transports); |
1095 | if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) | 1127 | if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) |
1096 | sctp_assoc_add_peer(asoc, &trans->ipaddr, | 1128 | sctp_assoc_add_peer(asoc, &trans->ipaddr, |
1097 | GFP_ATOMIC, trans->state); | 1129 | GFP_ATOMIC, trans->state); |
1098 | } | 1130 | } |
1099 | 1131 | ||
1100 | asoc->ctsn_ack_point = asoc->next_tsn - 1; | 1132 | asoc->ctsn_ack_point = asoc->next_tsn - 1; |
1101 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; | 1133 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; |
1102 | if (!asoc->ssnmap) { | 1134 | if (!asoc->ssnmap) { |
1103 | /* Move the ssnmap. */ | 1135 | /* Move the ssnmap. */ |
1104 | asoc->ssnmap = new->ssnmap; | 1136 | asoc->ssnmap = new->ssnmap; |
1105 | new->ssnmap = NULL; | 1137 | new->ssnmap = NULL; |
1106 | } | 1138 | } |
1107 | 1139 | ||
1108 | if (!asoc->assoc_id) { | 1140 | if (!asoc->assoc_id) { |
1109 | /* get a new association id since we don't have one | 1141 | /* get a new association id since we don't have one |
1110 | * yet. | 1142 | * yet. |
1111 | */ | 1143 | */ |
1112 | sctp_assoc_set_id(asoc, GFP_ATOMIC); | 1144 | sctp_assoc_set_id(asoc, GFP_ATOMIC); |
1113 | } | 1145 | } |
1114 | } | 1146 | } |
1147 | |||
1148 | /* SCTP-AUTH: XXX something needs to be done here*/ | ||
1115 | } | 1149 | } |
1116 | 1150 | ||
/* Update the retran path for sending a retransmitted packet.
 * Round-robin through the active transports, else round-robin
 * through the inactive transports as this is the next best thing
 * we can try.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	/* 'next' remembers the first non-active transport seen, as a
	 * fallback if no ACTIVE/UNKNOWN transport is found.
	 */
	next = NULL;

	while (1) {
		/* Skip the head. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* Try to find an active transport. */

		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (!next)
				next = t;
		}

		/* We have exhausted the list, but didn't find any
		 * other active transports.  If so, use the next
		 * transport.  Note that 'next' is always set by the
		 * time we wrap back to retran_path, since the current
		 * (inactive) retran_path itself is recorded above.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}
	}

	asoc->peer.retran_path = t;

	SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
				 " %p addr: ",
				 " port: %d\n",
				 asoc,
				 (&t->ipaddr),
				 ntohs(t->ipaddr.v4.sin_port));
}
1174 | 1208 | ||
1175 | /* Choose the transport for sending a INIT packet. */ | 1209 | /* Choose the transport for sending a INIT packet. */ |
1176 | struct sctp_transport *sctp_assoc_choose_init_transport( | 1210 | struct sctp_transport *sctp_assoc_choose_init_transport( |
1177 | struct sctp_association *asoc) | 1211 | struct sctp_association *asoc) |
1178 | { | 1212 | { |
1179 | struct sctp_transport *t; | 1213 | struct sctp_transport *t; |
1180 | 1214 | ||
1181 | /* Use the retran path. If the last INIT was sent over the | 1215 | /* Use the retran path. If the last INIT was sent over the |
1182 | * retran path, update the retran path and use it. | 1216 | * retran path, update the retran path and use it. |
1183 | */ | 1217 | */ |
1184 | if (!asoc->init_last_sent_to) { | 1218 | if (!asoc->init_last_sent_to) { |
1185 | t = asoc->peer.active_path; | 1219 | t = asoc->peer.active_path; |
1186 | } else { | 1220 | } else { |
1187 | if (asoc->init_last_sent_to == asoc->peer.retran_path) | 1221 | if (asoc->init_last_sent_to == asoc->peer.retran_path) |
1188 | sctp_assoc_update_retran_path(asoc); | 1222 | sctp_assoc_update_retran_path(asoc); |
1189 | t = asoc->peer.retran_path; | 1223 | t = asoc->peer.retran_path; |
1190 | } | 1224 | } |
1191 | 1225 | ||
1192 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" | 1226 | SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" |
1193 | " %p addr: ", | 1227 | " %p addr: ", |
1194 | " port: %d\n", | 1228 | " port: %d\n", |
1195 | asoc, | 1229 | asoc, |
1196 | (&t->ipaddr), | 1230 | (&t->ipaddr), |
1197 | ntohs(t->ipaddr.v4.sin_port)); | 1231 | ntohs(t->ipaddr.v4.sin_port)); |
1198 | 1232 | ||
1199 | return t; | 1233 | return t; |
1200 | } | 1234 | } |
1201 | 1235 | ||
1202 | /* Choose the transport for sending a SHUTDOWN packet. */ | 1236 | /* Choose the transport for sending a SHUTDOWN packet. */ |
1203 | struct sctp_transport *sctp_assoc_choose_shutdown_transport( | 1237 | struct sctp_transport *sctp_assoc_choose_shutdown_transport( |
1204 | struct sctp_association *asoc) | 1238 | struct sctp_association *asoc) |
1205 | { | 1239 | { |
1206 | /* If this is the first time SHUTDOWN is sent, use the active path, | 1240 | /* If this is the first time SHUTDOWN is sent, use the active path, |
1207 | * else use the retran path. If the last SHUTDOWN was sent over the | 1241 | * else use the retran path. If the last SHUTDOWN was sent over the |
1208 | * retran path, update the retran path and use it. | 1242 | * retran path, update the retran path and use it. |
1209 | */ | 1243 | */ |
1210 | if (!asoc->shutdown_last_sent_to) | 1244 | if (!asoc->shutdown_last_sent_to) |
1211 | return asoc->peer.active_path; | 1245 | return asoc->peer.active_path; |
1212 | else { | 1246 | else { |
1213 | if (asoc->shutdown_last_sent_to == asoc->peer.retran_path) | 1247 | if (asoc->shutdown_last_sent_to == asoc->peer.retran_path) |
1214 | sctp_assoc_update_retran_path(asoc); | 1248 | sctp_assoc_update_retran_path(asoc); |
1215 | return asoc->peer.retran_path; | 1249 | return asoc->peer.retran_path; |
1216 | } | 1250 | } |
1217 | 1251 | ||
1218 | } | 1252 | } |
1219 | 1253 | ||
1220 | /* Update the association's pmtu and frag_point by going through all the | 1254 | /* Update the association's pmtu and frag_point by going through all the |
1221 | * transports. This routine is called when a transport's PMTU has changed. | 1255 | * transports. This routine is called when a transport's PMTU has changed. |
1222 | */ | 1256 | */ |
1223 | void sctp_assoc_sync_pmtu(struct sctp_association *asoc) | 1257 | void sctp_assoc_sync_pmtu(struct sctp_association *asoc) |
1224 | { | 1258 | { |
1225 | struct sctp_transport *t; | 1259 | struct sctp_transport *t; |
1226 | struct list_head *pos; | 1260 | struct list_head *pos; |
1227 | __u32 pmtu = 0; | 1261 | __u32 pmtu = 0; |
1228 | 1262 | ||
1229 | if (!asoc) | 1263 | if (!asoc) |
1230 | return; | 1264 | return; |
1231 | 1265 | ||
1232 | /* Get the lowest pmtu of all the transports. */ | 1266 | /* Get the lowest pmtu of all the transports. */ |
1233 | list_for_each(pos, &asoc->peer.transport_addr_list) { | 1267 | list_for_each(pos, &asoc->peer.transport_addr_list) { |
1234 | t = list_entry(pos, struct sctp_transport, transports); | 1268 | t = list_entry(pos, struct sctp_transport, transports); |
1235 | if (t->pmtu_pending && t->dst) { | 1269 | if (t->pmtu_pending && t->dst) { |
1236 | sctp_transport_update_pmtu(t, dst_mtu(t->dst)); | 1270 | sctp_transport_update_pmtu(t, dst_mtu(t->dst)); |
1237 | t->pmtu_pending = 0; | 1271 | t->pmtu_pending = 0; |
1238 | } | 1272 | } |
1239 | if (!pmtu || (t->pathmtu < pmtu)) | 1273 | if (!pmtu || (t->pathmtu < pmtu)) |
1240 | pmtu = t->pathmtu; | 1274 | pmtu = t->pathmtu; |
1241 | } | 1275 | } |
1242 | 1276 | ||
1243 | if (pmtu) { | 1277 | if (pmtu) { |
1244 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | 1278 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); |
1245 | asoc->pathmtu = pmtu; | 1279 | asoc->pathmtu = pmtu; |
1246 | asoc->frag_point = sctp_frag_point(sp, pmtu); | 1280 | asoc->frag_point = sctp_frag_point(sp, pmtu); |
1247 | } | 1281 | } |
1248 | 1282 | ||
1249 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", | 1283 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", |
1250 | __FUNCTION__, asoc, asoc->pathmtu, asoc->frag_point); | 1284 | __FUNCTION__, asoc, asoc->pathmtu, asoc->frag_point); |
1251 | } | 1285 | } |
1252 | 1286 | ||
1253 | /* Should we send a SACK to update our peer? */ | 1287 | /* Should we send a SACK to update our peer? */ |
1254 | static inline int sctp_peer_needs_update(struct sctp_association *asoc) | 1288 | static inline int sctp_peer_needs_update(struct sctp_association *asoc) |
1255 | { | 1289 | { |
1256 | switch (asoc->state) { | 1290 | switch (asoc->state) { |
1257 | case SCTP_STATE_ESTABLISHED: | 1291 | case SCTP_STATE_ESTABLISHED: |
1258 | case SCTP_STATE_SHUTDOWN_PENDING: | 1292 | case SCTP_STATE_SHUTDOWN_PENDING: |
1259 | case SCTP_STATE_SHUTDOWN_RECEIVED: | 1293 | case SCTP_STATE_SHUTDOWN_RECEIVED: |
1260 | case SCTP_STATE_SHUTDOWN_SENT: | 1294 | case SCTP_STATE_SHUTDOWN_SENT: |
1261 | if ((asoc->rwnd > asoc->a_rwnd) && | 1295 | if ((asoc->rwnd > asoc->a_rwnd) && |
1262 | ((asoc->rwnd - asoc->a_rwnd) >= | 1296 | ((asoc->rwnd - asoc->a_rwnd) >= |
1263 | min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu))) | 1297 | min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu))) |
1264 | return 1; | 1298 | return 1; |
1265 | break; | 1299 | break; |
1266 | default: | 1300 | default: |
1267 | break; | 1301 | break; |
1268 | } | 1302 | } |
1269 | return 0; | 1303 | return 0; |
1270 | } | 1304 | } |
1271 | 1305 | ||
/* Increase asoc's rwnd by len and send any window update SACK if needed.
 *
 * Any previously recorded overflow (rwnd_over) is paid back first; only
 * the remainder grows the advertised window.
 */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			/* Still in deficit: the whole credit goes to
			 * reducing the overflow.
			 */
			asoc->rwnd_over -= len;
		} else {
			/* Deficit cleared; the excess grows the window. */
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
			  "- %u\n", __FUNCTION__, asoc, len, asoc->rwnd,
			  asoc->rwnd_over, asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		/* Record what we are about to advertise. */
		asoc->a_rwnd = asoc->rwnd;
		SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
				  "rwnd: %u a_rwnd: %u\n", __FUNCTION__,
				  asoc, asoc->rwnd, asoc->a_rwnd);
		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  A pending timer holds an
		 * association reference, which is dropped here when the
		 * timer is successfully cancelled.
		 */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (timer_pending(timer) && del_timer(timer))
			sctp_association_put(asoc);
	}
}
1317 | 1351 | ||
1318 | /* Decrease asoc's rwnd by len. */ | 1352 | /* Decrease asoc's rwnd by len. */ |
1319 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) | 1353 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) |
1320 | { | 1354 | { |
1321 | SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); | 1355 | SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); |
1322 | SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); | 1356 | SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); |
1323 | if (asoc->rwnd >= len) { | 1357 | if (asoc->rwnd >= len) { |
1324 | asoc->rwnd -= len; | 1358 | asoc->rwnd -= len; |
1325 | } else { | 1359 | } else { |
1326 | asoc->rwnd_over = len - asoc->rwnd; | 1360 | asoc->rwnd_over = len - asoc->rwnd; |
1327 | asoc->rwnd = 0; | 1361 | asoc->rwnd = 0; |
1328 | } | 1362 | } |
1329 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", | 1363 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", |
1330 | __FUNCTION__, asoc, len, asoc->rwnd, | 1364 | __FUNCTION__, asoc, len, asoc->rwnd, |
1331 | asoc->rwnd_over); | 1365 | asoc->rwnd_over); |
1332 | } | 1366 | } |
1333 | 1367 | ||
1334 | /* Build the bind address list for the association based on info from the | 1368 | /* Build the bind address list for the association based on info from the |
1335 | * local endpoint and the remote peer. | 1369 | * local endpoint and the remote peer. |
1336 | */ | 1370 | */ |
1337 | int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, | 1371 | int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, |
1338 | gfp_t gfp) | 1372 | gfp_t gfp) |
1339 | { | 1373 | { |
1340 | sctp_scope_t scope; | 1374 | sctp_scope_t scope; |
1341 | int flags; | 1375 | int flags; |
1342 | 1376 | ||
1343 | /* Use scoping rules to determine the subset of addresses from | 1377 | /* Use scoping rules to determine the subset of addresses from |
1344 | * the endpoint. | 1378 | * the endpoint. |
1345 | */ | 1379 | */ |
1346 | scope = sctp_scope(&asoc->peer.active_path->ipaddr); | 1380 | scope = sctp_scope(&asoc->peer.active_path->ipaddr); |
1347 | flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; | 1381 | flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; |
1348 | if (asoc->peer.ipv4_address) | 1382 | if (asoc->peer.ipv4_address) |
1349 | flags |= SCTP_ADDR4_PEERSUPP; | 1383 | flags |= SCTP_ADDR4_PEERSUPP; |
1350 | if (asoc->peer.ipv6_address) | 1384 | if (asoc->peer.ipv6_address) |
1351 | flags |= SCTP_ADDR6_PEERSUPP; | 1385 | flags |= SCTP_ADDR6_PEERSUPP; |
1352 | 1386 | ||
1353 | return sctp_bind_addr_copy(&asoc->base.bind_addr, | 1387 | return sctp_bind_addr_copy(&asoc->base.bind_addr, |
1354 | &asoc->ep->base.bind_addr, | 1388 | &asoc->ep->base.bind_addr, |
1355 | scope, gfp, flags); | 1389 | scope, gfp, flags); |
1356 | } | 1390 | } |
1357 | 1391 | ||
1358 | /* Build the association's bind address list from the cookie. */ | 1392 | /* Build the association's bind address list from the cookie. */ |
1359 | int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, | 1393 | int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, |
1360 | struct sctp_cookie *cookie, | 1394 | struct sctp_cookie *cookie, |
1361 | gfp_t gfp) | 1395 | gfp_t gfp) |
1362 | { | 1396 | { |
1363 | int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); | 1397 | int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); |
1364 | int var_size3 = cookie->raw_addr_list_len; | 1398 | int var_size3 = cookie->raw_addr_list_len; |
1365 | __u8 *raw = (__u8 *)cookie->peer_init + var_size2; | 1399 | __u8 *raw = (__u8 *)cookie->peer_init + var_size2; |
1366 | 1400 | ||
1367 | return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, | 1401 | return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, |
1368 | asoc->ep->base.bind_addr.port, gfp); | 1402 | asoc->ep->base.bind_addr.port, gfp); |
1369 | } | 1403 | } |
1370 | 1404 | ||
1371 | /* Lookup laddr in the bind address list of an association. */ | 1405 | /* Lookup laddr in the bind address list of an association. */ |
1372 | int sctp_assoc_lookup_laddr(struct sctp_association *asoc, | 1406 | int sctp_assoc_lookup_laddr(struct sctp_association *asoc, |
1373 | const union sctp_addr *laddr) | 1407 | const union sctp_addr *laddr) |
1374 | { | 1408 | { |
1375 | int found = 0; | 1409 | int found = 0; |
1376 | 1410 | ||
1377 | if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && | 1411 | if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && |
1378 | sctp_bind_addr_match(&asoc->base.bind_addr, laddr, | 1412 | sctp_bind_addr_match(&asoc->base.bind_addr, laddr, |
1379 | sctp_sk(asoc->base.sk))) | 1413 | sctp_sk(asoc->base.sk))) |
1380 | found = 1; | 1414 | found = 1; |
1381 | 1415 | ||
1382 | return found; | 1416 | return found; |
1383 | } | 1417 | } |
1384 | 1418 | ||
1385 | /* Set an association id for a given association */ | 1419 | /* Set an association id for a given association */ |
1386 | int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) | 1420 | int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp) |
1387 | { | 1421 | { |
1388 | int assoc_id; | 1422 | int assoc_id; |
1389 | int error = 0; | 1423 | int error = 0; |
1390 | retry: | 1424 | retry: |
1391 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) | 1425 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) |
1392 | return -ENOMEM; | 1426 | return -ENOMEM; |
1393 | 1427 | ||
1394 | spin_lock_bh(&sctp_assocs_id_lock); | 1428 | spin_lock_bh(&sctp_assocs_id_lock); |
1395 | error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, | 1429 | error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, |
1396 | 1, &assoc_id); | 1430 | 1, &assoc_id); |
1397 | spin_unlock_bh(&sctp_assocs_id_lock); | 1431 | spin_unlock_bh(&sctp_assocs_id_lock); |
1398 | if (error == -EAGAIN) | 1432 | if (error == -EAGAIN) |
1399 | goto retry; | 1433 | goto retry; |
1400 | else if (error) | 1434 | else if (error) |
1401 | return error; | 1435 | return error; |
1402 | 1436 | ||
1403 | asoc->assoc_id = (sctp_assoc_t) assoc_id; | 1437 | asoc->assoc_id = (sctp_assoc_t) assoc_id; |
1404 | return error; | 1438 | return error; |
1405 | } | 1439 | } |
1406 | 1440 |
net/sctp/endpointola.c
1 | /* SCTP kernel reference Implementation | 1 | /* SCTP kernel reference Implementation |
2 | * Copyright (c) 1999-2000 Cisco, Inc. | 2 | * Copyright (c) 1999-2000 Cisco, Inc. |
3 | * Copyright (c) 1999-2001 Motorola, Inc. | 3 | * Copyright (c) 1999-2001 Motorola, Inc. |
4 | * Copyright (c) 2001-2002 International Business Machines, Corp. | 4 | * Copyright (c) 2001-2002 International Business Machines, Corp. |
5 | * Copyright (c) 2001 Intel Corp. | 5 | * Copyright (c) 2001 Intel Corp. |
6 | * Copyright (c) 2001 Nokia, Inc. | 6 | * Copyright (c) 2001 Nokia, Inc. |
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | 7 | * Copyright (c) 2001 La Monte H.P. Yarroll |
8 | * | 8 | * |
9 | * This file is part of the SCTP kernel reference Implementation | 9 | * This file is part of the SCTP kernel reference Implementation |
10 | * | 10 | * |
11 | * This abstraction represents an SCTP endpoint. | 11 | * This abstraction represents an SCTP endpoint. |
12 | * | 12 | * |
13 | * This file is part of the implementation of the add-IP extension, | 13 | * This file is part of the implementation of the add-IP extension, |
14 | * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001, | 14 | * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001, |
15 | * for the SCTP kernel reference Implementation. | 15 | * for the SCTP kernel reference Implementation. |
16 | * | 16 | * |
17 | * The SCTP reference implementation is free software; | 17 | * The SCTP reference implementation is free software; |
18 | * you can redistribute it and/or modify it under the terms of | 18 | * you can redistribute it and/or modify it under the terms of |
19 | * the GNU General Public License as published by | 19 | * the GNU General Public License as published by |
20 | * the Free Software Foundation; either version 2, or (at your option) | 20 | * the Free Software Foundation; either version 2, or (at your option) |
21 | * any later version. | 21 | * any later version. |
22 | * | 22 | * |
23 | * The SCTP reference implementation is distributed in the hope that it | 23 | * The SCTP reference implementation is distributed in the hope that it |
24 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | 24 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
25 | * ************************ | 25 | * ************************ |
26 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 26 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
27 | * See the GNU General Public License for more details. | 27 | * See the GNU General Public License for more details. |
28 | * | 28 | * |
29 | * You should have received a copy of the GNU General Public License | 29 | * You should have received a copy of the GNU General Public License |
30 | * along with GNU CC; see the file COPYING. If not, write to | 30 | * along with GNU CC; see the file COPYING. If not, write to |
31 | * the Free Software Foundation, 59 Temple Place - Suite 330, | 31 | * the Free Software Foundation, 59 Temple Place - Suite 330, |
32 | * Boston, MA 02111-1307, USA. | 32 | * Boston, MA 02111-1307, USA. |
33 | * | 33 | * |
34 | * Please send any bug reports or fixes you make to the | 34 | * Please send any bug reports or fixes you make to the |
35 | * email address(es): | 35 | * email address(es): |
36 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | 36 | * lksctp developers <lksctp-developers@lists.sourceforge.net> |
37 | * | 37 | * |
38 | * Or submit a bug report through the following website: | 38 | * Or submit a bug report through the following website: |
39 | * http://www.sf.net/projects/lksctp | 39 | * http://www.sf.net/projects/lksctp |
40 | * | 40 | * |
41 | * Written or modified by: | 41 | * Written or modified by: |
42 | * La Monte H.P. Yarroll <piggy@acm.org> | 42 | * La Monte H.P. Yarroll <piggy@acm.org> |
43 | * Karl Knutson <karl@athena.chicago.il.us> | 43 | * Karl Knutson <karl@athena.chicago.il.us> |
44 | * Jon Grimm <jgrimm@austin.ibm.com> | 44 | * Jon Grimm <jgrimm@austin.ibm.com> |
45 | * Daisy Chang <daisyc@us.ibm.com> | 45 | * Daisy Chang <daisyc@us.ibm.com> |
46 | * Dajiang Zhang <dajiang.zhang@nokia.com> | 46 | * Dajiang Zhang <dajiang.zhang@nokia.com> |
47 | * | 47 | * |
48 | * Any bugs reported given to us we will try to fix... any fixes shared will | 48 | * Any bugs reported given to us we will try to fix... any fixes shared will |
49 | * be incorporated into the next SCTP release. | 49 | * be incorporated into the next SCTP release. |
50 | */ | 50 | */ |
51 | 51 | ||
52 | #include <linux/types.h> | 52 | #include <linux/types.h> |
53 | #include <linux/slab.h> | 53 | #include <linux/slab.h> |
54 | #include <linux/in.h> | 54 | #include <linux/in.h> |
55 | #include <linux/random.h> /* get_random_bytes() */ | 55 | #include <linux/random.h> /* get_random_bytes() */ |
56 | #include <linux/crypto.h> | 56 | #include <linux/crypto.h> |
57 | #include <net/sock.h> | 57 | #include <net/sock.h> |
58 | #include <net/ipv6.h> | 58 | #include <net/ipv6.h> |
59 | #include <net/sctp/sctp.h> | 59 | #include <net/sctp/sctp.h> |
60 | #include <net/sctp/sm.h> | 60 | #include <net/sctp/sm.h> |
61 | 61 | ||
62 | /* Forward declarations for internal helpers. */ | 62 | /* Forward declarations for internal helpers. */ |
63 | static void sctp_endpoint_bh_rcv(struct work_struct *work); | 63 | static void sctp_endpoint_bh_rcv(struct work_struct *work); |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Initialize the base fields of the endpoint structure. | 66 | * Initialize the base fields of the endpoint structure. |
67 | */ | 67 | */ |
68 | static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | 68 | static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, |
69 | struct sock *sk, | 69 | struct sock *sk, |
70 | gfp_t gfp) | 70 | gfp_t gfp) |
71 | { | 71 | { |
72 | struct sctp_hmac_algo_param *auth_hmacs = NULL; | ||
73 | struct sctp_chunks_param *auth_chunks = NULL; | ||
74 | struct sctp_shared_key *null_key; | ||
75 | int err; | ||
76 | |||
72 | memset(ep, 0, sizeof(struct sctp_endpoint)); | 77 | memset(ep, 0, sizeof(struct sctp_endpoint)); |
73 | 78 | ||
74 | ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); | 79 | ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); |
75 | if (!ep->digest) | 80 | if (!ep->digest) |
76 | return NULL; | 81 | return NULL; |
77 | 82 | ||
83 | if (sctp_auth_enable) { | ||
84 | /* Allocate space for HMACS and CHUNKS authentication | ||
85 | * variables. There are arrays that we encode directly | ||
86 | * into parameters to make the rest of the operations easier. | ||
87 | */ | ||
88 | auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) + | ||
89 | sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); | ||
90 | if (!auth_hmacs) | ||
91 | goto nomem; | ||
92 | |||
93 | auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) + | ||
94 | SCTP_NUM_CHUNK_TYPES, gfp); | ||
95 | if (!auth_chunks) | ||
96 | goto nomem; | ||
97 | |||
98 | /* Initialize the HMACS parameter. | ||
99 | * SCTP-AUTH: Section 3.3 | ||
100 | * Every endpoint supporting SCTP chunk authentication MUST | ||
101 | * support the HMAC based on the SHA-1 algorithm. | ||
102 | */ | ||
103 | auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO; | ||
104 | auth_hmacs->param_hdr.length = | ||
105 | htons(sizeof(sctp_paramhdr_t) + 2); | ||
106 | auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1); | ||
107 | |||
108 | /* Initialize the CHUNKS parameter */ | ||
109 | auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; | ||
110 | |||
111 | /* If the Add-IP functionality is enabled, we must | ||
112 | * authenticate, ASCONF and ASCONF-ACK chunks | ||
113 | */ | ||
114 | if (sctp_addip_enable) { | ||
115 | auth_chunks->chunks[0] = SCTP_CID_ASCONF; | ||
116 | auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; | ||
117 | auth_chunks->param_hdr.length = | ||
118 | htons(sizeof(sctp_paramhdr_t) + 2); | ||
119 | } | ||
120 | } | ||
121 | |||
78 | /* Initialize the base structure. */ | 122 | /* Initialize the base structure. */ |
79 | /* What type of endpoint are we? */ | 123 | /* What type of endpoint are we? */ |
80 | ep->base.type = SCTP_EP_TYPE_SOCKET; | 124 | ep->base.type = SCTP_EP_TYPE_SOCKET; |
81 | 125 | ||
82 | /* Initialize the basic object fields. */ | 126 | /* Initialize the basic object fields. */ |
83 | atomic_set(&ep->base.refcnt, 1); | 127 | atomic_set(&ep->base.refcnt, 1); |
84 | ep->base.dead = 0; | 128 | ep->base.dead = 0; |
85 | ep->base.malloced = 1; | 129 | ep->base.malloced = 1; |
86 | 130 | ||
87 | /* Create an input queue. */ | 131 | /* Create an input queue. */ |
88 | sctp_inq_init(&ep->base.inqueue); | 132 | sctp_inq_init(&ep->base.inqueue); |
89 | 133 | ||
90 | /* Set its top-half handler */ | 134 | /* Set its top-half handler */ |
91 | sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); | 135 | sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); |
92 | 136 | ||
93 | /* Initialize the bind addr area */ | 137 | /* Initialize the bind addr area */ |
94 | sctp_bind_addr_init(&ep->base.bind_addr, 0); | 138 | sctp_bind_addr_init(&ep->base.bind_addr, 0); |
95 | 139 | ||
96 | /* Remember who we are attached to. */ | 140 | /* Remember who we are attached to. */ |
97 | ep->base.sk = sk; | 141 | ep->base.sk = sk; |
98 | sock_hold(ep->base.sk); | 142 | sock_hold(ep->base.sk); |
99 | 143 | ||
100 | /* Create the lists of associations. */ | 144 | /* Create the lists of associations. */ |
101 | INIT_LIST_HEAD(&ep->asocs); | 145 | INIT_LIST_HEAD(&ep->asocs); |
102 | 146 | ||
103 | /* Use SCTP specific send buffer space queues. */ | 147 | /* Use SCTP specific send buffer space queues. */ |
104 | ep->sndbuf_policy = sctp_sndbuf_policy; | 148 | ep->sndbuf_policy = sctp_sndbuf_policy; |
105 | 149 | ||
106 | sk->sk_write_space = sctp_write_space; | 150 | sk->sk_write_space = sctp_write_space; |
107 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | 151 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); |
108 | 152 | ||
109 | /* Get the receive buffer policy for this endpoint */ | 153 | /* Get the receive buffer policy for this endpoint */ |
110 | ep->rcvbuf_policy = sctp_rcvbuf_policy; | 154 | ep->rcvbuf_policy = sctp_rcvbuf_policy; |
111 | 155 | ||
112 | /* Initialize the secret key used with cookie. */ | 156 | /* Initialize the secret key used with cookie. */ |
113 | get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); | 157 | get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); |
114 | ep->last_key = ep->current_key = 0; | 158 | ep->last_key = ep->current_key = 0; |
115 | ep->key_changed_at = jiffies; | 159 | ep->key_changed_at = jiffies; |
116 | 160 | ||
161 | /* SCTP-AUTH extensions*/ | ||
162 | INIT_LIST_HEAD(&ep->endpoint_shared_keys); | ||
163 | null_key = sctp_auth_shkey_create(0, GFP_KERNEL); | ||
164 | if (!null_key) | ||
165 | goto nomem; | ||
166 | |||
167 | list_add(&null_key->key_list, &ep->endpoint_shared_keys); | ||
168 | |||
169 | /* Allocate and initialize transorms arrays for suported HMACs. */ | ||
170 | err = sctp_auth_init_hmacs(ep, gfp); | ||
171 | if (err) | ||
172 | goto nomem_hmacs; | ||
173 | |||
174 | /* Add the null key to the endpoint shared keys list and | ||
175 | * set the hmcas and chunks pointers. | ||
176 | */ | ||
177 | ep->auth_hmacs_list = auth_hmacs; | ||
178 | ep->auth_chunk_list = auth_chunks; | ||
179 | |||
117 | return ep; | 180 | return ep; |
181 | |||
182 | nomem_hmacs: | ||
183 | sctp_auth_destroy_keys(&ep->endpoint_shared_keys); | ||
184 | nomem: | ||
185 | /* Free all allocations */ | ||
186 | kfree(auth_hmacs); | ||
187 | kfree(auth_chunks); | ||
188 | kfree(ep->digest); | ||
189 | return NULL; | ||
190 | |||
118 | } | 191 | } |
119 | 192 | ||
120 | /* Create a sctp_endpoint with all that boring stuff initialized. | 193 | /* Create a sctp_endpoint with all that boring stuff initialized. |
121 | * Returns NULL if there isn't enough memory. | 194 | * Returns NULL if there isn't enough memory. |
122 | */ | 195 | */ |
123 | struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) | 196 | struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) |
124 | { | 197 | { |
125 | struct sctp_endpoint *ep; | 198 | struct sctp_endpoint *ep; |
126 | 199 | ||
127 | /* Build a local endpoint. */ | 200 | /* Build a local endpoint. */ |
128 | ep = t_new(struct sctp_endpoint, gfp); | 201 | ep = t_new(struct sctp_endpoint, gfp); |
129 | if (!ep) | 202 | if (!ep) |
130 | goto fail; | 203 | goto fail; |
131 | if (!sctp_endpoint_init(ep, sk, gfp)) | 204 | if (!sctp_endpoint_init(ep, sk, gfp)) |
132 | goto fail_init; | 205 | goto fail_init; |
133 | ep->base.malloced = 1; | 206 | ep->base.malloced = 1; |
134 | SCTP_DBG_OBJCNT_INC(ep); | 207 | SCTP_DBG_OBJCNT_INC(ep); |
135 | return ep; | 208 | return ep; |
136 | 209 | ||
137 | fail_init: | 210 | fail_init: |
138 | kfree(ep); | 211 | kfree(ep); |
139 | fail: | 212 | fail: |
140 | return NULL; | 213 | return NULL; |
141 | } | 214 | } |
142 | 215 | ||
143 | /* Add an association to an endpoint. */ | 216 | /* Add an association to an endpoint. */ |
144 | void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, | 217 | void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, |
145 | struct sctp_association *asoc) | 218 | struct sctp_association *asoc) |
146 | { | 219 | { |
147 | struct sock *sk = ep->base.sk; | 220 | struct sock *sk = ep->base.sk; |
148 | 221 | ||
149 | /* If this is a temporary association, don't bother | 222 | /* If this is a temporary association, don't bother |
150 | * since we'll be removing it shortly and don't | 223 | * since we'll be removing it shortly and don't |
151 | * want anyone to find it anyway. | 224 | * want anyone to find it anyway. |
152 | */ | 225 | */ |
153 | if (asoc->temp) | 226 | if (asoc->temp) |
154 | return; | 227 | return; |
155 | 228 | ||
156 | /* Now just add it to our list of asocs */ | 229 | /* Now just add it to our list of asocs */ |
157 | list_add_tail(&asoc->asocs, &ep->asocs); | 230 | list_add_tail(&asoc->asocs, &ep->asocs); |
158 | 231 | ||
159 | /* Increment the backlog value for a TCP-style listening socket. */ | 232 | /* Increment the backlog value for a TCP-style listening socket. */ |
160 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) | 233 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) |
161 | sk->sk_ack_backlog++; | 234 | sk->sk_ack_backlog++; |
162 | } | 235 | } |
163 | 236 | ||
164 | /* Free the endpoint structure. Delay cleanup until | 237 | /* Free the endpoint structure. Delay cleanup until |
165 | * all users have released their reference count on this structure. | 238 | * all users have released their reference count on this structure. |
166 | */ | 239 | */ |
167 | void sctp_endpoint_free(struct sctp_endpoint *ep) | 240 | void sctp_endpoint_free(struct sctp_endpoint *ep) |
168 | { | 241 | { |
169 | ep->base.dead = 1; | 242 | ep->base.dead = 1; |
170 | 243 | ||
171 | ep->base.sk->sk_state = SCTP_SS_CLOSED; | 244 | ep->base.sk->sk_state = SCTP_SS_CLOSED; |
172 | 245 | ||
173 | /* Unlink this endpoint, so we can't find it again! */ | 246 | /* Unlink this endpoint, so we can't find it again! */ |
174 | sctp_unhash_endpoint(ep); | 247 | sctp_unhash_endpoint(ep); |
175 | 248 | ||
176 | sctp_endpoint_put(ep); | 249 | sctp_endpoint_put(ep); |
177 | } | 250 | } |
178 | 251 | ||
179 | /* Final destructor for endpoint. */ | 252 | /* Final destructor for endpoint. */ |
/* Final destructor for endpoint.  Called only via sctp_endpoint_put()
 * once the refcount reaches zero; the endpoint must already be dead.
 * Teardown order matters: AUTH state and queues are released before
 * the socket reference is dropped.
 */
static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
{
	SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);

	/* Free up the HMAC transform. */
	crypto_free_hash(sctp_sk(ep->base.sk)->hmac);

	/* Free the digest buffer */
	kfree(ep->digest);

	/* SCTP-AUTH: Free up AUTH related data such as shared keys,
	 * chunks and hmacs arrays that were allocated
	 */
	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
	kfree(ep->auth_hmacs_list);
	kfree(ep->auth_chunk_list);

	/* AUTH - Free any allocated HMAC transform containers */
	sctp_auth_destroy_hmacs(ep->auth_hmacs);

	/* Cleanup. */
	sctp_inq_free(&ep->base.inqueue);
	sctp_bind_addr_free(&ep->base.bind_addr);

	/* Remove and free the port */
	if (sctp_sk(ep->base.sk)->bind_hash)
		sctp_put_port(ep->base.sk);

	/* Give up our hold on the sock. */
	if (ep->base.sk)
		sock_put(ep->base.sk);

	/* Finally, free up our memory. */
	if (ep->base.malloced) {
		kfree(ep);
		SCTP_DBG_OBJCNT_DEC(ep);
	}
}
208 | 291 | ||
209 | /* Hold a reference to an endpoint. */ | 292 | /* Hold a reference to an endpoint. */ |
/* Hold a reference to an endpoint.  Paired with sctp_endpoint_put(). */
void sctp_endpoint_hold(struct sctp_endpoint *ep)
{
	atomic_inc(&ep->base.refcnt);
}
214 | 297 | ||
215 | /* Release a reference to an endpoint and clean up if there are | 298 | /* Release a reference to an endpoint and clean up if there are |
216 | * no more references. | 299 | * no more references. |
217 | */ | 300 | */ |
/* Release a reference to an endpoint and clean up if there are
 * no more references.  The caller must not touch @ep afterwards.
 */
void sctp_endpoint_put(struct sctp_endpoint *ep)
{
	if (atomic_dec_and_test(&ep->base.refcnt))
		sctp_endpoint_destroy(ep);
}
223 | 306 | ||
224 | /* Is this the endpoint we are looking for? */ | 307 | /* Is this the endpoint we are looking for? */ |
225 | struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, | 308 | struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, |
226 | const union sctp_addr *laddr) | 309 | const union sctp_addr *laddr) |
227 | { | 310 | { |
228 | struct sctp_endpoint *retval = NULL; | 311 | struct sctp_endpoint *retval = NULL; |
229 | 312 | ||
230 | if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) { | 313 | if (htons(ep->base.bind_addr.port) == laddr->v4.sin_port) { |
231 | if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, | 314 | if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, |
232 | sctp_sk(ep->base.sk))) | 315 | sctp_sk(ep->base.sk))) |
233 | retval = ep; | 316 | retval = ep; |
234 | } | 317 | } |
235 | 318 | ||
236 | return retval; | 319 | return retval; |
237 | } | 320 | } |
238 | 321 | ||
239 | /* Find the association that goes with this chunk. | 322 | /* Find the association that goes with this chunk. |
240 | * We do a linear search of the associations for this endpoint. | 323 | * We do a linear search of the associations for this endpoint. |
241 | * We return the matching transport address too. | 324 | * We return the matching transport address too. |
242 | */ | 325 | */ |
243 | static struct sctp_association *__sctp_endpoint_lookup_assoc( | 326 | static struct sctp_association *__sctp_endpoint_lookup_assoc( |
244 | const struct sctp_endpoint *ep, | 327 | const struct sctp_endpoint *ep, |
245 | const union sctp_addr *paddr, | 328 | const union sctp_addr *paddr, |
246 | struct sctp_transport **transport) | 329 | struct sctp_transport **transport) |
247 | { | 330 | { |
248 | int rport; | 331 | int rport; |
249 | struct sctp_association *asoc; | 332 | struct sctp_association *asoc; |
250 | struct list_head *pos; | 333 | struct list_head *pos; |
251 | 334 | ||
252 | rport = ntohs(paddr->v4.sin_port); | 335 | rport = ntohs(paddr->v4.sin_port); |
253 | 336 | ||
254 | list_for_each(pos, &ep->asocs) { | 337 | list_for_each(pos, &ep->asocs) { |
255 | asoc = list_entry(pos, struct sctp_association, asocs); | 338 | asoc = list_entry(pos, struct sctp_association, asocs); |
256 | if (rport == asoc->peer.port) { | 339 | if (rport == asoc->peer.port) { |
257 | *transport = sctp_assoc_lookup_paddr(asoc, paddr); | 340 | *transport = sctp_assoc_lookup_paddr(asoc, paddr); |
258 | 341 | ||
259 | if (*transport) | 342 | if (*transport) |
260 | return asoc; | 343 | return asoc; |
261 | } | 344 | } |
262 | } | 345 | } |
263 | 346 | ||
264 | *transport = NULL; | 347 | *transport = NULL; |
265 | return NULL; | 348 | return NULL; |
266 | } | 349 | } |
267 | 350 | ||
268 | /* Lookup association on an endpoint based on a peer address. BH-safe. */ | 351 | /* Lookup association on an endpoint based on a peer address. BH-safe. */ |
/* Lookup association on an endpoint based on a peer address.  BH-safe. */
struct sctp_association *sctp_endpoint_lookup_assoc(
	const struct sctp_endpoint *ep,
	const union sctp_addr *paddr,
	struct sctp_transport **transport)
{
	struct sctp_association *found;

	/* Guard the walk against bottom-half modification of the list. */
	sctp_local_bh_disable();
	found = __sctp_endpoint_lookup_assoc(ep, paddr, transport);
	sctp_local_bh_enable();

	return found;
}
282 | 365 | ||
283 | /* Look for any peeled off association from the endpoint that matches the | 366 | /* Look for any peeled off association from the endpoint that matches the |
284 | * given peer address. | 367 | * given peer address. |
285 | */ | 368 | */ |
286 | int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, | 369 | int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, |
287 | const union sctp_addr *paddr) | 370 | const union sctp_addr *paddr) |
288 | { | 371 | { |
289 | struct sctp_sockaddr_entry *addr; | 372 | struct sctp_sockaddr_entry *addr; |
290 | struct sctp_bind_addr *bp; | 373 | struct sctp_bind_addr *bp; |
291 | 374 | ||
292 | bp = &ep->base.bind_addr; | 375 | bp = &ep->base.bind_addr; |
293 | /* This function is called with the socket lock held, | 376 | /* This function is called with the socket lock held, |
294 | * so the address_list can not change. | 377 | * so the address_list can not change. |
295 | */ | 378 | */ |
296 | list_for_each_entry(addr, &bp->address_list, list) { | 379 | list_for_each_entry(addr, &bp->address_list, list) { |
297 | if (sctp_has_association(&addr->a, paddr)) | 380 | if (sctp_has_association(&addr->a, paddr)) |
298 | return 1; | 381 | return 1; |
299 | } | 382 | } |
300 | 383 | ||
301 | return 0; | 384 | return 0; |
302 | } | 385 | } |
303 | 386 | ||
304 | /* Do delayed input processing. This is scheduled by sctp_rcv(). | 387 | /* Do delayed input processing. This is scheduled by sctp_rcv(). |
305 | * This may be called on BH or task time. | 388 | * This may be called on BH or task time. |
306 | */ | 389 | */ |
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * This may be called on BH or task time.
 *
 * Pops chunks off the endpoint's input queue and feeds each one to the
 * state machine, resolving the owning association/transport on the way.
 */
static void sctp_endpoint_bh_rcv(struct work_struct *work)
{
	struct sctp_endpoint *ep =
		container_of(work, struct sctp_endpoint,
			     base.inqueue.immediate);
	struct sctp_association *asoc;
	struct sock *sk;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	sctp_subtype_t subtype;
	sctp_state_t state;
	int error = 0;

	/* The endpoint may be freed concurrently; nothing to do then. */
	if (ep->base.dead)
		return;

	asoc = NULL;
	inqueue = &ep->base.inqueue;
	sk = ep->base.sk;

	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* We might have grown an association since last we
		 * looked, so try again.
		 *
		 * This happens when we've just processed our
		 * COOKIE-ECHO chunk.
		 */
		if (NULL == chunk->asoc) {
			asoc = sctp_endpoint_lookup_assoc(ep,
							  sctp_source(chunk),
							  &transport);
			chunk->asoc = asoc;
			chunk->transport = transport;
		}

		state = asoc ? asoc->state : SCTP_STATE_CLOSED;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (asoc && sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else
			SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);

		/* Any traffic counts as liveness for the transport. */
		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state,
				   ep, asoc, chunk, GFP_ATOMIC);

		/* On error, discard the rest of this packet's chunks. */
		if (error && chunk)
			chunk->pdiscard = 1;

		/* Check to see if the endpoint is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (!sctp_sk(sk)->ep)
			break;
	}
}
371 | 454 |
net/sctp/output.c
1 | /* SCTP kernel reference Implementation | 1 | /* SCTP kernel reference Implementation |
2 | * (C) Copyright IBM Corp. 2001, 2004 | 2 | * (C) Copyright IBM Corp. 2001, 2004 |
3 | * Copyright (c) 1999-2000 Cisco, Inc. | 3 | * Copyright (c) 1999-2000 Cisco, Inc. |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | 4 | * Copyright (c) 1999-2001 Motorola, Inc. |
5 | * | 5 | * |
6 | * This file is part of the SCTP kernel reference Implementation | 6 | * This file is part of the SCTP kernel reference Implementation |
7 | * | 7 | * |
8 | * These functions handle output processing. | 8 | * These functions handle output processing. |
9 | * | 9 | * |
10 | * The SCTP reference implementation is free software; | 10 | * The SCTP reference implementation is free software; |
11 | * you can redistribute it and/or modify it under the terms of | 11 | * you can redistribute it and/or modify it under the terms of |
12 | * the GNU General Public License as published by | 12 | * the GNU General Public License as published by |
13 | * the Free Software Foundation; either version 2, or (at your option) | 13 | * the Free Software Foundation; either version 2, or (at your option) |
14 | * any later version. | 14 | * any later version. |
15 | * | 15 | * |
16 | * The SCTP reference implementation is distributed in the hope that it | 16 | * The SCTP reference implementation is distributed in the hope that it |
17 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | 17 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
18 | * ************************ | 18 | * ************************ |
19 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 19 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
20 | * See the GNU General Public License for more details. | 20 | * See the GNU General Public License for more details. |
21 | * | 21 | * |
22 | * You should have received a copy of the GNU General Public License | 22 | * You should have received a copy of the GNU General Public License |
23 | * along with GNU CC; see the file COPYING. If not, write to | 23 | * along with GNU CC; see the file COPYING. If not, write to |
24 | * the Free Software Foundation, 59 Temple Place - Suite 330, | 24 | * the Free Software Foundation, 59 Temple Place - Suite 330, |
25 | * Boston, MA 02111-1307, USA. | 25 | * Boston, MA 02111-1307, USA. |
26 | * | 26 | * |
27 | * Please send any bug reports or fixes you make to the | 27 | * Please send any bug reports or fixes you make to the |
28 | * email address(es): | 28 | * email address(es): |
29 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | 29 | * lksctp developers <lksctp-developers@lists.sourceforge.net> |
30 | * | 30 | * |
31 | * Or submit a bug report through the following website: | 31 | * Or submit a bug report through the following website: |
32 | * http://www.sf.net/projects/lksctp | 32 | * http://www.sf.net/projects/lksctp |
33 | * | 33 | * |
34 | * Written or modified by: | 34 | * Written or modified by: |
35 | * La Monte H.P. Yarroll <piggy@acm.org> | 35 | * La Monte H.P. Yarroll <piggy@acm.org> |
36 | * Karl Knutson <karl@athena.chicago.il.us> | 36 | * Karl Knutson <karl@athena.chicago.il.us> |
37 | * Jon Grimm <jgrimm@austin.ibm.com> | 37 | * Jon Grimm <jgrimm@austin.ibm.com> |
38 | * Sridhar Samudrala <sri@us.ibm.com> | 38 | * Sridhar Samudrala <sri@us.ibm.com> |
39 | * | 39 | * |
40 | * Any bugs reported given to us we will try to fix... any fixes shared will | 40 | * Any bugs reported given to us we will try to fix... any fixes shared will |
41 | * be incorporated into the next SCTP release. | 41 | * be incorporated into the next SCTP release. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <linux/types.h> | 44 | #include <linux/types.h> |
45 | #include <linux/kernel.h> | 45 | #include <linux/kernel.h> |
46 | #include <linux/wait.h> | 46 | #include <linux/wait.h> |
47 | #include <linux/time.h> | 47 | #include <linux/time.h> |
48 | #include <linux/ip.h> | 48 | #include <linux/ip.h> |
49 | #include <linux/ipv6.h> | 49 | #include <linux/ipv6.h> |
50 | #include <linux/init.h> | 50 | #include <linux/init.h> |
51 | #include <net/inet_ecn.h> | 51 | #include <net/inet_ecn.h> |
52 | #include <net/icmp.h> | 52 | #include <net/icmp.h> |
53 | 53 | ||
54 | #ifndef TEST_FRAME | 54 | #ifndef TEST_FRAME |
55 | #include <net/tcp.h> | 55 | #include <net/tcp.h> |
56 | #endif /* TEST_FRAME (not defined) */ | 56 | #endif /* TEST_FRAME (not defined) */ |
57 | 57 | ||
58 | #include <linux/socket.h> /* for sa_family_t */ | 58 | #include <linux/socket.h> /* for sa_family_t */ |
59 | #include <net/sock.h> | 59 | #include <net/sock.h> |
60 | 60 | ||
61 | #include <net/sctp/sctp.h> | 61 | #include <net/sctp/sctp.h> |
62 | #include <net/sctp/sm.h> | 62 | #include <net/sctp/sm.h> |
63 | 63 | ||
64 | /* Forward declarations for private helpers. */ | 64 | /* Forward declarations for private helpers. */ |
65 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | 65 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, |
66 | struct sctp_chunk *chunk); | 66 | struct sctp_chunk *chunk); |
67 | 67 | ||
68 | /* Config a packet. | 68 | /* Config a packet. |
69 | * This appears to be a followup set of initializations. | 69 | * This appears to be a followup set of initializations. |
70 | */ | 70 | */ |
71 | struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | 71 | struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, |
72 | __u32 vtag, int ecn_capable) | 72 | __u32 vtag, int ecn_capable) |
73 | { | 73 | { |
74 | struct sctp_chunk *chunk = NULL; | 74 | struct sctp_chunk *chunk = NULL; |
75 | 75 | ||
76 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__, | 76 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__, |
77 | packet, vtag); | 77 | packet, vtag); |
78 | 78 | ||
79 | packet->vtag = vtag; | 79 | packet->vtag = vtag; |
80 | packet->has_cookie_echo = 0; | 80 | packet->has_cookie_echo = 0; |
81 | packet->has_sack = 0; | 81 | packet->has_sack = 0; |
82 | packet->has_auth = 0; | ||
82 | packet->ipfragok = 0; | 83 | packet->ipfragok = 0; |
84 | packet->auth = NULL; | ||
83 | 85 | ||
84 | if (ecn_capable && sctp_packet_empty(packet)) { | 86 | if (ecn_capable && sctp_packet_empty(packet)) { |
85 | chunk = sctp_get_ecne_prepend(packet->transport->asoc); | 87 | chunk = sctp_get_ecne_prepend(packet->transport->asoc); |
86 | 88 | ||
87 | /* If there a is a prepend chunk stick it on the list before | 89 | /* If there a is a prepend chunk stick it on the list before |
88 | * any other chunks get appended. | 90 | * any other chunks get appended. |
89 | */ | 91 | */ |
90 | if (chunk) | 92 | if (chunk) |
91 | sctp_packet_append_chunk(packet, chunk); | 93 | sctp_packet_append_chunk(packet, chunk); |
92 | } | 94 | } |
93 | 95 | ||
94 | return packet; | 96 | return packet; |
95 | } | 97 | } |
96 | 98 | ||
97 | /* Initialize the packet structure. */ | 99 | /* Initialize the packet structure. */ |
98 | struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | 100 | struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, |
99 | struct sctp_transport *transport, | 101 | struct sctp_transport *transport, |
100 | __u16 sport, __u16 dport) | 102 | __u16 sport, __u16 dport) |
101 | { | 103 | { |
102 | struct sctp_association *asoc = transport->asoc; | 104 | struct sctp_association *asoc = transport->asoc; |
103 | size_t overhead; | 105 | size_t overhead; |
104 | 106 | ||
105 | SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__, | 107 | SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__, |
106 | packet, transport); | 108 | packet, transport); |
107 | 109 | ||
108 | packet->transport = transport; | 110 | packet->transport = transport; |
109 | packet->source_port = sport; | 111 | packet->source_port = sport; |
110 | packet->destination_port = dport; | 112 | packet->destination_port = dport; |
111 | INIT_LIST_HEAD(&packet->chunk_list); | 113 | INIT_LIST_HEAD(&packet->chunk_list); |
112 | if (asoc) { | 114 | if (asoc) { |
113 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | 115 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); |
114 | overhead = sp->pf->af->net_header_len; | 116 | overhead = sp->pf->af->net_header_len; |
115 | } else { | 117 | } else { |
116 | overhead = sizeof(struct ipv6hdr); | 118 | overhead = sizeof(struct ipv6hdr); |
117 | } | 119 | } |
118 | overhead += sizeof(struct sctphdr); | 120 | overhead += sizeof(struct sctphdr); |
119 | packet->overhead = overhead; | 121 | packet->overhead = overhead; |
120 | packet->size = overhead; | 122 | packet->size = overhead; |
121 | packet->vtag = 0; | 123 | packet->vtag = 0; |
122 | packet->has_cookie_echo = 0; | 124 | packet->has_cookie_echo = 0; |
123 | packet->has_sack = 0; | 125 | packet->has_sack = 0; |
126 | packet->has_auth = 0; | ||
124 | packet->ipfragok = 0; | 127 | packet->ipfragok = 0; |
125 | packet->malloced = 0; | 128 | packet->malloced = 0; |
129 | packet->auth = NULL; | ||
126 | return packet; | 130 | return packet; |
127 | } | 131 | } |
128 | 132 | ||
129 | /* Free a packet. */ | 133 | /* Free a packet. */ |
130 | void sctp_packet_free(struct sctp_packet *packet) | 134 | void sctp_packet_free(struct sctp_packet *packet) |
131 | { | 135 | { |
132 | struct sctp_chunk *chunk, *tmp; | 136 | struct sctp_chunk *chunk, *tmp; |
133 | 137 | ||
134 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 138 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); |
135 | 139 | ||
136 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { | 140 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
137 | list_del_init(&chunk->list); | 141 | list_del_init(&chunk->list); |
138 | sctp_chunk_free(chunk); | 142 | sctp_chunk_free(chunk); |
139 | } | 143 | } |
140 | 144 | ||
141 | if (packet->malloced) | 145 | if (packet->malloced) |
142 | kfree(packet); | 146 | kfree(packet); |
143 | } | 147 | } |
144 | 148 | ||
145 | /* This routine tries to append the chunk to the offered packet. If adding | 149 | /* This routine tries to append the chunk to the offered packet. If adding |
146 | * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk | 150 | * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk |
147 | * is not present in the packet, it transmits the input packet. | 151 | * is not present in the packet, it transmits the input packet. |
148 | * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long | 152 | * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long |
149 | * as it can fit in the packet, but any more data that does not fit in this | 153 | * as it can fit in the packet, but any more data that does not fit in this |
150 | * packet can be sent only after receiving the COOKIE_ACK. | 154 | * packet can be sent only after receiving the COOKIE_ACK. |
151 | */ | 155 | */ |
152 | sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, | 156 | sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, |
153 | struct sctp_chunk *chunk) | 157 | struct sctp_chunk *chunk) |
154 | { | 158 | { |
155 | sctp_xmit_t retval; | 159 | sctp_xmit_t retval; |
156 | int error = 0; | 160 | int error = 0; |
157 | 161 | ||
158 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, | 162 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, |
159 | packet, chunk); | 163 | packet, chunk); |
160 | 164 | ||
161 | switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { | 165 | switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { |
162 | case SCTP_XMIT_PMTU_FULL: | 166 | case SCTP_XMIT_PMTU_FULL: |
163 | if (!packet->has_cookie_echo) { | 167 | if (!packet->has_cookie_echo) { |
164 | error = sctp_packet_transmit(packet); | 168 | error = sctp_packet_transmit(packet); |
165 | if (error < 0) | 169 | if (error < 0) |
166 | chunk->skb->sk->sk_err = -error; | 170 | chunk->skb->sk->sk_err = -error; |
167 | 171 | ||
168 | /* If we have an empty packet, then we can NOT ever | 172 | /* If we have an empty packet, then we can NOT ever |
169 | * return PMTU_FULL. | 173 | * return PMTU_FULL. |
170 | */ | 174 | */ |
171 | retval = sctp_packet_append_chunk(packet, chunk); | 175 | retval = sctp_packet_append_chunk(packet, chunk); |
172 | } | 176 | } |
173 | break; | 177 | break; |
174 | 178 | ||
175 | case SCTP_XMIT_RWND_FULL: | 179 | case SCTP_XMIT_RWND_FULL: |
176 | case SCTP_XMIT_OK: | 180 | case SCTP_XMIT_OK: |
177 | case SCTP_XMIT_NAGLE_DELAY: | 181 | case SCTP_XMIT_NAGLE_DELAY: |
178 | break; | 182 | break; |
179 | } | 183 | } |
180 | 184 | ||
181 | return retval; | 185 | return retval; |
182 | } | 186 | } |
183 | 187 | ||
184 | /* Try to bundle a SACK with the packet. */ | 188 | /* Try to bundle a SACK with the packet. */ |
185 | static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, | 189 | static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, |
186 | struct sctp_chunk *chunk) | 190 | struct sctp_chunk *chunk) |
187 | { | 191 | { |
188 | sctp_xmit_t retval = SCTP_XMIT_OK; | 192 | sctp_xmit_t retval = SCTP_XMIT_OK; |
189 | 193 | ||
190 | /* If sending DATA and haven't aleady bundled a SACK, try to | 194 | /* If sending DATA and haven't aleady bundled a SACK, try to |
191 | * bundle one in to the packet. | 195 | * bundle one in to the packet. |
192 | */ | 196 | */ |
193 | if (sctp_chunk_is_data(chunk) && !pkt->has_sack && | 197 | if (sctp_chunk_is_data(chunk) && !pkt->has_sack && |
194 | !pkt->has_cookie_echo) { | 198 | !pkt->has_cookie_echo) { |
195 | struct sctp_association *asoc; | 199 | struct sctp_association *asoc; |
196 | asoc = pkt->transport->asoc; | 200 | asoc = pkt->transport->asoc; |
197 | 201 | ||
198 | if (asoc->a_rwnd > asoc->rwnd) { | 202 | if (asoc->a_rwnd > asoc->rwnd) { |
199 | struct sctp_chunk *sack; | 203 | struct sctp_chunk *sack; |
200 | asoc->a_rwnd = asoc->rwnd; | 204 | asoc->a_rwnd = asoc->rwnd; |
201 | sack = sctp_make_sack(asoc); | 205 | sack = sctp_make_sack(asoc); |
202 | if (sack) { | 206 | if (sack) { |
203 | struct timer_list *timer; | 207 | struct timer_list *timer; |
204 | retval = sctp_packet_append_chunk(pkt, sack); | 208 | retval = sctp_packet_append_chunk(pkt, sack); |
205 | asoc->peer.sack_needed = 0; | 209 | asoc->peer.sack_needed = 0; |
206 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; | 210 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; |
207 | if (timer_pending(timer) && del_timer(timer)) | 211 | if (timer_pending(timer) && del_timer(timer)) |
208 | sctp_association_put(asoc); | 212 | sctp_association_put(asoc); |
209 | } | 213 | } |
210 | } | 214 | } |
211 | } | 215 | } |
212 | return retval; | 216 | return retval; |
213 | } | 217 | } |
214 | 218 | ||
215 | /* Append a chunk to the offered packet reporting back any inability to do | 219 | /* Append a chunk to the offered packet reporting back any inability to do |
216 | * so. | 220 | * so. |
217 | */ | 221 | */ |
218 | sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, | 222 | sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, |
219 | struct sctp_chunk *chunk) | 223 | struct sctp_chunk *chunk) |
220 | { | 224 | { |
221 | sctp_xmit_t retval = SCTP_XMIT_OK; | 225 | sctp_xmit_t retval = SCTP_XMIT_OK; |
222 | __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); | 226 | __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); |
223 | size_t psize; | 227 | size_t psize; |
224 | size_t pmtu; | 228 | size_t pmtu; |
225 | int too_big; | 229 | int too_big; |
226 | 230 | ||
227 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, | 231 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, |
228 | chunk); | 232 | chunk); |
229 | 233 | ||
230 | retval = sctp_packet_bundle_sack(packet, chunk); | 234 | retval = sctp_packet_bundle_sack(packet, chunk); |
231 | psize = packet->size; | 235 | psize = packet->size; |
232 | 236 | ||
233 | if (retval != SCTP_XMIT_OK) | 237 | if (retval != SCTP_XMIT_OK) |
234 | goto finish; | 238 | goto finish; |
235 | 239 | ||
236 | pmtu = ((packet->transport->asoc) ? | 240 | pmtu = ((packet->transport->asoc) ? |
237 | (packet->transport->asoc->pathmtu) : | 241 | (packet->transport->asoc->pathmtu) : |
238 | (packet->transport->pathmtu)); | 242 | (packet->transport->pathmtu)); |
239 | 243 | ||
240 | too_big = (psize + chunk_len > pmtu); | 244 | too_big = (psize + chunk_len > pmtu); |
241 | 245 | ||
242 | /* Decide if we need to fragment or resubmit later. */ | 246 | /* Decide if we need to fragment or resubmit later. */ |
243 | if (too_big) { | 247 | if (too_big) { |
244 | /* Both control chunks and data chunks with TSNs are | 248 | /* Both control chunks and data chunks with TSNs are |
245 | * non-fragmentable. | 249 | * non-fragmentable. |
246 | */ | 250 | */ |
247 | if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) { | 251 | if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) { |
248 | /* We no longer do re-fragmentation. | 252 | /* We no longer do re-fragmentation. |
249 | * Just fragment at the IP layer, if we | 253 | * Just fragment at the IP layer, if we |
250 | * actually hit this condition | 254 | * actually hit this condition |
251 | */ | 255 | */ |
252 | packet->ipfragok = 1; | 256 | packet->ipfragok = 1; |
253 | goto append; | 257 | goto append; |
254 | 258 | ||
255 | } else { | 259 | } else { |
256 | retval = SCTP_XMIT_PMTU_FULL; | 260 | retval = SCTP_XMIT_PMTU_FULL; |
257 | goto finish; | 261 | goto finish; |
258 | } | 262 | } |
259 | } | 263 | } |
260 | 264 | ||
261 | append: | 265 | append: |
262 | /* We believe that this chunk is OK to add to the packet (as | 266 | /* We believe that this chunk is OK to add to the packet (as |
263 | * long as we have the cwnd for it). | 267 | * long as we have the cwnd for it). |
264 | */ | 268 | */ |
265 | 269 | ||
266 | /* DATA is a special case since we must examine both rwnd and cwnd | 270 | /* DATA is a special case since we must examine both rwnd and cwnd |
267 | * before we send DATA. | 271 | * before we send DATA. |
268 | */ | 272 | */ |
269 | if (sctp_chunk_is_data(chunk)) { | 273 | if (sctp_chunk_is_data(chunk)) { |
270 | retval = sctp_packet_append_data(packet, chunk); | 274 | retval = sctp_packet_append_data(packet, chunk); |
271 | /* Disallow SACK bundling after DATA. */ | 275 | /* Disallow SACK bundling after DATA. */ |
272 | packet->has_sack = 1; | 276 | packet->has_sack = 1; |
273 | if (SCTP_XMIT_OK != retval) | 277 | if (SCTP_XMIT_OK != retval) |
274 | goto finish; | 278 | goto finish; |
275 | } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) | 279 | } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) |
276 | packet->has_cookie_echo = 1; | 280 | packet->has_cookie_echo = 1; |
277 | else if (SCTP_CID_SACK == chunk->chunk_hdr->type) | 281 | else if (SCTP_CID_SACK == chunk->chunk_hdr->type) |
278 | packet->has_sack = 1; | 282 | packet->has_sack = 1; |
279 | 283 | ||
280 | /* It is OK to send this chunk. */ | 284 | /* It is OK to send this chunk. */ |
281 | list_add_tail(&chunk->list, &packet->chunk_list); | 285 | list_add_tail(&chunk->list, &packet->chunk_list); |
282 | packet->size += chunk_len; | 286 | packet->size += chunk_len; |
283 | chunk->transport = packet->transport; | 287 | chunk->transport = packet->transport; |
284 | finish: | 288 | finish: |
285 | return retval; | 289 | return retval; |
286 | } | 290 | } |
287 | 291 | ||
288 | /* All packets are sent to the network through this function from | 292 | /* All packets are sent to the network through this function from |
289 | * sctp_outq_tail(). | 293 | * sctp_outq_tail(). |
290 | * | 294 | * |
291 | * The return value is a normal kernel error return value. | 295 | * The return value is a normal kernel error return value. |
292 | */ | 296 | */ |
293 | int sctp_packet_transmit(struct sctp_packet *packet) | 297 | int sctp_packet_transmit(struct sctp_packet *packet) |
294 | { | 298 | { |
295 | struct sctp_transport *tp = packet->transport; | 299 | struct sctp_transport *tp = packet->transport; |
296 | struct sctp_association *asoc = tp->asoc; | 300 | struct sctp_association *asoc = tp->asoc; |
297 | struct sctphdr *sh; | 301 | struct sctphdr *sh; |
298 | __u32 crc32 = 0; | 302 | __u32 crc32 = 0; |
299 | struct sk_buff *nskb; | 303 | struct sk_buff *nskb; |
300 | struct sctp_chunk *chunk, *tmp; | 304 | struct sctp_chunk *chunk, *tmp; |
301 | struct sock *sk; | 305 | struct sock *sk; |
302 | int err = 0; | 306 | int err = 0; |
303 | int padding; /* How much padding do we need? */ | 307 | int padding; /* How much padding do we need? */ |
304 | __u8 has_data = 0; | 308 | __u8 has_data = 0; |
305 | struct dst_entry *dst = tp->dst; | 309 | struct dst_entry *dst = tp->dst; |
306 | 310 | ||
307 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | 311 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); |
308 | 312 | ||
309 | /* Do NOT generate a chunkless packet. */ | 313 | /* Do NOT generate a chunkless packet. */ |
310 | if (list_empty(&packet->chunk_list)) | 314 | if (list_empty(&packet->chunk_list)) |
311 | return err; | 315 | return err; |
312 | 316 | ||
313 | /* Set up convenience variables... */ | 317 | /* Set up convenience variables... */ |
314 | chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); | 318 | chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); |
315 | sk = chunk->skb->sk; | 319 | sk = chunk->skb->sk; |
316 | 320 | ||
317 | /* Allocate the new skb. */ | 321 | /* Allocate the new skb. */ |
318 | nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); | 322 | nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC); |
319 | if (!nskb) | 323 | if (!nskb) |
320 | goto nomem; | 324 | goto nomem; |
321 | 325 | ||
322 | /* Make sure the outbound skb has enough header room reserved. */ | 326 | /* Make sure the outbound skb has enough header room reserved. */ |
323 | skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); | 327 | skb_reserve(nskb, packet->overhead + LL_MAX_HEADER); |
324 | 328 | ||
325 | /* Set the owning socket so that we know where to get the | 329 | /* Set the owning socket so that we know where to get the |
326 | * destination IP address. | 330 | * destination IP address. |
327 | */ | 331 | */ |
328 | skb_set_owner_w(nskb, sk); | 332 | skb_set_owner_w(nskb, sk); |
329 | 333 | ||
330 | /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ | 334 | /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ |
331 | if (!dst || (dst->obsolete > 1)) { | 335 | if (!dst || (dst->obsolete > 1)) { |
332 | dst_release(dst); | 336 | dst_release(dst); |
333 | sctp_transport_route(tp, NULL, sctp_sk(sk)); | 337 | sctp_transport_route(tp, NULL, sctp_sk(sk)); |
334 | if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { | 338 | if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { |
335 | sctp_assoc_sync_pmtu(asoc); | 339 | sctp_assoc_sync_pmtu(asoc); |
336 | } | 340 | } |
337 | } | 341 | } |
338 | nskb->dst = dst_clone(tp->dst); | 342 | nskb->dst = dst_clone(tp->dst); |
339 | if (!nskb->dst) | 343 | if (!nskb->dst) |
340 | goto no_route; | 344 | goto no_route; |
341 | dst = nskb->dst; | 345 | dst = nskb->dst; |
342 | 346 | ||
343 | /* Build the SCTP header. */ | 347 | /* Build the SCTP header. */ |
344 | sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); | 348 | sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); |
345 | sh->source = htons(packet->source_port); | 349 | sh->source = htons(packet->source_port); |
346 | sh->dest = htons(packet->destination_port); | 350 | sh->dest = htons(packet->destination_port); |
347 | 351 | ||
348 | /* From 6.8 Adler-32 Checksum Calculation: | 352 | /* From 6.8 Adler-32 Checksum Calculation: |
349 | * After the packet is constructed (containing the SCTP common | 353 | * After the packet is constructed (containing the SCTP common |
350 | * header and one or more control or DATA chunks), the | 354 | * header and one or more control or DATA chunks), the |
351 | * transmitter shall: | 355 | * transmitter shall: |
352 | * | 356 | * |
353 | * 1) Fill in the proper Verification Tag in the SCTP common | 357 | * 1) Fill in the proper Verification Tag in the SCTP common |
354 | * header and initialize the checksum field to 0's. | 358 | * header and initialize the checksum field to 0's. |
355 | */ | 359 | */ |
356 | sh->vtag = htonl(packet->vtag); | 360 | sh->vtag = htonl(packet->vtag); |
357 | sh->checksum = 0; | 361 | sh->checksum = 0; |
358 | 362 | ||
359 | /* 2) Calculate the Adler-32 checksum of the whole packet, | 363 | /* 2) Calculate the Adler-32 checksum of the whole packet, |
360 | * including the SCTP common header and all the | 364 | * including the SCTP common header and all the |
361 | * chunks. | 365 | * chunks. |
362 | * | 366 | * |
363 | * Note: Adler-32 is no longer applicable, as has been replaced | 367 | * Note: Adler-32 is no longer applicable, as has been replaced |
364 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. | 368 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. |
365 | */ | 369 | */ |
366 | if (!(dst->dev->features & NETIF_F_NO_CSUM)) | 370 | if (!(dst->dev->features & NETIF_F_NO_CSUM)) |
367 | crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr)); | 371 | crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr)); |
368 | 372 | ||
369 | /** | 373 | /** |
370 | * 6.10 Bundling | 374 | * 6.10 Bundling |
371 | * | 375 | * |
372 | * An endpoint bundles chunks by simply including multiple | 376 | * An endpoint bundles chunks by simply including multiple |
373 | * chunks in one outbound SCTP packet. ... | 377 | * chunks in one outbound SCTP packet. ... |
374 | */ | 378 | */ |
375 | 379 | ||
376 | /** | 380 | /** |
377 | * 3.2 Chunk Field Descriptions | 381 | * 3.2 Chunk Field Descriptions |
378 | * | 382 | * |
379 | * The total length of a chunk (including Type, Length and | 383 | * The total length of a chunk (including Type, Length and |
380 | * Value fields) MUST be a multiple of 4 bytes. If the length | 384 | * Value fields) MUST be a multiple of 4 bytes. If the length |
381 | * of the chunk is not a multiple of 4 bytes, the sender MUST | 385 | * of the chunk is not a multiple of 4 bytes, the sender MUST |
382 | * pad the chunk with all zero bytes and this padding is not | 386 | * pad the chunk with all zero bytes and this padding is not |
383 | * included in the chunk length field. The sender should | 387 | * included in the chunk length field. The sender should |
384 | * never pad with more than 3 bytes. | 388 | * never pad with more than 3 bytes. |
385 | * | 389 | * |
386 | * [This whole comment explains WORD_ROUND() below.] | 390 | * [This whole comment explains WORD_ROUND() below.] |
387 | */ | 391 | */ |
388 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); | 392 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); |
389 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { | 393 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
390 | list_del_init(&chunk->list); | 394 | list_del_init(&chunk->list); |
391 | if (sctp_chunk_is_data(chunk)) { | 395 | if (sctp_chunk_is_data(chunk)) { |
392 | 396 | ||
393 | if (!chunk->has_tsn) { | 397 | if (!chunk->has_tsn) { |
394 | sctp_chunk_assign_ssn(chunk); | 398 | sctp_chunk_assign_ssn(chunk); |
395 | sctp_chunk_assign_tsn(chunk); | 399 | sctp_chunk_assign_tsn(chunk); |
396 | 400 | ||
397 | /* 6.3.1 C4) When data is in flight and when allowed | 401 | /* 6.3.1 C4) When data is in flight and when allowed |
398 | * by rule C5, a new RTT measurement MUST be made each | 402 | * by rule C5, a new RTT measurement MUST be made each |
399 | * round trip. Furthermore, new RTT measurements | 403 | * round trip. Furthermore, new RTT measurements |
400 | * SHOULD be made no more than once per round-trip | 404 | * SHOULD be made no more than once per round-trip |
401 | * for a given destination transport address. | 405 | * for a given destination transport address. |
402 | */ | 406 | */ |
403 | 407 | ||
404 | if (!tp->rto_pending) { | 408 | if (!tp->rto_pending) { |
405 | chunk->rtt_in_progress = 1; | 409 | chunk->rtt_in_progress = 1; |
406 | tp->rto_pending = 1; | 410 | tp->rto_pending = 1; |
407 | } | 411 | } |
408 | } else | 412 | } else |
409 | chunk->resent = 1; | 413 | chunk->resent = 1; |
410 | 414 | ||
411 | chunk->sent_at = jiffies; | 415 | chunk->sent_at = jiffies; |
412 | has_data = 1; | 416 | has_data = 1; |
413 | } | 417 | } |
414 | 418 | ||
415 | padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; | 419 | padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; |
416 | if (padding) | 420 | if (padding) |
417 | memset(skb_put(chunk->skb, padding), 0, padding); | 421 | memset(skb_put(chunk->skb, padding), 0, padding); |
418 | 422 | ||
419 | if (dst->dev->features & NETIF_F_NO_CSUM) | 423 | if (dst->dev->features & NETIF_F_NO_CSUM) |
420 | memcpy(skb_put(nskb, chunk->skb->len), | 424 | memcpy(skb_put(nskb, chunk->skb->len), |
421 | chunk->skb->data, chunk->skb->len); | 425 | chunk->skb->data, chunk->skb->len); |
422 | else | 426 | else |
423 | crc32 = sctp_update_copy_cksum(skb_put(nskb, | 427 | crc32 = sctp_update_copy_cksum(skb_put(nskb, |
424 | chunk->skb->len), | 428 | chunk->skb->len), |
425 | chunk->skb->data, | 429 | chunk->skb->data, |
426 | chunk->skb->len, crc32); | 430 | chunk->skb->len, crc32); |
427 | 431 | ||
428 | SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", | 432 | SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", |
429 | "*** Chunk", chunk, | 433 | "*** Chunk", chunk, |
430 | sctp_cname(SCTP_ST_CHUNK( | 434 | sctp_cname(SCTP_ST_CHUNK( |
431 | chunk->chunk_hdr->type)), | 435 | chunk->chunk_hdr->type)), |
432 | chunk->has_tsn ? "TSN" : "No TSN", | 436 | chunk->has_tsn ? "TSN" : "No TSN", |
433 | chunk->has_tsn ? | 437 | chunk->has_tsn ? |
434 | ntohl(chunk->subh.data_hdr->tsn) : 0, | 438 | ntohl(chunk->subh.data_hdr->tsn) : 0, |
435 | "length", ntohs(chunk->chunk_hdr->length), | 439 | "length", ntohs(chunk->chunk_hdr->length), |
436 | "chunk->skb->len", chunk->skb->len, | 440 | "chunk->skb->len", chunk->skb->len, |
437 | "rtt_in_progress", chunk->rtt_in_progress); | 441 | "rtt_in_progress", chunk->rtt_in_progress); |
438 | 442 | ||
439 | /* | 443 | /* |
440 | * If this is a control chunk, this is our last | 444 | * If this is a control chunk, this is our last |
441 | * reference. Free data chunks after they've been | 445 | * reference. Free data chunks after they've been |
442 | * acknowledged or have failed. | 446 | * acknowledged or have failed. |
443 | */ | 447 | */ |
444 | if (!sctp_chunk_is_data(chunk)) | 448 | if (!sctp_chunk_is_data(chunk)) |
445 | sctp_chunk_free(chunk); | 449 | sctp_chunk_free(chunk); |
446 | } | 450 | } |
447 | 451 | ||
448 | /* Perform final transformation on checksum. */ | 452 | /* Perform final transformation on checksum. */ |
449 | if (!(dst->dev->features & NETIF_F_NO_CSUM)) | 453 | if (!(dst->dev->features & NETIF_F_NO_CSUM)) |
450 | crc32 = sctp_end_cksum(crc32); | 454 | crc32 = sctp_end_cksum(crc32); |
451 | 455 | ||
452 | /* 3) Put the resultant value into the checksum field in the | 456 | /* 3) Put the resultant value into the checksum field in the |
453 | * common header, and leave the rest of the bits unchanged. | 457 | * common header, and leave the rest of the bits unchanged. |
454 | */ | 458 | */ |
455 | sh->checksum = htonl(crc32); | 459 | sh->checksum = htonl(crc32); |
456 | 460 | ||
457 | /* IP layer ECN support | 461 | /* IP layer ECN support |
458 | * From RFC 2481 | 462 | * From RFC 2481 |
459 | * "The ECN-Capable Transport (ECT) bit would be set by the | 463 | * "The ECN-Capable Transport (ECT) bit would be set by the |
460 | * data sender to indicate that the end-points of the | 464 | * data sender to indicate that the end-points of the |
461 | * transport protocol are ECN-capable." | 465 | * transport protocol are ECN-capable." |
462 | * | 466 | * |
463 | * Now setting the ECT bit all the time, as it should not cause | 467 | * Now setting the ECT bit all the time, as it should not cause |
464 | * any problems protocol-wise even if our peer ignores it. | 468 | * any problems protocol-wise even if our peer ignores it. |
465 | * | 469 | * |
466 | * Note: The works for IPv6 layer checks this bit too later | 470 | * Note: The works for IPv6 layer checks this bit too later |
467 | * in transmission. See IP6_ECN_flow_xmit(). | 471 | * in transmission. See IP6_ECN_flow_xmit(). |
468 | */ | 472 | */ |
469 | INET_ECN_xmit(nskb->sk); | 473 | INET_ECN_xmit(nskb->sk); |
470 | 474 | ||
471 | /* Set up the IP options. */ | 475 | /* Set up the IP options. */ |
472 | /* BUG: not implemented | 476 | /* BUG: not implemented |
473 | * For v4 this all lives somewhere in sk->sk_opt... | 477 | * For v4 this all lives somewhere in sk->sk_opt... |
474 | */ | 478 | */ |
475 | 479 | ||
476 | /* Dump that on IP! */ | 480 | /* Dump that on IP! */ |
477 | if (asoc && asoc->peer.last_sent_to != tp) { | 481 | if (asoc && asoc->peer.last_sent_to != tp) { |
478 | /* Considering the multiple CPU scenario, this is a | 482 | /* Considering the multiple CPU scenario, this is a |
479 | * "correcter" place for last_sent_to. --xguo | 483 | * "correcter" place for last_sent_to. --xguo |
480 | */ | 484 | */ |
481 | asoc->peer.last_sent_to = tp; | 485 | asoc->peer.last_sent_to = tp; |
482 | } | 486 | } |
483 | 487 | ||
484 | if (has_data) { | 488 | if (has_data) { |
485 | struct timer_list *timer; | 489 | struct timer_list *timer; |
486 | unsigned long timeout; | 490 | unsigned long timeout; |
487 | 491 | ||
488 | tp->last_time_used = jiffies; | 492 | tp->last_time_used = jiffies; |
489 | 493 | ||
490 | /* Restart the AUTOCLOSE timer when sending data. */ | 494 | /* Restart the AUTOCLOSE timer when sending data. */ |
491 | if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { | 495 | if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { |
492 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | 496 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; |
493 | timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | 497 | timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; |
494 | 498 | ||
495 | if (!mod_timer(timer, jiffies + timeout)) | 499 | if (!mod_timer(timer, jiffies + timeout)) |
496 | sctp_association_hold(asoc); | 500 | sctp_association_hold(asoc); |
497 | } | 501 | } |
498 | } | 502 | } |
499 | 503 | ||
500 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", | 504 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", |
501 | nskb->len); | 505 | nskb->len); |
502 | 506 | ||
503 | if (tp->param_flags & SPP_PMTUD_ENABLE) | 507 | if (tp->param_flags & SPP_PMTUD_ENABLE) |
504 | (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok); | 508 | (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok); |
505 | else | 509 | else |
506 | (*tp->af_specific->sctp_xmit)(nskb, tp, 1); | 510 | (*tp->af_specific->sctp_xmit)(nskb, tp, 1); |
507 | 511 | ||
508 | out: | 512 | out: |
509 | packet->size = packet->overhead; | 513 | packet->size = packet->overhead; |
510 | return err; | 514 | return err; |
511 | no_route: | 515 | no_route: |
512 | kfree_skb(nskb); | 516 | kfree_skb(nskb); |
513 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | 517 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); |
514 | 518 | ||
515 | /* FIXME: Returning the 'err' will effect all the associations | 519 | /* FIXME: Returning the 'err' will effect all the associations |
516 | * associated with a socket, although only one of the paths of the | 520 | * associated with a socket, although only one of the paths of the |
517 | * association is unreachable. | 521 | * association is unreachable. |
518 | * The real failure of a transport or association can be passed on | 522 | * The real failure of a transport or association can be passed on |
519 | * to the user via notifications. So setting this error may not be | 523 | * to the user via notifications. So setting this error may not be |
520 | * required. | 524 | * required. |
521 | */ | 525 | */ |
522 | /* err = -EHOSTUNREACH; */ | 526 | /* err = -EHOSTUNREACH; */ |
523 | err: | 527 | err: |
524 | /* Control chunks are unreliable so just drop them. DATA chunks | 528 | /* Control chunks are unreliable so just drop them. DATA chunks |
525 | * will get resent or dropped later. | 529 | * will get resent or dropped later. |
526 | */ | 530 | */ |
527 | 531 | ||
528 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { | 532 | list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { |
529 | list_del_init(&chunk->list); | 533 | list_del_init(&chunk->list); |
530 | if (!sctp_chunk_is_data(chunk)) | 534 | if (!sctp_chunk_is_data(chunk)) |
531 | sctp_chunk_free(chunk); | 535 | sctp_chunk_free(chunk); |
532 | } | 536 | } |
533 | goto out; | 537 | goto out; |
534 | nomem: | 538 | nomem: |
535 | err = -ENOMEM; | 539 | err = -ENOMEM; |
536 | goto err; | 540 | goto err; |
537 | } | 541 | } |
538 | 542 | ||
539 | /******************************************************************** | 543 | /******************************************************************** |
540 | * 2nd Level Abstractions | 544 | * 2nd Level Abstractions |
541 | ********************************************************************/ | 545 | ********************************************************************/ |
542 | 546 | ||
543 | /* This private function handles the specifics of appending DATA chunks. */ | 547 | /* This private function handles the specifics of appending DATA chunks. */ |
544 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | 548 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, |
545 | struct sctp_chunk *chunk) | 549 | struct sctp_chunk *chunk) |
546 | { | 550 | { |
547 | sctp_xmit_t retval = SCTP_XMIT_OK; | 551 | sctp_xmit_t retval = SCTP_XMIT_OK; |
548 | size_t datasize, rwnd, inflight; | 552 | size_t datasize, rwnd, inflight; |
549 | struct sctp_transport *transport = packet->transport; | 553 | struct sctp_transport *transport = packet->transport; |
550 | __u32 max_burst_bytes; | 554 | __u32 max_burst_bytes; |
551 | struct sctp_association *asoc = transport->asoc; | 555 | struct sctp_association *asoc = transport->asoc; |
552 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | 556 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); |
553 | struct sctp_outq *q = &asoc->outqueue; | 557 | struct sctp_outq *q = &asoc->outqueue; |
554 | 558 | ||
555 | /* RFC 2960 6.1 Transmission of DATA Chunks | 559 | /* RFC 2960 6.1 Transmission of DATA Chunks |
556 | * | 560 | * |
557 | * A) At any given time, the data sender MUST NOT transmit new data to | 561 | * A) At any given time, the data sender MUST NOT transmit new data to |
558 | * any destination transport address if its peer's rwnd indicates | 562 | * any destination transport address if its peer's rwnd indicates |
559 | * that the peer has no buffer space (i.e. rwnd is 0, see Section | 563 | * that the peer has no buffer space (i.e. rwnd is 0, see Section |
560 | * 6.2.1). However, regardless of the value of rwnd (including if it | 564 | * 6.2.1). However, regardless of the value of rwnd (including if it |
561 | * is 0), the data sender can always have one DATA chunk in flight to | 565 | * is 0), the data sender can always have one DATA chunk in flight to |
562 | * the receiver if allowed by cwnd (see rule B below). This rule | 566 | * the receiver if allowed by cwnd (see rule B below). This rule |
563 | * allows the sender to probe for a change in rwnd that the sender | 567 | * allows the sender to probe for a change in rwnd that the sender |
564 | * missed due to the SACK having been lost in transit from the data | 568 | * missed due to the SACK having been lost in transit from the data |
565 | * receiver to the data sender. | 569 | * receiver to the data sender. |
566 | */ | 570 | */ |
567 | 571 | ||
568 | rwnd = asoc->peer.rwnd; | 572 | rwnd = asoc->peer.rwnd; |
569 | inflight = asoc->outqueue.outstanding_bytes; | 573 | inflight = asoc->outqueue.outstanding_bytes; |
570 | 574 | ||
571 | datasize = sctp_data_size(chunk); | 575 | datasize = sctp_data_size(chunk); |
572 | 576 | ||
573 | if (datasize > rwnd) { | 577 | if (datasize > rwnd) { |
574 | if (inflight > 0) { | 578 | if (inflight > 0) { |
575 | /* We have (at least) one data chunk in flight, | 579 | /* We have (at least) one data chunk in flight, |
576 | * so we can't fall back to rule 6.1 B). | 580 | * so we can't fall back to rule 6.1 B). |
577 | */ | 581 | */ |
578 | retval = SCTP_XMIT_RWND_FULL; | 582 | retval = SCTP_XMIT_RWND_FULL; |
579 | goto finish; | 583 | goto finish; |
580 | } | 584 | } |
581 | } | 585 | } |
582 | 586 | ||
583 | /* sctpimpguide-05 2.14.2 | 587 | /* sctpimpguide-05 2.14.2 |
584 | * D) When the time comes for the sender to | 588 | * D) When the time comes for the sender to |
585 | * transmit new DATA chunks, the protocol parameter Max.Burst MUST | 589 | * transmit new DATA chunks, the protocol parameter Max.Burst MUST |
586 | * first be applied to limit how many new DATA chunks may be sent. | 590 | * first be applied to limit how many new DATA chunks may be sent. |
587 | * The limit is applied by adjusting cwnd as follows: | 591 | * The limit is applied by adjusting cwnd as follows: |
588 | * if ((flightsize + Max.Burst * MTU) < cwnd) | 592 | * if ((flightsize + Max.Burst * MTU) < cwnd) |
589 | * cwnd = flightsize + Max.Burst * MTU | 593 | * cwnd = flightsize + Max.Burst * MTU |
590 | */ | 594 | */ |
591 | max_burst_bytes = asoc->max_burst * asoc->pathmtu; | 595 | max_burst_bytes = asoc->max_burst * asoc->pathmtu; |
592 | if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { | 596 | if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { |
593 | transport->cwnd = transport->flight_size + max_burst_bytes; | 597 | transport->cwnd = transport->flight_size + max_burst_bytes; |
594 | SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " | 598 | SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " |
595 | "transport: %p, cwnd: %d, " | 599 | "transport: %p, cwnd: %d, " |
596 | "ssthresh: %d, flight_size: %d, " | 600 | "ssthresh: %d, flight_size: %d, " |
597 | "pba: %d\n", | 601 | "pba: %d\n", |
598 | __FUNCTION__, transport, | 602 | __FUNCTION__, transport, |
599 | transport->cwnd, | 603 | transport->cwnd, |
600 | transport->ssthresh, | 604 | transport->ssthresh, |
601 | transport->flight_size, | 605 | transport->flight_size, |
602 | transport->partial_bytes_acked); | 606 | transport->partial_bytes_acked); |
603 | } | 607 | } |
604 | 608 | ||
605 | /* RFC 2960 6.1 Transmission of DATA Chunks | 609 | /* RFC 2960 6.1 Transmission of DATA Chunks |
606 | * | 610 | * |
607 | * B) At any given time, the sender MUST NOT transmit new data | 611 | * B) At any given time, the sender MUST NOT transmit new data |
608 | * to a given transport address if it has cwnd or more bytes | 612 | * to a given transport address if it has cwnd or more bytes |
609 | * of data outstanding to that transport address. | 613 | * of data outstanding to that transport address. |
610 | */ | 614 | */ |
611 | /* RFC 7.2.4 & the Implementers Guide 2.8. | 615 | /* RFC 7.2.4 & the Implementers Guide 2.8. |
612 | * | 616 | * |
613 | * 3) ... | 617 | * 3) ... |
614 | * When a Fast Retransmit is being performed the sender SHOULD | 618 | * When a Fast Retransmit is being performed the sender SHOULD |
615 | * ignore the value of cwnd and SHOULD NOT delay retransmission. | 619 | * ignore the value of cwnd and SHOULD NOT delay retransmission. |
616 | */ | 620 | */ |
617 | if (chunk->fast_retransmit <= 0) | 621 | if (chunk->fast_retransmit <= 0) |
618 | if (transport->flight_size >= transport->cwnd) { | 622 | if (transport->flight_size >= transport->cwnd) { |
619 | retval = SCTP_XMIT_RWND_FULL; | 623 | retval = SCTP_XMIT_RWND_FULL; |
620 | goto finish; | 624 | goto finish; |
621 | } | 625 | } |
622 | 626 | ||
623 | /* Nagle's algorithm to solve small-packet problem: | 627 | /* Nagle's algorithm to solve small-packet problem: |
624 | * Inhibit the sending of new chunks when new outgoing data arrives | 628 | * Inhibit the sending of new chunks when new outgoing data arrives |
625 | * if any previously transmitted data on the connection remains | 629 | * if any previously transmitted data on the connection remains |
626 | * unacknowledged. | 630 | * unacknowledged. |
627 | */ | 631 | */ |
628 | if (!sp->nodelay && sctp_packet_empty(packet) && | 632 | if (!sp->nodelay && sctp_packet_empty(packet) && |
629 | q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) { | 633 | q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) { |
630 | unsigned len = datasize + q->out_qlen; | 634 | unsigned len = datasize + q->out_qlen; |
631 | 635 | ||
632 | /* Check whether this chunk and all the rest of pending | 636 | /* Check whether this chunk and all the rest of pending |
633 | * data will fit or delay in hopes of bundling a full | 637 | * data will fit or delay in hopes of bundling a full |
634 | * sized packet. | 638 | * sized packet. |
635 | */ | 639 | */ |
636 | if (len < asoc->frag_point) { | 640 | if (len < asoc->frag_point) { |
637 | retval = SCTP_XMIT_NAGLE_DELAY; | 641 | retval = SCTP_XMIT_NAGLE_DELAY; |
638 | goto finish; | 642 | goto finish; |
639 | } | 643 | } |
640 | } | 644 | } |
641 | 645 | ||
642 | /* Keep track of how many bytes are in flight over this transport. */ | 646 | /* Keep track of how many bytes are in flight over this transport. */ |
643 | transport->flight_size += datasize; | 647 | transport->flight_size += datasize; |
644 | 648 | ||
645 | /* Keep track of how many bytes are in flight to the receiver. */ | 649 | /* Keep track of how many bytes are in flight to the receiver. */ |
646 | asoc->outqueue.outstanding_bytes += datasize; | 650 | asoc->outqueue.outstanding_bytes += datasize; |
647 | 651 | ||
648 | /* Update our view of the receiver's rwnd. Include sk_buff overhead | 652 | /* Update our view of the receiver's rwnd. Include sk_buff overhead |
649 | * while updating peer.rwnd so that it reduces the chances of a | 653 | * while updating peer.rwnd so that it reduces the chances of a |
650 | * receiver running out of receive buffer space even when receive | 654 | * receiver running out of receive buffer space even when receive |
651 | * window is still open. This can happen when a sender is sending | 655 | * window is still open. This can happen when a sender is sending |
652 | * sending small messages. | 656 | * sending small messages. |
653 | */ | 657 | */ |
654 | datasize += sizeof(struct sk_buff); | 658 | datasize += sizeof(struct sk_buff); |
655 | if (datasize < rwnd) | 659 | if (datasize < rwnd) |
656 | rwnd -= datasize; | 660 | rwnd -= datasize; |
657 | else | 661 | else |
658 | rwnd = 0; | 662 | rwnd = 0; |
659 | 663 | ||
660 | asoc->peer.rwnd = rwnd; | 664 | asoc->peer.rwnd = rwnd; |
661 | /* Has been accepted for transmission. */ | 665 | /* Has been accepted for transmission. */ |
662 | if (!asoc->peer.prsctp_capable) | 666 | if (!asoc->peer.prsctp_capable) |
663 | chunk->msg->can_abandon = 0; | 667 | chunk->msg->can_abandon = 0; |
664 | 668 | ||
665 | finish: | 669 | finish: |
666 | return retval; | 670 | return retval; |
667 | } | 671 | } |
668 | 672 |
net/sctp/protocol.c
1 | /* SCTP kernel reference Implementation | 1 | /* SCTP kernel reference Implementation |
2 | * (C) Copyright IBM Corp. 2001, 2004 | 2 | * (C) Copyright IBM Corp. 2001, 2004 |
3 | * Copyright (c) 1999-2000 Cisco, Inc. | 3 | * Copyright (c) 1999-2000 Cisco, Inc. |
4 | * Copyright (c) 1999-2001 Motorola, Inc. | 4 | * Copyright (c) 1999-2001 Motorola, Inc. |
5 | * Copyright (c) 2001 Intel Corp. | 5 | * Copyright (c) 2001 Intel Corp. |
6 | * Copyright (c) 2001 Nokia, Inc. | 6 | * Copyright (c) 2001 Nokia, Inc. |
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | 7 | * Copyright (c) 2001 La Monte H.P. Yarroll |
8 | * | 8 | * |
9 | * This file is part of the SCTP kernel reference Implementation | 9 | * This file is part of the SCTP kernel reference Implementation |
10 | * | 10 | * |
11 | * Initialization/cleanup for SCTP protocol support. | 11 | * Initialization/cleanup for SCTP protocol support. |
12 | * | 12 | * |
13 | * The SCTP reference implementation is free software; | 13 | * The SCTP reference implementation is free software; |
14 | * you can redistribute it and/or modify it under the terms of | 14 | * you can redistribute it and/or modify it under the terms of |
15 | * the GNU General Public License as published by | 15 | * the GNU General Public License as published by |
16 | * the Free Software Foundation; either version 2, or (at your option) | 16 | * the Free Software Foundation; either version 2, or (at your option) |
17 | * any later version. | 17 | * any later version. |
18 | * | 18 | * |
19 | * The SCTP reference implementation is distributed in the hope that it | 19 | * The SCTP reference implementation is distributed in the hope that it |
20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | 20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
21 | * ************************ | 21 | * ************************ |
22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
23 | * See the GNU General Public License for more details. | 23 | * See the GNU General Public License for more details. |
24 | * | 24 | * |
25 | * You should have received a copy of the GNU General Public License | 25 | * You should have received a copy of the GNU General Public License |
26 | * along with GNU CC; see the file COPYING. If not, write to | 26 | * along with GNU CC; see the file COPYING. If not, write to |
27 | * the Free Software Foundation, 59 Temple Place - Suite 330, | 27 | * the Free Software Foundation, 59 Temple Place - Suite 330, |
28 | * Boston, MA 02111-1307, USA. | 28 | * Boston, MA 02111-1307, USA. |
29 | * | 29 | * |
30 | * Please send any bug reports or fixes you make to the | 30 | * Please send any bug reports or fixes you make to the |
31 | * email address(es): | 31 | * email address(es): |
32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | 32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> |
33 | * | 33 | * |
34 | * Or submit a bug report through the following website: | 34 | * Or submit a bug report through the following website: |
35 | * http://www.sf.net/projects/lksctp | 35 | * http://www.sf.net/projects/lksctp |
36 | * | 36 | * |
37 | * Written or modified by: | 37 | * Written or modified by: |
38 | * La Monte H.P. Yarroll <piggy@acm.org> | 38 | * La Monte H.P. Yarroll <piggy@acm.org> |
39 | * Karl Knutson <karl@athena.chicago.il.us> | 39 | * Karl Knutson <karl@athena.chicago.il.us> |
40 | * Jon Grimm <jgrimm@us.ibm.com> | 40 | * Jon Grimm <jgrimm@us.ibm.com> |
41 | * Sridhar Samudrala <sri@us.ibm.com> | 41 | * Sridhar Samudrala <sri@us.ibm.com> |
42 | * Daisy Chang <daisyc@us.ibm.com> | 42 | * Daisy Chang <daisyc@us.ibm.com> |
43 | * Ardelle Fan <ardelle.fan@intel.com> | 43 | * Ardelle Fan <ardelle.fan@intel.com> |
44 | * | 44 | * |
45 | * Any bugs reported given to us we will try to fix... any fixes shared will | 45 | * Any bugs reported given to us we will try to fix... any fixes shared will |
46 | * be incorporated into the next SCTP release. | 46 | * be incorporated into the next SCTP release. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | #include <linux/module.h> | 49 | #include <linux/module.h> |
50 | #include <linux/init.h> | 50 | #include <linux/init.h> |
51 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
52 | #include <linux/inetdevice.h> | 52 | #include <linux/inetdevice.h> |
53 | #include <linux/seq_file.h> | 53 | #include <linux/seq_file.h> |
54 | #include <linux/bootmem.h> | 54 | #include <linux/bootmem.h> |
55 | #include <net/net_namespace.h> | 55 | #include <net/net_namespace.h> |
56 | #include <net/protocol.h> | 56 | #include <net/protocol.h> |
57 | #include <net/ip.h> | 57 | #include <net/ip.h> |
58 | #include <net/ipv6.h> | 58 | #include <net/ipv6.h> |
59 | #include <net/route.h> | 59 | #include <net/route.h> |
60 | #include <net/sctp/sctp.h> | 60 | #include <net/sctp/sctp.h> |
61 | #include <net/addrconf.h> | 61 | #include <net/addrconf.h> |
62 | #include <net/inet_common.h> | 62 | #include <net/inet_common.h> |
63 | #include <net/inet_ecn.h> | 63 | #include <net/inet_ecn.h> |
64 | 64 | ||
/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;
struct proc_dir_entry *proc_net_sctp;	/* /proc/net/sctp directory entry */
DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;

/* Map of association ids to associations, plus the lock guarding it. */
struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock);

/* This is the global socket data structure used for responding to
 * the Out-of-the-blue (OOTB) packets. A control sock will be created
 * for this socket at the initialization time.
 */
static struct socket *sctp_ctl_socket;

/* Per-address-family protocol and abstraction tables, registered at
 * init time (v6 entries remain NULL when IPv6 support is absent).
 */
static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific;

/* Slab caches for chunks and bind buckets. */
struct kmem_cache *sctp_chunk_cachep __read_mostly;
struct kmem_cache *sctp_bucket_cachep __read_mostly;

/* Memory accounting limits exposed through sysctl. */
int sysctl_sctp_mem[3];
int sysctl_sctp_rmem[3];
int sysctl_sctp_wmem[3];
90 | 90 | ||
91 | /* Return the address of the control sock. */ | 91 | /* Return the address of the control sock. */ |
92 | struct sock *sctp_get_ctl_sock(void) | 92 | struct sock *sctp_get_ctl_sock(void) |
93 | { | 93 | { |
94 | return sctp_ctl_socket->sk; | 94 | return sctp_ctl_socket->sk; |
95 | } | 95 | } |
96 | 96 | ||
97 | /* Set up the proc fs entry for the SCTP protocol. */ | 97 | /* Set up the proc fs entry for the SCTP protocol. */ |
98 | static __init int sctp_proc_init(void) | 98 | static __init int sctp_proc_init(void) |
99 | { | 99 | { |
100 | if (!proc_net_sctp) { | 100 | if (!proc_net_sctp) { |
101 | struct proc_dir_entry *ent; | 101 | struct proc_dir_entry *ent; |
102 | ent = proc_mkdir("sctp", init_net.proc_net); | 102 | ent = proc_mkdir("sctp", init_net.proc_net); |
103 | if (ent) { | 103 | if (ent) { |
104 | ent->owner = THIS_MODULE; | 104 | ent->owner = THIS_MODULE; |
105 | proc_net_sctp = ent; | 105 | proc_net_sctp = ent; |
106 | } else | 106 | } else |
107 | goto out_nomem; | 107 | goto out_nomem; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (sctp_snmp_proc_init()) | 110 | if (sctp_snmp_proc_init()) |
111 | goto out_nomem; | 111 | goto out_nomem; |
112 | if (sctp_eps_proc_init()) | 112 | if (sctp_eps_proc_init()) |
113 | goto out_nomem; | 113 | goto out_nomem; |
114 | if (sctp_assocs_proc_init()) | 114 | if (sctp_assocs_proc_init()) |
115 | goto out_nomem; | 115 | goto out_nomem; |
116 | 116 | ||
117 | return 0; | 117 | return 0; |
118 | 118 | ||
119 | out_nomem: | 119 | out_nomem: |
120 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | } | 121 | } |
122 | 122 | ||
/* Clean up the proc fs entry for the SCTP protocol.
 * Note: Do not make this __exit as it is used in the init error
 * path.
 */
static void sctp_proc_exit(void)
{
	/* Tear down the individual tables before removing the directory. */
	sctp_snmp_proc_exit();
	sctp_eps_proc_exit();
	sctp_assocs_proc_exit();

	if (proc_net_sctp) {
		/* Clear the global first so nobody uses a stale entry. */
		proc_net_sctp = NULL;
		remove_proc_entry("sctp", init_net.proc_net);
	}
}
138 | 138 | ||
139 | /* Private helper to extract ipv4 address and stash them in | 139 | /* Private helper to extract ipv4 address and stash them in |
140 | * the protocol structure. | 140 | * the protocol structure. |
141 | */ | 141 | */ |
142 | static void sctp_v4_copy_addrlist(struct list_head *addrlist, | 142 | static void sctp_v4_copy_addrlist(struct list_head *addrlist, |
143 | struct net_device *dev) | 143 | struct net_device *dev) |
144 | { | 144 | { |
145 | struct in_device *in_dev; | 145 | struct in_device *in_dev; |
146 | struct in_ifaddr *ifa; | 146 | struct in_ifaddr *ifa; |
147 | struct sctp_sockaddr_entry *addr; | 147 | struct sctp_sockaddr_entry *addr; |
148 | 148 | ||
149 | rcu_read_lock(); | 149 | rcu_read_lock(); |
150 | if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { | 150 | if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { |
151 | rcu_read_unlock(); | 151 | rcu_read_unlock(); |
152 | return; | 152 | return; |
153 | } | 153 | } |
154 | 154 | ||
155 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { | 155 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { |
156 | /* Add the address to the local list. */ | 156 | /* Add the address to the local list. */ |
157 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | 157 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); |
158 | if (addr) { | 158 | if (addr) { |
159 | addr->a.v4.sin_family = AF_INET; | 159 | addr->a.v4.sin_family = AF_INET; |
160 | addr->a.v4.sin_port = 0; | 160 | addr->a.v4.sin_port = 0; |
161 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 161 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
162 | addr->valid = 1; | 162 | addr->valid = 1; |
163 | INIT_LIST_HEAD(&addr->list); | 163 | INIT_LIST_HEAD(&addr->list); |
164 | INIT_RCU_HEAD(&addr->rcu); | 164 | INIT_RCU_HEAD(&addr->rcu); |
165 | list_add_tail(&addr->list, addrlist); | 165 | list_add_tail(&addr->list, addrlist); |
166 | } | 166 | } |
167 | } | 167 | } |
168 | 168 | ||
169 | rcu_read_unlock(); | 169 | rcu_read_unlock(); |
170 | } | 170 | } |
171 | 171 | ||
172 | /* Extract our IP addresses from the system and stash them in the | 172 | /* Extract our IP addresses from the system and stash them in the |
173 | * protocol structure. | 173 | * protocol structure. |
174 | */ | 174 | */ |
175 | static void sctp_get_local_addr_list(void) | 175 | static void sctp_get_local_addr_list(void) |
176 | { | 176 | { |
177 | struct net_device *dev; | 177 | struct net_device *dev; |
178 | struct list_head *pos; | 178 | struct list_head *pos; |
179 | struct sctp_af *af; | 179 | struct sctp_af *af; |
180 | 180 | ||
181 | read_lock(&dev_base_lock); | 181 | read_lock(&dev_base_lock); |
182 | for_each_netdev(&init_net, dev) { | 182 | for_each_netdev(&init_net, dev) { |
183 | __list_for_each(pos, &sctp_address_families) { | 183 | __list_for_each(pos, &sctp_address_families) { |
184 | af = list_entry(pos, struct sctp_af, list); | 184 | af = list_entry(pos, struct sctp_af, list); |
185 | af->copy_addrlist(&sctp_local_addr_list, dev); | 185 | af->copy_addrlist(&sctp_local_addr_list, dev); |
186 | } | 186 | } |
187 | } | 187 | } |
188 | read_unlock(&dev_base_lock); | 188 | read_unlock(&dev_base_lock); |
189 | } | 189 | } |
190 | 190 | ||
191 | /* Free the existing local addresses. */ | 191 | /* Free the existing local addresses. */ |
192 | static void sctp_free_local_addr_list(void) | 192 | static void sctp_free_local_addr_list(void) |
193 | { | 193 | { |
194 | struct sctp_sockaddr_entry *addr; | 194 | struct sctp_sockaddr_entry *addr; |
195 | struct list_head *pos, *temp; | 195 | struct list_head *pos, *temp; |
196 | 196 | ||
197 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { | 197 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { |
198 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | 198 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); |
199 | list_del(pos); | 199 | list_del(pos); |
200 | kfree(addr); | 200 | kfree(addr); |
201 | } | 201 | } |
202 | } | 202 | } |
203 | 203 | ||
204 | void sctp_local_addr_free(struct rcu_head *head) | 204 | void sctp_local_addr_free(struct rcu_head *head) |
205 | { | 205 | { |
206 | struct sctp_sockaddr_entry *e = container_of(head, | 206 | struct sctp_sockaddr_entry *e = container_of(head, |
207 | struct sctp_sockaddr_entry, rcu); | 207 | struct sctp_sockaddr_entry, rcu); |
208 | kfree(e); | 208 | kfree(e); |
209 | } | 209 | } |
210 | 210 | ||
211 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ | 211 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ |
212 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, | 212 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, |
213 | gfp_t gfp, int copy_flags) | 213 | gfp_t gfp, int copy_flags) |
214 | { | 214 | { |
215 | struct sctp_sockaddr_entry *addr; | 215 | struct sctp_sockaddr_entry *addr; |
216 | int error = 0; | 216 | int error = 0; |
217 | 217 | ||
218 | rcu_read_lock(); | 218 | rcu_read_lock(); |
219 | list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { | 219 | list_for_each_entry_rcu(addr, &sctp_local_addr_list, list) { |
220 | if (!addr->valid) | 220 | if (!addr->valid) |
221 | continue; | 221 | continue; |
222 | if (sctp_in_scope(&addr->a, scope)) { | 222 | if (sctp_in_scope(&addr->a, scope)) { |
223 | /* Now that the address is in scope, check to see if | 223 | /* Now that the address is in scope, check to see if |
224 | * the address type is really supported by the local | 224 | * the address type is really supported by the local |
225 | * sock as well as the remote peer. | 225 | * sock as well as the remote peer. |
226 | */ | 226 | */ |
227 | if ((((AF_INET == addr->a.sa.sa_family) && | 227 | if ((((AF_INET == addr->a.sa.sa_family) && |
228 | (copy_flags & SCTP_ADDR4_PEERSUPP))) || | 228 | (copy_flags & SCTP_ADDR4_PEERSUPP))) || |
229 | (((AF_INET6 == addr->a.sa.sa_family) && | 229 | (((AF_INET6 == addr->a.sa.sa_family) && |
230 | (copy_flags & SCTP_ADDR6_ALLOWED) && | 230 | (copy_flags & SCTP_ADDR6_ALLOWED) && |
231 | (copy_flags & SCTP_ADDR6_PEERSUPP)))) { | 231 | (copy_flags & SCTP_ADDR6_PEERSUPP)))) { |
232 | error = sctp_add_bind_addr(bp, &addr->a, 1, | 232 | error = sctp_add_bind_addr(bp, &addr->a, 1, |
233 | GFP_ATOMIC); | 233 | GFP_ATOMIC); |
234 | if (error) | 234 | if (error) |
235 | goto end_copy; | 235 | goto end_copy; |
236 | } | 236 | } |
237 | } | 237 | } |
238 | } | 238 | } |
239 | 239 | ||
240 | end_copy: | 240 | end_copy: |
241 | rcu_read_unlock(); | 241 | rcu_read_unlock(); |
242 | return error; | 242 | return error; |
243 | } | 243 | } |
244 | 244 | ||
245 | /* Initialize a sctp_addr from in incoming skb. */ | 245 | /* Initialize a sctp_addr from in incoming skb. */ |
246 | static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, | 246 | static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, |
247 | int is_saddr) | 247 | int is_saddr) |
248 | { | 248 | { |
249 | void *from; | 249 | void *from; |
250 | __be16 *port; | 250 | __be16 *port; |
251 | struct sctphdr *sh; | 251 | struct sctphdr *sh; |
252 | 252 | ||
253 | port = &addr->v4.sin_port; | 253 | port = &addr->v4.sin_port; |
254 | addr->v4.sin_family = AF_INET; | 254 | addr->v4.sin_family = AF_INET; |
255 | 255 | ||
256 | sh = sctp_hdr(skb); | 256 | sh = sctp_hdr(skb); |
257 | if (is_saddr) { | 257 | if (is_saddr) { |
258 | *port = sh->source; | 258 | *port = sh->source; |
259 | from = &ip_hdr(skb)->saddr; | 259 | from = &ip_hdr(skb)->saddr; |
260 | } else { | 260 | } else { |
261 | *port = sh->dest; | 261 | *port = sh->dest; |
262 | from = &ip_hdr(skb)->daddr; | 262 | from = &ip_hdr(skb)->daddr; |
263 | } | 263 | } |
264 | memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); | 264 | memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); |
265 | } | 265 | } |
266 | 266 | ||
267 | /* Initialize an sctp_addr from a socket. */ | 267 | /* Initialize an sctp_addr from a socket. */ |
268 | static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) | 268 | static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) |
269 | { | 269 | { |
270 | addr->v4.sin_family = AF_INET; | 270 | addr->v4.sin_family = AF_INET; |
271 | addr->v4.sin_port = 0; | 271 | addr->v4.sin_port = 0; |
272 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; | 272 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; |
273 | } | 273 | } |
274 | 274 | ||
/* Initialize sk->sk_rcv_saddr from sctp_addr.
 * Copies the v4 address into the inet socket's bound receive address.
 */
static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr;
}
280 | 280 | ||
/* Initialize sk->sk_daddr from sctp_addr.
 * Copies the v4 address into the inet socket's destination address.
 */
static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
{
	inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr;
}
286 | 286 | ||
287 | /* Initialize a sctp_addr from an address parameter. */ | 287 | /* Initialize a sctp_addr from an address parameter. */ |
288 | static void sctp_v4_from_addr_param(union sctp_addr *addr, | 288 | static void sctp_v4_from_addr_param(union sctp_addr *addr, |
289 | union sctp_addr_param *param, | 289 | union sctp_addr_param *param, |
290 | __be16 port, int iif) | 290 | __be16 port, int iif) |
291 | { | 291 | { |
292 | addr->v4.sin_family = AF_INET; | 292 | addr->v4.sin_family = AF_INET; |
293 | addr->v4.sin_port = port; | 293 | addr->v4.sin_port = port; |
294 | addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; | 294 | addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; |
295 | } | 295 | } |
296 | 296 | ||
297 | /* Initialize an address parameter from a sctp_addr and return the length | 297 | /* Initialize an address parameter from a sctp_addr and return the length |
298 | * of the address parameter. | 298 | * of the address parameter. |
299 | */ | 299 | */ |
300 | static int sctp_v4_to_addr_param(const union sctp_addr *addr, | 300 | static int sctp_v4_to_addr_param(const union sctp_addr *addr, |
301 | union sctp_addr_param *param) | 301 | union sctp_addr_param *param) |
302 | { | 302 | { |
303 | int length = sizeof(sctp_ipv4addr_param_t); | 303 | int length = sizeof(sctp_ipv4addr_param_t); |
304 | 304 | ||
305 | param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; | 305 | param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; |
306 | param->v4.param_hdr.length = htons(length); | 306 | param->v4.param_hdr.length = htons(length); |
307 | param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; | 307 | param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; |
308 | 308 | ||
309 | return length; | 309 | return length; |
310 | } | 310 | } |
311 | 311 | ||
312 | /* Initialize a sctp_addr from a dst_entry. */ | 312 | /* Initialize a sctp_addr from a dst_entry. */ |
313 | static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, | 313 | static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, |
314 | __be16 port) | 314 | __be16 port) |
315 | { | 315 | { |
316 | struct rtable *rt = (struct rtable *)dst; | 316 | struct rtable *rt = (struct rtable *)dst; |
317 | saddr->v4.sin_family = AF_INET; | 317 | saddr->v4.sin_family = AF_INET; |
318 | saddr->v4.sin_port = port; | 318 | saddr->v4.sin_port = port; |
319 | saddr->v4.sin_addr.s_addr = rt->rt_src; | 319 | saddr->v4.sin_addr.s_addr = rt->rt_src; |
320 | } | 320 | } |
321 | 321 | ||
322 | /* Compare two addresses exactly. */ | 322 | /* Compare two addresses exactly. */ |
323 | static int sctp_v4_cmp_addr(const union sctp_addr *addr1, | 323 | static int sctp_v4_cmp_addr(const union sctp_addr *addr1, |
324 | const union sctp_addr *addr2) | 324 | const union sctp_addr *addr2) |
325 | { | 325 | { |
326 | if (addr1->sa.sa_family != addr2->sa.sa_family) | 326 | if (addr1->sa.sa_family != addr2->sa.sa_family) |
327 | return 0; | 327 | return 0; |
328 | if (addr1->v4.sin_port != addr2->v4.sin_port) | 328 | if (addr1->v4.sin_port != addr2->v4.sin_port) |
329 | return 0; | 329 | return 0; |
330 | if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr) | 330 | if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr) |
331 | return 0; | 331 | return 0; |
332 | 332 | ||
333 | return 1; | 333 | return 1; |
334 | } | 334 | } |
335 | 335 | ||
336 | /* Initialize addr struct to INADDR_ANY. */ | 336 | /* Initialize addr struct to INADDR_ANY. */ |
337 | static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) | 337 | static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port) |
338 | { | 338 | { |
339 | addr->v4.sin_family = AF_INET; | 339 | addr->v4.sin_family = AF_INET; |
340 | addr->v4.sin_addr.s_addr = INADDR_ANY; | 340 | addr->v4.sin_addr.s_addr = INADDR_ANY; |
341 | addr->v4.sin_port = port; | 341 | addr->v4.sin_port = port; |
342 | } | 342 | } |
343 | 343 | ||
344 | /* Is this a wildcard address? */ | 344 | /* Is this a wildcard address? */ |
345 | static int sctp_v4_is_any(const union sctp_addr *addr) | 345 | static int sctp_v4_is_any(const union sctp_addr *addr) |
346 | { | 346 | { |
347 | return INADDR_ANY == addr->v4.sin_addr.s_addr; | 347 | return INADDR_ANY == addr->v4.sin_addr.s_addr; |
348 | } | 348 | } |
349 | 349 | ||
/* This function checks if the address is a valid address to be used for
 * SCTP binding.
 *
 * Output:
 * Return 0 - If the address is a non-unicast or an illegal address.
 * Return 1 - If the address is a unicast.
 */
static int sctp_v4_addr_valid(union sctp_addr *addr,
			      struct sctp_sock *sp,
			      const struct sk_buff *skb)
{
	/* Is this a non-unicast address or a unusable SCTP address? */
	if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr))
		return 0;

	/* Is this a broadcast address?  Only detectable when an skb with
	 * an attached route is supplied; the route flags carry the answer.
	 */
	if (skb && ((struct rtable *)skb->dst)->rt_flags & RTCF_BROADCAST)
		return 0;

	return 1;
}
371 | 371 | ||
372 | /* Should this be available for binding? */ | 372 | /* Should this be available for binding? */ |
373 | static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) | 373 | static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) |
374 | { | 374 | { |
375 | int ret = inet_addr_type(addr->v4.sin_addr.s_addr); | 375 | int ret = inet_addr_type(addr->v4.sin_addr.s_addr); |
376 | 376 | ||
377 | 377 | ||
378 | if (addr->v4.sin_addr.s_addr != INADDR_ANY && | 378 | if (addr->v4.sin_addr.s_addr != INADDR_ANY && |
379 | ret != RTN_LOCAL && | 379 | ret != RTN_LOCAL && |
380 | !sp->inet.freebind && | 380 | !sp->inet.freebind && |
381 | !sysctl_ip_nonlocal_bind) | 381 | !sysctl_ip_nonlocal_bind) |
382 | return 0; | 382 | return 0; |
383 | 383 | ||
384 | return 1; | 384 | return 1; |
385 | } | 385 | } |
386 | 386 | ||
387 | /* Checking the loopback, private and other address scopes as defined in | 387 | /* Checking the loopback, private and other address scopes as defined in |
388 | * RFC 1918. The IPv4 scoping is based on the draft for SCTP IPv4 | 388 | * RFC 1918. The IPv4 scoping is based on the draft for SCTP IPv4 |
389 | * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>. | 389 | * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>. |
390 | * | 390 | * |
391 | * Level 0 - unusable SCTP addresses | 391 | * Level 0 - unusable SCTP addresses |
392 | * Level 1 - loopback address | 392 | * Level 1 - loopback address |
393 | * Level 2 - link-local addresses | 393 | * Level 2 - link-local addresses |
394 | * Level 3 - private addresses. | 394 | * Level 3 - private addresses. |
395 | * Level 4 - global addresses | 395 | * Level 4 - global addresses |
396 | * For INIT and INIT-ACK address list, let L be the level of | 396 | * For INIT and INIT-ACK address list, let L be the level of |
397 | * of requested destination address, sender and receiver | 397 | * of requested destination address, sender and receiver |
398 | * SHOULD include all of its addresses with level greater | 398 | * SHOULD include all of its addresses with level greater |
399 | * than or equal to L. | 399 | * than or equal to L. |
400 | */ | 400 | */ |
401 | static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) | 401 | static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) |
402 | { | 402 | { |
403 | sctp_scope_t retval; | 403 | sctp_scope_t retval; |
404 | 404 | ||
405 | /* Should IPv4 scoping be a sysctl configurable option | 405 | /* Should IPv4 scoping be a sysctl configurable option |
406 | * so users can turn it off (default on) for certain | 406 | * so users can turn it off (default on) for certain |
407 | * unconventional networking environments? | 407 | * unconventional networking environments? |
408 | */ | 408 | */ |
409 | 409 | ||
410 | /* Check for unusable SCTP addresses. */ | 410 | /* Check for unusable SCTP addresses. */ |
411 | if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr)) { | 411 | if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr)) { |
412 | retval = SCTP_SCOPE_UNUSABLE; | 412 | retval = SCTP_SCOPE_UNUSABLE; |
413 | } else if (LOOPBACK(addr->v4.sin_addr.s_addr)) { | 413 | } else if (LOOPBACK(addr->v4.sin_addr.s_addr)) { |
414 | retval = SCTP_SCOPE_LOOPBACK; | 414 | retval = SCTP_SCOPE_LOOPBACK; |
415 | } else if (IS_IPV4_LINK_ADDRESS(&addr->v4.sin_addr.s_addr)) { | 415 | } else if (IS_IPV4_LINK_ADDRESS(&addr->v4.sin_addr.s_addr)) { |
416 | retval = SCTP_SCOPE_LINK; | 416 | retval = SCTP_SCOPE_LINK; |
417 | } else if (IS_IPV4_PRIVATE_ADDRESS(&addr->v4.sin_addr.s_addr)) { | 417 | } else if (IS_IPV4_PRIVATE_ADDRESS(&addr->v4.sin_addr.s_addr)) { |
418 | retval = SCTP_SCOPE_PRIVATE; | 418 | retval = SCTP_SCOPE_PRIVATE; |
419 | } else { | 419 | } else { |
420 | retval = SCTP_SCOPE_GLOBAL; | 420 | retval = SCTP_SCOPE_GLOBAL; |
421 | } | 421 | } |
422 | 422 | ||
423 | return retval; | 423 | return retval; |
424 | } | 424 | } |
425 | 425 | ||
/* Returns a valid dst cache entry for the given source and destination ip
 * addresses. If an association is passed, trys to get a dst entry with a
 * source address that matches an address in the bind address list.
 *
 * Returns a held dst_entry on success, or NULL when no acceptable route
 * exists; the caller owns the reference and must dst_release() it.
 */
static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
					 union sctp_addr *daddr,
					 union sctp_addr *saddr)
{
	struct rtable *rt;
	struct flowi fl;
	struct sctp_bind_addr *bp;
	struct sctp_sockaddr_entry *laddr;
	struct dst_entry *dst = NULL;
	union sctp_addr dst_saddr;

	/* Build the flow key: destination always, TOS/interface from the
	 * association's socket when available, source only if supplied.
	 */
	memset(&fl, 0x0, sizeof(struct flowi));
	fl.fl4_dst = daddr->v4.sin_addr.s_addr;
	fl.proto = IPPROTO_SCTP;
	if (asoc) {
		fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk);
		fl.oif = asoc->base.sk->sk_bound_dev_if;
	}
	if (saddr)
		fl.fl4_src = saddr->v4.sin_addr.s_addr;

	SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ",
			  __FUNCTION__, NIPQUAD(fl.fl4_dst),
			  NIPQUAD(fl.fl4_src));

	if (!ip_route_output_key(&rt, &fl)) {
		dst = &rt->u.dst;
	}

	/* If there is no association or if a source address is passed, no
	 * more validation is required.
	 */
	if (!asoc || saddr)
		goto out;

	bp = &asoc->base.bind_addr;

	if (dst) {
		/* Walk through the bind address list and look for a bind
		 * address that matches the source address of the returned dst.
		 * The list is RCU-protected; skip entries being removed
		 * (!valid) or not usable as a source.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
			if (!laddr->valid || !laddr->use_as_src)
				continue;
			sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port));
			if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
				goto out_unlock;
		}
		rcu_read_unlock();

		/* None of the bound addresses match the source address of the
		 * dst. So release it.
		 */
		dst_release(dst);
		dst = NULL;
	}

	/* Walk through the bind address list and try to get a dst that
	 * matches a bind address as the source address.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
		if (!laddr->valid)
			continue;
		if ((laddr->use_as_src) &&
		    (AF_INET == laddr->a.sa.sa_family)) {
			fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
			if (!ip_route_output_key(&rt, &fl)) {
				dst = &rt->u.dst;
				goto out_unlock;
			}
		}
	}

out_unlock:
	rcu_read_unlock();
out:
	if (dst)
		SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n",
				  NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src));
	else
		SCTP_DEBUG_PRINTK("NO ROUTE\n");

	return dst;
}
516 | 516 | ||
/* For v4, the source address is cached in the route entry (dst), so fill
 * in saddr from the route's rt_src together with the association's bound
 * port.  Does nothing when no association or no route is given.
 */
static void sctp_v4_get_saddr(struct sctp_association *asoc,
			      struct dst_entry *dst,
			      union sctp_addr *daddr,
			      union sctp_addr *saddr)
{
	struct rtable *rt = (struct rtable *)dst;

	if (!asoc)
		return;

	if (rt) {
		saddr->v4.sin_family = AF_INET;
		saddr->v4.sin_port = htons(asoc->base.bind_addr.port);
		saddr->v4.sin_addr.s_addr = rt->rt_src;
	}
}
536 | 536 | ||
/* What interface did this skb arrive on?
 * The input interface index is recorded in the route attached to the skb.
 */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
	return ((struct rtable *)skb->dst)->rt_iif;
}
542 | 542 | ||
/* Was this packet marked by Explicit Congestion Notification?
 * The ECN bits are carried in the IPv4 TOS byte.
 */
static int sctp_v4_is_ce(const struct sk_buff *skb)
{
	return INET_ECN_is_ce(ip_hdr(skb)->tos);
}
548 | 548 | ||
/* Create and initialize a new sk for the socket returned by accept().
 * Clones the relevant options from the listening sock 'sk' and fills in
 * the peer's address from the association.  Returns the new sock, or
 * NULL on allocation/init failure.
 */
static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
					     struct sctp_association *asoc)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_sock *newinet;
	struct sock *newsk = sk_alloc(sk->sk_net, PF_INET, GFP_KERNEL, sk->sk_prot, 1);

	if (!newsk)
		goto out;

	sock_init_data(NULL, newsk);

	/* Accepted SCTP sockets present themselves as SOCK_STREAM. */
	newsk->sk_type = SOCK_STREAM;

	/* Inherit checksum, reuse and shutdown state from the listener. */
	newsk->sk_no_check = sk->sk_no_check;
	newsk->sk_reuse = sk->sk_reuse;
	newsk->sk_shutdown = sk->sk_shutdown;

	newsk->sk_destruct = inet_sock_destruct;
	newsk->sk_family = PF_INET;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sock_reset_flag(newsk, SOCK_ZAPPED);

	newinet = inet_sk(newsk);

	/* Initialize sk's sport, dport, rcv_saddr and daddr for
	 * getsockname() and getpeername()
	 */
	newinet->sport = inet->sport;
	newinet->saddr = inet->saddr;
	newinet->rcv_saddr = inet->rcv_saddr;
	newinet->dport = htons(asoc->peer.port);
	newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
	newinet->pmtudisc = inet->pmtudisc;
	/* Seed the IP ID counter from association state. */
	newinet->id = asoc->next_tsn ^ jiffies;

	newinet->uc_ttl = -1;
	newinet->mc_loop = 1;
	newinet->mc_ttl = 1;
	newinet->mc_index = 0;
	newinet->mc_list = NULL;

	sk_refcnt_debug_inc(newsk);

	/* Run the protocol's own init; on failure release everything. */
	if (newsk->sk_prot->init(newsk)) {
		sk_common_release(newsk);
		newsk = NULL;
	}

out:
	return newsk;
}
603 | 603 | ||
/* Map address, empty for v4 family -- v4-mapped addresses only exist
 * in the IPv6 address family.
 */
static void sctp_v4_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr)
{
	/* Empty */
}
609 | 609 | ||
/* Dump the v4 addr to the seq file (dotted-quad followed by a space). */
static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
{
	seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr));
}
615 | 615 | ||
616 | /* Event handler for inet address addition/deletion events. | 616 | /* Event handler for inet address addition/deletion events. |
617 | * The sctp_local_addr_list needs to be protocted by a spin lock since | 617 | * The sctp_local_addr_list needs to be protocted by a spin lock since |
618 | * multiple notifiers (say IPv4 and IPv6) may be running at the same | 618 | * multiple notifiers (say IPv4 and IPv6) may be running at the same |
619 | * time and thus corrupt the list. | 619 | * time and thus corrupt the list. |
620 | * The reader side is protected with RCU. | 620 | * The reader side is protected with RCU. |
621 | */ | 621 | */ |
622 | static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | 622 | static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, |
623 | void *ptr) | 623 | void *ptr) |
624 | { | 624 | { |
625 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; | 625 | struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
626 | struct sctp_sockaddr_entry *addr = NULL; | 626 | struct sctp_sockaddr_entry *addr = NULL; |
627 | struct sctp_sockaddr_entry *temp; | 627 | struct sctp_sockaddr_entry *temp; |
628 | 628 | ||
629 | switch (ev) { | 629 | switch (ev) { |
630 | case NETDEV_UP: | 630 | case NETDEV_UP: |
631 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); | 631 | addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); |
632 | if (addr) { | 632 | if (addr) { |
633 | addr->a.v4.sin_family = AF_INET; | 633 | addr->a.v4.sin_family = AF_INET; |
634 | addr->a.v4.sin_port = 0; | 634 | addr->a.v4.sin_port = 0; |
635 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | 635 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; |
636 | addr->valid = 1; | 636 | addr->valid = 1; |
637 | spin_lock_bh(&sctp_local_addr_lock); | 637 | spin_lock_bh(&sctp_local_addr_lock); |
638 | list_add_tail_rcu(&addr->list, &sctp_local_addr_list); | 638 | list_add_tail_rcu(&addr->list, &sctp_local_addr_list); |
639 | spin_unlock_bh(&sctp_local_addr_lock); | 639 | spin_unlock_bh(&sctp_local_addr_lock); |
640 | } | 640 | } |
641 | break; | 641 | break; |
642 | case NETDEV_DOWN: | 642 | case NETDEV_DOWN: |
643 | spin_lock_bh(&sctp_local_addr_lock); | 643 | spin_lock_bh(&sctp_local_addr_lock); |
644 | list_for_each_entry_safe(addr, temp, | 644 | list_for_each_entry_safe(addr, temp, |
645 | &sctp_local_addr_list, list) { | 645 | &sctp_local_addr_list, list) { |
646 | if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { | 646 | if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { |
647 | addr->valid = 0; | 647 | addr->valid = 0; |
648 | list_del_rcu(&addr->list); | 648 | list_del_rcu(&addr->list); |
649 | break; | 649 | break; |
650 | } | 650 | } |
651 | } | 651 | } |
652 | spin_unlock_bh(&sctp_local_addr_lock); | 652 | spin_unlock_bh(&sctp_local_addr_lock); |
653 | if (addr && !addr->valid) | 653 | if (addr && !addr->valid) |
654 | call_rcu(&addr->rcu, sctp_local_addr_free); | 654 | call_rcu(&addr->rcu, sctp_local_addr_free); |
655 | break; | 655 | break; |
656 | } | 656 | } |
657 | 657 | ||
658 | return NOTIFY_DONE; | 658 | return NOTIFY_DONE; |
659 | } | 659 | } |
660 | 660 | ||
661 | /* | 661 | /* |
662 | * Initialize the control inode/socket with a control endpoint data | 662 | * Initialize the control inode/socket with a control endpoint data |
663 | * structure. This endpoint is reserved exclusively for the OOTB processing. | 663 | * structure. This endpoint is reserved exclusively for the OOTB processing. |
664 | */ | 664 | */ |
665 | static int sctp_ctl_sock_init(void) | 665 | static int sctp_ctl_sock_init(void) |
666 | { | 666 | { |
667 | int err; | 667 | int err; |
668 | sa_family_t family; | 668 | sa_family_t family; |
669 | 669 | ||
670 | if (sctp_get_pf_specific(PF_INET6)) | 670 | if (sctp_get_pf_specific(PF_INET6)) |
671 | family = PF_INET6; | 671 | family = PF_INET6; |
672 | else | 672 | else |
673 | family = PF_INET; | 673 | family = PF_INET; |
674 | 674 | ||
675 | err = sock_create_kern(family, SOCK_SEQPACKET, IPPROTO_SCTP, | 675 | err = sock_create_kern(family, SOCK_SEQPACKET, IPPROTO_SCTP, |
676 | &sctp_ctl_socket); | 676 | &sctp_ctl_socket); |
677 | if (err < 0) { | 677 | if (err < 0) { |
678 | printk(KERN_ERR | 678 | printk(KERN_ERR |
679 | "SCTP: Failed to create the SCTP control socket.\n"); | 679 | "SCTP: Failed to create the SCTP control socket.\n"); |
680 | return err; | 680 | return err; |
681 | } | 681 | } |
682 | sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC; | 682 | sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC; |
683 | inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1; | 683 | inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1; |
684 | 684 | ||
685 | return 0; | 685 | return 0; |
686 | } | 686 | } |
687 | 687 | ||
688 | /* Register address family specific functions. */ | 688 | /* Register address family specific functions. */ |
689 | int sctp_register_af(struct sctp_af *af) | 689 | int sctp_register_af(struct sctp_af *af) |
690 | { | 690 | { |
691 | switch (af->sa_family) { | 691 | switch (af->sa_family) { |
692 | case AF_INET: | 692 | case AF_INET: |
693 | if (sctp_af_v4_specific) | 693 | if (sctp_af_v4_specific) |
694 | return 0; | 694 | return 0; |
695 | sctp_af_v4_specific = af; | 695 | sctp_af_v4_specific = af; |
696 | break; | 696 | break; |
697 | case AF_INET6: | 697 | case AF_INET6: |
698 | if (sctp_af_v6_specific) | 698 | if (sctp_af_v6_specific) |
699 | return 0; | 699 | return 0; |
700 | sctp_af_v6_specific = af; | 700 | sctp_af_v6_specific = af; |
701 | break; | 701 | break; |
702 | default: | 702 | default: |
703 | return 0; | 703 | return 0; |
704 | } | 704 | } |
705 | 705 | ||
706 | INIT_LIST_HEAD(&af->list); | 706 | INIT_LIST_HEAD(&af->list); |
707 | list_add_tail(&af->list, &sctp_address_families); | 707 | list_add_tail(&af->list, &sctp_address_families); |
708 | return 1; | 708 | return 1; |
709 | } | 709 | } |
710 | 710 | ||
711 | /* Get the table of functions for manipulating a particular address | 711 | /* Get the table of functions for manipulating a particular address |
712 | * family. | 712 | * family. |
713 | */ | 713 | */ |
714 | struct sctp_af *sctp_get_af_specific(sa_family_t family) | 714 | struct sctp_af *sctp_get_af_specific(sa_family_t family) |
715 | { | 715 | { |
716 | switch (family) { | 716 | switch (family) { |
717 | case AF_INET: | 717 | case AF_INET: |
718 | return sctp_af_v4_specific; | 718 | return sctp_af_v4_specific; |
719 | case AF_INET6: | 719 | case AF_INET6: |
720 | return sctp_af_v6_specific; | 720 | return sctp_af_v6_specific; |
721 | default: | 721 | default: |
722 | return NULL; | 722 | return NULL; |
723 | } | 723 | } |
724 | } | 724 | } |
725 | 725 | ||
726 | /* Common code to initialize a AF_INET msg_name. */ | 726 | /* Common code to initialize a AF_INET msg_name. */ |
727 | static void sctp_inet_msgname(char *msgname, int *addr_len) | 727 | static void sctp_inet_msgname(char *msgname, int *addr_len) |
728 | { | 728 | { |
729 | struct sockaddr_in *sin; | 729 | struct sockaddr_in *sin; |
730 | 730 | ||
731 | sin = (struct sockaddr_in *)msgname; | 731 | sin = (struct sockaddr_in *)msgname; |
732 | *addr_len = sizeof(struct sockaddr_in); | 732 | *addr_len = sizeof(struct sockaddr_in); |
733 | sin->sin_family = AF_INET; | 733 | sin->sin_family = AF_INET; |
734 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | 734 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
735 | } | 735 | } |
736 | 736 | ||
/* Copy the primary address of the peer primary address as the msg_name.
 * Used when delivering a ulp event to userspace; a NULL msgname means
 * the caller did not ask for an address and nothing is done.
 */
static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
				    int *addr_len)
{
	struct sockaddr_in *sin, *sinfrom;

	if (msgname) {
		struct sctp_association *asoc;

		asoc = event->asoc;
		/* Fill in family/length/padding first. */
		sctp_inet_msgname(msgname, addr_len);
		sin = (struct sockaddr_in *)msgname;
		/* Source the port and address from the peer's primary path. */
		sinfrom = &asoc->peer.primary_addr.v4;
		sin->sin_port = htons(asoc->peer.port);
		sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
	}
}
754 | 754 | ||
/* Initialize and copy out a msgname from an inbound skb.
 * The port comes from the SCTP header and the address from the IP
 * header's source; a NULL msgname is a no-op.
 */
static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
{
	if (msgname) {
		struct sctphdr *sh = sctp_hdr(skb);
		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;

		sctp_inet_msgname(msgname, len);
		/* sh->source is already in network byte order. */
		sin->sin_port = sh->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
	}
}
767 | 767 | ||
768 | /* Do we support this AF? */ | 768 | /* Do we support this AF? */ |
769 | static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp) | 769 | static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp) |
770 | { | 770 | { |
771 | /* PF_INET only supports AF_INET addresses. */ | 771 | /* PF_INET only supports AF_INET addresses. */ |
772 | return (AF_INET == family); | 772 | return (AF_INET == family); |
773 | } | 773 | } |
774 | 774 | ||
775 | /* Address matching with wildcards allowed. */ | 775 | /* Address matching with wildcards allowed. */ |
776 | static int sctp_inet_cmp_addr(const union sctp_addr *addr1, | 776 | static int sctp_inet_cmp_addr(const union sctp_addr *addr1, |
777 | const union sctp_addr *addr2, | 777 | const union sctp_addr *addr2, |
778 | struct sctp_sock *opt) | 778 | struct sctp_sock *opt) |
779 | { | 779 | { |
780 | /* PF_INET only supports AF_INET addresses. */ | 780 | /* PF_INET only supports AF_INET addresses. */ |
781 | if (addr1->sa.sa_family != addr2->sa.sa_family) | 781 | if (addr1->sa.sa_family != addr2->sa.sa_family) |
782 | return 0; | 782 | return 0; |
783 | if (INADDR_ANY == addr1->v4.sin_addr.s_addr || | 783 | if (INADDR_ANY == addr1->v4.sin_addr.s_addr || |
784 | INADDR_ANY == addr2->v4.sin_addr.s_addr) | 784 | INADDR_ANY == addr2->v4.sin_addr.s_addr) |
785 | return 1; | 785 | return 1; |
786 | if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr) | 786 | if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr) |
787 | return 1; | 787 | return 1; |
788 | 788 | ||
789 | return 0; | 789 | return 0; |
790 | } | 790 | } |
791 | 791 | ||
/* Verify that provided sockaddr looks bindable. Common verification has
 * already been taken care of.  For IPv4 this just delegates to the
 * availability check.
 */
static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return sctp_v4_available(addr, opt);
}
799 | 799 | ||
/* Verify that sockaddr looks sendable. Common verification has already
 * been taken care of.  Any IPv4 destination is considered sendable here.
 */
static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr)
{
	return 1;
}
807 | 807 | ||
/* Fill in Supported Address Type information for INIT and INIT-ACK
 * chunks. Returns number of addresses supported.
 * A plain IPv4 socket advertises exactly one type.
 */
static int sctp_inet_supported_addrs(const struct sctp_sock *opt,
				     __be16 *types)
{
	types[0] = SCTP_PARAM_IPV4_ADDRESS;
	return 1;
}
817 | 817 | ||
/* Wrapper routine that calls the ip transmit routine.
 * Bumps the OUTSCTPPACKS MIB counter and hands the skb to the IPv4
 * output path; @ipfragok is passed through to ip_queue_xmit().
 */
static inline int sctp_v4_xmit(struct sk_buff *skb,
			       struct sctp_transport *transport, int ipfragok)
{
	/* Debug-only trace of the route endpoints attached to the skb. */
	SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
			  "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n",
			  __FUNCTION__, skb, skb->len,
			  NIPQUAD(((struct rtable *)skb->dst)->rt_src),
			  NIPQUAD(((struct rtable *)skb->dst)->rt_dst));

	SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
	return ip_queue_xmit(skb, ipfragok);
}
831 | 831 | ||
/* Forward declaration; the table itself is defined further below. */
static struct sctp_af sctp_ipv4_specific;

/* Protocol-family (PF_INET) operations table: msgname construction,
 * address comparison/verification, and accept-socket creation for
 * IPv4 SCTP sockets.
 */
static struct sctp_pf sctp_pf_inet = {
	.event_msgname = sctp_inet_event_msgname,
	.skb_msgname   = sctp_inet_skb_msgname,
	.af_supported  = sctp_inet_af_supported,
	.cmp_addr      = sctp_inet_cmp_addr,
	.bind_verify   = sctp_inet_bind_verify,
	.send_verify   = sctp_inet_send_verify,
	.supported_addrs = sctp_inet_supported_addrs,
	.create_accept_sk = sctp_v4_create_accept_sk,
	.addr_v4map    = sctp_v4_addr_v4map,
	.af            = &sctp_ipv4_specific,
};
846 | 846 | ||
/* Notifier for inetaddr addition/deletion events. */
static struct notifier_block sctp_inetaddr_notifier = {
	.notifier_call = sctp_inetaddr_event,
};
851 | 851 | ||
/* Socket operations.  Mostly the generic inet/sock_common handlers;
 * SCTP overrides poll and listen with its own implementations.
 */
static const struct proto_ops inet_seqpacket_ops = {
	.family	     = PF_INET,
	.owner	     = THIS_MODULE,
	.release     = inet_release,	/* Needs to be wrapped... */
	.bind	     = inet_bind,
	.connect     = inet_dgram_connect,
	.socketpair  = sock_no_socketpair,
	.accept	     = inet_accept,
	.getname     = inet_getname,	/* Semantics are different. */
	.poll	     = sctp_poll,
	.ioctl	     = inet_ioctl,
	.listen	     = sctp_inet_listen,
	.shutdown    = inet_shutdown,	/* Looks harmless. */
	.setsockopt  = sock_common_setsockopt,	/* IP_SOL IP_OPTION is a problem */
	.getsockopt  = sock_common_getsockopt,
	.sendmsg     = inet_sendmsg,
	.recvmsg     = sock_common_recvmsg,
	.mmap	     = sock_no_mmap,
	.sendpage    = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
877 | 877 | ||
/* Registration with AF_INET family: UDP-style (one-to-many) sockets. */
static struct inet_protosw sctp_seqpacket_protosw = {
	.type       = SOCK_SEQPACKET,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.capability = -1,	/* No capability required to create one. */
	.no_check   = 0,	/* Checksums always enabled. */
	.flags      = SCTP_PROTOSW_FLAG
};
/* TCP-style (one-to-one) SCTP sockets; shares the seqpacket ops table. */
static struct inet_protosw sctp_stream_protosw = {
	.type       = SOCK_STREAM,
	.protocol   = IPPROTO_SCTP,
	.prot       = &sctp_prot,
	.ops        = &inet_seqpacket_ops,
	.capability = -1,	/* No capability required to create one. */
	.no_check   = 0,	/* Checksums always enabled. */
	.flags      = SCTP_PROTOSW_FLAG
};
897 | 897 | ||
/* Register with IP layer: receive and ICMP error handlers for SCTP. */
static struct net_protocol sctp_protocol = {
	.handler     = sctp_rcv,
	.err_handler = sctp_v4_err,
	.no_policy   = 1,
};
904 | 904 | ||
/* IPv4 address related functions: the AF_INET implementation of the
 * sctp_af abstraction (transmit, routing, address conversion/compare).
 */
static struct sctp_af sctp_ipv4_specific = {
	.sa_family      = AF_INET,
	.sctp_xmit      = sctp_v4_xmit,
	.setsockopt     = ip_setsockopt,
	.getsockopt     = ip_getsockopt,
	.get_dst        = sctp_v4_get_dst,
	.get_saddr      = sctp_v4_get_saddr,
	.copy_addrlist  = sctp_v4_copy_addrlist,
	.from_skb       = sctp_v4_from_skb,
	.from_sk        = sctp_v4_from_sk,
	.to_sk_saddr    = sctp_v4_to_sk_saddr,
	.to_sk_daddr    = sctp_v4_to_sk_daddr,
	.from_addr_param = sctp_v4_from_addr_param,
	.to_addr_param  = sctp_v4_to_addr_param,
	.dst_saddr      = sctp_v4_dst_saddr,
	.cmp_addr       = sctp_v4_cmp_addr,
	.addr_valid     = sctp_v4_addr_valid,
	.inaddr_any     = sctp_v4_inaddr_any,
	.is_any         = sctp_v4_is_any,
	.available      = sctp_v4_available,
	.scope          = sctp_v4_scope,
	.skb_iif        = sctp_v4_skb_iif,
	.is_ce          = sctp_v4_is_ce,
	.seq_dump_addr  = sctp_v4_seq_dump_addr,
	.net_header_len = sizeof(struct iphdr),
	.sockaddr_len   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};
937 | 937 | ||
938 | struct sctp_pf *sctp_get_pf_specific(sa_family_t family) { | 938 | struct sctp_pf *sctp_get_pf_specific(sa_family_t family) { |
939 | 939 | ||
940 | switch (family) { | 940 | switch (family) { |
941 | case PF_INET: | 941 | case PF_INET: |
942 | return sctp_pf_inet_specific; | 942 | return sctp_pf_inet_specific; |
943 | case PF_INET6: | 943 | case PF_INET6: |
944 | return sctp_pf_inet6_specific; | 944 | return sctp_pf_inet6_specific; |
945 | default: | 945 | default: |
946 | return NULL; | 946 | return NULL; |
947 | } | 947 | } |
948 | } | 948 | } |
949 | 949 | ||
950 | /* Register the PF specific function table. */ | 950 | /* Register the PF specific function table. */ |
951 | int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) | 951 | int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) |
952 | { | 952 | { |
953 | switch (family) { | 953 | switch (family) { |
954 | case PF_INET: | 954 | case PF_INET: |
955 | if (sctp_pf_inet_specific) | 955 | if (sctp_pf_inet_specific) |
956 | return 0; | 956 | return 0; |
957 | sctp_pf_inet_specific = pf; | 957 | sctp_pf_inet_specific = pf; |
958 | break; | 958 | break; |
959 | case PF_INET6: | 959 | case PF_INET6: |
960 | if (sctp_pf_inet6_specific) | 960 | if (sctp_pf_inet6_specific) |
961 | return 0; | 961 | return 0; |
962 | sctp_pf_inet6_specific = pf; | 962 | sctp_pf_inet6_specific = pf; |
963 | break; | 963 | break; |
964 | default: | 964 | default: |
965 | return 0; | 965 | return 0; |
966 | } | 966 | } |
967 | return 1; | 967 | return 1; |
968 | } | 968 | } |
969 | 969 | ||
/* Allocate the two per-cpu SCTP MIB statistics arrays.
 * Returns 0 on success or -ENOMEM, freeing the first array if the
 * second allocation fails.
 */
static int __init init_sctp_mibs(void)
{
	sctp_statistics[0] = alloc_percpu(struct sctp_mib);
	if (!sctp_statistics[0])
		return -ENOMEM;
	sctp_statistics[1] = alloc_percpu(struct sctp_mib);
	if (!sctp_statistics[1]) {
		/* Don't leak the first array on partial failure. */
		free_percpu(sctp_statistics[0]);
		return -ENOMEM;
	}
	return 0;

}
983 | 983 | ||
/* Release both per-cpu MIB arrays allocated by init_sctp_mibs(). */
static void cleanup_sctp_mibs(void)
{
	free_percpu(sctp_statistics[0]);
	free_percpu(sctp_statistics[1]);
}
989 | 989 | ||
990 | /* Initialize the universe into something sensible. */ | 990 | /* Initialize the universe into something sensible. */ |
991 | SCTP_STATIC __init int sctp_init(void) | 991 | SCTP_STATIC __init int sctp_init(void) |
992 | { | 992 | { |
993 | int i; | 993 | int i; |
994 | int status = -EINVAL; | 994 | int status = -EINVAL; |
995 | unsigned long goal; | 995 | unsigned long goal; |
996 | unsigned long limit; | 996 | unsigned long limit; |
997 | int max_share; | 997 | int max_share; |
998 | int order; | 998 | int order; |
999 | 999 | ||
1000 | /* SCTP_DEBUG sanity check. */ | 1000 | /* SCTP_DEBUG sanity check. */ |
1001 | if (!sctp_sanity_check()) | 1001 | if (!sctp_sanity_check()) |
1002 | goto out; | 1002 | goto out; |
1003 | 1003 | ||
1004 | /* Allocate bind_bucket and chunk caches. */ | 1004 | /* Allocate bind_bucket and chunk caches. */ |
1005 | status = -ENOBUFS; | 1005 | status = -ENOBUFS; |
1006 | sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", | 1006 | sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", |
1007 | sizeof(struct sctp_bind_bucket), | 1007 | sizeof(struct sctp_bind_bucket), |
1008 | 0, SLAB_HWCACHE_ALIGN, | 1008 | 0, SLAB_HWCACHE_ALIGN, |
1009 | NULL); | 1009 | NULL); |
1010 | if (!sctp_bucket_cachep) | 1010 | if (!sctp_bucket_cachep) |
1011 | goto out; | 1011 | goto out; |
1012 | 1012 | ||
1013 | sctp_chunk_cachep = kmem_cache_create("sctp_chunk", | 1013 | sctp_chunk_cachep = kmem_cache_create("sctp_chunk", |
1014 | sizeof(struct sctp_chunk), | 1014 | sizeof(struct sctp_chunk), |
1015 | 0, SLAB_HWCACHE_ALIGN, | 1015 | 0, SLAB_HWCACHE_ALIGN, |
1016 | NULL); | 1016 | NULL); |
1017 | if (!sctp_chunk_cachep) | 1017 | if (!sctp_chunk_cachep) |
1018 | goto err_chunk_cachep; | 1018 | goto err_chunk_cachep; |
1019 | 1019 | ||
1020 | /* Allocate and initialise sctp mibs. */ | 1020 | /* Allocate and initialise sctp mibs. */ |
1021 | status = init_sctp_mibs(); | 1021 | status = init_sctp_mibs(); |
1022 | if (status) | 1022 | if (status) |
1023 | goto err_init_mibs; | 1023 | goto err_init_mibs; |
1024 | 1024 | ||
1025 | /* Initialize proc fs directory. */ | 1025 | /* Initialize proc fs directory. */ |
1026 | status = sctp_proc_init(); | 1026 | status = sctp_proc_init(); |
1027 | if (status) | 1027 | if (status) |
1028 | goto err_init_proc; | 1028 | goto err_init_proc; |
1029 | 1029 | ||
1030 | /* Initialize object count debugging. */ | 1030 | /* Initialize object count debugging. */ |
1031 | sctp_dbg_objcnt_init(); | 1031 | sctp_dbg_objcnt_init(); |
1032 | 1032 | ||
1033 | /* Initialize the SCTP specific PF functions. */ | 1033 | /* Initialize the SCTP specific PF functions. */ |
1034 | sctp_register_pf(&sctp_pf_inet, PF_INET); | 1034 | sctp_register_pf(&sctp_pf_inet, PF_INET); |
1035 | /* | 1035 | /* |
1036 | * 14. Suggested SCTP Protocol Parameter Values | 1036 | * 14. Suggested SCTP Protocol Parameter Values |
1037 | */ | 1037 | */ |
1038 | /* The following protocol parameters are RECOMMENDED: */ | 1038 | /* The following protocol parameters are RECOMMENDED: */ |
1039 | /* RTO.Initial - 3 seconds */ | 1039 | /* RTO.Initial - 3 seconds */ |
1040 | sctp_rto_initial = SCTP_RTO_INITIAL; | 1040 | sctp_rto_initial = SCTP_RTO_INITIAL; |
1041 | /* RTO.Min - 1 second */ | 1041 | /* RTO.Min - 1 second */ |
1042 | sctp_rto_min = SCTP_RTO_MIN; | 1042 | sctp_rto_min = SCTP_RTO_MIN; |
1043 | /* RTO.Max - 60 seconds */ | 1043 | /* RTO.Max - 60 seconds */ |
1044 | sctp_rto_max = SCTP_RTO_MAX; | 1044 | sctp_rto_max = SCTP_RTO_MAX; |
1045 | /* RTO.Alpha - 1/8 */ | 1045 | /* RTO.Alpha - 1/8 */ |
1046 | sctp_rto_alpha = SCTP_RTO_ALPHA; | 1046 | sctp_rto_alpha = SCTP_RTO_ALPHA; |
1047 | /* RTO.Beta - 1/4 */ | 1047 | /* RTO.Beta - 1/4 */ |
1048 | sctp_rto_beta = SCTP_RTO_BETA; | 1048 | sctp_rto_beta = SCTP_RTO_BETA; |
1049 | 1049 | ||
1050 | /* Valid.Cookie.Life - 60 seconds */ | 1050 | /* Valid.Cookie.Life - 60 seconds */ |
1051 | sctp_valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE; | 1051 | sctp_valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE; |
1052 | 1052 | ||
1053 | /* Whether Cookie Preservative is enabled(1) or not(0) */ | 1053 | /* Whether Cookie Preservative is enabled(1) or not(0) */ |
1054 | sctp_cookie_preserve_enable = 1; | 1054 | sctp_cookie_preserve_enable = 1; |
1055 | 1055 | ||
1056 | /* Max.Burst - 4 */ | 1056 | /* Max.Burst - 4 */ |
1057 | sctp_max_burst = SCTP_DEFAULT_MAX_BURST; | 1057 | sctp_max_burst = SCTP_DEFAULT_MAX_BURST; |
1058 | 1058 | ||
1059 | /* Association.Max.Retrans - 10 attempts | 1059 | /* Association.Max.Retrans - 10 attempts |
1060 | * Path.Max.Retrans - 5 attempts (per destination address) | 1060 | * Path.Max.Retrans - 5 attempts (per destination address) |
1061 | * Max.Init.Retransmits - 8 attempts | 1061 | * Max.Init.Retransmits - 8 attempts |
1062 | */ | 1062 | */ |
1063 | sctp_max_retrans_association = 10; | 1063 | sctp_max_retrans_association = 10; |
1064 | sctp_max_retrans_path = 5; | 1064 | sctp_max_retrans_path = 5; |
1065 | sctp_max_retrans_init = 8; | 1065 | sctp_max_retrans_init = 8; |
1066 | 1066 | ||
1067 | /* Sendbuffer growth - do per-socket accounting */ | 1067 | /* Sendbuffer growth - do per-socket accounting */ |
1068 | sctp_sndbuf_policy = 0; | 1068 | sctp_sndbuf_policy = 0; |
1069 | 1069 | ||
1070 | /* Rcvbuffer growth - do per-socket accounting */ | 1070 | /* Rcvbuffer growth - do per-socket accounting */ |
1071 | sctp_rcvbuf_policy = 0; | 1071 | sctp_rcvbuf_policy = 0; |
1072 | 1072 | ||
1073 | /* HB.interval - 30 seconds */ | 1073 | /* HB.interval - 30 seconds */ |
1074 | sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; | 1074 | sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; |
1075 | 1075 | ||
1076 | /* delayed SACK timeout */ | 1076 | /* delayed SACK timeout */ |
1077 | sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK; | 1077 | sctp_sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK; |
1078 | 1078 | ||
1079 | /* Implementation specific variables. */ | 1079 | /* Implementation specific variables. */ |
1080 | 1080 | ||
1081 | /* Initialize default stream count setup information. */ | 1081 | /* Initialize default stream count setup information. */ |
1082 | sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; | 1082 | sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; |
1083 | sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; | 1083 | sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; |
1084 | 1084 | ||
1085 | /* Initialize handle used for association ids. */ | 1085 | /* Initialize handle used for association ids. */ |
1086 | idr_init(&sctp_assocs_id); | 1086 | idr_init(&sctp_assocs_id); |
1087 | 1087 | ||
1088 | /* Set the pressure threshold to be a fraction of global memory that | 1088 | /* Set the pressure threshold to be a fraction of global memory that |
1089 | * is up to 1/2 at 256 MB, decreasing toward zero with the amount of | 1089 | * is up to 1/2 at 256 MB, decreasing toward zero with the amount of |
1090 | * memory, with a floor of 128 pages. | 1090 | * memory, with a floor of 128 pages. |
1091 | * Note this initalizes the data in sctpv6_prot too | 1091 | * Note this initalizes the data in sctpv6_prot too |
1092 | * Unabashedly stolen from tcp_init | 1092 | * Unabashedly stolen from tcp_init |
1093 | */ | 1093 | */ |
1094 | limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); | 1094 | limit = min(num_physpages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); |
1095 | limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); | 1095 | limit = (limit * (num_physpages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); |
1096 | limit = max(limit, 128UL); | 1096 | limit = max(limit, 128UL); |
1097 | sysctl_sctp_mem[0] = limit / 4 * 3; | 1097 | sysctl_sctp_mem[0] = limit / 4 * 3; |
1098 | sysctl_sctp_mem[1] = limit; | 1098 | sysctl_sctp_mem[1] = limit; |
1099 | sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; | 1099 | sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; |
1100 | 1100 | ||
1101 | /* Set per-socket limits to no more than 1/128 the pressure threshold*/ | 1101 | /* Set per-socket limits to no more than 1/128 the pressure threshold*/ |
1102 | limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); | 1102 | limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); |
1103 | max_share = min(4UL*1024*1024, limit); | 1103 | max_share = min(4UL*1024*1024, limit); |
1104 | 1104 | ||
1105 | sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ | 1105 | sysctl_sctp_rmem[0] = PAGE_SIZE; /* give each asoc 1 page min */ |
1106 | sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); | 1106 | sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1)); |
1107 | sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); | 1107 | sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); |
1108 | 1108 | ||
1109 | sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM; | 1109 | sysctl_sctp_wmem[0] = SK_STREAM_MEM_QUANTUM; |
1110 | sysctl_sctp_wmem[1] = 16*1024; | 1110 | sysctl_sctp_wmem[1] = 16*1024; |
1111 | sysctl_sctp_wmem[2] = max(64*1024, max_share); | 1111 | sysctl_sctp_wmem[2] = max(64*1024, max_share); |
1112 | 1112 | ||
1113 | /* Size and allocate the association hash table. | 1113 | /* Size and allocate the association hash table. |
1114 | * The methodology is similar to that of the tcp hash tables. | 1114 | * The methodology is similar to that of the tcp hash tables. |
1115 | */ | 1115 | */ |
1116 | if (num_physpages >= (128 * 1024)) | 1116 | if (num_physpages >= (128 * 1024)) |
1117 | goal = num_physpages >> (22 - PAGE_SHIFT); | 1117 | goal = num_physpages >> (22 - PAGE_SHIFT); |
1118 | else | 1118 | else |
1119 | goal = num_physpages >> (24 - PAGE_SHIFT); | 1119 | goal = num_physpages >> (24 - PAGE_SHIFT); |
1120 | 1120 | ||
1121 | for (order = 0; (1UL << order) < goal; order++) | 1121 | for (order = 0; (1UL << order) < goal; order++) |
1122 | ; | 1122 | ; |
1123 | 1123 | ||
1124 | do { | 1124 | do { |
1125 | sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / | 1125 | sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / |
1126 | sizeof(struct sctp_hashbucket); | 1126 | sizeof(struct sctp_hashbucket); |
1127 | if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) | 1127 | if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) |
1128 | continue; | 1128 | continue; |
1129 | sctp_assoc_hashtable = (struct sctp_hashbucket *) | 1129 | sctp_assoc_hashtable = (struct sctp_hashbucket *) |
1130 | __get_free_pages(GFP_ATOMIC, order); | 1130 | __get_free_pages(GFP_ATOMIC, order); |
1131 | } while (!sctp_assoc_hashtable && --order > 0); | 1131 | } while (!sctp_assoc_hashtable && --order > 0); |
1132 | if (!sctp_assoc_hashtable) { | 1132 | if (!sctp_assoc_hashtable) { |
1133 | printk(KERN_ERR "SCTP: Failed association hash alloc.\n"); | 1133 | printk(KERN_ERR "SCTP: Failed association hash alloc.\n"); |
1134 | status = -ENOMEM; | 1134 | status = -ENOMEM; |
1135 | goto err_ahash_alloc; | 1135 | goto err_ahash_alloc; |
1136 | } | 1136 | } |
1137 | for (i = 0; i < sctp_assoc_hashsize; i++) { | 1137 | for (i = 0; i < sctp_assoc_hashsize; i++) { |
1138 | rwlock_init(&sctp_assoc_hashtable[i].lock); | 1138 | rwlock_init(&sctp_assoc_hashtable[i].lock); |
1139 | sctp_assoc_hashtable[i].chain = NULL; | 1139 | sctp_assoc_hashtable[i].chain = NULL; |
1140 | } | 1140 | } |
1141 | 1141 | ||
1142 | /* Allocate and initialize the endpoint hash table. */ | 1142 | /* Allocate and initialize the endpoint hash table. */ |
1143 | sctp_ep_hashsize = 64; | 1143 | sctp_ep_hashsize = 64; |
1144 | sctp_ep_hashtable = (struct sctp_hashbucket *) | 1144 | sctp_ep_hashtable = (struct sctp_hashbucket *) |
1145 | kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); | 1145 | kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); |
1146 | if (!sctp_ep_hashtable) { | 1146 | if (!sctp_ep_hashtable) { |
1147 | printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n"); | 1147 | printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n"); |
1148 | status = -ENOMEM; | 1148 | status = -ENOMEM; |
1149 | goto err_ehash_alloc; | 1149 | goto err_ehash_alloc; |
1150 | } | 1150 | } |
1151 | for (i = 0; i < sctp_ep_hashsize; i++) { | 1151 | for (i = 0; i < sctp_ep_hashsize; i++) { |
1152 | rwlock_init(&sctp_ep_hashtable[i].lock); | 1152 | rwlock_init(&sctp_ep_hashtable[i].lock); |
1153 | sctp_ep_hashtable[i].chain = NULL; | 1153 | sctp_ep_hashtable[i].chain = NULL; |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | /* Allocate and initialize the SCTP port hash table. */ | 1156 | /* Allocate and initialize the SCTP port hash table. */ |
1157 | do { | 1157 | do { |
1158 | sctp_port_hashsize = (1UL << order) * PAGE_SIZE / | 1158 | sctp_port_hashsize = (1UL << order) * PAGE_SIZE / |
1159 | sizeof(struct sctp_bind_hashbucket); | 1159 | sizeof(struct sctp_bind_hashbucket); |
1160 | if ((sctp_port_hashsize > (64 * 1024)) && order > 0) | 1160 | if ((sctp_port_hashsize > (64 * 1024)) && order > 0) |
1161 | continue; | 1161 | continue; |
1162 | sctp_port_hashtable = (struct sctp_bind_hashbucket *) | 1162 | sctp_port_hashtable = (struct sctp_bind_hashbucket *) |
1163 | __get_free_pages(GFP_ATOMIC, order); | 1163 | __get_free_pages(GFP_ATOMIC, order); |
1164 | } while (!sctp_port_hashtable && --order > 0); | 1164 | } while (!sctp_port_hashtable && --order > 0); |
1165 | if (!sctp_port_hashtable) { | 1165 | if (!sctp_port_hashtable) { |
1166 | printk(KERN_ERR "SCTP: Failed bind hash alloc."); | 1166 | printk(KERN_ERR "SCTP: Failed bind hash alloc."); |
1167 | status = -ENOMEM; | 1167 | status = -ENOMEM; |
1168 | goto err_bhash_alloc; | 1168 | goto err_bhash_alloc; |
1169 | } | 1169 | } |
1170 | for (i = 0; i < sctp_port_hashsize; i++) { | 1170 | for (i = 0; i < sctp_port_hashsize; i++) { |
1171 | spin_lock_init(&sctp_port_hashtable[i].lock); | 1171 | spin_lock_init(&sctp_port_hashtable[i].lock); |
1172 | sctp_port_hashtable[i].chain = NULL; | 1172 | sctp_port_hashtable[i].chain = NULL; |
1173 | } | 1173 | } |
1174 | 1174 | ||
1175 | spin_lock_init(&sctp_port_alloc_lock); | 1175 | spin_lock_init(&sctp_port_alloc_lock); |
1176 | sctp_port_rover = sysctl_local_port_range[0] - 1; | 1176 | sctp_port_rover = sysctl_local_port_range[0] - 1; |
1177 | 1177 | ||
1178 | printk(KERN_INFO "SCTP: Hash tables configured " | 1178 | printk(KERN_INFO "SCTP: Hash tables configured " |
1179 | "(established %d bind %d)\n", | 1179 | "(established %d bind %d)\n", |
1180 | sctp_assoc_hashsize, sctp_port_hashsize); | 1180 | sctp_assoc_hashsize, sctp_port_hashsize); |
1181 | 1181 | ||
1182 | /* Disable ADDIP by default. */ | 1182 | /* Disable ADDIP by default. */ |
1183 | sctp_addip_enable = 0; | 1183 | sctp_addip_enable = 0; |
1184 | 1184 | ||
1185 | /* Enable PR-SCTP by default. */ | 1185 | /* Enable PR-SCTP by default. */ |
1186 | sctp_prsctp_enable = 1; | 1186 | sctp_prsctp_enable = 1; |
1187 | 1187 | ||
1188 | /* Disable AUTH by default. */ | ||
1189 | sctp_auth_enable = 0; | ||
1190 | |||
1188 | sctp_sysctl_register(); | 1191 | sctp_sysctl_register(); |
1189 | 1192 | ||
1190 | INIT_LIST_HEAD(&sctp_address_families); | 1193 | INIT_LIST_HEAD(&sctp_address_families); |
1191 | sctp_register_af(&sctp_ipv4_specific); | 1194 | sctp_register_af(&sctp_ipv4_specific); |
1192 | 1195 | ||
1193 | status = proto_register(&sctp_prot, 1); | 1196 | status = proto_register(&sctp_prot, 1); |
1194 | if (status) | 1197 | if (status) |
1195 | goto err_proto_register; | 1198 | goto err_proto_register; |
1196 | 1199 | ||
1197 | /* Register SCTP(UDP and TCP style) with socket layer. */ | 1200 | /* Register SCTP(UDP and TCP style) with socket layer. */ |
1198 | inet_register_protosw(&sctp_seqpacket_protosw); | 1201 | inet_register_protosw(&sctp_seqpacket_protosw); |
1199 | inet_register_protosw(&sctp_stream_protosw); | 1202 | inet_register_protosw(&sctp_stream_protosw); |
1200 | 1203 | ||
1201 | status = sctp_v6_init(); | 1204 | status = sctp_v6_init(); |
1202 | if (status) | 1205 | if (status) |
1203 | goto err_v6_init; | 1206 | goto err_v6_init; |
1204 | 1207 | ||
1205 | /* Initialize the control inode/socket for handling OOTB packets. */ | 1208 | /* Initialize the control inode/socket for handling OOTB packets. */ |
1206 | if ((status = sctp_ctl_sock_init())) { | 1209 | if ((status = sctp_ctl_sock_init())) { |
1207 | printk (KERN_ERR | 1210 | printk (KERN_ERR |
1208 | "SCTP: Failed to initialize the SCTP control sock.\n"); | 1211 | "SCTP: Failed to initialize the SCTP control sock.\n"); |
1209 | goto err_ctl_sock_init; | 1212 | goto err_ctl_sock_init; |
1210 | } | 1213 | } |
1211 | 1214 | ||
1212 | /* Initialize the local address list. */ | 1215 | /* Initialize the local address list. */ |
1213 | INIT_LIST_HEAD(&sctp_local_addr_list); | 1216 | INIT_LIST_HEAD(&sctp_local_addr_list); |
1214 | spin_lock_init(&sctp_local_addr_lock); | 1217 | spin_lock_init(&sctp_local_addr_lock); |
1215 | sctp_get_local_addr_list(); | 1218 | sctp_get_local_addr_list(); |
1216 | 1219 | ||
1217 | /* Register notifier for inet address additions/deletions. */ | 1220 | /* Register notifier for inet address additions/deletions. */ |
1218 | register_inetaddr_notifier(&sctp_inetaddr_notifier); | 1221 | register_inetaddr_notifier(&sctp_inetaddr_notifier); |
1219 | 1222 | ||
1220 | /* Register SCTP with inet layer. */ | 1223 | /* Register SCTP with inet layer. */ |
1221 | if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) { | 1224 | if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) { |
1222 | status = -EAGAIN; | 1225 | status = -EAGAIN; |
1223 | goto err_add_protocol; | 1226 | goto err_add_protocol; |
1224 | } | 1227 | } |
1225 | 1228 | ||
1226 | /* Register SCTP with inet6 layer. */ | 1229 | /* Register SCTP with inet6 layer. */ |
1227 | status = sctp_v6_add_protocol(); | 1230 | status = sctp_v6_add_protocol(); |
1228 | if (status) | 1231 | if (status) |
1229 | goto err_v6_add_protocol; | 1232 | goto err_v6_add_protocol; |
1230 | 1233 | ||
1231 | __unsafe(THIS_MODULE); | 1234 | __unsafe(THIS_MODULE); |
1232 | status = 0; | 1235 | status = 0; |
1233 | out: | 1236 | out: |
1234 | return status; | 1237 | return status; |
1235 | err_v6_add_protocol: | 1238 | err_v6_add_protocol: |
1236 | inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); | 1239 | inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); |
1237 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); | 1240 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); |
1238 | err_add_protocol: | 1241 | err_add_protocol: |
1239 | sctp_free_local_addr_list(); | 1242 | sctp_free_local_addr_list(); |
1240 | sock_release(sctp_ctl_socket); | 1243 | sock_release(sctp_ctl_socket); |
1241 | err_ctl_sock_init: | 1244 | err_ctl_sock_init: |
1242 | sctp_v6_exit(); | 1245 | sctp_v6_exit(); |
1243 | err_v6_init: | 1246 | err_v6_init: |
1244 | inet_unregister_protosw(&sctp_stream_protosw); | 1247 | inet_unregister_protosw(&sctp_stream_protosw); |
1245 | inet_unregister_protosw(&sctp_seqpacket_protosw); | 1248 | inet_unregister_protosw(&sctp_seqpacket_protosw); |
1246 | proto_unregister(&sctp_prot); | 1249 | proto_unregister(&sctp_prot); |
1247 | err_proto_register: | 1250 | err_proto_register: |
1248 | sctp_sysctl_unregister(); | 1251 | sctp_sysctl_unregister(); |
1249 | list_del(&sctp_ipv4_specific.list); | 1252 | list_del(&sctp_ipv4_specific.list); |
1250 | free_pages((unsigned long)sctp_port_hashtable, | 1253 | free_pages((unsigned long)sctp_port_hashtable, |
1251 | get_order(sctp_port_hashsize * | 1254 | get_order(sctp_port_hashsize * |
1252 | sizeof(struct sctp_bind_hashbucket))); | 1255 | sizeof(struct sctp_bind_hashbucket))); |
1253 | err_bhash_alloc: | 1256 | err_bhash_alloc: |
1254 | kfree(sctp_ep_hashtable); | 1257 | kfree(sctp_ep_hashtable); |
1255 | err_ehash_alloc: | 1258 | err_ehash_alloc: |
1256 | free_pages((unsigned long)sctp_assoc_hashtable, | 1259 | free_pages((unsigned long)sctp_assoc_hashtable, |
1257 | get_order(sctp_assoc_hashsize * | 1260 | get_order(sctp_assoc_hashsize * |
1258 | sizeof(struct sctp_hashbucket))); | 1261 | sizeof(struct sctp_hashbucket))); |
1259 | err_ahash_alloc: | 1262 | err_ahash_alloc: |
1260 | sctp_dbg_objcnt_exit(); | 1263 | sctp_dbg_objcnt_exit(); |
1261 | sctp_proc_exit(); | 1264 | sctp_proc_exit(); |
1262 | err_init_proc: | 1265 | err_init_proc: |
1263 | cleanup_sctp_mibs(); | 1266 | cleanup_sctp_mibs(); |
1264 | err_init_mibs: | 1267 | err_init_mibs: |
1265 | kmem_cache_destroy(sctp_chunk_cachep); | 1268 | kmem_cache_destroy(sctp_chunk_cachep); |
1266 | err_chunk_cachep: | 1269 | err_chunk_cachep: |
1267 | kmem_cache_destroy(sctp_bucket_cachep); | 1270 | kmem_cache_destroy(sctp_bucket_cachep); |
1268 | goto out; | 1271 | goto out; |
1269 | } | 1272 | } |
1270 | 1273 | ||
/* Exit handler for the SCTP protocol.
 *
 * Tears down everything sctp_init() set up, in strictly the reverse
 * order of registration: protocol hooks first (so no new packets or
 * sockets can arrive), then notifiers, sockets, socket-layer and
 * sysctl registrations, and finally the hash tables and slab caches.
 * Do not reorder these calls.
 */
SCTP_STATIC __exit void sctp_exit(void)
{
	/* BUG.  This should probably do something useful like clean
	 * up all the remaining associations and all that memory.
	 */

	/* Unregister with inet6/inet layers so the stack stops
	 * delivering IPPROTO_SCTP packets to us.
	 */
	sctp_v6_del_protocol();
	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP);

	/* Unregister notifier for inet address additions/deletions. */
	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);

	/* Free the local address list. */
	sctp_free_local_addr_list();

	/* Free the control endpoint (used for OOTB packet handling). */
	sock_release(sctp_ctl_socket);

	/* Cleanup v6 initializations. */
	sctp_v6_exit();

	/* Unregister with socket layer. */
	inet_unregister_protosw(&sctp_stream_protosw);
	inet_unregister_protosw(&sctp_seqpacket_protosw);

	sctp_sysctl_unregister();
	list_del(&sctp_ipv4_specific.list);

	/* The association and port hash tables were page allocations
	 * (free_pages), the endpoint hash was a plain kmalloc (kfree) —
	 * these must match how sctp_init() allocated them.
	 */
	free_pages((unsigned long)sctp_assoc_hashtable,
		   get_order(sctp_assoc_hashsize *
			     sizeof(struct sctp_hashbucket)));
	kfree(sctp_ep_hashtable);
	free_pages((unsigned long)sctp_port_hashtable,
		   get_order(sctp_port_hashsize *
			     sizeof(struct sctp_bind_hashbucket)));

	sctp_dbg_objcnt_exit();
	sctp_proc_exit();
	cleanup_sctp_mibs();

	kmem_cache_destroy(sctp_chunk_cachep);
	kmem_cache_destroy(sctp_bucket_cachep);

	proto_unregister(&sctp_prot);
}
1318 | 1321 | ||
module_init(sctp_init);
module_exit(sctp_exit);

/*
 * Module aliases so that the module auto-loads when a socket of
 * family PF_INET/PF_INET6 with protocol 132 (IPPROTO_SCTP) is
 * requested.  __stringify doesn't like enums, so use the
 * IPPROTO_SCTP value (132) directly.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
MODULE_LICENSE("GPL");
1330 | 1333 |
net/sctp/sysctl.c
1 | /* SCTP kernel reference Implementation | 1 | /* SCTP kernel reference Implementation |
2 | * (C) Copyright IBM Corp. 2002, 2004 | 2 | * (C) Copyright IBM Corp. 2002, 2004 |
3 | * Copyright (c) 2002 Intel Corp. | 3 | * Copyright (c) 2002 Intel Corp. |
4 | * | 4 | * |
5 | * This file is part of the SCTP kernel reference Implementation | 5 | * This file is part of the SCTP kernel reference Implementation |
6 | * | 6 | * |
7 | * Sysctl related interfaces for SCTP. | 7 | * Sysctl related interfaces for SCTP. |
8 | * | 8 | * |
9 | * The SCTP reference implementation is free software; | 9 | * The SCTP reference implementation is free software; |
10 | * you can redistribute it and/or modify it under the terms of | 10 | * you can redistribute it and/or modify it under the terms of |
11 | * the GNU General Public License as published by | 11 | * the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2, or (at your option) | 12 | * the Free Software Foundation; either version 2, or (at your option) |
13 | * any later version. | 13 | * any later version. |
14 | * | 14 | * |
15 | * The SCTP reference implementation is distributed in the hope that it | 15 | * The SCTP reference implementation is distributed in the hope that it |
16 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | 16 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied |
17 | * ************************ | 17 | * ************************ |
18 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | 18 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
19 | * See the GNU General Public License for more details. | 19 | * See the GNU General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License | 21 | * You should have received a copy of the GNU General Public License |
22 | * along with GNU CC; see the file COPYING. If not, write to | 22 | * along with GNU CC; see the file COPYING. If not, write to |
23 | * the Free Software Foundation, 59 Temple Place - Suite 330, | 23 | * the Free Software Foundation, 59 Temple Place - Suite 330, |
24 | * Boston, MA 02111-1307, USA. | 24 | * Boston, MA 02111-1307, USA. |
25 | * | 25 | * |
26 | * Please send any bug reports or fixes you make to the | 26 | * Please send any bug reports or fixes you make to the |
27 | * email address(es): | 27 | * email address(es): |
28 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | 28 | * lksctp developers <lksctp-developers@lists.sourceforge.net> |
29 | * | 29 | * |
30 | * Or submit a bug report through the following website: | 30 | * Or submit a bug report through the following website: |
31 | * http://www.sf.net/projects/lksctp | 31 | * http://www.sf.net/projects/lksctp |
32 | * | 32 | * |
33 | * Written or modified by: | 33 | * Written or modified by: |
34 | * Mingqin Liu <liuming@us.ibm.com> | 34 | * Mingqin Liu <liuming@us.ibm.com> |
35 | * Jon Grimm <jgrimm@us.ibm.com> | 35 | * Jon Grimm <jgrimm@us.ibm.com> |
36 | * Ardelle Fan <ardelle.fan@intel.com> | 36 | * Ardelle Fan <ardelle.fan@intel.com> |
37 | * Ryan Layer <rmlayer@us.ibm.com> | 37 | * Ryan Layer <rmlayer@us.ibm.com> |
38 | * Sridhar Samudrala <sri@us.ibm.com> | 38 | * Sridhar Samudrala <sri@us.ibm.com> |
39 | * | 39 | * |
40 | * Any bugs reported given to us we will try to fix... any fixes shared will | 40 | * Any bugs reported given to us we will try to fix... any fixes shared will |
41 | * be incorporated into the next SCTP release. | 41 | * be incorporated into the next SCTP release. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | #include <net/sctp/structs.h> | 44 | #include <net/sctp/structs.h> |
45 | #include <net/sctp/sctp.h> | 45 | #include <net/sctp/sctp.h> |
46 | #include <linux/sysctl.h> | 46 | #include <linux/sysctl.h> |
47 | 47 | ||
/* Bounds used as .extra1/.extra2 limits by the minmax sysctl handlers
 * in sctp_table[] below.
 */
static int zero = 0;
static int one = 1;
static int timer_max = 86400000; /* ms in one day */
static int int_max = INT_MAX;
/* NOTE(review): these two are 'long' to match sctp_sack_timeout, but
 * proc_dointvec_minmax treats extra1/extra2 as int* — looks wrong on
 * 64-bit big-endian; confirm against the handler's semantics.
 */
static long sack_timer_min = 1;
static long sack_timer_max = 500;

/* Memory accounting limits, defined in net/sctp/socket.c. */
extern int sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
58 | 58 | ||
/* Tunables exported under /proc/sys/net/sctp/.  Timer values are in
 * milliseconds and bounded to at most one day; retransmit counters are
 * bounded to [1, INT_MAX].  Entries with ctl_name CTL_UNNUMBERED are
 * reachable via procfs only (no binary sysctl number assigned).
 */
static ctl_table sctp_table[] = {
	{
		.ctl_name	= NET_SCTP_RTO_INITIAL,
		.procname	= "rto_initial",
		.data		= &sctp_rto_initial,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &one,
		.extra2         = &timer_max
	},
	{
		.ctl_name	= NET_SCTP_RTO_MIN,
		.procname	= "rto_min",
		.data		= &sctp_rto_min,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &one,
		.extra2         = &timer_max
	},
	{
		.ctl_name	= NET_SCTP_RTO_MAX,
		.procname	= "rto_max",
		.data		= &sctp_rto_max,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &one,
		.extra2         = &timer_max
	},
	{
		.ctl_name	= NET_SCTP_VALID_COOKIE_LIFE,
		.procname	= "valid_cookie_life",
		.data		= &sctp_valid_cookie_life,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &one,
		.extra2         = &timer_max
	},
	{
		.ctl_name	= NET_SCTP_MAX_BURST,
		.procname	= "max_burst",
		.data		= &sctp_max_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &int_max
	},
	{
		.ctl_name	= NET_SCTP_ASSOCIATION_MAX_RETRANS,
		.procname	= "association_max_retrans",
		.data		= &sctp_max_retrans_association,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &one,
		.extra2		= &int_max
	},
	{
		.ctl_name	= NET_SCTP_SNDBUF_POLICY,
		.procname	= "sndbuf_policy",
		.data		= &sctp_sndbuf_policy,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		.ctl_name	= NET_SCTP_RCVBUF_POLICY,
		.procname	= "rcvbuf_policy",
		.data		= &sctp_rcvbuf_policy,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		.ctl_name	= NET_SCTP_PATH_MAX_RETRANS,
		.procname	= "path_max_retrans",
		.data		= &sctp_max_retrans_path,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &one,
		.extra2		= &int_max
	},
	{
		.ctl_name	= NET_SCTP_MAX_INIT_RETRANSMITS,
		.procname	= "max_init_retransmits",
		.data		= &sctp_max_retrans_init,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &one,
		.extra2		= &int_max
	},
	{
		.ctl_name	= NET_SCTP_HB_INTERVAL,
		.procname	= "hb_interval",
		.data		= &sctp_hb_interval,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &one,
		.extra2         = &timer_max
	},
	{
		.ctl_name	= NET_SCTP_PRESERVE_ENABLE,
		.procname	= "cookie_preserve_enable",
		.data		= &sctp_cookie_preserve_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		/* Read-only (0444): RTO alpha/beta are fixed divisors. */
		.ctl_name	= NET_SCTP_RTO_ALPHA,
		.procname	= "rto_alpha_exp_divisor",
		.data		= &sctp_rto_alpha,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		.ctl_name	= NET_SCTP_RTO_BETA,
		.procname	= "rto_beta_exp_divisor",
		.data		= &sctp_rto_beta,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		.ctl_name	= NET_SCTP_ADDIP_ENABLE,
		.procname	= "addip_enable",
		.data		= &sctp_addip_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		.ctl_name	= NET_SCTP_PRSCTP_ENABLE,
		.procname	= "prsctp_enable",
		.data		= &sctp_prsctp_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{
		/* NOTE(review): .data is a long and .maxlen is
		 * sizeof(long), but proc_dointvec_minmax operates on
		 * ints — verify this works on 64-bit targets.
		 */
		.ctl_name	= NET_SCTP_SACK_TIMEOUT,
		.procname	= "sack_timeout",
		.data		= &sctp_sack_timeout,
		.maxlen		= sizeof(long),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1         = &sack_timer_min,
		.extra2         = &sack_timer_max,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sctp_mem",
		.data		= &sysctl_sctp_mem,
		.maxlen		= sizeof(sysctl_sctp_mem),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sctp_rmem",
		.data		= &sysctl_sctp_rmem,
		.maxlen		= sizeof(sysctl_sctp_rmem),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sctp_wmem",
		.data		= &sysctl_sctp_wmem,
		.maxlen		= sizeof(sysctl_sctp_wmem),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Runtime on/off switch for the SCTP-AUTH extension;
		 * defaults to disabled in sctp_init().
		 */
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "auth_enable",
		.data		= &sctp_auth_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
		.strategy	= &sysctl_intvec
	},
	{ .ctl_name = 0 }
};
259 | 268 | ||
/* /proc/sys/net/sctp directory entry; its children are sctp_table[]. */
static ctl_table sctp_net_table[] = {
	{
		.ctl_name	= NET_SCTP,
		.procname	= "sctp",
		.mode		= 0555,
		.child		= sctp_table
	},
	{ .ctl_name = 0 }
};
269 | 278 | ||
/* Root of the table hierarchy: /proc/sys/net -> sctp_net_table. */
static ctl_table sctp_root_table[] = {
	{
		.ctl_name	= CTL_NET,
		.procname	= "net",
		.mode		= 0555,
		.child		= sctp_net_table
	},
	{ .ctl_name = 0 }
};
279 | 288 | ||
/* Handle returned by register_sysctl_table(); needed to unregister. */
static struct ctl_table_header * sctp_sysctl_header;

/* Sysctl registration.  Called once from sctp_init(). */
void sctp_sysctl_register(void)
{
	sctp_sysctl_header = register_sysctl_table(sctp_root_table);
}
287 | 296 | ||
/* Sysctl deregistration.  Called from sctp_exit() and from the
 * sctp_init() error-unwind path.
 */
void sctp_sysctl_unregister(void)
{
	unregister_sysctl_table(sctp_sysctl_header);
}
293 | 302 |