Commit ae445d172ab4d342a0a9d64df499cca8d5ad61b3

Authored by YOSHIFUJI Hideaki
Committed by David S. Miller
1 parent f831e90971

[RXRPC]: Use cpu_to_be32() where appropriate.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 5 additions and 5 deletions (inline diff view)

net/rxrpc/ar-connection.c
1 /* RxRPC virtual connection handler 1 /* RxRPC virtual connection handler
2 * 2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/net.h> 13 #include <linux/net.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <linux/crypto.h> 15 #include <linux/crypto.h>
16 #include <net/sock.h> 16 #include <net/sock.h>
17 #include <net/af_rxrpc.h> 17 #include <net/af_rxrpc.h>
18 #include "ar-internal.h" 18 #include "ar-internal.h"
19 19
static void rxrpc_connection_reaper(struct work_struct *work);

/* list of all extant connections, guarded by rxrpc_connection_lock */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
/* how long an unused connection may linger before the reaper frees it,
 * in seconds */
static unsigned long rxrpc_connection_timeout = 10 * 60;
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
26 26
27 /* 27 /*
28 * allocate a new client connection bundle 28 * allocate a new client connection bundle
29 */ 29 */
30 static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp) 30 static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
31 { 31 {
32 struct rxrpc_conn_bundle *bundle; 32 struct rxrpc_conn_bundle *bundle;
33 33
34 _enter(""); 34 _enter("");
35 35
36 bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp); 36 bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
37 if (bundle) { 37 if (bundle) {
38 INIT_LIST_HEAD(&bundle->unused_conns); 38 INIT_LIST_HEAD(&bundle->unused_conns);
39 INIT_LIST_HEAD(&bundle->avail_conns); 39 INIT_LIST_HEAD(&bundle->avail_conns);
40 INIT_LIST_HEAD(&bundle->busy_conns); 40 INIT_LIST_HEAD(&bundle->busy_conns);
41 init_waitqueue_head(&bundle->chanwait); 41 init_waitqueue_head(&bundle->chanwait);
42 atomic_set(&bundle->usage, 1); 42 atomic_set(&bundle->usage, 1);
43 } 43 }
44 44
45 _leave(" = %p", bundle); 45 _leave(" = %p", bundle);
46 return bundle; 46 return bundle;
47 } 47 }
48 48
49 /* 49 /*
50 * compare bundle parameters with what we're looking for 50 * compare bundle parameters with what we're looking for
51 * - return -ve, 0 or +ve 51 * - return -ve, 0 or +ve
52 */ 52 */
53 static inline 53 static inline
54 int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle, 54 int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
55 struct key *key, __be16 service_id) 55 struct key *key, __be16 service_id)
56 { 56 {
57 return (bundle->service_id - service_id) ?: 57 return (bundle->service_id - service_id) ?:
58 ((unsigned long) bundle->key - (unsigned long) key); 58 ((unsigned long) bundle->key - (unsigned long) key);
59 } 59 }
60 60
61 /* 61 /*
62 * get bundle of client connections that a client socket can make use of 62 * get bundle of client connections that a client socket can make use of
63 */ 63 */
64 struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx, 64 struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
65 struct rxrpc_transport *trans, 65 struct rxrpc_transport *trans,
66 struct key *key, 66 struct key *key,
67 __be16 service_id, 67 __be16 service_id,
68 gfp_t gfp) 68 gfp_t gfp)
69 { 69 {
70 struct rxrpc_conn_bundle *bundle, *candidate; 70 struct rxrpc_conn_bundle *bundle, *candidate;
71 struct rb_node *p, *parent, **pp; 71 struct rb_node *p, *parent, **pp;
72 72
73 _enter("%p{%x},%x,%hx,", 73 _enter("%p{%x},%x,%hx,",
74 rx, key_serial(key), trans->debug_id, ntohs(service_id)); 74 rx, key_serial(key), trans->debug_id, ntohs(service_id));
75 75
76 if (rx->trans == trans && rx->bundle) { 76 if (rx->trans == trans && rx->bundle) {
77 atomic_inc(&rx->bundle->usage); 77 atomic_inc(&rx->bundle->usage);
78 return rx->bundle; 78 return rx->bundle;
79 } 79 }
80 80
81 /* search the extant bundles first for one that matches the specified 81 /* search the extant bundles first for one that matches the specified
82 * user ID */ 82 * user ID */
83 spin_lock(&trans->client_lock); 83 spin_lock(&trans->client_lock);
84 84
85 p = trans->bundles.rb_node; 85 p = trans->bundles.rb_node;
86 while (p) { 86 while (p) {
87 bundle = rb_entry(p, struct rxrpc_conn_bundle, node); 87 bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
88 88
89 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) 89 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
90 p = p->rb_left; 90 p = p->rb_left;
91 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) 91 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
92 p = p->rb_right; 92 p = p->rb_right;
93 else 93 else
94 goto found_extant_bundle; 94 goto found_extant_bundle;
95 } 95 }
96 96
97 spin_unlock(&trans->client_lock); 97 spin_unlock(&trans->client_lock);
98 98
99 /* not yet present - create a candidate for a new record and then 99 /* not yet present - create a candidate for a new record and then
100 * redo the search */ 100 * redo the search */
101 candidate = rxrpc_alloc_bundle(gfp); 101 candidate = rxrpc_alloc_bundle(gfp);
102 if (!candidate) { 102 if (!candidate) {
103 _leave(" = -ENOMEM"); 103 _leave(" = -ENOMEM");
104 return ERR_PTR(-ENOMEM); 104 return ERR_PTR(-ENOMEM);
105 } 105 }
106 106
107 candidate->key = key_get(key); 107 candidate->key = key_get(key);
108 candidate->service_id = service_id; 108 candidate->service_id = service_id;
109 109
110 spin_lock(&trans->client_lock); 110 spin_lock(&trans->client_lock);
111 111
112 pp = &trans->bundles.rb_node; 112 pp = &trans->bundles.rb_node;
113 parent = NULL; 113 parent = NULL;
114 while (*pp) { 114 while (*pp) {
115 parent = *pp; 115 parent = *pp;
116 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node); 116 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
117 117
118 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0) 118 if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
119 pp = &(*pp)->rb_left; 119 pp = &(*pp)->rb_left;
120 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0) 120 else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
121 pp = &(*pp)->rb_right; 121 pp = &(*pp)->rb_right;
122 else 122 else
123 goto found_extant_second; 123 goto found_extant_second;
124 } 124 }
125 125
126 /* second search also failed; add the new bundle */ 126 /* second search also failed; add the new bundle */
127 bundle = candidate; 127 bundle = candidate;
128 candidate = NULL; 128 candidate = NULL;
129 129
130 rb_link_node(&bundle->node, parent, pp); 130 rb_link_node(&bundle->node, parent, pp);
131 rb_insert_color(&bundle->node, &trans->bundles); 131 rb_insert_color(&bundle->node, &trans->bundles);
132 spin_unlock(&trans->client_lock); 132 spin_unlock(&trans->client_lock);
133 _net("BUNDLE new on trans %d", trans->debug_id); 133 _net("BUNDLE new on trans %d", trans->debug_id);
134 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { 134 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
135 atomic_inc(&bundle->usage); 135 atomic_inc(&bundle->usage);
136 rx->bundle = bundle; 136 rx->bundle = bundle;
137 } 137 }
138 _leave(" = %p [new]", bundle); 138 _leave(" = %p [new]", bundle);
139 return bundle; 139 return bundle;
140 140
141 /* we found the bundle in the list immediately */ 141 /* we found the bundle in the list immediately */
142 found_extant_bundle: 142 found_extant_bundle:
143 atomic_inc(&bundle->usage); 143 atomic_inc(&bundle->usage);
144 spin_unlock(&trans->client_lock); 144 spin_unlock(&trans->client_lock);
145 _net("BUNDLE old on trans %d", trans->debug_id); 145 _net("BUNDLE old on trans %d", trans->debug_id);
146 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { 146 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
147 atomic_inc(&bundle->usage); 147 atomic_inc(&bundle->usage);
148 rx->bundle = bundle; 148 rx->bundle = bundle;
149 } 149 }
150 _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage)); 150 _leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
151 return bundle; 151 return bundle;
152 152
153 /* we found the bundle on the second time through the list */ 153 /* we found the bundle on the second time through the list */
154 found_extant_second: 154 found_extant_second:
155 atomic_inc(&bundle->usage); 155 atomic_inc(&bundle->usage);
156 spin_unlock(&trans->client_lock); 156 spin_unlock(&trans->client_lock);
157 kfree(candidate); 157 kfree(candidate);
158 _net("BUNDLE old2 on trans %d", trans->debug_id); 158 _net("BUNDLE old2 on trans %d", trans->debug_id);
159 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) { 159 if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
160 atomic_inc(&bundle->usage); 160 atomic_inc(&bundle->usage);
161 rx->bundle = bundle; 161 rx->bundle = bundle;
162 } 162 }
163 _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage)); 163 _leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
164 return bundle; 164 return bundle;
165 } 165 }
166 166
/*
 * release a reference on a bundle
 * - when the last reference goes, the bundle is unlinked from the
 *   transport's tree and freed
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}",trans, bundle, atomic_read(&bundle->usage));

	/* only take the transport lock when the count actually hits zero, so
	 * the final decrement and the tree unlink happen atomically */
	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		/* a dying bundle must have no connections still attached */
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}
189 189
190 /* 190 /*
191 * allocate a new connection 191 * allocate a new connection
192 */ 192 */
193 static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) 193 static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
194 { 194 {
195 struct rxrpc_connection *conn; 195 struct rxrpc_connection *conn;
196 196
197 _enter(""); 197 _enter("");
198 198
199 conn = kzalloc(sizeof(struct rxrpc_connection), gfp); 199 conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
200 if (conn) { 200 if (conn) {
201 INIT_WORK(&conn->processor, &rxrpc_process_connection); 201 INIT_WORK(&conn->processor, &rxrpc_process_connection);
202 INIT_LIST_HEAD(&conn->bundle_link); 202 INIT_LIST_HEAD(&conn->bundle_link);
203 conn->calls = RB_ROOT; 203 conn->calls = RB_ROOT;
204 skb_queue_head_init(&conn->rx_queue); 204 skb_queue_head_init(&conn->rx_queue);
205 rwlock_init(&conn->lock); 205 rwlock_init(&conn->lock);
206 spin_lock_init(&conn->state_lock); 206 spin_lock_init(&conn->state_lock);
207 atomic_set(&conn->usage, 1); 207 atomic_set(&conn->usage, 1);
208 conn->debug_id = atomic_inc_return(&rxrpc_debug_id); 208 conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
209 conn->avail_calls = RXRPC_MAXCALLS; 209 conn->avail_calls = RXRPC_MAXCALLS;
210 conn->size_align = 4; 210 conn->size_align = 4;
211 conn->header_size = sizeof(struct rxrpc_header); 211 conn->header_size = sizeof(struct rxrpc_header);
212 } 212 }
213 213
214 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0); 214 _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
215 return conn; 215 return conn;
216 } 216 }
217 217
/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - the tree is keyed on { epoch, real_conn_id }
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	/* advance the transport's ID counter; on wrap, restart from the
	 * lowest valid value so that an ID below RXRPC_CID_INC is never
	 * handed out */
	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	/* NOTE(review): epoch is __be32 but is ordered with </> here, i.e.
	 * on the big-endian representation - confirm this is intended */
	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	/* keep a wire-format (big-endian) copy of the ID as well */
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			/* counter wrapped: restart the search from the top of
			 * the tree with the lowest valid ID */
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		/* ran off the end of the tree - the incremented ID is free */
		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}
293 293
/*
 * add a call to a connection's call-by-ID tree
 * - takes conn->lock for writing around the insertion
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	/* NOTE(review): call_id is __be32, so the tree orders calls by the
	 * big-endian representation of the ID - confirm this is intended */
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();	/* duplicate call IDs must never occur */
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}
326 326
327 /* 327 /*
328 * connect a call on an exclusive connection 328 * connect a call on an exclusive connection
329 */ 329 */
330 static int rxrpc_connect_exclusive(struct rxrpc_sock *rx, 330 static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
331 struct rxrpc_transport *trans, 331 struct rxrpc_transport *trans,
332 __be16 service_id, 332 __be16 service_id,
333 struct rxrpc_call *call, 333 struct rxrpc_call *call,
334 gfp_t gfp) 334 gfp_t gfp)
335 { 335 {
336 struct rxrpc_connection *conn; 336 struct rxrpc_connection *conn;
337 int chan, ret; 337 int chan, ret;
338 338
339 _enter(""); 339 _enter("");
340 340
341 conn = rx->conn; 341 conn = rx->conn;
342 if (!conn) { 342 if (!conn) {
343 /* not yet present - create a candidate for a new connection 343 /* not yet present - create a candidate for a new connection
344 * and then redo the check */ 344 * and then redo the check */
345 conn = rxrpc_alloc_connection(gfp); 345 conn = rxrpc_alloc_connection(gfp);
346 if (IS_ERR(conn)) { 346 if (IS_ERR(conn)) {
347 _leave(" = %ld", PTR_ERR(conn)); 347 _leave(" = %ld", PTR_ERR(conn));
348 return PTR_ERR(conn); 348 return PTR_ERR(conn);
349 } 349 }
350 350
351 conn->trans = trans; 351 conn->trans = trans;
352 conn->bundle = NULL; 352 conn->bundle = NULL;
353 conn->service_id = service_id; 353 conn->service_id = service_id;
354 conn->epoch = rxrpc_epoch; 354 conn->epoch = rxrpc_epoch;
355 conn->in_clientflag = 0; 355 conn->in_clientflag = 0;
356 conn->out_clientflag = RXRPC_CLIENT_INITIATED; 356 conn->out_clientflag = RXRPC_CLIENT_INITIATED;
357 conn->cid = 0; 357 conn->cid = 0;
358 conn->state = RXRPC_CONN_CLIENT; 358 conn->state = RXRPC_CONN_CLIENT;
359 conn->avail_calls = RXRPC_MAXCALLS - 1; 359 conn->avail_calls = RXRPC_MAXCALLS - 1;
360 conn->security_level = rx->min_sec_level; 360 conn->security_level = rx->min_sec_level;
361 conn->key = key_get(rx->key); 361 conn->key = key_get(rx->key);
362 362
363 ret = rxrpc_init_client_conn_security(conn); 363 ret = rxrpc_init_client_conn_security(conn);
364 if (ret < 0) { 364 if (ret < 0) {
365 key_put(conn->key); 365 key_put(conn->key);
366 kfree(conn); 366 kfree(conn);
367 _leave(" = %d [key]", ret); 367 _leave(" = %d [key]", ret);
368 return ret; 368 return ret;
369 } 369 }
370 370
371 write_lock_bh(&rxrpc_connection_lock); 371 write_lock_bh(&rxrpc_connection_lock);
372 list_add_tail(&conn->link, &rxrpc_connections); 372 list_add_tail(&conn->link, &rxrpc_connections);
373 write_unlock_bh(&rxrpc_connection_lock); 373 write_unlock_bh(&rxrpc_connection_lock);
374 374
375 spin_lock(&trans->client_lock); 375 spin_lock(&trans->client_lock);
376 atomic_inc(&trans->usage); 376 atomic_inc(&trans->usage);
377 377
378 _net("CONNECT EXCL new %d on TRANS %d", 378 _net("CONNECT EXCL new %d on TRANS %d",
379 conn->debug_id, conn->trans->debug_id); 379 conn->debug_id, conn->trans->debug_id);
380 380
381 rxrpc_assign_connection_id(conn); 381 rxrpc_assign_connection_id(conn);
382 rx->conn = conn; 382 rx->conn = conn;
383 } 383 }
384 384
385 /* we've got a connection with a free channel and we can now attach the 385 /* we've got a connection with a free channel and we can now attach the
386 * call to it 386 * call to it
387 * - we're holding the transport's client lock 387 * - we're holding the transport's client lock
388 * - we're holding a reference on the connection 388 * - we're holding a reference on the connection
389 */ 389 */
390 for (chan = 0; chan < RXRPC_MAXCALLS; chan++) 390 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
391 if (!conn->channels[chan]) 391 if (!conn->channels[chan])
392 goto found_channel; 392 goto found_channel;
393 goto no_free_channels; 393 goto no_free_channels;
394 394
395 found_channel: 395 found_channel:
396 atomic_inc(&conn->usage); 396 atomic_inc(&conn->usage);
397 conn->channels[chan] = call; 397 conn->channels[chan] = call;
398 call->conn = conn; 398 call->conn = conn;
399 call->channel = chan; 399 call->channel = chan;
400 call->cid = conn->cid | htonl(chan); 400 call->cid = conn->cid | htonl(chan);
401 call->call_id = htonl(++conn->call_counter); 401 call->call_id = htonl(++conn->call_counter);
402 402
403 _net("CONNECT client on conn %d chan %d as call %x", 403 _net("CONNECT client on conn %d chan %d as call %x",
404 conn->debug_id, chan, ntohl(call->call_id)); 404 conn->debug_id, chan, ntohl(call->call_id));
405 405
406 spin_unlock(&trans->client_lock); 406 spin_unlock(&trans->client_lock);
407 407
408 rxrpc_add_call_ID_to_conn(conn, call); 408 rxrpc_add_call_ID_to_conn(conn, call);
409 _leave(" = 0"); 409 _leave(" = 0");
410 return 0; 410 return 0;
411 411
412 no_free_channels: 412 no_free_channels:
413 spin_unlock(&trans->client_lock); 413 spin_unlock(&trans->client_lock);
414 _leave(" = -ENOSR"); 414 _leave(" = -ENOSR");
415 return -ENOSR; 415 return -ENOSR;
416 } 416 }
417 417
418 /* 418 /*
419 * find a connection for a call 419 * find a connection for a call
420 * - called in process context with IRQs enabled 420 * - called in process context with IRQs enabled
421 */ 421 */
422 int rxrpc_connect_call(struct rxrpc_sock *rx, 422 int rxrpc_connect_call(struct rxrpc_sock *rx,
423 struct rxrpc_transport *trans, 423 struct rxrpc_transport *trans,
424 struct rxrpc_conn_bundle *bundle, 424 struct rxrpc_conn_bundle *bundle,
425 struct rxrpc_call *call, 425 struct rxrpc_call *call,
426 gfp_t gfp) 426 gfp_t gfp)
427 { 427 {
428 struct rxrpc_connection *conn, *candidate; 428 struct rxrpc_connection *conn, *candidate;
429 int chan, ret; 429 int chan, ret;
430 430
431 DECLARE_WAITQUEUE(myself, current); 431 DECLARE_WAITQUEUE(myself, current);
432 432
433 _enter("%p,%lx,", rx, call->user_call_ID); 433 _enter("%p,%lx,", rx, call->user_call_ID);
434 434
435 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags)) 435 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
436 return rxrpc_connect_exclusive(rx, trans, bundle->service_id, 436 return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
437 call, gfp); 437 call, gfp);
438 438
439 spin_lock(&trans->client_lock); 439 spin_lock(&trans->client_lock);
440 for (;;) { 440 for (;;) {
441 /* see if the bundle has a call slot available */ 441 /* see if the bundle has a call slot available */
442 if (!list_empty(&bundle->avail_conns)) { 442 if (!list_empty(&bundle->avail_conns)) {
443 _debug("avail"); 443 _debug("avail");
444 conn = list_entry(bundle->avail_conns.next, 444 conn = list_entry(bundle->avail_conns.next,
445 struct rxrpc_connection, 445 struct rxrpc_connection,
446 bundle_link); 446 bundle_link);
447 if (--conn->avail_calls == 0) 447 if (--conn->avail_calls == 0)
448 list_move(&conn->bundle_link, 448 list_move(&conn->bundle_link,
449 &bundle->busy_conns); 449 &bundle->busy_conns);
450 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); 450 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
451 ASSERT(conn->channels[0] == NULL || 451 ASSERT(conn->channels[0] == NULL ||
452 conn->channels[1] == NULL || 452 conn->channels[1] == NULL ||
453 conn->channels[2] == NULL || 453 conn->channels[2] == NULL ||
454 conn->channels[3] == NULL); 454 conn->channels[3] == NULL);
455 atomic_inc(&conn->usage); 455 atomic_inc(&conn->usage);
456 break; 456 break;
457 } 457 }
458 458
459 if (!list_empty(&bundle->unused_conns)) { 459 if (!list_empty(&bundle->unused_conns)) {
460 _debug("unused"); 460 _debug("unused");
461 conn = list_entry(bundle->unused_conns.next, 461 conn = list_entry(bundle->unused_conns.next,
462 struct rxrpc_connection, 462 struct rxrpc_connection,
463 bundle_link); 463 bundle_link);
464 ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS); 464 ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
465 conn->avail_calls = RXRPC_MAXCALLS - 1; 465 conn->avail_calls = RXRPC_MAXCALLS - 1;
466 ASSERT(conn->channels[0] == NULL && 466 ASSERT(conn->channels[0] == NULL &&
467 conn->channels[1] == NULL && 467 conn->channels[1] == NULL &&
468 conn->channels[2] == NULL && 468 conn->channels[2] == NULL &&
469 conn->channels[3] == NULL); 469 conn->channels[3] == NULL);
470 atomic_inc(&conn->usage); 470 atomic_inc(&conn->usage);
471 list_move(&conn->bundle_link, &bundle->avail_conns); 471 list_move(&conn->bundle_link, &bundle->avail_conns);
472 break; 472 break;
473 } 473 }
474 474
475 /* need to allocate a new connection */ 475 /* need to allocate a new connection */
476 _debug("get new conn [%d]", bundle->num_conns); 476 _debug("get new conn [%d]", bundle->num_conns);
477 477
478 spin_unlock(&trans->client_lock); 478 spin_unlock(&trans->client_lock);
479 479
480 if (signal_pending(current)) 480 if (signal_pending(current))
481 goto interrupted; 481 goto interrupted;
482 482
483 if (bundle->num_conns >= 20) { 483 if (bundle->num_conns >= 20) {
484 _debug("too many conns"); 484 _debug("too many conns");
485 485
486 if (!(gfp & __GFP_WAIT)) { 486 if (!(gfp & __GFP_WAIT)) {
487 _leave(" = -EAGAIN"); 487 _leave(" = -EAGAIN");
488 return -EAGAIN; 488 return -EAGAIN;
489 } 489 }
490 490
491 add_wait_queue(&bundle->chanwait, &myself); 491 add_wait_queue(&bundle->chanwait, &myself);
492 for (;;) { 492 for (;;) {
493 set_current_state(TASK_INTERRUPTIBLE); 493 set_current_state(TASK_INTERRUPTIBLE);
494 if (bundle->num_conns < 20 || 494 if (bundle->num_conns < 20 ||
495 !list_empty(&bundle->unused_conns) || 495 !list_empty(&bundle->unused_conns) ||
496 !list_empty(&bundle->avail_conns)) 496 !list_empty(&bundle->avail_conns))
497 break; 497 break;
498 if (signal_pending(current)) 498 if (signal_pending(current))
499 goto interrupted_dequeue; 499 goto interrupted_dequeue;
500 schedule(); 500 schedule();
501 } 501 }
502 remove_wait_queue(&bundle->chanwait, &myself); 502 remove_wait_queue(&bundle->chanwait, &myself);
503 __set_current_state(TASK_RUNNING); 503 __set_current_state(TASK_RUNNING);
504 spin_lock(&trans->client_lock); 504 spin_lock(&trans->client_lock);
505 continue; 505 continue;
506 } 506 }
507 507
508 /* not yet present - create a candidate for a new connection and then 508 /* not yet present - create a candidate for a new connection and then
509 * redo the check */ 509 * redo the check */
510 candidate = rxrpc_alloc_connection(gfp); 510 candidate = rxrpc_alloc_connection(gfp);
511 if (IS_ERR(candidate)) { 511 if (IS_ERR(candidate)) {
512 _leave(" = %ld", PTR_ERR(candidate)); 512 _leave(" = %ld", PTR_ERR(candidate));
513 return PTR_ERR(candidate); 513 return PTR_ERR(candidate);
514 } 514 }
515 515
516 candidate->trans = trans; 516 candidate->trans = trans;
517 candidate->bundle = bundle; 517 candidate->bundle = bundle;
518 candidate->service_id = bundle->service_id; 518 candidate->service_id = bundle->service_id;
519 candidate->epoch = rxrpc_epoch; 519 candidate->epoch = rxrpc_epoch;
520 candidate->in_clientflag = 0; 520 candidate->in_clientflag = 0;
521 candidate->out_clientflag = RXRPC_CLIENT_INITIATED; 521 candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
522 candidate->cid = 0; 522 candidate->cid = 0;
523 candidate->state = RXRPC_CONN_CLIENT; 523 candidate->state = RXRPC_CONN_CLIENT;
524 candidate->avail_calls = RXRPC_MAXCALLS; 524 candidate->avail_calls = RXRPC_MAXCALLS;
525 candidate->security_level = rx->min_sec_level; 525 candidate->security_level = rx->min_sec_level;
526 candidate->key = key_get(bundle->key); 526 candidate->key = key_get(bundle->key);
527 527
528 ret = rxrpc_init_client_conn_security(candidate); 528 ret = rxrpc_init_client_conn_security(candidate);
529 if (ret < 0) { 529 if (ret < 0) {
530 key_put(candidate->key); 530 key_put(candidate->key);
531 kfree(candidate); 531 kfree(candidate);
532 _leave(" = %d [key]", ret); 532 _leave(" = %d [key]", ret);
533 return ret; 533 return ret;
534 } 534 }
535 535
536 write_lock_bh(&rxrpc_connection_lock); 536 write_lock_bh(&rxrpc_connection_lock);
537 list_add_tail(&candidate->link, &rxrpc_connections); 537 list_add_tail(&candidate->link, &rxrpc_connections);
538 write_unlock_bh(&rxrpc_connection_lock); 538 write_unlock_bh(&rxrpc_connection_lock);
539 539
540 spin_lock(&trans->client_lock); 540 spin_lock(&trans->client_lock);
541 541
542 list_add(&candidate->bundle_link, &bundle->unused_conns); 542 list_add(&candidate->bundle_link, &bundle->unused_conns);
543 bundle->num_conns++; 543 bundle->num_conns++;
544 atomic_inc(&bundle->usage); 544 atomic_inc(&bundle->usage);
545 atomic_inc(&trans->usage); 545 atomic_inc(&trans->usage);
546 546
547 _net("CONNECT new %d on TRANS %d", 547 _net("CONNECT new %d on TRANS %d",
548 candidate->debug_id, candidate->trans->debug_id); 548 candidate->debug_id, candidate->trans->debug_id);
549 549
550 rxrpc_assign_connection_id(candidate); 550 rxrpc_assign_connection_id(candidate);
551 if (candidate->security) 551 if (candidate->security)
552 candidate->security->prime_packet_security(candidate); 552 candidate->security->prime_packet_security(candidate);
553 553
554 /* leave the candidate lurking in zombie mode attached to the 554 /* leave the candidate lurking in zombie mode attached to the
555 * bundle until we're ready for it */ 555 * bundle until we're ready for it */
556 rxrpc_put_connection(candidate); 556 rxrpc_put_connection(candidate);
557 candidate = NULL; 557 candidate = NULL;
558 } 558 }
559 559
560 /* we've got a connection with a free channel and we can now attach the 560 /* we've got a connection with a free channel and we can now attach the
561 * call to it 561 * call to it
562 * - we're holding the transport's client lock 562 * - we're holding the transport's client lock
563 * - we're holding a reference on the connection 563 * - we're holding a reference on the connection
564 * - we're holding a reference on the bundle 564 * - we're holding a reference on the bundle
565 */ 565 */
566 for (chan = 0; chan < RXRPC_MAXCALLS; chan++) 566 for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
567 if (!conn->channels[chan]) 567 if (!conn->channels[chan])
568 goto found_channel; 568 goto found_channel;
569 ASSERT(conn->channels[0] == NULL || 569 ASSERT(conn->channels[0] == NULL ||
570 conn->channels[1] == NULL || 570 conn->channels[1] == NULL ||
571 conn->channels[2] == NULL || 571 conn->channels[2] == NULL ||
572 conn->channels[3] == NULL); 572 conn->channels[3] == NULL);
573 BUG(); 573 BUG();
574 574
575 found_channel: 575 found_channel:
576 conn->channels[chan] = call; 576 conn->channels[chan] = call;
577 call->conn = conn; 577 call->conn = conn;
578 call->channel = chan; 578 call->channel = chan;
579 call->cid = conn->cid | htonl(chan); 579 call->cid = conn->cid | htonl(chan);
580 call->call_id = htonl(++conn->call_counter); 580 call->call_id = htonl(++conn->call_counter);
581 581
582 _net("CONNECT client on conn %d chan %d as call %x", 582 _net("CONNECT client on conn %d chan %d as call %x",
583 conn->debug_id, chan, ntohl(call->call_id)); 583 conn->debug_id, chan, ntohl(call->call_id));
584 584
585 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS); 585 ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
586 spin_unlock(&trans->client_lock); 586 spin_unlock(&trans->client_lock);
587 587
588 rxrpc_add_call_ID_to_conn(conn, call); 588 rxrpc_add_call_ID_to_conn(conn, call);
589 589
590 _leave(" = 0"); 590 _leave(" = 0");
591 return 0; 591 return 0;
592 592
593 interrupted_dequeue: 593 interrupted_dequeue:
594 remove_wait_queue(&bundle->chanwait, &myself); 594 remove_wait_queue(&bundle->chanwait, &myself);
595 __set_current_state(TASK_RUNNING); 595 __set_current_state(TASK_RUNNING);
596 interrupted: 596 interrupted:
597 _leave(" = -ERESTARTSYS"); 597 _leave(" = -ERESTARTSYS");
598 return -ERESTARTSYS; 598 return -ERESTARTSYS;
599 } 599 }
600 600
/*
 * get a record of an incoming connection
 *
 * Look up (or create) the server-side connection record matching a
 * client-initiated packet header on the given transport.  On success the
 * connection is returned with its usage count incremented; otherwise an
 * ERR_PTR is returned: -ENOMEM if a new record could not be allocated, or
 * -EKEYREJECTED if an existing record's security index does not match the
 * packet's.
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	/* only client-initiated packets may name a server connection */
	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	/* lookup keys: the epoch is kept in network byte order (the rb-tree
	 * only needs a consistent ordering, not host order); the connection
	 * ID is the CID with the channel bits masked off */
	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	/* a non-zero service ID means the connection still has to pass
	 * security checks before it is usable */
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	/* redo the search under the write lock in case someone else inserted
	 * the same connection whilst the read lock was dropped */
	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	/* the unused candidate is discarded in favour of the extant record */
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}
731 731
732 /* 732 /*
733 * find a connection based on transport and RxRPC connection ID for an incoming 733 * find a connection based on transport and RxRPC connection ID for an incoming
734 * packet 734 * packet
735 */ 735 */
736 struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, 736 struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
737 struct rxrpc_header *hdr) 737 struct rxrpc_header *hdr)
738 { 738 {
739 struct rxrpc_connection *conn; 739 struct rxrpc_connection *conn;
740 struct rb_node *p; 740 struct rb_node *p;
741 __be32 epoch; 741 __be32 epoch;
742 u32 conn_id; 742 u32 conn_id;
743 743
744 _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags); 744 _enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);
745 745
746 read_lock_bh(&trans->conn_lock); 746 read_lock_bh(&trans->conn_lock);
747 747
748 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK; 748 conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
749 epoch = hdr->epoch; 749 epoch = hdr->epoch;
750 750
751 if (hdr->flags & RXRPC_CLIENT_INITIATED) 751 if (hdr->flags & RXRPC_CLIENT_INITIATED)
752 p = trans->server_conns.rb_node; 752 p = trans->server_conns.rb_node;
753 else 753 else
754 p = trans->client_conns.rb_node; 754 p = trans->client_conns.rb_node;
755 755
756 while (p) { 756 while (p) {
757 conn = rb_entry(p, struct rxrpc_connection, node); 757 conn = rb_entry(p, struct rxrpc_connection, node);
758 758
759 _debug("maybe %x", conn->real_conn_id); 759 _debug("maybe %x", conn->real_conn_id);
760 760
761 if (epoch < conn->epoch) 761 if (epoch < conn->epoch)
762 p = p->rb_left; 762 p = p->rb_left;
763 else if (epoch > conn->epoch) 763 else if (epoch > conn->epoch)
764 p = p->rb_right; 764 p = p->rb_right;
765 else if (conn_id < conn->real_conn_id) 765 else if (conn_id < conn->real_conn_id)
766 p = p->rb_left; 766 p = p->rb_left;
767 else if (conn_id > conn->real_conn_id) 767 else if (conn_id > conn->real_conn_id)
768 p = p->rb_right; 768 p = p->rb_right;
769 else 769 else
770 goto found; 770 goto found;
771 } 771 }
772 772
773 read_unlock_bh(&trans->conn_lock); 773 read_unlock_bh(&trans->conn_lock);
774 _leave(" = NULL"); 774 _leave(" = NULL");
775 return NULL; 775 return NULL;
776 776
777 found: 777 found:
778 atomic_inc(&conn->usage); 778 atomic_inc(&conn->usage);
779 read_unlock_bh(&trans->conn_lock); 779 read_unlock_bh(&trans->conn_lock);
780 _leave(" = %p", conn); 780 _leave(" = %p", conn);
781 return conn; 781 return conn;
782 } 782 }
783 783
784 /* 784 /*
785 * release a virtual connection 785 * release a virtual connection
786 */ 786 */
787 void rxrpc_put_connection(struct rxrpc_connection *conn) 787 void rxrpc_put_connection(struct rxrpc_connection *conn)
788 { 788 {
789 _enter("%p{u=%d,d=%d}", 789 _enter("%p{u=%d,d=%d}",
790 conn, atomic_read(&conn->usage), conn->debug_id); 790 conn, atomic_read(&conn->usage), conn->debug_id);
791 791
792 ASSERTCMP(atomic_read(&conn->usage), >, 0); 792 ASSERTCMP(atomic_read(&conn->usage), >, 0);
793 793
794 conn->put_time = get_seconds(); 794 conn->put_time = get_seconds();
795 if (atomic_dec_and_test(&conn->usage)) { 795 if (atomic_dec_and_test(&conn->usage)) {
796 _debug("zombie"); 796 _debug("zombie");
797 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0); 797 rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
798 } 798 }
799 799
800 _leave(""); 800 _leave("");
801 } 801 }
802 802
/*
 * destroy a virtual connection
 *
 * Free a connection record whose usage count has reached zero, dropping its
 * references on the bundle and the transport.  The caller must already have
 * unlinked the record from all lists and trees.
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	/* no calls may remain attached to a dying connection */
	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	/* release security state and the transport ref before freeing */
	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}
825 825
/*
 * reap dead connections
 *
 * Scan the global connection list for records whose usage count has dropped
 * to zero and whose expiry time (put_time + rxrpc_connection_timeout) has
 * passed; unlink them onto a local graveyard list and destroy them outside
 * the locks.  If any zombies aren't yet due, reschedule this work item for
 * the earliest expiry.
 */
void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		/* still referenced - not a candidate for reaping */
		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		/* lock order: client_lock then conn_lock, both nested inside
		 * rxrpc_connection_lock */
		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_timeout;

		if (atomic_read(&conn->usage) > 0) {
			/* resurrected whilst we were taking the locks */
			;
		} else if (reap_time <= now) {
			/* expired: move to the graveyard and unlink from the
			 * transport's tree and from any bundle */
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			/* not yet due - track the earliest expiry */
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}
897 897
/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 *
 * Called at module unload: zero the timeout so that every zombie connection
 * expires immediately, then kick the reaper to sweep them all up.
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

	rxrpc_connection_timeout = 0;
	/* cancel any pending delayed run and queue an immediate one */
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}
912 912
net/rxrpc/ar-input.c
1 /* RxRPC packet reception 1 /* RxRPC packet reception
2 * 2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/net.h> 13 #include <linux/net.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <linux/errqueue.h> 15 #include <linux/errqueue.h>
16 #include <linux/udp.h> 16 #include <linux/udp.h>
17 #include <linux/in.h> 17 #include <linux/in.h>
18 #include <linux/in6.h> 18 #include <linux/in6.h>
19 #include <linux/icmp.h> 19 #include <linux/icmp.h>
20 #include <net/sock.h> 20 #include <net/sock.h>
21 #include <net/af_rxrpc.h> 21 #include <net/af_rxrpc.h>
22 #include <net/ip.h> 22 #include <net/ip.h>
23 #include <net/udp.h> 23 #include <net/udp.h>
24 #include "ar-internal.h" 24 #include "ar-internal.h"
25 25
/* delay before transmitting a deferred ACK — presumably in seconds, matching
 * the get_seconds() timebase used elsewhere; TODO confirm against callers */
unsigned long rxrpc_ack_timeout = 1;

/* names of the RxRPC packet types, indexed by packet type number, for debug
 * output; "?NN" marks unassigned type values */
const char *rxrpc_pkts[] = {
	"?00",
	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};
33 33
/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 *
 * Returns 0 on success (or silent discard) or a negative error from
 * sk_filter() or a queueing failure; in every case the skb has been
 * consumed by the time this returns.
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int skb_len, ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		/* discard the packet and drop its reference on the call */
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

	if (!force) {
		/* cast skb->rcvbuf to unsigned... It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
		// ret = -ENOBUFS;
		// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		// (unsigned) sk->sk_rcvbuf)
		// goto out;

		/* give any attached socket filter the chance to reject the
		 * packet */
		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		/* charge the packet against the socket's receive buffer */
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {

			/* Cache the SKB length before we tack it onto the
			 * receive queue. Once it is added it no longer
			 * belongs to us and may be freed by other threads of
			 * control pulling packets from the queue */
			skb_len = skb->len;

			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk, skb_len);
		}
		skb = NULL;
	} else {
		/* call/socket is closing down - quietly drop the packet */
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}
	ret = 0;

out:
	/* release the socket buffer */
	if (skb) {
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
	}

	_leave(" = %d", ret);
	return ret;
}
136 136
137 /* 137 /*
138 * process a DATA packet, posting the packet to the appropriate queue 138 * process a DATA packet, posting the packet to the appropriate queue
139 * - eats the packet if successful 139 * - eats the packet if successful
140 */ 140 */
141 static int rxrpc_fast_process_data(struct rxrpc_call *call, 141 static int rxrpc_fast_process_data(struct rxrpc_call *call,
142 struct sk_buff *skb, u32 seq) 142 struct sk_buff *skb, u32 seq)
143 { 143 {
144 struct rxrpc_skb_priv *sp; 144 struct rxrpc_skb_priv *sp;
145 bool terminal; 145 bool terminal;
146 int ret, ackbit, ack; 146 int ret, ackbit, ack;
147 147
148 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); 148 _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);
149 149
150 sp = rxrpc_skb(skb); 150 sp = rxrpc_skb(skb);
151 ASSERTCMP(sp->call, ==, NULL); 151 ASSERTCMP(sp->call, ==, NULL);
152 152
153 spin_lock(&call->lock); 153 spin_lock(&call->lock);
154 154
155 if (call->state > RXRPC_CALL_COMPLETE) 155 if (call->state > RXRPC_CALL_COMPLETE)
156 goto discard; 156 goto discard;
157 157
158 ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post); 158 ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
159 ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv); 159 ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
160 ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten); 160 ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);
161 161
162 if (seq < call->rx_data_post) { 162 if (seq < call->rx_data_post) {
163 _debug("dup #%u [-%u]", seq, call->rx_data_post); 163 _debug("dup #%u [-%u]", seq, call->rx_data_post);
164 ack = RXRPC_ACK_DUPLICATE; 164 ack = RXRPC_ACK_DUPLICATE;
165 ret = -ENOBUFS; 165 ret = -ENOBUFS;
166 goto discard_and_ack; 166 goto discard_and_ack;
167 } 167 }
168 168
169 /* we may already have the packet in the out of sequence queue */ 169 /* we may already have the packet in the out of sequence queue */
170 ackbit = seq - (call->rx_data_eaten + 1); 170 ackbit = seq - (call->rx_data_eaten + 1);
171 ASSERTCMP(ackbit, >=, 0); 171 ASSERTCMP(ackbit, >=, 0);
172 if (__test_and_set_bit(ackbit, call->ackr_window)) { 172 if (__test_and_set_bit(ackbit, call->ackr_window)) {
173 _debug("dup oos #%u [%u,%u]", 173 _debug("dup oos #%u [%u,%u]",
174 seq, call->rx_data_eaten, call->rx_data_post); 174 seq, call->rx_data_eaten, call->rx_data_post);
175 ack = RXRPC_ACK_DUPLICATE; 175 ack = RXRPC_ACK_DUPLICATE;
176 goto discard_and_ack; 176 goto discard_and_ack;
177 } 177 }
178 178
179 if (seq >= call->ackr_win_top) { 179 if (seq >= call->ackr_win_top) {
180 _debug("exceed #%u [%u]", seq, call->ackr_win_top); 180 _debug("exceed #%u [%u]", seq, call->ackr_win_top);
181 __clear_bit(ackbit, call->ackr_window); 181 __clear_bit(ackbit, call->ackr_window);
182 ack = RXRPC_ACK_EXCEEDS_WINDOW; 182 ack = RXRPC_ACK_EXCEEDS_WINDOW;
183 goto discard_and_ack; 183 goto discard_and_ack;
184 } 184 }
185 185
186 if (seq == call->rx_data_expect) { 186 if (seq == call->rx_data_expect) {
187 clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags); 187 clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
188 call->rx_data_expect++; 188 call->rx_data_expect++;
189 } else if (seq > call->rx_data_expect) { 189 } else if (seq > call->rx_data_expect) {
190 _debug("oos #%u [%u]", seq, call->rx_data_expect); 190 _debug("oos #%u [%u]", seq, call->rx_data_expect);
191 call->rx_data_expect = seq + 1; 191 call->rx_data_expect = seq + 1;
192 if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) { 192 if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
193 ack = RXRPC_ACK_OUT_OF_SEQUENCE; 193 ack = RXRPC_ACK_OUT_OF_SEQUENCE;
194 goto enqueue_and_ack; 194 goto enqueue_and_ack;
195 } 195 }
196 goto enqueue_packet; 196 goto enqueue_packet;
197 } 197 }
198 198
199 if (seq != call->rx_data_post) { 199 if (seq != call->rx_data_post) {
200 _debug("ahead #%u [%u]", seq, call->rx_data_post); 200 _debug("ahead #%u [%u]", seq, call->rx_data_post);
201 goto enqueue_packet; 201 goto enqueue_packet;
202 } 202 }
203 203
204 if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags)) 204 if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
205 goto protocol_error; 205 goto protocol_error;
206 206
207 /* if the packet need security things doing to it, then it goes down 207 /* if the packet need security things doing to it, then it goes down
208 * the slow path */ 208 * the slow path */
209 if (call->conn->security) 209 if (call->conn->security)
210 goto enqueue_packet; 210 goto enqueue_packet;
211 211
212 sp->call = call; 212 sp->call = call;
213 rxrpc_get_call(call); 213 rxrpc_get_call(call);
214 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && 214 terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
215 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); 215 !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
216 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); 216 ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
217 if (ret < 0) { 217 if (ret < 0) {
218 if (ret == -ENOMEM || ret == -ENOBUFS) { 218 if (ret == -ENOMEM || ret == -ENOBUFS) {
219 __clear_bit(ackbit, call->ackr_window); 219 __clear_bit(ackbit, call->ackr_window);
220 ack = RXRPC_ACK_NOSPACE; 220 ack = RXRPC_ACK_NOSPACE;
221 goto discard_and_ack; 221 goto discard_and_ack;
222 } 222 }
223 goto out; 223 goto out;
224 } 224 }
225 225
226 skb = NULL; 226 skb = NULL;
227 227
228 _debug("post #%u", seq); 228 _debug("post #%u", seq);
229 ASSERTCMP(call->rx_data_post, ==, seq); 229 ASSERTCMP(call->rx_data_post, ==, seq);
230 call->rx_data_post++; 230 call->rx_data_post++;
231 231
232 if (sp->hdr.flags & RXRPC_LAST_PACKET) 232 if (sp->hdr.flags & RXRPC_LAST_PACKET)
233 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); 233 set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);
234 234
235 /* if we've reached an out of sequence packet then we need to drain 235 /* if we've reached an out of sequence packet then we need to drain
236 * that queue into the socket Rx queue now */ 236 * that queue into the socket Rx queue now */
237 if (call->rx_data_post == call->rx_first_oos) { 237 if (call->rx_data_post == call->rx_first_oos) {
238 _debug("drain rx oos now"); 238 _debug("drain rx oos now");
239 read_lock(&call->state_lock); 239 read_lock(&call->state_lock);
240 if (call->state < RXRPC_CALL_COMPLETE && 240 if (call->state < RXRPC_CALL_COMPLETE &&
241 !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) 241 !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
242 rxrpc_queue_call(call); 242 rxrpc_queue_call(call);
243 read_unlock(&call->state_lock); 243 read_unlock(&call->state_lock);
244 } 244 }
245 245
246 spin_unlock(&call->lock); 246 spin_unlock(&call->lock);
247 atomic_inc(&call->ackr_not_idle); 247 atomic_inc(&call->ackr_not_idle);
248 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); 248 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
249 _leave(" = 0 [posted]"); 249 _leave(" = 0 [posted]");
250 return 0; 250 return 0;
251 251
252 protocol_error: 252 protocol_error:
253 ret = -EBADMSG; 253 ret = -EBADMSG;
254 out: 254 out:
255 spin_unlock(&call->lock); 255 spin_unlock(&call->lock);
256 _leave(" = %d", ret); 256 _leave(" = %d", ret);
257 return ret; 257 return ret;
258 258
259 discard_and_ack: 259 discard_and_ack:
260 _debug("discard and ACK packet %p", skb); 260 _debug("discard and ACK packet %p", skb);
261 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 261 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
262 discard: 262 discard:
263 spin_unlock(&call->lock); 263 spin_unlock(&call->lock);
264 rxrpc_free_skb(skb); 264 rxrpc_free_skb(skb);
265 _leave(" = 0 [discarded]"); 265 _leave(" = 0 [discarded]");
266 return 0; 266 return 0;
267 267
268 enqueue_and_ack: 268 enqueue_and_ack:
269 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); 269 __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);
270 enqueue_packet: 270 enqueue_packet:
271 _net("defer skb %p", skb); 271 _net("defer skb %p", skb);
272 spin_unlock(&call->lock); 272 spin_unlock(&call->lock);
273 skb_queue_tail(&call->rx_queue, skb); 273 skb_queue_tail(&call->rx_queue, skb);
274 atomic_inc(&call->ackr_not_idle); 274 atomic_inc(&call->ackr_not_idle);
275 read_lock(&call->state_lock); 275 read_lock(&call->state_lock);
276 if (call->state < RXRPC_CALL_DEAD) 276 if (call->state < RXRPC_CALL_DEAD)
277 rxrpc_queue_call(call); 277 rxrpc_queue_call(call);
278 read_unlock(&call->state_lock); 278 read_unlock(&call->state_lock);
279 _leave(" = 0 [queued]"); 279 _leave(" = 0 [queued]");
280 return 0; 280 return 0;
281 } 281 }
282 282
283 /* 283 /*
284 * assume an implicit ACKALL of the transmission phase of a client socket upon 284 * assume an implicit ACKALL of the transmission phase of a client socket upon
285 * reception of the first reply packet 285 * reception of the first reply packet
286 */ 286 */
287 static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial) 287 static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
288 { 288 {
289 write_lock_bh(&call->state_lock); 289 write_lock_bh(&call->state_lock);
290 290
291 switch (call->state) { 291 switch (call->state) {
292 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 292 case RXRPC_CALL_CLIENT_AWAIT_REPLY:
293 call->state = RXRPC_CALL_CLIENT_RECV_REPLY; 293 call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
294 call->acks_latest = serial; 294 call->acks_latest = serial;
295 295
296 _debug("implicit ACKALL %%%u", call->acks_latest); 296 _debug("implicit ACKALL %%%u", call->acks_latest);
297 set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events); 297 set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
298 write_unlock_bh(&call->state_lock); 298 write_unlock_bh(&call->state_lock);
299 299
300 if (try_to_del_timer_sync(&call->resend_timer) >= 0) { 300 if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
301 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events); 301 clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
302 clear_bit(RXRPC_CALL_RESEND, &call->events); 302 clear_bit(RXRPC_CALL_RESEND, &call->events);
303 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags); 303 clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
304 } 304 }
305 break; 305 break;
306 306
307 default: 307 default:
308 write_unlock_bh(&call->state_lock); 308 write_unlock_bh(&call->state_lock);
309 break; 309 break;
310 } 310 }
311 } 311 }
312 312
313 /* 313 /*
314 * post an incoming packet to the nominated call to deal with 314 * post an incoming packet to the nominated call to deal with
315 * - must get rid of the sk_buff, either by freeing it or by queuing it 315 * - must get rid of the sk_buff, either by freeing it or by queuing it
316 */ 316 */
317 void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) 317 void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
318 { 318 {
319 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 319 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
320 __be32 _abort_code; 320 __be32 _abort_code;
321 u32 serial, hi_serial, seq, abort_code; 321 u32 serial, hi_serial, seq, abort_code;
322 322
323 _enter("%p,%p", call, skb); 323 _enter("%p,%p", call, skb);
324 324
325 ASSERT(!irqs_disabled()); 325 ASSERT(!irqs_disabled());
326 326
327 #if 0 // INJECT RX ERROR 327 #if 0 // INJECT RX ERROR
328 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { 328 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
329 static int skip = 0; 329 static int skip = 0;
330 if (++skip == 3) { 330 if (++skip == 3) {
331 printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n"); 331 printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
332 skip = 0; 332 skip = 0;
333 goto free_packet; 333 goto free_packet;
334 } 334 }
335 } 335 }
336 #endif 336 #endif
337 337
338 /* track the latest serial number on this connection for ACK packet 338 /* track the latest serial number on this connection for ACK packet
339 * information */ 339 * information */
340 serial = ntohl(sp->hdr.serial); 340 serial = ntohl(sp->hdr.serial);
341 hi_serial = atomic_read(&call->conn->hi_serial); 341 hi_serial = atomic_read(&call->conn->hi_serial);
342 while (serial > hi_serial) 342 while (serial > hi_serial)
343 hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial, 343 hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
344 serial); 344 serial);
345 345
346 /* request ACK generation for any ACK or DATA packet that requests 346 /* request ACK generation for any ACK or DATA packet that requests
347 * it */ 347 * it */
348 if (sp->hdr.flags & RXRPC_REQUEST_ACK) { 348 if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
349 _proto("ACK Requested on %%%u", serial); 349 _proto("ACK Requested on %%%u", serial);
350 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, 350 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial,
351 !(sp->hdr.flags & RXRPC_MORE_PACKETS)); 351 !(sp->hdr.flags & RXRPC_MORE_PACKETS));
352 } 352 }
353 353
354 switch (sp->hdr.type) { 354 switch (sp->hdr.type) {
355 case RXRPC_PACKET_TYPE_ABORT: 355 case RXRPC_PACKET_TYPE_ABORT:
356 _debug("abort"); 356 _debug("abort");
357 357
358 if (skb_copy_bits(skb, 0, &_abort_code, 358 if (skb_copy_bits(skb, 0, &_abort_code,
359 sizeof(_abort_code)) < 0) 359 sizeof(_abort_code)) < 0)
360 goto protocol_error; 360 goto protocol_error;
361 361
362 abort_code = ntohl(_abort_code); 362 abort_code = ntohl(_abort_code);
363 _proto("Rx ABORT %%%u { %x }", serial, abort_code); 363 _proto("Rx ABORT %%%u { %x }", serial, abort_code);
364 364
365 write_lock_bh(&call->state_lock); 365 write_lock_bh(&call->state_lock);
366 if (call->state < RXRPC_CALL_COMPLETE) { 366 if (call->state < RXRPC_CALL_COMPLETE) {
367 call->state = RXRPC_CALL_REMOTELY_ABORTED; 367 call->state = RXRPC_CALL_REMOTELY_ABORTED;
368 call->abort_code = abort_code; 368 call->abort_code = abort_code;
369 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events); 369 set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
370 rxrpc_queue_call(call); 370 rxrpc_queue_call(call);
371 } 371 }
372 goto free_packet_unlock; 372 goto free_packet_unlock;
373 373
374 case RXRPC_PACKET_TYPE_BUSY: 374 case RXRPC_PACKET_TYPE_BUSY:
375 _proto("Rx BUSY %%%u", serial); 375 _proto("Rx BUSY %%%u", serial);
376 376
377 if (call->conn->out_clientflag) 377 if (call->conn->out_clientflag)
378 goto protocol_error; 378 goto protocol_error;
379 379
380 write_lock_bh(&call->state_lock); 380 write_lock_bh(&call->state_lock);
381 switch (call->state) { 381 switch (call->state) {
382 case RXRPC_CALL_CLIENT_SEND_REQUEST: 382 case RXRPC_CALL_CLIENT_SEND_REQUEST:
383 call->state = RXRPC_CALL_SERVER_BUSY; 383 call->state = RXRPC_CALL_SERVER_BUSY;
384 set_bit(RXRPC_CALL_RCVD_BUSY, &call->events); 384 set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
385 rxrpc_queue_call(call); 385 rxrpc_queue_call(call);
386 case RXRPC_CALL_SERVER_BUSY: 386 case RXRPC_CALL_SERVER_BUSY:
387 goto free_packet_unlock; 387 goto free_packet_unlock;
388 default: 388 default:
389 goto protocol_error_locked; 389 goto protocol_error_locked;
390 } 390 }
391 391
392 default: 392 default:
393 _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial); 393 _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
394 goto protocol_error; 394 goto protocol_error;
395 395
396 case RXRPC_PACKET_TYPE_DATA: 396 case RXRPC_PACKET_TYPE_DATA:
397 seq = ntohl(sp->hdr.seq); 397 seq = ntohl(sp->hdr.seq);
398 398
399 _proto("Rx DATA %%%u { #%u }", serial, seq); 399 _proto("Rx DATA %%%u { #%u }", serial, seq);
400 400
401 if (seq == 0) 401 if (seq == 0)
402 goto protocol_error; 402 goto protocol_error;
403 403
404 call->ackr_prev_seq = sp->hdr.seq; 404 call->ackr_prev_seq = sp->hdr.seq;
405 405
406 /* received data implicitly ACKs all of the request packets we 406 /* received data implicitly ACKs all of the request packets we
407 * sent when we're acting as a client */ 407 * sent when we're acting as a client */
408 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) 408 if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
409 rxrpc_assume_implicit_ackall(call, serial); 409 rxrpc_assume_implicit_ackall(call, serial);
410 410
411 switch (rxrpc_fast_process_data(call, skb, seq)) { 411 switch (rxrpc_fast_process_data(call, skb, seq)) {
412 case 0: 412 case 0:
413 skb = NULL; 413 skb = NULL;
414 goto done; 414 goto done;
415 415
416 default: 416 default:
417 BUG(); 417 BUG();
418 418
419 /* data packet received beyond the last packet */ 419 /* data packet received beyond the last packet */
420 case -EBADMSG: 420 case -EBADMSG:
421 goto protocol_error; 421 goto protocol_error;
422 } 422 }
423 423
424 case RXRPC_PACKET_TYPE_ACK: 424 case RXRPC_PACKET_TYPE_ACK:
425 /* ACK processing is done in process context */ 425 /* ACK processing is done in process context */
426 read_lock_bh(&call->state_lock); 426 read_lock_bh(&call->state_lock);
427 if (call->state < RXRPC_CALL_DEAD) { 427 if (call->state < RXRPC_CALL_DEAD) {
428 skb_queue_tail(&call->rx_queue, skb); 428 skb_queue_tail(&call->rx_queue, skb);
429 rxrpc_queue_call(call); 429 rxrpc_queue_call(call);
430 skb = NULL; 430 skb = NULL;
431 } 431 }
432 read_unlock_bh(&call->state_lock); 432 read_unlock_bh(&call->state_lock);
433 goto free_packet; 433 goto free_packet;
434 } 434 }
435 435
436 protocol_error: 436 protocol_error:
437 _debug("protocol error"); 437 _debug("protocol error");
438 write_lock_bh(&call->state_lock); 438 write_lock_bh(&call->state_lock);
439 protocol_error_locked: 439 protocol_error_locked:
440 if (call->state <= RXRPC_CALL_COMPLETE) { 440 if (call->state <= RXRPC_CALL_COMPLETE) {
441 call->state = RXRPC_CALL_LOCALLY_ABORTED; 441 call->state = RXRPC_CALL_LOCALLY_ABORTED;
442 call->abort_code = RX_PROTOCOL_ERROR; 442 call->abort_code = RX_PROTOCOL_ERROR;
443 set_bit(RXRPC_CALL_ABORT, &call->events); 443 set_bit(RXRPC_CALL_ABORT, &call->events);
444 rxrpc_queue_call(call); 444 rxrpc_queue_call(call);
445 } 445 }
446 free_packet_unlock: 446 free_packet_unlock:
447 write_unlock_bh(&call->state_lock); 447 write_unlock_bh(&call->state_lock);
448 free_packet: 448 free_packet:
449 rxrpc_free_skb(skb); 449 rxrpc_free_skb(skb);
450 done: 450 done:
451 _leave(""); 451 _leave("");
452 } 452 }
453 453
454 /* 454 /*
455 * split up a jumbo data packet 455 * split up a jumbo data packet
456 */ 456 */
457 static void rxrpc_process_jumbo_packet(struct rxrpc_call *call, 457 static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
458 struct sk_buff *jumbo) 458 struct sk_buff *jumbo)
459 { 459 {
460 struct rxrpc_jumbo_header jhdr; 460 struct rxrpc_jumbo_header jhdr;
461 struct rxrpc_skb_priv *sp; 461 struct rxrpc_skb_priv *sp;
462 struct sk_buff *part; 462 struct sk_buff *part;
463 463
464 _enter(",{%u,%u}", jumbo->data_len, jumbo->len); 464 _enter(",{%u,%u}", jumbo->data_len, jumbo->len);
465 465
466 sp = rxrpc_skb(jumbo); 466 sp = rxrpc_skb(jumbo);
467 467
468 do { 468 do {
469 sp->hdr.flags &= ~RXRPC_JUMBO_PACKET; 469 sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;
470 470
471 /* make a clone to represent the first subpacket in what's left 471 /* make a clone to represent the first subpacket in what's left
472 * of the jumbo packet */ 472 * of the jumbo packet */
473 part = skb_clone(jumbo, GFP_ATOMIC); 473 part = skb_clone(jumbo, GFP_ATOMIC);
474 if (!part) { 474 if (!part) {
475 /* simply ditch the tail in the event of ENOMEM */ 475 /* simply ditch the tail in the event of ENOMEM */
476 pskb_trim(jumbo, RXRPC_JUMBO_DATALEN); 476 pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
477 break; 477 break;
478 } 478 }
479 rxrpc_new_skb(part); 479 rxrpc_new_skb(part);
480 480
481 pskb_trim(part, RXRPC_JUMBO_DATALEN); 481 pskb_trim(part, RXRPC_JUMBO_DATALEN);
482 482
483 if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN)) 483 if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
484 goto protocol_error; 484 goto protocol_error;
485 485
486 if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0) 486 if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
487 goto protocol_error; 487 goto protocol_error;
488 if (!pskb_pull(jumbo, sizeof(jhdr))) 488 if (!pskb_pull(jumbo, sizeof(jhdr)))
489 BUG(); 489 BUG();
490 490
491 sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1); 491 sp->hdr.seq = htonl(ntohl(sp->hdr.seq) + 1);
492 sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1); 492 sp->hdr.serial = htonl(ntohl(sp->hdr.serial) + 1);
493 sp->hdr.flags = jhdr.flags; 493 sp->hdr.flags = jhdr.flags;
494 sp->hdr._rsvd = jhdr._rsvd; 494 sp->hdr._rsvd = jhdr._rsvd;
495 495
496 _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1); 496 _proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);
497 497
498 rxrpc_fast_process_packet(call, part); 498 rxrpc_fast_process_packet(call, part);
499 part = NULL; 499 part = NULL;
500 500
501 } while (sp->hdr.flags & RXRPC_JUMBO_PACKET); 501 } while (sp->hdr.flags & RXRPC_JUMBO_PACKET);
502 502
503 rxrpc_fast_process_packet(call, jumbo); 503 rxrpc_fast_process_packet(call, jumbo);
504 _leave(""); 504 _leave("");
505 return; 505 return;
506 506
507 protocol_error: 507 protocol_error:
508 _debug("protocol error"); 508 _debug("protocol error");
509 rxrpc_free_skb(part); 509 rxrpc_free_skb(part);
510 rxrpc_free_skb(jumbo); 510 rxrpc_free_skb(jumbo);
511 write_lock_bh(&call->state_lock); 511 write_lock_bh(&call->state_lock);
512 if (call->state <= RXRPC_CALL_COMPLETE) { 512 if (call->state <= RXRPC_CALL_COMPLETE) {
513 call->state = RXRPC_CALL_LOCALLY_ABORTED; 513 call->state = RXRPC_CALL_LOCALLY_ABORTED;
514 call->abort_code = RX_PROTOCOL_ERROR; 514 call->abort_code = RX_PROTOCOL_ERROR;
515 set_bit(RXRPC_CALL_ABORT, &call->events); 515 set_bit(RXRPC_CALL_ABORT, &call->events);
516 rxrpc_queue_call(call); 516 rxrpc_queue_call(call);
517 } 517 }
518 write_unlock_bh(&call->state_lock); 518 write_unlock_bh(&call->state_lock);
519 _leave(""); 519 _leave("");
520 } 520 }
521 521
522 /* 522 /*
523 * post an incoming packet to the appropriate call/socket to deal with 523 * post an incoming packet to the appropriate call/socket to deal with
524 * - must get rid of the sk_buff, either by freeing it or by queuing it 524 * - must get rid of the sk_buff, either by freeing it or by queuing it
525 */ 525 */
526 static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn, 526 static void rxrpc_post_packet_to_call(struct rxrpc_connection *conn,
527 struct sk_buff *skb) 527 struct sk_buff *skb)
528 { 528 {
529 struct rxrpc_skb_priv *sp; 529 struct rxrpc_skb_priv *sp;
530 struct rxrpc_call *call; 530 struct rxrpc_call *call;
531 struct rb_node *p; 531 struct rb_node *p;
532 __be32 call_id; 532 __be32 call_id;
533 533
534 _enter("%p,%p", conn, skb); 534 _enter("%p,%p", conn, skb);
535 535
536 read_lock_bh(&conn->lock); 536 read_lock_bh(&conn->lock);
537 537
538 sp = rxrpc_skb(skb); 538 sp = rxrpc_skb(skb);
539 539
540 /* look at extant calls by channel number first */ 540 /* look at extant calls by channel number first */
541 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK]; 541 call = conn->channels[ntohl(sp->hdr.cid) & RXRPC_CHANNELMASK];
542 if (!call || call->call_id != sp->hdr.callNumber) 542 if (!call || call->call_id != sp->hdr.callNumber)
543 goto call_not_extant; 543 goto call_not_extant;
544 544
545 _debug("extant call [%d]", call->state); 545 _debug("extant call [%d]", call->state);
546 ASSERTCMP(call->conn, ==, conn); 546 ASSERTCMP(call->conn, ==, conn);
547 547
548 read_lock(&call->state_lock); 548 read_lock(&call->state_lock);
549 switch (call->state) { 549 switch (call->state) {
550 case RXRPC_CALL_LOCALLY_ABORTED: 550 case RXRPC_CALL_LOCALLY_ABORTED:
551 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) 551 if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
552 rxrpc_queue_call(call); 552 rxrpc_queue_call(call);
553 case RXRPC_CALL_REMOTELY_ABORTED: 553 case RXRPC_CALL_REMOTELY_ABORTED:
554 case RXRPC_CALL_NETWORK_ERROR: 554 case RXRPC_CALL_NETWORK_ERROR:
555 case RXRPC_CALL_DEAD: 555 case RXRPC_CALL_DEAD:
556 goto free_unlock; 556 goto free_unlock;
557 default: 557 default:
558 break; 558 break;
559 } 559 }
560 560
561 read_unlock(&call->state_lock); 561 read_unlock(&call->state_lock);
562 rxrpc_get_call(call); 562 rxrpc_get_call(call);
563 read_unlock_bh(&conn->lock); 563 read_unlock_bh(&conn->lock);
564 564
565 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 565 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
566 sp->hdr.flags & RXRPC_JUMBO_PACKET) 566 sp->hdr.flags & RXRPC_JUMBO_PACKET)
567 rxrpc_process_jumbo_packet(call, skb); 567 rxrpc_process_jumbo_packet(call, skb);
568 else 568 else
569 rxrpc_fast_process_packet(call, skb); 569 rxrpc_fast_process_packet(call, skb);
570 570
571 rxrpc_put_call(call); 571 rxrpc_put_call(call);
572 goto done; 572 goto done;
573 573
574 call_not_extant: 574 call_not_extant:
575 /* search the completed calls in case what we're dealing with is 575 /* search the completed calls in case what we're dealing with is
576 * there */ 576 * there */
577 _debug("call not extant"); 577 _debug("call not extant");
578 578
579 call_id = sp->hdr.callNumber; 579 call_id = sp->hdr.callNumber;
580 p = conn->calls.rb_node; 580 p = conn->calls.rb_node;
581 while (p) { 581 while (p) {
582 call = rb_entry(p, struct rxrpc_call, conn_node); 582 call = rb_entry(p, struct rxrpc_call, conn_node);
583 583
584 if (call_id < call->call_id) 584 if (call_id < call->call_id)
585 p = p->rb_left; 585 p = p->rb_left;
586 else if (call_id > call->call_id) 586 else if (call_id > call->call_id)
587 p = p->rb_right; 587 p = p->rb_right;
588 else 588 else
589 goto found_completed_call; 589 goto found_completed_call;
590 } 590 }
591 591
592 dead_call: 592 dead_call:
593 /* it's a either a really old call that we no longer remember or its a 593 /* it's a either a really old call that we no longer remember or its a
594 * new incoming call */ 594 * new incoming call */
595 read_unlock_bh(&conn->lock); 595 read_unlock_bh(&conn->lock);
596 596
597 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && 597 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
598 sp->hdr.seq == __constant_cpu_to_be32(1)) { 598 sp->hdr.seq == cpu_to_be32(1)) {
599 _debug("incoming call"); 599 _debug("incoming call");
600 skb_queue_tail(&conn->trans->local->accept_queue, skb); 600 skb_queue_tail(&conn->trans->local->accept_queue, skb);
601 rxrpc_queue_work(&conn->trans->local->acceptor); 601 rxrpc_queue_work(&conn->trans->local->acceptor);
602 goto done; 602 goto done;
603 } 603 }
604 604
605 _debug("dead call"); 605 _debug("dead call");
606 skb->priority = RX_CALL_DEAD; 606 skb->priority = RX_CALL_DEAD;
607 rxrpc_reject_packet(conn->trans->local, skb); 607 rxrpc_reject_packet(conn->trans->local, skb);
608 goto done; 608 goto done;
609 609
610 /* resend last packet of a completed call 610 /* resend last packet of a completed call
611 * - client calls may have been aborted or ACK'd 611 * - client calls may have been aborted or ACK'd
612 * - server calls may have been aborted 612 * - server calls may have been aborted
613 */ 613 */
614 found_completed_call: 614 found_completed_call:
615 _debug("completed call"); 615 _debug("completed call");
616 616
617 if (atomic_read(&call->usage) == 0) 617 if (atomic_read(&call->usage) == 0)
618 goto dead_call; 618 goto dead_call;
619 619
620 /* synchronise any state changes */ 620 /* synchronise any state changes */
621 read_lock(&call->state_lock); 621 read_lock(&call->state_lock);
622 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK, 622 ASSERTIFCMP(call->state != RXRPC_CALL_CLIENT_FINAL_ACK,
623 call->state, >=, RXRPC_CALL_COMPLETE); 623 call->state, >=, RXRPC_CALL_COMPLETE);
624 624
625 if (call->state == RXRPC_CALL_LOCALLY_ABORTED || 625 if (call->state == RXRPC_CALL_LOCALLY_ABORTED ||
626 call->state == RXRPC_CALL_REMOTELY_ABORTED || 626 call->state == RXRPC_CALL_REMOTELY_ABORTED ||
627 call->state == RXRPC_CALL_DEAD) { 627 call->state == RXRPC_CALL_DEAD) {
628 read_unlock(&call->state_lock); 628 read_unlock(&call->state_lock);
629 goto dead_call; 629 goto dead_call;
630 } 630 }
631 631
632 if (call->conn->in_clientflag) { 632 if (call->conn->in_clientflag) {
633 read_unlock(&call->state_lock); 633 read_unlock(&call->state_lock);
634 goto dead_call; /* complete server call */ 634 goto dead_call; /* complete server call */
635 } 635 }
636 636
637 _debug("final ack again"); 637 _debug("final ack again");
638 rxrpc_get_call(call); 638 rxrpc_get_call(call);
639 set_bit(RXRPC_CALL_ACK_FINAL, &call->events); 639 set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
640 rxrpc_queue_call(call); 640 rxrpc_queue_call(call);
641 641
642 free_unlock: 642 free_unlock:
643 read_unlock(&call->state_lock); 643 read_unlock(&call->state_lock);
644 read_unlock_bh(&conn->lock); 644 read_unlock_bh(&conn->lock);
645 rxrpc_free_skb(skb); 645 rxrpc_free_skb(skb);
646 done: 646 done:
647 _leave(""); 647 _leave("");
648 } 648 }
649 649
650 /* 650 /*
651 * post connection-level events to the connection 651 * post connection-level events to the connection
652 * - this includes challenges, responses and some aborts 652 * - this includes challenges, responses and some aborts
653 */ 653 */
654 static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, 654 static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
655 struct sk_buff *skb) 655 struct sk_buff *skb)
656 { 656 {
657 _enter("%p,%p", conn, skb); 657 _enter("%p,%p", conn, skb);
658 658
659 atomic_inc(&conn->usage); 659 atomic_inc(&conn->usage);
660 skb_queue_tail(&conn->rx_queue, skb); 660 skb_queue_tail(&conn->rx_queue, skb);
661 rxrpc_queue_conn(conn); 661 rxrpc_queue_conn(conn);
662 } 662 }
663 663
664 /* 664 /*
665 * handle data received on the local endpoint 665 * handle data received on the local endpoint
666 * - may be called in interrupt context 666 * - may be called in interrupt context
667 */ 667 */
668 void rxrpc_data_ready(struct sock *sk, int count) 668 void rxrpc_data_ready(struct sock *sk, int count)
669 { 669 {
670 struct rxrpc_connection *conn; 670 struct rxrpc_connection *conn;
671 struct rxrpc_transport *trans; 671 struct rxrpc_transport *trans;
672 struct rxrpc_skb_priv *sp; 672 struct rxrpc_skb_priv *sp;
673 struct rxrpc_local *local; 673 struct rxrpc_local *local;
674 struct rxrpc_peer *peer; 674 struct rxrpc_peer *peer;
675 struct sk_buff *skb; 675 struct sk_buff *skb;
676 int ret; 676 int ret;
677 677
678 _enter("%p, %d", sk, count); 678 _enter("%p, %d", sk, count);
679 679
680 ASSERT(!irqs_disabled()); 680 ASSERT(!irqs_disabled());
681 681
682 read_lock_bh(&rxrpc_local_lock); 682 read_lock_bh(&rxrpc_local_lock);
683 local = sk->sk_user_data; 683 local = sk->sk_user_data;
684 if (local && atomic_read(&local->usage) > 0) 684 if (local && atomic_read(&local->usage) > 0)
685 rxrpc_get_local(local); 685 rxrpc_get_local(local);
686 else 686 else
687 local = NULL; 687 local = NULL;
688 read_unlock_bh(&rxrpc_local_lock); 688 read_unlock_bh(&rxrpc_local_lock);
689 if (!local) { 689 if (!local) {
690 _leave(" [local dead]"); 690 _leave(" [local dead]");
691 return; 691 return;
692 } 692 }
693 693
694 skb = skb_recv_datagram(sk, 0, 1, &ret); 694 skb = skb_recv_datagram(sk, 0, 1, &ret);
695 if (!skb) { 695 if (!skb) {
696 rxrpc_put_local(local); 696 rxrpc_put_local(local);
697 if (ret == -EAGAIN) 697 if (ret == -EAGAIN)
698 return; 698 return;
699 _debug("UDP socket error %d", ret); 699 _debug("UDP socket error %d", ret);
700 return; 700 return;
701 } 701 }
702 702
703 rxrpc_new_skb(skb); 703 rxrpc_new_skb(skb);
704 704
705 _net("recv skb %p", skb); 705 _net("recv skb %p", skb);
706 706
707 /* we'll probably need to checksum it (didn't call sock_recvmsg) */ 707 /* we'll probably need to checksum it (didn't call sock_recvmsg) */
708 if (skb_checksum_complete(skb)) { 708 if (skb_checksum_complete(skb)) {
709 rxrpc_free_skb(skb); 709 rxrpc_free_skb(skb);
710 rxrpc_put_local(local); 710 rxrpc_put_local(local);
711 UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0); 711 UDP_INC_STATS_BH(UDP_MIB_INERRORS, 0);
712 _leave(" [CSUM failed]"); 712 _leave(" [CSUM failed]");
713 return; 713 return;
714 } 714 }
715 715
716 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0); 716 UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, 0);
717 717
718 /* the socket buffer we have is owned by UDP, with UDP's data all over 718 /* the socket buffer we have is owned by UDP, with UDP's data all over
719 * it, but we really want our own */ 719 * it, but we really want our own */
720 skb_orphan(skb); 720 skb_orphan(skb);
721 sp = rxrpc_skb(skb); 721 sp = rxrpc_skb(skb);
722 memset(sp, 0, sizeof(*sp)); 722 memset(sp, 0, sizeof(*sp));
723 723
724 _net("Rx UDP packet from %08x:%04hu", 724 _net("Rx UDP packet from %08x:%04hu",
725 ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source)); 725 ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));
726 726
727 /* dig out the RxRPC connection details */ 727 /* dig out the RxRPC connection details */
728 if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr, 728 if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
729 sizeof(sp->hdr)) < 0) 729 sizeof(sp->hdr)) < 0)
730 goto bad_message; 730 goto bad_message;
731 if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr))) 731 if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
732 BUG(); 732 BUG();
733 733
734 _net("Rx RxRPC %s ep=%x call=%x:%x", 734 _net("Rx RxRPC %s ep=%x call=%x:%x",
735 sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient", 735 sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
736 ntohl(sp->hdr.epoch), 736 ntohl(sp->hdr.epoch),
737 ntohl(sp->hdr.cid), 737 ntohl(sp->hdr.cid),
738 ntohl(sp->hdr.callNumber)); 738 ntohl(sp->hdr.callNumber));
739 739
740 if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) { 740 if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
741 _proto("Rx Bad Packet Type %u", sp->hdr.type); 741 _proto("Rx Bad Packet Type %u", sp->hdr.type);
742 goto bad_message; 742 goto bad_message;
743 } 743 }
744 744
745 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA && 745 if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
746 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0)) 746 (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
747 goto bad_message; 747 goto bad_message;
748 748
749 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source); 749 peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr, udp_hdr(skb)->source);
750 if (IS_ERR(peer)) 750 if (IS_ERR(peer))
751 goto cant_route_call; 751 goto cant_route_call;
752 752
753 trans = rxrpc_find_transport(local, peer); 753 trans = rxrpc_find_transport(local, peer);
754 rxrpc_put_peer(peer); 754 rxrpc_put_peer(peer);
755 if (!trans) 755 if (!trans)
756 goto cant_route_call; 756 goto cant_route_call;
757 757
758 conn = rxrpc_find_connection(trans, &sp->hdr); 758 conn = rxrpc_find_connection(trans, &sp->hdr);
759 rxrpc_put_transport(trans); 759 rxrpc_put_transport(trans);
760 if (!conn) 760 if (!conn)
761 goto cant_route_call; 761 goto cant_route_call;
762 762
763 _debug("CONN %p {%d}", conn, conn->debug_id); 763 _debug("CONN %p {%d}", conn, conn->debug_id);
764 764
765 if (sp->hdr.callNumber == 0) 765 if (sp->hdr.callNumber == 0)
766 rxrpc_post_packet_to_conn(conn, skb); 766 rxrpc_post_packet_to_conn(conn, skb);
767 else 767 else
768 rxrpc_post_packet_to_call(conn, skb); 768 rxrpc_post_packet_to_call(conn, skb);
769 rxrpc_put_connection(conn); 769 rxrpc_put_connection(conn);
770 rxrpc_put_local(local); 770 rxrpc_put_local(local);
771 return; 771 return;
772 772
773 cant_route_call: 773 cant_route_call:
774 _debug("can't route call"); 774 _debug("can't route call");
775 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED && 775 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
776 sp->hdr.type == RXRPC_PACKET_TYPE_DATA) { 776 sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
777 if (sp->hdr.seq == __constant_cpu_to_be32(1)) { 777 if (sp->hdr.seq == cpu_to_be32(1)) {
778 _debug("first packet"); 778 _debug("first packet");
779 skb_queue_tail(&local->accept_queue, skb); 779 skb_queue_tail(&local->accept_queue, skb);
780 rxrpc_queue_work(&local->acceptor); 780 rxrpc_queue_work(&local->acceptor);
781 rxrpc_put_local(local); 781 rxrpc_put_local(local);
782 _leave(" [incoming]"); 782 _leave(" [incoming]");
783 return; 783 return;
784 } 784 }
785 skb->priority = RX_INVALID_OPERATION; 785 skb->priority = RX_INVALID_OPERATION;
786 } else { 786 } else {
787 skb->priority = RX_CALL_DEAD; 787 skb->priority = RX_CALL_DEAD;
788 } 788 }
789 789
790 _debug("reject"); 790 _debug("reject");
791 rxrpc_reject_packet(local, skb); 791 rxrpc_reject_packet(local, skb);
792 rxrpc_put_local(local); 792 rxrpc_put_local(local);
793 _leave(" [no call]"); 793 _leave(" [no call]");
794 return; 794 return;
795 795
796 bad_message: 796 bad_message:
797 skb->priority = RX_PROTOCOL_ERROR; 797 skb->priority = RX_PROTOCOL_ERROR;
798 rxrpc_reject_packet(local, skb); 798 rxrpc_reject_packet(local, skb);
799 rxrpc_put_local(local); 799 rxrpc_put_local(local);
800 _leave(" [badmsg]"); 800 _leave(" [badmsg]");
801 } 801 }
802 802
1 /* Kerberos-based RxRPC security 1 /* Kerberos-based RxRPC security
2 * 2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/net.h> 13 #include <linux/net.h>
14 #include <linux/skbuff.h> 14 #include <linux/skbuff.h>
15 #include <linux/udp.h> 15 #include <linux/udp.h>
16 #include <linux/crypto.h> 16 #include <linux/crypto.h>
17 #include <linux/scatterlist.h> 17 #include <linux/scatterlist.h>
18 #include <linux/ctype.h> 18 #include <linux/ctype.h>
19 #include <net/sock.h> 19 #include <net/sock.h>
20 #include <net/af_rxrpc.h> 20 #include <net/af_rxrpc.h>
21 #define rxrpc_debug rxkad_debug 21 #define rxrpc_debug rxkad_debug
22 #include "ar-internal.h" 22 #include "ar-internal.h"
23 23
24 #define RXKAD_VERSION 2 24 #define RXKAD_VERSION 2
25 #define MAXKRB5TICKETLEN 1024 25 #define MAXKRB5TICKETLEN 1024
26 #define RXKAD_TKT_TYPE_KERBEROS_V5 256 26 #define RXKAD_TKT_TYPE_KERBEROS_V5 256
27 #define ANAME_SZ 40 /* size of authentication name */ 27 #define ANAME_SZ 40 /* size of authentication name */
28 #define INST_SZ 40 /* size of principal's instance */ 28 #define INST_SZ 40 /* size of principal's instance */
29 #define REALM_SZ 40 /* size of principal's auth domain */ 29 #define REALM_SZ 40 /* size of principal's auth domain */
30 #define SNAME_SZ 40 /* size of service name */ 30 #define SNAME_SZ 40 /* size of service name */
31 31
32 unsigned rxrpc_debug; 32 unsigned rxrpc_debug;
33 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 33 module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
34 MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask"); 34 MODULE_PARM_DESC(rxrpc_debug, "rxkad debugging mask");
35 35
/* security header prepended to the payload of packets secured at
 * RXRPC_SECURITY_AUTH level (see rxkad_init_connection_security) */
struct rxkad_level1_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
};
39 39
/* security header prepended to the payload of packets secured at
 * RXRPC_SECURITY_ENCRYPT level (see rxkad_init_connection_security) */
struct rxkad_level2_hdr {
	__be32	data_size;	/* true data size (excluding padding) */
	__be32	checksum;	/* decrypted data checksum */
};
44 44
45 MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)"); 45 MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)");
46 MODULE_AUTHOR("Red Hat, Inc."); 46 MODULE_AUTHOR("Red Hat, Inc.");
47 MODULE_LICENSE("GPL"); 47 MODULE_LICENSE("GPL");
48 48
49 /* 49 /*
50 * this holds a pinned cipher so that keventd doesn't get called by the cipher 50 * this holds a pinned cipher so that keventd doesn't get called by the cipher
51 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE 51 * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
52 * packets 52 * packets
53 */ 53 */
54 static struct crypto_blkcipher *rxkad_ci; 54 static struct crypto_blkcipher *rxkad_ci;
55 static DEFINE_MUTEX(rxkad_ci_mutex); 55 static DEFINE_MUTEX(rxkad_ci_mutex);
56 56
57 /* 57 /*
58 * initialise connection security 58 * initialise connection security
59 */ 59 */
60 static int rxkad_init_connection_security(struct rxrpc_connection *conn) 60 static int rxkad_init_connection_security(struct rxrpc_connection *conn)
61 { 61 {
62 struct rxrpc_key_payload *payload; 62 struct rxrpc_key_payload *payload;
63 struct crypto_blkcipher *ci; 63 struct crypto_blkcipher *ci;
64 int ret; 64 int ret;
65 65
66 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key)); 66 _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));
67 67
68 payload = conn->key->payload.data; 68 payload = conn->key->payload.data;
69 conn->security_ix = payload->k.security_index; 69 conn->security_ix = payload->k.security_index;
70 70
71 ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 71 ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
72 if (IS_ERR(ci)) { 72 if (IS_ERR(ci)) {
73 _debug("no cipher"); 73 _debug("no cipher");
74 ret = PTR_ERR(ci); 74 ret = PTR_ERR(ci);
75 goto error; 75 goto error;
76 } 76 }
77 77
78 if (crypto_blkcipher_setkey(ci, payload->k.session_key, 78 if (crypto_blkcipher_setkey(ci, payload->k.session_key,
79 sizeof(payload->k.session_key)) < 0) 79 sizeof(payload->k.session_key)) < 0)
80 BUG(); 80 BUG();
81 81
82 switch (conn->security_level) { 82 switch (conn->security_level) {
83 case RXRPC_SECURITY_PLAIN: 83 case RXRPC_SECURITY_PLAIN:
84 break; 84 break;
85 case RXRPC_SECURITY_AUTH: 85 case RXRPC_SECURITY_AUTH:
86 conn->size_align = 8; 86 conn->size_align = 8;
87 conn->security_size = sizeof(struct rxkad_level1_hdr); 87 conn->security_size = sizeof(struct rxkad_level1_hdr);
88 conn->header_size += sizeof(struct rxkad_level1_hdr); 88 conn->header_size += sizeof(struct rxkad_level1_hdr);
89 break; 89 break;
90 case RXRPC_SECURITY_ENCRYPT: 90 case RXRPC_SECURITY_ENCRYPT:
91 conn->size_align = 8; 91 conn->size_align = 8;
92 conn->security_size = sizeof(struct rxkad_level2_hdr); 92 conn->security_size = sizeof(struct rxkad_level2_hdr);
93 conn->header_size += sizeof(struct rxkad_level2_hdr); 93 conn->header_size += sizeof(struct rxkad_level2_hdr);
94 break; 94 break;
95 default: 95 default:
96 ret = -EKEYREJECTED; 96 ret = -EKEYREJECTED;
97 goto error; 97 goto error;
98 } 98 }
99 99
100 conn->cipher = ci; 100 conn->cipher = ci;
101 ret = 0; 101 ret = 0;
102 error: 102 error:
103 _leave(" = %d", ret); 103 _leave(" = %d", ret);
104 return ret; 104 return ret;
105 } 105 }
106 106
/*
 * prime the encryption state with the invariant parts of a connection's
 * description
 */
static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
	struct rxrpc_key_payload *payload;
	struct blkcipher_desc desc;
	struct scatterlist sg[2];
	struct rxrpc_crypt iv;
	struct {
		__be32 x[4];
	} tmpbuf __attribute__((aligned(16))); /* must all be in same page */

	_enter("");

	/* unkeyed connections have no checksum IV to derive */
	if (!conn->key)
		return;

	/* the session key serves as the IV for this one-off encryption */
	payload = conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));

	desc.tfm = conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* encrypt the connection's identifying data; x[2] is a zero
	 * placeholder whose encrypted value becomes the checksum IV */
	tmpbuf.x[0] = conn->epoch;
	tmpbuf.x[1] = conn->cid;
	tmpbuf.x[2] = 0;
	tmpbuf.x[3] = htonl(conn->security_ix);

	/* encrypt in place: source and destination are the same buffer */
	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* stash the derived IV for use when checksumming packets */
	memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
	ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);

	_leave("");
}
147 147
/*
 * partially encrypt a packet (level 1 security)
 * - only the security header (true length + check word + first word of data)
 *   is encrypted; the packet payload itself is left in the clear
 * - always returns 0
 */
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 data_size,
				    void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		struct rxkad_level1_hdr hdr;
		__be32	first;	/* first four bytes of data and padding */
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	u16 check;

	sp = rxrpc_skb(skb);

	_enter("");

	/* the check word is the low 16 bits of seq ^ callNumber (host
	 * order); it is folded into the upper half of the size field */
	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	data_size |= (u32) check << 16;

	tmpbuf.hdr.data_size = htonl(data_size);
	memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));

	/* start the encryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* encrypt in place: source and destination are the same buffer */
	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* write the encrypted security header back over the packet's
	 * security header area */
	memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));

	_leave(" = 0");
	return 0;
}
191 191
/*
 * wholly encrypt a packet (level 2 security)
 * - encrypts the security header into sechdr, then encrypts the skb data
 *   in place, padded up to the connection's size alignment
 * - returns 0 on success or -ENOMEM if the skb can't be made writable or
 *   needs more than 16 scatterlist entries
 */
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
				       struct sk_buff *skb,
				       u32 data_size,
				       void *sechdr)
{
	const struct rxrpc_key_payload *payload;
	struct rxkad_level2_hdr rxkhdr
		__attribute__((aligned(8))); /* must be all on one page */
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	unsigned len;
	u16 check;
	int nsg;

	sp = rxrpc_skb(skb);

	_enter("");

	/* check word: low 16 bits of seq ^ callNumber, folded into the
	 * upper half of the size field */
	check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);

	rxkhdr.data_size = htonl(data_size | (u32) check << 16);
	rxkhdr.checksum = 0;

	/* encrypt from the session key */
	payload = call->conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* encrypt the security header from rxkhdr into sechdr */
	sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
	sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));

	/* we want to encrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0 || nsg > 16)
		return -ENOMEM;

	/* round the length up to the connection's block alignment */
	len = data_size + call->conn->size_align - 1;
	len &= ~(call->conn->size_align - 1);

	/* continue the cipher chain from the header encryption above */
	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, len);
	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);

	_leave(" = 0");
	return 0;
}
247 247
/*
 * checksum an RxRPC packet header
 * - computes the security checksum for the outgoing packet, stores it in
 *   sp->hdr.cksum, then applies payload security appropriate to the
 *   connection's security level
 * - returns 0 on success, a key validation error, the sub-securer's error,
 *   or -EPERM for an unknown security level
 */
static int rxkad_secure_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       size_t data_size,
			       void *sechdr)
{
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u},%zu,",
	       call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
	       data_size);

	/* unsecured connection: nothing to do */
	if (!call->conn->cipher)
		return 0;

	ret = key_validate(call->conn->key);
	if (ret < 0)
		return ret;

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* calculate the security checksum: encrypt the call number plus the
	 * channel number combined with the low 30 bits of the sequence
	 * number (both in network order) */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = sp->hdr.callNumber;
	tmpbuf.x[1] = x;

	/* encrypt in place: source and destination are the same buffer */
	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* the checksum is the middle 16 bits of the encrypted second word */
	x = ntohl(tmpbuf.x[1]);
	x = (x >> 16) & 0xffff;
	if (x == 0)
		x = 1; /* zero checksums are not permitted */
	sp->hdr.cksum = htons(x);

	/* apply the payload security appropriate to the level */
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_secure_packet_encrypt(call, skb, data_size,
						  sechdr);
		break;
	default:
		ret = -EPERM;
		break;
	}

	_leave(" = %d [set %hx]", ret, x);
	return ret;
}
320 320
/*
 * decrypt partial encryption on a packet (level 1 security)
 * - decrypts the 8-byte security header in place, validates the check word
 *   against the packet's sequence and call numbers, and trims the padding
 * - returns 0 on success, -EPROTO (with *_abort_code set) on a bad check
 *   word or length, or -ENOMEM if the skb can't be made writable
 */
static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
				    struct sk_buff *skb,
				    u32 *_abort_code)
{
	struct rxkad_level1_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist sg[16];
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;
	int nsg;

	_enter("");

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0 || nsg > 16)
		goto nomem;

	/* only the first 8 bytes (the security header) are encrypted at
	 * this level */
	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, 8);

	/* start the decryption afresh */
	memset(&iv, 0, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	/* low 16 bits are the true data size, high 16 the check word */
	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	/* the check word must cancel against seq ^ callNumber */
	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
394 394
/*
 * wholly decrypt a packet (level 2 security)
 * - decrypts the entire skb in place with the session key, validates the
 *   check word and trims the padding
 * - uses a stack scatterlist for up to 4 fragments, kmallocs a larger one
 *   otherwise
 * - returns 0 on success, -EPROTO (with *_abort_code set) on a bad check
 *   word or length, or -ENOMEM on allocation failure
 */
static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
				       struct sk_buff *skb,
				       u32 *_abort_code)
{
	const struct rxrpc_key_payload *payload;
	struct rxkad_level2_hdr sechdr;
	struct rxrpc_skb_priv *sp;
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv;
	struct scatterlist _sg[4], *sg;
	struct sk_buff *trailer;
	u32 data_size, buf;
	u16 check;
	int nsg;

	_enter(",{%d}", skb->len);

	sp = rxrpc_skb(skb);

	/* we want to decrypt the skbuff in-place */
	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0)
		goto nomem;

	/* fall back to a heap scatterlist for heavily-fragmented skbs */
	sg = _sg;
	if (unlikely(nsg > 4)) {
		sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
		if (!sg)
			goto nomem;
	}

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);

	/* decrypt from the session key */
	payload = call->conn->key->payload.data;
	memcpy(&iv, payload->k.session_key, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
	if (sg != _sg)
		kfree(sg);

	/* remove the decrypted packet length */
	if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
		goto datalen_error;
	if (!skb_pull(skb, sizeof(sechdr)))
		BUG();

	/* low 16 bits are the true data size, high 16 the check word */
	buf = ntohl(sechdr.data_size);
	data_size = buf & 0xffff;

	/* the check word must cancel against seq ^ callNumber */
	check = buf >> 16;
	check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
	check &= 0xffff;
	if (check != 0) {
		*_abort_code = RXKADSEALEDINCON;
		goto protocol_error;
	}

	/* shorten the packet to remove the padding */
	if (data_size > skb->len)
		goto datalen_error;
	else if (data_size < skb->len)
		skb->len = data_size;

	_leave(" = 0 [dlen=%x]", data_size);
	return 0;

datalen_error:
	*_abort_code = RXKADDATALEN;
protocol_error:
	_leave(" = -EPROTO");
	return -EPROTO;

nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
479 479
/*
 * verify the security on a received packet
 * - recomputes the security checksum (mirroring rxkad_secure_packet) and
 *   compares it with the one in the packet header, then verifies the
 *   payload according to the connection's security level
 * - returns 0 on success, -EPROTO (with *_abort_code set) on inconsistency
 *   or checksum failure, the sub-verifier's error, or -ENOANO for an
 *   unknown security level
 */
static int rxkad_verify_packet(const struct rxrpc_call *call,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct blkcipher_desc desc;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_crypt iv;
	struct scatterlist sg[2];
	struct {
		__be32 x[2];
	} tmpbuf __attribute__((aligned(8))); /* must all be in same page */
	__be32 x;
	__be16 cksum;
	int ret;

	sp = rxrpc_skb(skb);

	_enter("{%d{%x}},{#%u}",
	       call->debug_id, key_serial(call->conn->key),
	       ntohl(sp->hdr.seq));

	/* unsecured connection: nothing to check */
	if (!call->conn->cipher)
		return 0;

	/* this security module only handles security index 2 (rxkad) */
	if (sp->hdr.securityIndex != 2) {
		*_abort_code = RXKADINCONSISTENCY;
		_leave(" = -EPROTO [not rxkad]");
		return -EPROTO;
	}

	/* continue encrypting from where we left off */
	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
	desc.tfm = call->conn->cipher;
	desc.info = iv.x;
	desc.flags = 0;

	/* validate the security checksum: same construction as the send
	 * side, but using our record of the call ID */
	x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
	x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
	tmpbuf.x[0] = call->call_id;
	tmpbuf.x[1] = x;

	/* encrypt in place: source and destination are the same buffer */
	sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
	sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
	crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

	/* the checksum is the middle 16 bits of the encrypted second word */
	x = ntohl(tmpbuf.x[1]);
	x = (x >> 16) & 0xffff;
	if (x == 0)
		x = 1; /* zero checksums are not permitted */

	cksum = htons(x);
	if (sp->hdr.cksum != cksum) {
		*_abort_code = RXKADSEALEDINCON;
		_leave(" = -EPROTO [csum failed]");
		return -EPROTO;
	}

	/* verify the payload appropriately to the security level */
	switch (call->conn->security_level) {
	case RXRPC_SECURITY_PLAIN:
		ret = 0;
		break;
	case RXRPC_SECURITY_AUTH:
		ret = rxkad_verify_packet_auth(call, skb, _abort_code);
		break;
	case RXRPC_SECURITY_ENCRYPT:
		ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
		break;
	default:
		ret = -ENOANO;
		break;
	}

	_leave(" = %d", ret);
	return ret;
}
559 559
/*
 * issue a challenge
 * - generates a fresh nonce and sends a CHALLENGE packet to the peer
 * - returns 0 on success, a key validation error, or -EAGAIN if the
 *   sendmsg fails (so the challenge may be retried)
 */
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
{
	struct rxkad_challenge challenge;
	struct rxrpc_header hdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

	ret = key_validate(conn->key);
	if (ret < 0)
		return ret;

	/* record the nonce so the peer's RESPONSE can be checked against it */
	get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));

	challenge.version	= htonl(2);
	challenge.nonce		= htonl(conn->security_nonce);
	challenge.min_level	= htonl(0);
	challenge.__padding	= 0;

	msg.msg_name	= &conn->trans->peer->srx.transport.sin;
	msg.msg_namelen	= sizeof(conn->trans->peer->srx.transport.sin);
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	/* a CHALLENGE is connection-level, so carries no call or sequence
	 * number */
	hdr.epoch	= conn->epoch;
	hdr.cid		= conn->cid;
	hdr.callNumber	= 0;
	hdr.seq		= 0;
	hdr.type	= RXRPC_PACKET_TYPE_CHALLENGE;
	hdr.flags	= conn->out_clientflag;
	hdr.userStatus	= 0;
	hdr.securityIndex = conn->security_ix;
	hdr._rsvd	= 0;
	hdr.serviceId	= conn->service_id;

	iov[0].iov_base	= &hdr;
	iov[0].iov_len	= sizeof(hdr);
	iov[1].iov_base	= &challenge;
	iov[1].iov_len	= sizeof(challenge);

	len = iov[0].iov_len + iov[1].iov_len;

	/* assign the serial as late as possible to keep serials monotonic
	 * with transmission order */
	hdr.serial = htonl(atomic_inc_return(&conn->serial));
	_proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));

	ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	_leave(" = 0");
	return 0;
}
621 621
622 /* 622 /*
623 * send a Kerberos security response 623 * send a Kerberos security response
624 */ 624 */
625 static int rxkad_send_response(struct rxrpc_connection *conn, 625 static int rxkad_send_response(struct rxrpc_connection *conn,
626 struct rxrpc_header *hdr, 626 struct rxrpc_header *hdr,
627 struct rxkad_response *resp, 627 struct rxkad_response *resp,
628 const struct rxkad_key *s2) 628 const struct rxkad_key *s2)
629 { 629 {
630 struct msghdr msg; 630 struct msghdr msg;
631 struct kvec iov[3]; 631 struct kvec iov[3];
632 size_t len; 632 size_t len;
633 int ret; 633 int ret;
634 634
635 _enter(""); 635 _enter("");
636 636
637 msg.msg_name = &conn->trans->peer->srx.transport.sin; 637 msg.msg_name = &conn->trans->peer->srx.transport.sin;
638 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin); 638 msg.msg_namelen = sizeof(conn->trans->peer->srx.transport.sin);
639 msg.msg_control = NULL; 639 msg.msg_control = NULL;
640 msg.msg_controllen = 0; 640 msg.msg_controllen = 0;
641 msg.msg_flags = 0; 641 msg.msg_flags = 0;
642 642
643 hdr->epoch = conn->epoch; 643 hdr->epoch = conn->epoch;
644 hdr->seq = 0; 644 hdr->seq = 0;
645 hdr->type = RXRPC_PACKET_TYPE_RESPONSE; 645 hdr->type = RXRPC_PACKET_TYPE_RESPONSE;
646 hdr->flags = conn->out_clientflag; 646 hdr->flags = conn->out_clientflag;
647 hdr->userStatus = 0; 647 hdr->userStatus = 0;
648 hdr->_rsvd = 0; 648 hdr->_rsvd = 0;
649 649
650 iov[0].iov_base = hdr; 650 iov[0].iov_base = hdr;
651 iov[0].iov_len = sizeof(*hdr); 651 iov[0].iov_len = sizeof(*hdr);
652 iov[1].iov_base = resp; 652 iov[1].iov_base = resp;
653 iov[1].iov_len = sizeof(*resp); 653 iov[1].iov_len = sizeof(*resp);
654 iov[2].iov_base = (void *) s2->ticket; 654 iov[2].iov_base = (void *) s2->ticket;
655 iov[2].iov_len = s2->ticket_len; 655 iov[2].iov_len = s2->ticket_len;
656 656
657 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len; 657 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
658 658
659 hdr->serial = htonl(atomic_inc_return(&conn->serial)); 659 hdr->serial = htonl(atomic_inc_return(&conn->serial));
660 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial)); 660 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
661 661
662 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len); 662 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
663 if (ret < 0) { 663 if (ret < 0) {
664 _debug("sendmsg failed: %d", ret); 664 _debug("sendmsg failed: %d", ret);
665 return -EAGAIN; 665 return -EAGAIN;
666 } 666 }
667 667
668 _leave(" = 0"); 668 _leave(" = 0");
669 return 0; 669 return 0;
670 } 670 }
671 671
672 /* 672 /*
673 * calculate the response checksum 673 * calculate the response checksum
674 */ 674 */
675 static void rxkad_calc_response_checksum(struct rxkad_response *response) 675 static void rxkad_calc_response_checksum(struct rxkad_response *response)
676 { 676 {
677 u32 csum = 1000003; 677 u32 csum = 1000003;
678 int loop; 678 int loop;
679 u8 *p = (u8 *) response; 679 u8 *p = (u8 *) response;
680 680
681 for (loop = sizeof(*response); loop > 0; loop--) 681 for (loop = sizeof(*response); loop > 0; loop--)
682 csum = csum * 0x10204081 + *p++; 682 csum = csum * 0x10204081 + *p++;
683 683
684 response->encrypted.checksum = htonl(csum); 684 response->encrypted.checksum = htonl(csum);
685 } 685 }
686 686
687 /* 687 /*
688 * load a scatterlist with a potentially split-page buffer 688 * load a scatterlist with a potentially split-page buffer
689 */ 689 */
690 static void rxkad_sg_set_buf2(struct scatterlist sg[2], 690 static void rxkad_sg_set_buf2(struct scatterlist sg[2],
691 void *buf, size_t buflen) 691 void *buf, size_t buflen)
692 { 692 {
693 int nsg = 1; 693 int nsg = 1;
694 694
695 sg_init_table(sg, 2); 695 sg_init_table(sg, 2);
696 696
697 sg_set_buf(&sg[0], buf, buflen); 697 sg_set_buf(&sg[0], buf, buflen);
698 if (sg[0].offset + buflen > PAGE_SIZE) { 698 if (sg[0].offset + buflen > PAGE_SIZE) {
699 /* the buffer was split over two pages */ 699 /* the buffer was split over two pages */
700 sg[0].length = PAGE_SIZE - sg[0].offset; 700 sg[0].length = PAGE_SIZE - sg[0].offset;
701 sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); 701 sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
702 nsg++; 702 nsg++;
703 } 703 }
704 704
705 sg_mark_end(&sg[nsg - 1]); 705 sg_mark_end(&sg[nsg - 1]);
706 706
707 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); 707 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
708 } 708 }
709 709
710 /* 710 /*
711 * encrypt the response packet 711 * encrypt the response packet
712 */ 712 */
713 static void rxkad_encrypt_response(struct rxrpc_connection *conn, 713 static void rxkad_encrypt_response(struct rxrpc_connection *conn,
714 struct rxkad_response *resp, 714 struct rxkad_response *resp,
715 const struct rxkad_key *s2) 715 const struct rxkad_key *s2)
716 { 716 {
717 struct blkcipher_desc desc; 717 struct blkcipher_desc desc;
718 struct rxrpc_crypt iv; 718 struct rxrpc_crypt iv;
719 struct scatterlist sg[2]; 719 struct scatterlist sg[2];
720 720
721 /* continue encrypting from where we left off */ 721 /* continue encrypting from where we left off */
722 memcpy(&iv, s2->session_key, sizeof(iv)); 722 memcpy(&iv, s2->session_key, sizeof(iv));
723 desc.tfm = conn->cipher; 723 desc.tfm = conn->cipher;
724 desc.info = iv.x; 724 desc.info = iv.x;
725 desc.flags = 0; 725 desc.flags = 0;
726 726
727 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); 727 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
728 crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); 728 crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
729 } 729 }
730 730
731 /* 731 /*
732 * respond to a challenge packet 732 * respond to a challenge packet
733 */ 733 */
734 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, 734 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
735 struct sk_buff *skb, 735 struct sk_buff *skb,
736 u32 *_abort_code) 736 u32 *_abort_code)
737 { 737 {
738 const struct rxrpc_key_payload *payload; 738 const struct rxrpc_key_payload *payload;
739 struct rxkad_challenge challenge; 739 struct rxkad_challenge challenge;
740 struct rxkad_response resp 740 struct rxkad_response resp
741 __attribute__((aligned(8))); /* must be aligned for crypto */ 741 __attribute__((aligned(8))); /* must be aligned for crypto */
742 struct rxrpc_skb_priv *sp; 742 struct rxrpc_skb_priv *sp;
743 u32 version, nonce, min_level, abort_code; 743 u32 version, nonce, min_level, abort_code;
744 int ret; 744 int ret;
745 745
746 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 746 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
747 747
748 if (!conn->key) { 748 if (!conn->key) {
749 _leave(" = -EPROTO [no key]"); 749 _leave(" = -EPROTO [no key]");
750 return -EPROTO; 750 return -EPROTO;
751 } 751 }
752 752
753 ret = key_validate(conn->key); 753 ret = key_validate(conn->key);
754 if (ret < 0) { 754 if (ret < 0) {
755 *_abort_code = RXKADEXPIRED; 755 *_abort_code = RXKADEXPIRED;
756 return ret; 756 return ret;
757 } 757 }
758 758
759 abort_code = RXKADPACKETSHORT; 759 abort_code = RXKADPACKETSHORT;
760 sp = rxrpc_skb(skb); 760 sp = rxrpc_skb(skb);
761 if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0) 761 if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
762 goto protocol_error; 762 goto protocol_error;
763 763
764 version = ntohl(challenge.version); 764 version = ntohl(challenge.version);
765 nonce = ntohl(challenge.nonce); 765 nonce = ntohl(challenge.nonce);
766 min_level = ntohl(challenge.min_level); 766 min_level = ntohl(challenge.min_level);
767 767
768 _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }", 768 _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
769 ntohl(sp->hdr.serial), version, nonce, min_level); 769 ntohl(sp->hdr.serial), version, nonce, min_level);
770 770
771 abort_code = RXKADINCONSISTENCY; 771 abort_code = RXKADINCONSISTENCY;
772 if (version != RXKAD_VERSION) 772 if (version != RXKAD_VERSION)
773 goto protocol_error; 773 goto protocol_error;
774 774
775 abort_code = RXKADLEVELFAIL; 775 abort_code = RXKADLEVELFAIL;
776 if (conn->security_level < min_level) 776 if (conn->security_level < min_level)
777 goto protocol_error; 777 goto protocol_error;
778 778
779 payload = conn->key->payload.data; 779 payload = conn->key->payload.data;
780 780
781 /* build the response packet */ 781 /* build the response packet */
782 memset(&resp, 0, sizeof(resp)); 782 memset(&resp, 0, sizeof(resp));
783 783
784 resp.version = RXKAD_VERSION; 784 resp.version = RXKAD_VERSION;
785 resp.encrypted.epoch = conn->epoch; 785 resp.encrypted.epoch = conn->epoch;
786 resp.encrypted.cid = conn->cid; 786 resp.encrypted.cid = conn->cid;
787 resp.encrypted.securityIndex = htonl(conn->security_ix); 787 resp.encrypted.securityIndex = htonl(conn->security_ix);
788 resp.encrypted.call_id[0] = 788 resp.encrypted.call_id[0] =
789 (conn->channels[0] ? conn->channels[0]->call_id : 0); 789 (conn->channels[0] ? conn->channels[0]->call_id : 0);
790 resp.encrypted.call_id[1] = 790 resp.encrypted.call_id[1] =
791 (conn->channels[1] ? conn->channels[1]->call_id : 0); 791 (conn->channels[1] ? conn->channels[1]->call_id : 0);
792 resp.encrypted.call_id[2] = 792 resp.encrypted.call_id[2] =
793 (conn->channels[2] ? conn->channels[2]->call_id : 0); 793 (conn->channels[2] ? conn->channels[2]->call_id : 0);
794 resp.encrypted.call_id[3] = 794 resp.encrypted.call_id[3] =
795 (conn->channels[3] ? conn->channels[3]->call_id : 0); 795 (conn->channels[3] ? conn->channels[3]->call_id : 0);
796 resp.encrypted.inc_nonce = htonl(nonce + 1); 796 resp.encrypted.inc_nonce = htonl(nonce + 1);
797 resp.encrypted.level = htonl(conn->security_level); 797 resp.encrypted.level = htonl(conn->security_level);
798 resp.kvno = htonl(payload->k.kvno); 798 resp.kvno = htonl(payload->k.kvno);
799 resp.ticket_len = htonl(payload->k.ticket_len); 799 resp.ticket_len = htonl(payload->k.ticket_len);
800 800
801 /* calculate the response checksum and then do the encryption */ 801 /* calculate the response checksum and then do the encryption */
802 rxkad_calc_response_checksum(&resp); 802 rxkad_calc_response_checksum(&resp);
803 rxkad_encrypt_response(conn, &resp, &payload->k); 803 rxkad_encrypt_response(conn, &resp, &payload->k);
804 return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k); 804 return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);
805 805
806 protocol_error: 806 protocol_error:
807 *_abort_code = abort_code; 807 *_abort_code = abort_code;
808 _leave(" = -EPROTO [%d]", abort_code); 808 _leave(" = -EPROTO [%d]", abort_code);
809 return -EPROTO; 809 return -EPROTO;
810 } 810 }
811 811
/*
 * Decrypt the Kerberos IV ticket in the response and parse out the
 * session key and expiry time.
 *
 * The ticket is decrypted in place with the connection's server key and
 * then walked field by field: flags, names, requestor address, session
 * key, lifetime and issue time.
 *
 * Returns 0 on success with *_session_key and *_expiry filled in; on
 * failure returns a negative error code and sets *_abort_code to the
 * rxkad abort code to send to the peer.
 */
static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
				void *ticket, size_t ticket_len,
				struct rxrpc_crypt *_session_key,
				time_t *_expiry,
				u32 *_abort_code)
{
	struct blkcipher_desc desc;
	struct rxrpc_crypt iv, key;
	struct scatterlist sg[1];
	struct in_addr addr;
	unsigned life;
	time_t issue, now;
	bool little_endian;
	int ret;
	u8 *p, *q, *name, *end;

	_enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));

	*_expiry = 0;

	/* the server key must still be usable before we trust the ticket */
	ret = key_validate(conn->server_key);
	if (ret < 0) {
		switch (ret) {
		case -EKEYEXPIRED:
			*_abort_code = RXKADEXPIRED;
			goto error;
		default:
			*_abort_code = RXKADNOAUTH;
			goto error;
		}
	}

	ASSERT(conn->server_key->payload.data != NULL);
	/* the caller must supply an 8-byte-aligned buffer for the cipher */
	ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);

	memcpy(&iv, &conn->server_key->type_data, sizeof(iv));

	desc.tfm = conn->server_key->payload.data;
	desc.info = iv.x;
	desc.flags = 0;

	/* decrypt the whole ticket in place */
	sg_init_one(&sg[0], ticket, ticket_len);
	crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);

	p = ticket;
	end = p + ticket_len;

	/* Z(size): consume a NUL-terminated, printable string of at most
	 * 'size' characters from the ticket, advance p past the NUL and
	 * yield a pointer to the start of the string; jumps to bad_ticket
	 * if the string is unterminated, overlong or unprintable */
#define Z(size)						\
	({						\
		u8 *__str = p;				\
		q = memchr(p, 0, end - p);		\
		if (!q || q - p > (size))		\
			goto bad_ticket;		\
		for (; p < q; p++)			\
			if (!isprint(*p))		\
				goto bad_ticket;	\
		p++;					\
		__str;					\
	})

	/* extract the ticket flags; bit 0 selects the byte order of the
	 * timestamp read below */
	_debug("KIV FLAGS: %x", *p);
	little_endian = *p & 1;
	p++;

	/* extract the authentication name */
	name = Z(ANAME_SZ);
	_debug("KIV ANAME: %s", name);

	/* extract the principal's instance */
	name = Z(INST_SZ);
	_debug("KIV INST : %s", name);

	/* extract the principal's authentication domain */
	name = Z(REALM_SZ);
	_debug("KIV REALM: %s", name);

	/* make sure the fixed-size remainder of the ticket is present */
	if (end - p < 4 + 8 + 4 + 2)
		goto bad_ticket;

	/* get the IPv4 address of the entity that requested the ticket */
	memcpy(&addr, p, sizeof(addr));
	p += 4;
	_debug("KIV ADDR : "NIPQUAD_FMT, NIPQUAD(addr));

	/* get the session key from the ticket */
	memcpy(&key, p, sizeof(key));
	p += 8;
	_debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1]));
	memcpy(_session_key, &key, sizeof(key));

	/* get the ticket's lifetime (stored in units of 5 minutes,
	 * converted here to seconds) */
	life = *p++ * 5 * 60;
	_debug("KIV LIFE : %u", life);

	/* get the issue time of the ticket; the flag bit extracted above
	 * tells us which byte order the sender used */
	if (little_endian) {
		__le32 stamp;
		memcpy(&stamp, p, 4);
		issue = le32_to_cpu(stamp);
	} else {
		__be32 stamp;
		memcpy(&stamp, p, 4);
		issue = be32_to_cpu(stamp);
	}
	p += 4;
	now = get_seconds();
	_debug("KIV ISSUE: %lx [%lx]", issue, now);

	/* check the ticket is in date */
	if (issue > now) {
		/* issued in the future - clock skew or forgery */
		*_abort_code = RXKADNOAUTH;
		ret = -EKEYREJECTED;
		goto error;
	}

	if (issue < now - life) {
		/* already expired */
		*_abort_code = RXKADEXPIRED;
		ret = -EKEYEXPIRED;
		goto error;
	}

	*_expiry = issue + life;

	/* get the service name */
	name = Z(SNAME_SZ);
	_debug("KIV SNAME: %s", name);

	/* get the service instance name */
	name = Z(INST_SZ);
	_debug("KIV SINST: %s", name);

	ret = 0;
error:
	_leave(" = %d", ret);
	return ret;

bad_ticket:
	*_abort_code = RXKADBADTICKET;
	ret = -EBADMSG;
	goto error;
}
957 957
958 /* 958 /*
959 * decrypt the response packet 959 * decrypt the response packet
960 */ 960 */
961 static void rxkad_decrypt_response(struct rxrpc_connection *conn, 961 static void rxkad_decrypt_response(struct rxrpc_connection *conn,
962 struct rxkad_response *resp, 962 struct rxkad_response *resp,
963 const struct rxrpc_crypt *session_key) 963 const struct rxrpc_crypt *session_key)
964 { 964 {
965 struct blkcipher_desc desc; 965 struct blkcipher_desc desc;
966 struct scatterlist sg[2]; 966 struct scatterlist sg[2];
967 struct rxrpc_crypt iv; 967 struct rxrpc_crypt iv;
968 968
969 _enter(",,%08x%08x", 969 _enter(",,%08x%08x",
970 ntohl(session_key->n[0]), ntohl(session_key->n[1])); 970 ntohl(session_key->n[0]), ntohl(session_key->n[1]));
971 971
972 ASSERT(rxkad_ci != NULL); 972 ASSERT(rxkad_ci != NULL);
973 973
974 mutex_lock(&rxkad_ci_mutex); 974 mutex_lock(&rxkad_ci_mutex);
975 if (crypto_blkcipher_setkey(rxkad_ci, session_key->x, 975 if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
976 sizeof(*session_key)) < 0) 976 sizeof(*session_key)) < 0)
977 BUG(); 977 BUG();
978 978
979 memcpy(&iv, session_key, sizeof(iv)); 979 memcpy(&iv, session_key, sizeof(iv));
980 desc.tfm = rxkad_ci; 980 desc.tfm = rxkad_ci;
981 desc.info = iv.x; 981 desc.info = iv.x;
982 desc.flags = 0; 982 desc.flags = 0;
983 983
984 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); 984 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
985 crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); 985 crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
986 mutex_unlock(&rxkad_ci_mutex); 986 mutex_unlock(&rxkad_ci_mutex);
987 987
988 _leave(""); 988 _leave("");
989 } 989 }
990 990
/*
 * Verify an incoming rxkad RESPONSE packet.
 *
 * Decrypts and parses the enclosed Kerberos ticket with the server key,
 * uses the session key from the ticket to decrypt the response proper,
 * and cross-checks the encrypted contents (epoch, cid, security index,
 * checksum, nonce and security level) against the connection's state.
 *
 * Returns 0 on success; on protocol failure returns -EPROTO and sets
 * *_abort_code.  May also return other negative errors (e.g. -ENOMEM).
 */
static int rxkad_verify_response(struct rxrpc_connection *conn,
				 struct sk_buff *skb,
				 u32 *_abort_code)
{
	struct rxkad_response response
		__attribute__((aligned(8))); /* must be aligned for crypto */
	struct rxrpc_skb_priv *sp;
	struct rxrpc_crypt session_key;
	time_t expiry;
	void *ticket;
	u32 abort_code, version, kvno, ticket_len, csum, level;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));

	abort_code = RXKADPACKETSHORT;
	if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
		goto protocol_error;
	/* advance past the response body so the ticket is at offset 0 */
	if (!pskb_pull(skb, sizeof(response)))
		BUG();

	version = ntohl(response.version);
	ticket_len = ntohl(response.ticket_len);
	kvno = ntohl(response.kvno);
	sp = rxrpc_skb(skb);
	_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
	       ntohl(sp->hdr.serial), version, kvno, ticket_len);

	abort_code = RXKADINCONSISTENCY;
	if (version != RXKAD_VERSION)
		goto protocol_error;

	abort_code = RXKADTICKETLEN;
	if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
		goto protocol_error;

	abort_code = RXKADUNKNOWNKEY;
	if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
		goto protocol_error;

	/* extract the kerberos ticket and decrypt and decode it */
	ticket = kmalloc(ticket_len, GFP_NOFS);
	if (!ticket)
		return -ENOMEM;

	abort_code = RXKADPACKETSHORT;
	if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
		goto protocol_error_free;

	ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
				   &expiry, &abort_code);
	if (ret < 0) {
		*_abort_code = abort_code;
		kfree(ticket);
		return ret;
	}

	/* use the session key from inside the ticket to decrypt the
	 * response */
	rxkad_decrypt_response(conn, &response, &session_key);

	abort_code = RXKADSEALEDINCON;
	/* epoch and cid are big-endian both on the wire and in the
	 * connection record, so they can be compared raw */
	if (response.encrypted.epoch != conn->epoch)
		goto protocol_error_free;
	if (response.encrypted.cid != conn->cid)
		goto protocol_error_free;
	if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
		goto protocol_error_free;
	/* recompute the checksum over the response with the checksum field
	 * zeroed, the same way the sender computed it */
	csum = response.encrypted.checksum;
	response.encrypted.checksum = 0;
	rxkad_calc_response_checksum(&response);
	if (response.encrypted.checksum != csum)
		goto protocol_error_free;

	/* echoed call IDs must fit in a signed int */
	if (ntohl(response.encrypted.call_id[0]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[1]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[2]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[3]) > INT_MAX)
		goto protocol_error_free;

	abort_code = RXKADOUTOFSEQUENCE;
	/* the peer must have incremented the connection's security nonce */
	if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
		goto protocol_error_free;

	abort_code = RXKADLEVELFAIL;
	level = ntohl(response.encrypted.level);
	if (level > RXRPC_SECURITY_ENCRYPT)
		goto protocol_error_free;
	conn->security_level = level;

	/* create a key to hold the security data and expiration time - after
	 * this the connection security can be handled in exactly the same way
	 * as for a client connection */
	ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
	if (ret < 0) {
		kfree(ticket);
		return ret;
	}

	kfree(ticket);
	_leave(" = 0");
	return 0;

protocol_error_free:
	kfree(ticket);
protocol_error:
	*_abort_code = abort_code;
	_leave(" = -EPROTO [%d]", abort_code);
	return -EPROTO;
}
1104 1104
1105 /* 1105 /*
1106 * clear the connection security 1106 * clear the connection security
1107 */ 1107 */
1108 static void rxkad_clear(struct rxrpc_connection *conn) 1108 static void rxkad_clear(struct rxrpc_connection *conn)
1109 { 1109 {
1110 _enter(""); 1110 _enter("");
1111 1111
1112 if (conn->cipher) 1112 if (conn->cipher)
1113 crypto_free_blkcipher(conn->cipher); 1113 crypto_free_blkcipher(conn->cipher);
1114 } 1114 }
1115 1115
/*
 * RxRPC Kerberos-based security
 *
 * Operations table registered with the rxrpc core; the core dispatches
 * through these callbacks for any connection whose security index is
 * RXKAD_VERSION.
 */
static struct rxrpc_security rxkad = {
	.owner				= THIS_MODULE,
	.name				= "rxkad",
	.security_index			= RXKAD_VERSION,
	.init_connection_security	= rxkad_init_connection_security,
	.prime_packet_security		= rxkad_prime_packet_security,
	.secure_packet			= rxkad_secure_packet,
	.verify_packet			= rxkad_verify_packet,
	.issue_challenge		= rxkad_issue_challenge,
	.respond_to_challenge		= rxkad_respond_to_challenge,
	.verify_response		= rxkad_verify_response,
	.clear				= rxkad_clear,
};
1132 1132
1133 static __init int rxkad_init(void) 1133 static __init int rxkad_init(void)
1134 { 1134 {
1135 _enter(""); 1135 _enter("");
1136 1136
1137 /* pin the cipher we need so that the crypto layer doesn't invoke 1137 /* pin the cipher we need so that the crypto layer doesn't invoke
1138 * keventd to go get it */ 1138 * keventd to go get it */
1139 rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); 1139 rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
1140 if (IS_ERR(rxkad_ci)) 1140 if (IS_ERR(rxkad_ci))
1141 return PTR_ERR(rxkad_ci); 1141 return PTR_ERR(rxkad_ci);
1142 1142
1143 return rxrpc_register_security(&rxkad); 1143 return rxrpc_register_security(&rxkad);
1144 } 1144 }
1145 1145
1146 module_init(rxkad_init); 1146 module_init(rxkad_init);
1147 1147
/*
 * Module unload: unregister the rxkad security type with the rxrpc core
 * and release the fcrypt cipher pinned by rxkad_init().
 */
static __exit void rxkad_exit(void)
{
	_enter("");

	rxrpc_unregister_security(&rxkad);
	crypto_free_blkcipher(rxkad_ci);
}

module_exit(rxkad_exit);
1157 1157