net/rxrpc/call_accept.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;
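
		/* The ring indices are free-running: CIRC_CNT(head, tail,
		 * size) is just (head - tail) & (size - 1), which is why the
		 * ring size must be a power of two.  This function is the
		 * producer side: a filled slot is published by advancing the
		 * head with smp_store_release(), which pairs with the
		 * smp_load_acquire() of the head in
		 * rxrpc_alloc_incoming_call(); slots handed back by the
		 * consumer are noticed by re-reading the tail with
		 * READ_ONCE() above.
		 */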

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}
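
	/* A kernel service (one that supplies a ->discard_new_call hook)
	 * charges up the backlog itself with rxrpc_kernel_charge_accept(),
	 * so don't top it up on its behalf here.
	 */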
	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);
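
	/* (Taking and immediately dropping the lock acts as a barrier: any
	 * CPU that took rx->incoming_lock before rx->backlog was cleared
	 * above has now finished with the rings.)
	 */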

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
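
		/* Preallocated conns sit on the rxnet lists but were never
		 * exposed to the wire, so they can be unhooked and freed
		 * directly rather than going through the usual teardown.
		 */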
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
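		/* No connection was found in the routing step, so make one
		 * from the preallocated parts, reusing the peer looked up
		 * earlier if a ref can still be taken on it.
		 */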
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}
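
	/* This is the consumer side of the rings: the smp_load_acquire()
	 * calls on the heads above pair with the producer's
	 * smp_store_release() in rxrpc_service_prealloc_one(), and advancing
	 * a tail with smp_store_release() hands the slot back for refilling.
	 */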

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);
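
	/* A kernel service hears about the new call immediately through its
	 * notification hook; for a userspace server the call just counts
	 * against the accept backlog until it is accepted or rejected.
	 */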

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		struct rxrpc_call *xcall;

		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);
		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}
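
	/* (The insertion point had to be recomputed because call_lock was
	 * dropped to take the user mutex; the socket lock prevents the chosen
	 * user ID from being claimed by anyone else in the meantime.  The
	 * walk uses its own iterator so that the call being accepted isn't
	 * lost.)
	 */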

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
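
/* A minimal usage sketch for the API above (hypothetical kernel service; the
 * my_* names and the surrounding driver state are invented for illustration,
 * cf. afs_charge_preallocation() in fs/afs/rxrpc.c):
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		// The core hands us a ref on the call to keep; stash it in
 *		// our own per-call state, which we passed in as the ID.
 *		((struct my_call *)user_call_ID)->rxcall = rxcall;
 *	}
 *
 *	static void my_charge_preallocation(struct socket *rxrpc_socket)
 *	{
 *		// Keep pushing preallocated calls in until the backlog ring
 *		// is full (-ENOBUFS) or we run out of memory.
 *		for (;;) {
 *			struct my_call *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *			if (!c)
 *				break;
 *			if (rxrpc_kernel_charge_accept(rxrpc_socket,
 *					my_notify_rx, my_attach_call,
 *					(unsigned long)c, GFP_KERNEL,
 *					atomic_inc_return(&rxrpc_debug_id)) < 0) {
 *				kfree(c);
 *				break;
 *			}
 *		}
 *	}
 */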