Blame view
net/rxrpc/conn_client.c
28.8 KB
b4d0d230c treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
4a3388c80 rxrpc: Use IDR to... |
2 3 |
/* Client connection-specific management code. * |
245500d85 rxrpc: Rewrite th... |
4 |
* Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved. |
4a3388c80 rxrpc: Use IDR to... |
5 6 |
* Written by David Howells (dhowells@redhat.com) * |
45025bcee rxrpc: Improve ma... |
7 8 9 10 |
* Client connections need to be cached for a little while after they've made a * call so as to handle retransmitted DATA packets in case the server didn't * receive the final ACK or terminating ABORT we sent it. * |
45025bcee rxrpc: Improve ma... |
11 12 |
* There are flags of relevance to the cache: * |
45025bcee rxrpc: Improve ma... |
13 14 15 16 17 18 19 20 21 |
* (2) DONT_REUSE - The connection should be discarded as soon as possible and * should not be reused. This is set when an exclusive connection is used * or a call ID counter overflows. * * The caching state may only be changed if the cache lock is held. * * There are two idle client connection expiry durations. If the total number * of connections is below the reap threshold, we use the normal duration; if * it's above, we use the fast duration. |
4a3388c80 rxrpc: Use IDR to... |
22 23 24 25 26 27 28 |
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/idr.h> #include <linux/timer.h> |
174cd4b1e sched/headers: Pr... |
29 |
#include <linux/sched/signal.h> |
4a3388c80 rxrpc: Use IDR to... |
30 |
#include "ar-internal.h" |
45025bcee rxrpc: Improve ma... |
31 |
__read_mostly unsigned int rxrpc_reap_client_connections = 900; |
a158bdd32 rxrpc: Fix call t... |
32 33 |
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; |
45025bcee rxrpc: Improve ma... |
34 |
|
4a3388c80 rxrpc: Use IDR to... |
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 *
 * On success, conn->proto.{epoch,cid} are filled in, RXRPC_CONN_HAS_IDR is
 * set on conn->flags and 0 is returned; otherwise a negative error code from
 * idr_alloc_cyclic() is returned.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	/* Preload with the caller's gfp so the allocation under the spinlock
	 * can use GFP_NOWAIT without spuriously failing.
	 */
	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	/* Cyclic allocation keeps IDs clustered around the cursor; the upper
	 * bound of 0x40000000 leaves room for the channel number in the low
	 * bits of the CID after the shift below.
	 */
	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}

/*
 * Release a connection ID for a client connection from the global pool.
 */
001c11224 rxrpc: Maintain a... |
80 |
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn) |
4a3388c80 rxrpc: Use IDR to... |
81 82 83 84 85 86 87 88 |
{ if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) { spin_lock(&rxrpc_conn_id_lock); idr_remove(&rxrpc_client_conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT); spin_unlock(&rxrpc_conn_id_lock); } } |
eb9b9d227 rxrpc: Check that... |
89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 |
/* * Destroy the client connection ID tree. */ void rxrpc_destroy_client_conn_ids(void) { struct rxrpc_connection *conn; int id; if (!idr_is_empty(&rxrpc_client_conn_ids)) { idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { pr_err("AF_RXRPC: Leaked client conn %p {%d} ", conn, atomic_read(&conn->usage)); } BUG(); } idr_destroy(&rxrpc_client_conn_ids); } |
/*
 * Allocate a connection bundle.
 *
 * The connection parameters are copied by value, so the bundle takes its own
 * reference on the peer embedded in them.  The caller gets the initial usage
 * reference; drop it with rxrpc_put_bundle().
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->params = *cp;
		rxrpc_get_peer(bundle->params.peer);
		atomic_set(&bundle->usage, 1);
		spin_lock_init(&bundle->channel_lock);
		INIT_LIST_HEAD(&bundle->waiting_calls);
	}
	return bundle;
}

/*
 * Take an additional reference on a bundle.
 */
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->usage);
	return bundle;
}

/*
 * Drop a reference on a bundle; the last put releases the peer ref taken at
 * allocation and frees the bundle.
 */
void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
	unsigned int d = bundle->debug_id;
	unsigned int u = atomic_dec_return(&bundle->usage);

	_debug("PUT B=%x %u", d, u);
	if (u == 0) {
		rxrpc_put_peer(bundle->params.peer);
		kfree(bundle);
	}
}
/*
 * Allocate a client connection.
 *
 * Takes references on the bundle, peer, local endpoint and key, links the
 * connection into the namespace's proc list and bumps both connection
 * counters.  Returns the new connection or an ERR_PTR(); on error the
 * partially-built connection is unwound via the goto-cleanup ladder below.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	conn->bundle		= bundle;
	conn->params		= bundle->params;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= conn->params.service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* The connection holds refs on everything it copied out of the
	 * bundle's parameters.
	 */
	rxrpc_get_bundle(bundle);
	rxrpc_get_peer(conn->params.peer);
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

	/* Unwind in reverse order of construction. */
error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * Determine if a connection may be reused.
 *
 * A NULL conn is simply not reusable; a conn that has left the client state,
 * belongs to a stale epoch or whose ID has drifted too far from the ID
 * allocation cursor is marked DONT_REUSE so it gets discarded.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->params.local->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->state != RXRPC_CONN_CLIENT ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 *
 * Exclusive connections always get a fresh, unshared bundle.  Otherwise the
 * local endpoint's rbtree of bundles is searched under client_bundles_lock;
 * if no match is found, the lock is dropped, a candidate is allocated and
 * the search repeated before insertion in case another thread raced us.
 * The returned bundle carries a reference for the caller.
 */
static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
						 gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;

	_enter("{%px,%x,%u,%u}",
	       cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);

	if (cp->exclusive)
		return rxrpc_alloc_bundle(cp, gfp);

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

		/* Bundles are keyed on (peer, key, security_level, upgrade). */
#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(cp, gfp);
	if (!candidate)
		return NULL;

	/* Search again under the lock: someone may have inserted a matching
	 * bundle whilst we were allocating.
	 */
	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	rxrpc_get_bundle(candidate);	/* Ref for the tree; alloc ref goes to caller. */
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [new]", candidate->debug_id);
	return candidate;

found_bundle_free:
	kfree(candidate);	/* Lost the race; use the existing bundle. */
found_bundle:
	rxrpc_get_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [found]", bundle->debug_id);
	return bundle;
}
/*
 * Create or find a client bundle to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 *
 * Returns the bundle (with a ref held for the caller) or ERR_PTR(-ENOMEM) if
 * the peer lookup or bundle lookup fails.
 */
static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    struct rxrpc_conn_parameters *cp,
					    struct sockaddr_rxrpc *srx,
					    gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	/* Seed the call's congestion state from what we already know about
	 * this peer.
	 */
	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);

	/* Find the client connection bundle. */
	bundle = rxrpc_look_up_bundle(cp, gfp);
	if (!bundle)
		goto error;

	/* Get this call queued.  Someone else may activate it whilst we're
	 * lining up a new connection, but that's fine.
	 */
	spin_lock(&bundle->channel_lock);
	list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
	spin_unlock(&bundle->channel_lock);

	_leave(" = [B=%x]", bundle->debug_id);
	return bundle;

error:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
/*
 * Allocate a new connection and add it into a bundle.
 *
 * Called with bundle->channel_lock held; the lock is dropped across the
 * allocation (hence __releases) and is not held on return.  The
 * bundle->alloc_conn flag serialises allocators: if another thread is
 * already allocating, we just back off.
 */
static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
	__releases(bundle->channel_lock)
{
	struct rxrpc_connection *candidate = NULL, *old = NULL;
	bool conflict;
	int i;

	_enter("");

	/* Claim the allocator role (or discover someone else has it). */
	conflict = bundle->alloc_conn;
	if (!conflict)
		bundle->alloc_conn = true;
	spin_unlock(&bundle->channel_lock);
	if (conflict) {
		_leave(" [conf]");
		return;
	}

	/* Allocation may sleep, so it happens outside the lock. */
	candidate = rxrpc_alloc_client_connection(bundle, gfp);

	spin_lock(&bundle->channel_lock);
	bundle->alloc_conn = false;

	if (IS_ERR(candidate)) {
		/* Record the error for waiters to pick up. */
		bundle->alloc_error = PTR_ERR(candidate);
		spin_unlock(&bundle->channel_lock);
		_leave(" [err %ld]", PTR_ERR(candidate));
		return;
	}

	bundle->alloc_error = 0;

	/* Slot the new conn into the first free (or non-reusable) position,
	 * making all of its channels available.
	 */
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		unsigned int shift = i * RXRPC_MAXCALLS;
		int j;

		old = bundle->conns[i];
		if (!rxrpc_may_reuse_conn(old)) {
			if (old)
				trace_rxrpc_client(old, -1, rxrpc_client_replace);
			candidate->bundle_shift = shift;
			bundle->conns[i] = candidate;
			for (j = 0; j < RXRPC_MAXCALLS; j++)
				set_bit(shift + j, &bundle->avail_chans);
			candidate = NULL;
			break;
		}

		old = NULL;
	}

	spin_unlock(&bundle->channel_lock);

	/* If every slot was already usable, the new conn is surplus. */
	if (candidate) {
		_debug("discard C=%x", candidate->debug_id);
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
	}

	rxrpc_put_connection(old);	/* Drop the displaced conn's bundle ref (NULL-safe). */
	_leave("");
}
/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_call *call;
	int i, usable;

	_enter("");

	spin_lock(&bundle->channel_lock);

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;

	/* With no usable conn yet, the first waiting call decides whether we
	 * should probe for a service upgrade on the new connection.
	 */
	if (!usable && !list_empty(&bundle->waiting_calls)) {
		call = list_first_entry(&bundle->waiting_calls,
					struct rxrpc_call, chan_wait_link);
		if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
			bundle->try_upgrade = true;
	}

	if (!usable)
		goto alloc_conn;

	/* Add extra capacity if all channels are busy, calls are queued and
	 * there's still room for another connection.
	 */
	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    !list_empty(&bundle->waiting_calls) &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;

alloc_conn:
	/* Note: rxrpc_add_conn_to_bundle() releases the channel_lock. */
	return rxrpc_add_conn_to_bundle(bundle, gfp);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 *
 * Called with the bundle's channel_lock held.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->conn	= rxrpc_get_connection(conn);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->security	= conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	/* Paired with the read barrier in rxrpc_connect_call().  This orders
	 * cid and epoch in the connection wrt to call_id without the need to
	 * take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 *
 * Double-checked: the first list_empty() test is lockless, so the check is
 * repeated under the cache lock before the ref held by the idle list is
 * dropped.
 */
static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	bool drop_ref;

	if (!list_empty(&conn->cache_link)) {
		drop_ref = false;
		spin_lock(&rxnet->client_conn_cache_lock);
		if (!list_empty(&conn->cache_link)) {
			list_del_init(&conn->cache_link);
			drop_ref = true;
		}
		spin_unlock(&rxnet->client_conn_cache_lock);
		if (drop_ref)
			rxrpc_put_connection(conn);
	}
}
/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	/* Whilst probing for a service upgrade, only channel 0 of the first
	 * connection may be used.
	 */
	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		/* avail_chans is a flat bitmap; map the bit back to a conn
		 * slot and a per-conn channel number.
		 */
		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(bundle, conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	_enter("B=%x", bundle->debug_id);

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	/* Lockless fast path: nothing to hand out. */
	if (!bundle->avail_chans)
		return;

	spin_lock(&bundle->channel_lock);
	rxrpc_activate_channels_locked(bundle);
	spin_unlock(&bundle->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
				  struct rxrpc_call *call, gfp_t gfp)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	/* Non-blocking allocations can't sleep on the waitqueue: try once
	 * and report -EAGAIN (or the bundle's recorded allocation error).
	 */
	if (!gfpflags_allow_blocking(gfp)) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error ?: -EAGAIN;
		goto out;
	}

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error;
		if (ret < 0)
			break;

		/* The sleep mode depends on the call's interruptibility
		 * setting; the state must be set before re-checking the
		 * condition to avoid missing a wakeup.
		 */
		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret = 0;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	/* Opportunistically reap idle conns before looking for one. */
	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);

	bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
	if (IS_ERR(bundle)) {
		ret = PTR_ERR(bundle);
		goto out;
	}

	if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = rxrpc_wait_for_channel(bundle, call, gfp);
		if (ret < 0)
			goto wait_failed;
	}

granted_channel:
	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out_put_bundle:
	rxrpc_put_bundle(bundle);	/* Drop the ref from rxrpc_prep_call(). */
out:
	_leave(" = %d", ret);
	return ret;

wait_failed:
	spin_lock(&bundle->channel_lock);
	list_del_init(&call->chan_wait_link);
	spin_unlock(&bundle->channel_lock);

	/* A channel may have been granted whilst we were giving up; if so,
	 * the wait didn't really fail.
	 */
	if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = 0;
		goto granted_channel;
	}

	trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	rxrpc_disconnect_client_call(bundle, call);
	goto out_put_bundle;
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
	}
}
3d18cbb7f rxrpc: Fix conn e... |
752 753 754 755 |
* Set the reap timer. */ static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) { |
245500d85 rxrpc: Rewrite th... |
756 757 758 |
if (!rxnet->kill_all_client_conns) { unsigned long now = jiffies; unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; |
3d18cbb7f rxrpc: Fix conn e... |
759 |
|
245500d85 rxrpc: Rewrite th... |
760 761 762 |
if (rxnet->live) timer_reduce(&rxnet->client_conn_reap_timer, reap_at); } |
3d18cbb7f rxrpc: Fix conn e... |
763 764 765 |
} /* |
 * Disconnect a client call.
 *
 * Releases the call's channel (if it was ever assigned one) back to the
 * bundle, possibly handing the channel straight to a waiting call, and may
 * move the now-idle connection onto the per-net idle list for reaping.
 * Takes and releases bundle->channel_lock; all channel state is manipulated
 * under that lock.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	spin_lock(&bundle->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);
		goto out;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	/* The channel must still be pointing at this call; anything else
	 * indicates broken channel bookkeeping.
	 */
	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&bundle->channel_lock);
		BUG();
	}

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		/* An upgrade probe is resolved once its call completes; other
		 * channels may now be activated on this bundle.
		 */
		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels_locked(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	rcu_assign_pointer(chan->call, NULL);
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn);
		spin_lock(&rxnet->client_conn_cache_lock);
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		spin_unlock(&rxnet->client_conn_cache_lock);

		rxrpc_set_client_reap_timer(rxnet);
	}

out:
	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;
}
/*
 * Remove a connection from a bundle.
 *
 * Clears the connection's slot in the bundle (dropping the ref the slot
 * held) and, if the bundle has no available channels and no remaining
 * connections, erases a non-exclusive bundle from the local endpoint's
 * rbtree and drops the tree's ref on it.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_local *local = bundle->params.local;
	unsigned int bindex;
	bool need_drop = false, need_put = false;
	int i;

	_enter("C=%x", conn->debug_id);

	/* Flush out any final ACKs still pending on this conn's channels. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	spin_lock(&bundle->channel_lock);
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		need_drop = true;
	}
	spin_unlock(&bundle->channel_lock);

	/* If there are no more connections, remove the bundle */
	if (!bundle->avail_chans) {
		_debug("maybe unbundle");
		spin_lock(&local->client_bundles_lock);

		for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
			if (bundle->conns[i])
				break;
		if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		/* Drop the tree's ref outside the lock. */
		if (need_put)
			rxrpc_put_bundle(bundle);
	}

	/* Drop the ref the bundle slot held on the connection. */
	if (need_drop)
		rxrpc_put_connection(conn);
	_leave("");
}
/*
 * Clean up a dead client connection.
 */
static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	/* Return the connection ID to the global pool before destruction. */
	rxrpc_put_client_connection_id(conn);
	rxrpc_kill_connection(conn);
}

/*
 * Drop a ref on a client connection, cleaning it up when the last ref goes.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
	if (n <= 0) {
		/* A negative count indicates an over-put. */
		ASSERTCMP(n, >=, 0);
		rxrpc_kill_client_conn(conn);
	}
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* Quick unlocked peek; an empty list means nothing to reap. */
	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&rxnet->nr_client_conns);

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	/* The list is in idle order, so the oldest conn is at the front. */
	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* Unbundle and unref outside the cache lock. */
	rxrpc_unbundle_conn(conn);
	rxrpc_put_connection(conn);	/* Drop the ->cache_link ref */

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	/* Set under the cache lock so the reaper sees a consistent flag. */
	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	/* Stop the timer rearming, then let the reaper work item do the
	 * actual discarding (kill_all_client_conns makes it take everything).
	 */
	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}
/*
 * Clean up the client connections on a local endpoint.
 *
 * Moves every idle conn belonging to @local onto a private graveyard list
 * under the cache lock, then unbundles and unrefs each one outside the lock.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			list_move(&conn->cache_link, &graveyard);
		}
	}

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* Dispose of the culled conns without holding the cache lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}