Blame view

net/rxrpc/conn_client.c 32.2 KB
4a3388c80   David Howells   rxrpc: Use IDR to...
1
2
3
4
5
6
7
8
9
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *	The connection is on the rxrpc_waiting_client_conns list which is kept
 *	in to-be-granted order.  Culled conns with waiters go to the back of
 *	the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *	The connection is on the rxnet->active_client_conns list which is kept
 *	in activation order for culling purposes.
 *
 *	rxrpc_nr_active_client_conns is held incremented also.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *	The connection is on the rxnet->idle_client_conns list which is kept in
 *	order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call immediately we think it is done
 *      with.  It also give us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  
  #include <linux/slab.h>
  #include <linux/idr.h>
  #include <linux/timer.h>
174cd4b1e   Ingo Molnar   sched/headers: Pr...
82
  #include <linux/sched/signal.h>
4a3388c80   David Howells   rxrpc: Use IDR to...
83
  #include "ar-internal.h"
45025bcee   David Howells   rxrpc: Improve ma...
84
85
86
87
/* Maximum number of client connections that may be active at once. */
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
/* Count above which idle conns are reaped on the fast schedule. */
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
/* Normal expiry time for an idle client connection (jiffies). */
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
/* Fast expiry time, used when above the reap threshold (jiffies). */
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
4a3388c80   David Howells   rxrpc: Use IDR to...
88
89
90
91
92
/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
/* Serialises allocation/removal of entries in rxrpc_client_conn_ids. */
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
45025bcee   David Howells   rxrpc: Improve ma...
94

4a3388c80   David Howells   rxrpc: Use IDR to...
95
96
97
/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 *
 * Returns 0 on success or a negative error code if no ID could be allocated.
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	/* Preload so the allocation under the spinlock can use GFP_NOWAIT
	 * without failing spuriously.
	 */
	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	/* Cyclic allocation in [1, 0x40000000) avoids immediately reusing a
	 * recently freed ID.
	 */
	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	/* Mark that an IDR entry exists so put can know to remove it. */
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
  
/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	/* Only conns that passed rxrpc_get_client_connection_id() have an
	 * IDR entry to remove.
	 */
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}
eb9b9d227   David Howells   rxrpc: Check that...
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
  
  /*
   * Destroy the client connection ID tree.
   */
  void rxrpc_destroy_client_conn_ids(void)
  {
  	struct rxrpc_connection *conn;
  	int id;
  
  	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
  		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
  			pr_err("AF_RXRPC: Leaked client conn %p {%d}
  ",
  			       conn, atomic_read(&conn->usage));
  		}
  		BUG();
  	}
  
  	idr_destroy(&rxrpc_client_conn_ids);
  }
c6d2b8d76   David Howells   rxrpc: Split clie...
163
164
  
/*
 * Allocate a client connection.
 *
 * On success the caller's ref on cp->peer is stolen by the connection
 * (cp->peer is cleared) and refs are taken on the local endpoint and key.
 * Returns the new connection or an ERR_PTR().
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	/* An exclusive conn is used for one call only, then discarded. */
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	/* Make the conn visible in /proc. */
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

	/* Unwind in reverse order of acquisition. */
error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
  
/*
 * Determine if a connection may be reused.
 *
 * Returns true if the conn can host another call; false if it is marked
 * DONT_REUSE, is from a stale epoch, or its ID has drifted too far from the
 * IDR allocation cursor (in the latter two cases DONT_REUSE is also set).
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	/* A conn from an earlier epoch can never be reused. */
	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
  
/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 *
 * Returns 0 on success (call->conn set) or a negative error code.
 */
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	/* Seed the call's congestion state from the peer's cached cwnd. */
	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	/* An exclusive conn is never published in the tree. */
	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}
c6d2b8d76   David Howells   rxrpc: Split clie...
429

45025bcee   David Howells   rxrpc: Improve ma...
430
431
432
/*
 * Activate a connection.
 *
 * Called with rxnet->client_conn_cache_lock held (see the call site in
 * rxrpc_animate_client_conn()).
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	/* A conn probing for service upgrade goes to UPGRADE rather than
	 * ACTIVE; otherwise it becomes fully ACTIVE.
	 */
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	/* The active list is kept in activation order for culling. */
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}
  
/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	/* Already live - nothing to do (checked again under the lock via the
	 * switch below if we miss here).
	 */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	/* Count the conn exactly once against the capacity limit. */
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	/* Queue in to-be-granted order behind existing waiters. */
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}
  
  /*
   * Deactivate a channel.
   */
  static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
  					 unsigned int channel)
  {
  	struct rxrpc_channel *chan = &conn->channels[channel];
  
  	rcu_assign_pointer(chan->call, NULL);
  	conn->active_chans &= ~(1 << channel);
  }
  
/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	/* The first waiter gets the channel. */
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	/* Provisional callNumber; chan->call_counter itself is bumped later,
	 * when the call is exposed.
	 */
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	write_lock_bh(&call->state_lock);
	/* If the Tx phase already queued its last packet, skip straight to
	 * awaiting the reply.
	 */
	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	else
		call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}
  
/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Whilst probing for upgrade, only channel 0 may be used. */
		mask = 0x01;
		break;
	default:
		/* Not in a state that may grant channels. */
		return;
	}

	/* Hand out free channels (lowest first) while there are waiters. */
	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
  
  /*
45025bcee   David Howells   rxrpc: Improve ma...
595
596
597
598
   * Assign channels and callNumbers to waiting calls.
   */
  static void rxrpc_activate_channels(struct rxrpc_connection *conn)
  {
45025bcee   David Howells   rxrpc: Improve ma...
599
  	_enter("%d", conn->debug_id);
363deeab6   David Howells   rxrpc: Add connec...
600
  	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);
2629c7fa7   David Howells   rxrpc: When activ...
601
  	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
45025bcee   David Howells   rxrpc: Improve ma...
602
603
604
  		return;
  
  	spin_lock(&conn->channel_lock);
2629c7fa7   David Howells   rxrpc: When activ...
605
  	rxrpc_activate_channels_locked(conn);
45025bcee   David Howells   rxrpc: Improve ma...
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
  	spin_unlock(&conn->channel_lock);
  	_leave("");
  }
  
/*
 * Wait for a callNumber and a channel to be granted to a call.
 *
 * Returns 0 once call->call_id is set, -EAGAIN if gfp forbids sleeping and
 * no channel has been granted yet, or -ERESTARTSYS if interrupted by a
 * signal.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		/* Can't sleep in atomic context - let the caller retry. */
		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		/* Exclusive wait: one waiter per wakeup from
		 * rxrpc_activate_one_channel().
		 */
		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}
  
  /*
   * find a connection for a call
   * - called in process context with IRQs enabled
   */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	/* Do some cache maintenance first so that there's room for the new
	 * connection: reap expired idle conns and cull excess active ones.
	 */
	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
	rxrpc_cull_active_client_conns(rxnet);

	/* Find or create a connection and attach the call to it. */
	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	/* Make the connection active and try to grant channels to waiters,
	 * then wait for this call to be given a channel and callNumber.
	 */
	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	/* Add the call to the peer's error-notification targets. */
	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}
  
  /*
   * Note that a connection is about to be exposed to the world.  Once it is
   * exposed, we maintain an extra ref on it that stops it from being summarily
   * discarded before it's (a) had a chance to deal with retransmission and (b)
   * had a chance at re-use (the per-connection security negotiation is
   * expensive).
   */
363deeab6   David Howells   rxrpc: Add connec...
694
695
  static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
  				     unsigned int channel)
45025bcee   David Howells   rxrpc: Improve ma...
696
  {
363deeab6   David Howells   rxrpc: Add connec...
697
698
  	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
  		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
45025bcee   David Howells   rxrpc: Improve ma...
699
  		rxrpc_get_connection(conn);
363deeab6   David Howells   rxrpc: Add connec...
700
  	}
45025bcee   David Howells   rxrpc: Improve ma...
701
702
703
704
705
706
707
708
  }
  
  /*
   * Note that a call, and thus a connection, is about to be exposed to the
   * world.
   */
  void rxrpc_expose_client_call(struct rxrpc_call *call)
  {
363deeab6   David Howells   rxrpc: Add connec...
709
  	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
45025bcee   David Howells   rxrpc: Improve ma...
710
  	struct rxrpc_connection *conn = call->conn;
363deeab6   David Howells   rxrpc: Add connec...
711
  	struct rxrpc_channel *chan = &conn->channels[channel];
45025bcee   David Howells   rxrpc: Improve ma...
712
713
714
715
716
717
718
719
720
721
  
  	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
  		/* Mark the call ID as being used.  If the callNumber counter
  		 * exceeds ~2 billion, we kill the connection after its
  		 * outstanding calls have finished so that the counter doesn't
  		 * wrap.
  		 */
  		chan->call_counter++;
  		if (chan->call_counter >= INT_MAX)
  			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
363deeab6   David Howells   rxrpc: Add connec...
722
  		rxrpc_expose_client_conn(conn, channel);
45025bcee   David Howells   rxrpc: Improve ma...
723
724
725
726
727
728
729
730
731
732
733
  	}
  }
  
  /*
   * Disconnect a client call.
   */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	/* Break the call's pointer to the connection; the conn ref the call
	 * held is dropped at the end of this function.
	 */
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	/* The call was assigned a channel, so that channel must point at it. */
	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				/* Last active channel gone: retire the conn
				 * from the active count and idle it.
				 */
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		/* A call is waiting: hand the freed channel straight over. */
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		/* If this conn is now at the head of the idle queue, (re)arm
		 * the reaper for it - unless everything is being killed off.
		 */
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			queue_delayed_work(rxrpc_workqueue,
					   &rxnet->client_conn_reaper,
					   rxrpc_conn_idle_client_expiry);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	/* Remove the conn from the local endpoint's lookup tree if it's still
	 * in it (rechecked under the lock to avoid a double-erase race).
	 */
	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		/* A slot just became free; promote the first waiting conn, if
		 * any, taking a temporary ref on it for the caller to drop.
		 */
		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}
  
/*
 * Clean up dead client connections.
 */
  void rxrpc_put_client_conn(struct rxrpc_connection *conn)
  {
363deeab6   David Howells   rxrpc: Add connec...
912
913
  	const void *here = __builtin_return_address(0);
  	int n;
45025bcee   David Howells   rxrpc: Improve ma...
914
915
  
  	do {
363deeab6   David Howells   rxrpc: Add connec...
916
917
918
919
920
921
922
923
  		n = atomic_dec_return(&conn->usage);
  		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
  		if (n > 0)
  			return;
  		ASSERTCMP(n, >=, 0);
  
  		conn = rxrpc_put_one_client_conn(conn);
  	} while (conn);
45025bcee   David Howells   rxrpc: Improve ma...
924
925
926
927
928
  }
  
  /*
   * Kill the longest-active client connections to make room for new ones.
   */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		/* The active list is kept in activation order, so the front
		 * entry is the longest-active connection.
		 */
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			/* Nothing is waiting: drop it from the cache lists. */
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			/* Calls still want channels: requeue it at the back of
			 * the waiting list, like a new connection.
			 */
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}
  
  /*
   * Discard expired client connections from the idle list.  Each conn in the
   * idle list has been exposed and holds an extra ref because of that.
   *
   * This may be called from conn setup or from a work item so cannot be
   * considered non-reentrant.
   */
2baec2c3f   David Howells   rxrpc: Support ne...
979
  void rxrpc_discard_expired_client_conns(struct work_struct *work)
45025bcee   David Howells   rxrpc: Improve ma...
980
981
  {
  	struct rxrpc_connection *conn;
2baec2c3f   David Howells   rxrpc: Support ne...
982
983
984
  	struct rxrpc_net *rxnet =
  		container_of(to_delayed_work(work),
  			     struct rxrpc_net, client_conn_reaper);
45025bcee   David Howells   rxrpc: Improve ma...
985
986
987
  	unsigned long expiry, conn_expires_at, now;
  	unsigned int nr_conns;
  	bool did_discard = false;
2baec2c3f   David Howells   rxrpc: Support ne...
988
  	_enter("");
45025bcee   David Howells   rxrpc: Improve ma...
989

2baec2c3f   David Howells   rxrpc: Support ne...
990
  	if (list_empty(&rxnet->idle_client_conns)) {
45025bcee   David Howells   rxrpc: Improve ma...
991
992
993
994
995
  		_leave(" [empty]");
  		return;
  	}
  
  	/* Don't double up on the discarding */
2baec2c3f   David Howells   rxrpc: Support ne...
996
  	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
45025bcee   David Howells   rxrpc: Improve ma...
997
998
999
1000
1001
1002
1003
  		_leave(" [already]");
  		return;
  	}
  
  	/* We keep an estimate of what the number of conns ought to be after
  	 * we've discarded some so that we don't overdo the discarding.
  	 */
2baec2c3f   David Howells   rxrpc: Support ne...
1004
  	nr_conns = rxnet->nr_client_conns;
45025bcee   David Howells   rxrpc: Improve ma...
1005
1006
  
  next:
2baec2c3f   David Howells   rxrpc: Support ne...
1007
  	spin_lock(&rxnet->client_conn_cache_lock);
45025bcee   David Howells   rxrpc: Improve ma...
1008

2baec2c3f   David Howells   rxrpc: Support ne...
1009
  	if (list_empty(&rxnet->idle_client_conns))
45025bcee   David Howells   rxrpc: Improve ma...
1010
  		goto out;
2baec2c3f   David Howells   rxrpc: Support ne...
1011
  	conn = list_entry(rxnet->idle_client_conns.next,
45025bcee   David Howells   rxrpc: Improve ma...
1012
1013
  			  struct rxrpc_connection, cache_link);
  	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
2baec2c3f   David Howells   rxrpc: Support ne...
1014
  	if (!rxnet->kill_all_client_conns) {
45025bcee   David Howells   rxrpc: Improve ma...
1015
1016
1017
1018
1019
1020
1021
1022
  		/* If the number of connections is over the reap limit, we
  		 * expedite discard by reducing the expiry timeout.  We must,
  		 * however, have at least a short grace period to be able to do
  		 * final-ACK or ABORT retransmission.
  		 */
  		expiry = rxrpc_conn_idle_client_expiry;
  		if (nr_conns > rxrpc_reap_client_connections)
  			expiry = rxrpc_conn_idle_client_fast_expiry;
1392633ba   David Howells   rxrpc: Fix servic...
1023
1024
  		if (conn->params.local->service_closed)
  			expiry = rxrpc_closed_conn_expiry * HZ;
45025bcee   David Howells   rxrpc: Improve ma...
1025
1026
1027
1028
1029
1030
1031
  
  		conn_expires_at = conn->idle_timestamp + expiry;
  
  		now = READ_ONCE(jiffies);
  		if (time_after(conn_expires_at, now))
  			goto not_yet_expired;
  	}
363deeab6   David Howells   rxrpc: Add connec...
1032
  	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
45025bcee   David Howells   rxrpc: Improve ma...
1033
1034
1035
1036
  	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
  		BUG();
  	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
  	list_del_init(&conn->cache_link);
2baec2c3f   David Howells   rxrpc: Support ne...
1037
  	spin_unlock(&rxnet->client_conn_cache_lock);
45025bcee   David Howells   rxrpc: Improve ma...
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
  
  	/* When we cleared the EXPOSED flag, we took on responsibility for the
  	 * reference that that had on the usage count.  We deal with that here.
  	 * If someone re-sets the flag and re-gets the ref, that's fine.
  	 */
  	rxrpc_put_connection(conn);
  	did_discard = true;
  	nr_conns--;
  	goto next;
  
  not_yet_expired:
  	/* The connection at the front of the queue hasn't yet expired, so
  	 * schedule the work item for that point if we discarded something.
  	 *
  	 * We don't worry if the work item is already scheduled - it can look
  	 * after rescheduling itself at a later time.  We could cancel it, but
  	 * then things get messier.
  	 */
  	_debug("not yet");
2baec2c3f   David Howells   rxrpc: Support ne...
1057
  	if (!rxnet->kill_all_client_conns)
45025bcee   David Howells   rxrpc: Improve ma...
1058
  		queue_delayed_work(rxrpc_workqueue,
2baec2c3f   David Howells   rxrpc: Support ne...
1059
  				   &rxnet->client_conn_reaper,
45025bcee   David Howells   rxrpc: Improve ma...
1060
1061
1062
  				   conn_expires_at - now);
  
  out:
2baec2c3f   David Howells   rxrpc: Support ne...
1063
1064
  	spin_unlock(&rxnet->client_conn_cache_lock);
  	spin_unlock(&rxnet->client_conn_discard_lock);
45025bcee   David Howells   rxrpc: Improve ma...
1065
1066
1067
1068
1069
1070
1071
  	_leave("");
  }
  
  /*
   * Preemptively destroy all the client connection records rather than waiting
   * for them to time out
   */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	/* Tell the reaper to discard all conns regardless of expiry time. */
	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	cancel_delayed_work(&rxnet->client_conn_reaper);

	/* Kick the reaper to run immediately. */
	if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
		_debug("destroy: queue failed");

	_leave("");
}