net/rds/connection.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/inet_hashtables.h>

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
{
	static u32 rds_hash_secret __read_mostly;

	unsigned long hash;

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));

	/* Pass NULL, don't need struct net for hash */
	hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
			      be32_to_cpu(faddr), 0,
			      rds_hash_secret);
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      __be32 laddr, __be32 faddr,
					      struct rds_transport *trans)
{
	struct rds_connection *conn, *ret = NULL;

	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
		    conn->c_trans == trans && net == rds_conn_net(conn)) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI4 -> %pI4\n", ret,
		 &laddr, &faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI4 to %pI4 reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}
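
/* Set up one rds_conn_path: its lock, sequence numbers, send/retrans
 * queues and the work items that drive send, receive, connect and
 * shutdown for this path.
 */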
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}

/*
 * There is only ever one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int is_outgoing)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans);
	if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
	    laddr == faddr && !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = laddr;
	conn->c_faddr = faddr;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	ret = trans->conn_alloc(conn, gfp);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
		 conn, &laddr, &faddr,
		 trans->t_name ? trans->t_name : "[unknown]",
		 is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resource for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);

out:
	return conn;
}
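
/* rds_conn_create() and rds_conn_create_outgoing() are thin wrappers
 * around __rds_conn_create(); they differ only in the is_outgoing flag
 * they pass down.
 */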
struct rds_connection *rds_conn_create(struct net *net,
				       __be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_create);

struct rds_connection *rds_conn_create_outgoing(struct net *net,
						__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
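
/* Shut down a single connection path: quiesce the connection management
 * handlers, wait for in-flight send/recv work to drain, let the transport
 * tear the path down, then reset it and queue a reconnect if the
 * connection is still hashed.
 */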
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state change on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * here.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	if (conn->c_destroy_in_prog)
		return;
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

/*
 * Stop and free a connection.
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
 */
void rds_conn_destroy(struct rds_connection *conn)
{
	unsigned long flags;
	int i;
	struct rds_conn_path *cp;
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

	conn->c_destroy_in_prog = 1;
	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
	hlist_del_init_rcu(&conn->c_hash_node);
	spin_unlock_irq(&rds_conn_lock);
	synchronize_rcu();

	/* shut the connection down */
	for (i = 0; i < npaths; i++) {
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
	}

	/*
	 * The congestion maps aren't freed up here. They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

	kfree(conn->c_path);
	kmem_cache_free(rds_conn_slab, conn);

	spin_lock_irqsave(&rds_conn_lock, flags);
	rds_conn_count--;
	spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
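
/* Dump the messages sitting on each path's send or retransmit queue to
 * the rds-info iterator; want_send selects between the two queues.
 */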
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	len /= sizeof(struct rds_info_message);
	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			npaths = (conn->c_trans->t_mp_capable ?
				 RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						rds_inc_info_copy(&rm->m_inc,
								  iter,
								  conn->c_laddr,
								  conn->c_faddr,
								  0);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();

	lens->nr = total;
	lens->each = sizeof(struct rds_info_message);
}

static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  size_t item_len)
{
	uint64_t buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
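
/* Like rds_for_each_conn_info(), but the visitor is invoked once per
 * rds_conn_path rather than once per connection.
 */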
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    size_t item_len)
{
	u64 buffer[(item_len + 7) / 8];
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;
	int j;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			npaths = (conn->c_trans->t_mp_capable ?
				 RDS_MPATH_WORKERS : 1);
			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];

				/* XXX no cp_lock usage.. */
				if (!visitor(cp, buffer))
					continue;
			}

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
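
/* Fill in one struct rds_info_connection for a connection path; always
 * returns 1 so the caller copies the entry out.
 */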
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	cinfo->laddr = cp->cp_conn->c_laddr;
	cinfo->faddr = cp->cp_conn->c_faddr;
	strncpy(cinfo->transport, cp->cp_conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				sizeof(struct rds_info_connection));
}
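
/* Module setup/teardown: rds_conn_init() creates the rds_connection slab
 * and registers the rds-info handlers; rds_conn_exit() tears them down.
 */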
int rds_conn_init(void)
{
	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab)
		return -ENOMEM;

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);

	return 0;
}

void rds_conn_exit(void)
{
	rds_loop_exit();

	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

	if (!destroy && cp->cp_conn->c_destroy_in_prog)
		return;

	queue_work(rds_wq, &cp->cp_down_w);
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);
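
/* Single-path variant of rds_conn_path_drop(); only valid for transports
 * that are not multipath capable.
 */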
void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);

/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
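
/* Single-path wrapper around rds_conn_path_connect_if_down() for
 * transports that are not multipath capable.
 */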
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
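
/* Log a connection path error and drop the path so that the shutdown
 * worker gets queued.
 */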
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}