Blame view
net/rxrpc/local_object.c
12.4 KB
b4d0d230c
|
1 |
// SPDX-License-Identifier: GPL-2.0-or-later |
875636163
|
2 |
/* Local endpoint object management |
17926a793
|
3 |
* |
4f95dd78a
|
4 |
* Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. |
17926a793
|
5 |
* Written by David Howells (dhowells@redhat.com) |
17926a793
|
6 |
*/ |
9b6d53985
|
7 |
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
17926a793
|
8 9 10 |
#include <linux/module.h> #include <linux/net.h> #include <linux/skbuff.h> |
5a0e3ad6a
|
11 |
#include <linux/slab.h> |
44ba06987
|
12 13 |
#include <linux/udp.h> #include <linux/ip.h> |
4f95dd78a
|
14 |
#include <linux/hashtable.h> |
17926a793
|
15 |
#include <net/sock.h> |
5271953ca
|
16 |
#include <net/udp.h> |
17926a793
|
17 18 |
#include <net/af_rxrpc.h> #include "ar-internal.h" |
4f95dd78a
|
19 20 |
static void rxrpc_local_processor(struct work_struct *); static void rxrpc_local_rcu(struct rcu_head *); |
17926a793
|
21 |
|
17926a793
|
22 |
/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	/* Order by transport type, then address length, then address family;
	 * the ?: chain yields the first non-zero difference.
	 */
	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		/* The families were already matched above, so anything else
		 * indicates an endpoint that should never have been created.
		 */
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		/* One reference for the caller plus one active user; both are
		 * disposed of by the caller/user lifecycle functions below.
		 */
		atomic_set(&local->usage, 1);
		atomic_set(&local->active_users, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		/* The service ID is not part of the endpoint identity (see
		 * rxrpc_local_cmp_key), so clear it from the stored copy.
		 */
		local->srx.srx_service = 0;
		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *usk;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* set the socket up */
	usk = local->socket->sk;
	inet_sk(usk)->mc_loop = 0;

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(usk);

	/* Attach the local endpoint as the socket's user data so that the
	 * encap_rcv hook below can find it; published with RCU semantics.
	 */
	rcu_assign_sk_user_data(usk, local);

	/* Divert incoming UDP packets into rxrpc_input_packet() via the UDP
	 * encapsulation hooks rather than letting them hit the UDP rx path.
	 */
	udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
	udp_sk(usk)->encap_rcv = rxrpc_input_packet;
	udp_sk(usk)->encap_destroy = NULL;
	udp_sk(usk)->gro_receive = NULL;
	udp_sk(usk)->gro_complete = NULL;

	udp_encap_enable();
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	if (local->srx.transport.family == AF_INET6)
		udpv6_encap_enable();
#endif
	usk->sk_error_report = rxrpc_error_report;

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* Fall through and set IPv4 options too otherwise we don't get
		 * errors from IPv4 packets sent through the IPv6 socket.
		 */
		/* Fall through */
	case AF_INET:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* We want receive timestamps. */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
					(char *)&opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	_leave(" = 0");
	return 0;

error:
	/* Undo the socket setup; the caller frees the local endpoint record
	 * itself.  Returns the negative errno from the failing call.
	 */
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	/* The endpoint list is kept sorted by rxrpc_local_cmp_key(), so we
	 * can stop walking at the first entry that compares greater.
	 */
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_use_local(local))
			break;

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	/* If we stopped on a matching-but-unusable (dying) entry, replace it
	 * in the list; otherwise insert the new endpoint at the cursor to
	 * keep the list sorted.
	 */
	if (cursor != &rxnet->local_endpoints)
		list_replace_init(cursor, &local->link);
	else
		list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	/* Success: the caller receives a referenced, usable endpoint. */
	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	/* A freshly-allocated endpoint whose socket failed to open must still
	 * be disposed of via RCU as it was never published.
	 */
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
		/* Only take a ref if the count hasn't already hit zero (in
		 * which case the object is on its way to destruction).
		 */
		int n = atomic_fetch_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
					  n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint and pass the caller's reference to the work item.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = local->debug_id;
	int n = atomic_read(&local->usage);

	/* If the work item was already queued, the caller's ref is surplus
	 * and must be dropped here instead of being passed along.
	 */
	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
	else
		rxrpc_put_local(local);
}
/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id;
	int n;

	if (local) {
		/* Snapshot the debug ID before the decrement as the object
		 * may be freed once the last ref is gone.
		 */
		debug_id = local->debug_id;

		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(debug_id, rxrpc_local_put, n, here);

		/* Last ref gone: defer freeing until after the RCU grace
		 * period so concurrent RCU readers stay safe.
		 */
		if (n == 0)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}
/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
	/* Take a ref first; if the endpoint is already dead this fails and
	 * we return NULL without touching the active-user count.
	 */
	local = rxrpc_get_local_maybe(local);
	if (!local)
		return NULL;

	if (!__rxrpc_use_local(local)) {
		rxrpc_put_local(local);
		return NULL;
	}

	return local;
}

/*
 * Cease using a local endpoint.  Once the number of active users reaches 0, we
 * start the closure of the transport in the work processor.
 */
void rxrpc_unuse_local(struct rxrpc_local *local)
{
	if (local) {
		/* If this was the last active user, queue the processor to
		 * tear the endpoint down; the extra ref taken here is handed
		 * to the work item by rxrpc_queue_local().
		 */
		if (__rxrpc_unuse_local(local)) {
			rxrpc_get_local(local);
			rxrpc_queue_local(local);
		}
	}
}
/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	local->dead = true;

	/* Unpublish the endpoint so that rxrpc_lookup_local() can no longer
	 * find it.
	 */
	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	rxrpc_clean_up_local_conns(local);
	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);
}
/*
 * Process events on an endpoint.  The work item carries a ref which
 * we must release.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
		/* If there are no active users left, this work item is the
		 * one responsible for tearing the endpoint down.
		 */
		if (!__rxrpc_use_local(local)) {
			rxrpc_local_destroyer(local);
			break;
		}

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}

		/* Loop again if either queue produced work, in case more
		 * packets arrived while we were processing.
		 */
		__rxrpc_unuse_local(local);
	} while (again);

	/* Release the ref that was passed to the work item. */
	rxrpc_put_local(local);
}
/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	/* The processor work item must have completed by the time the last
	 * ref was dropped, otherwise we'd be freeing under it.
	 */
	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}
/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	/* Let any in-flight processor work items finish first. */
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			/* NOTE(review): format string reproduced as extracted;
			 * it may be missing a trailing "\n" - confirm against
			 * upstream.
			 */
			pr_err("AF_RXRPC: Leaked local %p {%d} ",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		/* Leaked endpoints at net-namespace teardown are fatal. */
		BUG();
	}
}