Blame view
net/rxrpc/conn_service.c
5.89 KB
7877a4a4b rxrpc: Split serv... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
/* Service connection management * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/slab.h> #include "ar-internal.h" /* |
8496af50e rxrpc: Use RCU to... |
16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function.  So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection.  Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						     struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	/* The lookup key is the { epoch, cid } pair taken from the packet
	 * header; index_key presumably overlays both fields in
	 * struct rxrpc_conn_proto so a single compare suffices — see
	 * ar-internal.h to confirm.
	 */
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.  seq == 0 makes the first pass lockless; on retry
		 * the seqlock is actually taken.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		/* _raw dereferences: consistency is guaranteed by the seqlock
		 * retry loop rather than by RCU itself.
		 */
		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
			/* Not a match at this node; clear so that a walk that
			 * bottoms out returns NULL.
			 */
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
248f219cb rxrpc: Rewrite th... |
66 67 |
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	/* The write side of the seqlock forces lockless readers in
	 * rxrpc_find_service_conn_rcu() to retry if they race with us.
	 */
	write_seqlock_bh(&peer->service_conn_lock);

	/* Standard rbtree insertion walk, keyed on proto.index_key. */
	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* _rcu link so concurrent RCU readers never see a half-linked node. */
	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);

conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (atomic_read(&cursor->usage) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here.  rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch.  Swap the nodes in
	 * place (RCU-safe) rather than erase-then-insert so readers never see
	 * a window with neither connection present.
	 */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection.  The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
2baec2c3f rxrpc: Support ne... |
122 123 |
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							   gfp_t gfp)
{
	struct rxrpc_connection *conn;

	conn = rxrpc_alloc_connection(gfp);
	if (!conn)
		return NULL;

	/* Usage starts at 2: the extra ref is held for as long as the
	 * connection sits on the rxrpc_connections list.
	 */
	conn->state = RXRPC_CONN_SERVICE_PREALLOC;
	atomic_set(&conn->usage, 2);

	/* Hook it onto both the service and proc lists in one locked pass. */
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->link, &rxnet->service_conns);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	trace_rxrpc_conn(conn, rxrpc_conn_new_service,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));

	return conn;
}

/*
 * Set up an incoming connection.  This is called in BH context with the RCU
 * read lock held.
 */
4722974d9 rxrpc: Implement ... |
150 151 |
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, struct rxrpc_connection *conn, |
248f219cb rxrpc: Rewrite th... |
152 |
struct sk_buff *skb) |
7877a4a4b rxrpc: Split serv... |
153 |
{ |
7877a4a4b rxrpc: Split serv... |
154 |
struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
7877a4a4b rxrpc: Split serv... |
155 156 |
_enter(""); |
8496af50e rxrpc: Use RCU to... |
157 158 |
conn->proto.epoch = sp->hdr.epoch; conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK; |
8496af50e rxrpc: Use RCU to... |
159 |
conn->params.service_id = sp->hdr.serviceId; |
68d6d1ae5 rxrpc: Separate t... |
160 |
conn->service_id = sp->hdr.serviceId; |
8496af50e rxrpc: Use RCU to... |
161 162 |
conn->security_ix = sp->hdr.securityIndex; conn->out_clientflag = 0; |
248f219cb rxrpc: Rewrite th... |
163 |
if (conn->security_ix) |
8496af50e rxrpc: Use RCU to... |
164 |
conn->state = RXRPC_CONN_SERVICE_UNSECURED; |
248f219cb rxrpc: Rewrite th... |
165 166 |
else conn->state = RXRPC_CONN_SERVICE; |
7877a4a4b rxrpc: Split serv... |
167 |
|
4722974d9 rxrpc: Implement ... |
168 169 170 171 172 173 174 |
/* See if we should upgrade the service. This can only happen on the * first packet on a new connection. Once done, it applies to all * subsequent calls on that connection. */ if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE && conn->service_id == rx->service_upgrade.from) conn->service_id = rx->service_upgrade.to; |
8496af50e rxrpc: Use RCU to... |
175 |
/* Make the connection a target for incoming packets. */ |
248f219cb rxrpc: Rewrite th... |
176 |
rxrpc_publish_service_conn(conn->params.peer, conn); |
8496af50e rxrpc: Use RCU to... |
177 |
|
248f219cb rxrpc: Rewrite th... |
178 |
_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid); |
7877a4a4b rxrpc: Split serv... |
179 |
} |
001c11224 rxrpc: Maintain a... |
180 181 182 183 184 185 186 187 |
/* * Remove the service connection from the peer's tree, thereby removing it as a * target for incoming packets. */ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn) { struct rxrpc_peer *peer = conn->params.peer; |
8496af50e rxrpc: Use RCU to... |
188 |
write_seqlock_bh(&peer->service_conn_lock); |
001c11224 rxrpc: Maintain a... |
189 190 |
if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags)) rb_erase(&conn->service_node, &peer->service_conns); |
8496af50e rxrpc: Use RCU to... |
191 |
write_sequnlock_bh(&peer->service_conn_lock); |
001c11224 rxrpc: Maintain a... |
192 |
} |