Commit 0c3141e910eaaa0b617e2f26c69b266d1cd1f035

Authored by Allan Stephens
Committed by David S. Miller
1 parent b89741a0cc

[TIPC]: Overhaul of socket locking logic

This patch modifies TIPC's socket code to follow the same approach
used by other protocols.  This change eliminates the need for a
mutex in the TIPC-specific portion of the socket protocol data
structure -- in its place, the standard Linux socket backlog queue
and associated locking routines are utilized.  These changes fix
a long-standing receive queue bug on SMP systems, and also enable
individual read and write threads to utilize a socket without
unnecessarily interfering with each other.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 608 additions and 431 deletions
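For context, the "standard approach used by other protocols" that the commit message refers to is the lock_sock()/socket-backlog pattern. The sketch below is illustrative only -- it is not the socket.c hunk from this commit (which is not reproduced in the excerpt that follows), and the example_* names are hypothetical:

#include <net/sock.h>

/* BH-level receive path: process the message now if no process owns the
 * socket lock, otherwise defer it to the standard socket backlog queue. */
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/* Socket not locked by a process: handle the message now. */
		rc = sk->sk_backlog_rcv(sk, skb);
	} else {
		/* A process owns the lock: queue on the backlog;
		 * release_sock() will feed it to sk_backlog_rcv() later. */
		sk_add_backlog(sk, skb);
	}
	bh_unlock_sock(sk);
	return rc;
}

/* Process-context path: lock_sock()/release_sock() serialize individual
 * read and write threads against each other and against the BH path. */
static void example_sendmsg(struct sock *sk)
{
	lock_sock(sk);		/* may sleep; excludes other lock owners */
	/* ... build and send message ... */
	release_sock(sk);	/* also drains any backlogged sk_buffs */
}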

include/net/tipc/tipc_port.h
/*
 * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
 *
 * Copyright (c) 1994-2007, Ericsson AB
 * Copyright (c) 2005-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _NET_TIPC_PORT_H_
#define _NET_TIPC_PORT_H_

#ifdef __KERNEL__

#include <linux/tipc.h>
#include <linux/skbuff.h>
#include <net/tipc/tipc_msg.h>

#define TIPC_FLOW_CONTROL_WIN 512

/**
 * struct tipc_port - native TIPC port info available to privileged users
 * @usr_handle: pointer to additional user-defined information about port
 * @lock: pointer to spinlock for controlling access to port
 * @connected: non-zero if port is currently connected to a peer port
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @conn_unacked: number of unacknowledged messages received from peer port
 * @published: non-zero if port has one or more associated names
 * @congested: non-zero if cannot send because of link or port congestion
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @ref: unique reference to port in TIPC object registry
 * @phdr: preformatted message header used when sending messages
 */

struct tipc_port {
	void *usr_handle;
	spinlock_t *lock;
	int connected;
	u32 conn_type;
	u32 conn_instance;
	u32 conn_unacked;
	int published;
	u32 congested;
	u32 max_pkt;
	u32 ref;
	struct tipc_msg phdr;
};


/**
 * tipc_createport_raw - create a native TIPC port and return it's reference
 *
 * Note: 'dispatcher' and 'wakeup' deliver a locked port.
 */

u32 tipc_createport_raw(void *usr_handle,
		u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
		void (*wakeup)(struct tipc_port *),
		const u32 importance);

int tipc_reject_msg(struct sk_buff *buf, u32 err);

int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);

void tipc_acknowledge(u32 port_ref,u32 ack);

struct tipc_port *tipc_get_port(const u32 ref);

void *tipc_get_handle(const u32 ref);

+/*
+ * The following routines require that the port be locked on entry
+ */
+
+int tipc_disconnect_port(struct tipc_port *tp_ptr);
+

#endif

#endif

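Note on the lines added above (marked "+"): the new comment states that tipc_disconnect_port() must be called with the port already locked. A hedged illustration of how a caller inside TIPC might satisfy that contract, using the tipc_port_lock()/tipc_port_unlock() helpers seen throughout net/tipc/port.c below; example_disconnect is a hypothetical name, not part of the commit:

/* Illustrative only: lock the port via its reference, call the
 * "port must be locked" routine, then unlock. */
static int example_disconnect(u32 ref)
{
	struct port *p_ptr;
	int res;

	p_ptr = tipc_port_lock(ref);	/* returns NULL if ref is invalid */
	if (!p_ptr)
		return -EINVAL;
	res = tipc_disconnect_port(&p_ptr->publ);
	tipc_port_unlock(p_ptr);
	return res;
}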
net/tipc/port.c
/*
 * net/tipc/port.c: TIPC port code
 *
 * Copyright (c) 1992-2007, Ericsson AB
 * Copyright (c) 2004-2007, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "dbg.h"
#include "port.h"
#include "addr.h"
#include "link.h"
#include "node.h"
#include "name_table.h"
#include "user_reg.h"
#include "msg.h"
#include "bcast.h"

/* Connection management: */
#define PROBING_INTERVAL 3600000	/* [ms] => 1 h */
#define CONFIRMED 0
#define PROBING 1

#define MAX_REJECT_SIZE 1024

static struct sk_buff *msg_queue_head = NULL;
static struct sk_buff *msg_queue_tail = NULL;

DEFINE_SPINLOCK(tipc_port_list_lock);
static DEFINE_SPINLOCK(queue_lock);

static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
static void port_timeout(unsigned long ref);


static u32 port_peernode(struct port *p_ptr)
{
	return msg_destnode(&p_ptr->publ.phdr);
}

static u32 port_peerport(struct port *p_ptr)
{
	return msg_destport(&p_ptr->publ.phdr);
}

static u32 port_out_seqno(struct port *p_ptr)
{
	return msg_transp_seqno(&p_ptr->publ.phdr);
}

static void port_incr_out_seqno(struct port *p_ptr)
{
	struct tipc_msg *m = &p_ptr->publ.phdr;

	if (likely(!msg_routed(m)))
		return;
	msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
}

/**
 * tipc_multicast - send a multicast message to local and remote destinations
 */

int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
		   u32 num_sect, struct iovec const *msg_sect)
{
	struct tipc_msg *hdr;
	struct sk_buff *buf;
	struct sk_buff *ibuf = NULL;
	struct port_list dports = {0, NULL, };
	struct port *oport = tipc_port_deref(ref);
	int ext_targets;
	int res;

	if (unlikely(!oport))
		return -EINVAL;

	/* Create multicast message */

	hdr = &oport->publ.phdr;
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
			!oport->user_port, &buf);
	if (unlikely(!buf))
		return res;

	/* Figure out where to send multicast message */

	ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
						TIPC_NODE_SCOPE, &dports);

	/* Send message to destinations (duplicate it only if necessary) */

	if (ext_targets) {
		if (dports.count != 0) {
			ibuf = skb_copy(buf, GFP_ATOMIC);
			if (ibuf == NULL) {
				tipc_port_list_free(&dports);
				buf_discard(buf);
				return -ENOMEM;
			}
		}
		res = tipc_bclink_send_msg(buf);
		if ((res < 0) && (dports.count != 0)) {
			buf_discard(ibuf);
		}
	} else {
		ibuf = buf;
	}

	if (res >= 0) {
		if (ibuf)
			tipc_port_recv_mcast(ibuf, &dports);
	} else {
		tipc_port_list_free(&dports);
	}
	return res;
}

158 * tipc_port_recv_mcast - deliver multicast message to all destination ports 158 * tipc_port_recv_mcast - deliver multicast message to all destination ports
159 * 159 *
160 * If there is no port list, perform a lookup to create one 160 * If there is no port list, perform a lookup to create one
161 */ 161 */
162 162
163 void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp) 163 void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
164 { 164 {
165 struct tipc_msg* msg; 165 struct tipc_msg* msg;
166 struct port_list dports = {0, NULL, }; 166 struct port_list dports = {0, NULL, };
167 struct port_list *item = dp; 167 struct port_list *item = dp;
168 int cnt = 0; 168 int cnt = 0;
169 169
170 msg = buf_msg(buf); 170 msg = buf_msg(buf);
171 171
172 /* Create destination port list, if one wasn't supplied */ 172 /* Create destination port list, if one wasn't supplied */
173 173
174 if (dp == NULL) { 174 if (dp == NULL) {
175 tipc_nametbl_mc_translate(msg_nametype(msg), 175 tipc_nametbl_mc_translate(msg_nametype(msg),
176 msg_namelower(msg), 176 msg_namelower(msg),
177 msg_nameupper(msg), 177 msg_nameupper(msg),
178 TIPC_CLUSTER_SCOPE, 178 TIPC_CLUSTER_SCOPE,
179 &dports); 179 &dports);
180 item = dp = &dports; 180 item = dp = &dports;
181 } 181 }
182 182
183 /* Deliver a copy of message to each destination port */ 183 /* Deliver a copy of message to each destination port */
184 184
185 if (dp->count != 0) { 185 if (dp->count != 0) {
186 if (dp->count == 1) { 186 if (dp->count == 1) {
187 msg_set_destport(msg, dp->ports[0]); 187 msg_set_destport(msg, dp->ports[0]);
188 tipc_port_recv_msg(buf); 188 tipc_port_recv_msg(buf);
189 tipc_port_list_free(dp); 189 tipc_port_list_free(dp);
190 return; 190 return;
191 } 191 }
192 for (; cnt < dp->count; cnt++) { 192 for (; cnt < dp->count; cnt++) {
193 int index = cnt % PLSIZE; 193 int index = cnt % PLSIZE;
194 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC); 194 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
195 195
196 if (b == NULL) { 196 if (b == NULL) {
197 warn("Unable to deliver multicast message(s)\n"); 197 warn("Unable to deliver multicast message(s)\n");
198 msg_dbg(msg, "LOST:"); 198 msg_dbg(msg, "LOST:");
199 goto exit; 199 goto exit;
200 } 200 }
201 if ((index == 0) && (cnt != 0)) { 201 if ((index == 0) && (cnt != 0)) {
202 item = item->next; 202 item = item->next;
203 } 203 }
204 msg_set_destport(buf_msg(b),item->ports[index]); 204 msg_set_destport(buf_msg(b),item->ports[index]);
205 tipc_port_recv_msg(b); 205 tipc_port_recv_msg(b);
206 } 206 }
207 } 207 }
208 exit: 208 exit:
209 buf_discard(buf); 209 buf_discard(buf);
210 tipc_port_list_free(dp); 210 tipc_port_list_free(dp);
211 } 211 }
212 212
/**
 * tipc_createport_raw - create a native TIPC port
 *
 * Returns local port reference
 */

u32 tipc_createport_raw(void *usr_handle,
			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
			void (*wakeup)(struct tipc_port *),
			const u32 importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 ref;

	p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
	if (!p_ptr) {
		warn("Port creation failed, no memory\n");
		return 0;
	}
	ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
	if (!ref) {
		warn("Port creation failed, reference table exhausted\n");
		kfree(p_ptr);
		return 0;
	}

	tipc_port_lock(ref);
	p_ptr->publ.usr_handle = usr_handle;
	p_ptr->publ.max_pkt = MAX_PKT_DEFAULT;
	p_ptr->publ.ref = ref;
	msg = &p_ptr->publ.phdr;
	msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE,
		 0);
	msg_set_orignode(msg, tipc_own_addr);
	msg_set_prevnode(msg, tipc_own_addr);
	msg_set_origport(msg, ref);
	msg_set_importance(msg,importance);
	p_ptr->last_in_seqno = 41;
	p_ptr->sent = 1;
	INIT_LIST_HEAD(&p_ptr->wait_list);
	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
	p_ptr->congested_link = NULL;
	p_ptr->dispatcher = dispatcher;
	p_ptr->wakeup = wakeup;
	p_ptr->user_port = NULL;
	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
	spin_lock_bh(&tipc_port_list_lock);
	INIT_LIST_HEAD(&p_ptr->publications);
	INIT_LIST_HEAD(&p_ptr->port_list);
	list_add_tail(&p_ptr->port_list, &ports);
	spin_unlock_bh(&tipc_port_list_lock);
	tipc_port_unlock(p_ptr);
	return ref;
}

int tipc_deleteport(u32 ref)
{
	struct port *p_ptr;
	struct sk_buff *buf = NULL;

	tipc_withdraw(ref, 0, NULL);
	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;

	tipc_ref_discard(ref);
	tipc_port_unlock(p_ptr);

	k_cancel_timer(&p_ptr->timer);
	if (p_ptr->publ.connected) {
		buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
		tipc_nodesub_unsubscribe(&p_ptr->subscription);
	}
	if (p_ptr->user_port) {
		tipc_reg_remove_port(p_ptr->user_port);
		kfree(p_ptr->user_port);
	}

	spin_lock_bh(&tipc_port_list_lock);
	list_del(&p_ptr->port_list);
	list_del(&p_ptr->wait_list);
	spin_unlock_bh(&tipc_port_list_lock);
	k_term_timer(&p_ptr->timer);
	kfree(p_ptr);
	dbg("Deleted port %u\n", ref);
	tipc_net_route_msg(buf);
	return TIPC_OK;
}

/**
 * tipc_get_port() - return port associated with 'ref'
 *
 * Note: Port is not locked.
 */

struct tipc_port *tipc_get_port(const u32 ref)
{
	return (struct tipc_port *)tipc_ref_deref(ref);
}

/**
 * tipc_get_handle - return user handle associated to port 'ref'
 */

void *tipc_get_handle(const u32 ref)
{
	struct port *p_ptr;
	void * handle;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return NULL;
	handle = p_ptr->publ.usr_handle;
	tipc_port_unlock(p_ptr);
	return handle;
}

static int port_unreliable(struct port *p_ptr)
{
	return msg_src_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*isunreliable = port_unreliable(p_ptr);
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

static int port_unreturnable(struct port *p_ptr)
{
	return msg_dest_droppable(&p_ptr->publ.phdr);
}

int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	*isunrejectable = port_unreturnable(p_ptr);
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
{
	struct port *p_ptr;

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
	tipc_port_unlock(p_ptr);
	return TIPC_OK;
}

/*
 * port_build_proto_msg(): build a port level protocol
 * or a connection abortion message. Called with
 * tipc_port lock on.
 */
static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
					    u32 origport, u32 orignode,
					    u32 usr, u32 type, u32 err,
					    u32 seqno, u32 ack)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = buf_acquire(LONG_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
		msg_set_destport(msg, destport);
		msg_set_origport(msg, origport);
		msg_set_destnode(msg, destnode);
		msg_set_orignode(msg, orignode);
		msg_set_transp_seqno(msg, seqno);
		msg_set_msgcnt(msg, ack);
		msg_dbg(msg, "PORT>SEND>:");
	}
	return buf;
}

int tipc_reject_msg(struct sk_buff *buf, u32 err)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct sk_buff *rbuf;
	struct tipc_msg *rmsg;
	int hdr_sz;
	u32 imp = msg_importance(msg);
	u32 data_sz = msg_data_sz(msg);

	if (data_sz > MAX_REJECT_SIZE)
		data_sz = MAX_REJECT_SIZE;
	if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
		imp++;
	msg_dbg(msg, "port->rej: ");

	/* discard rejected message if it shouldn't be returned to sender */
	if (msg_errcode(msg) || msg_dest_droppable(msg)) {
		buf_discard(buf);
		return data_sz;
	}

	/* construct rejected message */
	if (msg_mcast(msg))
		hdr_sz = MCAST_H_SIZE;
	else
		hdr_sz = LONG_H_SIZE;
	rbuf = buf_acquire(data_sz + hdr_sz);
	if (rbuf == NULL) {
		buf_discard(buf);
		return data_sz;
	}
	rmsg = buf_msg(rbuf);
	msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
	msg_set_destport(rmsg, msg_origport(msg));
	msg_set_prevnode(rmsg, tipc_own_addr);
	msg_set_origport(rmsg, msg_destport(msg));
	if (msg_short(msg))
		msg_set_orignode(rmsg, tipc_own_addr);
	else
		msg_set_orignode(rmsg, msg_destnode(msg));
	msg_set_size(rmsg, data_sz + hdr_sz);
	msg_set_nametype(rmsg, msg_nametype(msg));
	msg_set_nameinst(rmsg, msg_nameinst(msg));
	skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz);

	/* send self-abort message when rejecting on a connected port */
	if (msg_connected(msg)) {
		struct sk_buff *abuf = NULL;
		struct port *p_ptr = tipc_port_lock(msg_destport(msg));

		if (p_ptr) {
			if (p_ptr->publ.connected)
				abuf = port_build_self_abort_msg(p_ptr, err);
			tipc_port_unlock(p_ptr);
		}
		tipc_net_route_msg(abuf);
	}

	/* send rejected message */
	buf_discard(buf);
	tipc_net_route_msg(rbuf);
	return data_sz;
}

int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
			      struct iovec const *msg_sect, u32 num_sect,
			      int err)
{
	struct sk_buff *buf;
	int res;

	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
			!p_ptr->user_port, &buf);
	if (!buf)
		return res;

	return tipc_reject_msg(buf, err);
}

static void port_timeout(unsigned long ref)
{
	struct port *p_ptr = tipc_port_lock(ref);
	struct sk_buff *buf = NULL;

	if (!p_ptr)
		return;

	if (!p_ptr->publ.connected) {
		tipc_port_unlock(p_ptr);
		return;
	}

	/* Last probe answered ? */
	if (p_ptr->probing_state == PROBING) {
		buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
	} else {
		buf = port_build_proto_msg(port_peerport(p_ptr),
					   port_peernode(p_ptr),
					   p_ptr->publ.ref,
					   tipc_own_addr,
					   CONN_MANAGER,
					   CONN_PROBE,
					   TIPC_OK,
					   port_out_seqno(p_ptr),
					   0);
		port_incr_out_seqno(p_ptr);
		p_ptr->probing_state = PROBING;
		k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
	}
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
}


static void port_handle_node_down(unsigned long ref)
{
	struct port *p_ptr = tipc_port_lock(ref);
	struct sk_buff* buf = NULL;

	if (!p_ptr)
		return;
	buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
	tipc_port_unlock(p_ptr);
	tipc_net_route_msg(buf);
}


static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
{
	u32 imp = msg_importance(&p_ptr->publ.phdr);

	if (!p_ptr->publ.connected)
		return NULL;
	if (imp < TIPC_CRITICAL_IMPORTANCE)
		imp++;
	return port_build_proto_msg(p_ptr->publ.ref,
				    tipc_own_addr,
				    port_peerport(p_ptr),
				    port_peernode(p_ptr),
				    imp,
				    TIPC_CONN_MSG,
				    err,
				    p_ptr->last_in_seqno + 1,
				    0);
}


static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
{
	u32 imp = msg_importance(&p_ptr->publ.phdr);

	if (!p_ptr->publ.connected)
		return NULL;
	if (imp < TIPC_CRITICAL_IMPORTANCE)
		imp++;
	return port_build_proto_msg(port_peerport(p_ptr),
				    port_peernode(p_ptr),
				    p_ptr->publ.ref,
				    tipc_own_addr,
				    imp,
				    TIPC_CONN_MSG,
				    err,
				    port_out_seqno(p_ptr),
				    0);
}

void tipc_port_recv_proto_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct port *p_ptr = tipc_port_lock(msg_destport(msg));
	u32 err = TIPC_OK;
	struct sk_buff *r_buf = NULL;
	struct sk_buff *abort_buf = NULL;

	msg_dbg(msg, "PORT<RECV<:");

	if (!p_ptr) {
		err = TIPC_ERR_NO_PORT;
	} else if (p_ptr->publ.connected) {
		if (port_peernode(p_ptr) != msg_orignode(msg))
			err = TIPC_ERR_NO_PORT;
		if (port_peerport(p_ptr) != msg_origport(msg))
			err = TIPC_ERR_NO_PORT;
		if (!err && msg_routed(msg)) {
			u32 seqno = msg_transp_seqno(msg);
			u32 myno = ++p_ptr->last_in_seqno;
			if (seqno != myno) {
				err = TIPC_ERR_NO_PORT;
				abort_buf = port_build_self_abort_msg(p_ptr, err);
			}
		}
		if (msg_type(msg) == CONN_ACK) {
			int wakeup = tipc_port_congested(p_ptr) &&
				     p_ptr->publ.congested &&
				     p_ptr->wakeup;
			p_ptr->acked += msg_msgcnt(msg);
			if (tipc_port_congested(p_ptr))
				goto exit;
			p_ptr->publ.congested = 0;
			if (!wakeup)
				goto exit;
			p_ptr->wakeup(&p_ptr->publ);
			goto exit;
		}
	} else if (p_ptr->publ.published) {
		err = TIPC_ERR_NO_PORT;
	}
	if (err) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     TIPC_HIGH_IMPORTANCE,
					     TIPC_CONN_MSG,
					     err,
					     0,
					     0);
		goto exit;
	}

	/* All is fine */
	if (msg_type(msg) == CONN_PROBE) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     CONN_MANAGER,
					     CONN_PROBE_REPLY,
					     TIPC_OK,
					     port_out_seqno(p_ptr),
					     0);
	}
	p_ptr->probing_state = CONFIRMED;
	port_incr_out_seqno(p_ptr);
exit:
	if (p_ptr)
		tipc_port_unlock(p_ptr);
	tipc_net_route_msg(r_buf);
	tipc_net_route_msg(abort_buf);
	buf_discard(buf);
}

static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
{
	struct publication *publ;

	if (full_id)
		tipc_printf(buf, "<%u.%u.%u:%u>:",
			    tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
			    tipc_node(tipc_own_addr), p_ptr->publ.ref);
	else
		tipc_printf(buf, "%-10u:", p_ptr->publ.ref);

	if (p_ptr->publ.connected) {
		u32 dport = port_peerport(p_ptr);
		u32 destnode = port_peernode(p_ptr);

		tipc_printf(buf, " connected to <%u.%u.%u:%u>",
			    tipc_zone(destnode), tipc_cluster(destnode),
			    tipc_node(destnode), dport);
		if (p_ptr->publ.conn_type != 0)
			tipc_printf(buf, " via {%u,%u}",
				    p_ptr->publ.conn_type,
				    p_ptr->publ.conn_instance);
	}
	else if (p_ptr->publ.published) {
		tipc_printf(buf, " bound to");
		list_for_each_entry(publ, &p_ptr->publications, pport_list) {
			if (publ->lower == publ->upper)
				tipc_printf(buf, " {%u,%u}", publ->type,
					    publ->lower);
			else
				tipc_printf(buf, " {%u,%u,%u}", publ->type,
					    publ->lower, publ->upper);
		}
	}
	tipc_printf(buf, "\n");
}

#define MAX_PORT_QUERY 32768

struct sk_buff *tipc_port_get_ports(void)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	struct port *p_ptr;
	int str_len;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
	if (!buf)
		return NULL;
	rep_tlv = (struct tlv_desc *)buf->data;

	tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
	spin_lock_bh(&tipc_port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		spin_lock_bh(p_ptr->publ.lock);
		port_print(p_ptr, &pb, 0);
		spin_unlock_bh(p_ptr->publ.lock);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	str_len = tipc_printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

#if 0

#define MAX_PORT_STATS 2000

struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	u32 ref;
	struct port *p_ptr;
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	int str_len;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	ref = *(u32 *)TLV_DATA(req_tlv_area);
	ref = ntohl(ref);

	p_ptr = tipc_port_lock(ref);
	if (!p_ptr)
		return cfg_reply_error_string("port not found");

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
	if (!buf) {
		tipc_port_unlock(p_ptr);
		return NULL;
	}
	rep_tlv = (struct tlv_desc *)buf->data;

	tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
	port_print(p_ptr, &pb, 1);
	/* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
	tipc_port_unlock(p_ptr);
	str_len = tipc_printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

#endif

void tipc_port_reinit(void)
{
	struct port *p_ptr;
	struct tipc_msg *msg;

	spin_lock_bh(&tipc_port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		msg = &p_ptr->publ.phdr;
		if (msg_orignode(msg) == tipc_own_addr)
			break;
		msg_set_orignode(msg, tipc_own_addr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
}


/*
 * port_dispatcher_sigh(): Signal handler for messages destinated
 * to the tipc_port interface.
 */

static void port_dispatcher_sigh(void *dummy)
{
	struct sk_buff *buf;

	spin_lock_bh(&queue_lock);
	buf = msg_queue_head;
	msg_queue_head = NULL;
	spin_unlock_bh(&queue_lock);

	while (buf) {
		struct port *p_ptr;
		struct user_port *up_ptr;
		struct tipc_portid orig;
		struct tipc_name_seq dseq;
		void *usr_handle;
		int connected;
		int published;
		u32 message_type;

		struct sk_buff *next = buf->next;
		struct tipc_msg *msg = buf_msg(buf);
		u32 dref = msg_destport(msg);

		message_type = msg_type(msg);
		if (message_type > TIPC_DIRECT_MSG)
			goto reject;	/* Unsupported message type */

		p_ptr = tipc_port_lock(dref);
		if (!p_ptr)
			goto reject;	/* Port deleted while msg in queue */

		orig.ref = msg_origport(msg);
		orig.node = msg_orignode(msg);
		up_ptr = p_ptr->user_port;
		usr_handle = up_ptr->usr_handle;
		connected = p_ptr->publ.connected;
		published = p_ptr->publ.published;

		if (unlikely(msg_errcode(msg)))
			goto err;

		switch (message_type) {

		case TIPC_CONN_MSG:{
				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
				u32 peer_port = port_peerport(p_ptr);
				u32 peer_node = port_peernode(p_ptr);

				tipc_port_unlock(p_ptr);
				if (unlikely(!connected)) {
					if (unlikely(published))
						goto reject;
					tipc_connect2port(dref,&orig);
				}
				if (unlikely(msg_origport(msg) != peer_port))
					goto reject;
				if (unlikely(msg_orignode(msg) != peer_node))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				if (unlikely(++p_ptr->publ.conn_unacked >=
					     TIPC_FLOW_CONTROL_WIN))
					tipc_acknowledge(dref,
							 p_ptr->publ.conn_unacked);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg));
				break;
			}
		case TIPC_DIRECT_MSG:{
				tipc_msg_event cb = up_ptr->msg_cb;

				tipc_port_unlock(p_ptr);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig);
				break;
			}
		case TIPC_MCAST_MSG:
		case TIPC_NAMED_MSG:{
				tipc_named_msg_event cb = up_ptr->named_msg_cb;

				tipc_port_unlock(p_ptr);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				if (unlikely(!published))
					goto reject;
				dseq.type = msg_nametype(msg);
				dseq.lower = msg_nameinst(msg);
				dseq.upper = (message_type == TIPC_NAMED_MSG)
					? dseq.lower : msg_nameupper(msg);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig, &dseq);
				break;
			}
		}
		if (buf)
			buf_discard(buf);
		buf = next;
		continue;
901 err: 901 err:
902 switch (message_type) { 902 switch (message_type) {
903 903
904 case TIPC_CONN_MSG:{ 904 case TIPC_CONN_MSG:{
905 tipc_conn_shutdown_event cb = 905 tipc_conn_shutdown_event cb =
906 up_ptr->conn_err_cb; 906 up_ptr->conn_err_cb;
907 u32 peer_port = port_peerport(p_ptr); 907 u32 peer_port = port_peerport(p_ptr);
908 u32 peer_node = port_peernode(p_ptr); 908 u32 peer_node = port_peernode(p_ptr);
909 909
910 tipc_port_unlock(p_ptr); 910 tipc_port_unlock(p_ptr);
911 if (!connected || !cb) 911 if (!connected || !cb)
912 break; 912 break;
913 if (msg_origport(msg) != peer_port) 913 if (msg_origport(msg) != peer_port)
914 break; 914 break;
915 if (msg_orignode(msg) != peer_node) 915 if (msg_orignode(msg) != peer_node)
916 break; 916 break;
917 tipc_disconnect(dref); 917 tipc_disconnect(dref);
918 skb_pull(buf, msg_hdr_sz(msg)); 918 skb_pull(buf, msg_hdr_sz(msg));
919 cb(usr_handle, dref, &buf, msg_data(msg), 919 cb(usr_handle, dref, &buf, msg_data(msg),
920 msg_data_sz(msg), msg_errcode(msg)); 920 msg_data_sz(msg), msg_errcode(msg));
921 break; 921 break;
922 } 922 }
923 case TIPC_DIRECT_MSG:{ 923 case TIPC_DIRECT_MSG:{
924 tipc_msg_err_event cb = up_ptr->err_cb; 924 tipc_msg_err_event cb = up_ptr->err_cb;
925 925
926 tipc_port_unlock(p_ptr); 926 tipc_port_unlock(p_ptr);
927 if (connected || !cb) 927 if (connected || !cb)
928 break; 928 break;
929 skb_pull(buf, msg_hdr_sz(msg)); 929 skb_pull(buf, msg_hdr_sz(msg));
930 cb(usr_handle, dref, &buf, msg_data(msg), 930 cb(usr_handle, dref, &buf, msg_data(msg),
931 msg_data_sz(msg), msg_errcode(msg), &orig); 931 msg_data_sz(msg), msg_errcode(msg), &orig);
932 break; 932 break;
933 } 933 }
934 case TIPC_MCAST_MSG: 934 case TIPC_MCAST_MSG:
935 case TIPC_NAMED_MSG:{ 935 case TIPC_NAMED_MSG:{
936 tipc_named_msg_err_event cb = 936 tipc_named_msg_err_event cb =
937 up_ptr->named_err_cb; 937 up_ptr->named_err_cb;
938 938
939 tipc_port_unlock(p_ptr); 939 tipc_port_unlock(p_ptr);
940 if (connected || !cb) 940 if (connected || !cb)
941 break; 941 break;
942 dseq.type = msg_nametype(msg); 942 dseq.type = msg_nametype(msg);
943 dseq.lower = msg_nameinst(msg); 943 dseq.lower = msg_nameinst(msg);
944 dseq.upper = (message_type == TIPC_NAMED_MSG) 944 dseq.upper = (message_type == TIPC_NAMED_MSG)
945 ? dseq.lower : msg_nameupper(msg); 945 ? dseq.lower : msg_nameupper(msg);
946 skb_pull(buf, msg_hdr_sz(msg)); 946 skb_pull(buf, msg_hdr_sz(msg));
947 cb(usr_handle, dref, &buf, msg_data(msg), 947 cb(usr_handle, dref, &buf, msg_data(msg),
948 msg_data_sz(msg), msg_errcode(msg), &dseq); 948 msg_data_sz(msg), msg_errcode(msg), &dseq);
949 break; 949 break;
950 } 950 }
951 } 951 }
952 if (buf) 952 if (buf)
953 buf_discard(buf); 953 buf_discard(buf);
954 buf = next; 954 buf = next;
955 continue; 955 continue;
956 reject: 956 reject:
957 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 957 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
958 buf = next; 958 buf = next;
959 } 959 }
960 } 960 }
961 961
962 /* 962 /*
963 * port_dispatcher(): Dispatcher for messages destined 963 * port_dispatcher(): Dispatcher for messages destined
964 * to the tipc_port interface. Called with port locked. 964 * to the tipc_port interface. Called with port locked.
965 */ 965 */
966 966
967 static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) 967 static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
968 { 968 {
969 buf->next = NULL; 969 buf->next = NULL;
970 spin_lock_bh(&queue_lock); 970 spin_lock_bh(&queue_lock);
971 if (msg_queue_head) { 971 if (msg_queue_head) {
972 msg_queue_tail->next = buf; 972 msg_queue_tail->next = buf;
973 msg_queue_tail = buf; 973 msg_queue_tail = buf;
974 } else { 974 } else {
975 msg_queue_tail = msg_queue_head = buf; 975 msg_queue_tail = msg_queue_head = buf;
976 tipc_k_signal((Handler)port_dispatcher_sigh, 0); 976 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
977 } 977 }
978 spin_unlock_bh(&queue_lock); 978 spin_unlock_bh(&queue_lock);
979 return TIPC_OK; 979 return TIPC_OK;
980 } 980 }
981 981
982 /* 982 /*
983 * Wake up port after congestion: Called with port locked, 983 * Wake up port after congestion: Called with port locked,
984 * 984 *
985 */ 985 */
986 986
987 static void port_wakeup_sh(unsigned long ref) 987 static void port_wakeup_sh(unsigned long ref)
988 { 988 {
989 struct port *p_ptr; 989 struct port *p_ptr;
990 struct user_port *up_ptr; 990 struct user_port *up_ptr;
991 tipc_continue_event cb = NULL; 991 tipc_continue_event cb = NULL;
992 void *uh = NULL; 992 void *uh = NULL;
993 993
994 p_ptr = tipc_port_lock(ref); 994 p_ptr = tipc_port_lock(ref);
995 if (p_ptr) { 995 if (p_ptr) {
996 up_ptr = p_ptr->user_port; 996 up_ptr = p_ptr->user_port;
997 if (up_ptr) { 997 if (up_ptr) {
998 cb = up_ptr->continue_event_cb; 998 cb = up_ptr->continue_event_cb;
999 uh = up_ptr->usr_handle; 999 uh = up_ptr->usr_handle;
1000 } 1000 }
1001 tipc_port_unlock(p_ptr); 1001 tipc_port_unlock(p_ptr);
1002 } 1002 }
1003 if (cb) 1003 if (cb)
1004 cb(uh, ref); 1004 cb(uh, ref);
1005 } 1005 }
1006 1006
1007 1007
1008 static void port_wakeup(struct tipc_port *p_ptr) 1008 static void port_wakeup(struct tipc_port *p_ptr)
1009 { 1009 {
1010 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref); 1010 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
1011 } 1011 }
1012 1012
1013 void tipc_acknowledge(u32 ref, u32 ack) 1013 void tipc_acknowledge(u32 ref, u32 ack)
1014 { 1014 {
1015 struct port *p_ptr; 1015 struct port *p_ptr;
1016 struct sk_buff *buf = NULL; 1016 struct sk_buff *buf = NULL;
1017 1017
1018 p_ptr = tipc_port_lock(ref); 1018 p_ptr = tipc_port_lock(ref);
1019 if (!p_ptr) 1019 if (!p_ptr)
1020 return; 1020 return;
1021 if (p_ptr->publ.connected) { 1021 if (p_ptr->publ.connected) {
1022 p_ptr->publ.conn_unacked -= ack; 1022 p_ptr->publ.conn_unacked -= ack;
1023 buf = port_build_proto_msg(port_peerport(p_ptr), 1023 buf = port_build_proto_msg(port_peerport(p_ptr),
1024 port_peernode(p_ptr), 1024 port_peernode(p_ptr),
1025 ref, 1025 ref,
1026 tipc_own_addr, 1026 tipc_own_addr,
1027 CONN_MANAGER, 1027 CONN_MANAGER,
1028 CONN_ACK, 1028 CONN_ACK,
1029 TIPC_OK, 1029 TIPC_OK,
1030 port_out_seqno(p_ptr), 1030 port_out_seqno(p_ptr),
1031 ack); 1031 ack);
1032 } 1032 }
1033 tipc_port_unlock(p_ptr); 1033 tipc_port_unlock(p_ptr);
1034 tipc_net_route_msg(buf); 1034 tipc_net_route_msg(buf);
1035 } 1035 }
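A note on usage: the dispatcher above already calls tipc_acknowledge() once conn_unacked reaches TIPC_FLOW_CONTROL_WIN, and a native-API consumer that drains connection messages itself would return credit the same way. A minimal sketch follows; the callback signature is inferred from the dispatcher code above, and the header path and per-port counter are assumptions, not part of this diff.

#include <net/tipc/tipc.h>	/* native API declarations (assumed location) */

/* Sketch only: batch-acknowledge consumed connection messages */
static void example_conn_msg_cb(void *usr_handle, u32 port_ref,
				struct sk_buff **buf,
				unsigned char const *data, unsigned int size)
{
	u32 *delivered = usr_handle;	/* assumed to point at a caller-owned counter */

	/* ... consume 'size' bytes at 'data' ... */

	if (++(*delivered) >= TIPC_FLOW_CONTROL_WIN / 2) {
		tipc_acknowledge(port_ref, *delivered);	/* send CONN_ACK credit to peer */
		*delivered = 0;
	}
}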
1036 1036
1037 /* 1037 /*
1038 * tipc_createport(): user level call. Will add port to 1038 * tipc_createport(): user level call. Will add port to
1039 * registry if non-zero user_ref. 1039 * registry if non-zero user_ref.
1040 */ 1040 */
1041 1041
1042 int tipc_createport(u32 user_ref, 1042 int tipc_createport(u32 user_ref,
1043 void *usr_handle, 1043 void *usr_handle,
1044 unsigned int importance, 1044 unsigned int importance,
1045 tipc_msg_err_event error_cb, 1045 tipc_msg_err_event error_cb,
1046 tipc_named_msg_err_event named_error_cb, 1046 tipc_named_msg_err_event named_error_cb,
1047 tipc_conn_shutdown_event conn_error_cb, 1047 tipc_conn_shutdown_event conn_error_cb,
1048 tipc_msg_event msg_cb, 1048 tipc_msg_event msg_cb,
1049 tipc_named_msg_event named_msg_cb, 1049 tipc_named_msg_event named_msg_cb,
1050 tipc_conn_msg_event conn_msg_cb, 1050 tipc_conn_msg_event conn_msg_cb,
1051 tipc_continue_event continue_event_cb,/* May be zero */ 1051 tipc_continue_event continue_event_cb,/* May be zero */
1052 u32 *portref) 1052 u32 *portref)
1053 { 1053 {
1054 struct user_port *up_ptr; 1054 struct user_port *up_ptr;
1055 struct port *p_ptr; 1055 struct port *p_ptr;
1056 u32 ref; 1056 u32 ref;
1057 1057
1058 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC); 1058 up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1059 if (!up_ptr) { 1059 if (!up_ptr) {
1060 warn("Port creation failed, no memory\n"); 1060 warn("Port creation failed, no memory\n");
1061 return -ENOMEM; 1061 return -ENOMEM;
1062 } 1062 }
1063 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance); 1063 ref = tipc_createport_raw(NULL, port_dispatcher, port_wakeup, importance);
1064 p_ptr = tipc_port_lock(ref); 1064 p_ptr = tipc_port_lock(ref);
1065 if (!p_ptr) { 1065 if (!p_ptr) {
1066 kfree(up_ptr); 1066 kfree(up_ptr);
1067 return -ENOMEM; 1067 return -ENOMEM;
1068 } 1068 }
1069 1069
1070 p_ptr->user_port = up_ptr; 1070 p_ptr->user_port = up_ptr;
1071 up_ptr->user_ref = user_ref; 1071 up_ptr->user_ref = user_ref;
1072 up_ptr->usr_handle = usr_handle; 1072 up_ptr->usr_handle = usr_handle;
1073 up_ptr->ref = p_ptr->publ.ref; 1073 up_ptr->ref = p_ptr->publ.ref;
1074 up_ptr->err_cb = error_cb; 1074 up_ptr->err_cb = error_cb;
1075 up_ptr->named_err_cb = named_error_cb; 1075 up_ptr->named_err_cb = named_error_cb;
1076 up_ptr->conn_err_cb = conn_error_cb; 1076 up_ptr->conn_err_cb = conn_error_cb;
1077 up_ptr->msg_cb = msg_cb; 1077 up_ptr->msg_cb = msg_cb;
1078 up_ptr->named_msg_cb = named_msg_cb; 1078 up_ptr->named_msg_cb = named_msg_cb;
1079 up_ptr->conn_msg_cb = conn_msg_cb; 1079 up_ptr->conn_msg_cb = conn_msg_cb;
1080 up_ptr->continue_event_cb = continue_event_cb; 1080 up_ptr->continue_event_cb = continue_event_cb;
1081 INIT_LIST_HEAD(&up_ptr->uport_list); 1081 INIT_LIST_HEAD(&up_ptr->uport_list);
1082 tipc_reg_add_port(up_ptr); 1082 tipc_reg_add_port(up_ptr);
1083 *portref = p_ptr->publ.ref; 1083 *portref = p_ptr->publ.ref;
1084 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref); 1084 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1085 tipc_port_unlock(p_ptr); 1085 tipc_port_unlock(p_ptr);
1086 return TIPC_OK; 1086 return TIPC_OK;
1087 } 1087 }
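For orientation, a kernel-space user of the native API would typically create a port as sketched below. The callback signature is inferred from the dispatcher earlier in this file, the header path is an assumption, and every name prefixed example_ is hypothetical.

#include <net/tipc/tipc.h>	/* native API declarations (assumed location) */

static void example_named_msg_cb(void *usr_handle, u32 port_ref,
				 struct sk_buff **buf,
				 unsigned char const *data, unsigned int size,
				 unsigned int importance,
				 struct tipc_portid const *orig,
				 struct tipc_name_seq const *dest)
{
	/* ... handle a message addressed to one of the port's published names ... */
}

static int example_open_port(u32 *port_ref)
{
	/* user_ref of 0: port is not added to the user registry (see comment above) */
	return tipc_createport(0, NULL, TIPC_LOW_IMPORTANCE,
			       NULL, NULL, NULL,		/* error callbacks unused */
			       NULL, example_named_msg_cb, NULL,	/* only named messages */
			       NULL,				/* no congestion wakeup */
			       port_ref);
}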
1088 1088
1089 int tipc_ownidentity(u32 ref, struct tipc_portid *id) 1089 int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1090 { 1090 {
1091 id->ref = ref; 1091 id->ref = ref;
1092 id->node = tipc_own_addr; 1092 id->node = tipc_own_addr;
1093 return TIPC_OK; 1093 return TIPC_OK;
1094 } 1094 }
1095 1095
1096 int tipc_portimportance(u32 ref, unsigned int *importance) 1096 int tipc_portimportance(u32 ref, unsigned int *importance)
1097 { 1097 {
1098 struct port *p_ptr; 1098 struct port *p_ptr;
1099 1099
1100 p_ptr = tipc_port_lock(ref); 1100 p_ptr = tipc_port_lock(ref);
1101 if (!p_ptr) 1101 if (!p_ptr)
1102 return -EINVAL; 1102 return -EINVAL;
1103 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); 1103 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
1104 tipc_port_unlock(p_ptr); 1104 tipc_port_unlock(p_ptr);
1105 return TIPC_OK; 1105 return TIPC_OK;
1106 } 1106 }
1107 1107
1108 int tipc_set_portimportance(u32 ref, unsigned int imp) 1108 int tipc_set_portimportance(u32 ref, unsigned int imp)
1109 { 1109 {
1110 struct port *p_ptr; 1110 struct port *p_ptr;
1111 1111
1112 if (imp > TIPC_CRITICAL_IMPORTANCE) 1112 if (imp > TIPC_CRITICAL_IMPORTANCE)
1113 return -EINVAL; 1113 return -EINVAL;
1114 1114
1115 p_ptr = tipc_port_lock(ref); 1115 p_ptr = tipc_port_lock(ref);
1116 if (!p_ptr) 1116 if (!p_ptr)
1117 return -EINVAL; 1117 return -EINVAL;
1118 msg_set_importance(&p_ptr->publ.phdr, (u32)imp); 1118 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
1119 tipc_port_unlock(p_ptr); 1119 tipc_port_unlock(p_ptr);
1120 return TIPC_OK; 1120 return TIPC_OK;
1121 } 1121 }
1122 1122
1123 1123
1124 int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) 1124 int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1125 { 1125 {
1126 struct port *p_ptr; 1126 struct port *p_ptr;
1127 struct publication *publ; 1127 struct publication *publ;
1128 u32 key; 1128 u32 key;
1129 int res = -EINVAL; 1129 int res = -EINVAL;
1130 1130
1131 p_ptr = tipc_port_lock(ref); 1131 p_ptr = tipc_port_lock(ref);
1132 if (!p_ptr) 1132 if (!p_ptr)
1133 return -EINVAL; 1133 return -EINVAL;
1134 1134
1135 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, " 1135 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1136 "lower = %u, upper = %u\n", 1136 "lower = %u, upper = %u\n",
1137 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper); 1137 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
1138 if (p_ptr->publ.connected) 1138 if (p_ptr->publ.connected)
1139 goto exit; 1139 goto exit;
1140 if (seq->lower > seq->upper) 1140 if (seq->lower > seq->upper)
1141 goto exit; 1141 goto exit;
1142 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE)) 1142 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
1143 goto exit; 1143 goto exit;
1144 key = ref + p_ptr->pub_count + 1; 1144 key = ref + p_ptr->pub_count + 1;
1145 if (key == ref) { 1145 if (key == ref) {
1146 res = -EADDRINUSE; 1146 res = -EADDRINUSE;
1147 goto exit; 1147 goto exit;
1148 } 1148 }
1149 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, 1149 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
1150 scope, p_ptr->publ.ref, key); 1150 scope, p_ptr->publ.ref, key);
1151 if (publ) { 1151 if (publ) {
1152 list_add(&publ->pport_list, &p_ptr->publications); 1152 list_add(&publ->pport_list, &p_ptr->publications);
1153 p_ptr->pub_count++; 1153 p_ptr->pub_count++;
1154 p_ptr->publ.published = 1; 1154 p_ptr->publ.published = 1;
1155 res = TIPC_OK; 1155 res = TIPC_OK;
1156 } 1156 }
1157 exit: 1157 exit:
1158 tipc_port_unlock(p_ptr); 1158 tipc_port_unlock(p_ptr);
1159 return res; 1159 return res;
1160 } 1160 }
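To make such a port reachable by name, the owner binds a name sequence to it and later withdraws it; a short sketch using the scope constants referenced above (the service type 18888 is purely illustrative):

static int example_bind_name(u32 port_ref)
{
	struct tipc_name_seq seq = {
		.type  = 18888,		/* hypothetical service type */
		.lower = 100,
		.upper = 199,
	};

	return tipc_publish(port_ref, TIPC_ZONE_SCOPE, &seq);
}

static int example_unbind_name(u32 port_ref)
{
	struct tipc_name_seq seq = { .type = 18888, .lower = 100, .upper = 199 };

	return tipc_withdraw(port_ref, TIPC_ZONE_SCOPE, &seq);
}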
1161 1161
1162 int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) 1162 int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1163 { 1163 {
1164 struct port *p_ptr; 1164 struct port *p_ptr;
1165 struct publication *publ; 1165 struct publication *publ;
1166 struct publication *tpubl; 1166 struct publication *tpubl;
1167 int res = -EINVAL; 1167 int res = -EINVAL;
1168 1168
1169 p_ptr = tipc_port_lock(ref); 1169 p_ptr = tipc_port_lock(ref);
1170 if (!p_ptr) 1170 if (!p_ptr)
1171 return -EINVAL; 1171 return -EINVAL;
1172 if (!seq) { 1172 if (!seq) {
1173 list_for_each_entry_safe(publ, tpubl, 1173 list_for_each_entry_safe(publ, tpubl,
1174 &p_ptr->publications, pport_list) { 1174 &p_ptr->publications, pport_list) {
1175 tipc_nametbl_withdraw(publ->type, publ->lower, 1175 tipc_nametbl_withdraw(publ->type, publ->lower,
1176 publ->ref, publ->key); 1176 publ->ref, publ->key);
1177 } 1177 }
1178 res = TIPC_OK; 1178 res = TIPC_OK;
1179 } else { 1179 } else {
1180 list_for_each_entry_safe(publ, tpubl, 1180 list_for_each_entry_safe(publ, tpubl,
1181 &p_ptr->publications, pport_list) { 1181 &p_ptr->publications, pport_list) {
1182 if (publ->scope != scope) 1182 if (publ->scope != scope)
1183 continue; 1183 continue;
1184 if (publ->type != seq->type) 1184 if (publ->type != seq->type)
1185 continue; 1185 continue;
1186 if (publ->lower != seq->lower) 1186 if (publ->lower != seq->lower)
1187 continue; 1187 continue;
1188 if (publ->upper != seq->upper) 1188 if (publ->upper != seq->upper)
1189 break; 1189 break;
1190 tipc_nametbl_withdraw(publ->type, publ->lower, 1190 tipc_nametbl_withdraw(publ->type, publ->lower,
1191 publ->ref, publ->key); 1191 publ->ref, publ->key);
1192 res = TIPC_OK; 1192 res = TIPC_OK;
1193 break; 1193 break;
1194 } 1194 }
1195 } 1195 }
1196 if (list_empty(&p_ptr->publications)) 1196 if (list_empty(&p_ptr->publications))
1197 p_ptr->publ.published = 0; 1197 p_ptr->publ.published = 0;
1198 tipc_port_unlock(p_ptr); 1198 tipc_port_unlock(p_ptr);
1199 return res; 1199 return res;
1200 } 1200 }
1201 1201
1202 int tipc_connect2port(u32 ref, struct tipc_portid const *peer) 1202 int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1203 { 1203 {
1204 struct port *p_ptr; 1204 struct port *p_ptr;
1205 struct tipc_msg *msg; 1205 struct tipc_msg *msg;
1206 int res = -EINVAL; 1206 int res = -EINVAL;
1207 1207
1208 p_ptr = tipc_port_lock(ref); 1208 p_ptr = tipc_port_lock(ref);
1209 if (!p_ptr) 1209 if (!p_ptr)
1210 return -EINVAL; 1210 return -EINVAL;
1211 if (p_ptr->publ.published || p_ptr->publ.connected) 1211 if (p_ptr->publ.published || p_ptr->publ.connected)
1212 goto exit; 1212 goto exit;
1213 if (!peer->ref) 1213 if (!peer->ref)
1214 goto exit; 1214 goto exit;
1215 1215
1216 msg = &p_ptr->publ.phdr; 1216 msg = &p_ptr->publ.phdr;
1217 msg_set_destnode(msg, peer->node); 1217 msg_set_destnode(msg, peer->node);
1218 msg_set_destport(msg, peer->ref); 1218 msg_set_destport(msg, peer->ref);
1219 msg_set_orignode(msg, tipc_own_addr); 1219 msg_set_orignode(msg, tipc_own_addr);
1220 msg_set_origport(msg, p_ptr->publ.ref); 1220 msg_set_origport(msg, p_ptr->publ.ref);
1221 msg_set_transp_seqno(msg, 42); 1221 msg_set_transp_seqno(msg, 42);
1222 msg_set_type(msg, TIPC_CONN_MSG); 1222 msg_set_type(msg, TIPC_CONN_MSG);
1223 if (!may_route(peer->node)) 1223 if (!may_route(peer->node))
1224 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1224 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1225 else 1225 else
1226 msg_set_hdr_sz(msg, LONG_H_SIZE); 1226 msg_set_hdr_sz(msg, LONG_H_SIZE);
1227 1227
1228 p_ptr->probing_interval = PROBING_INTERVAL; 1228 p_ptr->probing_interval = PROBING_INTERVAL;
1229 p_ptr->probing_state = CONFIRMED; 1229 p_ptr->probing_state = CONFIRMED;
1230 p_ptr->publ.connected = 1; 1230 p_ptr->publ.connected = 1;
1231 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 1231 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
1232 1232
1233 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node, 1233 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1234 (void *)(unsigned long)ref, 1234 (void *)(unsigned long)ref,
1235 (net_ev_handler)port_handle_node_down); 1235 (net_ev_handler)port_handle_node_down);
1236 res = TIPC_OK; 1236 res = TIPC_OK;
1237 exit: 1237 exit:
1238 tipc_port_unlock(p_ptr); 1238 tipc_port_unlock(p_ptr);
1239 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref); 1239 p_ptr->publ.max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1240 return res; 1240 return res;
1241 } 1241 }
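A connection-oriented user pairs this call with tipc_shutdown() (or tipc_disconnect()) defined below; a minimal sketch, assuming the peer identity was captured earlier, e.g. from the orig parameter of a received message:

static int example_connect_and_close(u32 port_ref, struct tipc_portid const *peer)
{
	int res;

	res = tipc_connect2port(port_ref, peer);
	if (res)
		return res;

	/* ... exchange TIPC_CONN_MSG traffic via tipc_send() ... */

	return tipc_shutdown(port_ref);	/* sends TIPC_CONN_SHUTDOWN, then disconnects */
}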
1242 1242
1243 /**
1244 * tipc_disconnect_port - disconnect port from peer
1245 *
1246 * Port must be locked.
1247 */
1248
1249 int tipc_disconnect_port(struct tipc_port *tp_ptr)
1250 {
1251 int res;
1252
1253 if (tp_ptr->connected) {
1254 tp_ptr->connected = 0;
1255 /* let timer expire on its own to avoid deadlock! */
1256 tipc_nodesub_unsubscribe(
1257 &((struct port *)tp_ptr)->subscription);
1258 res = TIPC_OK;
1259 } else {
1260 res = -ENOTCONN;
1261 }
1262 return res;
1263 }
1264
1243 /* 1265 /*
1244 * tipc_disconnect(): Disconnect port from peer. 1266 * tipc_disconnect(): Disconnect port from peer.
1245 * This is a node local operation. 1267 * This is a node local operation.
1246 */ 1268 */
1247 1269
1248 int tipc_disconnect(u32 ref) 1270 int tipc_disconnect(u32 ref)
1249 { 1271 {
1250 struct port *p_ptr; 1272 struct port *p_ptr;
1251 int res = -ENOTCONN; 1273 int res;
1252 1274
1253 p_ptr = tipc_port_lock(ref); 1275 p_ptr = tipc_port_lock(ref);
1254 if (!p_ptr) 1276 if (!p_ptr)
1255 return -EINVAL; 1277 return -EINVAL;
1256 if (p_ptr->publ.connected) { 1278 res = tipc_disconnect_port((struct tipc_port *)p_ptr);
1257 p_ptr->publ.connected = 0;
1258 /* let timer expire on its own to avoid deadlock! */
1259 tipc_nodesub_unsubscribe(&p_ptr->subscription);
1260 res = TIPC_OK;
1261 }
1262 tipc_port_unlock(p_ptr); 1279 tipc_port_unlock(p_ptr);
1263 return res; 1280 return res;
1264 } 1281 }
1265 1282
1266 /* 1283 /*
1267 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect 1284 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
1268 */ 1285 */
1269 int tipc_shutdown(u32 ref) 1286 int tipc_shutdown(u32 ref)
1270 { 1287 {
1271 struct port *p_ptr; 1288 struct port *p_ptr;
1272 struct sk_buff *buf = NULL; 1289 struct sk_buff *buf = NULL;
1273 1290
1274 p_ptr = tipc_port_lock(ref); 1291 p_ptr = tipc_port_lock(ref);
1275 if (!p_ptr) 1292 if (!p_ptr)
1276 return -EINVAL; 1293 return -EINVAL;
1277 1294
1278 if (p_ptr->publ.connected) { 1295 if (p_ptr->publ.connected) {
1279 u32 imp = msg_importance(&p_ptr->publ.phdr); 1296 u32 imp = msg_importance(&p_ptr->publ.phdr);
1280 if (imp < TIPC_CRITICAL_IMPORTANCE) 1297 if (imp < TIPC_CRITICAL_IMPORTANCE)
1281 imp++; 1298 imp++;
1282 buf = port_build_proto_msg(port_peerport(p_ptr), 1299 buf = port_build_proto_msg(port_peerport(p_ptr),
1283 port_peernode(p_ptr), 1300 port_peernode(p_ptr),
1284 ref, 1301 ref,
1285 tipc_own_addr, 1302 tipc_own_addr,
1286 imp, 1303 imp,
1287 TIPC_CONN_MSG, 1304 TIPC_CONN_MSG,
1288 TIPC_CONN_SHUTDOWN, 1305 TIPC_CONN_SHUTDOWN,
1289 port_out_seqno(p_ptr), 1306 port_out_seqno(p_ptr),
1290 0); 1307 0);
1291 } 1308 }
1292 tipc_port_unlock(p_ptr); 1309 tipc_port_unlock(p_ptr);
1293 tipc_net_route_msg(buf); 1310 tipc_net_route_msg(buf);
1294 return tipc_disconnect(ref); 1311 return tipc_disconnect(ref);
1295 } 1312 }
1296 1313
1297 int tipc_isconnected(u32 ref, int *isconnected) 1314 int tipc_isconnected(u32 ref, int *isconnected)
1298 { 1315 {
1299 struct port *p_ptr; 1316 struct port *p_ptr;
1300 1317
1301 p_ptr = tipc_port_lock(ref); 1318 p_ptr = tipc_port_lock(ref);
1302 if (!p_ptr) 1319 if (!p_ptr)
1303 return -EINVAL; 1320 return -EINVAL;
1304 *isconnected = p_ptr->publ.connected; 1321 *isconnected = p_ptr->publ.connected;
1305 tipc_port_unlock(p_ptr); 1322 tipc_port_unlock(p_ptr);
1306 return TIPC_OK; 1323 return TIPC_OK;
1307 } 1324 }
1308 1325
1309 int tipc_peer(u32 ref, struct tipc_portid *peer) 1326 int tipc_peer(u32 ref, struct tipc_portid *peer)
1310 { 1327 {
1311 struct port *p_ptr; 1328 struct port *p_ptr;
1312 int res; 1329 int res;
1313 1330
1314 p_ptr = tipc_port_lock(ref); 1331 p_ptr = tipc_port_lock(ref);
1315 if (!p_ptr) 1332 if (!p_ptr)
1316 return -EINVAL; 1333 return -EINVAL;
1317 if (p_ptr->publ.connected) { 1334 if (p_ptr->publ.connected) {
1318 peer->ref = port_peerport(p_ptr); 1335 peer->ref = port_peerport(p_ptr);
1319 peer->node = port_peernode(p_ptr); 1336 peer->node = port_peernode(p_ptr);
1320 res = TIPC_OK; 1337 res = TIPC_OK;
1321 } else 1338 } else
1322 res = -ENOTCONN; 1339 res = -ENOTCONN;
1323 tipc_port_unlock(p_ptr); 1340 tipc_port_unlock(p_ptr);
1324 return res; 1341 return res;
1325 } 1342 }
1326 1343
1327 int tipc_ref_valid(u32 ref) 1344 int tipc_ref_valid(u32 ref)
1328 { 1345 {
1329 /* Works irrespective of type */ 1346 /* Works irrespective of type */
1330 return !!tipc_ref_deref(ref); 1347 return !!tipc_ref_deref(ref);
1331 } 1348 }
1332 1349
1333 1350
1334 /* 1351 /*
1335 * tipc_port_recv_sections(): Concatenate and deliver sectioned 1352 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1336 * message for this node. 1353 * message for this node.
1337 */ 1354 */
1338 1355
1339 int tipc_port_recv_sections(struct port *sender, unsigned int num_sect, 1356 int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1340 struct iovec const *msg_sect) 1357 struct iovec const *msg_sect)
1341 { 1358 {
1342 struct sk_buff *buf; 1359 struct sk_buff *buf;
1343 int res; 1360 int res;
1344 1361
1345 res = msg_build(&sender->publ.phdr, msg_sect, num_sect, 1362 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1346 MAX_MSG_SIZE, !sender->user_port, &buf); 1363 MAX_MSG_SIZE, !sender->user_port, &buf);
1347 if (likely(buf)) 1364 if (likely(buf))
1348 tipc_port_recv_msg(buf); 1365 tipc_port_recv_msg(buf);
1349 return res; 1366 return res;
1350 } 1367 }
1351 1368
1352 /** 1369 /**
1353 * tipc_send - send message sections on connection 1370 * tipc_send - send message sections on connection
1354 */ 1371 */
1355 1372
1356 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect) 1373 int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1357 { 1374 {
1358 struct port *p_ptr; 1375 struct port *p_ptr;
1359 u32 destnode; 1376 u32 destnode;
1360 int res; 1377 int res;
1361 1378
1362 p_ptr = tipc_port_deref(ref); 1379 p_ptr = tipc_port_deref(ref);
1363 if (!p_ptr || !p_ptr->publ.connected) 1380 if (!p_ptr || !p_ptr->publ.connected)
1364 return -EINVAL; 1381 return -EINVAL;
1365 1382
1366 p_ptr->publ.congested = 1; 1383 p_ptr->publ.congested = 1;
1367 if (!tipc_port_congested(p_ptr)) { 1384 if (!tipc_port_congested(p_ptr)) {
1368 destnode = port_peernode(p_ptr); 1385 destnode = port_peernode(p_ptr);
1369 if (likely(destnode != tipc_own_addr)) 1386 if (likely(destnode != tipc_own_addr))
1370 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1387 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1371 destnode); 1388 destnode);
1372 else 1389 else
1373 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1390 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1374 1391
1375 if (likely(res != -ELINKCONG)) { 1392 if (likely(res != -ELINKCONG)) {
1376 port_incr_out_seqno(p_ptr); 1393 port_incr_out_seqno(p_ptr);
1377 p_ptr->publ.congested = 0; 1394 p_ptr->publ.congested = 0;
1378 p_ptr->sent++; 1395 p_ptr->sent++;
1379 return res; 1396 return res;
1380 } 1397 }
1381 } 1398 }
1382 if (port_unreliable(p_ptr)) { 1399 if (port_unreliable(p_ptr)) {
1383 p_ptr->publ.congested = 0; 1400 p_ptr->publ.congested = 0;
1384 /* Just calculate msg length and return */ 1401 /* Just calculate msg length and return */
1385 return msg_calc_data_size(msg_sect, num_sect); 1402 return msg_calc_data_size(msg_sect, num_sect);
1386 } 1403 }
1387 return -ELINKCONG; 1404 return -ELINKCONG;
1388 } 1405 }
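Callers on a reliable connection must be prepared for -ELINKCONG and should retry only after the port's wakeup (continue) callback fires; on an unreliable port the message is simply dropped and its length returned. A sketch of a single-section send with kernel-resident data, as permitted by the !sender->user_port path above:

static int example_send(u32 port_ref, void *data, size_t len)
{
	struct iovec iov = { .iov_base = data, .iov_len = len };
	int res;

	res = tipc_send(port_ref, 1, &iov);
	if (res == -ELINKCONG) {
		/* congested: wait for the continue_event_cb before retrying */
	}
	return res;
}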
1389 1406
1390 /** 1407 /**
1391 * tipc_send_buf - send message buffer on connection 1408 * tipc_send_buf - send message buffer on connection
1392 */ 1409 */
1393 1410
1394 int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz) 1411 int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1395 { 1412 {
1396 struct port *p_ptr; 1413 struct port *p_ptr;
1397 struct tipc_msg *msg; 1414 struct tipc_msg *msg;
1398 u32 destnode; 1415 u32 destnode;
1399 u32 hsz; 1416 u32 hsz;
1400 u32 sz; 1417 u32 sz;
1401 u32 res; 1418 u32 res;
1402 1419
1403 p_ptr = tipc_port_deref(ref); 1420 p_ptr = tipc_port_deref(ref);
1404 if (!p_ptr || !p_ptr->publ.connected) 1421 if (!p_ptr || !p_ptr->publ.connected)
1405 return -EINVAL; 1422 return -EINVAL;
1406 1423
1407 msg = &p_ptr->publ.phdr; 1424 msg = &p_ptr->publ.phdr;
1408 hsz = msg_hdr_sz(msg); 1425 hsz = msg_hdr_sz(msg);
1409 sz = hsz + dsz; 1426 sz = hsz + dsz;
1410 msg_set_size(msg, sz); 1427 msg_set_size(msg, sz);
1411 if (skb_cow(buf, hsz)) 1428 if (skb_cow(buf, hsz))
1412 return -ENOMEM; 1429 return -ENOMEM;
1413 1430
1414 skb_push(buf, hsz); 1431 skb_push(buf, hsz);
1415 skb_copy_to_linear_data(buf, msg, hsz); 1432 skb_copy_to_linear_data(buf, msg, hsz);
1416 destnode = msg_destnode(msg); 1433 destnode = msg_destnode(msg);
1417 p_ptr->publ.congested = 1; 1434 p_ptr->publ.congested = 1;
1418 if (!tipc_port_congested(p_ptr)) { 1435 if (!tipc_port_congested(p_ptr)) {
1419 if (likely(destnode != tipc_own_addr)) 1436 if (likely(destnode != tipc_own_addr))
1420 res = tipc_send_buf_fast(buf, destnode); 1437 res = tipc_send_buf_fast(buf, destnode);
1421 else { 1438 else {
1422 tipc_port_recv_msg(buf); 1439 tipc_port_recv_msg(buf);
1423 res = sz; 1440 res = sz;
1424 } 1441 }
1425 if (likely(res != -ELINKCONG)) { 1442 if (likely(res != -ELINKCONG)) {
1426 port_incr_out_seqno(p_ptr); 1443 port_incr_out_seqno(p_ptr);
1427 p_ptr->sent++; 1444 p_ptr->sent++;
1428 p_ptr->publ.congested = 0; 1445 p_ptr->publ.congested = 0;
1429 return res; 1446 return res;
1430 } 1447 }
1431 } 1448 }
1432 if (port_unreliable(p_ptr)) { 1449 if (port_unreliable(p_ptr)) {
1433 p_ptr->publ.congested = 0; 1450 p_ptr->publ.congested = 0;
1434 return dsz; 1451 return dsz;
1435 } 1452 }
1436 return -ELINKCONG; 1453 return -ELINKCONG;
1437 } 1454 }
1438 1455
1439 /** 1456 /**
1440 * tipc_forward2name - forward message sections to port name 1457 * tipc_forward2name - forward message sections to port name
1441 */ 1458 */
1442 1459
1443 int tipc_forward2name(u32 ref, 1460 int tipc_forward2name(u32 ref,
1444 struct tipc_name const *name, 1461 struct tipc_name const *name,
1445 u32 domain, 1462 u32 domain,
1446 u32 num_sect, 1463 u32 num_sect,
1447 struct iovec const *msg_sect, 1464 struct iovec const *msg_sect,
1448 struct tipc_portid const *orig, 1465 struct tipc_portid const *orig,
1449 unsigned int importance) 1466 unsigned int importance)
1450 { 1467 {
1451 struct port *p_ptr; 1468 struct port *p_ptr;
1452 struct tipc_msg *msg; 1469 struct tipc_msg *msg;
1453 u32 destnode = domain; 1470 u32 destnode = domain;
1454 u32 destport = 0; 1471 u32 destport = 0;
1455 int res; 1472 int res;
1456 1473
1457 p_ptr = tipc_port_deref(ref); 1474 p_ptr = tipc_port_deref(ref);
1458 if (!p_ptr || p_ptr->publ.connected) 1475 if (!p_ptr || p_ptr->publ.connected)
1459 return -EINVAL; 1476 return -EINVAL;
1460 1477
1461 msg = &p_ptr->publ.phdr; 1478 msg = &p_ptr->publ.phdr;
1462 msg_set_type(msg, TIPC_NAMED_MSG); 1479 msg_set_type(msg, TIPC_NAMED_MSG);
1463 msg_set_orignode(msg, orig->node); 1480 msg_set_orignode(msg, orig->node);
1464 msg_set_origport(msg, orig->ref); 1481 msg_set_origport(msg, orig->ref);
1465 msg_set_hdr_sz(msg, LONG_H_SIZE); 1482 msg_set_hdr_sz(msg, LONG_H_SIZE);
1466 msg_set_nametype(msg, name->type); 1483 msg_set_nametype(msg, name->type);
1467 msg_set_nameinst(msg, name->instance); 1484 msg_set_nameinst(msg, name->instance);
1468 msg_set_lookup_scope(msg, addr_scope(domain)); 1485 msg_set_lookup_scope(msg, addr_scope(domain));
1469 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1486 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1470 msg_set_importance(msg,importance); 1487 msg_set_importance(msg,importance);
1471 destport = tipc_nametbl_translate(name->type, name->instance, &destnode); 1488 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1472 msg_set_destnode(msg, destnode); 1489 msg_set_destnode(msg, destnode);
1473 msg_set_destport(msg, destport); 1490 msg_set_destport(msg, destport);
1474 1491
1475 if (likely(destport || destnode)) { 1492 if (likely(destport || destnode)) {
1476 p_ptr->sent++; 1493 p_ptr->sent++;
1477 if (likely(destnode == tipc_own_addr)) 1494 if (likely(destnode == tipc_own_addr))
1478 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1495 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1479 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1496 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1480 destnode); 1497 destnode);
1481 if (likely(res != -ELINKCONG)) 1498 if (likely(res != -ELINKCONG))
1482 return res; 1499 return res;
1483 if (port_unreliable(p_ptr)) { 1500 if (port_unreliable(p_ptr)) {
1484 /* Just calculate msg length and return */ 1501 /* Just calculate msg length and return */
1485 return msg_calc_data_size(msg_sect, num_sect); 1502 return msg_calc_data_size(msg_sect, num_sect);
1486 } 1503 }
1487 return -ELINKCONG; 1504 return -ELINKCONG;
1488 } 1505 }
1489 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1506 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1490 TIPC_ERR_NO_NAME); 1507 TIPC_ERR_NO_NAME);
1491 } 1508 }
1492 1509
1493 /** 1510 /**
1494 * tipc_send2name - send message sections to port name 1511 * tipc_send2name - send message sections to port name
1495 */ 1512 */
1496 1513
1497 int tipc_send2name(u32 ref, 1514 int tipc_send2name(u32 ref,
1498 struct tipc_name const *name, 1515 struct tipc_name const *name,
1499 unsigned int domain, 1516 unsigned int domain,
1500 unsigned int num_sect, 1517 unsigned int num_sect,
1501 struct iovec const *msg_sect) 1518 struct iovec const *msg_sect)
1502 { 1519 {
1503 struct tipc_portid orig; 1520 struct tipc_portid orig;
1504 1521
1505 orig.ref = ref; 1522 orig.ref = ref;
1506 orig.node = tipc_own_addr; 1523 orig.node = tipc_own_addr;
1507 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig, 1524 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1508 TIPC_PORT_IMPORTANCE); 1525 TIPC_PORT_IMPORTANCE);
1509 } 1526 }
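Connectionless senders address by service name instead; a sketch reusing the hypothetical type from the publish example (a domain of 0 is assumed here to mean an unrestricted lookup):

static int example_send_to_name(u32 port_ref, void *data, size_t len)
{
	struct tipc_name name = { .type = 18888, .instance = 150 };
	struct iovec iov = { .iov_base = data, .iov_len = len };

	return tipc_send2name(port_ref, &name, 0, 1, &iov);
}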
1510 1527
1511 /** 1528 /**
1512 * tipc_forward_buf2name - forward message buffer to port name 1529 * tipc_forward_buf2name - forward message buffer to port name
1513 */ 1530 */
1514 1531
1515 int tipc_forward_buf2name(u32 ref, 1532 int tipc_forward_buf2name(u32 ref,
1516 struct tipc_name const *name, 1533 struct tipc_name const *name,
1517 u32 domain, 1534 u32 domain,
1518 struct sk_buff *buf, 1535 struct sk_buff *buf,
1519 unsigned int dsz, 1536 unsigned int dsz,
1520 struct tipc_portid const *orig, 1537 struct tipc_portid const *orig,
1521 unsigned int importance) 1538 unsigned int importance)
1522 { 1539 {
1523 struct port *p_ptr; 1540 struct port *p_ptr;
1524 struct tipc_msg *msg; 1541 struct tipc_msg *msg;
1525 u32 destnode = domain; 1542 u32 destnode = domain;
1526 u32 destport = 0; 1543 u32 destport = 0;
1527 int res; 1544 int res;
1528 1545
1529 p_ptr = (struct port *)tipc_ref_deref(ref); 1546 p_ptr = (struct port *)tipc_ref_deref(ref);
1530 if (!p_ptr || p_ptr->publ.connected) 1547 if (!p_ptr || p_ptr->publ.connected)
1531 return -EINVAL; 1548 return -EINVAL;
1532 1549
1533 msg = &p_ptr->publ.phdr; 1550 msg = &p_ptr->publ.phdr;
1534 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1551 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1535 msg_set_importance(msg, importance); 1552 msg_set_importance(msg, importance);
1536 msg_set_type(msg, TIPC_NAMED_MSG); 1553 msg_set_type(msg, TIPC_NAMED_MSG);
1537 msg_set_orignode(msg, orig->node); 1554 msg_set_orignode(msg, orig->node);
1538 msg_set_origport(msg, orig->ref); 1555 msg_set_origport(msg, orig->ref);
1539 msg_set_nametype(msg, name->type); 1556 msg_set_nametype(msg, name->type);
1540 msg_set_nameinst(msg, name->instance); 1557 msg_set_nameinst(msg, name->instance);
1541 msg_set_lookup_scope(msg, addr_scope(domain)); 1558 msg_set_lookup_scope(msg, addr_scope(domain));
1542 msg_set_hdr_sz(msg, LONG_H_SIZE); 1559 msg_set_hdr_sz(msg, LONG_H_SIZE);
1543 msg_set_size(msg, LONG_H_SIZE + dsz); 1560 msg_set_size(msg, LONG_H_SIZE + dsz);
1544 destport = tipc_nametbl_translate(name->type, name->instance, &destnode); 1561 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1545 msg_set_destnode(msg, destnode); 1562 msg_set_destnode(msg, destnode);
1546 msg_set_destport(msg, destport); 1563 msg_set_destport(msg, destport);
1547 msg_dbg(msg, "forw2name ==> "); 1564 msg_dbg(msg, "forw2name ==> ");
1548 if (skb_cow(buf, LONG_H_SIZE)) 1565 if (skb_cow(buf, LONG_H_SIZE))
1549 return -ENOMEM; 1566 return -ENOMEM;
1550 skb_push(buf, LONG_H_SIZE); 1567 skb_push(buf, LONG_H_SIZE);
1551 skb_copy_to_linear_data(buf, msg, LONG_H_SIZE); 1568 skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
1552 msg_dbg(buf_msg(buf),"PREP:"); 1569 msg_dbg(buf_msg(buf),"PREP:");
1553 if (likely(destport || destnode)) { 1570 if (likely(destport || destnode)) {
1554 p_ptr->sent++; 1571 p_ptr->sent++;
1555 if (destnode == tipc_own_addr) 1572 if (destnode == tipc_own_addr)
1556 return tipc_port_recv_msg(buf); 1573 return tipc_port_recv_msg(buf);
1557 res = tipc_send_buf_fast(buf, destnode); 1574 res = tipc_send_buf_fast(buf, destnode);
1558 if (likely(res != -ELINKCONG)) 1575 if (likely(res != -ELINKCONG))
1559 return res; 1576 return res;
1560 if (port_unreliable(p_ptr)) 1577 if (port_unreliable(p_ptr))
1561 return dsz; 1578 return dsz;
1562 return -ELINKCONG; 1579 return -ELINKCONG;
1563 } 1580 }
1564 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME); 1581 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
1565 } 1582 }
1566 1583
1567 /** 1584 /**
1568 * tipc_send_buf2name - send message buffer to port name 1585 * tipc_send_buf2name - send message buffer to port name
1569 */ 1586 */
1570 1587
1571 int tipc_send_buf2name(u32 ref, 1588 int tipc_send_buf2name(u32 ref,
1572 struct tipc_name const *dest, 1589 struct tipc_name const *dest,
1573 u32 domain, 1590 u32 domain,
1574 struct sk_buff *buf, 1591 struct sk_buff *buf,
1575 unsigned int dsz) 1592 unsigned int dsz)
1576 { 1593 {
1577 struct tipc_portid orig; 1594 struct tipc_portid orig;
1578 1595
1579 orig.ref = ref; 1596 orig.ref = ref;
1580 orig.node = tipc_own_addr; 1597 orig.node = tipc_own_addr;
1581 return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig, 1598 return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
1582 TIPC_PORT_IMPORTANCE); 1599 TIPC_PORT_IMPORTANCE);
1583 } 1600 }
1584 1601
1585 /** 1602 /**
1586 * tipc_forward2port - forward message sections to port identity 1603 * tipc_forward2port - forward message sections to port identity
1587 */ 1604 */
1588 1605
1589 int tipc_forward2port(u32 ref, 1606 int tipc_forward2port(u32 ref,
1590 struct tipc_portid const *dest, 1607 struct tipc_portid const *dest,
1591 unsigned int num_sect, 1608 unsigned int num_sect,
1592 struct iovec const *msg_sect, 1609 struct iovec const *msg_sect,
1593 struct tipc_portid const *orig, 1610 struct tipc_portid const *orig,
1594 unsigned int importance) 1611 unsigned int importance)
1595 { 1612 {
1596 struct port *p_ptr; 1613 struct port *p_ptr;
1597 struct tipc_msg *msg; 1614 struct tipc_msg *msg;
1598 int res; 1615 int res;
1599 1616
1600 p_ptr = tipc_port_deref(ref); 1617 p_ptr = tipc_port_deref(ref);
1601 if (!p_ptr || p_ptr->publ.connected) 1618 if (!p_ptr || p_ptr->publ.connected)
1602 return -EINVAL; 1619 return -EINVAL;
1603 1620
1604 msg = &p_ptr->publ.phdr; 1621 msg = &p_ptr->publ.phdr;
1605 msg_set_type(msg, TIPC_DIRECT_MSG); 1622 msg_set_type(msg, TIPC_DIRECT_MSG);
1606 msg_set_orignode(msg, orig->node); 1623 msg_set_orignode(msg, orig->node);
1607 msg_set_origport(msg, orig->ref); 1624 msg_set_origport(msg, orig->ref);
1608 msg_set_destnode(msg, dest->node); 1625 msg_set_destnode(msg, dest->node);
1609 msg_set_destport(msg, dest->ref); 1626 msg_set_destport(msg, dest->ref);
1610 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1627 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1611 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1628 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1612 msg_set_importance(msg, importance); 1629 msg_set_importance(msg, importance);
1613 p_ptr->sent++; 1630 p_ptr->sent++;
1614 if (dest->node == tipc_own_addr) 1631 if (dest->node == tipc_own_addr)
1615 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1632 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1616 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node); 1633 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1617 if (likely(res != -ELINKCONG)) 1634 if (likely(res != -ELINKCONG))
1618 return res; 1635 return res;
1619 if (port_unreliable(p_ptr)) { 1636 if (port_unreliable(p_ptr)) {
1620 /* Just calculate msg length and return */ 1637 /* Just calculate msg length and return */
1621 return msg_calc_data_size(msg_sect, num_sect); 1638 return msg_calc_data_size(msg_sect, num_sect);
1622 } 1639 }
1623 return -ELINKCONG; 1640 return -ELINKCONG;
1624 } 1641 }
1625 1642
1626 /** 1643 /**
1627 * tipc_send2port - send message sections to port identity 1644 * tipc_send2port - send message sections to port identity
1628 */ 1645 */
1629 1646
1630 int tipc_send2port(u32 ref, 1647 int tipc_send2port(u32 ref,
1631 struct tipc_portid const *dest, 1648 struct tipc_portid const *dest,
1632 unsigned int num_sect, 1649 unsigned int num_sect,
1633 struct iovec const *msg_sect) 1650 struct iovec const *msg_sect)
1634 { 1651 {
1635 struct tipc_portid orig; 1652 struct tipc_portid orig;
1636 1653
1637 orig.ref = ref; 1654 orig.ref = ref;
1638 orig.node = tipc_own_addr; 1655 orig.node = tipc_own_addr;
1639 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig, 1656 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1640 TIPC_PORT_IMPORTANCE); 1657 TIPC_PORT_IMPORTANCE);
1641 } 1658 }
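Replying to a message's originator uses the port identity rather than a name; a short sketch:

static int example_reply(u32 port_ref, struct tipc_portid const *orig,
			 void *data, size_t len)
{
	struct iovec iov = { .iov_base = data, .iov_len = len };

	return tipc_send2port(port_ref, orig, 1, &iov);
}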
1642 1659
1643 /** 1660 /**
1644 * tipc_forward_buf2port - forward message buffer to port identity 1661 * tipc_forward_buf2port - forward message buffer to port identity
1645 */ 1662 */
1646 int tipc_forward_buf2port(u32 ref, 1663 int tipc_forward_buf2port(u32 ref,
1647 struct tipc_portid const *dest, 1664 struct tipc_portid const *dest,
1648 struct sk_buff *buf, 1665 struct sk_buff *buf,
1649 unsigned int dsz, 1666 unsigned int dsz,
1650 struct tipc_portid const *orig, 1667 struct tipc_portid const *orig,
1651 unsigned int importance) 1668 unsigned int importance)
1652 { 1669 {
1653 struct port *p_ptr; 1670 struct port *p_ptr;
1654 struct tipc_msg *msg; 1671 struct tipc_msg *msg;
1655 int res; 1672 int res;
1656 1673
1657 p_ptr = (struct port *)tipc_ref_deref(ref); 1674 p_ptr = (struct port *)tipc_ref_deref(ref);
1658 if (!p_ptr || p_ptr->publ.connected) 1675 if (!p_ptr || p_ptr->publ.connected)
1659 return -EINVAL; 1676 return -EINVAL;
1660 1677
1661 msg = &p_ptr->publ.phdr; 1678 msg = &p_ptr->publ.phdr;
1662 msg_set_type(msg, TIPC_DIRECT_MSG); 1679 msg_set_type(msg, TIPC_DIRECT_MSG);
1663 msg_set_orignode(msg, orig->node); 1680 msg_set_orignode(msg, orig->node);
1664 msg_set_origport(msg, orig->ref); 1681 msg_set_origport(msg, orig->ref);
1665 msg_set_destnode(msg, dest->node); 1682 msg_set_destnode(msg, dest->node);
1666 msg_set_destport(msg, dest->ref); 1683 msg_set_destport(msg, dest->ref);
1667 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1684 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1668 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1685 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1669 msg_set_importance(msg, importance); 1686 msg_set_importance(msg, importance);
1670 msg_set_size(msg, DIR_MSG_H_SIZE + dsz); 1687 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
1671 if (skb_cow(buf, DIR_MSG_H_SIZE)) 1688 if (skb_cow(buf, DIR_MSG_H_SIZE))
1672 return -ENOMEM; 1689 return -ENOMEM;
1673 1690
1674 skb_push(buf, DIR_MSG_H_SIZE); 1691 skb_push(buf, DIR_MSG_H_SIZE);
1675 skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE); 1692 skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE);
1676 msg_dbg(msg, "buf2port: "); 1693 msg_dbg(msg, "buf2port: ");
1677 p_ptr->sent++; 1694 p_ptr->sent++;
1678 if (dest->node == tipc_own_addr) 1695 if (dest->node == tipc_own_addr)
1679 return tipc_port_recv_msg(buf); 1696 return tipc_port_recv_msg(buf);
1680 res = tipc_send_buf_fast(buf, dest->node); 1697 res = tipc_send_buf_fast(buf, dest->node);
1681 if (likely(res != -ELINKCONG)) 1698 if (likely(res != -ELINKCONG))
1682 return res; 1699 return res;
1683 if (port_unreliable(p_ptr)) 1700 if (port_unreliable(p_ptr))
1684 return dsz; 1701 return dsz;
1685 return -ELINKCONG; 1702 return -ELINKCONG;
1686 } 1703 }
1687 1704
1688 /** 1705 /**
1689 * tipc_send_buf2port - send message buffer to port identity 1706 * tipc_send_buf2port - send message buffer to port identity
1690 */ 1707 */
1691 1708
1692 int tipc_send_buf2port(u32 ref, 1709 int tipc_send_buf2port(u32 ref,
1693 struct tipc_portid const *dest, 1710 struct tipc_portid const *dest,
1694 struct sk_buff *buf, 1711 struct sk_buff *buf,
1695 unsigned int dsz) 1712 unsigned int dsz)
1696 { 1713 {
1697 struct tipc_portid orig; 1714 struct tipc_portid orig;
1698 1715
1699 orig.ref = ref; 1716 orig.ref = ref;
1700 orig.node = tipc_own_addr; 1717 orig.node = tipc_own_addr;
1 /* 1 /*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, Ericsson AB 4 * Copyright (c) 2001-2007, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems 5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met: 9 * modification, are permitted provided that the following conditions are met:
10 * 10 *
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its 16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from 17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission. 18 * this software without specific prior written permission.
19 * 19 *
20 * Alternatively, this software may be distributed under the terms of the 20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free 21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation. 22 * Software Foundation.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37 #include <linux/module.h> 37 #include <linux/module.h>
38 #include <linux/types.h> 38 #include <linux/types.h>
39 #include <linux/net.h> 39 #include <linux/net.h>
40 #include <linux/socket.h> 40 #include <linux/socket.h>
41 #include <linux/errno.h> 41 #include <linux/errno.h>
42 #include <linux/mm.h> 42 #include <linux/mm.h>
43 #include <linux/slab.h> 43 #include <linux/slab.h>
44 #include <linux/poll.h> 44 #include <linux/poll.h>
45 #include <linux/fcntl.h> 45 #include <linux/fcntl.h>
46 #include <linux/mutex.h>
47 #include <asm/string.h> 46 #include <asm/string.h>
48 #include <asm/atomic.h> 47 #include <asm/atomic.h>
49 #include <net/sock.h> 48 #include <net/sock.h>
50 49
51 #include <linux/tipc.h> 50 #include <linux/tipc.h>
52 #include <linux/tipc_config.h> 51 #include <linux/tipc_config.h>
53 #include <net/tipc/tipc_msg.h> 52 #include <net/tipc/tipc_msg.h>
54 #include <net/tipc/tipc_port.h> 53 #include <net/tipc/tipc_port.h>
55 54
56 #include "core.h" 55 #include "core.h"
57 56
58 #define SS_LISTENING -1 /* socket is listening */ 57 #define SS_LISTENING -1 /* socket is listening */
59 #define SS_READY -2 /* socket is connectionless */ 58 #define SS_READY -2 /* socket is connectionless */
60 59
61 #define OVERLOAD_LIMIT_BASE 5000 60 #define OVERLOAD_LIMIT_BASE 5000
62 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 61 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
63 62
64 struct tipc_sock { 63 struct tipc_sock {
65 struct sock sk; 64 struct sock sk;
66 struct tipc_port *p; 65 struct tipc_port *p;
67 struct mutex lock;
68 }; 66 };
69 67
70 #define tipc_sk(sk) ((struct tipc_sock*)sk) 68 #define tipc_sk(sk) ((struct tipc_sock *)(sk))
69 #define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
71 70
71 static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
72 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf); 72 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
73 static void wakeupdispatch(struct tipc_port *tport); 73 static void wakeupdispatch(struct tipc_port *tport);
74 74
75 static const struct proto_ops packet_ops; 75 static const struct proto_ops packet_ops;
76 static const struct proto_ops stream_ops; 76 static const struct proto_ops stream_ops;
77 static const struct proto_ops msg_ops; 77 static const struct proto_ops msg_ops;
78 78
79 static struct proto tipc_proto; 79 static struct proto tipc_proto;
80 80
81 static int sockets_enabled = 0; 81 static int sockets_enabled = 0;
82 82
83 static atomic_t tipc_queue_size = ATOMIC_INIT(0); 83 static atomic_t tipc_queue_size = ATOMIC_INIT(0);
84 84
85
86 /* 85 /*
87 * sock_lock(): Lock a port/socket pair. lock_sock() can 86 * Revised TIPC socket locking policy:
88 * not be used here, since the same lock must protect ports 87 *
89 * with non-socket interfaces. 88 * Most socket operations take the standard socket lock when they start
90 * See net.c for description of locking policy. 89 * and hold it until they finish (or until they need to sleep). Acquiring
90 * this lock grants the owner exclusive access to the fields of the socket
91 * data structures, with the exception of the backlog queue. A few socket
92 * operations can be done without taking the socket lock because they only
93 * read socket information that never changes during the life of the socket.
94 *
95 * Socket operations may acquire the lock for the associated TIPC port if they
96 * need to perform an operation on the port. If any routine needs to acquire
97 * both the socket lock and the port lock it must take the socket lock first
98 * to avoid the risk of deadlock.
99 *
100 * The dispatcher handling incoming messages cannot grab the socket lock in
101 * the standard fashion, since it is invoked at the BH level and cannot block.
102 * Instead, it checks to see if the socket lock is currently owned by someone,
103 * and either handles the message itself or adds it to the socket's backlog
104 * queue; in the latter case the queued message is processed once the process
105 * owning the socket lock releases it.
106 *
107 * NOTE: Releasing the socket lock while an operation is sleeping overcomes
108 * the problem of a blocked socket operation preventing any other operations
109 * from occurring. However, applications must be careful if they have
110 * multiple threads trying to send (or receive) on the same socket, as these
111 * operations might interfere with each other. For example, doing a connect
112 * and a receive at the same time might allow the receive to consume the
113 * ACK message meant for the connect. While additional work could be done
114 * to try to overcome this, it doesn't seem to be worthwhile at present.
115 *
116 * NOTE: Releasing the socket lock while an operation is sleeping also ensures
117 * that another operation that must be performed in a non-blocking manner is
118 * not delayed for very long because the lock has already been taken.
119 *
120 * NOTE: This code assumes that certain fields of a port/socket pair are
121 * constant over its lifetime; such fields can be examined without taking
122 * the socket lock and/or port lock, and do not need to be re-read even
123 * after resuming processing after waiting. These fields include:
124 * - socket type
125 * - pointer to socket sk structure (aka tipc_sock structure)
126 * - pointer to port structure
127 * - port reference
91 */ 128 */
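As a rough illustration of the policy just described (not the exact dispatch()/filter code added later in this file), a BH-level receive path following this scheme looks roughly like:

/* Illustrative pattern only: defer to the backlog when a process owns the lock */
static u32 example_dispatch(struct sock *sk, struct sk_buff *buf)
{
	u32 res = TIPC_OK;

	bh_lock_sock(sk);			/* BH-safe part of the socket lock */
	if (!sock_owned_by_user(sk))
		res = backlog_rcv(sk, buf);	/* no owner: process immediately */
	else
		sk_add_backlog(sk, buf);	/* processed later by release_sock() */
	bh_unlock_sock(sk);

	return res;
}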
92 static void sock_lock(struct tipc_sock* tsock) 129
130 /**
131 * advance_rx_queue - discard first buffer in socket receive queue
132 *
133 * Caller must hold socket lock
134 */
135
136 static void advance_rx_queue(struct sock *sk)
93 { 137 {
94 spin_lock_bh(tsock->p->lock); 138 buf_discard(__skb_dequeue(&sk->sk_receive_queue));
139 atomic_dec(&tipc_queue_size);
95 } 140 }
96 141
97 /* 142 /**
98 * sock_unlock(): Unlock a port/socket pair 143 * discard_rx_queue - discard all buffers in socket receive queue
144 *
145 * Caller must hold socket lock
99 */ 146 */
100 static void sock_unlock(struct tipc_sock* tsock) 147
148 static void discard_rx_queue(struct sock *sk)
101 { 149 {
102 spin_unlock_bh(tsock->p->lock); 150 struct sk_buff *buf;
151
152 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
153 atomic_dec(&tipc_queue_size);
154 buf_discard(buf);
155 }
103 } 156 }
104 157
105 /** 158 /**
106 * advance_queue - discard first buffer in queue 159 * reject_rx_queue - reject all buffers in socket receive queue
107 * @tsock: TIPC socket 160 *
161 * Caller must hold socket lock
108 */ 162 */
109 163
110 static void advance_queue(struct tipc_sock *tsock) 164 static void reject_rx_queue(struct sock *sk)
111 { 165 {
112 sock_lock(tsock); 166 struct sk_buff *buf;
113 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue)); 167
114 sock_unlock(tsock); 168 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
115 atomic_dec(&tipc_queue_size); 169 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
170 atomic_dec(&tipc_queue_size);
171 }
116 } 172 }
117 173
118 /** 174 /**
119 * tipc_create - create a TIPC socket 175 * tipc_create - create a TIPC socket
176 * @net: network namespace (must be default network)
120 * @sock: pre-allocated socket structure 177 * @sock: pre-allocated socket structure
121 * @protocol: protocol indicator (must be 0) 178 * @protocol: protocol indicator (must be 0)
122 * 179 *
123 * This routine creates and attaches a 'struct sock' to the 'struct socket', 180 * This routine creates additional data structures used by the TIPC socket,
124 * then create and attaches a TIPC port to the 'struct sock' part. 181 * initializes them, and links them together.
125 * 182 *
126 * Returns 0 on success, errno otherwise 183 * Returns 0 on success, errno otherwise
127 */ 184 */
185
128 static int tipc_create(struct net *net, struct socket *sock, int protocol) 186 static int tipc_create(struct net *net, struct socket *sock, int protocol)
129 { 187 {
130 struct tipc_sock *tsock; 188 const struct proto_ops *ops;
131 struct tipc_port *port; 189 socket_state state;
132 struct sock *sk; 190 struct sock *sk;
133 u32 ref; 191 u32 portref;
134 192
193 /* Validate arguments */
194
135 if (net != &init_net) 195 if (net != &init_net)
136 return -EAFNOSUPPORT; 196 return -EAFNOSUPPORT;
137 197
138 if (unlikely(protocol != 0)) 198 if (unlikely(protocol != 0))
139 return -EPROTONOSUPPORT; 199 return -EPROTONOSUPPORT;
140 200
141 ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
142 if (unlikely(!ref))
143 return -ENOMEM;
144
145 sock->state = SS_UNCONNECTED;
146
147 switch (sock->type) { 201 switch (sock->type) {
148 case SOCK_STREAM: 202 case SOCK_STREAM:
149 sock->ops = &stream_ops; 203 ops = &stream_ops;
204 state = SS_UNCONNECTED;
150 break; 205 break;
151 case SOCK_SEQPACKET: 206 case SOCK_SEQPACKET:
152 sock->ops = &packet_ops; 207 ops = &packet_ops;
208 state = SS_UNCONNECTED;
153 break; 209 break;
154 case SOCK_DGRAM: 210 case SOCK_DGRAM:
155 tipc_set_portunreliable(ref, 1);
156 /* fall through */
157 case SOCK_RDM: 211 case SOCK_RDM:
158 tipc_set_portunreturnable(ref, 1); 212 ops = &msg_ops;
159 sock->ops = &msg_ops; 213 state = SS_READY;
160 sock->state = SS_READY;
161 break; 214 break;
162 default: 215 default:
163 tipc_deleteport(ref);
164 return -EPROTOTYPE; 216 return -EPROTOTYPE;
165 } 217 }
166 218
219 /* Allocate socket's protocol area */
220
167 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); 221 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
168 if (!sk) { 222 if (sk == NULL)
169 tipc_deleteport(ref);
170 return -ENOMEM; 223 return -ENOMEM;
171 }
172 224
173 sock_init_data(sock, sk); 225 /* Allocate TIPC port for socket to use */
174 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
175 226
176 tsock = tipc_sk(sk); 227 portref = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
177 port = tipc_get_port(ref); 228 TIPC_LOW_IMPORTANCE);
229 if (unlikely(portref == 0)) {
230 sk_free(sk);
231 return -ENOMEM;
232 }
178 233
179 tsock->p = port; 234 /* Finish initializing socket data structures */
180 port->usr_handle = tsock;
181 235
182 mutex_init(&tsock->lock); 236 sock->ops = ops;
237 sock->state = state;
183 238
184 dbg("sock_create: %x\n",tsock); 239 sock_init_data(sock, sk);
240 sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
241 sk->sk_backlog_rcv = backlog_rcv;
242 tipc_sk(sk)->p = tipc_get_port(portref);
185 243
186 atomic_inc(&tipc_user_count); 244 if (sock->state == SS_READY) {
245 tipc_set_portunreturnable(portref, 1);
246 if (sock->type == SOCK_DGRAM)
247 tipc_set_portunreliable(portref, 1);
248 }
187 249
250 atomic_inc(&tipc_user_count);
188 return 0; 251 return 0;
189 } 252 }
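For orientation, a minimal userspace sketch (not part of this commit) that exercises the socket-type switch in tipc_create() above: every type is created with protocol 0, the only value accepted, and SOCK_RDM/SOCK_DGRAM sockets come up in the SS_READY state while the connection-oriented types start out SS_UNCONNECTED. The AF_TIPC fallback definition is an assumption for toolchains whose headers predate it.

/* Userspace illustration only; assumes a kernel with TIPC support enabled. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_TIPC
#define AF_TIPC 30			/* value taken from linux/socket.h */
#endif

int main(void)
{
	int types[] = { SOCK_STREAM, SOCK_SEQPACKET, SOCK_RDM, SOCK_DGRAM };
	const char *names[] = { "SOCK_STREAM", "SOCK_SEQPACKET",
				"SOCK_RDM", "SOCK_DGRAM" };
	int i;

	for (i = 0; i < 4; i++) {
		int sd = socket(AF_TIPC, types[i], 0);	/* protocol must be 0 */

		if (sd < 0)
			printf("%s: %s\n", names[i], strerror(errno));
		else {
			printf("%s: fd %d\n", names[i], sd);
			close(sd);
		}
	}
	return 0;
}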
190 253
191 /** 254 /**
192 * release - destroy a TIPC socket 255 * release - destroy a TIPC socket
193 * @sock: socket to destroy 256 * @sock: socket to destroy
194 * 257 *
195 * This routine cleans up any messages that are still queued on the socket. 258 * This routine cleans up any messages that are still queued on the socket.
196 * For DGRAM and RDM socket types, all queued messages are rejected. 259 * For DGRAM and RDM socket types, all queued messages are rejected.
197 * For SEQPACKET and STREAM socket types, the first message is rejected 260 * For SEQPACKET and STREAM socket types, the first message is rejected
198 * and any others are discarded. (If the first message on a STREAM socket 261 * and any others are discarded. (If the first message on a STREAM socket
199 * is partially-read, it is discarded and the next one is rejected instead.) 262 * is partially-read, it is discarded and the next one is rejected instead.)
200 * 263 *
201 * NOTE: Rejected messages are not necessarily returned to the sender! They 264 * NOTE: Rejected messages are not necessarily returned to the sender! They
202 * are returned or discarded according to the "destination droppable" setting 265 * are returned or discarded according to the "destination droppable" setting
203 * specified for the message by the sender. 266 * specified for the message by the sender.
204 * 267 *
205 * Returns 0 on success, errno otherwise 268 * Returns 0 on success, errno otherwise
206 */ 269 */
207 270
208 static int release(struct socket *sock) 271 static int release(struct socket *sock)
209 { 272 {
210 struct tipc_sock *tsock = tipc_sk(sock->sk);
211 struct sock *sk = sock->sk; 273 struct sock *sk = sock->sk;
212 int res = TIPC_OK; 274 struct tipc_port *tport;
213 struct sk_buff *buf; 275 struct sk_buff *buf;
276 int res;
214 277
215 dbg("sock_delete: %x\n",tsock); 278 /*
216 if (!tsock) 279 * Exit if socket isn't fully initialized (occurs when a failed accept()
280 * releases a pre-allocated child socket that was never used)
281 */
282
283 if (sk == NULL)
217 return 0; 284 return 0;
218 mutex_lock(&tsock->lock);
219 if (!sock->sk) {
220 mutex_unlock(&tsock->lock);
221 return 0;
222 }
223 285
224 /* Reject unreceived messages, unless no longer connected */ 286 tport = tipc_sk_port(sk);
287 lock_sock(sk);
225 288
289 /*
290 * Reject all unreceived messages, except on an active connection
291 * (which disconnects locally & sends a 'FIN+' to peer)
292 */
293
226 while (sock->state != SS_DISCONNECTING) { 294 while (sock->state != SS_DISCONNECTING) {
227 sock_lock(tsock); 295 buf = __skb_dequeue(&sk->sk_receive_queue);
228 buf = skb_dequeue(&sk->sk_receive_queue); 296 if (buf == NULL)
229 if (!buf)
230 tsock->p->usr_handle = NULL;
231 sock_unlock(tsock);
232 if (!buf)
233 break; 297 break;
298 atomic_dec(&tipc_queue_size);
234 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) 299 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
235 buf_discard(buf); 300 buf_discard(buf);
236 else 301 else {
302 if ((sock->state == SS_CONNECTING) ||
303 (sock->state == SS_CONNECTED)) {
304 sock->state = SS_DISCONNECTING;
305 tipc_disconnect(tport->ref);
306 }
237 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 307 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
238 atomic_dec(&tipc_queue_size); 308 }
239 } 309 }
240 310
241 /* Delete TIPC port */ 311 /*
312 * Delete TIPC port; this ensures no more messages are queued
313 * (also disconnects an active connection & sends a 'FIN-' to peer)
314 */
242 315
243 res = tipc_deleteport(tsock->p->ref); 316 res = tipc_deleteport(tport->ref);
244 sock->sk = NULL;
245 317
246 /* Discard any remaining messages */ 318 /* Discard any remaining (connection-based) messages in receive queue */
247 319
248 while ((buf = skb_dequeue(&sk->sk_receive_queue))) { 320 discard_rx_queue(sk);
249 buf_discard(buf);
250 atomic_dec(&tipc_queue_size);
251 }
252 321
253 mutex_unlock(&tsock->lock); 322 /* Reject any messages that accumulated in backlog queue */
254 323
324 sock->state = SS_DISCONNECTING;
325 release_sock(sk);
326
255 sock_put(sk); 327 sock_put(sk);
328 sock->sk = NULL;
256 329
257 atomic_dec(&tipc_user_count); 330 atomic_dec(&tipc_user_count);
258 return res; 331 return res;
259 } 332 }
260 333
261 /** 334 /**
262 * bind - associate or disassociate TIPC name(s) with a socket 335 * bind - associate or disassociate TIPC name(s) with a socket
263 * @sock: socket structure 336 * @sock: socket structure
264 * @uaddr: socket address describing name(s) and desired operation 337 * @uaddr: socket address describing name(s) and desired operation
265 * @uaddr_len: size of socket address data structure 338 * @uaddr_len: size of socket address data structure
266 * 339 *
267 * Name and name sequence binding is indicated using a positive scope value; 340 * Name and name sequence binding is indicated using a positive scope value;
268 * a negative scope value unbinds the specified name. Specifying no name 341 * a negative scope value unbinds the specified name. Specifying no name
269 * (i.e. a socket address length of 0) unbinds all names from the socket. 342 * (i.e. a socket address length of 0) unbinds all names from the socket.
270 * 343 *
271 * Returns 0 on success, errno otherwise 344 * Returns 0 on success, errno otherwise
345 *
346 * NOTE: This routine doesn't need to take the socket lock since it doesn't
347 * access any non-constant socket information.
272 */ 348 */
273 349
274 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) 350 static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
275 { 351 {
276 struct tipc_sock *tsock = tipc_sk(sock->sk);
277 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 352 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
278 int res; 353 u32 portref = tipc_sk_port(sock->sk)->ref;
279 354
280 if (mutex_lock_interruptible(&tsock->lock)) 355 if (unlikely(!uaddr_len))
281 return -ERESTARTSYS; 356 return tipc_withdraw(portref, 0, NULL);
282 357
283 if (unlikely(!uaddr_len)) { 358 if (uaddr_len < sizeof(struct sockaddr_tipc))
284 res = tipc_withdraw(tsock->p->ref, 0, NULL); 359 return -EINVAL;
285 goto exit; 360 if (addr->family != AF_TIPC)
286 } 361 return -EAFNOSUPPORT;
287 362
288 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
289 res = -EINVAL;
290 goto exit;
291 }
292
293 if (addr->family != AF_TIPC) {
294 res = -EAFNOSUPPORT;
295 goto exit;
296 }
297 if (addr->addrtype == TIPC_ADDR_NAME) 363 if (addr->addrtype == TIPC_ADDR_NAME)
298 addr->addr.nameseq.upper = addr->addr.nameseq.lower; 364 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
299 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) { 365 else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
300 res = -EAFNOSUPPORT; 366 return -EAFNOSUPPORT;
301 goto exit;
302 }
303 367
304 if (addr->scope > 0) 368 return (addr->scope > 0) ?
305 res = tipc_publish(tsock->p->ref, addr->scope, 369 tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
306 &addr->addr.nameseq); 370 tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
307 else
308 res = tipc_withdraw(tsock->p->ref, -addr->scope,
309 &addr->addr.nameseq);
310 exit:
311 mutex_unlock(&tsock->lock);
312 return res;
313 } 371 }
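A userspace sketch (not part of this commit) of the publish/withdraw convention documented above. The service type 18888 and instance range 0-99 are arbitrary example values; TIPC_CLUSTER_SCOPE and struct sockaddr_tipc come from linux/tipc.h.

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* Publish (unbind == 0) or withdraw (unbind != 0) a name sequence. */
static int publish_name(int sd, unsigned int type,
			unsigned int lower, unsigned int upper, int unbind)
{
	struct sockaddr_tipc addr;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_ADDR_NAMESEQ;
	addr.scope = unbind ? -TIPC_CLUSTER_SCOPE : TIPC_CLUSTER_SCOPE;
	addr.addr.nameseq.type = type;
	addr.addr.nameseq.lower = lower;
	addr.addr.nameseq.upper = upper;

	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}

Calling publish_name(sd, 18888, 0, 99, 0) publishes the sequence and publish_name(sd, 18888, 0, 99, 1) withdraws it again; per the comment above, passing a zero address length to bind() withdraws every name bound to the socket.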
314 372
315 /** 373 /**
316 * get_name - get port ID of socket or peer socket 374 * get_name - get port ID of socket or peer socket
317 * @sock: socket structure 375 * @sock: socket structure
318 * @uaddr: area for returned socket address 376 * @uaddr: area for returned socket address
319 * @uaddr_len: area for returned length of socket address 377 * @uaddr_len: area for returned length of socket address
320 * @peer: 0 to obtain socket name, 1 to obtain peer socket name 378 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
321 * 379 *
322 * Returns 0 on success, errno otherwise 380 * Returns 0 on success, errno otherwise
381 *
382 * NOTE: This routine doesn't need to take the socket lock since it doesn't
383 * access any non-constant socket information.
323 */ 384 */
324 385
325 static int get_name(struct socket *sock, struct sockaddr *uaddr, 386 static int get_name(struct socket *sock, struct sockaddr *uaddr,
326 int *uaddr_len, int peer) 387 int *uaddr_len, int peer)
327 { 388 {
328 struct tipc_sock *tsock = tipc_sk(sock->sk);
329 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 389 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
390 u32 portref = tipc_sk_port(sock->sk)->ref;
330 u32 res; 391 u32 res;
331 392
332 if (mutex_lock_interruptible(&tsock->lock)) 393 if (peer) {
333 return -ERESTARTSYS; 394 res = tipc_peer(portref, &addr->addr.id);
395 if (res)
396 return res;
397 } else {
398 tipc_ownidentity(portref, &addr->addr.id);
399 }
334 400
335 *uaddr_len = sizeof(*addr); 401 *uaddr_len = sizeof(*addr);
336 addr->addrtype = TIPC_ADDR_ID; 402 addr->addrtype = TIPC_ADDR_ID;
337 addr->family = AF_TIPC; 403 addr->family = AF_TIPC;
338 addr->scope = 0; 404 addr->scope = 0;
339 if (peer)
340 res = tipc_peer(tsock->p->ref, &addr->addr.id);
341 else
342 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
343 addr->addr.name.domain = 0; 405 addr->addr.name.domain = 0;
344 406
345 mutex_unlock(&tsock->lock); 407 return 0;
346 return res;
347 } 408 }
348 409
349 /** 410 /**
350 * poll - read and possibly block on pollmask 411 * poll - read and possibly block on pollmask
351 * @file: file structure associated with the socket 412 * @file: file structure associated with the socket
352 * @sock: socket for which to calculate the poll bits 413 * @sock: socket for which to calculate the poll bits
353 * @wait: poll table 414 * @wait: poll table
354 * 415 *
355 * Returns pollmask value 416 * Returns pollmask value
356 * 417 *
357 * COMMENTARY: 418 * COMMENTARY:
358 * It appears that the usual socket locking mechanisms are not useful here 419 * It appears that the usual socket locking mechanisms are not useful here
359 * since the pollmask info is potentially out-of-date the moment this routine 420 * since the pollmask info is potentially out-of-date the moment this routine
360 * exits. TCP and other protocols seem to rely on higher level poll routines 421 * exits. TCP and other protocols seem to rely on higher level poll routines
361 * to handle any preventable race conditions, so TIPC will do the same ... 422 * to handle any preventable race conditions, so TIPC will do the same ...
362 * 423 *
363 * TIPC sets the returned events as follows: 424 * TIPC sets the returned events as follows:
364 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty 425 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
365 * or if a connection-oriented socket does not have an active connection 426
366 * (i.e. a read operation will not block). 427 * (i.e. a read operation will not block).
367 * b) POLLOUT is set except when a socket's connection has been terminated 428 * b) POLLOUT is set except when a socket's connection has been terminated
368 * (i.e. a write operation will not block). 429 * (i.e. a write operation will not block).
369 * c) POLLHUP is set when a socket's connection has been terminated. 430 * c) POLLHUP is set when a socket's connection has been terminated.
370 * 431 *
371 * IMPORTANT: The fact that a read or write operation will not block does NOT 432 * IMPORTANT: The fact that a read or write operation will not block does NOT
372 * imply that the operation will succeed! 433 * imply that the operation will succeed!
373 */ 434 */
374 435
375 static unsigned int poll(struct file *file, struct socket *sock, 436 static unsigned int poll(struct file *file, struct socket *sock,
376 poll_table *wait) 437 poll_table *wait)
377 { 438 {
378 struct sock *sk = sock->sk; 439 struct sock *sk = sock->sk;
379 u32 mask; 440 u32 mask;
380 441
381 poll_wait(file, sk->sk_sleep, wait); 442 poll_wait(file, sk->sk_sleep, wait);
382 443
383 if (!skb_queue_empty(&sk->sk_receive_queue) || 444 if (!skb_queue_empty(&sk->sk_receive_queue) ||
384 (sock->state == SS_UNCONNECTED) || 445 (sock->state == SS_UNCONNECTED) ||
385 (sock->state == SS_DISCONNECTING)) 446 (sock->state == SS_DISCONNECTING))
386 mask = (POLLRDNORM | POLLIN); 447 mask = (POLLRDNORM | POLLIN);
387 else 448 else
388 mask = 0; 449 mask = 0;
389 450
390 if (sock->state == SS_DISCONNECTING) 451 if (sock->state == SS_DISCONNECTING)
391 mask |= POLLHUP; 452 mask |= POLLHUP;
392 else 453 else
393 mask |= POLLOUT; 454 mask |= POLLOUT;
394 455
395 return mask; 456 return mask;
396 } 457 }
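A small userspace sketch (not part of this commit) that consumes the pollmask rules above: POLLHUP is read as "connection terminated" and POLLIN as "a read will not block", which, as the comment warns, is not a guarantee that the read will succeed.

#include <poll.h>

/* Returns 1 if a read will not block, 0 on timeout, -1 on hangup or error. */
static int wait_readable(int sd, int timeout_ms)
{
	struct pollfd pfd = { .fd = sd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n <= 0)
		return n;
	if (pfd.revents & POLLHUP)
		return -1;
	return (pfd.revents & POLLIN) ? 1 : 0;
}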
397 458
398 /** 459 /**
399 * dest_name_check - verify user is permitted to send to specified port name 460 * dest_name_check - verify user is permitted to send to specified port name
400 * @dest: destination address 461 * @dest: destination address
401 * @m: descriptor for message to be sent 462 * @m: descriptor for message to be sent
402 * 463 *
403 * Prevents restricted configuration commands from being issued by 464 * Prevents restricted configuration commands from being issued by
404 * unauthorized users. 465 * unauthorized users.
405 * 466 *
406 * Returns 0 if permission is granted, otherwise errno 467 * Returns 0 if permission is granted, otherwise errno
407 */ 468 */
408 469
409 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) 470 static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
410 { 471 {
411 struct tipc_cfg_msg_hdr hdr; 472 struct tipc_cfg_msg_hdr hdr;
412 473
413 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES)) 474 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
414 return 0; 475 return 0;
415 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV)) 476 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
416 return 0; 477 return 0;
417
418 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV)) 478 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
419 return -EACCES; 479 return -EACCES;
420 480
421 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr))) 481 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
422 return -EFAULT; 482 return -EFAULT;
423 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN))) 483 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
424 return -EACCES; 484 return -EACCES;
425 485
426 return 0; 486 return 0;
427 } 487 }
428 488
429 /** 489 /**
430 * send_msg - send message in connectionless manner 490 * send_msg - send message in connectionless manner
431 * @iocb: (unused) 491 * @iocb: if NULL, indicates that socket lock is already held
432 * @sock: socket structure 492 * @sock: socket structure
433 * @m: message to send 493 * @m: message to send
434 * @total_len: length of message 494 * @total_len: length of message
435 * 495 *
436 * Message must have a destination specified explicitly. 496 * Message must have a destination specified explicitly.
437 * Used for SOCK_RDM and SOCK_DGRAM messages, 497 * Used for SOCK_RDM and SOCK_DGRAM messages,
438 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections. 498 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
439 * (Note: 'SYN+' is prohibited on SOCK_STREAM.) 499 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
440 * 500 *
441 * Returns the number of bytes sent on success, or errno otherwise 501 * Returns the number of bytes sent on success, or errno otherwise
442 */ 502 */
443 503
444 static int send_msg(struct kiocb *iocb, struct socket *sock, 504 static int send_msg(struct kiocb *iocb, struct socket *sock,
445 struct msghdr *m, size_t total_len) 505 struct msghdr *m, size_t total_len)
446 { 506 {
447 struct tipc_sock *tsock = tipc_sk(sock->sk); 507 struct sock *sk = sock->sk;
508 struct tipc_port *tport = tipc_sk_port(sk);
448 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 509 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
449 struct sk_buff *buf;
450 int needs_conn; 510 int needs_conn;
451 int res = -EINVAL; 511 int res = -EINVAL;
452 512
453 if (unlikely(!dest)) 513 if (unlikely(!dest))
454 return -EDESTADDRREQ; 514 return -EDESTADDRREQ;
455 if (unlikely((m->msg_namelen < sizeof(*dest)) || 515 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
456 (dest->family != AF_TIPC))) 516 (dest->family != AF_TIPC)))
457 return -EINVAL; 517 return -EINVAL;
458 518
519 if (iocb)
520 lock_sock(sk);
521
459 needs_conn = (sock->state != SS_READY); 522 needs_conn = (sock->state != SS_READY);
460 if (unlikely(needs_conn)) { 523 if (unlikely(needs_conn)) {
461 if (sock->state == SS_LISTENING) 524 if (sock->state == SS_LISTENING) {
462 return -EPIPE; 525 res = -EPIPE;
463 if (sock->state != SS_UNCONNECTED) 526 goto exit;
464 return -EISCONN; 527 }
465 if ((tsock->p->published) || 528 if (sock->state != SS_UNCONNECTED) {
466 ((sock->type == SOCK_STREAM) && (total_len != 0))) 529 res = -EISCONN;
467 return -EOPNOTSUPP; 530 goto exit;
531 }
532 if ((tport->published) ||
533 ((sock->type == SOCK_STREAM) && (total_len != 0))) {
534 res = -EOPNOTSUPP;
535 goto exit;
536 }
468 if (dest->addrtype == TIPC_ADDR_NAME) { 537 if (dest->addrtype == TIPC_ADDR_NAME) {
469 tsock->p->conn_type = dest->addr.name.name.type; 538 tport->conn_type = dest->addr.name.name.type;
470 tsock->p->conn_instance = dest->addr.name.name.instance; 539 tport->conn_instance = dest->addr.name.name.instance;
471 } 540 }
472 }
473 541
474 if (mutex_lock_interruptible(&tsock->lock))
475 return -ERESTARTSYS;
476
477 if (needs_conn) {
478
479 /* Abort any pending connection attempts (very unlikely) */ 542 /* Abort any pending connection attempts (very unlikely) */
480 543
481 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 544 reject_rx_queue(sk);
482 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
483 atomic_dec(&tipc_queue_size);
484 }
485
486 sock->state = SS_CONNECTING;
487 } 545 }
488 546
489 do { 547 do {
490 if (dest->addrtype == TIPC_ADDR_NAME) { 548 if (dest->addrtype == TIPC_ADDR_NAME) {
491 if ((res = dest_name_check(dest, m))) 549 if ((res = dest_name_check(dest, m)))
492 goto exit; 550 break;
493 res = tipc_send2name(tsock->p->ref, 551 res = tipc_send2name(tport->ref,
494 &dest->addr.name.name, 552 &dest->addr.name.name,
495 dest->addr.name.domain, 553 dest->addr.name.domain,
496 m->msg_iovlen, 554 m->msg_iovlen,
497 m->msg_iov); 555 m->msg_iov);
498 } 556 }
499 else if (dest->addrtype == TIPC_ADDR_ID) { 557 else if (dest->addrtype == TIPC_ADDR_ID) {
500 res = tipc_send2port(tsock->p->ref, 558 res = tipc_send2port(tport->ref,
501 &dest->addr.id, 559 &dest->addr.id,
502 m->msg_iovlen, 560 m->msg_iovlen,
503 m->msg_iov); 561 m->msg_iov);
504 } 562 }
505 else if (dest->addrtype == TIPC_ADDR_MCAST) { 563 else if (dest->addrtype == TIPC_ADDR_MCAST) {
506 if (needs_conn) { 564 if (needs_conn) {
507 res = -EOPNOTSUPP; 565 res = -EOPNOTSUPP;
508 goto exit; 566 break;
509 } 567 }
510 if ((res = dest_name_check(dest, m))) 568 if ((res = dest_name_check(dest, m)))
511 goto exit; 569 break;
512 res = tipc_multicast(tsock->p->ref, 570 res = tipc_multicast(tport->ref,
513 &dest->addr.nameseq, 571 &dest->addr.nameseq,
514 0, 572 0,
515 m->msg_iovlen, 573 m->msg_iovlen,
516 m->msg_iov); 574 m->msg_iov);
517 } 575 }
518 if (likely(res != -ELINKCONG)) { 576 if (likely(res != -ELINKCONG)) {
519 exit: 577 if (needs_conn && (res >= 0)) {
520 mutex_unlock(&tsock->lock); 578 sock->state = SS_CONNECTING;
521 return res; 579 }
580 break;
522 } 581 }
523 if (m->msg_flags & MSG_DONTWAIT) { 582 if (m->msg_flags & MSG_DONTWAIT) {
524 res = -EWOULDBLOCK; 583 res = -EWOULDBLOCK;
525 goto exit; 584 break;
526 } 585 }
527 if (wait_event_interruptible(*sock->sk->sk_sleep, 586 release_sock(sk);
528 !tsock->p->congested)) { 587 res = wait_event_interruptible(*sk->sk_sleep,
529 res = -ERESTARTSYS; 588 !tport->congested);
530 goto exit; 589 lock_sock(sk);
531 } 590 if (res)
591 break;
532 } while (1); 592 } while (1);
593
594 exit:
595 if (iocb)
596 release_sock(sk);
597 return res;
533 } 598 }
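A userspace sketch (not part of this commit) of the connectionless path send_msg() handles: an unconnected SOCK_RDM or SOCK_DGRAM socket sending to a TIPC port name via sendto(). The service type 18888 and instance 17 are arbitrary example values; the lookup domain is simply left at 0 here.

#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static ssize_t send_to_name(int sd, const void *data, size_t len)
{
	struct sockaddr_tipc dest;

	memset(&dest, 0, sizeof(dest));
	dest.family = AF_TIPC;
	dest.addrtype = TIPC_ADDR_NAME;
	dest.addr.name.name.type = 18888;	/* example service type */
	dest.addr.name.name.instance = 17;	/* example instance */
	dest.addr.name.domain = 0;

	return sendto(sd, data, len, 0,
		      (struct sockaddr *)&dest, sizeof(dest));
}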
534 599
535 /** 600 /**
536 * send_packet - send a connection-oriented message 601 * send_packet - send a connection-oriented message
537 * @iocb: (unused) 602 * @iocb: if NULL, indicates that socket lock is already held
538 * @sock: socket structure 603 * @sock: socket structure
539 * @m: message to send 604 * @m: message to send
540 * @total_len: length of message 605 * @total_len: length of message
541 * 606 *
542 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data. 607 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
543 * 608 *
544 * Returns the number of bytes sent on success, or errno otherwise 609 * Returns the number of bytes sent on success, or errno otherwise
545 */ 610 */
546 611
547 static int send_packet(struct kiocb *iocb, struct socket *sock, 612 static int send_packet(struct kiocb *iocb, struct socket *sock,
548 struct msghdr *m, size_t total_len) 613 struct msghdr *m, size_t total_len)
549 { 614 {
550 struct tipc_sock *tsock = tipc_sk(sock->sk); 615 struct sock *sk = sock->sk;
616 struct tipc_port *tport = tipc_sk_port(sk);
551 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name; 617 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
552 int res; 618 int res;
553 619
554 /* Handle implied connection establishment */ 620 /* Handle implied connection establishment */
555 621
556 if (unlikely(dest)) 622 if (unlikely(dest))
557 return send_msg(iocb, sock, m, total_len); 623 return send_msg(iocb, sock, m, total_len);
558 624
559 if (mutex_lock_interruptible(&tsock->lock)) { 625 if (iocb)
560 return -ERESTARTSYS; 626 lock_sock(sk);
561 }
562 627
563 do { 628 do {
564 if (unlikely(sock->state != SS_CONNECTED)) { 629 if (unlikely(sock->state != SS_CONNECTED)) {
565 if (sock->state == SS_DISCONNECTING) 630 if (sock->state == SS_DISCONNECTING)
566 res = -EPIPE; 631 res = -EPIPE;
567 else 632 else
568 res = -ENOTCONN; 633 res = -ENOTCONN;
569 goto exit; 634 break;
570 } 635 }
571 636
572 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov); 637 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov);
573 if (likely(res != -ELINKCONG)) { 638 if (likely(res != -ELINKCONG)) {
574 exit: 639 break;
575 mutex_unlock(&tsock->lock);
576 return res;
577 } 640 }
578 if (m->msg_flags & MSG_DONTWAIT) { 641 if (m->msg_flags & MSG_DONTWAIT) {
579 res = -EWOULDBLOCK; 642 res = -EWOULDBLOCK;
580 goto exit; 643 break;
581 } 644 }
582 if (wait_event_interruptible(*sock->sk->sk_sleep, 645 release_sock(sk);
583 !tsock->p->congested)) { 646 res = wait_event_interruptible(*sk->sk_sleep,
584 res = -ERESTARTSYS; 647 (!tport->congested || !tport->connected));
585 goto exit; 648 lock_sock(sk);
586 } 649 if (res)
650 break;
587 } while (1); 651 } while (1);
652
653 if (iocb)
654 release_sock(sk);
655 return res;
588 } 656 }
589 657
590 /** 658 /**
591 * send_stream - send stream-oriented data 659 * send_stream - send stream-oriented data
592 * @iocb: (unused) 660 * @iocb: (unused)
593 * @sock: socket structure 661 * @sock: socket structure
594 * @m: data to send 662 * @m: data to send
595 * @total_len: total length of data to be sent 663 * @total_len: total length of data to be sent
596 * 664 *
597 * Used for SOCK_STREAM data. 665 * Used for SOCK_STREAM data.
598 * 666 *
599 * Returns the number of bytes sent on success (or partial success), 667 * Returns the number of bytes sent on success (or partial success),
600 * or errno if no data sent 668 * or errno if no data sent
601 */ 669 */
602 670
603
604 static int send_stream(struct kiocb *iocb, struct socket *sock, 671 static int send_stream(struct kiocb *iocb, struct socket *sock,
605 struct msghdr *m, size_t total_len) 672 struct msghdr *m, size_t total_len)
606 { 673 {
607 struct tipc_port *tport; 674 struct sock *sk = sock->sk;
675 struct tipc_port *tport = tipc_sk_port(sk);
608 struct msghdr my_msg; 676 struct msghdr my_msg;
609 struct iovec my_iov; 677 struct iovec my_iov;
610 struct iovec *curr_iov; 678 struct iovec *curr_iov;
611 int curr_iovlen; 679 int curr_iovlen;
612 char __user *curr_start; 680 char __user *curr_start;
613 u32 hdr_size; 681 u32 hdr_size;
614 int curr_left; 682 int curr_left;
615 int bytes_to_send; 683 int bytes_to_send;
616 int bytes_sent; 684 int bytes_sent;
617 int res; 685 int res;
618 686
687 lock_sock(sk);
688
619 /* Handle special cases where there is no connection */ 689 /* Handle special cases where there is no connection */
620 690
621 if (unlikely(sock->state != SS_CONNECTED)) { 691 if (unlikely(sock->state != SS_CONNECTED)) {
622 if (sock->state == SS_UNCONNECTED) 692 if (sock->state == SS_UNCONNECTED) {
623 return send_packet(iocb, sock, m, total_len); 693 res = send_packet(NULL, sock, m, total_len);
624 else if (sock->state == SS_DISCONNECTING) 694 goto exit;
625 return -EPIPE; 695 } else if (sock->state == SS_DISCONNECTING) {
626 else 696 res = -EPIPE;
627 return -ENOTCONN; 697 goto exit;
698 } else {
699 res = -ENOTCONN;
700 goto exit;
701 }
628 } 702 }
629 703
630 if (unlikely(m->msg_name)) 704 if (unlikely(m->msg_name)) {
631 return -EISCONN; 705 res = -EISCONN;
706 goto exit;
707 }
632 708
633 /* 709 /*
634 * Send each iovec entry using one or more messages 710 * Send each iovec entry using one or more messages
635 * 711 *
636 * Note: This algorithm is good for the most likely case 712 * Note: This algorithm is good for the most likely case
637 * (i.e. one large iovec entry), but could be improved to pass sets 713 * (i.e. one large iovec entry), but could be improved to pass sets
638 * of small iovec entries into send_packet(). 714 * of small iovec entries into send_packet().
639 */ 715 */
640 716
641 curr_iov = m->msg_iov; 717 curr_iov = m->msg_iov;
642 curr_iovlen = m->msg_iovlen; 718 curr_iovlen = m->msg_iovlen;
643 my_msg.msg_iov = &my_iov; 719 my_msg.msg_iov = &my_iov;
644 my_msg.msg_iovlen = 1; 720 my_msg.msg_iovlen = 1;
645 my_msg.msg_flags = m->msg_flags; 721 my_msg.msg_flags = m->msg_flags;
646 my_msg.msg_name = NULL; 722 my_msg.msg_name = NULL;
647 bytes_sent = 0; 723 bytes_sent = 0;
648 724
649 tport = tipc_sk(sock->sk)->p;
650 hdr_size = msg_hdr_sz(&tport->phdr); 725 hdr_size = msg_hdr_sz(&tport->phdr);
651 726
652 while (curr_iovlen--) { 727 while (curr_iovlen--) {
653 curr_start = curr_iov->iov_base; 728 curr_start = curr_iov->iov_base;
654 curr_left = curr_iov->iov_len; 729 curr_left = curr_iov->iov_len;
655 730
656 while (curr_left) { 731 while (curr_left) {
657 bytes_to_send = tport->max_pkt - hdr_size; 732 bytes_to_send = tport->max_pkt - hdr_size;
658 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE) 733 if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
659 bytes_to_send = TIPC_MAX_USER_MSG_SIZE; 734 bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
660 if (curr_left < bytes_to_send) 735 if (curr_left < bytes_to_send)
661 bytes_to_send = curr_left; 736 bytes_to_send = curr_left;
662 my_iov.iov_base = curr_start; 737 my_iov.iov_base = curr_start;
663 my_iov.iov_len = bytes_to_send; 738 my_iov.iov_len = bytes_to_send;
664 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0) { 739 if ((res = send_packet(NULL, sock, &my_msg, 0)) < 0) {
665 if (bytes_sent != 0) 740 if (bytes_sent)
666 res = bytes_sent; 741 res = bytes_sent;
667 return res; 742 goto exit;
668 } 743 }
669 curr_left -= bytes_to_send; 744 curr_left -= bytes_to_send;
670 curr_start += bytes_to_send; 745 curr_start += bytes_to_send;
671 bytes_sent += bytes_to_send; 746 bytes_sent += bytes_to_send;
672 } 747 }
673 748
674 curr_iov++; 749 curr_iov++;
675 } 750 }
676 751 res = bytes_sent;
677 return bytes_sent; 752 exit:
753 release_sock(sk);
754 return res;
678 } 755 }
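As a worked illustration of the fragmentation loop above (the figures are invented, not taken from this commit): if tport->max_pkt were 1500 bytes and hdr_size 24 bytes, each send_packet() call would carry at most 1476 bytes of user data, so a single 4000-byte iovec entry would go out as chunks of 1476, 1476 and 1048 bytes. Should the second chunk fail, the call returns 1476, the count already sent, rather than the error code.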
679 756
680 /** 757 /**
681 * auto_connect - complete connection setup to a remote port 758 * auto_connect - complete connection setup to a remote port
682 * @sock: socket structure 759 * @sock: socket structure
683 * @tsock: TIPC-specific socket structure
684 * @msg: peer's response message 760 * @msg: peer's response message
685 * 761 *
686 * Returns 0 on success, errno otherwise 762 * Returns 0 on success, errno otherwise
687 */ 763 */
688 764
689 static int auto_connect(struct socket *sock, struct tipc_sock *tsock, 765 static int auto_connect(struct socket *sock, struct tipc_msg *msg)
690 struct tipc_msg *msg)
691 { 766 {
767 struct tipc_port *tport = tipc_sk_port(sock->sk);
692 struct tipc_portid peer; 768 struct tipc_portid peer;
693 769
694 if (msg_errcode(msg)) { 770 if (msg_errcode(msg)) {
695 sock->state = SS_DISCONNECTING; 771 sock->state = SS_DISCONNECTING;
696 return -ECONNREFUSED; 772 return -ECONNREFUSED;
697 } 773 }
698 774
699 peer.ref = msg_origport(msg); 775 peer.ref = msg_origport(msg);
700 peer.node = msg_orignode(msg); 776 peer.node = msg_orignode(msg);
701 tipc_connect2port(tsock->p->ref, &peer); 777 tipc_connect2port(tport->ref, &peer);
702 tipc_set_portimportance(tsock->p->ref, msg_importance(msg)); 778 tipc_set_portimportance(tport->ref, msg_importance(msg));
703 sock->state = SS_CONNECTED; 779 sock->state = SS_CONNECTED;
704 return 0; 780 return 0;
705 } 781 }
706 782
707 /** 783 /**
708 * set_orig_addr - capture sender's address for received message 784 * set_orig_addr - capture sender's address for received message
709 * @m: descriptor for message info 785 * @m: descriptor for message info
710 * @msg: received message header 786 * @msg: received message header
711 * 787 *
712 * Note: Address is not captured if not requested by receiver. 788 * Note: Address is not captured if not requested by receiver.
713 */ 789 */
714 790
715 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) 791 static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
716 { 792 {
717 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; 793 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
718 794
719 if (addr) { 795 if (addr) {
720 addr->family = AF_TIPC; 796 addr->family = AF_TIPC;
721 addr->addrtype = TIPC_ADDR_ID; 797 addr->addrtype = TIPC_ADDR_ID;
722 addr->addr.id.ref = msg_origport(msg); 798 addr->addr.id.ref = msg_origport(msg);
723 addr->addr.id.node = msg_orignode(msg); 799 addr->addr.id.node = msg_orignode(msg);
724 addr->addr.name.domain = 0; /* could leave uninitialized */ 800 addr->addr.name.domain = 0; /* could leave uninitialized */
725 addr->scope = 0; /* could leave uninitialized */ 801 addr->scope = 0; /* could leave uninitialized */
726 m->msg_namelen = sizeof(struct sockaddr_tipc); 802 m->msg_namelen = sizeof(struct sockaddr_tipc);
727 } 803 }
728 } 804 }
729 805
730 /** 806 /**
731 * anc_data_recv - optionally capture ancillary data for received message 807 * anc_data_recv - optionally capture ancillary data for received message
732 * @m: descriptor for message info 808 * @m: descriptor for message info
733 * @msg: received message header 809 * @msg: received message header
734 * @tport: TIPC port associated with message 810 * @tport: TIPC port associated with message
735 * 811 *
736 * Note: Ancillary data is not captured if not requested by receiver. 812 * Note: Ancillary data is not captured if not requested by receiver.
737 * 813 *
738 * Returns 0 if successful, otherwise errno 814 * Returns 0 if successful, otherwise errno
739 */ 815 */
740 816
741 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 817 static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
742 struct tipc_port *tport) 818 struct tipc_port *tport)
743 { 819 {
744 u32 anc_data[3]; 820 u32 anc_data[3];
745 u32 err; 821 u32 err;
746 u32 dest_type; 822 u32 dest_type;
747 int has_name; 823 int has_name;
748 int res; 824 int res;
749 825
750 if (likely(m->msg_controllen == 0)) 826 if (likely(m->msg_controllen == 0))
751 return 0; 827 return 0;
752 828
753 /* Optionally capture errored message object(s) */ 829 /* Optionally capture errored message object(s) */
754 830
755 err = msg ? msg_errcode(msg) : 0; 831 err = msg ? msg_errcode(msg) : 0;
756 if (unlikely(err)) { 832 if (unlikely(err)) {
757 anc_data[0] = err; 833 anc_data[0] = err;
758 anc_data[1] = msg_data_sz(msg); 834 anc_data[1] = msg_data_sz(msg);
759 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data))) 835 if ((res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data)))
760 return res; 836 return res;
761 if (anc_data[1] && 837 if (anc_data[1] &&
762 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1], 838 (res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
763 msg_data(msg)))) 839 msg_data(msg))))
764 return res; 840 return res;
765 } 841 }
766 842
767 /* Optionally capture message destination object */ 843 /* Optionally capture message destination object */
768 844
769 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 845 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
770 switch (dest_type) { 846 switch (dest_type) {
771 case TIPC_NAMED_MSG: 847 case TIPC_NAMED_MSG:
772 has_name = 1; 848 has_name = 1;
773 anc_data[0] = msg_nametype(msg); 849 anc_data[0] = msg_nametype(msg);
774 anc_data[1] = msg_namelower(msg); 850 anc_data[1] = msg_namelower(msg);
775 anc_data[2] = msg_namelower(msg); 851 anc_data[2] = msg_namelower(msg);
776 break; 852 break;
777 case TIPC_MCAST_MSG: 853 case TIPC_MCAST_MSG:
778 has_name = 1; 854 has_name = 1;
779 anc_data[0] = msg_nametype(msg); 855 anc_data[0] = msg_nametype(msg);
780 anc_data[1] = msg_namelower(msg); 856 anc_data[1] = msg_namelower(msg);
781 anc_data[2] = msg_nameupper(msg); 857 anc_data[2] = msg_nameupper(msg);
782 break; 858 break;
783 case TIPC_CONN_MSG: 859 case TIPC_CONN_MSG:
784 has_name = (tport->conn_type != 0); 860 has_name = (tport->conn_type != 0);
785 anc_data[0] = tport->conn_type; 861 anc_data[0] = tport->conn_type;
786 anc_data[1] = tport->conn_instance; 862 anc_data[1] = tport->conn_instance;
787 anc_data[2] = tport->conn_instance; 863 anc_data[2] = tport->conn_instance;
788 break; 864 break;
789 default: 865 default:
790 has_name = 0; 866 has_name = 0;
791 } 867 }
792 if (has_name && 868 if (has_name &&
793 (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data))) 869 (res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data)))
794 return res; 870 return res;
795 871
796 return 0; 872 return 0;
797 } 873 }
798 874
799 /** 875 /**
800 * recv_msg - receive packet-oriented message 876 * recv_msg - receive packet-oriented message
801 * @iocb: (unused) 877 * @iocb: (unused)
802 * @m: descriptor for message info 878 * @m: descriptor for message info
803 * @buf_len: total size of user buffer area 879 * @buf_len: total size of user buffer area
804 * @flags: receive flags 880 * @flags: receive flags
805 * 881 *
806 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages. 882 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
807 * If the complete message doesn't fit in user area, truncate it. 883 * If the complete message doesn't fit in user area, truncate it.
808 * 884 *
809 * Returns size of returned message data, errno otherwise 885 * Returns size of returned message data, errno otherwise
810 */ 886 */
811 887
812 static int recv_msg(struct kiocb *iocb, struct socket *sock, 888 static int recv_msg(struct kiocb *iocb, struct socket *sock,
813 struct msghdr *m, size_t buf_len, int flags) 889 struct msghdr *m, size_t buf_len, int flags)
814 { 890 {
815 struct tipc_sock *tsock = tipc_sk(sock->sk); 891 struct sock *sk = sock->sk;
892 struct tipc_port *tport = tipc_sk_port(sk);
816 struct sk_buff *buf; 893 struct sk_buff *buf;
817 struct tipc_msg *msg; 894 struct tipc_msg *msg;
818 unsigned int q_len;
819 unsigned int sz; 895 unsigned int sz;
820 u32 err; 896 u32 err;
821 int res; 897 int res;
822 898
823 /* Currently doesn't support receiving into multiple iovec entries */ 899 /* Catch invalid receive requests */
824 900
825 if (m->msg_iovlen != 1) 901 if (m->msg_iovlen != 1)
826 return -EOPNOTSUPP; 902 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
827 903
828 /* Catch invalid receive attempts */
829
830 if (unlikely(!buf_len)) 904 if (unlikely(!buf_len))
831 return -EINVAL; 905 return -EINVAL;
832 906
833 if (sock->type == SOCK_SEQPACKET) { 907 lock_sock(sk);
834 if (unlikely(sock->state == SS_UNCONNECTED))
835 return -ENOTCONN;
836 if (unlikely((sock->state == SS_DISCONNECTING) &&
837 (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
838 return -ENOTCONN;
839 }
840 908
841 /* Look for a message in receive queue; wait if necessary */ 909 if (unlikely(sock->state == SS_UNCONNECTED)) {
842 910 res = -ENOTCONN;
843 if (unlikely(mutex_lock_interruptible(&tsock->lock)))
844 return -ERESTARTSYS;
845
846 restart:
847 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
848 (flags & MSG_DONTWAIT))) {
849 res = -EWOULDBLOCK;
850 goto exit; 911 goto exit;
851 } 912 }
852 913
853 if ((res = wait_event_interruptible( 914 restart:
854 *sock->sk->sk_sleep,
855 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
856 (sock->state == SS_DISCONNECTING))) )) {
857 goto exit;
858 }
859 915
860 /* Catch attempt to receive on an already terminated connection */ 916 /* Look for a message in receive queue; wait if necessary */
861 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
862 917
863 if (!q_len) { 918 while (skb_queue_empty(&sk->sk_receive_queue)) {
864 res = -ENOTCONN; 919 if (sock->state == SS_DISCONNECTING) {
865 goto exit; 920 res = -ENOTCONN;
921 goto exit;
922 }
923 if (flags & MSG_DONTWAIT) {
924 res = -EWOULDBLOCK;
925 goto exit;
926 }
927 release_sock(sk);
928 res = wait_event_interruptible(*sk->sk_sleep,
929 (!skb_queue_empty(&sk->sk_receive_queue) ||
930 (sock->state == SS_DISCONNECTING)));
931 lock_sock(sk);
932 if (res)
933 goto exit;
866 } 934 }
867 935
868 /* Get access to first message in receive queue */ 936 /* Look at first message in receive queue */
869 937
870 buf = skb_peek(&sock->sk->sk_receive_queue); 938 buf = skb_peek(&sk->sk_receive_queue);
871 msg = buf_msg(buf); 939 msg = buf_msg(buf);
872 sz = msg_data_sz(msg); 940 sz = msg_data_sz(msg);
873 err = msg_errcode(msg); 941 err = msg_errcode(msg);
874 942
875 /* Complete connection setup for an implied connect */ 943 /* Complete connection setup for an implied connect */
876 944
877 if (unlikely(sock->state == SS_CONNECTING)) { 945 if (unlikely(sock->state == SS_CONNECTING)) {
878 if ((res = auto_connect(sock, tsock, msg))) 946 res = auto_connect(sock, msg);
947 if (res)
879 goto exit; 948 goto exit;
880 } 949 }
881 950
882 /* Discard an empty non-errored message & try again */ 951 /* Discard an empty non-errored message & try again */
883 952
884 if ((!sz) && (!err)) { 953 if ((!sz) && (!err)) {
885 advance_queue(tsock); 954 advance_rx_queue(sk);
886 goto restart; 955 goto restart;
887 } 956 }
888 957
889 /* Capture sender's address (optional) */ 958 /* Capture sender's address (optional) */
890 959
891 set_orig_addr(m, msg); 960 set_orig_addr(m, msg);
892 961
893 /* Capture ancillary data (optional) */ 962 /* Capture ancillary data (optional) */
894 963
895 if ((res = anc_data_recv(m, msg, tsock->p))) 964 res = anc_data_recv(m, msg, tport);
965 if (res)
896 goto exit; 966 goto exit;
897 967
898 /* Capture message data (if valid) & compute return value (always) */ 968 /* Capture message data (if valid) & compute return value (always) */
899 969
900 if (!err) { 970 if (!err) {
901 if (unlikely(buf_len < sz)) { 971 if (unlikely(buf_len < sz)) {
902 sz = buf_len; 972 sz = buf_len;
903 m->msg_flags |= MSG_TRUNC; 973 m->msg_flags |= MSG_TRUNC;
904 } 974 }
905 if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg), 975 if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
906 sz))) { 976 sz))) {
907 res = -EFAULT; 977 res = -EFAULT;
908 goto exit; 978 goto exit;
909 } 979 }
910 res = sz; 980 res = sz;
911 } else { 981 } else {
912 if ((sock->state == SS_READY) || 982 if ((sock->state == SS_READY) ||
913 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)) 983 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
914 res = 0; 984 res = 0;
915 else 985 else
916 res = -ECONNRESET; 986 res = -ECONNRESET;
917 } 987 }
918 988
919 /* Consume received message (optional) */ 989 /* Consume received message (optional) */
920 990
921 if (likely(!(flags & MSG_PEEK))) { 991 if (likely(!(flags & MSG_PEEK))) {
922 if ((sock->state != SS_READY) && 992 if ((sock->state != SS_READY) &&
923 (++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 993 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
924 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 994 tipc_acknowledge(tport->ref, tport->conn_unacked);
925 advance_queue(tsock); 995 advance_rx_queue(sk);
926 } 996 }
927 exit: 997 exit:
928 mutex_unlock(&tsock->lock); 998 release_sock(sk);
929 return res; 999 return res;
930 } 1000 }
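A userspace sketch (not part of this commit) showing how a receiver can collect the ancillary objects that anc_data_recv() above may attach: TIPC_ERRINFO carries the error code and returned-data length, TIPC_DESTNAME the type/lower/upper of the destination name. The cmsg type constants come from linux/tipc.h, and the SOL_TIPC fallback value is an assumption for older headers.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271			/* value taken from linux/socket.h */
#endif

static ssize_t recv_with_anc(int sd, void *buf, size_t len)
{
	char cbuf[256];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr m;
	struct cmsghdr *cm;
	ssize_t n;

	memset(&m, 0, sizeof(m));
	m.msg_iov = &iov;
	m.msg_iovlen = 1;		/* recv_msg() accepts one iovec only */
	m.msg_control = cbuf;
	m.msg_controllen = sizeof(cbuf);

	n = recvmsg(sd, &m, 0);
	if (n < 0)
		return n;

	for (cm = CMSG_FIRSTHDR(&m); cm; cm = CMSG_NXTHDR(&m, cm)) {
		unsigned int v[3];

		if (cm->cmsg_level != SOL_TIPC)
			continue;
		if (cm->cmsg_type == TIPC_ERRINFO) {
			memcpy(v, CMSG_DATA(cm), 8);
			printf("error %u, %u returned bytes\n", v[0], v[1]);
		} else if (cm->cmsg_type == TIPC_DESTNAME) {
			memcpy(v, CMSG_DATA(cm), 12);
			printf("sent to name {%u,%u,%u}\n", v[0], v[1], v[2]);
		}
	}
	return n;
}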
931 1001
932 /** 1002 /**
933 * recv_stream - receive stream-oriented data 1003 * recv_stream - receive stream-oriented data
934 * @iocb: (unused) 1004 * @iocb: (unused)
935 * @m: descriptor for message info 1005 * @m: descriptor for message info
936 * @buf_len: total size of user buffer area 1006 * @buf_len: total size of user buffer area
937 * @flags: receive flags 1007 * @flags: receive flags
938 * 1008 *
939 * Used for SOCK_STREAM messages only. If not enough data is available 1009 * Used for SOCK_STREAM messages only. If not enough data is available
940 * it will optionally wait for more; never truncates data. 1010 * it will optionally wait for more; never truncates data.
941 * 1011 *
942 * Returns size of returned message data, errno otherwise 1012 * Returns size of returned message data, errno otherwise
943 */ 1013 */
944 1014
945 static int recv_stream(struct kiocb *iocb, struct socket *sock, 1015 static int recv_stream(struct kiocb *iocb, struct socket *sock,
946 struct msghdr *m, size_t buf_len, int flags) 1016 struct msghdr *m, size_t buf_len, int flags)
947 { 1017 {
948 struct tipc_sock *tsock = tipc_sk(sock->sk); 1018 struct sock *sk = sock->sk;
1019 struct tipc_port *tport = tipc_sk_port(sk);
949 struct sk_buff *buf; 1020 struct sk_buff *buf;
950 struct tipc_msg *msg; 1021 struct tipc_msg *msg;
951 unsigned int q_len;
952 unsigned int sz; 1022 unsigned int sz;
953 int sz_to_copy; 1023 int sz_to_copy;
954 int sz_copied = 0; 1024 int sz_copied = 0;
955 int needed; 1025 int needed;
956 char __user *crs = m->msg_iov->iov_base; 1026 char __user *crs = m->msg_iov->iov_base;
957 unsigned char *buf_crs; 1027 unsigned char *buf_crs;
958 u32 err; 1028 u32 err;
959 int res; 1029 int res = 0;
960 1030
961 /* Currently doesn't support receiving into multiple iovec entries */ 1031 /* Catch invalid receive attempts */
962 1032
963 if (m->msg_iovlen != 1) 1033 if (m->msg_iovlen != 1)
964 return -EOPNOTSUPP; 1034 return -EOPNOTSUPP; /* Don't do multiple iovec entries yet */
965 1035
966 /* Catch invalid receive attempts */
967
968 if (unlikely(!buf_len)) 1036 if (unlikely(!buf_len))
969 return -EINVAL; 1037 return -EINVAL;
970 1038
971 if (unlikely(sock->state == SS_DISCONNECTING)) { 1039 lock_sock(sk);
972 if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
973 return -ENOTCONN;
974 } else if (unlikely(sock->state != SS_CONNECTED))
975 return -ENOTCONN;
976 1040
977 /* Look for a message in receive queue; wait if necessary */ 1041 if (unlikely((sock->state == SS_UNCONNECTED) ||
978 1042 (sock->state == SS_CONNECTING))) {
979 if (unlikely(mutex_lock_interruptible(&tsock->lock))) 1043 res = -ENOTCONN;
980 return -ERESTARTSYS;
981
982 restart:
983 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
984 (flags & MSG_DONTWAIT))) {
985 res = -EWOULDBLOCK;
986 goto exit; 1044 goto exit;
987 } 1045 }
988 1046
989 if ((res = wait_event_interruptible( 1047 restart:
990 *sock->sk->sk_sleep,
991 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
992 (sock->state == SS_DISCONNECTING))) )) {
993 goto exit;
994 }
995 1048
996 /* Catch attempt to receive on an already terminated connection */ 1049 /* Look for a message in receive queue; wait if necessary */
997 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
998 1050
999 if (!q_len) { 1051 while (skb_queue_empty(&sk->sk_receive_queue)) {
1000 res = -ENOTCONN; 1052 if (sock->state == SS_DISCONNECTING) {
1001 goto exit; 1053 res = -ENOTCONN;
1054 goto exit;
1055 }
1056 if (flags & MSG_DONTWAIT) {
1057 res = -EWOULDBLOCK;
1058 goto exit;
1059 }
1060 release_sock(sk);
1061 res = wait_event_interruptible(*sk->sk_sleep,
1062 (!skb_queue_empty(&sk->sk_receive_queue) ||
1063 (sock->state == SS_DISCONNECTING)));
1064 lock_sock(sk);
1065 if (res)
1066 goto exit;
1002 } 1067 }
1003 1068
1004 /* Get access to first message in receive queue */ 1069 /* Look at first message in receive queue */
1005 1070
1006 buf = skb_peek(&sock->sk->sk_receive_queue); 1071 buf = skb_peek(&sk->sk_receive_queue);
1007 msg = buf_msg(buf); 1072 msg = buf_msg(buf);
1008 sz = msg_data_sz(msg); 1073 sz = msg_data_sz(msg);
1009 err = msg_errcode(msg); 1074 err = msg_errcode(msg);
1010 1075
1011 /* Discard an empty non-errored message & try again */ 1076 /* Discard an empty non-errored message & try again */
1012 1077
1013 if ((!sz) && (!err)) { 1078 if ((!sz) && (!err)) {
1014 advance_queue(tsock); 1079 advance_rx_queue(sk);
1015 goto restart; 1080 goto restart;
1016 } 1081 }
1017 1082
1018 /* Optionally capture sender's address & ancillary data of first msg */ 1083 /* Optionally capture sender's address & ancillary data of first msg */
1019 1084
1020 if (sz_copied == 0) { 1085 if (sz_copied == 0) {
1021 set_orig_addr(m, msg); 1086 set_orig_addr(m, msg);
1022 if ((res = anc_data_recv(m, msg, tsock->p))) 1087 res = anc_data_recv(m, msg, tport);
1088 if (res)
1023 goto exit; 1089 goto exit;
1024 } 1090 }
1025 1091
1026 /* Capture message data (if valid) & compute return value (always) */ 1092 /* Capture message data (if valid) & compute return value (always) */
1027 1093
1028 if (!err) { 1094 if (!err) {
1029 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle); 1095 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
1030 sz = (unsigned char *)msg + msg_size(msg) - buf_crs; 1096 sz = (unsigned char *)msg + msg_size(msg) - buf_crs;
1031 1097
1032 needed = (buf_len - sz_copied); 1098 needed = (buf_len - sz_copied);
1033 sz_to_copy = (sz <= needed) ? sz : needed; 1099 sz_to_copy = (sz <= needed) ? sz : needed;
1034 if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) { 1100 if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
1035 res = -EFAULT; 1101 res = -EFAULT;
1036 goto exit; 1102 goto exit;
1037 } 1103 }
1038 sz_copied += sz_to_copy; 1104 sz_copied += sz_to_copy;
1039 1105
1040 if (sz_to_copy < sz) { 1106 if (sz_to_copy < sz) {
1041 if (!(flags & MSG_PEEK)) 1107 if (!(flags & MSG_PEEK))
1042 TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy; 1108 TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
1043 goto exit; 1109 goto exit;
1044 } 1110 }
1045 1111
1046 crs += sz_to_copy; 1112 crs += sz_to_copy;
1047 } else { 1113 } else {
1048 if (sz_copied != 0) 1114 if (sz_copied != 0)
1049 goto exit; /* can't add error msg to valid data */ 1115 goto exit; /* can't add error msg to valid data */
1050 1116
1051 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control) 1117 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1052 res = 0; 1118 res = 0;
1053 else 1119 else
1054 res = -ECONNRESET; 1120 res = -ECONNRESET;
1055 } 1121 }
1056 1122
1057 /* Consume received message (optional) */ 1123 /* Consume received message (optional) */
1058 1124
1059 if (likely(!(flags & MSG_PEEK))) { 1125 if (likely(!(flags & MSG_PEEK))) {
1060 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1126 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1061 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked); 1127 tipc_acknowledge(tport->ref, tport->conn_unacked);
1062 advance_queue(tsock); 1128 advance_rx_queue(sk);
1063 } 1129 }
1064 1130
1065 /* Loop around if more data is required */ 1131 /* Loop around if more data is required */
1066 1132
1067 if ((sz_copied < buf_len) /* didn't get all requested data */ 1133 if ((sz_copied < buf_len) /* didn't get all requested data */
1068 && (!skb_queue_empty(&sock->sk->sk_receive_queue) || 1134 && (!skb_queue_empty(&sock->sk->sk_receive_queue) ||
1069 (flags & MSG_WAITALL)) 1135 (flags & MSG_WAITALL))
1070 /* ... and more is ready or required */ 1136 /* ... and more is ready or required */
1071 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */ 1137 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
1072 && (!err) /* ... and haven't reached a FIN */ 1138 && (!err) /* ... and haven't reached a FIN */
1073 ) 1139 )
1074 goto restart; 1140 goto restart;
1075 1141
1076 exit: 1142 exit:
1077 mutex_unlock(&tsock->lock); 1143 release_sock(sk);
1078 return sz_copied ? sz_copied : res; 1144 return sz_copied ? sz_copied : res;
1079 } 1145 }
1080 1146
1081 /** 1147 /**
1082 * rx_queue_full - determine if receive queue can accept another message 1148 * rx_queue_full - determine if receive queue can accept another message
1083 * @msg: message to be added to queue 1149 * @msg: message to be added to queue
1084 * @queue_size: current size of queue 1150 * @queue_size: current size of queue
1085 * @base: nominal maximum size of queue 1151 * @base: nominal maximum size of queue
1086 * 1152 *
1087 * Returns 1 if queue is unable to accept message, 0 otherwise 1153 * Returns 1 if queue is unable to accept message, 0 otherwise
1088 */ 1154 */
1089 1155
1090 static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) 1156 static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1091 { 1157 {
1092 u32 threshold; 1158 u32 threshold;
1093 u32 imp = msg_importance(msg); 1159 u32 imp = msg_importance(msg);
1094 1160
1095 if (imp == TIPC_LOW_IMPORTANCE) 1161 if (imp == TIPC_LOW_IMPORTANCE)
1096 threshold = base; 1162 threshold = base;
1097 else if (imp == TIPC_MEDIUM_IMPORTANCE) 1163 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1098 threshold = base * 2; 1164 threshold = base * 2;
1099 else if (imp == TIPC_HIGH_IMPORTANCE) 1165 else if (imp == TIPC_HIGH_IMPORTANCE)
1100 threshold = base * 100; 1166 threshold = base * 100;
1101 else 1167 else
1102 return 0; 1168 return 0;
1103 1169
1104 if (msg_connected(msg)) 1170 if (msg_connected(msg))
1105 threshold *= 4; 1171 threshold *= 4;
1106 1172
1107 return (queue_size >= threshold); 1173 return (queue_size >= threshold);
1108 } 1174 }
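A worked illustration of these thresholds (OVERLOAD_LIMIT_BASE is defined earlier in this file; assume 5000 purely for the arithmetic): the per-socket check below uses a base of OVERLOAD_LIMIT_BASE / 2 = 2500, so a low-importance connectionless message is refused once 2500 messages sit in the socket's receive queue, a medium-importance one at 5000, and a high-importance one at 250000, while critical-importance messages are never refused by this test; if the message belongs to a connection, each of those limits is quadrupled. The node-wide check against tipc_queue_size applies the same rules with the full base.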
1109 1175
1110 /** 1176 /**
1111 * async_disconnect - wrapper function used to disconnect port 1177 * filter_rcv - validate incoming message
1112 * @portref: TIPC port reference (passed as pointer-sized value) 1178 * @sk: socket
1113 */
1114
1115 static void async_disconnect(unsigned long portref)
1116 {
1117 tipc_disconnect((u32)portref);
1118 }
1119
1120 /**
1121 * dispatch - handle arriving message
1122 * @tport: TIPC port that received message
1123 * @buf: message 1179 * @buf: message
1124 * 1180 *
1125 * Called with port locked. Must not take socket lock to avoid deadlock risk. 1181 * Enqueues message on receive queue if acceptable; optionally handles
1182 * disconnect indication for a connected socket.
1126 * 1183 *
1184 * Called with socket lock already taken; port lock may also be taken.
1185 *
1127 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1186 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1128 */ 1187 */
1129 1188
1130 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) 1189 static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1131 { 1190 {
1191 struct socket *sock = sk->sk_socket;
1132 struct tipc_msg *msg = buf_msg(buf); 1192 struct tipc_msg *msg = buf_msg(buf);
1133 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1134 struct socket *sock;
1135 u32 recv_q_len; 1193 u32 recv_q_len;
1136 1194
1137 /* Reject message if socket is closing */
1138
1139 if (!tsock)
1140 return TIPC_ERR_NO_PORT;
1141
1142 /* Reject message if it is wrong sort of message for socket */ 1195 /* Reject message if it is wrong sort of message for socket */
1143 1196
1144 /* 1197 /*
1145 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD? 1198 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
1146 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY 1199 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1147 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC 1200 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1148 */ 1201 */
1149 sock = tsock->sk.sk_socket; 1202
1150 if (sock->state == SS_READY) { 1203 if (sock->state == SS_READY) {
1151 if (msg_connected(msg)) { 1204 if (msg_connected(msg)) {
1152 msg_dbg(msg, "dispatch filter 1\n"); 1205 msg_dbg(msg, "dispatch filter 1\n");
1153 return TIPC_ERR_NO_PORT; 1206 return TIPC_ERR_NO_PORT;
1154 } 1207 }
1155 } else { 1208 } else {
1156 if (msg_mcast(msg)) { 1209 if (msg_mcast(msg)) {
1157 msg_dbg(msg, "dispatch filter 2\n"); 1210 msg_dbg(msg, "dispatch filter 2\n");
1158 return TIPC_ERR_NO_PORT; 1211 return TIPC_ERR_NO_PORT;
1159 } 1212 }
1160 if (sock->state == SS_CONNECTED) { 1213 if (sock->state == SS_CONNECTED) {
1161 if (!msg_connected(msg)) { 1214 if (!msg_connected(msg)) {
1162 msg_dbg(msg, "dispatch filter 3\n"); 1215 msg_dbg(msg, "dispatch filter 3\n");
1163 return TIPC_ERR_NO_PORT; 1216 return TIPC_ERR_NO_PORT;
1164 } 1217 }
1165 } 1218 }
1166 else if (sock->state == SS_CONNECTING) { 1219 else if (sock->state == SS_CONNECTING) {
1167 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) { 1220 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
1168 msg_dbg(msg, "dispatch filter 4\n"); 1221 msg_dbg(msg, "dispatch filter 4\n");
1169 return TIPC_ERR_NO_PORT; 1222 return TIPC_ERR_NO_PORT;
1170 } 1223 }
1171 } 1224 }
1172 else if (sock->state == SS_LISTENING) { 1225 else if (sock->state == SS_LISTENING) {
1173 if (msg_connected(msg) || msg_errcode(msg)) { 1226 if (msg_connected(msg) || msg_errcode(msg)) {
1174 msg_dbg(msg, "dispatch filter 5\n"); 1227 msg_dbg(msg, "dispatch filter 5\n");
1175 return TIPC_ERR_NO_PORT; 1228 return TIPC_ERR_NO_PORT;
1176 } 1229 }
1177 } 1230 }
1178 else if (sock->state == SS_DISCONNECTING) { 1231 else if (sock->state == SS_DISCONNECTING) {
1179 msg_dbg(msg, "dispatch filter 6\n"); 1232 msg_dbg(msg, "dispatch filter 6\n");
1180 return TIPC_ERR_NO_PORT; 1233 return TIPC_ERR_NO_PORT;
1181 } 1234 }
1182 else /* (sock->state == SS_UNCONNECTED) */ { 1235 else /* (sock->state == SS_UNCONNECTED) */ {
1183 if (msg_connected(msg) || msg_errcode(msg)) { 1236 if (msg_connected(msg) || msg_errcode(msg)) {
1184 msg_dbg(msg, "dispatch filter 7\n"); 1237 msg_dbg(msg, "dispatch filter 7\n");
1185 return TIPC_ERR_NO_PORT; 1238 return TIPC_ERR_NO_PORT;
1186 } 1239 }
1187 } 1240 }
1188 } 1241 }
1189 1242
1190 /* Reject message if there isn't room to queue it */ 1243 /* Reject message if there isn't room to queue it */
1191 1244
1192 recv_q_len = (u32)atomic_read(&tipc_queue_size); 1245 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1193 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { 1246 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1194 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) 1247 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
1195 return TIPC_ERR_OVERLOAD; 1248 return TIPC_ERR_OVERLOAD;
1196 } 1249 }
1197 recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue); 1250 recv_q_len = skb_queue_len(&sk->sk_receive_queue);
1198 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) { 1251 if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
1199 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2)) 1252 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
1200 return TIPC_ERR_OVERLOAD; 1253 return TIPC_ERR_OVERLOAD;
1201 } 1254 }
1202 1255
1256 /* Enqueue message (finally!) */
1257
1258 msg_dbg(msg, "<DISP<: ");
1259 TIPC_SKB_CB(buf)->handle = msg_data(msg);
1260 atomic_inc(&tipc_queue_size);
1261 __skb_queue_tail(&sk->sk_receive_queue, buf);
1262
1203 /* Initiate connection termination for an incoming 'FIN' */ 1263 /* Initiate connection termination for an incoming 'FIN' */
1204 1264
1205 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1265 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1206 sock->state = SS_DISCONNECTING; 1266 sock->state = SS_DISCONNECTING;
1207 /* Note: Use signal since port lock is already taken! */ 1267 tipc_disconnect_port(tipc_sk_port(sk));
1208 tipc_k_signal((Handler)async_disconnect, tport->ref);
1209 } 1268 }
1210 1269
1211 /* Enqueue message (finally!) */ 1270 if (waitqueue_active(sk->sk_sleep))
1271 wake_up_interruptible(sk->sk_sleep);
1272 return TIPC_OK;
1273 }
1212 1274
1213 msg_dbg(msg,"<DISP<: "); 1275 /**
1214 TIPC_SKB_CB(buf)->handle = msg_data(msg); 1276 * backlog_rcv - handle incoming message from backlog queue
1215 atomic_inc(&tipc_queue_size); 1277 * @sk: socket
1216 skb_queue_tail(&sock->sk->sk_receive_queue, buf); 1278 * @buf: message
1279 *
1280 * Caller must hold socket lock, but not port lock.
1281 *
1282 * Returns 0
1283 */
1217 1284
1218 if (waitqueue_active(sock->sk->sk_sleep)) 1285 static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1219 wake_up_interruptible(sock->sk->sk_sleep); 1286 {
1220 return TIPC_OK; 1287 u32 res;
1288
1289 res = filter_rcv(sk, buf);
1290 if (res)
1291 tipc_reject_msg(buf, res);
1292 return 0;
1221 } 1293 }
1222 1294
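For context on the two-tier overload check performed in filter_rcv() above: the first test bounds the node-wide count of queued TIPC messages (tipc_queue_size against OVERLOAD_LIMIT_BASE), the second bounds this one socket's receive queue against half that base, with rx_queue_full() presumably making the final per-message decision. A minimal sketch of the same capacity test, using a hypothetical helper name and ignoring any per-message handling inside rx_queue_full():

	/* Sketch only -- not part of this patch.  Hypothetical helper showing
	 * the shape of the two-tier check: a global ceiling shared by every
	 * TIPC socket on the node, plus a tighter per-socket ceiling.
	 */
	static int rx_queues_have_room(struct sock *sk)
	{
		u32 global_len = (u32)atomic_read(&tipc_queue_size);
		u32 socket_len = skb_queue_len(&sk->sk_receive_queue);

		if (global_len >= OVERLOAD_LIMIT_BASE)
			return 0;		/* node-wide limit reached */
		if (socket_len >= OVERLOAD_LIMIT_BASE / 2)
			return 0;		/* this socket's limit reached */
		return 1;
	}
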
1223 /** 1295 /**
1296 * dispatch - handle incoming message
1297 * @tport: TIPC port that received message
1298 * @buf: message
1299 *
1300 * Called with port lock already taken.
1301 *
1302 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1303 */
1304
1305 static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1306 {
1307 struct sock *sk = (struct sock *)tport->usr_handle;
1308 u32 res;
1309
1310 /*
1311 * Process message if socket is unlocked; otherwise add to backlog queue
1312 *
1313 * This code is based on sk_receive_skb(), but must be distinct from it
1314 * since a TIPC-specific filter/reject mechanism is utilized
1315 */
1316
1317 bh_lock_sock(sk);
1318 if (!sock_owned_by_user(sk)) {
1319 res = filter_rcv(sk, buf);
1320 } else {
1321 sk_add_backlog(sk, buf);
1322 res = TIPC_OK;
1323 }
1324 bh_unlock_sock(sk);
1325
1326 return res;
1327 }
1328
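The comment in dispatch() above notes that it mirrors sk_receive_skb(). The other half of that pattern lives in the socket creation path earlier in this patch (not shown in this excerpt), which is assumed to register backlog_rcv() as the socket's backlog handler; once that is in place, a receiving thread picks up deferred buffers automatically when it drops the lock. A rough sketch, where example_rx_critical_section() is purely illustrative and not patch code:

	/* Sketch only.  Assumes the creation path does something like
	 *	sk->sk_backlog_rcv = backlog_rcv;
	 * so that release_sock() re-runs backlogged buffers through
	 * filter_rcv().
	 */
	static void example_rx_critical_section(struct sock *sk)
	{
		lock_sock(sk);		/* dispatch() now diverts new buffers to the backlog */
		/* ... consume sk->sk_receive_queue ... */
		release_sock(sk);	/* backlog drained via backlog_rcv() here */
	}
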
1329 /**
1224 * wakeupdispatch - wake up port after congestion 1330 * wakeupdispatch - wake up port after congestion
1225 * @tport: port to wakeup 1331 * @tport: port to wakeup
1226 * 1332 *
1227 * Called with port lock on. 1333 * Called with port lock already taken.
1228 */ 1334 */
1229 1335
1230 static void wakeupdispatch(struct tipc_port *tport) 1336 static void wakeupdispatch(struct tipc_port *tport)
1231 { 1337 {
1232 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle; 1338 struct sock *sk = (struct sock *)tport->usr_handle;
1233 1339
1234 if (waitqueue_active(tsock->sk.sk_sleep)) 1340 if (waitqueue_active(sk->sk_sleep))
1235 wake_up_interruptible(tsock->sk.sk_sleep); 1341 wake_up_interruptible(sk->sk_sleep);
1236 } 1342 }
1237 1343
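wakeupdispatch() exists to unblock senders that went to sleep because the port was congested. The matching wait sits in the send paths earlier in this patch (not shown in this excerpt); its rough shape, assuming the port's congested flag is the wake-up condition, would be:

	/* Sketch only -- approximate form of the sleep that wakeupdispatch()
	 * is assumed to terminate once congestion clears.
	 */
	static int example_wait_until_uncongested(struct sock *sk,
						  struct tipc_port *tport)
	{
		return wait_event_interruptible(*sk->sk_sleep,
						!tport->congested);
	}
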
1238 /** 1344 /**
1239 * connect - establish a connection to another TIPC port 1345 * connect - establish a connection to another TIPC port
1240 * @sock: socket structure 1346 * @sock: socket structure
1241 * @dest: socket address for destination port 1347 * @dest: socket address for destination port
1242 * @destlen: size of socket address data structure 1348 * @destlen: size of socket address data structure
1243 * @flags: (unused) 1349 * @flags: file-related flags associated with socket
1244 * 1350 *
1245 * Returns 0 on success, errno otherwise 1351 * Returns 0 on success, errno otherwise
1246 */ 1352 */
1247 1353
1248 static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 1354 static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1249 int flags) 1355 int flags)
1250 { 1356 {
1251 struct tipc_sock *tsock = tipc_sk(sock->sk); 1357 struct sock *sk = sock->sk;
1252 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest; 1358 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1253 struct msghdr m = {NULL,}; 1359 struct msghdr m = {NULL,};
1254 struct sk_buff *buf; 1360 struct sk_buff *buf;
1255 struct tipc_msg *msg; 1361 struct tipc_msg *msg;
1256 int res; 1362 int res;
1257 1363
1364 lock_sock(sk);
1365
1258 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ 1366 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1259 1367
1260 if (sock->state == SS_READY) 1368 if (sock->state == SS_READY) {
1261 return -EOPNOTSUPP; 1369 res = -EOPNOTSUPP;
1370 goto exit;
1371 }
1262 1372
1263 /* For now, TIPC does not support the non-blocking form of connect() */ 1373 /* For now, TIPC does not support the non-blocking form of connect() */
1264 1374
1265 if (flags & O_NONBLOCK) 1375 if (flags & O_NONBLOCK) {
1266 return -EWOULDBLOCK; 1376 res = -EWOULDBLOCK;
1377 goto exit;
1378 }
1267 1379
1268 /* Issue Posix-compliant error code if socket is in the wrong state */ 1380 /* Issue Posix-compliant error code if socket is in the wrong state */
1269 1381
1270 if (sock->state == SS_LISTENING) 1382 if (sock->state == SS_LISTENING) {
1271 return -EOPNOTSUPP; 1383 res = -EOPNOTSUPP;
1272 if (sock->state == SS_CONNECTING) 1384 goto exit;
1273 return -EALREADY; 1385 }
1274 if (sock->state != SS_UNCONNECTED) 1386 if (sock->state == SS_CONNECTING) {
1275 return -EISCONN; 1387 res = -EALREADY;
1388 goto exit;
1389 }
1390 if (sock->state != SS_UNCONNECTED) {
1391 res = -EISCONN;
1392 goto exit;
1393 }
1276 1394
1277 /* 1395 /*
1278 * Reject connection attempt using multicast address 1396 * Reject connection attempt using multicast address
1279 * 1397 *
1280 * Note: send_msg() validates the rest of the address fields, 1398 * Note: send_msg() validates the rest of the address fields,
1281 * so there's no need to do it here 1399 * so there's no need to do it here
1282 */ 1400 */
1283 1401
1284 if (dst->addrtype == TIPC_ADDR_MCAST) 1402 if (dst->addrtype == TIPC_ADDR_MCAST) {
1285 return -EINVAL; 1403 res = -EINVAL;
1404 goto exit;
1405 }
1286 1406
1407 /* Reject any messages already in receive queue (very unlikely) */
1408
1409 reject_rx_queue(sk);
1410
1287 /* Send a 'SYN-' to destination */ 1411 /* Send a 'SYN-' to destination */
1288 1412
1289 m.msg_name = dest; 1413 m.msg_name = dest;
1290 m.msg_namelen = destlen; 1414 m.msg_namelen = destlen;
1291 res = send_msg(NULL, sock, &m, 0); 1415 res = send_msg(NULL, sock, &m, 0);
1292 if (res < 0) { 1416 if (res < 0) {
1293 sock->state = SS_DISCONNECTING; 1417 goto exit;
1294 return res;
1295 } 1418 }
1296 1419
1297 if (mutex_lock_interruptible(&tsock->lock)) 1420 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1298 return -ERESTARTSYS;
1299 1421
1300 /* Wait for destination's 'ACK' response */ 1422 release_sock(sk);
1423 res = wait_event_interruptible_timeout(*sk->sk_sleep,
1424 (!skb_queue_empty(&sk->sk_receive_queue) ||
1425 (sock->state != SS_CONNECTING)),
1426 sk->sk_rcvtimeo);
1427 lock_sock(sk);
1301 1428
1302 res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
1303 skb_queue_len(&sock->sk->sk_receive_queue),
1304 sock->sk->sk_rcvtimeo);
1305 buf = skb_peek(&sock->sk->sk_receive_queue);
1306 if (res > 0) { 1429 if (res > 0) {
1307 msg = buf_msg(buf); 1430 buf = skb_peek(&sk->sk_receive_queue);
1308 res = auto_connect(sock, tsock, msg); 1431 if (buf != NULL) {
1309 if (!res) { 1432 msg = buf_msg(buf);
1310 if (!msg_data_sz(msg)) 1433 res = auto_connect(sock, msg);
1311 advance_queue(tsock); 1434 if (!res) {
1435 if (!msg_data_sz(msg))
1436 advance_rx_queue(sk);
1437 }
1438 } else {
1439 if (sock->state == SS_CONNECTED) {
1440 res = -EISCONN;
1441 } else {
1442 res = -ECONNREFUSED;
1443 }
1312 } 1444 }
1313 } else { 1445 } else {
1314 if (res == 0) 1446 if (res == 0)
1315 res = -ETIMEDOUT; 1447 res = -ETIMEDOUT;
1316 else 1448 else
1317 ; /* leave "res" unchanged */ 1449 ; /* leave "res" unchanged */
1318 sock->state = SS_DISCONNECTING; 1450 sock->state = SS_DISCONNECTING;
1319 } 1451 }
1320 1452
1321 mutex_unlock(&tsock->lock); 1453 exit:
1454 release_sock(sk);
1322 return res; 1455 return res;
1323 } 1456 }
1324 1457
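From user space, the handshake implemented by connect() above is driven by an ordinary connect() call on a connection-oriented TIPC socket. A minimal sketch, with the service type and instance (42/1) being purely hypothetical values:

	/* Sketch only -- user-space caller of the kernel connect() path above.
	 * SOCK_SEQPACKET is one of TIPC's connection-oriented socket types;
	 * non-blocking connects are still rejected with EWOULDBLOCK.
	 */
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/tipc.h>

	int tipc_connect_example(void)
	{
		struct sockaddr_tipc server;
		int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

		if (sd < 0)
			return -1;

		memset(&server, 0, sizeof(server));
		server.family = AF_TIPC;
		server.addrtype = TIPC_ADDR_NAME;
		server.addr.name.name.type = 42;	/* hypothetical service type */
		server.addr.name.name.instance = 1;	/* hypothetical instance */
		server.addr.name.domain = 0;		/* resolve anywhere */

		return connect(sd, (struct sockaddr *)&server, sizeof(server));
	}
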
1325 /** 1458 /**
1326 * listen - allow socket to listen for incoming connections 1459 * listen - allow socket to listen for incoming connections
1327 * @sock: socket structure 1460 * @sock: socket structure
1328 * @len: (unused) 1461 * @len: (unused)
1329 * 1462 *
1330 * Returns 0 on success, errno otherwise 1463 * Returns 0 on success, errno otherwise
1331 */ 1464 */
1332 1465
1333 static int listen(struct socket *sock, int len) 1466 static int listen(struct socket *sock, int len)
1334 { 1467 {
1335 /* REQUIRES SOCKET LOCKING OF SOME SORT? */ 1468 struct sock *sk = sock->sk;
1469 int res;
1336 1470
1471 lock_sock(sk);
1472
1337 if (sock->state == SS_READY) 1473 if (sock->state == SS_READY)
1338 return -EOPNOTSUPP; 1474 res = -EOPNOTSUPP;
1339 if (sock->state != SS_UNCONNECTED) 1475 else if (sock->state != SS_UNCONNECTED)
1340 return -EINVAL; 1476 res = -EINVAL;
1341 sock->state = SS_LISTENING; 1477 else {
1342 return 0; 1478 sock->state = SS_LISTENING;
1479 res = 0;
1480 }
1481
1482 release_sock(sk);
1483 return res;
1343 } 1484 }
1344 1485
1345 /** 1486 /**
1346 * accept - wait for connection request 1487 * accept - wait for connection request
1347 * @sock: listening socket 1488 * @sock: listening socket
1348 * @newsock: new socket that is to be connected 1489 * @newsock: new socket that is to be connected
1349 * @flags: file-related flags associated with socket 1490 * @flags: file-related flags associated with socket
1350 * 1491 *
1351 * Returns 0 on success, errno otherwise 1492 * Returns 0 on success, errno otherwise
1352 */ 1493 */
1353 1494
1354 static int accept(struct socket *sock, struct socket *newsock, int flags) 1495 static int accept(struct socket *sock, struct socket *new_sock, int flags)
1355 { 1496 {
1356 struct tipc_sock *tsock = tipc_sk(sock->sk); 1497 struct sock *sk = sock->sk;
1357 struct sk_buff *buf; 1498 struct sk_buff *buf;
1358 int res = -EFAULT; 1499 int res;
1359 1500
1360 if (sock->state == SS_READY) 1501 lock_sock(sk);
1361 return -EOPNOTSUPP;
1362 if (sock->state != SS_LISTENING)
1363 return -EINVAL;
1364 1502
1365 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 1503 if (sock->state == SS_READY) {
1366 (flags & O_NONBLOCK))) 1504 res = -EOPNOTSUPP;
1367 return -EWOULDBLOCK;
1368
1369 if (mutex_lock_interruptible(&tsock->lock))
1370 return -ERESTARTSYS;
1371
1372 if (wait_event_interruptible(*sock->sk->sk_sleep,
1373 skb_queue_len(&sock->sk->sk_receive_queue))) {
1374 res = -ERESTARTSYS;
1375 goto exit; 1505 goto exit;
1376 } 1506 }
1377 buf = skb_peek(&sock->sk->sk_receive_queue); 1507 if (sock->state != SS_LISTENING) {
1508 res = -EINVAL;
1509 goto exit;
1510 }
1378 1511
1379 res = tipc_create(sock_net(sock->sk), newsock, 0); 1512 while (skb_queue_empty(&sk->sk_receive_queue)) {
1513 if (flags & O_NONBLOCK) {
1514 res = -EWOULDBLOCK;
1515 goto exit;
1516 }
1517 release_sock(sk);
1518 res = wait_event_interruptible(*sk->sk_sleep,
1519 (!skb_queue_empty(&sk->sk_receive_queue)));
1520 lock_sock(sk);
1521 if (res)
1522 goto exit;
1523 }
1524
1525 buf = skb_peek(&sk->sk_receive_queue);
1526
1527 res = tipc_create(sock_net(sock->sk), new_sock, 0);
1380 if (!res) { 1528 if (!res) {
1381 struct tipc_sock *new_tsock = tipc_sk(newsock->sk); 1529 struct sock *new_sk = new_sock->sk;
1530 struct tipc_port *new_tport = tipc_sk_port(new_sk);
1531 u32 new_ref = new_tport->ref;
1382 struct tipc_portid id; 1532 struct tipc_portid id;
1383 struct tipc_msg *msg = buf_msg(buf); 1533 struct tipc_msg *msg = buf_msg(buf);
1384 u32 new_ref = new_tsock->p->ref;
1385 1534
1535 lock_sock(new_sk);
1536
1537 /*
1538 * Reject any stray messages received by new socket
1539 * before the socket lock was taken (very, very unlikely)
1540 */
1541
1542 reject_rx_queue(new_sk);
1543
1544 /* Connect new socket to its peer */
1545
1386 id.ref = msg_origport(msg); 1546 id.ref = msg_origport(msg);
1387 id.node = msg_orignode(msg); 1547 id.node = msg_orignode(msg);
1388 tipc_connect2port(new_ref, &id); 1548 tipc_connect2port(new_ref, &id);
1389 newsock->state = SS_CONNECTED; 1549 new_sock->state = SS_CONNECTED;
1390 1550
1391 tipc_set_portimportance(new_ref, msg_importance(msg)); 1551 tipc_set_portimportance(new_ref, msg_importance(msg));
1392 if (msg_named(msg)) { 1552 if (msg_named(msg)) {
1393 new_tsock->p->conn_type = msg_nametype(msg); 1553 new_tport->conn_type = msg_nametype(msg);
1394 new_tsock->p->conn_instance = msg_nameinst(msg); 1554 new_tport->conn_instance = msg_nameinst(msg);
1395 } 1555 }
1396 1556
1397 /* 1557 /*
1398 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1558 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1399 * Respond to 'SYN+' by queuing it on new socket. 1559 * Respond to 'SYN+' by queuing it on new socket.
1400 */ 1560 */
1401 1561
1402 msg_dbg(msg,"<ACC<: "); 1562 msg_dbg(msg,"<ACC<: ");
1403 if (!msg_data_sz(msg)) { 1563 if (!msg_data_sz(msg)) {
1404 struct msghdr m = {NULL,}; 1564 struct msghdr m = {NULL,};
1405 1565
1406 send_packet(NULL, newsock, &m, 0); 1566 advance_rx_queue(sk);
1407 advance_queue(tsock); 1567 send_packet(NULL, new_sock, &m, 0);
1408 } else { 1568 } else {
1409 sock_lock(tsock); 1569 __skb_dequeue(&sk->sk_receive_queue);
1410 skb_dequeue(&sock->sk->sk_receive_queue); 1570 __skb_queue_head(&new_sk->sk_receive_queue, buf);
1411 sock_unlock(tsock);
1412 skb_queue_head(&newsock->sk->sk_receive_queue, buf);
1413 } 1571 }
1572 release_sock(new_sk);
1414 } 1573 }
1415 exit: 1574 exit:
1416 mutex_unlock(&tsock->lock); 1575 release_sock(sk);
1417 return res; 1576 return res;
1418 } 1577 }
1419 1578
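The listening side of the exchange handled by listen() and accept() above looks like this from user space; the published name sequence (type 42, instances 0-99) and the cluster scope are illustrative choices only:

	/* Sketch only -- user-space counterpart of the listen()/accept() pair
	 * above.  The backlog argument to listen() is unused by TIPC.
	 */
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/tipc.h>

	int tipc_server_example(void)
	{
		struct sockaddr_tipc bind_addr;
		int listener = socket(AF_TIPC, SOCK_SEQPACKET, 0);
		int peer;

		if (listener < 0)
			return -1;

		memset(&bind_addr, 0, sizeof(bind_addr));
		bind_addr.family = AF_TIPC;
		bind_addr.addrtype = TIPC_ADDR_NAMESEQ;
		bind_addr.scope = TIPC_CLUSTER_SCOPE;
		bind_addr.addr.nameseq.type = 42;	/* hypothetical service type */
		bind_addr.addr.nameseq.lower = 0;
		bind_addr.addr.nameseq.upper = 99;

		if (bind(listener, (struct sockaddr *)&bind_addr,
			 sizeof(bind_addr)) < 0)
			return -1;
		if (listen(listener, 0) < 0)
			return -1;

		peer = accept(listener, NULL, NULL);	/* blocks for a 'SYN' */
		return peer;
	}
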
1420 /** 1579 /**
1421 * shutdown - shutdown socket connection 1580 * shutdown - shutdown socket connection
1422 * @sock: socket structure 1581 * @sock: socket structure
1423 * @how: direction to close (must be SHUT_RDWR) 1582 * @how: direction to close (must be SHUT_RDWR)
1424 * 1583 *
1425 * Terminates connection (if necessary), then purges socket's receive queue. 1584 * Terminates connection (if necessary), then purges socket's receive queue.
1426 * 1585 *
1427 * Returns 0 on success, errno otherwise 1586 * Returns 0 on success, errno otherwise
1428 */ 1587 */
1429 1588
1430 static int shutdown(struct socket *sock, int how) 1589 static int shutdown(struct socket *sock, int how)
1431 { 1590 {
1432 struct tipc_sock* tsock = tipc_sk(sock->sk); 1591 struct sock *sk = sock->sk;
1592 struct tipc_port *tport = tipc_sk_port(sk);
1433 struct sk_buff *buf; 1593 struct sk_buff *buf;
1434 int res; 1594 int res;
1435 1595
1436 if (how != SHUT_RDWR) 1596 if (how != SHUT_RDWR)
1437 return -EINVAL; 1597 return -EINVAL;
1438 1598
1439 if (mutex_lock_interruptible(&tsock->lock)) 1599 lock_sock(sk);
1440 return -ERESTARTSYS;
1441 1600
1442 sock_lock(tsock);
1443
1444 switch (sock->state) { 1601 switch (sock->state) {
1602 case SS_CONNECTING:
1445 case SS_CONNECTED: 1603 case SS_CONNECTED:
1446 1604
1447 /* Send 'FIN+' or 'FIN-' message to peer */ 1605 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1448
1449 sock_unlock(tsock);
1450 restart: 1606 restart:
1451 if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 1607 buf = __skb_dequeue(&sk->sk_receive_queue);
1608 if (buf) {
1452 atomic_dec(&tipc_queue_size); 1609 atomic_dec(&tipc_queue_size);
1453 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) { 1610 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
1454 buf_discard(buf); 1611 buf_discard(buf);
1455 goto restart; 1612 goto restart;
1456 } 1613 }
1614 tipc_disconnect(tport->ref);
1457 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN); 1615 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1616 } else {
1617 tipc_shutdown(tport->ref);
1458 } 1618 }
1459 else {
1460 tipc_shutdown(tsock->p->ref);
1461 }
1462 sock_lock(tsock);
1463 1619
1620 sock->state = SS_DISCONNECTING;
1621
1464 /* fall through */ 1622 /* fall through */
1465 1623
1466 case SS_DISCONNECTING: 1624 case SS_DISCONNECTING:
1467 1625
1468 /* Discard any unreceived messages */ 1626 /* Discard any unreceived messages; wake up sleeping tasks */
1469 1627
1470 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) { 1628 discard_rx_queue(sk);
1471 atomic_dec(&tipc_queue_size); 1629 if (waitqueue_active(sk->sk_sleep))
1472 buf_discard(buf); 1630 wake_up_interruptible(sk->sk_sleep);
1473 }
1474 tsock->p->conn_unacked = 0;
1475
1476 /* fall through */
1477
1478 case SS_CONNECTING:
1479 sock->state = SS_DISCONNECTING;
1480 res = 0; 1631 res = 0;
1481 break; 1632 break;
1482 1633
1483 default: 1634 default:
1484 res = -ENOTCONN; 1635 res = -ENOTCONN;
1485 } 1636 }
1486 1637
1487 sock_unlock(tsock); 1638 release_sock(sk);
1488
1489 mutex_unlock(&tsock->lock);
1490 return res; 1639 return res;
1491 } 1640 }
1492 1641
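Note that the shutdown() handler above accepts only a full-duplex close; asking for SHUT_RD or SHUT_WR alone returns EINVAL. A one-line user-space sketch:

	/* Sketch only -- SHUT_RDWR is the only mode the handler above allows. */
	#include <sys/socket.h>

	static int close_tipc_connection(int sd)
	{
		return shutdown(sd, SHUT_RDWR);
	}
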
1493 /** 1642 /**
1494 * setsockopt - set socket option 1643 * setsockopt - set socket option
1495 * @sock: socket structure 1644 * @sock: socket structure
1496 * @lvl: option level 1645 * @lvl: option level
1497 * @opt: option identifier 1646 * @opt: option identifier
1498 * @ov: pointer to new option value 1647 * @ov: pointer to new option value
1499 * @ol: length of option value 1648 * @ol: length of option value
1500 * 1649 *
1501 * For stream sockets only, accepts and ignores all IPPROTO_TCP options 1650 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1502 * (to ease compatibility). 1651 * (to ease compatibility).
1503 * 1652 *
1504 * Returns 0 on success, errno otherwise 1653 * Returns 0 on success, errno otherwise
1505 */ 1654 */
1506 1655
1507 static int setsockopt(struct socket *sock, 1656 static int setsockopt(struct socket *sock,
1508 int lvl, int opt, char __user *ov, int ol) 1657 int lvl, int opt, char __user *ov, int ol)
1509 { 1658 {
1510 struct tipc_sock *tsock = tipc_sk(sock->sk); 1659 struct sock *sk = sock->sk;
1660 struct tipc_port *tport = tipc_sk_port(sk);
1511 u32 value; 1661 u32 value;
1512 int res; 1662 int res;
1513 1663
1514 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM)) 1664 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1515 return 0; 1665 return 0;
1516 if (lvl != SOL_TIPC) 1666 if (lvl != SOL_TIPC)
1517 return -ENOPROTOOPT; 1667 return -ENOPROTOOPT;
1518 if (ol < sizeof(value)) 1668 if (ol < sizeof(value))
1519 return -EINVAL; 1669 return -EINVAL;
1520 if ((res = get_user(value, (u32 __user *)ov))) 1670 if ((res = get_user(value, (u32 __user *)ov)))
1521 return res; 1671 return res;
1522 1672
1523 if (mutex_lock_interruptible(&tsock->lock)) 1673 lock_sock(sk);
1524 return -ERESTARTSYS;
1525 1674
1526 switch (opt) { 1675 switch (opt) {
1527 case TIPC_IMPORTANCE: 1676 case TIPC_IMPORTANCE:
1528 res = tipc_set_portimportance(tsock->p->ref, value); 1677 res = tipc_set_portimportance(tport->ref, value);
1529 break; 1678 break;
1530 case TIPC_SRC_DROPPABLE: 1679 case TIPC_SRC_DROPPABLE:
1531 if (sock->type != SOCK_STREAM) 1680 if (sock->type != SOCK_STREAM)
1532 res = tipc_set_portunreliable(tsock->p->ref, value); 1681 res = tipc_set_portunreliable(tport->ref, value);
1533 else 1682 else
1534 res = -ENOPROTOOPT; 1683 res = -ENOPROTOOPT;
1535 break; 1684 break;
1536 case TIPC_DEST_DROPPABLE: 1685 case TIPC_DEST_DROPPABLE:
1537 res = tipc_set_portunreturnable(tsock->p->ref, value); 1686 res = tipc_set_portunreturnable(tport->ref, value);
1538 break; 1687 break;
1539 case TIPC_CONN_TIMEOUT: 1688 case TIPC_CONN_TIMEOUT:
1540 sock->sk->sk_rcvtimeo = msecs_to_jiffies(value); 1689 sk->sk_rcvtimeo = msecs_to_jiffies(value);
1690 /* no need to set "res", since already 0 at this point */
1541 break; 1691 break;
1542 default: 1692 default:
1543 res = -EINVAL; 1693 res = -EINVAL;
1544 } 1694 }
1545 1695
1546 mutex_unlock(&tsock->lock); 1696 release_sock(sk);
1697
1547 return res; 1698 return res;
1548 } 1699 }
1549 1700
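The TIPC_CONN_TIMEOUT branch above stores the supplied value, given in milliseconds, in sk->sk_rcvtimeo via msecs_to_jiffies(); it is the timeout the connect() path later passes to wait_event_interruptible_timeout(). A user-space sketch of setting it, assuming SOL_TIPC is visible from the installed socket headers:

	/* Sketch only -- the value is in milliseconds; the kernel side above
	 * converts it to jiffies and keeps it in sk->sk_rcvtimeo.
	 */
	#include <sys/socket.h>
	#include <linux/tipc.h>

	static int set_tipc_connect_timeout(int sd, unsigned int timeout_ms)
	{
		return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
				  &timeout_ms, sizeof(timeout_ms));
	}
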
1550 /** 1701 /**
1551 * getsockopt - get socket option 1702 * getsockopt - get socket option
1552 * @sock: socket structure 1703 * @sock: socket structure
1553 * @lvl: option level 1704 * @lvl: option level
1554 * @opt: option identifier 1705 * @opt: option identifier
1555 * @ov: receptacle for option value 1706 * @ov: receptacle for option value
1556 * @ol: receptacle for length of option value 1707 * @ol: receptacle for length of option value
1557 * 1708 *
1558 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 1709 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1559 * (to ease compatibility). 1710 * (to ease compatibility).
1560 * 1711 *
1561 * Returns 0 on success, errno otherwise 1712 * Returns 0 on success, errno otherwise
1562 */ 1713 */
1563 1714
1564 static int getsockopt(struct socket *sock, 1715 static int getsockopt(struct socket *sock,
1565 int lvl, int opt, char __user *ov, int __user *ol) 1716 int lvl, int opt, char __user *ov, int __user *ol)