Commit 0233493a5fad227645f7f02539cb42db72e76030

Authored by Jon Maloy
Committed by David S. Miller
1 parent 4ea5dab541

tipc: adjustment to group member FSM

Analysis reveals that the member state MBR_QUARANTINED is in reality
unnecessary and can be replaced by the state MBR_JOINING at all
occurrences.

Acked-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 1 changed file with 2 additions and 3 deletions

diff --git a/net/tipc/group.c b/net/tipc/group.c
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -49,7 +49,6 @@
 #define ADV_ACTIVE (ADV_UNIT * 12)
 
 enum mbr_state {
-	MBR_QUARANTINED,
 	MBR_DISCOVERED,
 	MBR_JOINING,
 	MBR_PUBLISHED,
@@ -138,7 +137,7 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
 
 static bool tipc_group_is_receiver(struct tipc_member *m)
 {
-	return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
+	return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
 }
 
 static bool tipc_group_is_sender(struct tipc_member *m)
@@ -690,7 +689,7 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
 	case GRP_JOIN_MSG:
 		if (!m)
 			m = tipc_group_create_member(grp, node, port,
-						     MBR_QUARANTINED);
+						     MBR_JOINING);
 		if (!m)
 			return;
 		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
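As background, the member FSM pieces this patch touches can be condensed as below. This is an excerpt of net/tipc/group.c as of this commit, trimmed for orientation rather than a complete or buildable listing: since MBR_JOINING sorts before MBR_JOINED, tipc_group_is_sender() already treats a joining member as a non-sender, and tipc_group_is_receiver() now excludes it as a receiver as well, covering the role the removed MBR_QUARANTINED state used to play.

enum mbr_state {
	MBR_DISCOVERED,
	MBR_JOINING,
	MBR_PUBLISHED,
	MBR_JOINED,
	MBR_PENDING,
	MBR_ACTIVE,
	MBR_RECLAIMING,
	MBR_REMITTED,
	MBR_LEAVING
};

struct tipc_member {
	/* ... other fields omitted ... */
	enum mbr_state state;
};

/* A member still in MBR_JOINING is neither a valid receiver nor a valid
 * sender, which is the role MBR_QUARANTINED used to fill.
 */
static bool tipc_group_is_receiver(struct tipc_member *m)
{
	return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
}

static bool tipc_group_is_sender(struct tipc_member *m)
{
	return m && m->state >= MBR_JOINED;
}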