Commit 30da63a6af1dbf60e6d989faa4b984b1c6c9dfaf

Authored by Marek Lindner
Committed by Antonio Quartulli
1 parent f03a13a716

batman-adv: consolidate duplicated primary_if checking code

Signed-off-by: Marek Lindner <lindner_marek@yahoo.de>
Signed-off-by: Antonio Quartulli <ordex@autistici.org>

Showing 6 changed files with 57 additions and 91 deletions
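
The duplicated pattern being consolidated is the "look up the primary interface, bail out with a message if it is missing or inactive" preamble at the top of the various debugfs seq_print handlers. Below is a minimal sketch of what such a shared helper can look like; the helper name batadv_seq_print_text_primary_if_get(), its placement, and the exact messages are assumptions made for illustration, not a verbatim copy of the patch.

/* Sketch of a consolidated helper: fetch the selected primary interface
 * for a seq_file handler and print the usual "disabled" notices when it
 * is missing or not active.  Callers then need only a single NULL check
 * instead of repeating this block in every *_seq_print_text() function.
 */
static struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (!primary_if) {
                seq_printf(seq,
                           "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                           net_dev->name);
                goto out;
        }

        if (primary_if->if_status == BATADV_IF_ACTIVE)
                goto out;

        seq_printf(seq,
                   "BATMAN mesh %s disabled - primary interface not active\n",
                   net_dev->name);
        batadv_hardif_free_ref(primary_if);
        primary_if = NULL;

out:
        return primary_if;
}

With such a helper, each seq_print handler's duplicated check collapses to a single call followed by a NULL test, which matches the "57 additions and 91 deletions" balance of the diff.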

net/batman-adv/bridge_loop_avoidance.c
1 /* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Simon Wunderlich 3 * Simon Wunderlich
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #include "main.h" 20 #include "main.h"
21 #include "hash.h" 21 #include "hash.h"
22 #include "hard-interface.h" 22 #include "hard-interface.h"
23 #include "originator.h" 23 #include "originator.h"
24 #include "bridge_loop_avoidance.h" 24 #include "bridge_loop_avoidance.h"
25 #include "translation-table.h" 25 #include "translation-table.h"
26 #include "send.h" 26 #include "send.h"
27 27
28 #include <linux/etherdevice.h> 28 #include <linux/etherdevice.h>
29 #include <linux/crc16.h> 29 #include <linux/crc16.h>
30 #include <linux/if_arp.h> 30 #include <linux/if_arp.h>
31 #include <net/arp.h> 31 #include <net/arp.h>
32 #include <linux/if_vlan.h> 32 #include <linux/if_vlan.h>
33 33
34 static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05}; 34 static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
35 35
36 static void batadv_bla_periodic_work(struct work_struct *work); 36 static void batadv_bla_periodic_work(struct work_struct *work);
37 static void batadv_bla_send_announce(struct batadv_priv *bat_priv, 37 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
38 struct batadv_backbone_gw *backbone_gw); 38 struct batadv_backbone_gw *backbone_gw);
39 39
40 /* return the index of the claim */ 40 /* return the index of the claim */
41 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size) 41 static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
42 { 42 {
43 const unsigned char *key = data; 43 const unsigned char *key = data;
44 uint32_t hash = 0; 44 uint32_t hash = 0;
45 size_t i; 45 size_t i;
46 46
47 for (i = 0; i < ETH_ALEN + sizeof(short); i++) { 47 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
48 hash += key[i]; 48 hash += key[i];
49 hash += (hash << 10); 49 hash += (hash << 10);
50 hash ^= (hash >> 6); 50 hash ^= (hash >> 6);
51 } 51 }
52 52
53 hash += (hash << 3); 53 hash += (hash << 3);
54 hash ^= (hash >> 11); 54 hash ^= (hash >> 11);
55 hash += (hash << 15); 55 hash += (hash << 15);
56 56
57 return hash % size; 57 return hash % size;
58 } 58 }
59 59
60 /* return the index of the backbone gateway */ 60 /* return the index of the backbone gateway */
61 static inline uint32_t batadv_choose_backbone_gw(const void *data, 61 static inline uint32_t batadv_choose_backbone_gw(const void *data,
62 uint32_t size) 62 uint32_t size)
63 { 63 {
64 const unsigned char *key = data; 64 const unsigned char *key = data;
65 uint32_t hash = 0; 65 uint32_t hash = 0;
66 size_t i; 66 size_t i;
67 67
68 for (i = 0; i < ETH_ALEN + sizeof(short); i++) { 68 for (i = 0; i < ETH_ALEN + sizeof(short); i++) {
69 hash += key[i]; 69 hash += key[i];
70 hash += (hash << 10); 70 hash += (hash << 10);
71 hash ^= (hash >> 6); 71 hash ^= (hash >> 6);
72 } 72 }
73 73
74 hash += (hash << 3); 74 hash += (hash << 3);
75 hash ^= (hash >> 11); 75 hash ^= (hash >> 11);
76 hash += (hash << 15); 76 hash += (hash << 15);
77 77
78 return hash % size; 78 return hash % size;
79 } 79 }
80 80
81 81
82 /* compares address and vid of two backbone gws */ 82 /* compares address and vid of two backbone gws */
83 static int batadv_compare_backbone_gw(const struct hlist_node *node, 83 static int batadv_compare_backbone_gw(const struct hlist_node *node,
84 const void *data2) 84 const void *data2)
85 { 85 {
86 const void *data1 = container_of(node, struct batadv_backbone_gw, 86 const void *data1 = container_of(node, struct batadv_backbone_gw,
87 hash_entry); 87 hash_entry);
88 88
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
90 } 90 }
91 91
92 /* compares address and vid of two claims */ 92 /* compares address and vid of two claims */
93 static int batadv_compare_claim(const struct hlist_node *node, 93 static int batadv_compare_claim(const struct hlist_node *node,
94 const void *data2) 94 const void *data2)
95 { 95 {
96 const void *data1 = container_of(node, struct batadv_claim, 96 const void *data1 = container_of(node, struct batadv_claim,
97 hash_entry); 97 hash_entry);
98 98
99 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0); 99 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
100 } 100 }
101 101
102 /* free a backbone gw */ 102 /* free a backbone gw */
103 static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw) 103 static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
104 { 104 {
105 if (atomic_dec_and_test(&backbone_gw->refcount)) 105 if (atomic_dec_and_test(&backbone_gw->refcount))
106 kfree_rcu(backbone_gw, rcu); 106 kfree_rcu(backbone_gw, rcu);
107 } 107 }
108 108
109 /* finally deinitialize the claim */ 109 /* finally deinitialize the claim */
110 static void batadv_claim_free_rcu(struct rcu_head *rcu) 110 static void batadv_claim_free_rcu(struct rcu_head *rcu)
111 { 111 {
112 struct batadv_claim *claim; 112 struct batadv_claim *claim;
113 113
114 claim = container_of(rcu, struct batadv_claim, rcu); 114 claim = container_of(rcu, struct batadv_claim, rcu);
115 115
116 batadv_backbone_gw_free_ref(claim->backbone_gw); 116 batadv_backbone_gw_free_ref(claim->backbone_gw);
117 kfree(claim); 117 kfree(claim);
118 } 118 }
119 119
120 /* free a claim, call claim_free_rcu if its the last reference */ 120 /* free a claim, call claim_free_rcu if its the last reference */
121 static void batadv_claim_free_ref(struct batadv_claim *claim) 121 static void batadv_claim_free_ref(struct batadv_claim *claim)
122 { 122 {
123 if (atomic_dec_and_test(&claim->refcount)) 123 if (atomic_dec_and_test(&claim->refcount))
124 call_rcu(&claim->rcu, batadv_claim_free_rcu); 124 call_rcu(&claim->rcu, batadv_claim_free_rcu);
125 } 125 }
126 126
127 /* @bat_priv: the bat priv with all the soft interface information 127 /* @bat_priv: the bat priv with all the soft interface information
128 * @data: search data (may be local/static data) 128 * @data: search data (may be local/static data)
129 * 129 *
130 * looks for a claim in the hash, and returns it if found 130 * looks for a claim in the hash, and returns it if found
131 * or NULL otherwise. 131 * or NULL otherwise.
132 */ 132 */
133 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv, 133 static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
134 struct batadv_claim *data) 134 struct batadv_claim *data)
135 { 135 {
136 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 136 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
137 struct hlist_head *head; 137 struct hlist_head *head;
138 struct hlist_node *node; 138 struct hlist_node *node;
139 struct batadv_claim *claim; 139 struct batadv_claim *claim;
140 struct batadv_claim *claim_tmp = NULL; 140 struct batadv_claim *claim_tmp = NULL;
141 int index; 141 int index;
142 142
143 if (!hash) 143 if (!hash)
144 return NULL; 144 return NULL;
145 145
146 index = batadv_choose_claim(data, hash->size); 146 index = batadv_choose_claim(data, hash->size);
147 head = &hash->table[index]; 147 head = &hash->table[index];
148 148
149 rcu_read_lock(); 149 rcu_read_lock();
150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 150 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
151 if (!batadv_compare_claim(&claim->hash_entry, data)) 151 if (!batadv_compare_claim(&claim->hash_entry, data))
152 continue; 152 continue;
153 153
154 if (!atomic_inc_not_zero(&claim->refcount)) 154 if (!atomic_inc_not_zero(&claim->refcount))
155 continue; 155 continue;
156 156
157 claim_tmp = claim; 157 claim_tmp = claim;
158 break; 158 break;
159 } 159 }
160 rcu_read_unlock(); 160 rcu_read_unlock();
161 161
162 return claim_tmp; 162 return claim_tmp;
163 } 163 }
164 164
165 /** 165 /**
166 * batadv_backbone_hash_find - looks for a claim in the hash 166 * batadv_backbone_hash_find - looks for a claim in the hash
167 * @bat_priv: the bat priv with all the soft interface information 167 * @bat_priv: the bat priv with all the soft interface information
168 * @addr: the address of the originator 168 * @addr: the address of the originator
169 * @vid: the VLAN ID 169 * @vid: the VLAN ID
170 * 170 *
171 * Returns claim if found or NULL otherwise. 171 * Returns claim if found or NULL otherwise.
172 */ 172 */
173 static struct batadv_backbone_gw * 173 static struct batadv_backbone_gw *
174 batadv_backbone_hash_find(struct batadv_priv *bat_priv, 174 batadv_backbone_hash_find(struct batadv_priv *bat_priv,
175 uint8_t *addr, short vid) 175 uint8_t *addr, short vid)
176 { 176 {
177 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 177 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
178 struct hlist_head *head; 178 struct hlist_head *head;
179 struct hlist_node *node; 179 struct hlist_node *node;
180 struct batadv_backbone_gw search_entry, *backbone_gw; 180 struct batadv_backbone_gw search_entry, *backbone_gw;
181 struct batadv_backbone_gw *backbone_gw_tmp = NULL; 181 struct batadv_backbone_gw *backbone_gw_tmp = NULL;
182 int index; 182 int index;
183 183
184 if (!hash) 184 if (!hash)
185 return NULL; 185 return NULL;
186 186
187 memcpy(search_entry.orig, addr, ETH_ALEN); 187 memcpy(search_entry.orig, addr, ETH_ALEN);
188 search_entry.vid = vid; 188 search_entry.vid = vid;
189 189
190 index = batadv_choose_backbone_gw(&search_entry, hash->size); 190 index = batadv_choose_backbone_gw(&search_entry, hash->size);
191 head = &hash->table[index]; 191 head = &hash->table[index];
192 192
193 rcu_read_lock(); 193 rcu_read_lock();
194 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 194 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
195 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry, 195 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
196 &search_entry)) 196 &search_entry))
197 continue; 197 continue;
198 198
199 if (!atomic_inc_not_zero(&backbone_gw->refcount)) 199 if (!atomic_inc_not_zero(&backbone_gw->refcount))
200 continue; 200 continue;
201 201
202 backbone_gw_tmp = backbone_gw; 202 backbone_gw_tmp = backbone_gw;
203 break; 203 break;
204 } 204 }
205 rcu_read_unlock(); 205 rcu_read_unlock();
206 206
207 return backbone_gw_tmp; 207 return backbone_gw_tmp;
208 } 208 }
209 209
210 /* delete all claims for a backbone */ 210 /* delete all claims for a backbone */
211 static void 211 static void
212 batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw) 212 batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
213 { 213 {
214 struct batadv_hashtable *hash; 214 struct batadv_hashtable *hash;
215 struct hlist_node *node, *node_tmp; 215 struct hlist_node *node, *node_tmp;
216 struct hlist_head *head; 216 struct hlist_head *head;
217 struct batadv_claim *claim; 217 struct batadv_claim *claim;
218 int i; 218 int i;
219 spinlock_t *list_lock; /* protects write access to the hash lists */ 219 spinlock_t *list_lock; /* protects write access to the hash lists */
220 220
221 hash = backbone_gw->bat_priv->bla.claim_hash; 221 hash = backbone_gw->bat_priv->bla.claim_hash;
222 if (!hash) 222 if (!hash)
223 return; 223 return;
224 224
225 for (i = 0; i < hash->size; i++) { 225 for (i = 0; i < hash->size; i++) {
226 head = &hash->table[i]; 226 head = &hash->table[i];
227 list_lock = &hash->list_locks[i]; 227 list_lock = &hash->list_locks[i];
228 228
229 spin_lock_bh(list_lock); 229 spin_lock_bh(list_lock);
230 hlist_for_each_entry_safe(claim, node, node_tmp, 230 hlist_for_each_entry_safe(claim, node, node_tmp,
231 head, hash_entry) { 231 head, hash_entry) {
232 232
233 if (claim->backbone_gw != backbone_gw) 233 if (claim->backbone_gw != backbone_gw)
234 continue; 234 continue;
235 235
236 batadv_claim_free_ref(claim); 236 batadv_claim_free_ref(claim);
237 hlist_del_rcu(node); 237 hlist_del_rcu(node);
238 } 238 }
239 spin_unlock_bh(list_lock); 239 spin_unlock_bh(list_lock);
240 } 240 }
241 241
242 /* all claims gone, intialize CRC */ 242 /* all claims gone, intialize CRC */
243 backbone_gw->crc = BATADV_BLA_CRC_INIT; 243 backbone_gw->crc = BATADV_BLA_CRC_INIT;
244 } 244 }
245 245
246 /** 246 /**
247 * batadv_bla_send_claim - sends a claim frame according to the provided info 247 * batadv_bla_send_claim - sends a claim frame according to the provided info
248 * @bat_priv: the bat priv with all the soft interface information 248 * @bat_priv: the bat priv with all the soft interface information
249 * @orig: the mac address to be announced within the claim 249 * @orig: the mac address to be announced within the claim
250 * @vid: the VLAN ID 250 * @vid: the VLAN ID
251 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) 251 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
252 */ 252 */
253 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, 253 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
254 short vid, int claimtype) 254 short vid, int claimtype)
255 { 255 {
256 struct sk_buff *skb; 256 struct sk_buff *skb;
257 struct ethhdr *ethhdr; 257 struct ethhdr *ethhdr;
258 struct batadv_hard_iface *primary_if; 258 struct batadv_hard_iface *primary_if;
259 struct net_device *soft_iface; 259 struct net_device *soft_iface;
260 uint8_t *hw_src; 260 uint8_t *hw_src;
261 struct batadv_bla_claim_dst local_claim_dest; 261 struct batadv_bla_claim_dst local_claim_dest;
262 __be32 zeroip = 0; 262 __be32 zeroip = 0;
263 263
264 primary_if = batadv_primary_if_get_selected(bat_priv); 264 primary_if = batadv_primary_if_get_selected(bat_priv);
265 if (!primary_if) 265 if (!primary_if)
266 return; 266 return;
267 267
268 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest, 268 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
269 sizeof(local_claim_dest)); 269 sizeof(local_claim_dest));
270 local_claim_dest.type = claimtype; 270 local_claim_dest.type = claimtype;
271 271
272 soft_iface = primary_if->soft_iface; 272 soft_iface = primary_if->soft_iface;
273 273
274 skb = arp_create(ARPOP_REPLY, ETH_P_ARP, 274 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
275 /* IP DST: 0.0.0.0 */ 275 /* IP DST: 0.0.0.0 */
276 zeroip, 276 zeroip,
277 primary_if->soft_iface, 277 primary_if->soft_iface,
278 /* IP SRC: 0.0.0.0 */ 278 /* IP SRC: 0.0.0.0 */
279 zeroip, 279 zeroip,
280 /* Ethernet DST: Broadcast */ 280 /* Ethernet DST: Broadcast */
281 NULL, 281 NULL,
282 /* Ethernet SRC/HW SRC: originator mac */ 282 /* Ethernet SRC/HW SRC: originator mac */
283 primary_if->net_dev->dev_addr, 283 primary_if->net_dev->dev_addr,
284 /* HW DST: FF:43:05:XX:YY:YY 284 /* HW DST: FF:43:05:XX:YY:YY
285 * with XX = claim type 285 * with XX = claim type
286 * and YY:YY = group id 286 * and YY:YY = group id
287 */ 287 */
288 (uint8_t *)&local_claim_dest); 288 (uint8_t *)&local_claim_dest);
289 289
290 if (!skb) 290 if (!skb)
291 goto out; 291 goto out;
292 292
293 ethhdr = (struct ethhdr *)skb->data; 293 ethhdr = (struct ethhdr *)skb->data;
294 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr); 294 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
295 295
296 /* now we pretend that the client would have sent this ... */ 296 /* now we pretend that the client would have sent this ... */
297 switch (claimtype) { 297 switch (claimtype) {
298 case BATADV_CLAIM_TYPE_CLAIM: 298 case BATADV_CLAIM_TYPE_CLAIM:
299 /* normal claim frame 299 /* normal claim frame
300 * set Ethernet SRC to the clients mac 300 * set Ethernet SRC to the clients mac
301 */ 301 */
302 memcpy(ethhdr->h_source, mac, ETH_ALEN); 302 memcpy(ethhdr->h_source, mac, ETH_ALEN);
303 batadv_dbg(BATADV_DBG_BLA, bat_priv, 303 batadv_dbg(BATADV_DBG_BLA, bat_priv,
304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); 304 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
305 break; 305 break;
306 case BATADV_CLAIM_TYPE_UNCLAIM: 306 case BATADV_CLAIM_TYPE_UNCLAIM:
307 /* unclaim frame 307 /* unclaim frame
308 * set HW SRC to the clients mac 308 * set HW SRC to the clients mac
309 */ 309 */
310 memcpy(hw_src, mac, ETH_ALEN); 310 memcpy(hw_src, mac, ETH_ALEN);
311 batadv_dbg(BATADV_DBG_BLA, bat_priv, 311 batadv_dbg(BATADV_DBG_BLA, bat_priv,
312 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, 312 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
313 vid); 313 vid);
314 break; 314 break;
315 case BATADV_CLAIM_TYPE_ANNOUNCE: 315 case BATADV_CLAIM_TYPE_ANNOUNCE:
316 /* announcement frame 316 /* announcement frame
317 * set HW SRC to the special mac containg the crc 317 * set HW SRC to the special mac containg the crc
318 */ 318 */
319 memcpy(hw_src, mac, ETH_ALEN); 319 memcpy(hw_src, mac, ETH_ALEN);
320 batadv_dbg(BATADV_DBG_BLA, bat_priv, 320 batadv_dbg(BATADV_DBG_BLA, bat_priv,
321 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", 321 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
322 ethhdr->h_source, vid); 322 ethhdr->h_source, vid);
323 break; 323 break;
324 case BATADV_CLAIM_TYPE_REQUEST: 324 case BATADV_CLAIM_TYPE_REQUEST:
325 /* request frame 325 /* request frame
326 * set HW SRC and header destination to the receiving backbone 326 * set HW SRC and header destination to the receiving backbone
327 * gws mac 327 * gws mac
328 */ 328 */
329 memcpy(hw_src, mac, ETH_ALEN); 329 memcpy(hw_src, mac, ETH_ALEN);
330 memcpy(ethhdr->h_dest, mac, ETH_ALEN); 330 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
331 batadv_dbg(BATADV_DBG_BLA, bat_priv, 331 batadv_dbg(BATADV_DBG_BLA, bat_priv,
332 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", 332 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
333 ethhdr->h_source, ethhdr->h_dest, vid); 333 ethhdr->h_source, ethhdr->h_dest, vid);
334 break; 334 break;
335 335
336 } 336 }
337 337
338 if (vid != -1) 338 if (vid != -1)
339 skb = vlan_insert_tag(skb, vid); 339 skb = vlan_insert_tag(skb, vid);
340 340
341 skb_reset_mac_header(skb); 341 skb_reset_mac_header(skb);
342 skb->protocol = eth_type_trans(skb, soft_iface); 342 skb->protocol = eth_type_trans(skb, soft_iface);
343 batadv_inc_counter(bat_priv, BATADV_CNT_RX); 343 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
344 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, 344 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
345 skb->len + ETH_HLEN); 345 skb->len + ETH_HLEN);
346 soft_iface->last_rx = jiffies; 346 soft_iface->last_rx = jiffies;
347 347
348 netif_rx(skb); 348 netif_rx(skb);
349 out: 349 out:
350 if (primary_if) 350 if (primary_if)
351 batadv_hardif_free_ref(primary_if); 351 batadv_hardif_free_ref(primary_if);
352 } 352 }
353 353
354 /** 354 /**
355 * batadv_bla_get_backbone_gw 355 * batadv_bla_get_backbone_gw
356 * @bat_priv: the bat priv with all the soft interface information 356 * @bat_priv: the bat priv with all the soft interface information
357 * @orig: the mac address of the originator 357 * @orig: the mac address of the originator
358 * @vid: the VLAN ID 358 * @vid: the VLAN ID
359 * 359 *
360 * searches for the backbone gw or creates a new one if it could not 360 * searches for the backbone gw or creates a new one if it could not
361 * be found. 361 * be found.
362 */ 362 */
363 static struct batadv_backbone_gw * 363 static struct batadv_backbone_gw *
364 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, 364 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
365 short vid) 365 short vid)
366 { 366 {
367 struct batadv_backbone_gw *entry; 367 struct batadv_backbone_gw *entry;
368 struct batadv_orig_node *orig_node; 368 struct batadv_orig_node *orig_node;
369 int hash_added; 369 int hash_added;
370 370
371 entry = batadv_backbone_hash_find(bat_priv, orig, vid); 371 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
372 372
373 if (entry) 373 if (entry)
374 return entry; 374 return entry;
375 375
376 batadv_dbg(BATADV_DBG_BLA, bat_priv, 376 batadv_dbg(BATADV_DBG_BLA, bat_priv,
377 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", 377 "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
378 orig, vid); 378 orig, vid);
379 379
380 entry = kzalloc(sizeof(*entry), GFP_ATOMIC); 380 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
381 if (!entry) 381 if (!entry)
382 return NULL; 382 return NULL;
383 383
384 entry->vid = vid; 384 entry->vid = vid;
385 entry->lasttime = jiffies; 385 entry->lasttime = jiffies;
386 entry->crc = BATADV_BLA_CRC_INIT; 386 entry->crc = BATADV_BLA_CRC_INIT;
387 entry->bat_priv = bat_priv; 387 entry->bat_priv = bat_priv;
388 atomic_set(&entry->request_sent, 0); 388 atomic_set(&entry->request_sent, 0);
389 memcpy(entry->orig, orig, ETH_ALEN); 389 memcpy(entry->orig, orig, ETH_ALEN);
390 390
391 /* one for the hash, one for returning */ 391 /* one for the hash, one for returning */
392 atomic_set(&entry->refcount, 2); 392 atomic_set(&entry->refcount, 2);
393 393
394 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, 394 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
395 batadv_compare_backbone_gw, 395 batadv_compare_backbone_gw,
396 batadv_choose_backbone_gw, entry, 396 batadv_choose_backbone_gw, entry,
397 &entry->hash_entry); 397 &entry->hash_entry);
398 398
399 if (unlikely(hash_added != 0)) { 399 if (unlikely(hash_added != 0)) {
400 /* hash failed, free the structure */ 400 /* hash failed, free the structure */
401 kfree(entry); 401 kfree(entry);
402 return NULL; 402 return NULL;
403 } 403 }
404 404
405 /* this is a gateway now, remove any tt entries */ 405 /* this is a gateway now, remove any tt entries */
406 orig_node = batadv_orig_hash_find(bat_priv, orig); 406 orig_node = batadv_orig_hash_find(bat_priv, orig);
407 if (orig_node) { 407 if (orig_node) {
408 batadv_tt_global_del_orig(bat_priv, orig_node, 408 batadv_tt_global_del_orig(bat_priv, orig_node,
409 "became a backbone gateway"); 409 "became a backbone gateway");
410 batadv_orig_node_free_ref(orig_node); 410 batadv_orig_node_free_ref(orig_node);
411 } 411 }
412 return entry; 412 return entry;
413 } 413 }
414 414
415 /* update or add the own backbone gw to make sure we announce 415 /* update or add the own backbone gw to make sure we announce
416 * where we receive other backbone gws 416 * where we receive other backbone gws
417 */ 417 */
418 static void 418 static void
419 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv, 419 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
420 struct batadv_hard_iface *primary_if, 420 struct batadv_hard_iface *primary_if,
421 short vid) 421 short vid)
422 { 422 {
423 struct batadv_backbone_gw *backbone_gw; 423 struct batadv_backbone_gw *backbone_gw;
424 424
425 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, 425 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
426 primary_if->net_dev->dev_addr, 426 primary_if->net_dev->dev_addr,
427 vid); 427 vid);
428 if (unlikely(!backbone_gw)) 428 if (unlikely(!backbone_gw))
429 return; 429 return;
430 430
431 backbone_gw->lasttime = jiffies; 431 backbone_gw->lasttime = jiffies;
432 batadv_backbone_gw_free_ref(backbone_gw); 432 batadv_backbone_gw_free_ref(backbone_gw);
433 } 433 }
434 434
435 /* @bat_priv: the bat priv with all the soft interface information 435 /* @bat_priv: the bat priv with all the soft interface information
436 * @vid: the vid where the request came on 436 * @vid: the vid where the request came on
437 * 437 *
438 * Repeat all of our own claims, and finally send an ANNOUNCE frame 438 * Repeat all of our own claims, and finally send an ANNOUNCE frame
439 * to allow the requester another check if the CRC is correct now. 439 * to allow the requester another check if the CRC is correct now.
440 */ 440 */
441 static void batadv_bla_answer_request(struct batadv_priv *bat_priv, 441 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
442 struct batadv_hard_iface *primary_if, 442 struct batadv_hard_iface *primary_if,
443 short vid) 443 short vid)
444 { 444 {
445 struct hlist_node *node; 445 struct hlist_node *node;
446 struct hlist_head *head; 446 struct hlist_head *head;
447 struct batadv_hashtable *hash; 447 struct batadv_hashtable *hash;
448 struct batadv_claim *claim; 448 struct batadv_claim *claim;
449 struct batadv_backbone_gw *backbone_gw; 449 struct batadv_backbone_gw *backbone_gw;
450 int i; 450 int i;
451 451
452 batadv_dbg(BATADV_DBG_BLA, bat_priv, 452 batadv_dbg(BATADV_DBG_BLA, bat_priv,
453 "bla_answer_request(): received a claim request, send all of our own claims again\n"); 453 "bla_answer_request(): received a claim request, send all of our own claims again\n");
454 454
455 backbone_gw = batadv_backbone_hash_find(bat_priv, 455 backbone_gw = batadv_backbone_hash_find(bat_priv,
456 primary_if->net_dev->dev_addr, 456 primary_if->net_dev->dev_addr,
457 vid); 457 vid);
458 if (!backbone_gw) 458 if (!backbone_gw)
459 return; 459 return;
460 460
461 hash = bat_priv->bla.claim_hash; 461 hash = bat_priv->bla.claim_hash;
462 for (i = 0; i < hash->size; i++) { 462 for (i = 0; i < hash->size; i++) {
463 head = &hash->table[i]; 463 head = &hash->table[i];
464 464
465 rcu_read_lock(); 465 rcu_read_lock();
466 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 466 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
467 /* only own claims are interesting */ 467 /* only own claims are interesting */
468 if (claim->backbone_gw != backbone_gw) 468 if (claim->backbone_gw != backbone_gw)
469 continue; 469 continue;
470 470
471 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid, 471 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
472 BATADV_CLAIM_TYPE_CLAIM); 472 BATADV_CLAIM_TYPE_CLAIM);
473 } 473 }
474 rcu_read_unlock(); 474 rcu_read_unlock();
475 } 475 }
476 476
477 /* finally, send an announcement frame */ 477 /* finally, send an announcement frame */
478 batadv_bla_send_announce(bat_priv, backbone_gw); 478 batadv_bla_send_announce(bat_priv, backbone_gw);
479 batadv_backbone_gw_free_ref(backbone_gw); 479 batadv_backbone_gw_free_ref(backbone_gw);
480 } 480 }
481 481
482 /* @backbone_gw: the backbone gateway from whom we are out of sync 482 /* @backbone_gw: the backbone gateway from whom we are out of sync
483 * 483 *
484 * When the crc is wrong, ask the backbone gateway for a full table update. 484 * When the crc is wrong, ask the backbone gateway for a full table update.
485 * After the request, it will repeat all of his own claims and finally 485 * After the request, it will repeat all of his own claims and finally
486 * send an announcement claim with which we can check again. 486 * send an announcement claim with which we can check again.
487 */ 487 */
488 static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw) 488 static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
489 { 489 {
490 /* first, remove all old entries */ 490 /* first, remove all old entries */
491 batadv_bla_del_backbone_claims(backbone_gw); 491 batadv_bla_del_backbone_claims(backbone_gw);
492 492
493 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, 493 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
494 "Sending REQUEST to %pM\n", backbone_gw->orig); 494 "Sending REQUEST to %pM\n", backbone_gw->orig);
495 495
496 /* send request */ 496 /* send request */
497 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, 497 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
498 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST); 498 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
499 499
500 /* no local broadcasts should be sent or received, for now. */ 500 /* no local broadcasts should be sent or received, for now. */
501 if (!atomic_read(&backbone_gw->request_sent)) { 501 if (!atomic_read(&backbone_gw->request_sent)) {
502 atomic_inc(&backbone_gw->bat_priv->bla.num_requests); 502 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
503 atomic_set(&backbone_gw->request_sent, 1); 503 atomic_set(&backbone_gw->request_sent, 1);
504 } 504 }
505 } 505 }
506 506
507 /* @bat_priv: the bat priv with all the soft interface information 507 /* @bat_priv: the bat priv with all the soft interface information
508 * @backbone_gw: our backbone gateway which should be announced 508 * @backbone_gw: our backbone gateway which should be announced
509 * 509 *
510 * This function sends an announcement. It is called from multiple 510 * This function sends an announcement. It is called from multiple
511 * places. 511 * places.
512 */ 512 */
513 static void batadv_bla_send_announce(struct batadv_priv *bat_priv, 513 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
514 struct batadv_backbone_gw *backbone_gw) 514 struct batadv_backbone_gw *backbone_gw)
515 { 515 {
516 uint8_t mac[ETH_ALEN]; 516 uint8_t mac[ETH_ALEN];
517 __be16 crc; 517 __be16 crc;
518 518
519 memcpy(mac, batadv_announce_mac, 4); 519 memcpy(mac, batadv_announce_mac, 4);
520 crc = htons(backbone_gw->crc); 520 crc = htons(backbone_gw->crc);
521 memcpy(&mac[4], &crc, 2); 521 memcpy(&mac[4], &crc, 2);
522 522
523 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid, 523 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
524 BATADV_CLAIM_TYPE_ANNOUNCE); 524 BATADV_CLAIM_TYPE_ANNOUNCE);
525 525
526 } 526 }
527 527
528 /** 528 /**
529 * batadv_bla_add_claim - Adds a claim in the claim hash 529 * batadv_bla_add_claim - Adds a claim in the claim hash
530 * @bat_priv: the bat priv with all the soft interface information 530 * @bat_priv: the bat priv with all the soft interface information
531 * @mac: the mac address of the claim 531 * @mac: the mac address of the claim
532 * @vid: the VLAN ID of the frame 532 * @vid: the VLAN ID of the frame
533 * @backbone_gw: the backbone gateway which claims it 533 * @backbone_gw: the backbone gateway which claims it
534 */ 534 */
535 static void batadv_bla_add_claim(struct batadv_priv *bat_priv, 535 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
536 const uint8_t *mac, const short vid, 536 const uint8_t *mac, const short vid,
537 struct batadv_backbone_gw *backbone_gw) 537 struct batadv_backbone_gw *backbone_gw)
538 { 538 {
539 struct batadv_claim *claim; 539 struct batadv_claim *claim;
540 struct batadv_claim search_claim; 540 struct batadv_claim search_claim;
541 int hash_added; 541 int hash_added;
542 542
543 memcpy(search_claim.addr, mac, ETH_ALEN); 543 memcpy(search_claim.addr, mac, ETH_ALEN);
544 search_claim.vid = vid; 544 search_claim.vid = vid;
545 claim = batadv_claim_hash_find(bat_priv, &search_claim); 545 claim = batadv_claim_hash_find(bat_priv, &search_claim);
546 546
547 /* create a new claim entry if it does not exist yet. */ 547 /* create a new claim entry if it does not exist yet. */
548 if (!claim) { 548 if (!claim) {
549 claim = kzalloc(sizeof(*claim), GFP_ATOMIC); 549 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
550 if (!claim) 550 if (!claim)
551 return; 551 return;
552 552
553 memcpy(claim->addr, mac, ETH_ALEN); 553 memcpy(claim->addr, mac, ETH_ALEN);
554 claim->vid = vid; 554 claim->vid = vid;
555 claim->lasttime = jiffies; 555 claim->lasttime = jiffies;
556 claim->backbone_gw = backbone_gw; 556 claim->backbone_gw = backbone_gw;
557 557
558 atomic_set(&claim->refcount, 2); 558 atomic_set(&claim->refcount, 2);
559 batadv_dbg(BATADV_DBG_BLA, bat_priv, 559 batadv_dbg(BATADV_DBG_BLA, bat_priv,
560 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", 560 "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
561 mac, vid); 561 mac, vid);
562 hash_added = batadv_hash_add(bat_priv->bla.claim_hash, 562 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
563 batadv_compare_claim, 563 batadv_compare_claim,
564 batadv_choose_claim, claim, 564 batadv_choose_claim, claim,
565 &claim->hash_entry); 565 &claim->hash_entry);
566 566
567 if (unlikely(hash_added != 0)) { 567 if (unlikely(hash_added != 0)) {
568 /* only local changes happened. */ 568 /* only local changes happened. */
569 kfree(claim); 569 kfree(claim);
570 return; 570 return;
571 } 571 }
572 } else { 572 } else {
573 claim->lasttime = jiffies; 573 claim->lasttime = jiffies;
574 if (claim->backbone_gw == backbone_gw) 574 if (claim->backbone_gw == backbone_gw)
575 /* no need to register a new backbone */ 575 /* no need to register a new backbone */
576 goto claim_free_ref; 576 goto claim_free_ref;
577 577
578 batadv_dbg(BATADV_DBG_BLA, bat_priv, 578 batadv_dbg(BATADV_DBG_BLA, bat_priv,
579 "bla_add_claim(): changing ownership for %pM, vid %d\n", 579 "bla_add_claim(): changing ownership for %pM, vid %d\n",
580 mac, vid); 580 mac, vid);
581 581
582 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 582 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
583 batadv_backbone_gw_free_ref(claim->backbone_gw); 583 batadv_backbone_gw_free_ref(claim->backbone_gw);
584 584
585 } 585 }
586 /* set (new) backbone gw */ 586 /* set (new) backbone gw */
587 atomic_inc(&backbone_gw->refcount); 587 atomic_inc(&backbone_gw->refcount);
588 claim->backbone_gw = backbone_gw; 588 claim->backbone_gw = backbone_gw;
589 589
590 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 590 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
591 backbone_gw->lasttime = jiffies; 591 backbone_gw->lasttime = jiffies;
592 592
593 claim_free_ref: 593 claim_free_ref:
594 batadv_claim_free_ref(claim); 594 batadv_claim_free_ref(claim);
595 } 595 }
596 596
597 /* Delete a claim from the claim hash which has the 597 /* Delete a claim from the claim hash which has the
598 * given mac address and vid. 598 * given mac address and vid.
599 */ 599 */
600 static void batadv_bla_del_claim(struct batadv_priv *bat_priv, 600 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
601 const uint8_t *mac, const short vid) 601 const uint8_t *mac, const short vid)
602 { 602 {
603 struct batadv_claim search_claim, *claim; 603 struct batadv_claim search_claim, *claim;
604 604
605 memcpy(search_claim.addr, mac, ETH_ALEN); 605 memcpy(search_claim.addr, mac, ETH_ALEN);
606 search_claim.vid = vid; 606 search_claim.vid = vid;
607 claim = batadv_claim_hash_find(bat_priv, &search_claim); 607 claim = batadv_claim_hash_find(bat_priv, &search_claim);
608 if (!claim) 608 if (!claim)
609 return; 609 return;
610 610
611 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", 611 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
612 mac, vid); 612 mac, vid);
613 613
614 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, 614 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
615 batadv_choose_claim, claim); 615 batadv_choose_claim, claim);
616 batadv_claim_free_ref(claim); /* reference from the hash is gone */ 616 batadv_claim_free_ref(claim); /* reference from the hash is gone */
617 617
618 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); 618 claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
619 619
620 /* don't need the reference from hash_find() anymore */ 620 /* don't need the reference from hash_find() anymore */
621 batadv_claim_free_ref(claim); 621 batadv_claim_free_ref(claim);
622 } 622 }
623 623
624 /* check for ANNOUNCE frame, return 1 if handled */ 624 /* check for ANNOUNCE frame, return 1 if handled */
625 static int batadv_handle_announce(struct batadv_priv *bat_priv, 625 static int batadv_handle_announce(struct batadv_priv *bat_priv,
626 uint8_t *an_addr, uint8_t *backbone_addr, 626 uint8_t *an_addr, uint8_t *backbone_addr,
627 short vid) 627 short vid)
628 { 628 {
629 struct batadv_backbone_gw *backbone_gw; 629 struct batadv_backbone_gw *backbone_gw;
630 uint16_t crc; 630 uint16_t crc;
631 631
632 if (memcmp(an_addr, batadv_announce_mac, 4) != 0) 632 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
633 return 0; 633 return 0;
634 634
635 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid); 635 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
636 636
637 if (unlikely(!backbone_gw)) 637 if (unlikely(!backbone_gw))
638 return 1; 638 return 1;
639 639
640 640
641 /* handle as ANNOUNCE frame */ 641 /* handle as ANNOUNCE frame */
642 backbone_gw->lasttime = jiffies; 642 backbone_gw->lasttime = jiffies;
643 crc = ntohs(*((__be16 *)(&an_addr[4]))); 643 crc = ntohs(*((__be16 *)(&an_addr[4])));
644 644
645 batadv_dbg(BATADV_DBG_BLA, bat_priv, 645 batadv_dbg(BATADV_DBG_BLA, bat_priv,
646 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n", 646 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
647 vid, backbone_gw->orig, crc); 647 vid, backbone_gw->orig, crc);
648 648
649 if (backbone_gw->crc != crc) { 649 if (backbone_gw->crc != crc) {
650 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, 650 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
651 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n", 651 "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
652 backbone_gw->orig, backbone_gw->vid, 652 backbone_gw->orig, backbone_gw->vid,
653 backbone_gw->crc, crc); 653 backbone_gw->crc, crc);
654 654
655 batadv_bla_send_request(backbone_gw); 655 batadv_bla_send_request(backbone_gw);
656 } else { 656 } else {
657 /* if we have sent a request and the crc was OK, 657 /* if we have sent a request and the crc was OK,
658 * we can allow traffic again. 658 * we can allow traffic again.
659 */ 659 */
660 if (atomic_read(&backbone_gw->request_sent)) { 660 if (atomic_read(&backbone_gw->request_sent)) {
661 atomic_dec(&backbone_gw->bat_priv->bla.num_requests); 661 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
662 atomic_set(&backbone_gw->request_sent, 0); 662 atomic_set(&backbone_gw->request_sent, 0);
663 } 663 }
664 } 664 }
665 665
666 batadv_backbone_gw_free_ref(backbone_gw); 666 batadv_backbone_gw_free_ref(backbone_gw);
667 return 1; 667 return 1;
668 } 668 }
669 669
670 /* check for REQUEST frame, return 1 if handled */ 670 /* check for REQUEST frame, return 1 if handled */
671 static int batadv_handle_request(struct batadv_priv *bat_priv, 671 static int batadv_handle_request(struct batadv_priv *bat_priv,
672 struct batadv_hard_iface *primary_if, 672 struct batadv_hard_iface *primary_if,
673 uint8_t *backbone_addr, 673 uint8_t *backbone_addr,
674 struct ethhdr *ethhdr, short vid) 674 struct ethhdr *ethhdr, short vid)
675 { 675 {
676 /* check for REQUEST frame */ 676 /* check for REQUEST frame */
677 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest)) 677 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
678 return 0; 678 return 0;
679 679
680 /* sanity check, this should not happen on a normal switch, 680 /* sanity check, this should not happen on a normal switch,
681 * we ignore it in this case. 681 * we ignore it in this case.
682 */ 682 */
683 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) 683 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
684 return 1; 684 return 1;
685 685
686 batadv_dbg(BATADV_DBG_BLA, bat_priv, 686 batadv_dbg(BATADV_DBG_BLA, bat_priv,
687 "handle_request(): REQUEST vid %d (sent by %pM)...\n", 687 "handle_request(): REQUEST vid %d (sent by %pM)...\n",
688 vid, ethhdr->h_source); 688 vid, ethhdr->h_source);
689 689
690 batadv_bla_answer_request(bat_priv, primary_if, vid); 690 batadv_bla_answer_request(bat_priv, primary_if, vid);
691 return 1; 691 return 1;
692 } 692 }
693 693
694 /* check for UNCLAIM frame, return 1 if handled */ 694 /* check for UNCLAIM frame, return 1 if handled */
695 static int batadv_handle_unclaim(struct batadv_priv *bat_priv, 695 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
696 struct batadv_hard_iface *primary_if, 696 struct batadv_hard_iface *primary_if,
697 uint8_t *backbone_addr, 697 uint8_t *backbone_addr,
698 uint8_t *claim_addr, short vid) 698 uint8_t *claim_addr, short vid)
699 { 699 {
700 struct batadv_backbone_gw *backbone_gw; 700 struct batadv_backbone_gw *backbone_gw;
701 701
702 /* unclaim in any case if it is our own */ 702 /* unclaim in any case if it is our own */
703 if (primary_if && batadv_compare_eth(backbone_addr, 703 if (primary_if && batadv_compare_eth(backbone_addr,
704 primary_if->net_dev->dev_addr)) 704 primary_if->net_dev->dev_addr))
705 batadv_bla_send_claim(bat_priv, claim_addr, vid, 705 batadv_bla_send_claim(bat_priv, claim_addr, vid,
706 BATADV_CLAIM_TYPE_UNCLAIM); 706 BATADV_CLAIM_TYPE_UNCLAIM);
707 707
708 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid); 708 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
709 709
710 if (!backbone_gw) 710 if (!backbone_gw)
711 return 1; 711 return 1;
712 712
713 /* this must be an UNCLAIM frame */ 713 /* this must be an UNCLAIM frame */
714 batadv_dbg(BATADV_DBG_BLA, bat_priv, 714 batadv_dbg(BATADV_DBG_BLA, bat_priv,
715 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", 715 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
716 claim_addr, vid, backbone_gw->orig); 716 claim_addr, vid, backbone_gw->orig);
717 717
718 batadv_bla_del_claim(bat_priv, claim_addr, vid); 718 batadv_bla_del_claim(bat_priv, claim_addr, vid);
719 batadv_backbone_gw_free_ref(backbone_gw); 719 batadv_backbone_gw_free_ref(backbone_gw);
720 return 1; 720 return 1;
721 } 721 }
722 722
723 /* check for CLAIM frame, return 1 if handled */ 723 /* check for CLAIM frame, return 1 if handled */
724 static int batadv_handle_claim(struct batadv_priv *bat_priv, 724 static int batadv_handle_claim(struct batadv_priv *bat_priv,
725 struct batadv_hard_iface *primary_if, 725 struct batadv_hard_iface *primary_if,
726 uint8_t *backbone_addr, uint8_t *claim_addr, 726 uint8_t *backbone_addr, uint8_t *claim_addr,
727 short vid) 727 short vid)
728 { 728 {
729 struct batadv_backbone_gw *backbone_gw; 729 struct batadv_backbone_gw *backbone_gw;
730 730
731 /* register the gateway if not yet available, and add the claim. */ 731 /* register the gateway if not yet available, and add the claim. */
732 732
733 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid); 733 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
734 734
735 if (unlikely(!backbone_gw)) 735 if (unlikely(!backbone_gw))
736 return 1; 736 return 1;
737 737
738 /* this must be a CLAIM frame */ 738 /* this must be a CLAIM frame */
739 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); 739 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
740 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 740 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
741 batadv_bla_send_claim(bat_priv, claim_addr, vid, 741 batadv_bla_send_claim(bat_priv, claim_addr, vid,
742 BATADV_CLAIM_TYPE_CLAIM); 742 BATADV_CLAIM_TYPE_CLAIM);
743 743
744 /* TODO: we could call something like tt_local_del() here. */ 744 /* TODO: we could call something like tt_local_del() here. */
745 745
746 batadv_backbone_gw_free_ref(backbone_gw); 746 batadv_backbone_gw_free_ref(backbone_gw);
747 return 1; 747 return 1;
748 } 748 }
749 749
750 /** 750 /**
751 * batadv_check_claim_group 751 * batadv_check_claim_group
752 * @bat_priv: the bat priv with all the soft interface information 752 * @bat_priv: the bat priv with all the soft interface information
753 * @hw_src: the Hardware source in the ARP Header 753 * @hw_src: the Hardware source in the ARP Header
754 * @hw_dst: the Hardware destination in the ARP Header 754 * @hw_dst: the Hardware destination in the ARP Header
755 * @ethhdr: pointer to the Ethernet header of the claim frame 755 * @ethhdr: pointer to the Ethernet header of the claim frame
756 * 756 *
757 * checks if it is a claim packet and if its on the same group. 757 * checks if it is a claim packet and if its on the same group.
758 * This function also applies the group ID of the sender 758 * This function also applies the group ID of the sender
759 * if it is in the same mesh. 759 * if it is in the same mesh.
760 * 760 *
761 * returns: 761 * returns:
762 * 2 - if it is a claim packet and on the same group 762 * 2 - if it is a claim packet and on the same group
763 * 1 - if is a claim packet from another group 763 * 1 - if is a claim packet from another group
764 * 0 - if it is not a claim packet 764 * 0 - if it is not a claim packet
765 */ 765 */
766 static int batadv_check_claim_group(struct batadv_priv *bat_priv, 766 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
767 struct batadv_hard_iface *primary_if, 767 struct batadv_hard_iface *primary_if,
768 uint8_t *hw_src, uint8_t *hw_dst, 768 uint8_t *hw_src, uint8_t *hw_dst,
769 struct ethhdr *ethhdr) 769 struct ethhdr *ethhdr)
770 { 770 {
771 uint8_t *backbone_addr; 771 uint8_t *backbone_addr;
772 struct batadv_orig_node *orig_node; 772 struct batadv_orig_node *orig_node;
773 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; 773 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
774 774
775 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 775 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
776 bla_dst_own = &bat_priv->bla.claim_dest; 776 bla_dst_own = &bat_priv->bla.claim_dest;
777 777
778 /* check if it is a claim packet in general */ 778 /* check if it is a claim packet in general */
779 if (memcmp(bla_dst->magic, bla_dst_own->magic, 779 if (memcmp(bla_dst->magic, bla_dst_own->magic,
780 sizeof(bla_dst->magic)) != 0) 780 sizeof(bla_dst->magic)) != 0)
781 return 0; 781 return 0;
782 782
783 /* if announcement packet, use the source, 783 /* if announcement packet, use the source,
784 * otherwise assume it is in the hw_src 784 * otherwise assume it is in the hw_src
785 */ 785 */
786 switch (bla_dst->type) { 786 switch (bla_dst->type) {
787 case BATADV_CLAIM_TYPE_CLAIM: 787 case BATADV_CLAIM_TYPE_CLAIM:
788 backbone_addr = hw_src; 788 backbone_addr = hw_src;
789 break; 789 break;
790 case BATADV_CLAIM_TYPE_REQUEST: 790 case BATADV_CLAIM_TYPE_REQUEST:
791 case BATADV_CLAIM_TYPE_ANNOUNCE: 791 case BATADV_CLAIM_TYPE_ANNOUNCE:
792 case BATADV_CLAIM_TYPE_UNCLAIM: 792 case BATADV_CLAIM_TYPE_UNCLAIM:
793 backbone_addr = ethhdr->h_source; 793 backbone_addr = ethhdr->h_source;
794 break; 794 break;
795 default: 795 default:
796 return 0; 796 return 0;
797 } 797 }
798 798
799 /* don't accept claim frames from ourselves */ 799 /* don't accept claim frames from ourselves */
800 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) 800 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
801 return 0; 801 return 0;
802 802
803 /* if its already the same group, it is fine. */ 803 /* if its already the same group, it is fine. */
804 if (bla_dst->group == bla_dst_own->group) 804 if (bla_dst->group == bla_dst_own->group)
805 return 2; 805 return 2;
806 806
807 /* lets see if this originator is in our mesh */ 807 /* lets see if this originator is in our mesh */
808 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr); 808 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
809 809
810 /* dont accept claims from gateways which are not in 810 /* dont accept claims from gateways which are not in
811 * the same mesh or group. 811 * the same mesh or group.
812 */ 812 */
813 if (!orig_node) 813 if (!orig_node)
814 return 1; 814 return 1;
815 815
816 /* if our mesh friends mac is bigger, use it for ourselves. */ 816 /* if our mesh friends mac is bigger, use it for ourselves. */
817 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { 817 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
818 batadv_dbg(BATADV_DBG_BLA, bat_priv, 818 batadv_dbg(BATADV_DBG_BLA, bat_priv,
819 "taking other backbones claim group: %04x\n", 819 "taking other backbones claim group: %04x\n",
820 ntohs(bla_dst->group)); 820 ntohs(bla_dst->group));
821 bla_dst_own->group = bla_dst->group; 821 bla_dst_own->group = bla_dst->group;
822 } 822 }
823 823
824 batadv_orig_node_free_ref(orig_node); 824 batadv_orig_node_free_ref(orig_node);
825 825
826 return 2; 826 return 2;
827 } 827 }
828 828
829 829
830 /* @bat_priv: the bat priv with all the soft interface information 830 /* @bat_priv: the bat priv with all the soft interface information
831 * @skb: the frame to be checked 831 * @skb: the frame to be checked
832 * 832 *
833 * Check if this is a claim frame, and process it accordingly. 833 * Check if this is a claim frame, and process it accordingly.
834 * 834 *
835 * returns 1 if it was a claim frame, otherwise return 0 to 835 * returns 1 if it was a claim frame, otherwise return 0 to
836 * tell the callee that it can use the frame on its own. 836 * tell the callee that it can use the frame on its own.
837 */ 837 */
838 static int batadv_bla_process_claim(struct batadv_priv *bat_priv, 838 static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
839 struct batadv_hard_iface *primary_if, 839 struct batadv_hard_iface *primary_if,
840 struct sk_buff *skb) 840 struct sk_buff *skb)
841 { 841 {
842 struct ethhdr *ethhdr; 842 struct ethhdr *ethhdr;
843 struct vlan_ethhdr *vhdr; 843 struct vlan_ethhdr *vhdr;
844 struct arphdr *arphdr; 844 struct arphdr *arphdr;
845 uint8_t *hw_src, *hw_dst; 845 uint8_t *hw_src, *hw_dst;
846 struct batadv_bla_claim_dst *bla_dst; 846 struct batadv_bla_claim_dst *bla_dst;
847 uint16_t proto; 847 uint16_t proto;
848 int headlen; 848 int headlen;
849 short vid = -1; 849 short vid = -1;
850 int ret; 850 int ret;
851 851
852 ethhdr = (struct ethhdr *)skb_mac_header(skb); 852 ethhdr = (struct ethhdr *)skb_mac_header(skb);
853 853
854 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { 854 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
855 vhdr = (struct vlan_ethhdr *)ethhdr; 855 vhdr = (struct vlan_ethhdr *)ethhdr;
856 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 856 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
857 proto = ntohs(vhdr->h_vlan_encapsulated_proto); 857 proto = ntohs(vhdr->h_vlan_encapsulated_proto);
858 headlen = sizeof(*vhdr); 858 headlen = sizeof(*vhdr);
859 } else { 859 } else {
860 proto = ntohs(ethhdr->h_proto); 860 proto = ntohs(ethhdr->h_proto);
861 headlen = ETH_HLEN; 861 headlen = ETH_HLEN;
862 } 862 }
863 863
864 if (proto != ETH_P_ARP) 864 if (proto != ETH_P_ARP)
865 return 0; /* not a claim frame */ 865 return 0; /* not a claim frame */
866 866
867 /* this must be a ARP frame. check if it is a claim. */ 867 /* this must be a ARP frame. check if it is a claim. */
868 868
869 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) 869 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
870 return 0; 870 return 0;
871 871
872 /* pskb_may_pull() may have modified the pointers, get ethhdr again */ 872 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
873 ethhdr = (struct ethhdr *)skb_mac_header(skb); 873 ethhdr = (struct ethhdr *)skb_mac_header(skb);
874 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen); 874 arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
875 875
876 /* Check whether the ARP frame carries a valid 876 /* Check whether the ARP frame carries a valid
877 * IP information 877 * IP information
878 */ 878 */
879 if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) 879 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
880 return 0; 880 return 0;
881 if (arphdr->ar_pro != htons(ETH_P_IP)) 881 if (arphdr->ar_pro != htons(ETH_P_IP))
882 return 0; 882 return 0;
883 if (arphdr->ar_hln != ETH_ALEN) 883 if (arphdr->ar_hln != ETH_ALEN)
884 return 0; 884 return 0;
885 if (arphdr->ar_pln != 4) 885 if (arphdr->ar_pln != 4)
886 return 0; 886 return 0;
887 887
888 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 888 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
889 hw_dst = hw_src + ETH_ALEN + 4; 889 hw_dst = hw_src + ETH_ALEN + 4;
890 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 890 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
891 891
892 /* check if it is a claim frame. */ 892 /* check if it is a claim frame. */
893 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 893 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
894 ethhdr); 894 ethhdr);
895 if (ret == 1) 895 if (ret == 1)
896 batadv_dbg(BATADV_DBG_BLA, bat_priv, 896 batadv_dbg(BATADV_DBG_BLA, bat_priv,
897 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", 897 "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
898 ethhdr->h_source, vid, hw_src, hw_dst); 898 ethhdr->h_source, vid, hw_src, hw_dst);
899 899
900 if (ret < 2) 900 if (ret < 2)
901 return ret; 901 return ret;
902 902
903 /* become a backbone gw ourselves on this vlan if not happened yet */ 903 /* become a backbone gw ourselves on this vlan if not happened yet */
904 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 904 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
905 905
906 /* check for the different types of claim frames ... */ 906 /* check for the different types of claim frames ... */
907 switch (bla_dst->type) { 907 switch (bla_dst->type) {
908 case BATADV_CLAIM_TYPE_CLAIM: 908 case BATADV_CLAIM_TYPE_CLAIM:
909 if (batadv_handle_claim(bat_priv, primary_if, hw_src, 909 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
910 ethhdr->h_source, vid)) 910 ethhdr->h_source, vid))
911 return 1; 911 return 1;
912 break; 912 break;
913 case BATADV_CLAIM_TYPE_UNCLAIM: 913 case BATADV_CLAIM_TYPE_UNCLAIM:
914 if (batadv_handle_unclaim(bat_priv, primary_if, 914 if (batadv_handle_unclaim(bat_priv, primary_if,
915 ethhdr->h_source, hw_src, vid)) 915 ethhdr->h_source, hw_src, vid))
916 return 1; 916 return 1;
917 break; 917 break;
918 918
919 case BATADV_CLAIM_TYPE_ANNOUNCE: 919 case BATADV_CLAIM_TYPE_ANNOUNCE:
920 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source, 920 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
921 vid)) 921 vid))
922 return 1; 922 return 1;
923 break; 923 break;
924 case BATADV_CLAIM_TYPE_REQUEST: 924 case BATADV_CLAIM_TYPE_REQUEST:
925 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr, 925 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
926 vid)) 926 vid))
927 return 1; 927 return 1;
928 break; 928 break;
929 } 929 }
930 930
931 batadv_dbg(BATADV_DBG_BLA, bat_priv, 931 batadv_dbg(BATADV_DBG_BLA, bat_priv,
932 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", 932 "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
933 ethhdr->h_source, vid, hw_src, hw_dst); 933 ethhdr->h_source, vid, hw_src, hw_dst);
934 return 1; 934 return 1;
935 } 935 }
936 936
937 /* Check when we last heard from other nodes, and remove them in case of 937 /* Check when we last heard from other nodes, and remove them in case of
938 * a time out, or clean all backbone gws if now is set. 938 * a time out, or clean all backbone gws if now is set.
939 */ 939 */
940 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) 940 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
941 { 941 {
942 struct batadv_backbone_gw *backbone_gw; 942 struct batadv_backbone_gw *backbone_gw;
943 struct hlist_node *node, *node_tmp; 943 struct hlist_node *node, *node_tmp;
944 struct hlist_head *head; 944 struct hlist_head *head;
945 struct batadv_hashtable *hash; 945 struct batadv_hashtable *hash;
946 spinlock_t *list_lock; /* protects write access to the hash lists */ 946 spinlock_t *list_lock; /* protects write access to the hash lists */
947 int i; 947 int i;
948 948
949 hash = bat_priv->bla.backbone_hash; 949 hash = bat_priv->bla.backbone_hash;
950 if (!hash) 950 if (!hash)
951 return; 951 return;
952 952
953 for (i = 0; i < hash->size; i++) { 953 for (i = 0; i < hash->size; i++) {
954 head = &hash->table[i]; 954 head = &hash->table[i];
955 list_lock = &hash->list_locks[i]; 955 list_lock = &hash->list_locks[i];
956 956
957 spin_lock_bh(list_lock); 957 spin_lock_bh(list_lock);
958 hlist_for_each_entry_safe(backbone_gw, node, node_tmp, 958 hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
959 head, hash_entry) { 959 head, hash_entry) {
960 if (now) 960 if (now)
961 goto purge_now; 961 goto purge_now;
962 if (!batadv_has_timed_out(backbone_gw->lasttime, 962 if (!batadv_has_timed_out(backbone_gw->lasttime,
963 BATADV_BLA_BACKBONE_TIMEOUT)) 963 BATADV_BLA_BACKBONE_TIMEOUT))
964 continue; 964 continue;
965 965
966 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, 966 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
967 "bla_purge_backbone_gw(): backbone gw %pM timed out\n", 967 "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
968 backbone_gw->orig); 968 backbone_gw->orig);
969 969
970 purge_now: 970 purge_now:
971 /* don't wait for the pending request anymore */ 971 /* don't wait for the pending request anymore */
972 if (atomic_read(&backbone_gw->request_sent)) 972 if (atomic_read(&backbone_gw->request_sent))
973 atomic_dec(&bat_priv->bla.num_requests); 973 atomic_dec(&bat_priv->bla.num_requests);
974 974
975 batadv_bla_del_backbone_claims(backbone_gw); 975 batadv_bla_del_backbone_claims(backbone_gw);
976 976
977 hlist_del_rcu(node); 977 hlist_del_rcu(node);
978 batadv_backbone_gw_free_ref(backbone_gw); 978 batadv_backbone_gw_free_ref(backbone_gw);
979 } 979 }
980 spin_unlock_bh(list_lock); 980 spin_unlock_bh(list_lock);
981 } 981 }
982 } 982 }
983 983
984 /** 984 /**
985 * batadv_bla_purge_claims 985 * batadv_bla_purge_claims
986 * @bat_priv: the bat priv with all the soft interface information 986 * @bat_priv: the bat priv with all the soft interface information
987 * @primary_if: the selected primary interface, may be NULL if now is set 987 * @primary_if: the selected primary interface, may be NULL if now is set
988 * @now: whether the whole hash shall be wiped now 988 * @now: whether the whole hash shall be wiped now
989 * 989 *
990 * Check when we last heard from our own claims, and remove them in case of 990 * Check when we last heard from our own claims, and remove them in case of
991 * a time out, or clean all claims if now is set 991 * a time out, or clean all claims if now is set
992 */ 992 */
993 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, 993 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
994 struct batadv_hard_iface *primary_if, 994 struct batadv_hard_iface *primary_if,
995 int now) 995 int now)
996 { 996 {
997 struct batadv_claim *claim; 997 struct batadv_claim *claim;
998 struct hlist_node *node; 998 struct hlist_node *node;
999 struct hlist_head *head; 999 struct hlist_head *head;
1000 struct batadv_hashtable *hash; 1000 struct batadv_hashtable *hash;
1001 int i; 1001 int i;
1002 1002
1003 hash = bat_priv->bla.claim_hash; 1003 hash = bat_priv->bla.claim_hash;
1004 if (!hash) 1004 if (!hash)
1005 return; 1005 return;
1006 1006
1007 for (i = 0; i < hash->size; i++) { 1007 for (i = 0; i < hash->size; i++) {
1008 head = &hash->table[i]; 1008 head = &hash->table[i];
1009 1009
1010 rcu_read_lock(); 1010 rcu_read_lock();
1011 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1011 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1012 if (now) 1012 if (now)
1013 goto purge_now; 1013 goto purge_now;
1014 if (!batadv_compare_eth(claim->backbone_gw->orig, 1014 if (!batadv_compare_eth(claim->backbone_gw->orig,
1015 primary_if->net_dev->dev_addr)) 1015 primary_if->net_dev->dev_addr))
1016 continue; 1016 continue;
1017 if (!batadv_has_timed_out(claim->lasttime, 1017 if (!batadv_has_timed_out(claim->lasttime,
1018 BATADV_BLA_CLAIM_TIMEOUT)) 1018 BATADV_BLA_CLAIM_TIMEOUT))
1019 continue; 1019 continue;
1020 1020
1021 batadv_dbg(BATADV_DBG_BLA, bat_priv, 1021 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1022 "bla_purge_claims(): %pM, vid %d, time out\n", 1022 "bla_purge_claims(): %pM, vid %d, time out\n",
1023 claim->addr, claim->vid); 1023 claim->addr, claim->vid);
1024 1024
1025 purge_now: 1025 purge_now:
1026 batadv_handle_unclaim(bat_priv, primary_if, 1026 batadv_handle_unclaim(bat_priv, primary_if,
1027 claim->backbone_gw->orig, 1027 claim->backbone_gw->orig,
1028 claim->addr, claim->vid); 1028 claim->addr, claim->vid);
1029 } 1029 }
1030 rcu_read_unlock(); 1030 rcu_read_unlock();
1031 } 1031 }
1032 } 1032 }
1033 1033
1034 /** 1034 /**
1035 * batadv_bla_update_orig_address 1035 * batadv_bla_update_orig_address
1036 * @bat_priv: the bat priv with all the soft interface information 1036 * @bat_priv: the bat priv with all the soft interface information
1037 * @primary_if: the new selected primary_if 1037 * @primary_if: the new selected primary_if
1038 * @oldif: the old primary interface, may be NULL 1038 * @oldif: the old primary interface, may be NULL
1039 * 1039 *
1040 * Update the backbone gateways when the own orig address changes. 1040 * Update the backbone gateways when the own orig address changes.
1041 */ 1041 */
1042 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, 1042 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1043 struct batadv_hard_iface *primary_if, 1043 struct batadv_hard_iface *primary_if,
1044 struct batadv_hard_iface *oldif) 1044 struct batadv_hard_iface *oldif)
1045 { 1045 {
1046 struct batadv_backbone_gw *backbone_gw; 1046 struct batadv_backbone_gw *backbone_gw;
1047 struct hlist_node *node; 1047 struct hlist_node *node;
1048 struct hlist_head *head; 1048 struct hlist_head *head;
1049 struct batadv_hashtable *hash; 1049 struct batadv_hashtable *hash;
1050 __be16 group; 1050 __be16 group;
1051 int i; 1051 int i;
1052 1052
1053 /* reset bridge loop avoidance group id */ 1053 /* reset bridge loop avoidance group id */
1054 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); 1054 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1055 bat_priv->bla.claim_dest.group = group; 1055 bat_priv->bla.claim_dest.group = group;
1056 1056
1057 if (!oldif) { 1057 if (!oldif) {
1058 batadv_bla_purge_claims(bat_priv, NULL, 1); 1058 batadv_bla_purge_claims(bat_priv, NULL, 1);
1059 batadv_bla_purge_backbone_gw(bat_priv, 1); 1059 batadv_bla_purge_backbone_gw(bat_priv, 1);
1060 return; 1060 return;
1061 } 1061 }
1062 1062
1063 hash = bat_priv->bla.backbone_hash; 1063 hash = bat_priv->bla.backbone_hash;
1064 if (!hash) 1064 if (!hash)
1065 return; 1065 return;
1066 1066
1067 for (i = 0; i < hash->size; i++) { 1067 for (i = 0; i < hash->size; i++) {
1068 head = &hash->table[i]; 1068 head = &hash->table[i];
1069 1069
1070 rcu_read_lock(); 1070 rcu_read_lock();
1071 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1071 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1072 /* own orig still holds the old value. */ 1072 /* own orig still holds the old value. */
1073 if (!batadv_compare_eth(backbone_gw->orig, 1073 if (!batadv_compare_eth(backbone_gw->orig,
1074 oldif->net_dev->dev_addr)) 1074 oldif->net_dev->dev_addr))
1075 continue; 1075 continue;
1076 1076
1077 memcpy(backbone_gw->orig, 1077 memcpy(backbone_gw->orig,
1078 primary_if->net_dev->dev_addr, ETH_ALEN); 1078 primary_if->net_dev->dev_addr, ETH_ALEN);
1079 /* send an announce frame so others will ask for our 1079 /* send an announce frame so others will ask for our
1080 * claims and update their tables. 1080 * claims and update their tables.
1081 */ 1081 */
1082 batadv_bla_send_announce(bat_priv, backbone_gw); 1082 batadv_bla_send_announce(bat_priv, backbone_gw);
1083 } 1083 }
1084 rcu_read_unlock(); 1084 rcu_read_unlock();
1085 } 1085 }
1086 } 1086 }
1087 1087
1088 1088
1089 1089
1090 /* (re)start the timer */ 1090 /* (re)start the timer */
1091 static void batadv_bla_start_timer(struct batadv_priv *bat_priv) 1091 static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
1092 { 1092 {
1093 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work); 1093 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1094 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, 1094 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1095 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); 1095 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1096 } 1096 }
1097 1097
1098 /* periodic work to do: 1098 /* periodic work to do:
1099 * * purge structures when they are too old 1099 * * purge structures when they are too old
1100 * * send announcements 1100 * * send announcements
1101 */ 1101 */
1102 static void batadv_bla_periodic_work(struct work_struct *work) 1102 static void batadv_bla_periodic_work(struct work_struct *work)
1103 { 1103 {
1104 struct delayed_work *delayed_work; 1104 struct delayed_work *delayed_work;
1105 struct batadv_priv *bat_priv; 1105 struct batadv_priv *bat_priv;
1106 struct batadv_priv_bla *priv_bla; 1106 struct batadv_priv_bla *priv_bla;
1107 struct hlist_node *node; 1107 struct hlist_node *node;
1108 struct hlist_head *head; 1108 struct hlist_head *head;
1109 struct batadv_backbone_gw *backbone_gw; 1109 struct batadv_backbone_gw *backbone_gw;
1110 struct batadv_hashtable *hash; 1110 struct batadv_hashtable *hash;
1111 struct batadv_hard_iface *primary_if; 1111 struct batadv_hard_iface *primary_if;
1112 int i; 1112 int i;
1113 1113
1114 delayed_work = container_of(work, struct delayed_work, work); 1114 delayed_work = container_of(work, struct delayed_work, work);
1115 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work); 1115 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1116 bat_priv = container_of(priv_bla, struct batadv_priv, bla); 1116 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1117 primary_if = batadv_primary_if_get_selected(bat_priv); 1117 primary_if = batadv_primary_if_get_selected(bat_priv);
1118 if (!primary_if) 1118 if (!primary_if)
1119 goto out; 1119 goto out;
1120 1120
1121 batadv_bla_purge_claims(bat_priv, primary_if, 0); 1121 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1122 batadv_bla_purge_backbone_gw(bat_priv, 0); 1122 batadv_bla_purge_backbone_gw(bat_priv, 0);
1123 1123
1124 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1124 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1125 goto out; 1125 goto out;
1126 1126
1127 hash = bat_priv->bla.backbone_hash; 1127 hash = bat_priv->bla.backbone_hash;
1128 if (!hash) 1128 if (!hash)
1129 goto out; 1129 goto out;
1130 1130
1131 for (i = 0; i < hash->size; i++) { 1131 for (i = 0; i < hash->size; i++) {
1132 head = &hash->table[i]; 1132 head = &hash->table[i];
1133 1133
1134 rcu_read_lock(); 1134 rcu_read_lock();
1135 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1135 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1136 if (!batadv_compare_eth(backbone_gw->orig, 1136 if (!batadv_compare_eth(backbone_gw->orig,
1137 primary_if->net_dev->dev_addr)) 1137 primary_if->net_dev->dev_addr))
1138 continue; 1138 continue;
1139 1139
1140 backbone_gw->lasttime = jiffies; 1140 backbone_gw->lasttime = jiffies;
1141 1141
1142 batadv_bla_send_announce(bat_priv, backbone_gw); 1142 batadv_bla_send_announce(bat_priv, backbone_gw);
1143 } 1143 }
1144 rcu_read_unlock(); 1144 rcu_read_unlock();
1145 } 1145 }
1146 out: 1146 out:
1147 if (primary_if) 1147 if (primary_if)
1148 batadv_hardif_free_ref(primary_if); 1148 batadv_hardif_free_ref(primary_if);
1149 1149
1150 batadv_bla_start_timer(bat_priv); 1150 batadv_bla_start_timer(bat_priv);
1151 } 1151 }
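The two helpers above form the usual self-rescheduling delayed-work pattern: batadv_bla_start_timer() arms the work item, and batadv_bla_periodic_work() re-arms it at the end of every run. A minimal sketch of that pattern follows; my_priv, my_periodic_work and MY_PERIOD_MS are hypothetical names, not batman-adv symbols, and the period is an assumed stand-in for BATADV_BLA_PERIOD_LENGTH.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_PERIOD_MS 10000	/* assumed period, stand-in for BATADV_BLA_PERIOD_LENGTH */

struct my_priv {
	struct delayed_work work;
};

static void my_periodic_work(struct work_struct *work);

/* arm (or re-arm) the delayed work, mirroring batadv_bla_start_timer() */
static void my_start_timer(struct my_priv *priv)
{
	INIT_DELAYED_WORK(&priv->work, my_periodic_work);
	queue_delayed_work(system_wq, &priv->work,
			   msecs_to_jiffies(MY_PERIOD_MS));
}

static void my_periodic_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_priv *priv = container_of(dwork, struct my_priv, work);

	/* ... periodic housekeeping (purging, announcements) goes here ... */

	/* re-arm, just like batadv_bla_periodic_work() does before returning */
	my_start_timer(priv);
}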
1152 1152
1153 /* The claim hash and the backbone hash would receive the same lock class 1153 /* The claim hash and the backbone hash would receive the same lock class
1154 * key because they are both initialized by hash_new with the same key. 1154 * key because they are both initialized by hash_new with the same key.
1155 * Reinitializing them with two different keys allows nested locking 1155 * Reinitializing them with two different keys allows nested locking
1156 * without generating lockdep warnings. 1156 * without generating lockdep warnings.
1157 */ 1157 */
1158 static struct lock_class_key batadv_claim_hash_lock_class_key; 1158 static struct lock_class_key batadv_claim_hash_lock_class_key;
1159 static struct lock_class_key batadv_backbone_hash_lock_class_key; 1159 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1160 1160
1161 /* initialize all bla structures */ 1161 /* initialize all bla structures */
1162 int batadv_bla_init(struct batadv_priv *bat_priv) 1162 int batadv_bla_init(struct batadv_priv *bat_priv)
1163 { 1163 {
1164 int i; 1164 int i;
1165 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; 1165 uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1166 struct batadv_hard_iface *primary_if; 1166 struct batadv_hard_iface *primary_if;
1167 uint16_t crc; 1167 uint16_t crc;
1168 unsigned long entrytime; 1168 unsigned long entrytime;
1169 1169
1170 spin_lock_init(&bat_priv->bla.bcast_duplist_lock); 1170 spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1171 1171
1172 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); 1172 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1173 1173
1174 /* setting claim destination address */ 1174 /* setting claim destination address */
1175 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3); 1175 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1176 bat_priv->bla.claim_dest.type = 0; 1176 bat_priv->bla.claim_dest.type = 0;
1177 primary_if = batadv_primary_if_get_selected(bat_priv); 1177 primary_if = batadv_primary_if_get_selected(bat_priv);
1178 if (primary_if) { 1178 if (primary_if) {
1179 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN); 1179 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1180 bat_priv->bla.claim_dest.group = htons(crc); 1180 bat_priv->bla.claim_dest.group = htons(crc);
1181 batadv_hardif_free_ref(primary_if); 1181 batadv_hardif_free_ref(primary_if);
1182 } else { 1182 } else {
1183 bat_priv->bla.claim_dest.group = 0; /* will be set later */ 1183 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1184 } 1184 }
1185 1185
1186 /* initialize the duplicate list */ 1186 /* initialize the duplicate list */
1187 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); 1187 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1188 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) 1188 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1189 bat_priv->bla.bcast_duplist[i].entrytime = entrytime; 1189 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1190 bat_priv->bla.bcast_duplist_curr = 0; 1190 bat_priv->bla.bcast_duplist_curr = 0;
1191 1191
1192 if (bat_priv->bla.claim_hash) 1192 if (bat_priv->bla.claim_hash)
1193 return 0; 1193 return 0;
1194 1194
1195 bat_priv->bla.claim_hash = batadv_hash_new(128); 1195 bat_priv->bla.claim_hash = batadv_hash_new(128);
1196 bat_priv->bla.backbone_hash = batadv_hash_new(32); 1196 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1197 1197
1198 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) 1198 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1199 return -ENOMEM; 1199 return -ENOMEM;
1200 1200
1201 batadv_hash_set_lock_class(bat_priv->bla.claim_hash, 1201 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1202 &batadv_claim_hash_lock_class_key); 1202 &batadv_claim_hash_lock_class_key);
1203 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash, 1203 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1204 &batadv_backbone_hash_lock_class_key); 1204 &batadv_backbone_hash_lock_class_key);
1205 1205
1206 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); 1206 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1207 1207
1208 batadv_bla_start_timer(bat_priv); 1208 batadv_bla_start_timer(bat_priv);
1209 return 0; 1209 return 0;
1210 } 1210 }
1211 1211
1212 /** 1212 /**
1213 * batadv_bla_check_bcast_duplist 1213 * batadv_bla_check_bcast_duplist
1214 * @bat_priv: the bat priv with all the soft interface information 1214 * @bat_priv: the bat priv with all the soft interface information
1215 * @bcast_packet: encapsulated broadcast frame plus batman header 1215 * @bcast_packet: encapsulated broadcast frame plus batman header
1216 * @bcast_packet_len: length of encapsulated broadcast frame plus batman header 1216 * @bcast_packet_len: length of encapsulated broadcast frame plus batman header
1217 * 1217 *
1218 * check if it is on our broadcast list. Another gateway might 1218 * check if it is on our broadcast list. Another gateway might
1219 * have sent the same packet because it is connected to the same backbone, 1219 * have sent the same packet because it is connected to the same backbone,
1220 * so we have to remove this duplicate. 1220 * so we have to remove this duplicate.
1221 * 1221 *
1222 * This is performed by checking the CRC, which will tell us 1222 * This is performed by checking the CRC, which will tell us
1223 * with a good chance that it is the same packet. If it is furthermore 1223 * with a good chance that it is the same packet. If it is furthermore
1224 * sent by another host, drop it. We allow equal packets from 1224 * sent by another host, drop it. We allow equal packets from
1225 * the same host however as this might be intended. 1225 * the same host however as this might be intended.
1226 */ 1226 */
1227 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, 1227 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1228 struct batadv_bcast_packet *bcast_packet, 1228 struct batadv_bcast_packet *bcast_packet,
1229 int bcast_packet_len) 1229 int bcast_packet_len)
1230 { 1230 {
1231 int i, length, curr, ret = 0; 1231 int i, length, curr, ret = 0;
1232 uint8_t *content; 1232 uint8_t *content;
1233 uint16_t crc; 1233 uint16_t crc;
1234 struct batadv_bcast_duplist_entry *entry; 1234 struct batadv_bcast_duplist_entry *entry;
1235 1235
1236 length = bcast_packet_len - sizeof(*bcast_packet); 1236 length = bcast_packet_len - sizeof(*bcast_packet);
1237 content = (uint8_t *)bcast_packet; 1237 content = (uint8_t *)bcast_packet;
1238 content += sizeof(*bcast_packet); 1238 content += sizeof(*bcast_packet);
1239 1239
1240 /* calculate the crc ... */ 1240 /* calculate the crc ... */
1241 crc = crc16(0, content, length); 1241 crc = crc16(0, content, length);
1242 1242
1243 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); 1243 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1244 1244
1245 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { 1245 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1246 curr = (bat_priv->bla.bcast_duplist_curr + i); 1246 curr = (bat_priv->bla.bcast_duplist_curr + i);
1247 curr %= BATADV_DUPLIST_SIZE; 1247 curr %= BATADV_DUPLIST_SIZE;
1248 entry = &bat_priv->bla.bcast_duplist[curr]; 1248 entry = &bat_priv->bla.bcast_duplist[curr];
1249 1249
1250 /* we can stop searching if the entry is too old; 1250 /* we can stop searching if the entry is too old;
1251 * later entries will be even older 1251 * later entries will be even older
1252 */ 1252 */
1253 if (batadv_has_timed_out(entry->entrytime, 1253 if (batadv_has_timed_out(entry->entrytime,
1254 BATADV_DUPLIST_TIMEOUT)) 1254 BATADV_DUPLIST_TIMEOUT))
1255 break; 1255 break;
1256 1256
1257 if (entry->crc != crc) 1257 if (entry->crc != crc)
1258 continue; 1258 continue;
1259 1259
1260 if (batadv_compare_eth(entry->orig, bcast_packet->orig)) 1260 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1261 continue; 1261 continue;
1262 1262
1263 /* this entry seems to match: same crc, not too old, 1263 /* this entry seems to match: same crc, not too old,
1264 * and from another gw. therefore return 1 to forbid it. 1264 * and from another gw. therefore return 1 to forbid it.
1265 */ 1265 */
1266 ret = 1; 1266 ret = 1;
1267 goto out; 1267 goto out;
1268 } 1268 }
1269 /* not found, add a new entry (overwrite the oldest entry) 1269 /* not found, add a new entry (overwrite the oldest entry)
1270 * and allow it, it's the first occurrence. 1270 * and allow it, it's the first occurrence.
1271 */ 1271 */
1272 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); 1272 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1273 curr %= BATADV_DUPLIST_SIZE; 1273 curr %= BATADV_DUPLIST_SIZE;
1274 entry = &bat_priv->bla.bcast_duplist[curr]; 1274 entry = &bat_priv->bla.bcast_duplist[curr];
1275 entry->crc = crc; 1275 entry->crc = crc;
1276 entry->entrytime = jiffies; 1276 entry->entrytime = jiffies;
1277 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN); 1277 memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
1278 bat_priv->bla.bcast_duplist_curr = curr; 1278 bat_priv->bla.bcast_duplist_curr = curr;
1279 1279
1280 out: 1280 out:
1281 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock); 1281 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1282 1282
1283 return ret; 1283 return ret;
1284 } 1284 }
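As a usage illustration only: a broadcast receive path could consult the duplicate list like the sketch below before re-flooding a frame. The caller name my_should_drop_bcast is hypothetical, it assumes a linear skb whose data points at the batadv_bcast_packet, and the real call site lives in the routing code, not in this file.

/* Hypothetical caller; relies on this file's existing includes (main.h etc.). */
static bool my_should_drop_bcast(struct batadv_priv *bat_priv,
				 struct sk_buff *skb)
{
	struct batadv_bcast_packet *bcast_packet;

	/* assumes a linear skb with data pointing at the broadcast packet */
	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* 1 means: same CRC seen recently from another backbone gateway */
	return batadv_bla_check_bcast_duplist(bat_priv, bcast_packet,
					      skb->len) == 1;
}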
1285 1285
1286 1286
1287 1287
1288 /* @bat_priv: the bat priv with all the soft interface information 1288 /* @bat_priv: the bat priv with all the soft interface information
1289 * @orig: originator mac address 1289 * @orig: originator mac address
1290 * 1290 *
1291 * check if the originator is a gateway for any VLAN ID. 1291 * check if the originator is a gateway for any VLAN ID.
1292 * 1292 *
1293 * returns 1 if it is found, 0 otherwise 1293 * returns 1 if it is found, 0 otherwise
1294 */ 1294 */
1295 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig) 1295 int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1296 { 1296 {
1297 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 1297 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1298 struct hlist_head *head; 1298 struct hlist_head *head;
1299 struct hlist_node *node; 1299 struct hlist_node *node;
1300 struct batadv_backbone_gw *backbone_gw; 1300 struct batadv_backbone_gw *backbone_gw;
1301 int i; 1301 int i;
1302 1302
1303 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1303 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1304 return 0; 1304 return 0;
1305 1305
1306 if (!hash) 1306 if (!hash)
1307 return 0; 1307 return 0;
1308 1308
1309 for (i = 0; i < hash->size; i++) { 1309 for (i = 0; i < hash->size; i++) {
1310 head = &hash->table[i]; 1310 head = &hash->table[i];
1311 1311
1312 rcu_read_lock(); 1312 rcu_read_lock();
1313 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1313 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1314 if (batadv_compare_eth(backbone_gw->orig, orig)) { 1314 if (batadv_compare_eth(backbone_gw->orig, orig)) {
1315 rcu_read_unlock(); 1315 rcu_read_unlock();
1316 return 1; 1316 return 1;
1317 } 1317 }
1318 } 1318 }
1319 rcu_read_unlock(); 1319 rcu_read_unlock();
1320 } 1320 }
1321 1321
1322 return 0; 1322 return 0;
1323 } 1323 }
1324 1324
1325 1325
1326 /** 1326 /**
1327 * batadv_bla_is_backbone_gw 1327 * batadv_bla_is_backbone_gw
1328 * @skb: the frame to be checked 1328 * @skb: the frame to be checked
1329 * @orig_node: the orig_node of the frame 1329 * @orig_node: the orig_node of the frame
1330 * @hdr_size: maximum length of the frame 1330 * @hdr_size: maximum length of the frame
1331 * 1331 *
1332 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1 1332 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
1333 * if the orig_node is also a gateway on the soft interface, otherwise it 1333 * if the orig_node is also a gateway on the soft interface, otherwise it
1334 * returns 0. 1334 * returns 0.
1335 */ 1335 */
1336 int batadv_bla_is_backbone_gw(struct sk_buff *skb, 1336 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1337 struct batadv_orig_node *orig_node, int hdr_size) 1337 struct batadv_orig_node *orig_node, int hdr_size)
1338 { 1338 {
1339 struct ethhdr *ethhdr; 1339 struct ethhdr *ethhdr;
1340 struct vlan_ethhdr *vhdr; 1340 struct vlan_ethhdr *vhdr;
1341 struct batadv_backbone_gw *backbone_gw; 1341 struct batadv_backbone_gw *backbone_gw;
1342 short vid = -1; 1342 short vid = -1;
1343 1343
1344 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) 1344 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1345 return 0; 1345 return 0;
1346 1346
1347 /* first, find out the vid. */ 1347 /* first, find out the vid. */
1348 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) 1348 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1349 return 0; 1349 return 0;
1350 1350
1351 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size); 1351 ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
1352 1352
1353 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { 1353 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
1354 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr))) 1354 if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
1355 return 0; 1355 return 0;
1356 1356
1357 vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size); 1357 vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
1358 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; 1358 vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
1359 } 1359 }
1360 1360
1361 /* see if this originator is a backbone gw for this VLAN */ 1361 /* see if this originator is a backbone gw for this VLAN */
1362 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv, 1362 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1363 orig_node->orig, vid); 1363 orig_node->orig, vid);
1364 if (!backbone_gw) 1364 if (!backbone_gw)
1365 return 0; 1365 return 0;
1366 1366
1367 batadv_backbone_gw_free_ref(backbone_gw); 1367 batadv_backbone_gw_free_ref(backbone_gw);
1368 return 1; 1368 return 1;
1369 } 1369 }
1370 1370
1371 /* free all bla structures (for softinterface free or module unload) */ 1371 /* free all bla structures (for softinterface free or module unload) */
1372 void batadv_bla_free(struct batadv_priv *bat_priv) 1372 void batadv_bla_free(struct batadv_priv *bat_priv)
1373 { 1373 {
1374 struct batadv_hard_iface *primary_if; 1374 struct batadv_hard_iface *primary_if;
1375 1375
1376 cancel_delayed_work_sync(&bat_priv->bla.work); 1376 cancel_delayed_work_sync(&bat_priv->bla.work);
1377 primary_if = batadv_primary_if_get_selected(bat_priv); 1377 primary_if = batadv_primary_if_get_selected(bat_priv);
1378 1378
1379 if (bat_priv->bla.claim_hash) { 1379 if (bat_priv->bla.claim_hash) {
1380 batadv_bla_purge_claims(bat_priv, primary_if, 1); 1380 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1381 batadv_hash_destroy(bat_priv->bla.claim_hash); 1381 batadv_hash_destroy(bat_priv->bla.claim_hash);
1382 bat_priv->bla.claim_hash = NULL; 1382 bat_priv->bla.claim_hash = NULL;
1383 } 1383 }
1384 if (bat_priv->bla.backbone_hash) { 1384 if (bat_priv->bla.backbone_hash) {
1385 batadv_bla_purge_backbone_gw(bat_priv, 1); 1385 batadv_bla_purge_backbone_gw(bat_priv, 1);
1386 batadv_hash_destroy(bat_priv->bla.backbone_hash); 1386 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1387 bat_priv->bla.backbone_hash = NULL; 1387 bat_priv->bla.backbone_hash = NULL;
1388 } 1388 }
1389 if (primary_if) 1389 if (primary_if)
1390 batadv_hardif_free_ref(primary_if); 1390 batadv_hardif_free_ref(primary_if);
1391 } 1391 }
1392 1392
1393 /** 1393 /**
1394 * batadv_bla_rx 1394 * batadv_bla_rx
1395 * @bat_priv: the bat priv with all the soft interface information 1395 * @bat_priv: the bat priv with all the soft interface information
1396 * @skb: the frame to be checked 1396 * @skb: the frame to be checked
1397 * @vid: the VLAN ID of the frame 1397 * @vid: the VLAN ID of the frame
1398 * @is_bcast: whether the frame arrived in a broadcast packet type 1398 * @is_bcast: whether the frame arrived in a broadcast packet type
1399 * 1399 *
1400 * bla_rx checks if: 1400 * bla_rx checks if:
1401 * * we have to race for a claim 1401 * * we have to race for a claim
1402 * * the frame is allowed on the LAN 1402 * * the frame is allowed on the LAN
1403 * 1403 *
1404 * in these cases, the skb is further handled by this function and 1404 * in these cases, the skb is further handled by this function and
1405 * returns 1, otherwise it returns 0 and the caller shall further 1405 * returns 1, otherwise it returns 0 and the caller shall further
1406 * process the skb. 1406 * process the skb.
1407 */ 1407 */
1408 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid, 1408 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
1409 bool is_bcast) 1409 bool is_bcast)
1410 { 1410 {
1411 struct ethhdr *ethhdr; 1411 struct ethhdr *ethhdr;
1412 struct batadv_claim search_claim, *claim = NULL; 1412 struct batadv_claim search_claim, *claim = NULL;
1413 struct batadv_hard_iface *primary_if; 1413 struct batadv_hard_iface *primary_if;
1414 int ret; 1414 int ret;
1415 1415
1416 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1416 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1417 1417
1418 primary_if = batadv_primary_if_get_selected(bat_priv); 1418 primary_if = batadv_primary_if_get_selected(bat_priv);
1419 if (!primary_if) 1419 if (!primary_if)
1420 goto handled; 1420 goto handled;
1421 1421
1422 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1422 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1423 goto allow; 1423 goto allow;
1424 1424
1425 1425
1426 if (unlikely(atomic_read(&bat_priv->bla.num_requests))) 1426 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1427 /* don't allow broadcasts while requests are in flight */ 1427 /* don't allow broadcasts while requests are in flight */
1428 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) 1428 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1429 goto handled; 1429 goto handled;
1430 1430
1431 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); 1431 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1432 search_claim.vid = vid; 1432 search_claim.vid = vid;
1433 claim = batadv_claim_hash_find(bat_priv, &search_claim); 1433 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1434 1434
1435 if (!claim) { 1435 if (!claim) {
1436 /* possible optimization: race for a claim */ 1436 /* possible optimization: race for a claim */
1437 /* No claim exists yet, claim it for us! 1437 /* No claim exists yet, claim it for us!
1438 */ 1438 */
1439 batadv_handle_claim(bat_priv, primary_if, 1439 batadv_handle_claim(bat_priv, primary_if,
1440 primary_if->net_dev->dev_addr, 1440 primary_if->net_dev->dev_addr,
1441 ethhdr->h_source, vid); 1441 ethhdr->h_source, vid);
1442 goto allow; 1442 goto allow;
1443 } 1443 }
1444 1444
1445 /* if it is our own claim ... */ 1445 /* if it is our own claim ... */
1446 if (batadv_compare_eth(claim->backbone_gw->orig, 1446 if (batadv_compare_eth(claim->backbone_gw->orig,
1447 primary_if->net_dev->dev_addr)) { 1447 primary_if->net_dev->dev_addr)) {
1448 /* ... allow it in any case */ 1448 /* ... allow it in any case */
1449 claim->lasttime = jiffies; 1449 claim->lasttime = jiffies;
1450 goto allow; 1450 goto allow;
1451 } 1451 }
1452 1452
1453 /* if it is a broadcast ... */ 1453 /* if it is a broadcast ... */
1454 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { 1454 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1455 /* ... drop it. the responsible gateway is in charge. 1455 /* ... drop it. the responsible gateway is in charge.
1456 * 1456 *
1457 * We need to check is_bcast because with the gateway 1457 * We need to check is_bcast because with the gateway
1458 * feature, broadcasts (like DHCP requests) may be sent 1458 * feature, broadcasts (like DHCP requests) may be sent
1459 * using a unicast packet type. 1459 * using a unicast packet type.
1460 */ 1460 */
1461 goto handled; 1461 goto handled;
1462 } else { 1462 } else {
1463 /* seems the client considers us as its best gateway. 1463 /* seems the client considers us as its best gateway.
1464 * send a claim and update the claim table 1464 * send a claim and update the claim table
1465 * immediately. 1465 * immediately.
1466 */ 1466 */
1467 batadv_handle_claim(bat_priv, primary_if, 1467 batadv_handle_claim(bat_priv, primary_if,
1468 primary_if->net_dev->dev_addr, 1468 primary_if->net_dev->dev_addr,
1469 ethhdr->h_source, vid); 1469 ethhdr->h_source, vid);
1470 goto allow; 1470 goto allow;
1471 } 1471 }
1472 allow: 1472 allow:
1473 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1473 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1474 ret = 0; 1474 ret = 0;
1475 goto out; 1475 goto out;
1476 1476
1477 handled: 1477 handled:
1478 kfree_skb(skb); 1478 kfree_skb(skb);
1479 ret = 1; 1479 ret = 1;
1480 1480
1481 out: 1481 out:
1482 if (primary_if) 1482 if (primary_if)
1483 batadv_hardif_free_ref(primary_if); 1483 batadv_hardif_free_ref(primary_if);
1484 if (claim) 1484 if (claim)
1485 batadv_claim_free_ref(claim); 1485 batadv_claim_free_ref(claim);
1486 return ret; 1486 return ret;
1487 } 1487 }
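To make the 0/1 contract concrete, a hedged sketch of a caller follows. my_interface_rx is a hypothetical wrapper (the real call sites are elsewhere in batman-adv); the key point is that a return value of 1 means the skb has already been consumed or freed and must not be touched again.

/* Hypothetical rx wrapper; relies on this file's existing includes. */
static void my_interface_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    short vid, bool is_bcast)
{
	/* returns 1 when bla claimed or dropped the frame; the skb is gone then */
	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
		return;

	/* ... otherwise hand the frame to the local soft interface ... */
}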
1488 1488
1489 /** 1489 /**
1490 * batadv_bla_tx 1490 * batadv_bla_tx
1491 * @bat_priv: the bat priv with all the soft interface information 1491 * @bat_priv: the bat priv with all the soft interface information
1492 * @skb: the frame to be checked 1492 * @skb: the frame to be checked
1493 * @vid: the VLAN ID of the frame 1493 * @vid: the VLAN ID of the frame
1494 * 1494 *
1495 * bla_tx checks if: 1495 * bla_tx checks if:
1496 * * a claim was received which has to be processed 1496 * * a claim was received which has to be processed
1497 * * the frame is allowed on the mesh 1497 * * the frame is allowed on the mesh
1498 * 1498 *
1499 * in these cases, the skb is further handled by this function and 1499 * in these cases, the skb is further handled by this function and
1500 * returns 1, otherwise it returns 0 and the caller shall further 1500 * returns 1, otherwise it returns 0 and the caller shall further
1501 * process the skb. 1501 * process the skb.
1502 */ 1502 */
1503 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid) 1503 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
1504 { 1504 {
1505 struct ethhdr *ethhdr; 1505 struct ethhdr *ethhdr;
1506 struct batadv_claim search_claim, *claim = NULL; 1506 struct batadv_claim search_claim, *claim = NULL;
1507 struct batadv_hard_iface *primary_if; 1507 struct batadv_hard_iface *primary_if;
1508 int ret = 0; 1508 int ret = 0;
1509 1509
1510 primary_if = batadv_primary_if_get_selected(bat_priv); 1510 primary_if = batadv_primary_if_get_selected(bat_priv);
1511 if (!primary_if) 1511 if (!primary_if)
1512 goto out; 1512 goto out;
1513 1513
1514 if (!atomic_read(&bat_priv->bridge_loop_avoidance)) 1514 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1515 goto allow; 1515 goto allow;
1516 1516
1517 /* in VLAN case, the mac header might not be set. */ 1517 /* in VLAN case, the mac header might not be set. */
1518 skb_reset_mac_header(skb); 1518 skb_reset_mac_header(skb);
1519 1519
1520 if (batadv_bla_process_claim(bat_priv, primary_if, skb)) 1520 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1521 goto handled; 1521 goto handled;
1522 1522
1523 ethhdr = (struct ethhdr *)skb_mac_header(skb); 1523 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1524 1524
1525 if (unlikely(atomic_read(&bat_priv->bla.num_requests))) 1525 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1526 /* don't allow broadcasts while requests are in flight */ 1526 /* don't allow broadcasts while requests are in flight */
1527 if (is_multicast_ether_addr(ethhdr->h_dest)) 1527 if (is_multicast_ether_addr(ethhdr->h_dest))
1528 goto handled; 1528 goto handled;
1529 1529
1530 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN); 1530 memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
1531 search_claim.vid = vid; 1531 search_claim.vid = vid;
1532 1532
1533 claim = batadv_claim_hash_find(bat_priv, &search_claim); 1533 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1534 1534
1535 /* if no claim exists, allow it. */ 1535 /* if no claim exists, allow it. */
1536 if (!claim) 1536 if (!claim)
1537 goto allow; 1537 goto allow;
1538 1538
1539 /* check if we are responsible. */ 1539 /* check if we are responsible. */
1540 if (batadv_compare_eth(claim->backbone_gw->orig, 1540 if (batadv_compare_eth(claim->backbone_gw->orig,
1541 primary_if->net_dev->dev_addr)) { 1541 primary_if->net_dev->dev_addr)) {
1542 /* if yes, the client has roamed and we have 1542 /* if yes, the client has roamed and we have
1543 * to unclaim it. 1543 * to unclaim it.
1544 */ 1544 */
1545 batadv_handle_unclaim(bat_priv, primary_if, 1545 batadv_handle_unclaim(bat_priv, primary_if,
1546 primary_if->net_dev->dev_addr, 1546 primary_if->net_dev->dev_addr,
1547 ethhdr->h_source, vid); 1547 ethhdr->h_source, vid);
1548 goto allow; 1548 goto allow;
1549 } 1549 }
1550 1550
1551 /* check if it is a multicast/broadcast frame */ 1551 /* check if it is a multicast/broadcast frame */
1552 if (is_multicast_ether_addr(ethhdr->h_dest)) { 1552 if (is_multicast_ether_addr(ethhdr->h_dest)) {
1553 /* drop it. the responsible gateway has forwarded it into 1553 /* drop it. the responsible gateway has forwarded it into
1554 * the backbone network. 1554 * the backbone network.
1555 */ 1555 */
1556 goto handled; 1556 goto handled;
1557 } else { 1557 } else {
1558 /* we must allow it. at least if we are 1558 /* we must allow it. at least if we are
1559 * responsible for the DESTINATION. 1559 * responsible for the DESTINATION.
1560 */ 1560 */
1561 goto allow; 1561 goto allow;
1562 } 1562 }
1563 allow: 1563 allow:
1564 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); 1564 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1565 ret = 0; 1565 ret = 0;
1566 goto out; 1566 goto out;
1567 handled: 1567 handled:
1568 ret = 1; 1568 ret = 1;
1569 out: 1569 out:
1570 if (primary_if) 1570 if (primary_if)
1571 batadv_hardif_free_ref(primary_if); 1571 batadv_hardif_free_ref(primary_if);
1572 if (claim) 1572 if (claim)
1573 batadv_claim_free_ref(claim); 1573 batadv_claim_free_ref(claim);
1574 return ret; 1574 return ret;
1575 } 1575 }
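The tx contract differs in one detail: batadv_bla_tx() returns 1 to stop transmission but, unlike batadv_bla_rx(), it does not free the skb itself, so the caller has to drop it. A hedged sketch with a hypothetical caller name:

/* Hypothetical tx wrapper; the real call site sits in the soft-interface code. */
static netdev_tx_t my_interface_tx(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, short vid)
{
	if (batadv_bla_tx(bat_priv, skb, vid)) {
		kfree_skb(skb);	/* frame must not enter the mesh */
		return NETDEV_TX_OK;
	}

	/* ... continue with regular unicast/broadcast transmission ... */
	return NETDEV_TX_OK;
}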
1576 1576
1577 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) 1577 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1578 { 1578 {
1579 struct net_device *net_dev = (struct net_device *)seq->private; 1579 struct net_device *net_dev = (struct net_device *)seq->private;
1580 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1580 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1581 struct batadv_hashtable *hash = bat_priv->bla.claim_hash; 1581 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1582 struct batadv_claim *claim; 1582 struct batadv_claim *claim;
1583 struct batadv_hard_iface *primary_if; 1583 struct batadv_hard_iface *primary_if;
1584 struct hlist_node *node; 1584 struct hlist_node *node;
1585 struct hlist_head *head; 1585 struct hlist_head *head;
1586 uint32_t i; 1586 uint32_t i;
1587 bool is_own; 1587 bool is_own;
1588 int ret = 0;
1589 uint8_t *primary_addr; 1588 uint8_t *primary_addr;
1590 1589
1591 primary_if = batadv_primary_if_get_selected(bat_priv); 1590 primary_if = batadv_seq_print_text_primary_if_get(seq);
1592 if (!primary_if) { 1591 if (!primary_if)
1593 ret = seq_printf(seq,
1594 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1595 net_dev->name);
1596 goto out; 1592 goto out;
1597 }
1598 1593
1599 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1600 ret = seq_printf(seq,
1601 "BATMAN mesh %s disabled - primary interface not active\n",
1602 net_dev->name);
1603 goto out;
1604 }
1605
1606 primary_addr = primary_if->net_dev->dev_addr; 1594 primary_addr = primary_if->net_dev->dev_addr;
1607 seq_printf(seq, 1595 seq_printf(seq,
1608 "Claims announced for the mesh %s (orig %pM, group id %04x)\n", 1596 "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
1609 net_dev->name, primary_addr, 1597 net_dev->name, primary_addr,
1610 ntohs(bat_priv->bla.claim_dest.group)); 1598 ntohs(bat_priv->bla.claim_dest.group));
1611 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n", 1599 seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
1612 "Client", "VID", "Originator", "CRC"); 1600 "Client", "VID", "Originator", "CRC");
1613 for (i = 0; i < hash->size; i++) { 1601 for (i = 0; i < hash->size; i++) {
1614 head = &hash->table[i]; 1602 head = &hash->table[i];
1615 1603
1616 rcu_read_lock(); 1604 rcu_read_lock();
1617 hlist_for_each_entry_rcu(claim, node, head, hash_entry) { 1605 hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
1618 is_own = batadv_compare_eth(claim->backbone_gw->orig, 1606 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1619 primary_addr); 1607 primary_addr);
1620 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n", 1608 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
1621 claim->addr, claim->vid, 1609 claim->addr, claim->vid,
1622 claim->backbone_gw->orig, 1610 claim->backbone_gw->orig,
1623 (is_own ? 'x' : ' '), 1611 (is_own ? 'x' : ' '),
1624 claim->backbone_gw->crc); 1612 claim->backbone_gw->crc);
1625 } 1613 }
1626 rcu_read_unlock(); 1614 rcu_read_unlock();
1627 } 1615 }
1628 out: 1616 out:
1629 if (primary_if) 1617 if (primary_if)
1630 batadv_hardif_free_ref(primary_if); 1618 batadv_hardif_free_ref(primary_if);
1631 return ret; 1619 return 0;
1632 } 1620 }
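This hunk is the core of the consolidation: the open-coded primary_if lookup, the BATADV_IF_ACTIVE check and the two user-facing error messages are replaced by a single call to batadv_seq_print_text_primary_if_get(). The sketch below reconstructs what such a helper plausibly does, based only on the duplicated code removed here; the name my_seq_print_text_primary_if_get marks it as a reconstruction, and the real definition is added elsewhere in this commit.

/* Reconstruction for illustration; relies on the file's existing includes. */
struct batadv_hard_iface *
my_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		return NULL;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		return primary_if;

	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	return NULL;
}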
1633 1621
1634 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset) 1622 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1635 { 1623 {
1636 struct net_device *net_dev = (struct net_device *)seq->private; 1624 struct net_device *net_dev = (struct net_device *)seq->private;
1637 struct batadv_priv *bat_priv = netdev_priv(net_dev); 1625 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1638 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; 1626 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1639 struct batadv_backbone_gw *backbone_gw; 1627 struct batadv_backbone_gw *backbone_gw;
1640 struct batadv_hard_iface *primary_if; 1628 struct batadv_hard_iface *primary_if;
1641 struct hlist_node *node; 1629 struct hlist_node *node;
1642 struct hlist_head *head; 1630 struct hlist_head *head;
1643 int secs, msecs; 1631 int secs, msecs;
1644 uint32_t i; 1632 uint32_t i;
1645 bool is_own; 1633 bool is_own;
1646 int ret = 0;
1647 uint8_t *primary_addr; 1634 uint8_t *primary_addr;
1648 1635
1649 primary_if = batadv_primary_if_get_selected(bat_priv); 1636 primary_if = batadv_seq_print_text_primary_if_get(seq);
1650 if (!primary_if) { 1637 if (!primary_if)
1651 ret = seq_printf(seq,
1652 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
1653 net_dev->name);
1654 goto out; 1638 goto out;
1655 }
1656 1639
1657 if (primary_if->if_status != BATADV_IF_ACTIVE) {
1658 ret = seq_printf(seq,
1659 "BATMAN mesh %s disabled - primary interface not active\n",
1660 net_dev->name);
1661 goto out;
1662 }
1663
1664 primary_addr = primary_if->net_dev->dev_addr; 1640 primary_addr = primary_if->net_dev->dev_addr;
1665 seq_printf(seq, 1641 seq_printf(seq,
1666 "Backbones announced for the mesh %s (orig %pM, group id %04x)\n", 1642 "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
1667 net_dev->name, primary_addr, 1643 net_dev->name, primary_addr,
1668 ntohs(bat_priv->bla.claim_dest.group)); 1644 ntohs(bat_priv->bla.claim_dest.group));
1669 seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n", 1645 seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n",
1670 "Originator", "VID", "last seen", "CRC"); 1646 "Originator", "VID", "last seen", "CRC");
1671 for (i = 0; i < hash->size; i++) { 1647 for (i = 0; i < hash->size; i++) {
1672 head = &hash->table[i]; 1648 head = &hash->table[i];
1673 1649
1674 rcu_read_lock(); 1650 rcu_read_lock();
1675 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) { 1651 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1676 msecs = jiffies_to_msecs(jiffies - 1652 msecs = jiffies_to_msecs(jiffies -
1677 backbone_gw->lasttime); 1653 backbone_gw->lasttime);
1678 secs = msecs / 1000; 1654 secs = msecs / 1000;
1679 msecs = msecs % 1000; 1655 msecs = msecs % 1000;
1680 1656
1681 is_own = batadv_compare_eth(backbone_gw->orig, 1657 is_own = batadv_compare_eth(backbone_gw->orig,
1682 primary_addr); 1658 primary_addr);
1683 if (is_own) 1659 if (is_own)
1684 continue; 1660 continue;
1685 1661
1686 seq_printf(seq, 1662 seq_printf(seq,
1687 " * %pM on % 5d % 4i.%03is (%04x)\n", 1663 " * %pM on % 5d % 4i.%03is (%04x)\n",
1688 backbone_gw->orig, backbone_gw->vid, 1664 backbone_gw->orig, backbone_gw->vid,
1689 secs, msecs, backbone_gw->crc); 1665 secs, msecs, backbone_gw->crc);
1690 } 1666 }
1691 rcu_read_unlock(); 1667 rcu_read_unlock();
1692 } 1668 }
1693 out: 1669 out:
1694 if (primary_if) 1670 if (primary_if)
1695 batadv_hardif_free_ref(primary_if); 1671 batadv_hardif_free_ref(primary_if);
1696 return ret; 1672 return 0;
1697 } 1673 }
1698 1674
net/batman-adv/gateway_client.c
1 /* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Marek Lindner 3 * Marek Lindner
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #include "main.h" 20 #include "main.h"
21 #include "sysfs.h" 21 #include "sysfs.h"
22 #include "gateway_client.h" 22 #include "gateway_client.h"
23 #include "gateway_common.h" 23 #include "gateway_common.h"
24 #include "hard-interface.h" 24 #include "hard-interface.h"
25 #include "originator.h" 25 #include "originator.h"
26 #include "translation-table.h" 26 #include "translation-table.h"
27 #include "routing.h" 27 #include "routing.h"
28 #include <linux/ip.h> 28 #include <linux/ip.h>
29 #include <linux/ipv6.h> 29 #include <linux/ipv6.h>
30 #include <linux/udp.h> 30 #include <linux/udp.h>
31 #include <linux/if_vlan.h> 31 #include <linux/if_vlan.h>
32 32
33 /* This is the offset of the options field in a dhcp packet starting at 33 /* This is the offset of the options field in a dhcp packet starting at
34 * the beginning of the dhcp header 34 * the beginning of the dhcp header
35 */ 35 */
36 #define BATADV_DHCP_OPTIONS_OFFSET 240 36 #define BATADV_DHCP_OPTIONS_OFFSET 240
37 #define BATADV_DHCP_REQUEST 3 37 #define BATADV_DHCP_REQUEST 3
38 38
39 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node) 39 static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
40 { 40 {
41 if (atomic_dec_and_test(&gw_node->refcount)) 41 if (atomic_dec_and_test(&gw_node->refcount))
42 kfree_rcu(gw_node, rcu); 42 kfree_rcu(gw_node, rcu);
43 } 43 }
44 44
45 static struct batadv_gw_node * 45 static struct batadv_gw_node *
46 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) 46 batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
47 { 47 {
48 struct batadv_gw_node *gw_node; 48 struct batadv_gw_node *gw_node;
49 49
50 rcu_read_lock(); 50 rcu_read_lock();
51 gw_node = rcu_dereference(bat_priv->gw.curr_gw); 51 gw_node = rcu_dereference(bat_priv->gw.curr_gw);
52 if (!gw_node) 52 if (!gw_node)
53 goto out; 53 goto out;
54 54
55 if (!atomic_inc_not_zero(&gw_node->refcount)) 55 if (!atomic_inc_not_zero(&gw_node->refcount))
56 gw_node = NULL; 56 gw_node = NULL;
57 57
58 out: 58 out:
59 rcu_read_unlock(); 59 rcu_read_unlock();
60 return gw_node; 60 return gw_node;
61 } 61 }
62 62
63 struct batadv_orig_node * 63 struct batadv_orig_node *
64 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv) 64 batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
65 { 65 {
66 struct batadv_gw_node *gw_node; 66 struct batadv_gw_node *gw_node;
67 struct batadv_orig_node *orig_node = NULL; 67 struct batadv_orig_node *orig_node = NULL;
68 68
69 gw_node = batadv_gw_get_selected_gw_node(bat_priv); 69 gw_node = batadv_gw_get_selected_gw_node(bat_priv);
70 if (!gw_node) 70 if (!gw_node)
71 goto out; 71 goto out;
72 72
73 rcu_read_lock(); 73 rcu_read_lock();
74 orig_node = gw_node->orig_node; 74 orig_node = gw_node->orig_node;
75 if (!orig_node) 75 if (!orig_node)
76 goto unlock; 76 goto unlock;
77 77
78 if (!atomic_inc_not_zero(&orig_node->refcount)) 78 if (!atomic_inc_not_zero(&orig_node->refcount))
79 orig_node = NULL; 79 orig_node = NULL;
80 80
81 unlock: 81 unlock:
82 rcu_read_unlock(); 82 rcu_read_unlock();
83 out: 83 out:
84 if (gw_node) 84 if (gw_node)
85 batadv_gw_node_free_ref(gw_node); 85 batadv_gw_node_free_ref(gw_node);
86 return orig_node; 86 return orig_node;
87 } 87 }
88 88
89 static void batadv_gw_select(struct batadv_priv *bat_priv, 89 static void batadv_gw_select(struct batadv_priv *bat_priv,
90 struct batadv_gw_node *new_gw_node) 90 struct batadv_gw_node *new_gw_node)
91 { 91 {
92 struct batadv_gw_node *curr_gw_node; 92 struct batadv_gw_node *curr_gw_node;
93 93
94 spin_lock_bh(&bat_priv->gw.list_lock); 94 spin_lock_bh(&bat_priv->gw.list_lock);
95 95
96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 96 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
97 new_gw_node = NULL; 97 new_gw_node = NULL;
98 98
99 curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1); 99 curr_gw_node = rcu_dereference_protected(bat_priv->gw.curr_gw, 1);
100 rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node); 100 rcu_assign_pointer(bat_priv->gw.curr_gw, new_gw_node);
101 101
102 if (curr_gw_node) 102 if (curr_gw_node)
103 batadv_gw_node_free_ref(curr_gw_node); 103 batadv_gw_node_free_ref(curr_gw_node);
104 104
105 spin_unlock_bh(&bat_priv->gw.list_lock); 105 spin_unlock_bh(&bat_priv->gw.list_lock);
106 } 106 }
107 107
108 void batadv_gw_deselect(struct batadv_priv *bat_priv) 108 void batadv_gw_deselect(struct batadv_priv *bat_priv)
109 { 109 {
110 atomic_set(&bat_priv->gw.reselect, 1); 110 atomic_set(&bat_priv->gw.reselect, 1);
111 } 111 }
112 112
113 static struct batadv_gw_node * 113 static struct batadv_gw_node *
114 batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv) 114 batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
115 { 115 {
116 struct batadv_neigh_node *router; 116 struct batadv_neigh_node *router;
117 struct hlist_node *node; 117 struct hlist_node *node;
118 struct batadv_gw_node *gw_node, *curr_gw = NULL; 118 struct batadv_gw_node *gw_node, *curr_gw = NULL;
119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 119 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
120 uint32_t gw_divisor; 120 uint32_t gw_divisor;
121 uint8_t max_tq = 0; 121 uint8_t max_tq = 0;
122 int down, up; 122 int down, up;
123 uint8_t tq_avg; 123 uint8_t tq_avg;
124 struct batadv_orig_node *orig_node; 124 struct batadv_orig_node *orig_node;
125 125
126 gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE; 126 gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
127 gw_divisor *= 64; 127 gw_divisor *= 64;
128 128
129 rcu_read_lock(); 129 rcu_read_lock();
130 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 130 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
131 if (gw_node->deleted) 131 if (gw_node->deleted)
132 continue; 132 continue;
133 133
134 orig_node = gw_node->orig_node; 134 orig_node = gw_node->orig_node;
135 router = batadv_orig_node_get_router(orig_node); 135 router = batadv_orig_node_get_router(orig_node);
136 if (!router) 136 if (!router)
137 continue; 137 continue;
138 138
139 if (!atomic_inc_not_zero(&gw_node->refcount)) 139 if (!atomic_inc_not_zero(&gw_node->refcount))
140 goto next; 140 goto next;
141 141
142 tq_avg = router->tq_avg; 142 tq_avg = router->tq_avg;
143 143
144 switch (atomic_read(&bat_priv->gw_sel_class)) { 144 switch (atomic_read(&bat_priv->gw_sel_class)) {
145 case 1: /* fast connection */ 145 case 1: /* fast connection */
146 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags, 146 batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
147 &down, &up); 147 &down, &up);
148 148
149 tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100; 149 tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
150 tmp_gw_factor /= gw_divisor; 150 tmp_gw_factor /= gw_divisor;
151 151
152 if ((tmp_gw_factor > max_gw_factor) || 152 if ((tmp_gw_factor > max_gw_factor) ||
153 ((tmp_gw_factor == max_gw_factor) && 153 ((tmp_gw_factor == max_gw_factor) &&
154 (tq_avg > max_tq))) { 154 (tq_avg > max_tq))) {
155 if (curr_gw) 155 if (curr_gw)
156 batadv_gw_node_free_ref(curr_gw); 156 batadv_gw_node_free_ref(curr_gw);
157 curr_gw = gw_node; 157 curr_gw = gw_node;
158 atomic_inc(&curr_gw->refcount); 158 atomic_inc(&curr_gw->refcount);
159 } 159 }
160 break; 160 break;
161 161
162 default: /* 2: stable connection (use best statistic) 162 default: /* 2: stable connection (use best statistic)
163 * 3: fast-switch (use best statistic but change as 163 * 3: fast-switch (use best statistic but change as
164 * soon as a better gateway appears) 164 * soon as a better gateway appears)
165 * XX: late-switch (use best statistic but change as 165 * XX: late-switch (use best statistic but change as
166 * soon as a better gateway appears which has 166 * soon as a better gateway appears which has
167 * $routing_class more tq points) 167 * $routing_class more tq points)
168 */ 168 */
169 if (tq_avg > max_tq) { 169 if (tq_avg > max_tq) {
170 if (curr_gw) 170 if (curr_gw)
171 batadv_gw_node_free_ref(curr_gw); 171 batadv_gw_node_free_ref(curr_gw);
172 curr_gw = gw_node; 172 curr_gw = gw_node;
173 atomic_inc(&curr_gw->refcount); 173 atomic_inc(&curr_gw->refcount);
174 } 174 }
175 break; 175 break;
176 } 176 }
177 177
178 if (tq_avg > max_tq) 178 if (tq_avg > max_tq)
179 max_tq = tq_avg; 179 max_tq = tq_avg;
180 180
181 if (tmp_gw_factor > max_gw_factor) 181 if (tmp_gw_factor > max_gw_factor)
182 max_gw_factor = tmp_gw_factor; 182 max_gw_factor = tmp_gw_factor;
183 183
184 batadv_gw_node_free_ref(gw_node); 184 batadv_gw_node_free_ref(gw_node);
185 185
186 next: 186 next:
187 batadv_neigh_node_free_ref(router); 187 batadv_neigh_node_free_ref(router);
188 } 188 }
189 rcu_read_unlock(); 189 rcu_read_unlock();
190 190
191 return curr_gw; 191 return curr_gw;
192 } 192 }
193 193
194 void batadv_gw_election(struct batadv_priv *bat_priv) 194 void batadv_gw_election(struct batadv_priv *bat_priv)
195 { 195 {
196 struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL; 196 struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
197 struct batadv_neigh_node *router = NULL; 197 struct batadv_neigh_node *router = NULL;
198 char gw_addr[18] = { '\0' }; 198 char gw_addr[18] = { '\0' };
199 199
200 /* The batman daemon checks here if we already passed a full originator 200 /* The batman daemon checks here if we already passed a full originator
201 * cycle in order to make sure we don't choose the first gateway we 201 * cycle in order to make sure we don't choose the first gateway we
202 * hear about. This check is based on the daemon's uptime which we 202 * hear about. This check is based on the daemon's uptime which we
203 * don't have. 203 * don't have.
204 */ 204 */
205 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) 205 if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
206 goto out; 206 goto out;
207 207
208 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 208 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
209 209
210 if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw) 210 if (!batadv_atomic_dec_not_zero(&bat_priv->gw.reselect) && curr_gw)
211 goto out; 211 goto out;
212 212
213 next_gw = batadv_gw_get_best_gw_node(bat_priv); 213 next_gw = batadv_gw_get_best_gw_node(bat_priv);
214 214
215 if (curr_gw == next_gw) 215 if (curr_gw == next_gw)
216 goto out; 216 goto out;
217 217
218 if (next_gw) { 218 if (next_gw) {
219 sprintf(gw_addr, "%pM", next_gw->orig_node->orig); 219 sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
220 220
221 router = batadv_orig_node_get_router(next_gw->orig_node); 221 router = batadv_orig_node_get_router(next_gw->orig_node);
222 if (!router) { 222 if (!router) {
223 batadv_gw_deselect(bat_priv); 223 batadv_gw_deselect(bat_priv);
224 goto out; 224 goto out;
225 } 225 }
226 } 226 }
227 227
228 if ((curr_gw) && (!next_gw)) { 228 if ((curr_gw) && (!next_gw)) {
229 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 229 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
230 "Removing selected gateway - no gateway in range\n"); 230 "Removing selected gateway - no gateway in range\n");
231 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, 231 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
232 NULL); 232 NULL);
233 } else if ((!curr_gw) && (next_gw)) { 233 } else if ((!curr_gw) && (next_gw)) {
234 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 234 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
235 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", 235 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
236 next_gw->orig_node->orig, 236 next_gw->orig_node->orig,
237 next_gw->orig_node->gw_flags, router->tq_avg); 237 next_gw->orig_node->gw_flags, router->tq_avg);
238 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD, 238 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
239 gw_addr); 239 gw_addr);
240 } else { 240 } else {
241 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 241 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
242 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n", 242 "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
243 next_gw->orig_node->orig, 243 next_gw->orig_node->orig,
244 next_gw->orig_node->gw_flags, router->tq_avg); 244 next_gw->orig_node->gw_flags, router->tq_avg);
245 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE, 245 batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
246 gw_addr); 246 gw_addr);
247 } 247 }
248 248
249 batadv_gw_select(bat_priv, next_gw); 249 batadv_gw_select(bat_priv, next_gw);
250 250
251 out: 251 out:
252 if (curr_gw) 252 if (curr_gw)
253 batadv_gw_node_free_ref(curr_gw); 253 batadv_gw_node_free_ref(curr_gw);
254 if (next_gw) 254 if (next_gw)
255 batadv_gw_node_free_ref(next_gw); 255 batadv_gw_node_free_ref(next_gw);
256 if (router) 256 if (router)
257 batadv_neigh_node_free_ref(router); 257 batadv_neigh_node_free_ref(router);
258 } 258 }
259 259
260 void batadv_gw_check_election(struct batadv_priv *bat_priv, 260 void batadv_gw_check_election(struct batadv_priv *bat_priv,
261 struct batadv_orig_node *orig_node) 261 struct batadv_orig_node *orig_node)
262 { 262 {
263 struct batadv_orig_node *curr_gw_orig; 263 struct batadv_orig_node *curr_gw_orig;
264 struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL; 264 struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
265 uint8_t gw_tq_avg, orig_tq_avg; 265 uint8_t gw_tq_avg, orig_tq_avg;
266 266
267 curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); 267 curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
268 if (!curr_gw_orig) 268 if (!curr_gw_orig)
269 goto deselect; 269 goto deselect;
270 270
271 router_gw = batadv_orig_node_get_router(curr_gw_orig); 271 router_gw = batadv_orig_node_get_router(curr_gw_orig);
272 if (!router_gw) 272 if (!router_gw)
273 goto deselect; 273 goto deselect;
274 274
275 /* this node already is the gateway */ 275 /* this node already is the gateway */
276 if (curr_gw_orig == orig_node) 276 if (curr_gw_orig == orig_node)
277 goto out; 277 goto out;
278 278
279 router_orig = batadv_orig_node_get_router(orig_node); 279 router_orig = batadv_orig_node_get_router(orig_node);
280 if (!router_orig) 280 if (!router_orig)
281 goto out; 281 goto out;
282 282
283 gw_tq_avg = router_gw->tq_avg; 283 gw_tq_avg = router_gw->tq_avg;
284 orig_tq_avg = router_orig->tq_avg; 284 orig_tq_avg = router_orig->tq_avg;
285 285
286 /* the TQ value has to be better */ 286 /* the TQ value has to be better */
287 if (orig_tq_avg < gw_tq_avg) 287 if (orig_tq_avg < gw_tq_avg)
288 goto out; 288 goto out;
289 289
290 /* if the routing class is greater than 3 the value tells us how much 290 /* if the routing class is greater than 3 the value tells us how much
291 * greater the TQ value of the new gateway must be 291 * greater the TQ value of the new gateway must be
292 */ 292 */
293 if ((atomic_read(&bat_priv->gw_sel_class) > 3) && 293 if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
294 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) 294 (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
295 goto out; 295 goto out;
296 296
297 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 297 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
298 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n", 298 "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
299 gw_tq_avg, orig_tq_avg); 299 gw_tq_avg, orig_tq_avg);
300 300
301 deselect: 301 deselect:
302 batadv_gw_deselect(bat_priv); 302 batadv_gw_deselect(bat_priv);
303 out: 303 out:
304 if (curr_gw_orig) 304 if (curr_gw_orig)
305 batadv_orig_node_free_ref(curr_gw_orig); 305 batadv_orig_node_free_ref(curr_gw_orig);
306 if (router_gw) 306 if (router_gw)
307 batadv_neigh_node_free_ref(router_gw); 307 batadv_neigh_node_free_ref(router_gw);
308 if (router_orig) 308 if (router_orig)
309 batadv_neigh_node_free_ref(router_orig); 309 batadv_neigh_node_free_ref(router_orig);
310 310
311 return; 311 return;
312 } 312 }
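The gw_sel_class check above doubles as switching hysteresis: when the class is greater than 3, a candidate gateway must beat the current one by at least that many TQ points before a reselection is triggered. A small model of that decision, using plain ints instead of the kernel's u8 TQ averages and the atomic sysfs knob:

    /* Returns 1 when batadv_gw_deselect() would be called for the new
     * candidate, 0 when the current gateway is kept. Pure model of the
     * comparisons above, not kernel code.
     */
    static int gw_reselect_wanted(int gw_sel_class, int gw_tq_avg, int orig_tq_avg)
    {
            if (orig_tq_avg < gw_tq_avg)
                    return 0;       /* candidate is not better at all */

            if (gw_sel_class > 3 && orig_tq_avg - gw_tq_avg < gw_sel_class)
                    return 0;       /* better, but not by the required margin */

            return 1;
    }

With gw_sel_class set to 20, for example, a candidate at TQ 210 does not displace a current gateway at TQ 200, but a candidate at TQ 225 does.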
313 313
314 static void batadv_gw_node_add(struct batadv_priv *bat_priv, 314 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
315 struct batadv_orig_node *orig_node, 315 struct batadv_orig_node *orig_node,
316 uint8_t new_gwflags) 316 uint8_t new_gwflags)
317 { 317 {
318 struct batadv_gw_node *gw_node; 318 struct batadv_gw_node *gw_node;
319 int down, up; 319 int down, up;
320 320
321 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC); 321 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
322 if (!gw_node) 322 if (!gw_node)
323 return; 323 return;
324 324
325 INIT_HLIST_NODE(&gw_node->list); 325 INIT_HLIST_NODE(&gw_node->list);
326 gw_node->orig_node = orig_node; 326 gw_node->orig_node = orig_node;
327 atomic_set(&gw_node->refcount, 1); 327 atomic_set(&gw_node->refcount, 1);
328 328
329 spin_lock_bh(&bat_priv->gw.list_lock); 329 spin_lock_bh(&bat_priv->gw.list_lock);
330 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list); 330 hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
331 spin_unlock_bh(&bat_priv->gw.list_lock); 331 spin_unlock_bh(&bat_priv->gw.list_lock);
332 332
333 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up); 333 batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
334 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 334 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
335 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", 335 "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
336 orig_node->orig, new_gwflags, 336 orig_node->orig, new_gwflags,
337 (down > 2048 ? down / 1024 : down), 337 (down > 2048 ? down / 1024 : down),
338 (down > 2048 ? "MBit" : "KBit"), 338 (down > 2048 ? "MBit" : "KBit"),
339 (up > 2048 ? up / 1024 : up), 339 (up > 2048 ? up / 1024 : up),
340 (up > 2048 ? "MBit" : "KBit")); 340 (up > 2048 ? "MBit" : "KBit"));
341 } 341 }
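The debug message above prints bandwidth as KBit below roughly 2 MBit and as MBit above, repeating the same ternaries for the down- and uplink values. A tiny helper in the same spirit, purely illustrative and not part of the patch:

    /* Illustrative only: format a kbit/s value the way the messages above
     * do, switching to MBit once the value exceeds 2048 kbit/s.
     */
    static void format_bandwidth(int kbit, int *value, const char **unit)
    {
            *value = kbit > 2048 ? kbit / 1024 : kbit;
            *unit = kbit > 2048 ? "MBit" : "KBit";
    }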
342 342
343 void batadv_gw_node_update(struct batadv_priv *bat_priv, 343 void batadv_gw_node_update(struct batadv_priv *bat_priv,
344 struct batadv_orig_node *orig_node, 344 struct batadv_orig_node *orig_node,
345 uint8_t new_gwflags) 345 uint8_t new_gwflags)
346 { 346 {
347 struct hlist_node *node; 347 struct hlist_node *node;
348 struct batadv_gw_node *gw_node, *curr_gw; 348 struct batadv_gw_node *gw_node, *curr_gw;
349 349
350 /* Note: We don't need a NULL check here, since curr_gw never gets 350 /* Note: We don't need a NULL check here, since curr_gw never gets
351 * dereferenced. If curr_gw is NULL we also should not exit as we may 351 * dereferenced. If curr_gw is NULL we also should not exit as we may
352 * have this gateway in our list (duplication check!) even though we 352 * have this gateway in our list (duplication check!) even though we
353 * have no currently selected gateway. 353 * have no currently selected gateway.
354 */ 354 */
355 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 355 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
356 356
357 rcu_read_lock(); 357 rcu_read_lock();
358 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 358 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
359 if (gw_node->orig_node != orig_node) 359 if (gw_node->orig_node != orig_node)
360 continue; 360 continue;
361 361
362 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 362 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
363 "Gateway class of originator %pM changed from %i to %i\n", 363 "Gateway class of originator %pM changed from %i to %i\n",
364 orig_node->orig, gw_node->orig_node->gw_flags, 364 orig_node->orig, gw_node->orig_node->gw_flags,
365 new_gwflags); 365 new_gwflags);
366 366
367 gw_node->deleted = 0; 367 gw_node->deleted = 0;
368 368
369 if (new_gwflags == BATADV_NO_FLAGS) { 369 if (new_gwflags == BATADV_NO_FLAGS) {
370 gw_node->deleted = jiffies; 370 gw_node->deleted = jiffies;
371 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 371 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
372 "Gateway %pM removed from gateway list\n", 372 "Gateway %pM removed from gateway list\n",
373 orig_node->orig); 373 orig_node->orig);
374 374
375 if (gw_node == curr_gw) 375 if (gw_node == curr_gw)
376 goto deselect; 376 goto deselect;
377 } 377 }
378 378
379 goto unlock; 379 goto unlock;
380 } 380 }
381 381
382 if (new_gwflags == BATADV_NO_FLAGS) 382 if (new_gwflags == BATADV_NO_FLAGS)
383 goto unlock; 383 goto unlock;
384 384
385 batadv_gw_node_add(bat_priv, orig_node, new_gwflags); 385 batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
386 goto unlock; 386 goto unlock;
387 387
388 deselect: 388 deselect:
389 batadv_gw_deselect(bat_priv); 389 batadv_gw_deselect(bat_priv);
390 unlock: 390 unlock:
391 rcu_read_unlock(); 391 rcu_read_unlock();
392 392
393 if (curr_gw) 393 if (curr_gw)
394 batadv_gw_node_free_ref(curr_gw); 394 batadv_gw_node_free_ref(curr_gw);
395 } 395 }
396 396
397 void batadv_gw_node_delete(struct batadv_priv *bat_priv, 397 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
398 struct batadv_orig_node *orig_node) 398 struct batadv_orig_node *orig_node)
399 { 399 {
400 batadv_gw_node_update(bat_priv, orig_node, 0); 400 batadv_gw_node_update(bat_priv, orig_node, 0);
401 } 401 }
402 402
403 void batadv_gw_node_purge(struct batadv_priv *bat_priv) 403 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
404 { 404 {
405 struct batadv_gw_node *gw_node, *curr_gw; 405 struct batadv_gw_node *gw_node, *curr_gw;
406 struct hlist_node *node, *node_tmp; 406 struct hlist_node *node, *node_tmp;
407 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT); 407 unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
408 int do_deselect = 0; 408 int do_deselect = 0;
409 409
410 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 410 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
411 411
412 spin_lock_bh(&bat_priv->gw.list_lock); 412 spin_lock_bh(&bat_priv->gw.list_lock);
413 413
414 hlist_for_each_entry_safe(gw_node, node, node_tmp, 414 hlist_for_each_entry_safe(gw_node, node, node_tmp,
415 &bat_priv->gw.list, list) { 415 &bat_priv->gw.list, list) {
416 if (((!gw_node->deleted) || 416 if (((!gw_node->deleted) ||
417 (time_before(jiffies, gw_node->deleted + timeout))) && 417 (time_before(jiffies, gw_node->deleted + timeout))) &&
418 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) 418 atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
419 continue; 419 continue;
420 420
421 if (curr_gw == gw_node) 421 if (curr_gw == gw_node)
422 do_deselect = 1; 422 do_deselect = 1;
423 423
424 hlist_del_rcu(&gw_node->list); 424 hlist_del_rcu(&gw_node->list);
425 batadv_gw_node_free_ref(gw_node); 425 batadv_gw_node_free_ref(gw_node);
426 } 426 }
427 427
428 spin_unlock_bh(&bat_priv->gw.list_lock); 428 spin_unlock_bh(&bat_priv->gw.list_lock);
429 429
430 /* gw_deselect() needs to acquire the gw_list_lock */ 430 /* gw_deselect() needs to acquire the gw_list_lock */
431 if (do_deselect) 431 if (do_deselect)
432 batadv_gw_deselect(bat_priv); 432 batadv_gw_deselect(bat_priv);
433 433
434 if (curr_gw) 434 if (curr_gw)
435 batadv_gw_node_free_ref(curr_gw); 435 batadv_gw_node_free_ref(curr_gw);
436 } 436 }
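The purge loop above keeps an entry while it is either not marked deleted or was deleted less than 2 * BATADV_PURGE_TIMEOUT ago, and only for as long as the mesh is still active. A compact model of that predicate, with plain integers instead of jiffies:

    /* Returns 1 if the gateway entry survives this purge pass. */
    static int gw_node_keep(unsigned long now, unsigned long deleted_at,
                            unsigned long timeout, int mesh_active)
    {
            if (!mesh_active)
                    return 0;       /* mesh shutting down: drop everything */

            if (!deleted_at)
                    return 1;       /* never marked deleted */

            return now < deleted_at + timeout;      /* still in grace period */
    }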
437 437
438 /* fails if orig_node has no router */ 438 /* fails if orig_node has no router */
439 static int batadv_write_buffer_text(struct batadv_priv *bat_priv, 439 static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
440 struct seq_file *seq, 440 struct seq_file *seq,
441 const struct batadv_gw_node *gw_node) 441 const struct batadv_gw_node *gw_node)
442 { 442 {
443 struct batadv_gw_node *curr_gw; 443 struct batadv_gw_node *curr_gw;
444 struct batadv_neigh_node *router; 444 struct batadv_neigh_node *router;
445 int down, up, ret = -1; 445 int down, up, ret = -1;
446 446
447 batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 447 batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
448 448
449 router = batadv_orig_node_get_router(gw_node->orig_node); 449 router = batadv_orig_node_get_router(gw_node->orig_node);
450 if (!router) 450 if (!router)
451 goto out; 451 goto out;
452 452
453 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 453 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
454 454
455 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 455 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
456 (curr_gw == gw_node ? "=>" : " "), 456 (curr_gw == gw_node ? "=>" : " "),
457 gw_node->orig_node->orig, 457 gw_node->orig_node->orig,
458 router->tq_avg, router->addr, 458 router->tq_avg, router->addr,
459 router->if_incoming->net_dev->name, 459 router->if_incoming->net_dev->name,
460 gw_node->orig_node->gw_flags, 460 gw_node->orig_node->gw_flags,
461 (down > 2048 ? down / 1024 : down), 461 (down > 2048 ? down / 1024 : down),
462 (down > 2048 ? "MBit" : "KBit"), 462 (down > 2048 ? "MBit" : "KBit"),
463 (up > 2048 ? up / 1024 : up), 463 (up > 2048 ? up / 1024 : up),
464 (up > 2048 ? "MBit" : "KBit")); 464 (up > 2048 ? "MBit" : "KBit"));
465 465
466 batadv_neigh_node_free_ref(router); 466 batadv_neigh_node_free_ref(router);
467 if (curr_gw) 467 if (curr_gw)
468 batadv_gw_node_free_ref(curr_gw); 468 batadv_gw_node_free_ref(curr_gw);
469 out: 469 out:
470 return ret; 470 return ret;
471 } 471 }
472 472
473 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) 473 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
474 { 474 {
475 struct net_device *net_dev = (struct net_device *)seq->private; 475 struct net_device *net_dev = (struct net_device *)seq->private;
476 struct batadv_priv *bat_priv = netdev_priv(net_dev); 476 struct batadv_priv *bat_priv = netdev_priv(net_dev);
477 struct batadv_hard_iface *primary_if; 477 struct batadv_hard_iface *primary_if;
478 struct batadv_gw_node *gw_node; 478 struct batadv_gw_node *gw_node;
479 struct hlist_node *node; 479 struct hlist_node *node;
480 int gw_count = 0, ret = 0; 480 int gw_count = 0;
481 481
482 primary_if = batadv_primary_if_get_selected(bat_priv); 482 primary_if = batadv_seq_print_text_primary_if_get(seq);
483 if (!primary_if) { 483 if (!primary_if)
484 ret = seq_printf(seq,
485 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
486 net_dev->name);
487 goto out; 484 goto out;
488 }
489 485
490 if (primary_if->if_status != BATADV_IF_ACTIVE) {
491 ret = seq_printf(seq,
492 "BATMAN mesh %s disabled - primary interface not active\n",
493 net_dev->name);
494 goto out;
495 }
496
497 seq_printf(seq, 486 seq_printf(seq,
498 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 487 " %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
499 "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF", 488 "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
500 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 489 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
501 primary_if->net_dev->dev_addr, net_dev->name); 490 primary_if->net_dev->dev_addr, net_dev->name);
502 491
503 rcu_read_lock(); 492 rcu_read_lock();
504 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) { 493 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw.list, list) {
505 if (gw_node->deleted) 494 if (gw_node->deleted)
506 continue; 495 continue;
507 496
508 /* fails if orig_node has no router */ 497 /* fails if orig_node has no router */
509 if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0) 498 if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
510 continue; 499 continue;
511 500
512 gw_count++; 501 gw_count++;
513 } 502 }
514 rcu_read_unlock(); 503 rcu_read_unlock();
515 504
516 if (gw_count == 0) 505 if (gw_count == 0)
517 seq_printf(seq, "No gateways in range ...\n"); 506 seq_printf(seq, "No gateways in range ...\n");
518 507
519 out: 508 out:
520 if (primary_if) 509 if (primary_if)
521 batadv_hardif_free_ref(primary_if); 510 batadv_hardif_free_ref(primary_if);
522 return ret; 511 return 0;
523 } 512 }
524 513
525 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) 514 static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
526 { 515 {
527 int ret = false; 516 int ret = false;
528 unsigned char *p; 517 unsigned char *p;
529 int pkt_len; 518 int pkt_len;
530 519
531 if (skb_linearize(skb) < 0) 520 if (skb_linearize(skb) < 0)
532 goto out; 521 goto out;
533 522
534 pkt_len = skb_headlen(skb); 523 pkt_len = skb_headlen(skb);
535 524
536 if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1) 525 if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
537 goto out; 526 goto out;
538 527
539 p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET; 528 p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
540 pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1; 529 pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;
541 530
542 /* Access the dhcp option lists. Each entry is made up by: 531 /* Access the dhcp option lists. Each entry is made up by:
543 * - octet 1: option type 532 * - octet 1: option type
544 * - octet 2: option data len (only if type != 255 and 0) 533 * - octet 2: option data len (only if type != 255 and 0)
545 * - octet 3: option data 534 * - octet 3: option data
546 */ 535 */
547 while (*p != 255 && !ret) { 536 while (*p != 255 && !ret) {
548 /* p now points to the first octet: option type */ 537 /* p now points to the first octet: option type */
549 if (*p == 53) { 538 if (*p == 53) {
550 /* type 53 is the message type option. 539 /* type 53 is the message type option.
551 * Jump the len octet and go to the data octet 540 * Jump the len octet and go to the data octet
552 */ 541 */
553 if (pkt_len < 2) 542 if (pkt_len < 2)
554 goto out; 543 goto out;
555 p += 2; 544 p += 2;
556 545
557 /* check if the message type is what we need */ 546 /* check if the message type is what we need */
558 if (*p == BATADV_DHCP_REQUEST) 547 if (*p == BATADV_DHCP_REQUEST)
559 ret = true; 548 ret = true;
560 break; 549 break;
561 } else if (*p == 0) { 550 } else if (*p == 0) {
562 /* option type 0 (padding), just go forward */ 551 /* option type 0 (padding), just go forward */
563 if (pkt_len < 1) 552 if (pkt_len < 1)
564 goto out; 553 goto out;
565 pkt_len--; 554 pkt_len--;
566 p++; 555 p++;
567 } else { 556 } else {
568 /* This is any other option. So we get the length... */ 557 /* This is any other option. So we get the length... */
569 if (pkt_len < 1) 558 if (pkt_len < 1)
570 goto out; 559 goto out;
571 pkt_len--; 560 pkt_len--;
572 p++; 561 p++;
573 562
574 /* ...and then we jump over the data */ 563 /* ...and then we jump over the data */
575 if (pkt_len < 1 + (*p)) 564 if (pkt_len < 1 + (*p))
576 goto out; 565 goto out;
577 pkt_len -= 1 + (*p); 566 pkt_len -= 1 + (*p);
578 p += 1 + (*p); 567 p += 1 + (*p);
579 } 568 }
580 } 569 }
581 out: 570 out:
582 return ret; 571 return ret;
583 } 572 }
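The option walk above relies on the standard DHCP options layout: one type octet, one length octet, then the data, with type 0 as single-octet padding and type 255 as end-of-options. A standalone sketch of the same walk with a sample buffer; the value 3 for DHCPREQUEST is the standard message-type code and is assumed here to be what BATADV_DHCP_REQUEST refers to.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DHCP_REQUEST 3  /* standard "message type" value for DHCPREQUEST */

    /* Userspace model of the option walk above: true if the DHCP message
     * type option (53) carries the value DHCP_REQUEST.
     */
    static bool is_dhcp_request(const uint8_t *p, size_t len)
    {
            size_t i = 0;

            while (i < len && p[i] != 255) {
                    if (p[i] == 53) {               /* message type option */
                            if (i + 2 >= len)
                                    return false;
                            return p[i + 2] == DHCP_REQUEST;
                    } else if (p[i] == 0) {         /* padding, one octet */
                            i++;
                    } else {                        /* skip type, len, data */
                            if (i + 1 >= len)
                                    return false;
                            i += 2 + p[i + 1];
                    }
            }
            return false;
    }

    int main(void)
    {
            /* pad, pad, option 53 (len 1, value 3 = DHCPREQUEST), end */
            const uint8_t opts[] = { 0, 0, 53, 1, 3, 255 };

            printf("%d\n", is_dhcp_request(opts, sizeof(opts)));   /* prints 1 */
            return 0;
    }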
584 573
585 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) 574 bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
586 { 575 {
587 struct ethhdr *ethhdr; 576 struct ethhdr *ethhdr;
588 struct iphdr *iphdr; 577 struct iphdr *iphdr;
589 struct ipv6hdr *ipv6hdr; 578 struct ipv6hdr *ipv6hdr;
590 struct udphdr *udphdr; 579 struct udphdr *udphdr;
591 580
592 /* check for ethernet header */ 581 /* check for ethernet header */
593 if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) 582 if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
594 return false; 583 return false;
595 ethhdr = (struct ethhdr *)skb->data; 584 ethhdr = (struct ethhdr *)skb->data;
596 *header_len += ETH_HLEN; 585 *header_len += ETH_HLEN;
597 586
598 /* check for initial vlan header */ 587 /* check for initial vlan header */
599 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { 588 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
600 if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) 589 if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
601 return false; 590 return false;
602 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); 591 ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
603 *header_len += VLAN_HLEN; 592 *header_len += VLAN_HLEN;
604 } 593 }
605 594
606 /* check for ip header */ 595 /* check for ip header */
607 switch (ntohs(ethhdr->h_proto)) { 596 switch (ntohs(ethhdr->h_proto)) {
608 case ETH_P_IP: 597 case ETH_P_IP:
609 if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) 598 if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
610 return false; 599 return false;
611 iphdr = (struct iphdr *)(skb->data + *header_len); 600 iphdr = (struct iphdr *)(skb->data + *header_len);
612 *header_len += iphdr->ihl * 4; 601 *header_len += iphdr->ihl * 4;
613 602
614 /* check for udp header */ 603 /* check for udp header */
615 if (iphdr->protocol != IPPROTO_UDP) 604 if (iphdr->protocol != IPPROTO_UDP)
616 return false; 605 return false;
617 606
618 break; 607 break;
619 case ETH_P_IPV6: 608 case ETH_P_IPV6:
620 if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) 609 if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
621 return false; 610 return false;
622 ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); 611 ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
623 *header_len += sizeof(*ipv6hdr); 612 *header_len += sizeof(*ipv6hdr);
624 613
625 /* check for udp header */ 614 /* check for udp header */
626 if (ipv6hdr->nexthdr != IPPROTO_UDP) 615 if (ipv6hdr->nexthdr != IPPROTO_UDP)
627 return false; 616 return false;
628 617
629 break; 618 break;
630 default: 619 default:
631 return false; 620 return false;
632 } 621 }
633 622
634 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) 623 if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
635 return false; 624 return false;
636 udphdr = (struct udphdr *)(skb->data + *header_len); 625 udphdr = (struct udphdr *)(skb->data + *header_len);
637 *header_len += sizeof(*udphdr); 626 *header_len += sizeof(*udphdr);
638 627
639 /* check for bootp port */ 628 /* check for bootp port */
640 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && 629 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
641 (ntohs(udphdr->dest) != 67)) 630 (ntohs(udphdr->dest) != 67))
642 return false; 631 return false;
643 632
644 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && 633 if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
645 (ntohs(udphdr->dest) != 547)) 634 (ntohs(udphdr->dest) != 547))
646 return false; 635 return false;
647 636
648 return true; 637 return true;
649 } 638 }
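For orientation, the header_len accumulated above for a VLAN-tagged IPv4 packet without IP options ends up at 46 bytes before the DHCP payload. The constants below restate that arithmetic; the sizes are the usual on-wire header lengths, not taken from kernel headers.

    /* Offset of the DHCP payload for Ethernet + 802.1Q + minimal IPv4 + UDP. */
    enum {
            EXAMPLE_ETH_LEN   = 14,  /* ETH_HLEN */
            EXAMPLE_VLAN_LEN  = 4,   /* VLAN_HLEN */
            EXAMPLE_IP_LEN    = 20,  /* iphdr with ihl == 5 */
            EXAMPLE_UDP_LEN   = 8,   /* sizeof(struct udphdr) */
            EXAMPLE_DHCP_OFFSET = EXAMPLE_ETH_LEN + EXAMPLE_VLAN_LEN +
                                  EXAMPLE_IP_LEN + EXAMPLE_UDP_LEN,  /* 46 */
    };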
650 639
651 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, 640 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
652 struct sk_buff *skb, struct ethhdr *ethhdr) 641 struct sk_buff *skb, struct ethhdr *ethhdr)
653 { 642 {
654 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; 643 struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
655 struct batadv_orig_node *orig_dst_node = NULL; 644 struct batadv_orig_node *orig_dst_node = NULL;
656 struct batadv_gw_node *curr_gw = NULL; 645 struct batadv_gw_node *curr_gw = NULL;
657 bool ret, out_of_range = false; 646 bool ret, out_of_range = false;
658 unsigned int header_len = 0; 647 unsigned int header_len = 0;
659 uint8_t curr_tq_avg; 648 uint8_t curr_tq_avg;
660 649
661 ret = batadv_gw_is_dhcp_target(skb, &header_len); 650 ret = batadv_gw_is_dhcp_target(skb, &header_len);
662 if (!ret) 651 if (!ret)
663 goto out; 652 goto out;
664 653
665 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, 654 orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
666 ethhdr->h_dest); 655 ethhdr->h_dest);
667 if (!orig_dst_node) 656 if (!orig_dst_node)
668 goto out; 657 goto out;
669 658
670 if (!orig_dst_node->gw_flags) 659 if (!orig_dst_node->gw_flags)
671 goto out; 660 goto out;
672 661
673 ret = batadv_is_type_dhcprequest(skb, header_len); 662 ret = batadv_is_type_dhcprequest(skb, header_len);
674 if (!ret) 663 if (!ret)
675 goto out; 664 goto out;
676 665
677 switch (atomic_read(&bat_priv->gw_mode)) { 666 switch (atomic_read(&bat_priv->gw_mode)) {
678 case BATADV_GW_MODE_SERVER: 667 case BATADV_GW_MODE_SERVER:
679 /* If we are a GW then we are our best GW. We can artificially 668 /* If we are a GW then we are our best GW. We can artificially
680 * set the tq towards ourself as the maximum value 669 * set the tq towards ourself as the maximum value
681 */ 670 */
682 curr_tq_avg = BATADV_TQ_MAX_VALUE; 671 curr_tq_avg = BATADV_TQ_MAX_VALUE;
683 break; 672 break;
684 case BATADV_GW_MODE_CLIENT: 673 case BATADV_GW_MODE_CLIENT:
685 curr_gw = batadv_gw_get_selected_gw_node(bat_priv); 674 curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
686 if (!curr_gw) 675 if (!curr_gw)
687 goto out; 676 goto out;
688 677
689 /* packet is going to our gateway */ 678 /* packet is going to our gateway */
690 if (curr_gw->orig_node == orig_dst_node) 679 if (curr_gw->orig_node == orig_dst_node)
691 goto out; 680 goto out;
692 681
693 /* If the dhcp packet has been sent to a different gw, 682 /* If the dhcp packet has been sent to a different gw,
694 * we have to evaluate whether the old gw is still 683 * we have to evaluate whether the old gw is still
695 * reliable enough 684 * reliable enough
696 */ 685 */
697 neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node, 686 neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
698 NULL); 687 NULL);
699 if (!neigh_curr) 688 if (!neigh_curr)
700 goto out; 689 goto out;
701 690
702 curr_tq_avg = neigh_curr->tq_avg; 691 curr_tq_avg = neigh_curr->tq_avg;
703 break; 692 break;
704 case BATADV_GW_MODE_OFF: 693 case BATADV_GW_MODE_OFF:
705 default: 694 default:
706 goto out; 695 goto out;
707 } 696 }
708 697
709 neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL); 698 neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
710 if (!neigh_old) 699 if (!neigh_old)
711 goto out; 700 goto out;
712 701
713 if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD) 702 if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
714 out_of_range = true; 703 out_of_range = true;
715 704
716 out: 705 out:
717 if (orig_dst_node) 706 if (orig_dst_node)
718 batadv_orig_node_free_ref(orig_dst_node); 707 batadv_orig_node_free_ref(orig_dst_node);
719 if (curr_gw) 708 if (curr_gw)
720 batadv_gw_node_free_ref(curr_gw); 709 batadv_gw_node_free_ref(curr_gw);
721 if (neigh_old) 710 if (neigh_old)
722 batadv_neigh_node_free_ref(neigh_old); 711 batadv_neigh_node_free_ref(neigh_old);
723 if (neigh_curr) 712 if (neigh_curr)
724 batadv_neigh_node_free_ref(neigh_curr); 713 batadv_neigh_node_free_ref(neigh_curr);
725 return out_of_range; 714 return out_of_range;
726 } 715 }
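The final comparison declares the DHCP renewal out of range when the path to our own or selected gateway beats the path to the gateway the client is still talking to by more than BATADV_GW_THRESHOLD TQ points. A one-line model, with the threshold passed in rather than hard-coded since its value is defined elsewhere in the sources:

    /* Model of the threshold test above: returns nonzero when the packet
     * should be treated as out of range.
     */
    static int dhcp_renewal_out_of_range(int curr_tq_avg, int old_tq_avg,
                                         int threshold)
    {
            return curr_tq_avg - old_tq_avg > threshold;
    }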
727 716
net/batman-adv/main.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #include "main.h" 20 #include "main.h"
21 #include "sysfs.h" 21 #include "sysfs.h"
22 #include "debugfs.h" 22 #include "debugfs.h"
23 #include "routing.h" 23 #include "routing.h"
24 #include "send.h" 24 #include "send.h"
25 #include "originator.h" 25 #include "originator.h"
26 #include "soft-interface.h" 26 #include "soft-interface.h"
27 #include "icmp_socket.h" 27 #include "icmp_socket.h"
28 #include "translation-table.h" 28 #include "translation-table.h"
29 #include "hard-interface.h" 29 #include "hard-interface.h"
30 #include "gateway_client.h" 30 #include "gateway_client.h"
31 #include "bridge_loop_avoidance.h" 31 #include "bridge_loop_avoidance.h"
32 #include "vis.h" 32 #include "vis.h"
33 #include "hash.h" 33 #include "hash.h"
34 #include "bat_algo.h" 34 #include "bat_algo.h"
35 35
36 36
37 /* List manipulations on hardif_list have to be rtnl_lock()'ed, 37 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
38 * list traversals just rcu-locked 38 * list traversals just rcu-locked
39 */ 39 */
40 struct list_head batadv_hardif_list; 40 struct list_head batadv_hardif_list;
41 static int (*batadv_rx_handler[256])(struct sk_buff *, 41 static int (*batadv_rx_handler[256])(struct sk_buff *,
42 struct batadv_hard_iface *); 42 struct batadv_hard_iface *);
43 char batadv_routing_algo[20] = "BATMAN_IV"; 43 char batadv_routing_algo[20] = "BATMAN_IV";
44 static struct hlist_head batadv_algo_list; 44 static struct hlist_head batadv_algo_list;
45 45
46 unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 46 unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
47 47
48 struct workqueue_struct *batadv_event_workqueue; 48 struct workqueue_struct *batadv_event_workqueue;
49 49
50 static void batadv_recv_handler_init(void); 50 static void batadv_recv_handler_init(void);
51 51
52 static int __init batadv_init(void) 52 static int __init batadv_init(void)
53 { 53 {
54 INIT_LIST_HEAD(&batadv_hardif_list); 54 INIT_LIST_HEAD(&batadv_hardif_list);
55 INIT_HLIST_HEAD(&batadv_algo_list); 55 INIT_HLIST_HEAD(&batadv_algo_list);
56 56
57 batadv_recv_handler_init(); 57 batadv_recv_handler_init();
58 58
59 batadv_iv_init(); 59 batadv_iv_init();
60 60
61 batadv_event_workqueue = create_singlethread_workqueue("bat_events"); 61 batadv_event_workqueue = create_singlethread_workqueue("bat_events");
62 62
63 if (!batadv_event_workqueue) 63 if (!batadv_event_workqueue)
64 return -ENOMEM; 64 return -ENOMEM;
65 65
66 batadv_socket_init(); 66 batadv_socket_init();
67 batadv_debugfs_init(); 67 batadv_debugfs_init();
68 68
69 register_netdevice_notifier(&batadv_hard_if_notifier); 69 register_netdevice_notifier(&batadv_hard_if_notifier);
70 70
71 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n", 71 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
72 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION); 72 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
73 73
74 return 0; 74 return 0;
75 } 75 }
76 76
77 static void __exit batadv_exit(void) 77 static void __exit batadv_exit(void)
78 { 78 {
79 batadv_debugfs_destroy(); 79 batadv_debugfs_destroy();
80 unregister_netdevice_notifier(&batadv_hard_if_notifier); 80 unregister_netdevice_notifier(&batadv_hard_if_notifier);
81 batadv_hardif_remove_interfaces(); 81 batadv_hardif_remove_interfaces();
82 82
83 flush_workqueue(batadv_event_workqueue); 83 flush_workqueue(batadv_event_workqueue);
84 destroy_workqueue(batadv_event_workqueue); 84 destroy_workqueue(batadv_event_workqueue);
85 batadv_event_workqueue = NULL; 85 batadv_event_workqueue = NULL;
86 86
87 rcu_barrier(); 87 rcu_barrier();
88 } 88 }
89 89
90 int batadv_mesh_init(struct net_device *soft_iface) 90 int batadv_mesh_init(struct net_device *soft_iface)
91 { 91 {
92 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 92 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
93 int ret; 93 int ret;
94 94
95 spin_lock_init(&bat_priv->forw_bat_list_lock); 95 spin_lock_init(&bat_priv->forw_bat_list_lock);
96 spin_lock_init(&bat_priv->forw_bcast_list_lock); 96 spin_lock_init(&bat_priv->forw_bcast_list_lock);
97 spin_lock_init(&bat_priv->tt.changes_list_lock); 97 spin_lock_init(&bat_priv->tt.changes_list_lock);
98 spin_lock_init(&bat_priv->tt.req_list_lock); 98 spin_lock_init(&bat_priv->tt.req_list_lock);
99 spin_lock_init(&bat_priv->tt.roam_list_lock); 99 spin_lock_init(&bat_priv->tt.roam_list_lock);
100 spin_lock_init(&bat_priv->tt.last_changeset_lock); 100 spin_lock_init(&bat_priv->tt.last_changeset_lock);
101 spin_lock_init(&bat_priv->gw.list_lock); 101 spin_lock_init(&bat_priv->gw.list_lock);
102 spin_lock_init(&bat_priv->vis.hash_lock); 102 spin_lock_init(&bat_priv->vis.hash_lock);
103 spin_lock_init(&bat_priv->vis.list_lock); 103 spin_lock_init(&bat_priv->vis.list_lock);
104 104
105 INIT_HLIST_HEAD(&bat_priv->forw_bat_list); 105 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
106 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 106 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
107 INIT_HLIST_HEAD(&bat_priv->gw.list); 107 INIT_HLIST_HEAD(&bat_priv->gw.list);
108 INIT_LIST_HEAD(&bat_priv->tt.changes_list); 108 INIT_LIST_HEAD(&bat_priv->tt.changes_list);
109 INIT_LIST_HEAD(&bat_priv->tt.req_list); 109 INIT_LIST_HEAD(&bat_priv->tt.req_list);
110 INIT_LIST_HEAD(&bat_priv->tt.roam_list); 110 INIT_LIST_HEAD(&bat_priv->tt.roam_list);
111 111
112 ret = batadv_originator_init(bat_priv); 112 ret = batadv_originator_init(bat_priv);
113 if (ret < 0) 113 if (ret < 0)
114 goto err; 114 goto err;
115 115
116 ret = batadv_tt_init(bat_priv); 116 ret = batadv_tt_init(bat_priv);
117 if (ret < 0) 117 if (ret < 0)
118 goto err; 118 goto err;
119 119
120 batadv_tt_local_add(soft_iface, soft_iface->dev_addr, 120 batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
121 BATADV_NULL_IFINDEX); 121 BATADV_NULL_IFINDEX);
122 122
123 ret = batadv_vis_init(bat_priv); 123 ret = batadv_vis_init(bat_priv);
124 if (ret < 0) 124 if (ret < 0)
125 goto err; 125 goto err;
126 126
127 ret = batadv_bla_init(bat_priv); 127 ret = batadv_bla_init(bat_priv);
128 if (ret < 0) 128 if (ret < 0)
129 goto err; 129 goto err;
130 130
131 atomic_set(&bat_priv->gw.reselect, 0); 131 atomic_set(&bat_priv->gw.reselect, 0);
132 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); 132 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
133 133
134 return 0; 134 return 0;
135 135
136 err: 136 err:
137 batadv_mesh_free(soft_iface); 137 batadv_mesh_free(soft_iface);
138 return ret; 138 return ret;
139 } 139 }
140 140
141 void batadv_mesh_free(struct net_device *soft_iface) 141 void batadv_mesh_free(struct net_device *soft_iface)
142 { 142 {
143 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 143 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
144 144
145 atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); 145 atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
146 146
147 batadv_purge_outstanding_packets(bat_priv, NULL); 147 batadv_purge_outstanding_packets(bat_priv, NULL);
148 148
149 batadv_vis_quit(bat_priv); 149 batadv_vis_quit(bat_priv);
150 150
151 batadv_gw_node_purge(bat_priv); 151 batadv_gw_node_purge(bat_priv);
152 batadv_originator_free(bat_priv); 152 batadv_originator_free(bat_priv);
153 153
154 batadv_tt_free(bat_priv); 154 batadv_tt_free(bat_priv);
155 155
156 batadv_bla_free(bat_priv); 156 batadv_bla_free(bat_priv);
157 157
158 free_percpu(bat_priv->bat_counters); 158 free_percpu(bat_priv->bat_counters);
159 159
160 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 160 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
161 } 161 }
162 162
163 void batadv_inc_module_count(void) 163 void batadv_inc_module_count(void)
164 { 164 {
165 try_module_get(THIS_MODULE); 165 try_module_get(THIS_MODULE);
166 } 166 }
167 167
168 void batadv_dec_module_count(void) 168 void batadv_dec_module_count(void)
169 { 169 {
170 module_put(THIS_MODULE); 170 module_put(THIS_MODULE);
171 } 171 }
172 172
173 int batadv_is_my_mac(const uint8_t *addr) 173 int batadv_is_my_mac(const uint8_t *addr)
174 { 174 {
175 const struct batadv_hard_iface *hard_iface; 175 const struct batadv_hard_iface *hard_iface;
176 176
177 rcu_read_lock(); 177 rcu_read_lock();
178 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { 178 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
179 if (hard_iface->if_status != BATADV_IF_ACTIVE) 179 if (hard_iface->if_status != BATADV_IF_ACTIVE)
180 continue; 180 continue;
181 181
182 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { 182 if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
183 rcu_read_unlock(); 183 rcu_read_unlock();
184 return 1; 184 return 1;
185 } 185 }
186 } 186 }
187 rcu_read_unlock(); 187 rcu_read_unlock();
188 return 0; 188 return 0;
189 } 189 }
190 190
191 /**
192 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
193 * function that requires the primary interface
194 * @seq: debugfs table seq_file struct
195 *
196 * Returns primary interface if found or NULL otherwise.
197 */
198 struct batadv_hard_iface *
199 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
200 {
201 struct net_device *net_dev = (struct net_device *)seq->private;
202 struct batadv_priv *bat_priv = netdev_priv(net_dev);
203 struct batadv_hard_iface *primary_if;
204
205 primary_if = batadv_primary_if_get_selected(bat_priv);
206
207 if (!primary_if) {
208 seq_printf(seq,
209 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
210 net_dev->name);
211 goto out;
212 }
213
214 if (primary_if->if_status == BATADV_IF_ACTIVE)
215 goto out;
216
217 seq_printf(seq,
218 "BATMAN mesh %s disabled - primary interface not active\n",
219 net_dev->name);
220 batadv_hardif_free_ref(primary_if);
221 primary_if = NULL;
222
223 out:
224 return primary_if;
225 }
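This helper is the consolidation the commit is about: it folds together the two checks (no primary interface selected, primary interface not active) that every debugfs table printer used to repeat, and prints the corresponding "mesh disabled" message itself. Callers are reduced to a NULL check, as in the converted gateway printer earlier in this diff. A sketch of the resulting caller pattern follows; the function name is made up for illustration, the calls it makes are the ones shown in this patch.

    static int batadv_example_seq_print_text(struct seq_file *seq, void *offset)
    {
            struct batadv_hard_iface *primary_if;

            /* emits the "disabled" message and returns NULL on failure */
            primary_if = batadv_seq_print_text_primary_if_get(seq);
            if (!primary_if)
                    return 0;

            /* ... print the actual table with seq_printf() here ... */

            batadv_hardif_free_ref(primary_if);
            return 0;
    }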
226
191 static int batadv_recv_unhandled_packet(struct sk_buff *skb, 227 static int batadv_recv_unhandled_packet(struct sk_buff *skb,
192 struct batadv_hard_iface *recv_if) 228 struct batadv_hard_iface *recv_if)
193 { 229 {
194 return NET_RX_DROP; 230 return NET_RX_DROP;
195 } 231 }
196 232
197 /* incoming packets with the batman ethertype received on any active hard 233 /* incoming packets with the batman ethertype received on any active hard
198 * interface 234 * interface
199 */ 235 */
200 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 236 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
201 struct packet_type *ptype, 237 struct packet_type *ptype,
202 struct net_device *orig_dev) 238 struct net_device *orig_dev)
203 { 239 {
204 struct batadv_priv *bat_priv; 240 struct batadv_priv *bat_priv;
205 struct batadv_ogm_packet *batadv_ogm_packet; 241 struct batadv_ogm_packet *batadv_ogm_packet;
206 struct batadv_hard_iface *hard_iface; 242 struct batadv_hard_iface *hard_iface;
207 uint8_t idx; 243 uint8_t idx;
208 int ret; 244 int ret;
209 245
210 hard_iface = container_of(ptype, struct batadv_hard_iface, 246 hard_iface = container_of(ptype, struct batadv_hard_iface,
211 batman_adv_ptype); 247 batman_adv_ptype);
212 skb = skb_share_check(skb, GFP_ATOMIC); 248 skb = skb_share_check(skb, GFP_ATOMIC);
213 249
214 /* skb was released by skb_share_check() */ 250 /* skb was released by skb_share_check() */
215 if (!skb) 251 if (!skb)
216 goto err_out; 252 goto err_out;
217 253
218 /* packet should hold at least type and version */ 254 /* packet should hold at least type and version */
219 if (unlikely(!pskb_may_pull(skb, 2))) 255 if (unlikely(!pskb_may_pull(skb, 2)))
220 goto err_free; 256 goto err_free;
221 257
222 /* expect a valid ethernet header here. */ 258 /* expect a valid ethernet header here. */
223 if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) 259 if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
224 goto err_free; 260 goto err_free;
225 261
226 if (!hard_iface->soft_iface) 262 if (!hard_iface->soft_iface)
227 goto err_free; 263 goto err_free;
228 264
229 bat_priv = netdev_priv(hard_iface->soft_iface); 265 bat_priv = netdev_priv(hard_iface->soft_iface);
230 266
231 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) 267 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
232 goto err_free; 268 goto err_free;
233 269
234 /* discard frames on not active interfaces */ 270 /* discard frames on not active interfaces */
235 if (hard_iface->if_status != BATADV_IF_ACTIVE) 271 if (hard_iface->if_status != BATADV_IF_ACTIVE)
236 goto err_free; 272 goto err_free;
237 273
238 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data; 274 batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
239 275
240 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { 276 if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
241 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 277 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
242 "Drop packet: incompatible batman version (%i)\n", 278 "Drop packet: incompatible batman version (%i)\n",
243 batadv_ogm_packet->header.version); 279 batadv_ogm_packet->header.version);
244 goto err_free; 280 goto err_free;
245 } 281 }
246 282
247 /* all receive handlers return whether they received or reused 283 /* all receive handlers return whether they received or reused
248 * the supplied skb. if not, we have to free the skb. 284 * the supplied skb. if not, we have to free the skb.
249 */ 285 */
250 idx = batadv_ogm_packet->header.packet_type; 286 idx = batadv_ogm_packet->header.packet_type;
251 ret = (*batadv_rx_handler[idx])(skb, hard_iface); 287 ret = (*batadv_rx_handler[idx])(skb, hard_iface);
252 288
253 if (ret == NET_RX_DROP) 289 if (ret == NET_RX_DROP)
254 kfree_skb(skb); 290 kfree_skb(skb);
255 291
256 /* return NET_RX_SUCCESS in any case as we 292 /* return NET_RX_SUCCESS in any case as we
257 * most probably dropped the packet for 293 * most probably dropped the packet for
258 * routing-logical reasons. 294 * routing-logical reasons.
259 */ 295 */
260 return NET_RX_SUCCESS; 296 return NET_RX_SUCCESS;
261 297
262 err_free: 298 err_free:
263 kfree_skb(skb); 299 kfree_skb(skb);
264 err_out: 300 err_out:
265 return NET_RX_DROP; 301 return NET_RX_DROP;
266 } 302 }
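The receive path above dispatches on the packet type octet through the 256-entry batadv_rx_handler table initialised below, so unknown types fall through to a drop handler instead of needing an explicit check. A userspace model of that dispatch scheme; the slot number used in main() is arbitrary, not a real batman-adv packet type.

    #include <stdio.h>

    typedef int (*rx_handler_t)(const void *pkt);

    #define RX_DROP         1
    #define RX_SUCCESS      0

    static int rx_unhandled(const void *pkt) { return RX_DROP; }
    static int rx_known(const void *pkt)     { return RX_SUCCESS; }

    static rx_handler_t rx_handler[256];

    int main(void)
    {
            unsigned int i;

            /* every slot defaults to "unhandled", handlers are installed per type */
            for (i = 0; i < 256; i++)
                    rx_handler[i] = rx_unhandled;
            rx_handler[0x42] = rx_known;    /* arbitrary example slot */

            printf("%d %d\n", rx_handler[0x42](NULL), rx_handler[0x43](NULL));
            return 0;
    }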
267 303
268 static void batadv_recv_handler_init(void) 304 static void batadv_recv_handler_init(void)
269 { 305 {
270 int i; 306 int i;
271 307
272 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++) 308 for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
273 batadv_rx_handler[i] = batadv_recv_unhandled_packet; 309 batadv_rx_handler[i] = batadv_recv_unhandled_packet;
274 310
275 /* batman icmp packet */ 311 /* batman icmp packet */
276 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet; 312 batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
277 /* unicast packet */ 313 /* unicast packet */
278 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet; 314 batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
279 /* fragmented unicast packet */ 315 /* fragmented unicast packet */
280 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet; 316 batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
281 /* broadcast packet */ 317 /* broadcast packet */
282 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet; 318 batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
283 /* vis packet */ 319 /* vis packet */
284 batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet; 320 batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
285 /* Translation table query (request or response) */ 321 /* Translation table query (request or response) */
286 batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query; 322 batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
287 /* Roaming advertisement */ 323 /* Roaming advertisement */
288 batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv; 324 batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
289 } 325 }
290 326
291 int 327 int
292 batadv_recv_handler_register(uint8_t packet_type, 328 batadv_recv_handler_register(uint8_t packet_type,
293 int (*recv_handler)(struct sk_buff *, 329 int (*recv_handler)(struct sk_buff *,
294 struct batadv_hard_iface *)) 330 struct batadv_hard_iface *))
295 { 331 {
296 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet) 332 if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
297 return -EBUSY; 333 return -EBUSY;
298 334
299 batadv_rx_handler[packet_type] = recv_handler; 335 batadv_rx_handler[packet_type] = recv_handler;
300 return 0; 336 return 0;
301 } 337 }
302 338
303 void batadv_recv_handler_unregister(uint8_t packet_type) 339 void batadv_recv_handler_unregister(uint8_t packet_type)
304 { 340 {
305 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet; 341 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
306 } 342 }
307 343
308 static struct batadv_algo_ops *batadv_algo_get(char *name) 344 static struct batadv_algo_ops *batadv_algo_get(char *name)
309 { 345 {
310 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp; 346 struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
311 struct hlist_node *node; 347 struct hlist_node *node;
312 348
313 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) { 349 hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
314 if (strcmp(bat_algo_ops_tmp->name, name) != 0) 350 if (strcmp(bat_algo_ops_tmp->name, name) != 0)
315 continue; 351 continue;
316 352
317 bat_algo_ops = bat_algo_ops_tmp; 353 bat_algo_ops = bat_algo_ops_tmp;
318 break; 354 break;
319 } 355 }
320 356
321 return bat_algo_ops; 357 return bat_algo_ops;
322 } 358 }
323 359
324 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) 360 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
325 { 361 {
326 struct batadv_algo_ops *bat_algo_ops_tmp; 362 struct batadv_algo_ops *bat_algo_ops_tmp;
327 int ret; 363 int ret;
328 364
329 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name); 365 bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
330 if (bat_algo_ops_tmp) { 366 if (bat_algo_ops_tmp) {
331 pr_info("Trying to register already registered routing algorithm: %s\n", 367 pr_info("Trying to register already registered routing algorithm: %s\n",
332 bat_algo_ops->name); 368 bat_algo_ops->name);
333 ret = -EEXIST; 369 ret = -EEXIST;
334 goto out; 370 goto out;
335 } 371 }
336 372
337 /* all algorithms must implement all ops (for now) */ 373 /* all algorithms must implement all ops (for now) */
338 if (!bat_algo_ops->bat_iface_enable || 374 if (!bat_algo_ops->bat_iface_enable ||
339 !bat_algo_ops->bat_iface_disable || 375 !bat_algo_ops->bat_iface_disable ||
340 !bat_algo_ops->bat_iface_update_mac || 376 !bat_algo_ops->bat_iface_update_mac ||
341 !bat_algo_ops->bat_primary_iface_set || 377 !bat_algo_ops->bat_primary_iface_set ||
342 !bat_algo_ops->bat_ogm_schedule || 378 !bat_algo_ops->bat_ogm_schedule ||
343 !bat_algo_ops->bat_ogm_emit) { 379 !bat_algo_ops->bat_ogm_emit) {
344 pr_info("Routing algo '%s' does not implement required ops\n", 380 pr_info("Routing algo '%s' does not implement required ops\n",
345 bat_algo_ops->name); 381 bat_algo_ops->name);
346 ret = -EINVAL; 382 ret = -EINVAL;
347 goto out; 383 goto out;
348 } 384 }
349 385
350 INIT_HLIST_NODE(&bat_algo_ops->list); 386 INIT_HLIST_NODE(&bat_algo_ops->list);
351 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list); 387 hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
352 ret = 0; 388 ret = 0;
353 389
354 out: 390 out:
355 return ret; 391 return ret;
356 } 392 }
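Registration above refuses any routing algorithm that leaves a required callback unset, so the rest of the code can invoke the ops without NULL checks. A reduced model with simplified callback signatures; the real struct batadv_algo_ops members take kernel arguments not shown here.

    struct algo_ops_model {
            const char *name;
            void (*iface_enable)(void);
            void (*iface_disable)(void);
            void (*ogm_schedule)(void);
            void (*ogm_emit)(void);
    };

    /* Returns 0 on success, -1 if any required op is missing
     * (the kernel code returns -EINVAL in that case).
     */
    static int algo_register_model(const struct algo_ops_model *ops)
    {
            if (!ops->iface_enable || !ops->iface_disable ||
                !ops->ogm_schedule || !ops->ogm_emit)
                    return -1;

            return 0;
    }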
357 393
358 int batadv_algo_select(struct batadv_priv *bat_priv, char *name) 394 int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
359 { 395 {
360 struct batadv_algo_ops *bat_algo_ops; 396 struct batadv_algo_ops *bat_algo_ops;
361 int ret = -EINVAL; 397 int ret = -EINVAL;
362 398
363 bat_algo_ops = batadv_algo_get(name); 399 bat_algo_ops = batadv_algo_get(name);
364 if (!bat_algo_ops) 400 if (!bat_algo_ops)
365 goto out; 401 goto out;
366 402
367 bat_priv->bat_algo_ops = bat_algo_ops; 403 bat_priv->bat_algo_ops = bat_algo_ops;
368 ret = 0; 404 ret = 0;
369 405
370 out: 406 out:
371 return ret; 407 return ret;
372 } 408 }
373 409
374 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) 410 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
375 { 411 {
376 struct batadv_algo_ops *bat_algo_ops; 412 struct batadv_algo_ops *bat_algo_ops;
377 struct hlist_node *node; 413 struct hlist_node *node;
378 414
379 seq_printf(seq, "Available routing algorithms:\n"); 415 seq_printf(seq, "Available routing algorithms:\n");
380 416
381 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) { 417 hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
382 seq_printf(seq, "%s\n", bat_algo_ops->name); 418 seq_printf(seq, "%s\n", bat_algo_ops->name);
383 } 419 }
384 420
385 return 0; 421 return 0;
386 } 422 }
387 423
388 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp) 424 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
389 { 425 {
390 struct batadv_algo_ops *bat_algo_ops; 426 struct batadv_algo_ops *bat_algo_ops;
391 char *algo_name = (char *)val; 427 char *algo_name = (char *)val;
392 size_t name_len = strlen(algo_name); 428 size_t name_len = strlen(algo_name);
393 429
394 if (algo_name[name_len - 1] == '\n') 430 if (algo_name[name_len - 1] == '\n')
395 algo_name[name_len - 1] = '\0'; 431 algo_name[name_len - 1] = '\0';
396 432
397 bat_algo_ops = batadv_algo_get(algo_name); 433 bat_algo_ops = batadv_algo_get(algo_name);
398 if (!bat_algo_ops) { 434 if (!bat_algo_ops) {
399 pr_err("Routing algorithm '%s' is not supported\n", algo_name); 435 pr_err("Routing algorithm '%s' is not supported\n", algo_name);
400 return -EINVAL; 436 return -EINVAL;
401 } 437 }
402 438
403 return param_set_copystring(algo_name, kp); 439 return param_set_copystring(algo_name, kp);
404 } 440 }
405 441
406 static const struct kernel_param_ops batadv_param_ops_ra = { 442 static const struct kernel_param_ops batadv_param_ops_ra = {
407 .set = batadv_param_set_ra, 443 .set = batadv_param_set_ra,
408 .get = param_get_string, 444 .get = param_get_string,
409 }; 445 };
410 446
411 static struct kparam_string batadv_param_string_ra = { 447 static struct kparam_string batadv_param_string_ra = {
412 .maxlen = sizeof(batadv_routing_algo), 448 .maxlen = sizeof(batadv_routing_algo),
413 .string = batadv_routing_algo, 449 .string = batadv_routing_algo,
414 }; 450 };
415 451
416 module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra, 452 module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
417 0644); 453 0644);
418 module_init(batadv_init); 454 module_init(batadv_init);
419 module_exit(batadv_exit); 455 module_exit(batadv_exit);
420 456
421 MODULE_LICENSE("GPL"); 457 MODULE_LICENSE("GPL");
422 458
423 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR); 459 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
424 MODULE_DESCRIPTION(BATADV_DRIVER_DESC); 460 MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
425 MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE); 461 MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
426 MODULE_VERSION(BATADV_SOURCE_VERSION); 462 MODULE_VERSION(BATADV_SOURCE_VERSION);
427 463
net/batman-adv/main.h
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #ifndef _NET_BATMAN_ADV_MAIN_H_ 20 #ifndef _NET_BATMAN_ADV_MAIN_H_
21 #define _NET_BATMAN_ADV_MAIN_H_ 21 #define _NET_BATMAN_ADV_MAIN_H_
22 22
23 #define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \ 23 #define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
24 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>" 24 "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
25 #define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced" 25 #define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
26 #define BATADV_DRIVER_DEVICE "batman-adv" 26 #define BATADV_DRIVER_DEVICE "batman-adv"
27 27
28 #ifndef BATADV_SOURCE_VERSION 28 #ifndef BATADV_SOURCE_VERSION
29 #define BATADV_SOURCE_VERSION "2012.4.0" 29 #define BATADV_SOURCE_VERSION "2012.4.0"
30 #endif 30 #endif
31 31
32 /* B.A.T.M.A.N. parameters */ 32 /* B.A.T.M.A.N. parameters */
33 33
34 #define BATADV_TQ_MAX_VALUE 255 34 #define BATADV_TQ_MAX_VALUE 255
35 #define BATADV_JITTER 20 35 #define BATADV_JITTER 20
36 36
37 /* Time To Live of broadcast messages */ 37 /* Time To Live of broadcast messages */
38 #define BATADV_TTL 50 38 #define BATADV_TTL 50
39 39
40 /* purge originators after time in seconds if no valid packet comes in 40 /* purge originators after time in seconds if no valid packet comes in
41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE 41 * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
42 */ 42 */
43 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */ 43 #define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
44 #define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */ 44 #define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
45 #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */ 45 #define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
46 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */ 46 #define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
47 /* sliding packet range of received originator messages in sequence numbers 47 /* sliding packet range of received originator messages in sequence numbers
48 * (should be a multiple of our word size) 48 * (should be a multiple of our word size)
49 */ 49 */
50 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64 50 #define BATADV_TQ_LOCAL_WINDOW_SIZE 64
51 /* milliseconds we have to keep pending tt_req */ 51 /* milliseconds we have to keep pending tt_req */
52 #define BATADV_TT_REQUEST_TIMEOUT 3000 52 #define BATADV_TT_REQUEST_TIMEOUT 3000
53 53
54 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5 54 #define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
55 #define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 55 #define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
56 #define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1 56 #define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
57 #define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1 57 #define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
58 58
59 /* number of OGMs sent with the last tt diff */ 59 /* number of OGMs sent with the last tt diff */
60 #define BATADV_TT_OGM_APPEND_MAX 3 60 #define BATADV_TT_OGM_APPEND_MAX 3
61 61
62 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in 62 /* Time in which a client can roam at most ROAMING_MAX_COUNT times in
63 * milliseconds 63 * milliseconds
64 */ 64 */
65 #define BATADV_ROAMING_MAX_TIME 20000 65 #define BATADV_ROAMING_MAX_TIME 20000
66 #define BATADV_ROAMING_MAX_COUNT 5 66 #define BATADV_ROAMING_MAX_COUNT 5
67 67
68 #define BATADV_NO_FLAGS 0 68 #define BATADV_NO_FLAGS 0
69 69
70 #define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ 70 #define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
71 71
72 #define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE) 72 #define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
73 73
74 #define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */ 74 #define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
75 75
76 #define BATADV_VIS_INTERVAL 5000 /* 5 seconds */ 76 #define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
77 77
78 /* how much worse secondary interfaces may be to be considered as bonding 78 /* how much worse secondary interfaces may be to be considered as bonding
79 * candidates 79 * candidates
80 */ 80 */
81 #define BATADV_BONDING_TQ_THRESHOLD 50 81 #define BATADV_BONDING_TQ_THRESHOLD 50
82 82
83 /* should not be bigger than 512 bytes or change the size of 83 /* should not be bigger than 512 bytes or change the size of
84 * forw_packet->direct_link_flags 84 * forw_packet->direct_link_flags
85 */ 85 */
86 #define BATADV_MAX_AGGREGATION_BYTES 512 86 #define BATADV_MAX_AGGREGATION_BYTES 512
87 #define BATADV_MAX_AGGREGATION_MS 100 87 #define BATADV_MAX_AGGREGATION_MS 100
88 88
89 #define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */ 89 #define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
90 #define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3) 90 #define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
91 #define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10) 91 #define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
92 92
93 #define BATADV_DUPLIST_SIZE 16 93 #define BATADV_DUPLIST_SIZE 16
94 #define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */ 94 #define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
95 /* don't reset again within 30 seconds */ 95 /* don't reset again within 30 seconds */
96 #define BATADV_RESET_PROTECTION_MS 30000 96 #define BATADV_RESET_PROTECTION_MS 30000
97 #define BATADV_EXPECTED_SEQNO_RANGE 65536 97 #define BATADV_EXPECTED_SEQNO_RANGE 65536
98 98
99 enum batadv_mesh_state { 99 enum batadv_mesh_state {
100 BATADV_MESH_INACTIVE, 100 BATADV_MESH_INACTIVE,
101 BATADV_MESH_ACTIVE, 101 BATADV_MESH_ACTIVE,
102 BATADV_MESH_DEACTIVATING, 102 BATADV_MESH_DEACTIVATING,
103 }; 103 };
104 104
105 #define BATADV_BCAST_QUEUE_LEN 256 105 #define BATADV_BCAST_QUEUE_LEN 256
106 #define BATADV_BATMAN_QUEUE_LEN 256 106 #define BATADV_BATMAN_QUEUE_LEN 256
107 107
108 enum batadv_uev_action { 108 enum batadv_uev_action {
109 BATADV_UEV_ADD = 0, 109 BATADV_UEV_ADD = 0,
110 BATADV_UEV_DEL, 110 BATADV_UEV_DEL,
111 BATADV_UEV_CHANGE, 111 BATADV_UEV_CHANGE,
112 }; 112 };
113 113
114 enum batadv_uev_type { 114 enum batadv_uev_type {
115 BATADV_UEV_GW = 0, 115 BATADV_UEV_GW = 0,
116 }; 116 };
117 117
118 #define BATADV_GW_THRESHOLD 50 118 #define BATADV_GW_THRESHOLD 50
119 119
120 /* Debug Messages */ 120 /* Debug Messages */
121 #ifdef pr_fmt 121 #ifdef pr_fmt
122 #undef pr_fmt 122 #undef pr_fmt
123 #endif 123 #endif
124 /* Prepend 'batman-adv: ' to kernel messages */ 124 /* Prepend 'batman-adv: ' to kernel messages */
125 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 125 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
126 126
127 /* Kernel headers */ 127 /* Kernel headers */
128 128
129 #include <linux/mutex.h> /* mutex */ 129 #include <linux/mutex.h> /* mutex */
130 #include <linux/module.h> /* needed by all modules */ 130 #include <linux/module.h> /* needed by all modules */
131 #include <linux/netdevice.h> /* netdevice */ 131 #include <linux/netdevice.h> /* netdevice */
132 #include <linux/etherdevice.h> /* ethernet address classification */ 132 #include <linux/etherdevice.h> /* ethernet address classification */
133 #include <linux/if_ether.h> /* ethernet header */ 133 #include <linux/if_ether.h> /* ethernet header */
134 #include <linux/poll.h> /* poll_table */ 134 #include <linux/poll.h> /* poll_table */
135 #include <linux/kthread.h> /* kernel threads */ 135 #include <linux/kthread.h> /* kernel threads */
136 #include <linux/pkt_sched.h> /* schedule types */ 136 #include <linux/pkt_sched.h> /* schedule types */
137 #include <linux/workqueue.h> /* workqueue */ 137 #include <linux/workqueue.h> /* workqueue */
138 #include <linux/percpu.h> 138 #include <linux/percpu.h>
139 #include <linux/slab.h> 139 #include <linux/slab.h>
140 #include <net/sock.h> /* struct sock */ 140 #include <net/sock.h> /* struct sock */
141 #include <linux/jiffies.h> 141 #include <linux/jiffies.h>
142 #include <linux/seq_file.h> 142 #include <linux/seq_file.h>
143 #include "types.h" 143 #include "types.h"
144 144
145 extern char batadv_routing_algo[]; 145 extern char batadv_routing_algo[];
146 extern struct list_head batadv_hardif_list; 146 extern struct list_head batadv_hardif_list;
147 147
148 extern unsigned char batadv_broadcast_addr[]; 148 extern unsigned char batadv_broadcast_addr[];
149 extern struct workqueue_struct *batadv_event_workqueue; 149 extern struct workqueue_struct *batadv_event_workqueue;
150 150
151 int batadv_mesh_init(struct net_device *soft_iface); 151 int batadv_mesh_init(struct net_device *soft_iface);
152 void batadv_mesh_free(struct net_device *soft_iface); 152 void batadv_mesh_free(struct net_device *soft_iface);
153 void batadv_inc_module_count(void); 153 void batadv_inc_module_count(void);
154 void batadv_dec_module_count(void); 154 void batadv_dec_module_count(void);
155 int batadv_is_my_mac(const uint8_t *addr); 155 int batadv_is_my_mac(const uint8_t *addr);
156 struct batadv_hard_iface *
157 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
156 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, 158 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
157 struct packet_type *ptype, 159 struct packet_type *ptype,
158 struct net_device *orig_dev); 160 struct net_device *orig_dev);
159 int 161 int
160 batadv_recv_handler_register(uint8_t packet_type, 162 batadv_recv_handler_register(uint8_t packet_type,
161 int (*recv_handler)(struct sk_buff *, 163 int (*recv_handler)(struct sk_buff *,
162 struct batadv_hard_iface *)); 164 struct batadv_hard_iface *));
163 void batadv_recv_handler_unregister(uint8_t packet_type); 165 void batadv_recv_handler_unregister(uint8_t packet_type);
164 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops); 166 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
165 int batadv_algo_select(struct batadv_priv *bat_priv, char *name); 167 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
166 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); 168 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
167 169
168 /* all messages related to routing / flooding / broadcasting / etc */ 170 /* all messages related to routing / flooding / broadcasting / etc */
169 enum batadv_dbg_level { 171 enum batadv_dbg_level {
170 BATADV_DBG_BATMAN = BIT(0), 172 BATADV_DBG_BATMAN = BIT(0),
171 BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */ 173 BATADV_DBG_ROUTES = BIT(1), /* route added / changed / deleted */
172 BATADV_DBG_TT = BIT(2), /* translation table operations */ 174 BATADV_DBG_TT = BIT(2), /* translation table operations */
173 BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */ 175 BATADV_DBG_BLA = BIT(3), /* bridge loop avoidance */
174 BATADV_DBG_ALL = 15, 176 BATADV_DBG_ALL = 15,
175 }; 177 };
176 178
177 #ifdef CONFIG_BATMAN_ADV_DEBUG 179 #ifdef CONFIG_BATMAN_ADV_DEBUG
178 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) 180 int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
179 __printf(2, 3); 181 __printf(2, 3);
180 182
181 #define batadv_dbg(type, bat_priv, fmt, arg...) \ 183 #define batadv_dbg(type, bat_priv, fmt, arg...) \
182 do { \ 184 do { \
183 if (atomic_read(&bat_priv->log_level) & type) \ 185 if (atomic_read(&bat_priv->log_level) & type) \
184 batadv_debug_log(bat_priv, fmt, ## arg);\ 186 batadv_debug_log(bat_priv, fmt, ## arg);\
185 } \ 187 } \
186 while (0) 188 while (0)
187 #else /* !CONFIG_BATMAN_ADV_DEBUG */ 189 #else /* !CONFIG_BATMAN_ADV_DEBUG */
188 __printf(3, 4) 190 __printf(3, 4)
189 static inline void batadv_dbg(int type __always_unused, 191 static inline void batadv_dbg(int type __always_unused,
190 struct batadv_priv *bat_priv __always_unused, 192 struct batadv_priv *bat_priv __always_unused,
191 const char *fmt __always_unused, ...) 193 const char *fmt __always_unused, ...)
192 { 194 {
193 } 195 }
194 #endif 196 #endif
195 197
196 #define batadv_info(net_dev, fmt, arg...) \ 198 #define batadv_info(net_dev, fmt, arg...) \
197 do { \ 199 do { \
198 struct net_device *_netdev = (net_dev); \ 200 struct net_device *_netdev = (net_dev); \
199 struct batadv_priv *_batpriv = netdev_priv(_netdev); \ 201 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
200 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \ 202 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
201 pr_info("%s: " fmt, _netdev->name, ## arg); \ 203 pr_info("%s: " fmt, _netdev->name, ## arg); \
202 } while (0) 204 } while (0)
203 #define batadv_err(net_dev, fmt, arg...) \ 205 #define batadv_err(net_dev, fmt, arg...) \
204 do { \ 206 do { \
205 struct net_device *_netdev = (net_dev); \ 207 struct net_device *_netdev = (net_dev); \
206 struct batadv_priv *_batpriv = netdev_priv(_netdev); \ 208 struct batadv_priv *_batpriv = netdev_priv(_netdev); \
207 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \ 209 batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
208 pr_err("%s: " fmt, _netdev->name, ## arg); \ 210 pr_err("%s: " fmt, _netdev->name, ## arg); \
209 } while (0) 211 } while (0)
210 212
211 /* returns 1 if they are the same ethernet addr 213 /* returns 1 if they are the same ethernet addr
212 * 214 *
213 * note: can't use compare_ether_addr() as it requires aligned memory 215 * note: can't use compare_ether_addr() as it requires aligned memory
214 */ 216 */
215 static inline int batadv_compare_eth(const void *data1, const void *data2) 217 static inline int batadv_compare_eth(const void *data1, const void *data2)
216 { 218 {
217 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 219 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
218 } 220 }
219 221
220 /** 222 /**
221 * has_timed_out - compares current time (jiffies) and timestamp + timeout 223 * has_timed_out - compares current time (jiffies) and timestamp + timeout
222 * @timestamp: base value to compare with (in jiffies) 224 * @timestamp: base value to compare with (in jiffies)
223 * @timeout: added to base value before comparing (in milliseconds) 225 * @timeout: added to base value before comparing (in milliseconds)
224 * 226 *
225 * Returns true if current time is after timestamp + timeout 227 * Returns true if current time is after timestamp + timeout
226 */ 228 */
227 static inline bool batadv_has_timed_out(unsigned long timestamp, 229 static inline bool batadv_has_timed_out(unsigned long timestamp,
228 unsigned int timeout) 230 unsigned int timeout)
229 { 231 {
230 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout)); 232 return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
231 } 233 }
232 234
233 #define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) 235 #define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
234 236
235 /* Returns the smallest signed integer in two's complement with the sizeof x */ 237 /* Returns the smallest signed integer in two's complement with the sizeof x */
236 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u))) 238 #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
237 239
238 /* Checks if a sequence number x is a predecessor/successor of y. 240 /* Checks if a sequence number x is a predecessor/successor of y.
239 * they handle overflows/underflows and can correctly check for a 241 * they handle overflows/underflows and can correctly check for a
240 * predecessor/successor unless the variable sequence number has grown by 242 * predecessor/successor unless the variable sequence number has grown by
241 * more than 2**(bitwidth(x)-1)-1. 243 * more than 2**(bitwidth(x)-1)-1.
242 * This means that for a uint8_t with the maximum value 255, it would think: 244 * This means that for a uint8_t with the maximum value 255, it would think:
243 * - when adding nothing - it is neither a predecessor nor a successor 245 * - when adding nothing - it is neither a predecessor nor a successor
244 * - before adding more than 127 to the starting value - it is a predecessor, 246 * - before adding more than 127 to the starting value - it is a predecessor,
245 * - when adding 128 - it is neither a predecessor nor a successor, 247 * - when adding 128 - it is neither a predecessor nor a successor,
246 * - after adding more than 127 to the starting value - it is a successor 248 * - after adding more than 127 to the starting value - it is a successor
247 */ 249 */
248 #define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \ 250 #define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
249 typeof(y) _d2 = (y); \ 251 typeof(y) _d2 = (y); \
250 typeof(x) _dummy = (_d1 - _d2); \ 252 typeof(x) _dummy = (_d1 - _d2); \
251 (void) (&_d1 == &_d2); \ 253 (void) (&_d1 == &_d2); \
252 _dummy > batadv_smallest_signed_int(_dummy); }) 254 _dummy > batadv_smallest_signed_int(_dummy); })
253 #define batadv_seq_after(x, y) batadv_seq_before(y, x) 255 #define batadv_seq_after(x, y) batadv_seq_before(y, x)
254 256
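The comment above spells out the wrap-around semantics of batadv_seq_before()/batadv_seq_after(). The following standalone userspace snippet (not part of the patch; it relies on the typeof and statement-expression extensions, so gcc or clang is assumed) copies the two macros verbatim and reproduces the 8-bit cases described there:

    /* seqno_demo.c - illustration only, compile with gcc/clang */
    #include <stdint.h>
    #include <stdio.h>

    #define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))

    #define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
                                      typeof(y) _d2 = (y); \
                                      typeof(x) _dummy = (_d1 - _d2); \
                                      (void) (&_d1 == &_d2); \
                                      _dummy > batadv_smallest_signed_int(_dummy); })
    #define batadv_seq_after(x, y) batadv_seq_before(y, x)

    int main(void)
    {
        uint8_t old_seqno = 250, new_seqno = 5, far_seqno = 122;

        /* 250 wraps around to 5, so 250 counts as a predecessor of 5: prints 1 */
        printf("%d\n", batadv_seq_before(old_seqno, new_seqno));
        /* a distance of exactly 128 is neither before nor after: prints 0 0 */
        printf("%d %d\n", batadv_seq_before(old_seqno, far_seqno),
               batadv_seq_after(old_seqno, far_seqno));
        return 0;
    }
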
255 /* Stop preemption on local cpu while incrementing the counter */ 257 /* Stop preemption on local cpu while incrementing the counter */
256 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx, 258 static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
257 size_t count) 259 size_t count)
258 { 260 {
259 int cpu = get_cpu(); 261 int cpu = get_cpu();
260 per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count; 262 per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
261 put_cpu(); 263 put_cpu();
262 } 264 }
263 265
264 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1) 266 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
265 267
266 /* Sum and return the cpu-local counters for index 'idx' */ 268 /* Sum and return the cpu-local counters for index 'idx' */
267 static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv, 269 static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
268 size_t idx) 270 size_t idx)
269 { 271 {
270 uint64_t *counters, sum = 0; 272 uint64_t *counters, sum = 0;
271 int cpu; 273 int cpu;
272 274
273 for_each_possible_cpu(cpu) { 275 for_each_possible_cpu(cpu) {
274 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); 276 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
275 sum += counters[idx]; 277 sum += counters[idx];
276 } 278 }
277 279
278 return sum; 280 return sum;
279 } 281 }
280 282
281 #endif /* _NET_BATMAN_ADV_MAIN_H_ */ 283 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
282 284
net/batman-adv/originator.c
1 /* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Marek Lindner, Simon Wunderlich 3 * Marek Lindner, Simon Wunderlich
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #include "main.h" 20 #include "main.h"
21 #include "originator.h" 21 #include "originator.h"
22 #include "hash.h" 22 #include "hash.h"
23 #include "translation-table.h" 23 #include "translation-table.h"
24 #include "routing.h" 24 #include "routing.h"
25 #include "gateway_client.h" 25 #include "gateway_client.h"
26 #include "hard-interface.h" 26 #include "hard-interface.h"
27 #include "unicast.h" 27 #include "unicast.h"
28 #include "soft-interface.h" 28 #include "soft-interface.h"
29 #include "bridge_loop_avoidance.h" 29 #include "bridge_loop_avoidance.h"
30 30
31 static void batadv_purge_orig(struct work_struct *work); 31 static void batadv_purge_orig(struct work_struct *work);
32 32
33 static void batadv_start_purge_timer(struct batadv_priv *bat_priv) 33 static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
34 { 34 {
35 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig); 35 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
36 queue_delayed_work(batadv_event_workqueue, 36 queue_delayed_work(batadv_event_workqueue,
37 &bat_priv->orig_work, msecs_to_jiffies(1000)); 37 &bat_priv->orig_work, msecs_to_jiffies(1000));
38 } 38 }
39 39
40 /* returns 1 if they are the same originator */ 40 /* returns 1 if they are the same originator */
41 static int batadv_compare_orig(const struct hlist_node *node, const void *data2) 41 static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
42 { 42 {
43 const void *data1 = container_of(node, struct batadv_orig_node, 43 const void *data1 = container_of(node, struct batadv_orig_node,
44 hash_entry); 44 hash_entry);
45 45
46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47 } 47 }
48 48
49 int batadv_originator_init(struct batadv_priv *bat_priv) 49 int batadv_originator_init(struct batadv_priv *bat_priv)
50 { 50 {
51 if (bat_priv->orig_hash) 51 if (bat_priv->orig_hash)
52 return 0; 52 return 0;
53 53
54 bat_priv->orig_hash = batadv_hash_new(1024); 54 bat_priv->orig_hash = batadv_hash_new(1024);
55 55
56 if (!bat_priv->orig_hash) 56 if (!bat_priv->orig_hash)
57 goto err; 57 goto err;
58 58
59 batadv_start_purge_timer(bat_priv); 59 batadv_start_purge_timer(bat_priv);
60 return 0; 60 return 0;
61 61
62 err: 62 err:
63 return -ENOMEM; 63 return -ENOMEM;
64 } 64 }
65 65
66 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node) 66 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
67 { 67 {
68 if (atomic_dec_and_test(&neigh_node->refcount)) 68 if (atomic_dec_and_test(&neigh_node->refcount))
69 kfree_rcu(neigh_node, rcu); 69 kfree_rcu(neigh_node, rcu);
70 } 70 }
71 71
72 /* increases the refcounter of a found router */ 72 /* increases the refcounter of a found router */
73 struct batadv_neigh_node * 73 struct batadv_neigh_node *
74 batadv_orig_node_get_router(struct batadv_orig_node *orig_node) 74 batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
75 { 75 {
76 struct batadv_neigh_node *router; 76 struct batadv_neigh_node *router;
77 77
78 rcu_read_lock(); 78 rcu_read_lock();
79 router = rcu_dereference(orig_node->router); 79 router = rcu_dereference(orig_node->router);
80 80
81 if (router && !atomic_inc_not_zero(&router->refcount)) 81 if (router && !atomic_inc_not_zero(&router->refcount))
82 router = NULL; 82 router = NULL;
83 83
84 rcu_read_unlock(); 84 rcu_read_unlock();
85 return router; 85 return router;
86 } 86 }
87 87
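batadv_orig_node_get_router() above only hands out the router if it can still take a reference via atomic_inc_not_zero(), i.e. if the object is not already on its way to being freed by batadv_neigh_node_free_ref(). A rough userspace analogue of that "increment unless zero" step, using C11 stdatomic instead of the kernel's atomic_t (function and variable names here are illustrative, not from the kernel source):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* take a reference only while the refcount has not yet dropped to zero */
    static bool ref_get_unless_zero(atomic_int *refcount)
    {
        int old = atomic_load(refcount);

        while (old != 0) {
            /* on failure, old is reloaded with the current value and we retry */
            if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                return true;  /* reference taken */
        }
        return false;  /* object is being freed, caller must not use it */
    }

    int main(void)
    {
        atomic_int refcount = 1;

        printf("got ref: %d\n", ref_get_unless_zero(&refcount)); /* 1 */
        atomic_store(&refcount, 0);
        printf("got ref: %d\n", ref_get_unless_zero(&refcount)); /* 0 */
        return 0;
    }
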
88 struct batadv_neigh_node * 88 struct batadv_neigh_node *
89 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface, 89 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
90 const uint8_t *neigh_addr, uint32_t seqno) 90 const uint8_t *neigh_addr, uint32_t seqno)
91 { 91 {
92 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 92 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
93 struct batadv_neigh_node *neigh_node; 93 struct batadv_neigh_node *neigh_node;
94 94
95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); 95 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
96 if (!neigh_node) 96 if (!neigh_node)
97 goto out; 97 goto out;
98 98
99 INIT_HLIST_NODE(&neigh_node->list); 99 INIT_HLIST_NODE(&neigh_node->list);
100 100
101 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN); 101 memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
102 spin_lock_init(&neigh_node->lq_update_lock); 102 spin_lock_init(&neigh_node->lq_update_lock);
103 103
104 /* extra reference for return */ 104 /* extra reference for return */
105 atomic_set(&neigh_node->refcount, 2); 105 atomic_set(&neigh_node->refcount, 2);
106 106
107 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 107 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
108 "Creating new neighbor %pM, initial seqno %d\n", 108 "Creating new neighbor %pM, initial seqno %d\n",
109 neigh_addr, seqno); 109 neigh_addr, seqno);
110 110
111 out: 111 out:
112 return neigh_node; 112 return neigh_node;
113 } 113 }
114 114
115 static void batadv_orig_node_free_rcu(struct rcu_head *rcu) 115 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
116 { 116 {
117 struct hlist_node *node, *node_tmp; 117 struct hlist_node *node, *node_tmp;
118 struct batadv_neigh_node *neigh_node, *tmp_neigh_node; 118 struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
119 struct batadv_orig_node *orig_node; 119 struct batadv_orig_node *orig_node;
120 120
121 orig_node = container_of(rcu, struct batadv_orig_node, rcu); 121 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
122 122
123 spin_lock_bh(&orig_node->neigh_list_lock); 123 spin_lock_bh(&orig_node->neigh_list_lock);
124 124
125 /* for all bonding members ... */ 125 /* for all bonding members ... */
126 list_for_each_entry_safe(neigh_node, tmp_neigh_node, 126 list_for_each_entry_safe(neigh_node, tmp_neigh_node,
127 &orig_node->bond_list, bonding_list) { 127 &orig_node->bond_list, bonding_list) {
128 list_del_rcu(&neigh_node->bonding_list); 128 list_del_rcu(&neigh_node->bonding_list);
129 batadv_neigh_node_free_ref(neigh_node); 129 batadv_neigh_node_free_ref(neigh_node);
130 } 130 }
131 131
132 /* for all neighbors towards this originator ... */ 132 /* for all neighbors towards this originator ... */
133 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 133 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
134 &orig_node->neigh_list, list) { 134 &orig_node->neigh_list, list) {
135 hlist_del_rcu(&neigh_node->list); 135 hlist_del_rcu(&neigh_node->list);
136 batadv_neigh_node_free_ref(neigh_node); 136 batadv_neigh_node_free_ref(neigh_node);
137 } 137 }
138 138
139 spin_unlock_bh(&orig_node->neigh_list_lock); 139 spin_unlock_bh(&orig_node->neigh_list_lock);
140 140
141 batadv_frag_list_free(&orig_node->frag_list); 141 batadv_frag_list_free(&orig_node->frag_list);
142 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, 142 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
143 "originator timed out"); 143 "originator timed out");
144 144
145 kfree(orig_node->tt_buff); 145 kfree(orig_node->tt_buff);
146 kfree(orig_node->bcast_own); 146 kfree(orig_node->bcast_own);
147 kfree(orig_node->bcast_own_sum); 147 kfree(orig_node->bcast_own_sum);
148 kfree(orig_node); 148 kfree(orig_node);
149 } 149 }
150 150
151 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node) 151 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
152 { 152 {
153 if (atomic_dec_and_test(&orig_node->refcount)) 153 if (atomic_dec_and_test(&orig_node->refcount))
154 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); 154 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
155 } 155 }
156 156
157 void batadv_originator_free(struct batadv_priv *bat_priv) 157 void batadv_originator_free(struct batadv_priv *bat_priv)
158 { 158 {
159 struct batadv_hashtable *hash = bat_priv->orig_hash; 159 struct batadv_hashtable *hash = bat_priv->orig_hash;
160 struct hlist_node *node, *node_tmp; 160 struct hlist_node *node, *node_tmp;
161 struct hlist_head *head; 161 struct hlist_head *head;
162 spinlock_t *list_lock; /* spinlock to protect write access */ 162 spinlock_t *list_lock; /* spinlock to protect write access */
163 struct batadv_orig_node *orig_node; 163 struct batadv_orig_node *orig_node;
164 uint32_t i; 164 uint32_t i;
165 165
166 if (!hash) 166 if (!hash)
167 return; 167 return;
168 168
169 cancel_delayed_work_sync(&bat_priv->orig_work); 169 cancel_delayed_work_sync(&bat_priv->orig_work);
170 170
171 bat_priv->orig_hash = NULL; 171 bat_priv->orig_hash = NULL;
172 172
173 for (i = 0; i < hash->size; i++) { 173 for (i = 0; i < hash->size; i++) {
174 head = &hash->table[i]; 174 head = &hash->table[i];
175 list_lock = &hash->list_locks[i]; 175 list_lock = &hash->list_locks[i];
176 176
177 spin_lock_bh(list_lock); 177 spin_lock_bh(list_lock);
178 hlist_for_each_entry_safe(orig_node, node, node_tmp, 178 hlist_for_each_entry_safe(orig_node, node, node_tmp,
179 head, hash_entry) { 179 head, hash_entry) {
180 180
181 hlist_del_rcu(node); 181 hlist_del_rcu(node);
182 batadv_orig_node_free_ref(orig_node); 182 batadv_orig_node_free_ref(orig_node);
183 } 183 }
184 spin_unlock_bh(list_lock); 184 spin_unlock_bh(list_lock);
185 } 185 }
186 186
187 batadv_hash_destroy(hash); 187 batadv_hash_destroy(hash);
188 } 188 }
189 189
190 /* this function finds or creates an originator entry for the given 190 /* this function finds or creates an originator entry for the given
191 * address if it does not exist 191 * address if it does not exist
192 */ 192 */
193 struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, 193 struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
194 const uint8_t *addr) 194 const uint8_t *addr)
195 { 195 {
196 struct batadv_orig_node *orig_node; 196 struct batadv_orig_node *orig_node;
197 int size; 197 int size;
198 int hash_added; 198 int hash_added;
199 unsigned long reset_time; 199 unsigned long reset_time;
200 200
201 orig_node = batadv_orig_hash_find(bat_priv, addr); 201 orig_node = batadv_orig_hash_find(bat_priv, addr);
202 if (orig_node) 202 if (orig_node)
203 return orig_node; 203 return orig_node;
204 204
205 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 205 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
206 "Creating new originator: %pM\n", addr); 206 "Creating new originator: %pM\n", addr);
207 207
208 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC); 208 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
209 if (!orig_node) 209 if (!orig_node)
210 return NULL; 210 return NULL;
211 211
212 INIT_HLIST_HEAD(&orig_node->neigh_list); 212 INIT_HLIST_HEAD(&orig_node->neigh_list);
213 INIT_LIST_HEAD(&orig_node->bond_list); 213 INIT_LIST_HEAD(&orig_node->bond_list);
214 spin_lock_init(&orig_node->ogm_cnt_lock); 214 spin_lock_init(&orig_node->ogm_cnt_lock);
215 spin_lock_init(&orig_node->bcast_seqno_lock); 215 spin_lock_init(&orig_node->bcast_seqno_lock);
216 spin_lock_init(&orig_node->neigh_list_lock); 216 spin_lock_init(&orig_node->neigh_list_lock);
217 spin_lock_init(&orig_node->tt_buff_lock); 217 spin_lock_init(&orig_node->tt_buff_lock);
218 218
219 /* extra reference for return */ 219 /* extra reference for return */
220 atomic_set(&orig_node->refcount, 2); 220 atomic_set(&orig_node->refcount, 2);
221 221
222 orig_node->tt_initialised = false; 222 orig_node->tt_initialised = false;
223 orig_node->tt_poss_change = false; 223 orig_node->tt_poss_change = false;
224 orig_node->bat_priv = bat_priv; 224 orig_node->bat_priv = bat_priv;
225 memcpy(orig_node->orig, addr, ETH_ALEN); 225 memcpy(orig_node->orig, addr, ETH_ALEN);
226 orig_node->router = NULL; 226 orig_node->router = NULL;
227 orig_node->tt_crc = 0; 227 orig_node->tt_crc = 0;
228 atomic_set(&orig_node->last_ttvn, 0); 228 atomic_set(&orig_node->last_ttvn, 0);
229 orig_node->tt_buff = NULL; 229 orig_node->tt_buff = NULL;
230 orig_node->tt_buff_len = 0; 230 orig_node->tt_buff_len = 0;
231 atomic_set(&orig_node->tt_size, 0); 231 atomic_set(&orig_node->tt_size, 0);
232 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); 232 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
233 orig_node->bcast_seqno_reset = reset_time; 233 orig_node->bcast_seqno_reset = reset_time;
234 orig_node->batman_seqno_reset = reset_time; 234 orig_node->batman_seqno_reset = reset_time;
235 235
236 atomic_set(&orig_node->bond_candidates, 0); 236 atomic_set(&orig_node->bond_candidates, 0);
237 237
238 size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS; 238 size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
239 239
240 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); 240 orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
241 if (!orig_node->bcast_own) 241 if (!orig_node->bcast_own)
242 goto free_orig_node; 242 goto free_orig_node;
243 243
244 size = bat_priv->num_ifaces * sizeof(uint8_t); 244 size = bat_priv->num_ifaces * sizeof(uint8_t);
245 orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC); 245 orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
246 246
247 INIT_LIST_HEAD(&orig_node->frag_list); 247 INIT_LIST_HEAD(&orig_node->frag_list);
248 orig_node->last_frag_packet = 0; 248 orig_node->last_frag_packet = 0;
249 249
250 if (!orig_node->bcast_own_sum) 250 if (!orig_node->bcast_own_sum)
251 goto free_bcast_own; 251 goto free_bcast_own;
252 252
253 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig, 253 hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
254 batadv_choose_orig, orig_node, 254 batadv_choose_orig, orig_node,
255 &orig_node->hash_entry); 255 &orig_node->hash_entry);
256 if (hash_added != 0) 256 if (hash_added != 0)
257 goto free_bcast_own_sum; 257 goto free_bcast_own_sum;
258 258
259 return orig_node; 259 return orig_node;
260 free_bcast_own_sum: 260 free_bcast_own_sum:
261 kfree(orig_node->bcast_own_sum); 261 kfree(orig_node->bcast_own_sum);
262 free_bcast_own: 262 free_bcast_own:
263 kfree(orig_node->bcast_own); 263 kfree(orig_node->bcast_own);
264 free_orig_node: 264 free_orig_node:
265 kfree(orig_node); 265 kfree(orig_node);
266 return NULL; 266 return NULL;
267 } 267 }
268 268
269 static bool 269 static bool
270 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, 270 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
271 struct batadv_orig_node *orig_node, 271 struct batadv_orig_node *orig_node,
272 struct batadv_neigh_node **best_neigh_node) 272 struct batadv_neigh_node **best_neigh_node)
273 { 273 {
274 struct hlist_node *node, *node_tmp; 274 struct hlist_node *node, *node_tmp;
275 struct batadv_neigh_node *neigh_node; 275 struct batadv_neigh_node *neigh_node;
276 bool neigh_purged = false; 276 bool neigh_purged = false;
277 unsigned long last_seen; 277 unsigned long last_seen;
278 struct batadv_hard_iface *if_incoming; 278 struct batadv_hard_iface *if_incoming;
279 279
280 *best_neigh_node = NULL; 280 *best_neigh_node = NULL;
281 281
282 spin_lock_bh(&orig_node->neigh_list_lock); 282 spin_lock_bh(&orig_node->neigh_list_lock);
283 283
284 /* for all neighbors towards this originator ... */ 284 /* for all neighbors towards this originator ... */
285 hlist_for_each_entry_safe(neigh_node, node, node_tmp, 285 hlist_for_each_entry_safe(neigh_node, node, node_tmp,
286 &orig_node->neigh_list, list) { 286 &orig_node->neigh_list, list) {
287 287
288 last_seen = neigh_node->last_seen; 288 last_seen = neigh_node->last_seen;
289 if_incoming = neigh_node->if_incoming; 289 if_incoming = neigh_node->if_incoming;
290 290
291 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) || 291 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
292 (if_incoming->if_status == BATADV_IF_INACTIVE) || 292 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
293 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) || 293 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
294 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) { 294 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
295 295
296 if ((if_incoming->if_status == BATADV_IF_INACTIVE) || 296 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
297 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) || 297 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
298 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) 298 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
299 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 299 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
300 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", 300 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
301 orig_node->orig, neigh_node->addr, 301 orig_node->orig, neigh_node->addr,
302 if_incoming->net_dev->name); 302 if_incoming->net_dev->name);
303 else 303 else
304 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 304 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
305 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n", 305 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
306 orig_node->orig, neigh_node->addr, 306 orig_node->orig, neigh_node->addr,
307 jiffies_to_msecs(last_seen)); 307 jiffies_to_msecs(last_seen));
308 308
309 neigh_purged = true; 309 neigh_purged = true;
310 310
311 hlist_del_rcu(&neigh_node->list); 311 hlist_del_rcu(&neigh_node->list);
312 batadv_bonding_candidate_del(orig_node, neigh_node); 312 batadv_bonding_candidate_del(orig_node, neigh_node);
313 batadv_neigh_node_free_ref(neigh_node); 313 batadv_neigh_node_free_ref(neigh_node);
314 } else { 314 } else {
315 if ((!*best_neigh_node) || 315 if ((!*best_neigh_node) ||
316 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) 316 (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
317 *best_neigh_node = neigh_node; 317 *best_neigh_node = neigh_node;
318 } 318 }
319 } 319 }
320 320
321 spin_unlock_bh(&orig_node->neigh_list_lock); 321 spin_unlock_bh(&orig_node->neigh_list_lock);
322 return neigh_purged; 322 return neigh_purged;
323 } 323 }
324 324
325 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, 325 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
326 struct batadv_orig_node *orig_node) 326 struct batadv_orig_node *orig_node)
327 { 327 {
328 struct batadv_neigh_node *best_neigh_node; 328 struct batadv_neigh_node *best_neigh_node;
329 329
330 if (batadv_has_timed_out(orig_node->last_seen, 330 if (batadv_has_timed_out(orig_node->last_seen,
331 2 * BATADV_PURGE_TIMEOUT)) { 331 2 * BATADV_PURGE_TIMEOUT)) {
332 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 332 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
333 "Originator timeout: originator %pM, last_seen %u\n", 333 "Originator timeout: originator %pM, last_seen %u\n",
334 orig_node->orig, 334 orig_node->orig,
335 jiffies_to_msecs(orig_node->last_seen)); 335 jiffies_to_msecs(orig_node->last_seen));
336 return true; 336 return true;
337 } else { 337 } else {
338 if (batadv_purge_orig_neighbors(bat_priv, orig_node, 338 if (batadv_purge_orig_neighbors(bat_priv, orig_node,
339 &best_neigh_node)) 339 &best_neigh_node))
340 batadv_update_route(bat_priv, orig_node, 340 batadv_update_route(bat_priv, orig_node,
341 best_neigh_node); 341 best_neigh_node);
342 } 342 }
343 343
344 return false; 344 return false;
345 } 345 }
346 346
347 static void _batadv_purge_orig(struct batadv_priv *bat_priv) 347 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
348 { 348 {
349 struct batadv_hashtable *hash = bat_priv->orig_hash; 349 struct batadv_hashtable *hash = bat_priv->orig_hash;
350 struct hlist_node *node, *node_tmp; 350 struct hlist_node *node, *node_tmp;
351 struct hlist_head *head; 351 struct hlist_head *head;
352 spinlock_t *list_lock; /* spinlock to protect write access */ 352 spinlock_t *list_lock; /* spinlock to protect write access */
353 struct batadv_orig_node *orig_node; 353 struct batadv_orig_node *orig_node;
354 uint32_t i; 354 uint32_t i;
355 355
356 if (!hash) 356 if (!hash)
357 return; 357 return;
358 358
359 /* for all origins... */ 359 /* for all origins... */
360 for (i = 0; i < hash->size; i++) { 360 for (i = 0; i < hash->size; i++) {
361 head = &hash->table[i]; 361 head = &hash->table[i];
362 list_lock = &hash->list_locks[i]; 362 list_lock = &hash->list_locks[i];
363 363
364 spin_lock_bh(list_lock); 364 spin_lock_bh(list_lock);
365 hlist_for_each_entry_safe(orig_node, node, node_tmp, 365 hlist_for_each_entry_safe(orig_node, node, node_tmp,
366 head, hash_entry) { 366 head, hash_entry) {
367 if (batadv_purge_orig_node(bat_priv, orig_node)) { 367 if (batadv_purge_orig_node(bat_priv, orig_node)) {
368 if (orig_node->gw_flags) 368 if (orig_node->gw_flags)
369 batadv_gw_node_delete(bat_priv, 369 batadv_gw_node_delete(bat_priv,
370 orig_node); 370 orig_node);
371 hlist_del_rcu(node); 371 hlist_del_rcu(node);
372 batadv_orig_node_free_ref(orig_node); 372 batadv_orig_node_free_ref(orig_node);
373 continue; 373 continue;
374 } 374 }
375 375
376 if (batadv_has_timed_out(orig_node->last_frag_packet, 376 if (batadv_has_timed_out(orig_node->last_frag_packet,
377 BATADV_FRAG_TIMEOUT)) 377 BATADV_FRAG_TIMEOUT))
378 batadv_frag_list_free(&orig_node->frag_list); 378 batadv_frag_list_free(&orig_node->frag_list);
379 } 379 }
380 spin_unlock_bh(list_lock); 380 spin_unlock_bh(list_lock);
381 } 381 }
382 382
383 batadv_gw_node_purge(bat_priv); 383 batadv_gw_node_purge(bat_priv);
384 batadv_gw_election(bat_priv); 384 batadv_gw_election(bat_priv);
385 } 385 }
386 386
387 static void batadv_purge_orig(struct work_struct *work) 387 static void batadv_purge_orig(struct work_struct *work)
388 { 388 {
389 struct delayed_work *delayed_work; 389 struct delayed_work *delayed_work;
390 struct batadv_priv *bat_priv; 390 struct batadv_priv *bat_priv;
391 391
392 delayed_work = container_of(work, struct delayed_work, work); 392 delayed_work = container_of(work, struct delayed_work, work);
393 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work); 393 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
394 _batadv_purge_orig(bat_priv); 394 _batadv_purge_orig(bat_priv);
395 batadv_start_purge_timer(bat_priv); 395 batadv_start_purge_timer(bat_priv);
396 } 396 }
397 397
398 void batadv_purge_orig_ref(struct batadv_priv *bat_priv) 398 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
399 { 399 {
400 _batadv_purge_orig(bat_priv); 400 _batadv_purge_orig(bat_priv);
401 } 401 }
402 402
403 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) 403 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
404 { 404 {
405 struct net_device *net_dev = (struct net_device *)seq->private; 405 struct net_device *net_dev = (struct net_device *)seq->private;
406 struct batadv_priv *bat_priv = netdev_priv(net_dev); 406 struct batadv_priv *bat_priv = netdev_priv(net_dev);
407 struct batadv_hashtable *hash = bat_priv->orig_hash; 407 struct batadv_hashtable *hash = bat_priv->orig_hash;
408 struct hlist_node *node, *node_tmp; 408 struct hlist_node *node, *node_tmp;
409 struct hlist_head *head; 409 struct hlist_head *head;
410 struct batadv_hard_iface *primary_if; 410 struct batadv_hard_iface *primary_if;
411 struct batadv_orig_node *orig_node; 411 struct batadv_orig_node *orig_node;
412 struct batadv_neigh_node *neigh_node, *neigh_node_tmp; 412 struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
413 int batman_count = 0; 413 int batman_count = 0;
414 int last_seen_secs; 414 int last_seen_secs;
415 int last_seen_msecs; 415 int last_seen_msecs;
416 unsigned long last_seen_jiffies; 416 unsigned long last_seen_jiffies;
417 uint32_t i; 417 uint32_t i;
418 int ret = 0;
419 418
420 primary_if = batadv_primary_if_get_selected(bat_priv); 419 primary_if = batadv_seq_print_text_primary_if_get(seq);
421 420 if (!primary_if)
422 if (!primary_if) {
423 ret = seq_printf(seq,
424 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
425 net_dev->name);
426 goto out; 421 goto out;
427 }
428 422
429 if (primary_if->if_status != BATADV_IF_ACTIVE) {
430 ret = seq_printf(seq,
431 "BATMAN mesh %s disabled - primary interface not active\n",
432 net_dev->name);
433 goto out;
434 }
435
436 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n", 423 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
437 BATADV_SOURCE_VERSION, primary_if->net_dev->name, 424 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
438 primary_if->net_dev->dev_addr, net_dev->name); 425 primary_if->net_dev->dev_addr, net_dev->name);
439 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 426 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
440 "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE, 427 "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
441 "Nexthop", "outgoingIF", "Potential nexthops"); 428 "Nexthop", "outgoingIF", "Potential nexthops");
442 429
443 for (i = 0; i < hash->size; i++) { 430 for (i = 0; i < hash->size; i++) {
444 head = &hash->table[i]; 431 head = &hash->table[i];
445 432
446 rcu_read_lock(); 433 rcu_read_lock();
447 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 434 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
448 neigh_node = batadv_orig_node_get_router(orig_node); 435 neigh_node = batadv_orig_node_get_router(orig_node);
449 if (!neigh_node) 436 if (!neigh_node)
450 continue; 437 continue;
451 438
452 if (neigh_node->tq_avg == 0) 439 if (neigh_node->tq_avg == 0)
453 goto next; 440 goto next;
454 441
455 last_seen_jiffies = jiffies - orig_node->last_seen; 442 last_seen_jiffies = jiffies - orig_node->last_seen;
456 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies); 443 last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
457 last_seen_secs = last_seen_msecs / 1000; 444 last_seen_secs = last_seen_msecs / 1000;
458 last_seen_msecs = last_seen_msecs % 1000; 445 last_seen_msecs = last_seen_msecs % 1000;
459 446
460 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 447 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
461 orig_node->orig, last_seen_secs, 448 orig_node->orig, last_seen_secs,
462 last_seen_msecs, neigh_node->tq_avg, 449 last_seen_msecs, neigh_node->tq_avg,
463 neigh_node->addr, 450 neigh_node->addr,
464 neigh_node->if_incoming->net_dev->name); 451 neigh_node->if_incoming->net_dev->name);
465 452
466 hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp, 453 hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
467 &orig_node->neigh_list, list) { 454 &orig_node->neigh_list, list) {
468 seq_printf(seq, " %pM (%3i)", 455 seq_printf(seq, " %pM (%3i)",
469 neigh_node_tmp->addr, 456 neigh_node_tmp->addr,
470 neigh_node_tmp->tq_avg); 457 neigh_node_tmp->tq_avg);
471 } 458 }
472 459
473 seq_printf(seq, "\n"); 460 seq_printf(seq, "\n");
474 batman_count++; 461 batman_count++;
475 462
476 next: 463 next:
477 batadv_neigh_node_free_ref(neigh_node); 464 batadv_neigh_node_free_ref(neigh_node);
478 } 465 }
479 rcu_read_unlock(); 466 rcu_read_unlock();
480 } 467 }
481 468
482 if (batman_count == 0) 469 if (batman_count == 0)
483 seq_printf(seq, "No batman nodes in range ...\n"); 470 seq_printf(seq, "No batman nodes in range ...\n");
484 471
485 out: 472 out:
486 if (primary_if) 473 if (primary_if)
487 batadv_hardif_free_ref(primary_if); 474 batadv_hardif_free_ref(primary_if);
488 return ret; 475 return 0;
489 } 476 }
490 477
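The hunk above replaces the two open-coded checks (no primary interface selected / primary interface not active) at the top of batadv_orig_seq_print_text() with a single call to the batadv_seq_print_text_primary_if_get() helper newly declared in main.h. The helper's body lives in main.c and is not shown in these hunks; the sketch below is only a plausible reconstruction, pieced together from the checks and messages it replaces:

    struct batadv_hard_iface *
    batadv_seq_print_text_primary_if_get(struct seq_file *seq)
    {
            struct net_device *net_dev = (struct net_device *)seq->private;
            struct batadv_priv *bat_priv = netdev_priv(net_dev);
            struct batadv_hard_iface *primary_if;

            primary_if = batadv_primary_if_get_selected(bat_priv);
            if (!primary_if) {
                    seq_printf(seq,
                               "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                               net_dev->name);
                    goto out;
            }

            if (primary_if->if_status == BATADV_IF_ACTIVE)
                    goto out;

            /* report the inactive primary interface and drop the reference */
            seq_printf(seq,
                       "BATMAN mesh %s disabled - primary interface not active\n",
                       net_dev->name);
            batadv_hardif_free_ref(primary_if);
            primary_if = NULL;

    out:
            return primary_if;
    }

With such a helper, every *_seq_print_text() caller can reduce its preamble to the "if (!primary_if) goto out;" pattern seen above and still release the reference in its own out: label.
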
491 static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node, 478 static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
492 int max_if_num) 479 int max_if_num)
493 { 480 {
494 void *data_ptr; 481 void *data_ptr;
495 size_t data_size, old_size; 482 size_t data_size, old_size;
496 483
497 data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS; 484 data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
498 old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS; 485 old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
499 data_ptr = kmalloc(data_size, GFP_ATOMIC); 486 data_ptr = kmalloc(data_size, GFP_ATOMIC);
500 if (!data_ptr) 487 if (!data_ptr)
501 return -ENOMEM; 488 return -ENOMEM;
502 489
503 memcpy(data_ptr, orig_node->bcast_own, old_size); 490 memcpy(data_ptr, orig_node->bcast_own, old_size);
504 kfree(orig_node->bcast_own); 491 kfree(orig_node->bcast_own);
505 orig_node->bcast_own = data_ptr; 492 orig_node->bcast_own = data_ptr;
506 493
507 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 494 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
508 if (!data_ptr) 495 if (!data_ptr)
509 return -ENOMEM; 496 return -ENOMEM;
510 497
511 memcpy(data_ptr, orig_node->bcast_own_sum, 498 memcpy(data_ptr, orig_node->bcast_own_sum,
512 (max_if_num - 1) * sizeof(uint8_t)); 499 (max_if_num - 1) * sizeof(uint8_t));
513 kfree(orig_node->bcast_own_sum); 500 kfree(orig_node->bcast_own_sum);
514 orig_node->bcast_own_sum = data_ptr; 501 orig_node->bcast_own_sum = data_ptr;
515 502
516 return 0; 503 return 0;
517 } 504 }
518 505
519 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface, 506 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
520 int max_if_num) 507 int max_if_num)
521 { 508 {
522 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 509 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
523 struct batadv_hashtable *hash = bat_priv->orig_hash; 510 struct batadv_hashtable *hash = bat_priv->orig_hash;
524 struct hlist_node *node; 511 struct hlist_node *node;
525 struct hlist_head *head; 512 struct hlist_head *head;
526 struct batadv_orig_node *orig_node; 513 struct batadv_orig_node *orig_node;
527 uint32_t i; 514 uint32_t i;
528 int ret; 515 int ret;
529 516
530 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 517 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
531 * if_num 518 * if_num
532 */ 519 */
533 for (i = 0; i < hash->size; i++) { 520 for (i = 0; i < hash->size; i++) {
534 head = &hash->table[i]; 521 head = &hash->table[i];
535 522
536 rcu_read_lock(); 523 rcu_read_lock();
537 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 524 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
538 spin_lock_bh(&orig_node->ogm_cnt_lock); 525 spin_lock_bh(&orig_node->ogm_cnt_lock);
539 ret = batadv_orig_node_add_if(orig_node, max_if_num); 526 ret = batadv_orig_node_add_if(orig_node, max_if_num);
540 spin_unlock_bh(&orig_node->ogm_cnt_lock); 527 spin_unlock_bh(&orig_node->ogm_cnt_lock);
541 528
542 if (ret == -ENOMEM) 529 if (ret == -ENOMEM)
543 goto err; 530 goto err;
544 } 531 }
545 rcu_read_unlock(); 532 rcu_read_unlock();
546 } 533 }
547 534
548 return 0; 535 return 0;
549 536
550 err: 537 err:
551 rcu_read_unlock(); 538 rcu_read_unlock();
552 return -ENOMEM; 539 return -ENOMEM;
553 } 540 }
554 541
555 static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node, 542 static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
556 int max_if_num, int del_if_num) 543 int max_if_num, int del_if_num)
557 { 544 {
558 void *data_ptr = NULL; 545 void *data_ptr = NULL;
559 int chunk_size; 546 int chunk_size;
560 547
561 /* last interface was removed */ 548 /* last interface was removed */
562 if (max_if_num == 0) 549 if (max_if_num == 0)
563 goto free_bcast_own; 550 goto free_bcast_own;
564 551
565 chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS; 552 chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
566 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); 553 data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
567 if (!data_ptr) 554 if (!data_ptr)
568 return -ENOMEM; 555 return -ENOMEM;
569 556
570 /* copy first part */ 557 /* copy first part */
571 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); 558 memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
572 559
573 /* copy second part */ 560 /* copy second part */
574 memcpy((char *)data_ptr + del_if_num * chunk_size, 561 memcpy((char *)data_ptr + del_if_num * chunk_size,
575 orig_node->bcast_own + ((del_if_num + 1) * chunk_size), 562 orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
576 (max_if_num - del_if_num) * chunk_size); 563 (max_if_num - del_if_num) * chunk_size);
577 564
578 free_bcast_own: 565 free_bcast_own:
579 kfree(orig_node->bcast_own); 566 kfree(orig_node->bcast_own);
580 orig_node->bcast_own = data_ptr; 567 orig_node->bcast_own = data_ptr;
581 568
582 if (max_if_num == 0) 569 if (max_if_num == 0)
583 goto free_own_sum; 570 goto free_own_sum;
584 571
585 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); 572 data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
586 if (!data_ptr) 573 if (!data_ptr)
587 return -ENOMEM; 574 return -ENOMEM;
588 575
589 memcpy(data_ptr, orig_node->bcast_own_sum, 576 memcpy(data_ptr, orig_node->bcast_own_sum,
590 del_if_num * sizeof(uint8_t)); 577 del_if_num * sizeof(uint8_t));
591 578
592 memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t), 579 memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
593 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)), 580 orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
594 (max_if_num - del_if_num) * sizeof(uint8_t)); 581 (max_if_num - del_if_num) * sizeof(uint8_t));
595 582
596 free_own_sum: 583 free_own_sum:
597 kfree(orig_node->bcast_own_sum); 584 kfree(orig_node->bcast_own_sum);
598 orig_node->bcast_own_sum = data_ptr; 585 orig_node->bcast_own_sum = data_ptr;
599 586
600 return 0; 587 return 0;
601 } 588 }
602 589
603 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface, 590 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
604 int max_if_num) 591 int max_if_num)
605 { 592 {
606 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 593 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
607 struct batadv_hashtable *hash = bat_priv->orig_hash; 594 struct batadv_hashtable *hash = bat_priv->orig_hash;
608 struct hlist_node *node; 595 struct hlist_node *node;
609 struct hlist_head *head; 596 struct hlist_head *head;
610 struct batadv_hard_iface *hard_iface_tmp; 597 struct batadv_hard_iface *hard_iface_tmp;
611 struct batadv_orig_node *orig_node; 598 struct batadv_orig_node *orig_node;
612 uint32_t i; 599 uint32_t i;
613 int ret; 600 int ret;
614 601
615 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on 602 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
616 * if_num 603 * if_num
617 */ 604 */
618 for (i = 0; i < hash->size; i++) { 605 for (i = 0; i < hash->size; i++) {
619 head = &hash->table[i]; 606 head = &hash->table[i];
620 607
621 rcu_read_lock(); 608 rcu_read_lock();
622 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 609 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
623 spin_lock_bh(&orig_node->ogm_cnt_lock); 610 spin_lock_bh(&orig_node->ogm_cnt_lock);
624 ret = batadv_orig_node_del_if(orig_node, max_if_num, 611 ret = batadv_orig_node_del_if(orig_node, max_if_num,
625 hard_iface->if_num); 612 hard_iface->if_num);
626 spin_unlock_bh(&orig_node->ogm_cnt_lock); 613 spin_unlock_bh(&orig_node->ogm_cnt_lock);
627 614
628 if (ret == -ENOMEM) 615 if (ret == -ENOMEM)
629 goto err; 616 goto err;
630 } 617 }
631 rcu_read_unlock(); 618 rcu_read_unlock();
632 } 619 }
633 620
634 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ 621 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
635 rcu_read_lock(); 622 rcu_read_lock();
636 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) { 623 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
637 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE) 624 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
638 continue; 625 continue;
639 626
640 if (hard_iface == hard_iface_tmp) 627 if (hard_iface == hard_iface_tmp)
641 continue; 628 continue;
642 629
643 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface) 630 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
644 continue; 631 continue;
645 632
646 if (hard_iface_tmp->if_num > hard_iface->if_num) 633 if (hard_iface_tmp->if_num > hard_iface->if_num)
647 hard_iface_tmp->if_num--; 634 hard_iface_tmp->if_num--;
648 } 635 }
649 rcu_read_unlock(); 636 rcu_read_unlock();
650 637
651 hard_iface->if_num = -1; 638 hard_iface->if_num = -1;
652 return 0; 639 return 0;
653 640
654 err: 641 err:
655 rcu_read_unlock(); 642 rcu_read_unlock();
656 return -ENOMEM; 643 return -ENOMEM;
657 } 644 }
658 645
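batadv_orig_node_del_if() above removes one interface slot from the flat bcast_own buffer with two memcpy() calls: everything before the deleted slot, then everything after it. A minimal userspace sketch of the same copy-around-the-hole pattern (variable names are illustrative, not taken from the kernel source):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        /* one counter slot per interface; the old array holds max_if_num + 1 slots */
        unsigned long old_counts[4] = { 10, 20, 30, 40 };
        int max_if_num = 3;   /* interfaces remaining after the removal */
        int del_if_num = 1;   /* index of the removed interface */
        size_t chunk = sizeof(unsigned long);
        unsigned long *new_counts;

        new_counts = malloc(max_if_num * chunk);
        if (!new_counts)
            return 1;

        /* copy first part: slots before the removed interface */
        memcpy(new_counts, old_counts, del_if_num * chunk);
        /* copy second part: slots after the removed interface */
        memcpy((char *)new_counts + del_if_num * chunk,
               (char *)old_counts + (del_if_num + 1) * chunk,
               (max_if_num - del_if_num) * chunk);

        for (int i = 0; i < max_if_num; i++)
            printf("%lu\n", new_counts[i]); /* prints 10, 30, 40 */

        free(new_counts);
        return 0;
    }
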
net/batman-adv/translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: 1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 * 2 *
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli 3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation. 7 * License as published by the Free Software Foundation.
8 * 8 *
9 * This program is distributed in the hope that it will be useful, but 9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details. 12 * General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA 17 * 02110-1301, USA
18 */ 18 */
19 19
20 #include "main.h" 20 #include "main.h"
21 #include "translation-table.h" 21 #include "translation-table.h"
22 #include "soft-interface.h" 22 #include "soft-interface.h"
23 #include "hard-interface.h" 23 #include "hard-interface.h"
24 #include "send.h" 24 #include "send.h"
25 #include "hash.h" 25 #include "hash.h"
26 #include "originator.h" 26 #include "originator.h"
27 #include "routing.h" 27 #include "routing.h"
28 #include "bridge_loop_avoidance.h" 28 #include "bridge_loop_avoidance.h"
29 29
30 #include <linux/crc16.h> 30 #include <linux/crc16.h>
31 31
32 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, 32 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
33 struct batadv_orig_node *orig_node); 33 struct batadv_orig_node *orig_node);
34 static void batadv_tt_purge(struct work_struct *work); 34 static void batadv_tt_purge(struct work_struct *work);
35 static void 35 static void
36 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry); 36 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
37 static void batadv_tt_global_del(struct batadv_priv *bat_priv, 37 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
38 struct batadv_orig_node *orig_node, 38 struct batadv_orig_node *orig_node,
39 const unsigned char *addr, 39 const unsigned char *addr,
40 const char *message, bool roaming); 40 const char *message, bool roaming);
41 41
42 /* returns 1 if they are the same mac addr */ 42 /* returns 1 if they are the same mac addr */
43 static int batadv_compare_tt(const struct hlist_node *node, const void *data2) 43 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
44 { 44 {
45 const void *data1 = container_of(node, struct batadv_tt_common_entry, 45 const void *data1 = container_of(node, struct batadv_tt_common_entry,
46 hash_entry); 46 hash_entry);
47 47
48 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 48 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
49 } 49 }
50 50
51 static void batadv_tt_start_timer(struct batadv_priv *bat_priv) 51 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
52 { 52 {
53 INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge); 53 INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
54 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work, 54 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
55 msecs_to_jiffies(5000)); 55 msecs_to_jiffies(5000));
56 } 56 }
57 57
58 static struct batadv_tt_common_entry * 58 static struct batadv_tt_common_entry *
59 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data) 59 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
60 { 60 {
61 struct hlist_head *head; 61 struct hlist_head *head;
62 struct hlist_node *node; 62 struct hlist_node *node;
63 struct batadv_tt_common_entry *tt_common_entry; 63 struct batadv_tt_common_entry *tt_common_entry;
64 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL; 64 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
65 uint32_t index; 65 uint32_t index;
66 66
67 if (!hash) 67 if (!hash)
68 return NULL; 68 return NULL;
69 69
70 index = batadv_choose_orig(data, hash->size); 70 index = batadv_choose_orig(data, hash->size);
71 head = &hash->table[index]; 71 head = &hash->table[index];
72 72
73 rcu_read_lock(); 73 rcu_read_lock();
74 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { 74 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
75 if (!batadv_compare_eth(tt_common_entry, data)) 75 if (!batadv_compare_eth(tt_common_entry, data))
76 continue; 76 continue;
77 77
78 if (!atomic_inc_not_zero(&tt_common_entry->refcount)) 78 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
79 continue; 79 continue;
80 80
81 tt_common_entry_tmp = tt_common_entry; 81 tt_common_entry_tmp = tt_common_entry;
82 break; 82 break;
83 } 83 }
84 rcu_read_unlock(); 84 rcu_read_unlock();
85 85
86 return tt_common_entry_tmp; 86 return tt_common_entry_tmp;
87 } 87 }
88 88
89 static struct batadv_tt_local_entry * 89 static struct batadv_tt_local_entry *
90 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data) 90 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
91 { 91 {
92 struct batadv_tt_common_entry *tt_common_entry; 92 struct batadv_tt_common_entry *tt_common_entry;
93 struct batadv_tt_local_entry *tt_local_entry = NULL; 93 struct batadv_tt_local_entry *tt_local_entry = NULL;
94 94
95 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data); 95 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
96 if (tt_common_entry) 96 if (tt_common_entry)
97 tt_local_entry = container_of(tt_common_entry, 97 tt_local_entry = container_of(tt_common_entry,
98 struct batadv_tt_local_entry, 98 struct batadv_tt_local_entry,
99 common); 99 common);
100 return tt_local_entry; 100 return tt_local_entry;
101 } 101 }
102 102
103 static struct batadv_tt_global_entry * 103 static struct batadv_tt_global_entry *
104 batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data) 104 batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
105 { 105 {
106 struct batadv_tt_common_entry *tt_common_entry; 106 struct batadv_tt_common_entry *tt_common_entry;
107 struct batadv_tt_global_entry *tt_global_entry = NULL; 107 struct batadv_tt_global_entry *tt_global_entry = NULL;
108 108
109 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data); 109 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
110 if (tt_common_entry) 110 if (tt_common_entry)
111 tt_global_entry = container_of(tt_common_entry, 111 tt_global_entry = container_of(tt_common_entry,
112 struct batadv_tt_global_entry, 112 struct batadv_tt_global_entry,
113 common); 113 common);
114 return tt_global_entry; 114 return tt_global_entry;
115 115
116 } 116 }
117 117
118 static void 118 static void
119 batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry) 119 batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
120 { 120 {
121 if (atomic_dec_and_test(&tt_local_entry->common.refcount)) 121 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
122 kfree_rcu(tt_local_entry, common.rcu); 122 kfree_rcu(tt_local_entry, common.rcu);
123 } 123 }
124 124
125 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) 125 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
126 { 126 {
127 struct batadv_tt_common_entry *tt_common_entry; 127 struct batadv_tt_common_entry *tt_common_entry;
128 struct batadv_tt_global_entry *tt_global_entry; 128 struct batadv_tt_global_entry *tt_global_entry;
129 129
130 tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu); 130 tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
131 tt_global_entry = container_of(tt_common_entry, 131 tt_global_entry = container_of(tt_common_entry,
132 struct batadv_tt_global_entry, common); 132 struct batadv_tt_global_entry, common);
133 133
134 kfree(tt_global_entry); 134 kfree(tt_global_entry);
135 } 135 }
136 136
137 static void 137 static void
138 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry) 138 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
139 { 139 {
140 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) { 140 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
141 batadv_tt_global_del_orig_list(tt_global_entry); 141 batadv_tt_global_del_orig_list(tt_global_entry);
142 call_rcu(&tt_global_entry->common.rcu, 142 call_rcu(&tt_global_entry->common.rcu,
143 batadv_tt_global_entry_free_rcu); 143 batadv_tt_global_entry_free_rcu);
144 } 144 }
145 } 145 }
146 146
147 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) 147 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
148 { 148 {
149 struct batadv_tt_orig_list_entry *orig_entry; 149 struct batadv_tt_orig_list_entry *orig_entry;
150 150
151 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); 151 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
152 batadv_orig_node_free_ref(orig_entry->orig_node); 152 batadv_orig_node_free_ref(orig_entry->orig_node);
153 kfree(orig_entry); 153 kfree(orig_entry);
154 } 154 }
155 155
156 static void 156 static void
157 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry) 157 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
158 { 158 {
159 if (!atomic_dec_and_test(&orig_entry->refcount)) 159 if (!atomic_dec_and_test(&orig_entry->refcount))
160 return; 160 return;
161 /* to avoid race conditions, immediately decrease the tt counter */ 161 /* to avoid race conditions, immediately decrease the tt counter */
162 atomic_dec(&orig_entry->orig_node->tt_size); 162 atomic_dec(&orig_entry->orig_node->tt_size);
163 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); 163 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
164 } 164 }
165 165
166 static void batadv_tt_local_event(struct batadv_priv *bat_priv, 166 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
167 const uint8_t *addr, uint8_t flags) 167 const uint8_t *addr, uint8_t flags)
168 { 168 {
169 struct batadv_tt_change_node *tt_change_node, *entry, *safe; 169 struct batadv_tt_change_node *tt_change_node, *entry, *safe;
170 bool event_removed = false; 170 bool event_removed = false;
171 bool del_op_requested, del_op_entry; 171 bool del_op_requested, del_op_entry;
172 172
173 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC); 173 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
174 174
175 if (!tt_change_node) 175 if (!tt_change_node)
176 return; 176 return;
177 177
178 tt_change_node->change.flags = flags; 178 tt_change_node->change.flags = flags;
179 memcpy(tt_change_node->change.addr, addr, ETH_ALEN); 179 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
180 180
181 del_op_requested = flags & BATADV_TT_CLIENT_DEL; 181 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
182 182
183 /* check for ADD+DEL or DEL+ADD events */ 183 /* check for ADD+DEL or DEL+ADD events */
184 spin_lock_bh(&bat_priv->tt.changes_list_lock); 184 spin_lock_bh(&bat_priv->tt.changes_list_lock);
185 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, 185 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
186 list) { 186 list) {
187 if (!batadv_compare_eth(entry->change.addr, addr)) 187 if (!batadv_compare_eth(entry->change.addr, addr))
188 continue; 188 continue;
189 189
190 /* DEL+ADD in the same orig interval have no effect and can be 190 /* DEL+ADD in the same orig interval have no effect and can be
191 * removed to avoid silly behaviour on the receiver side. The 191 * removed to avoid silly behaviour on the receiver side. The
192 * other way around (ADD+DEL) can happen in case of roaming of 192 * other way around (ADD+DEL) can happen in case of roaming of
193 * a client still in the NEW state. Roaming of NEW clients is 193 * a client still in the NEW state. Roaming of NEW clients is
194 * now possible due to automatic recognition of "temporary" 194 * now possible due to automatic recognition of "temporary"
195 * clients 195 * clients
196 */ 196 */
197 del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL; 197 del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
198 if (!del_op_requested && del_op_entry) 198 if (!del_op_requested && del_op_entry)
199 goto del; 199 goto del;
200 if (del_op_requested && !del_op_entry) 200 if (del_op_requested && !del_op_entry)
201 goto del; 201 goto del;
202 continue; 202 continue;
203 del: 203 del:
204 list_del(&entry->list); 204 list_del(&entry->list);
205 kfree(entry); 205 kfree(entry);
206 kfree(tt_change_node); 206 kfree(tt_change_node);
207 event_removed = true; 207 event_removed = true;
208 goto unlock; 208 goto unlock;
209 } 209 }
210 210
211 /* track the change in the OGM interval list */ 211 /* track the change in the OGM interval list */
212 list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list); 212 list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
213 213
214 unlock: 214 unlock:
215 spin_unlock_bh(&bat_priv->tt.changes_list_lock); 215 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
216 216
217 if (event_removed) 217 if (event_removed)
218 atomic_dec(&bat_priv->tt.local_changes); 218 atomic_dec(&bat_priv->tt.local_changes);
219 else 219 else
220 atomic_inc(&bat_priv->tt.local_changes); 220 atomic_inc(&bat_priv->tt.local_changes);
221 } 221 }
222 222
223 int batadv_tt_len(int changes_num) 223 int batadv_tt_len(int changes_num)
224 { 224 {
225 return changes_num * sizeof(struct batadv_tt_change); 225 return changes_num * sizeof(struct batadv_tt_change);
226 } 226 }
227 227
228 static int batadv_tt_local_init(struct batadv_priv *bat_priv) 228 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
229 { 229 {
230 if (bat_priv->tt.local_hash) 230 if (bat_priv->tt.local_hash)
231 return 0; 231 return 0;
232 232
233 bat_priv->tt.local_hash = batadv_hash_new(1024); 233 bat_priv->tt.local_hash = batadv_hash_new(1024);
234 234
235 if (!bat_priv->tt.local_hash) 235 if (!bat_priv->tt.local_hash)
236 return -ENOMEM; 236 return -ENOMEM;
237 237
238 return 0; 238 return 0;
239 } 239 }
240 240
241 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, 241 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
242 int ifindex) 242 int ifindex)
243 { 243 {
244 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 244 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
245 struct batadv_tt_local_entry *tt_local_entry = NULL; 245 struct batadv_tt_local_entry *tt_local_entry = NULL;
246 struct batadv_tt_global_entry *tt_global_entry = NULL; 246 struct batadv_tt_global_entry *tt_global_entry = NULL;
247 struct hlist_head *head; 247 struct hlist_head *head;
248 struct hlist_node *node; 248 struct hlist_node *node;
249 struct batadv_tt_orig_list_entry *orig_entry; 249 struct batadv_tt_orig_list_entry *orig_entry;
250 int hash_added; 250 int hash_added;
251 251
252 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 252 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
253 253
254 if (tt_local_entry) { 254 if (tt_local_entry) {
255 tt_local_entry->last_seen = jiffies; 255 tt_local_entry->last_seen = jiffies;
256 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */ 256 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
257 tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING; 257 tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
258 goto out; 258 goto out;
259 } 259 }
260 260
261 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC); 261 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
262 if (!tt_local_entry) 262 if (!tt_local_entry)
263 goto out; 263 goto out;
264 264
265 batadv_dbg(BATADV_DBG_TT, bat_priv, 265 batadv_dbg(BATADV_DBG_TT, bat_priv,
266 "Creating new local tt entry: %pM (ttvn: %d)\n", addr, 266 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
267 (uint8_t)atomic_read(&bat_priv->tt.vn)); 267 (uint8_t)atomic_read(&bat_priv->tt.vn));
268 268
269 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); 269 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
270 tt_local_entry->common.flags = BATADV_NO_FLAGS; 270 tt_local_entry->common.flags = BATADV_NO_FLAGS;
271 if (batadv_is_wifi_iface(ifindex)) 271 if (batadv_is_wifi_iface(ifindex))
272 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI; 272 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
273 atomic_set(&tt_local_entry->common.refcount, 2); 273 atomic_set(&tt_local_entry->common.refcount, 2);
274 tt_local_entry->last_seen = jiffies; 274 tt_local_entry->last_seen = jiffies;
275 tt_local_entry->common.added_at = tt_local_entry->last_seen; 275 tt_local_entry->common.added_at = tt_local_entry->last_seen;
276 276
277 /* the batman interface mac address should never be purged */ 277 /* the batman interface mac address should never be purged */
278 if (batadv_compare_eth(addr, soft_iface->dev_addr)) 278 if (batadv_compare_eth(addr, soft_iface->dev_addr))
279 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE; 279 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
280 280
281 /* The local entry has to be marked as NEW to avoid sending it in 281 /* The local entry has to be marked as NEW to avoid sending it in
282 * a full table response going out before the next ttvn increment 282 * a full table response going out before the next ttvn increment
283 * (consistency check) 283 * (consistency check)
284 */ 284 */
285 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW; 285 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
286 286
287 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt, 287 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
288 batadv_choose_orig, 288 batadv_choose_orig,
289 &tt_local_entry->common, 289 &tt_local_entry->common,
290 &tt_local_entry->common.hash_entry); 290 &tt_local_entry->common.hash_entry);
291 291
292 if (unlikely(hash_added != 0)) { 292 if (unlikely(hash_added != 0)) {
293 /* remove the reference for the hash */ 293 /* remove the reference for the hash */
294 batadv_tt_local_entry_free_ref(tt_local_entry); 294 batadv_tt_local_entry_free_ref(tt_local_entry);
295 goto out; 295 goto out;
296 } 296 }
297 297
298 batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags); 298 batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
299 299
300 /* remove address from global hash if present */ 300 /* remove address from global hash if present */
301 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); 301 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
302 302
303 /* Check whether this is a roaming client */ 303 /* Check whether this is a roaming client */
304 if (tt_global_entry) { 304 if (tt_global_entry) {
305 /* These nodes are probably going to update their tt table */ 305 /* These nodes are probably going to update their tt table */
306 head = &tt_global_entry->orig_list; 306 head = &tt_global_entry->orig_list;
307 rcu_read_lock(); 307 rcu_read_lock();
308 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 308 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
309 orig_entry->orig_node->tt_poss_change = true; 309 orig_entry->orig_node->tt_poss_change = true;
310 310
311 batadv_send_roam_adv(bat_priv, 311 batadv_send_roam_adv(bat_priv,
312 tt_global_entry->common.addr, 312 tt_global_entry->common.addr,
313 orig_entry->orig_node); 313 orig_entry->orig_node);
314 } 314 }
315 rcu_read_unlock(); 315 rcu_read_unlock();
316 /* The global entry has to be marked as ROAMING and 316 /* The global entry has to be marked as ROAMING and
317 * has to be kept for consistency purposes 317 * has to be kept for consistency purposes
318 */ 318 */
319 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; 319 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
320 tt_global_entry->roam_at = jiffies; 320 tt_global_entry->roam_at = jiffies;
321 } 321 }
322 out: 322 out:
323 if (tt_local_entry) 323 if (tt_local_entry)
324 batadv_tt_local_entry_free_ref(tt_local_entry); 324 batadv_tt_local_entry_free_ref(tt_local_entry);
325 if (tt_global_entry) 325 if (tt_global_entry)
326 batadv_tt_global_entry_free_ref(tt_global_entry); 326 batadv_tt_global_entry_free_ref(tt_global_entry);
327 } 327 }
328 328
329 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff, 329 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
330 int *packet_buff_len, 330 int *packet_buff_len,
331 int min_packet_len, 331 int min_packet_len,
332 int new_packet_len) 332 int new_packet_len)
333 { 333 {
334 unsigned char *new_buff; 334 unsigned char *new_buff;
335 335
336 new_buff = kmalloc(new_packet_len, GFP_ATOMIC); 336 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
337 337
338 /* keep old buffer if kmalloc should fail */ 338 /* keep old buffer if kmalloc should fail */
339 if (new_buff) { 339 if (new_buff) {
340 memcpy(new_buff, *packet_buff, min_packet_len); 340 memcpy(new_buff, *packet_buff, min_packet_len);
341 kfree(*packet_buff); 341 kfree(*packet_buff);
342 *packet_buff = new_buff; 342 *packet_buff = new_buff;
343 *packet_buff_len = new_packet_len; 343 *packet_buff_len = new_packet_len;
344 } 344 }
345 } 345 }
346 346
347 static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv, 347 static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
348 unsigned char **packet_buff, 348 unsigned char **packet_buff,
349 int *packet_buff_len, 349 int *packet_buff_len,
350 int min_packet_len) 350 int min_packet_len)
351 { 351 {
352 struct batadv_hard_iface *primary_if; 352 struct batadv_hard_iface *primary_if;
353 int req_len; 353 int req_len;
354 354
355 primary_if = batadv_primary_if_get_selected(bat_priv); 355 primary_if = batadv_primary_if_get_selected(bat_priv);
356 356
357 req_len = min_packet_len; 357 req_len = min_packet_len;
358 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes)); 358 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
359 359
360 /* if we have too many changes for one packet don't send any 360 /* if we have too many changes for one packet don't send any
361 * and wait for the tt table request which will be fragmented 361 * and wait for the tt table request which will be fragmented
362 */ 362 */
363 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu)) 363 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
364 req_len = min_packet_len; 364 req_len = min_packet_len;
365 365
366 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, 366 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
367 min_packet_len, req_len); 367 min_packet_len, req_len);
368 368
369 if (primary_if) 369 if (primary_if)
370 batadv_hardif_free_ref(primary_if); 370 batadv_hardif_free_ref(primary_if);
371 } 371 }
372 372
373 static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv, 373 static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
374 unsigned char **packet_buff, 374 unsigned char **packet_buff,
375 int *packet_buff_len, 375 int *packet_buff_len,
376 int min_packet_len) 376 int min_packet_len)
377 { 377 {
378 struct batadv_tt_change_node *entry, *safe; 378 struct batadv_tt_change_node *entry, *safe;
379 int count = 0, tot_changes = 0, new_len; 379 int count = 0, tot_changes = 0, new_len;
380 unsigned char *tt_buff; 380 unsigned char *tt_buff;
381 381
382 batadv_tt_prepare_packet_buff(bat_priv, packet_buff, 382 batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
383 packet_buff_len, min_packet_len); 383 packet_buff_len, min_packet_len);
384 384
385 new_len = *packet_buff_len - min_packet_len; 385 new_len = *packet_buff_len - min_packet_len;
386 tt_buff = *packet_buff + min_packet_len; 386 tt_buff = *packet_buff + min_packet_len;
387 387
388 if (new_len > 0) 388 if (new_len > 0)
389 tot_changes = new_len / batadv_tt_len(1); 389 tot_changes = new_len / batadv_tt_len(1);
390 390
391 spin_lock_bh(&bat_priv->tt.changes_list_lock); 391 spin_lock_bh(&bat_priv->tt.changes_list_lock);
392 atomic_set(&bat_priv->tt.local_changes, 0); 392 atomic_set(&bat_priv->tt.local_changes, 0);
393 393
394 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, 394 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
395 list) { 395 list) {
396 if (count < tot_changes) { 396 if (count < tot_changes) {
397 memcpy(tt_buff + batadv_tt_len(count), 397 memcpy(tt_buff + batadv_tt_len(count),
398 &entry->change, sizeof(struct batadv_tt_change)); 398 &entry->change, sizeof(struct batadv_tt_change));
399 count++; 399 count++;
400 } 400 }
401 list_del(&entry->list); 401 list_del(&entry->list);
402 kfree(entry); 402 kfree(entry);
403 } 403 }
404 spin_unlock_bh(&bat_priv->tt.changes_list_lock); 404 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
405 405
406 /* Keep the buffer for possible tt_request */ 406 /* Keep the buffer for possible tt_request */
407 spin_lock_bh(&bat_priv->tt.last_changeset_lock); 407 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
408 kfree(bat_priv->tt.last_changeset); 408 kfree(bat_priv->tt.last_changeset);
409 bat_priv->tt.last_changeset_len = 0; 409 bat_priv->tt.last_changeset_len = 0;
410 bat_priv->tt.last_changeset = NULL; 410 bat_priv->tt.last_changeset = NULL;
411 /* check whether this new OGM has no changes due to size problems */ 411 /* check whether this new OGM has no changes due to size problems */
412 if (new_len > 0) { 412 if (new_len > 0) {
413 /* if kmalloc() fails we will reply with the full table 413 /* if kmalloc() fails we will reply with the full table
414 * instead of providing the diff 414 * instead of providing the diff
415 */ 415 */
416 bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC); 416 bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
417 if (bat_priv->tt.last_changeset) { 417 if (bat_priv->tt.last_changeset) {
418 memcpy(bat_priv->tt.last_changeset, tt_buff, new_len); 418 memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
419 bat_priv->tt.last_changeset_len = new_len; 419 bat_priv->tt.last_changeset_len = new_len;
420 } 420 }
421 } 421 }
422 spin_unlock_bh(&bat_priv->tt.last_changeset_lock); 422 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
423 423
424 return count; 424 return count;
425 } 425 }
426 426
427 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) 427 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
428 { 428 {
429 struct net_device *net_dev = (struct net_device *)seq->private; 429 struct net_device *net_dev = (struct net_device *)seq->private;
430 struct batadv_priv *bat_priv = netdev_priv(net_dev); 430 struct batadv_priv *bat_priv = netdev_priv(net_dev);
431 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 431 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
432 struct batadv_tt_common_entry *tt_common_entry; 432 struct batadv_tt_common_entry *tt_common_entry;
433 struct batadv_hard_iface *primary_if; 433 struct batadv_hard_iface *primary_if;
434 struct hlist_node *node; 434 struct hlist_node *node;
435 struct hlist_head *head; 435 struct hlist_head *head;
436 uint32_t i; 436 uint32_t i;
437 int ret = 0;
438 437
439 primary_if = batadv_primary_if_get_selected(bat_priv); 438 primary_if = batadv_seq_print_text_primary_if_get(seq);
440 if (!primary_if) { 439 if (!primary_if)
441 ret = seq_printf(seq,
442 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
443 net_dev->name);
444 goto out; 440 goto out;
445 }
446 441
447 if (primary_if->if_status != BATADV_IF_ACTIVE) {
448 ret = seq_printf(seq,
449 "BATMAN mesh %s disabled - primary interface not active\n",
450 net_dev->name);
451 goto out;
452 }
453
454 seq_printf(seq, 442 seq_printf(seq,
455 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n", 443 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
456 net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn)); 444 net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
457 445
458 for (i = 0; i < hash->size; i++) { 446 for (i = 0; i < hash->size; i++) {
459 head = &hash->table[i]; 447 head = &hash->table[i];
460 448
461 rcu_read_lock(); 449 rcu_read_lock();
462 hlist_for_each_entry_rcu(tt_common_entry, node, 450 hlist_for_each_entry_rcu(tt_common_entry, node,
463 head, hash_entry) { 451 head, hash_entry) {
464 seq_printf(seq, " * %pM [%c%c%c%c%c]\n", 452 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
465 tt_common_entry->addr, 453 tt_common_entry->addr,
466 (tt_common_entry->flags & 454 (tt_common_entry->flags &
467 BATADV_TT_CLIENT_ROAM ? 'R' : '.'), 455 BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
468 (tt_common_entry->flags & 456 (tt_common_entry->flags &
469 BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'), 457 BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
470 (tt_common_entry->flags & 458 (tt_common_entry->flags &
471 BATADV_TT_CLIENT_NEW ? 'N' : '.'), 459 BATADV_TT_CLIENT_NEW ? 'N' : '.'),
472 (tt_common_entry->flags & 460 (tt_common_entry->flags &
473 BATADV_TT_CLIENT_PENDING ? 'X' : '.'), 461 BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
474 (tt_common_entry->flags & 462 (tt_common_entry->flags &
475 BATADV_TT_CLIENT_WIFI ? 'W' : '.')); 463 BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
476 } 464 }
477 rcu_read_unlock(); 465 rcu_read_unlock();
478 } 466 }
479 out: 467 out:
480 if (primary_if) 468 if (primary_if)
481 batadv_hardif_free_ref(primary_if); 469 batadv_hardif_free_ref(primary_if);
482 return ret; 470 return 0;
483 } 471 }
484 472
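Both seq_print_text functions in this file now obtain the primary interface via batadv_seq_print_text_primary_if_get() instead of open-coding the two checks removed above. The helper's definition is not part of this hunk; the following is only a plausible sketch reconstructed from those removed checks, and the real implementation may differ in detail:

static struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		/* no primary interface selected yet */
		seq_printf(seq,
			   "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
			   net_dev->name);
		goto out;
	}

	if (primary_if->if_status == BATADV_IF_ACTIVE)
		goto out;

	/* primary interface exists but is not active: drop the reference
	 * and return NULL so the caller bails out as before
	 */
	seq_printf(seq,
		   "BATMAN mesh %s disabled - primary interface not active\n",
		   net_dev->name);
	batadv_hardif_free_ref(primary_if);
	primary_if = NULL;

out:
	return primary_if;
}

Returning NULL after printing the notice lets each caller keep a single "if (!primary_if) goto out;" path and unconditionally drop the reference at the out label, exactly as the updated callers above and below do.
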
485 static void 473 static void
486 batadv_tt_local_set_pending(struct batadv_priv *bat_priv, 474 batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
487 struct batadv_tt_local_entry *tt_local_entry, 475 struct batadv_tt_local_entry *tt_local_entry,
488 uint16_t flags, const char *message) 476 uint16_t flags, const char *message)
489 { 477 {
490 batadv_tt_local_event(bat_priv, tt_local_entry->common.addr, 478 batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
491 tt_local_entry->common.flags | flags); 479 tt_local_entry->common.flags | flags);
492 480
493 /* The local client has to be marked as "pending to be removed" but has 481 /* The local client has to be marked as "pending to be removed" but has
494 * to be kept in the table in order to send it in a full table 482 * to be kept in the table in order to send it in a full table
495 * response issued before the next ttvn increment (consistency check) 483 * response issued before the next ttvn increment (consistency check)
496 */ 484 */
497 tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING; 485 tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
498 486
499 batadv_dbg(BATADV_DBG_TT, bat_priv, 487 batadv_dbg(BATADV_DBG_TT, bat_priv,
500 "Local tt entry (%pM) pending to be removed: %s\n", 488 "Local tt entry (%pM) pending to be removed: %s\n",
501 tt_local_entry->common.addr, message); 489 tt_local_entry->common.addr, message);
502 } 490 }
503 491
504 void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr, 492 void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
505 const char *message, bool roaming) 493 const char *message, bool roaming)
506 { 494 {
507 struct batadv_tt_local_entry *tt_local_entry = NULL; 495 struct batadv_tt_local_entry *tt_local_entry = NULL;
508 uint16_t flags; 496 uint16_t flags;
509 497
510 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 498 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
511 if (!tt_local_entry) 499 if (!tt_local_entry)
512 goto out; 500 goto out;
513 501
514 flags = BATADV_TT_CLIENT_DEL; 502 flags = BATADV_TT_CLIENT_DEL;
515 if (roaming) 503 if (roaming)
516 flags |= BATADV_TT_CLIENT_ROAM; 504 flags |= BATADV_TT_CLIENT_ROAM;
517 505
518 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message); 506 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
519 out: 507 out:
520 if (tt_local_entry) 508 if (tt_local_entry)
521 batadv_tt_local_entry_free_ref(tt_local_entry); 509 batadv_tt_local_entry_free_ref(tt_local_entry);
522 } 510 }
523 511
524 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, 512 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
525 struct hlist_head *head) 513 struct hlist_head *head)
526 { 514 {
527 struct batadv_tt_local_entry *tt_local_entry; 515 struct batadv_tt_local_entry *tt_local_entry;
528 struct batadv_tt_common_entry *tt_common_entry; 516 struct batadv_tt_common_entry *tt_common_entry;
529 struct hlist_node *node, *node_tmp; 517 struct hlist_node *node, *node_tmp;
530 518
531 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, 519 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
532 hash_entry) { 520 hash_entry) {
533 tt_local_entry = container_of(tt_common_entry, 521 tt_local_entry = container_of(tt_common_entry,
534 struct batadv_tt_local_entry, 522 struct batadv_tt_local_entry,
535 common); 523 common);
536 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE) 524 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
537 continue; 525 continue;
538 526
539 /* entry already marked for deletion */ 527 /* entry already marked for deletion */
540 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) 528 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
541 continue; 529 continue;
542 530
543 if (!batadv_has_timed_out(tt_local_entry->last_seen, 531 if (!batadv_has_timed_out(tt_local_entry->last_seen,
544 BATADV_TT_LOCAL_TIMEOUT)) 532 BATADV_TT_LOCAL_TIMEOUT))
545 continue; 533 continue;
546 534
547 batadv_tt_local_set_pending(bat_priv, tt_local_entry, 535 batadv_tt_local_set_pending(bat_priv, tt_local_entry,
548 BATADV_TT_CLIENT_DEL, "timed out"); 536 BATADV_TT_CLIENT_DEL, "timed out");
549 } 537 }
550 } 538 }
551 539
552 static void batadv_tt_local_purge(struct batadv_priv *bat_priv) 540 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
553 { 541 {
554 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 542 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
555 struct hlist_head *head; 543 struct hlist_head *head;
556 spinlock_t *list_lock; /* protects write access to the hash lists */ 544 spinlock_t *list_lock; /* protects write access to the hash lists */
557 uint32_t i; 545 uint32_t i;
558 546
559 for (i = 0; i < hash->size; i++) { 547 for (i = 0; i < hash->size; i++) {
560 head = &hash->table[i]; 548 head = &hash->table[i];
561 list_lock = &hash->list_locks[i]; 549 list_lock = &hash->list_locks[i];
562 550
563 spin_lock_bh(list_lock); 551 spin_lock_bh(list_lock);
564 batadv_tt_local_purge_list(bat_priv, head); 552 batadv_tt_local_purge_list(bat_priv, head);
565 spin_unlock_bh(list_lock); 553 spin_unlock_bh(list_lock);
566 } 554 }
567 555
568 } 556 }
569 557
570 static void batadv_tt_local_table_free(struct batadv_priv *bat_priv) 558 static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
571 { 559 {
572 struct batadv_hashtable *hash; 560 struct batadv_hashtable *hash;
573 spinlock_t *list_lock; /* protects write access to the hash lists */ 561 spinlock_t *list_lock; /* protects write access to the hash lists */
574 struct batadv_tt_common_entry *tt_common_entry; 562 struct batadv_tt_common_entry *tt_common_entry;
575 struct batadv_tt_local_entry *tt_local; 563 struct batadv_tt_local_entry *tt_local;
576 struct hlist_node *node, *node_tmp; 564 struct hlist_node *node, *node_tmp;
577 struct hlist_head *head; 565 struct hlist_head *head;
578 uint32_t i; 566 uint32_t i;
579 567
580 if (!bat_priv->tt.local_hash) 568 if (!bat_priv->tt.local_hash)
581 return; 569 return;
582 570
583 hash = bat_priv->tt.local_hash; 571 hash = bat_priv->tt.local_hash;
584 572
585 for (i = 0; i < hash->size; i++) { 573 for (i = 0; i < hash->size; i++) {
586 head = &hash->table[i]; 574 head = &hash->table[i];
587 list_lock = &hash->list_locks[i]; 575 list_lock = &hash->list_locks[i];
588 576
589 spin_lock_bh(list_lock); 577 spin_lock_bh(list_lock);
590 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 578 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
591 head, hash_entry) { 579 head, hash_entry) {
592 hlist_del_rcu(node); 580 hlist_del_rcu(node);
593 tt_local = container_of(tt_common_entry, 581 tt_local = container_of(tt_common_entry,
594 struct batadv_tt_local_entry, 582 struct batadv_tt_local_entry,
595 common); 583 common);
596 batadv_tt_local_entry_free_ref(tt_local); 584 batadv_tt_local_entry_free_ref(tt_local);
597 } 585 }
598 spin_unlock_bh(list_lock); 586 spin_unlock_bh(list_lock);
599 } 587 }
600 588
601 batadv_hash_destroy(hash); 589 batadv_hash_destroy(hash);
602 590
603 bat_priv->tt.local_hash = NULL; 591 bat_priv->tt.local_hash = NULL;
604 } 592 }
605 593
606 static int batadv_tt_global_init(struct batadv_priv *bat_priv) 594 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
607 { 595 {
608 if (bat_priv->tt.global_hash) 596 if (bat_priv->tt.global_hash)
609 return 0; 597 return 0;
610 598
611 bat_priv->tt.global_hash = batadv_hash_new(1024); 599 bat_priv->tt.global_hash = batadv_hash_new(1024);
612 600
613 if (!bat_priv->tt.global_hash) 601 if (!bat_priv->tt.global_hash)
614 return -ENOMEM; 602 return -ENOMEM;
615 603
616 return 0; 604 return 0;
617 } 605 }
618 606
619 static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv) 607 static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
620 { 608 {
621 struct batadv_tt_change_node *entry, *safe; 609 struct batadv_tt_change_node *entry, *safe;
622 610
623 spin_lock_bh(&bat_priv->tt.changes_list_lock); 611 spin_lock_bh(&bat_priv->tt.changes_list_lock);
624 612
625 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list, 613 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
626 list) { 614 list) {
627 list_del(&entry->list); 615 list_del(&entry->list);
628 kfree(entry); 616 kfree(entry);
629 } 617 }
630 618
631 atomic_set(&bat_priv->tt.local_changes, 0); 619 atomic_set(&bat_priv->tt.local_changes, 0);
632 spin_unlock_bh(&bat_priv->tt.changes_list_lock); 620 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
633 } 621 }
634 622
635 /* retrieves the orig_tt_list_entry belonging to orig_node from the 623 /* retrieves the orig_tt_list_entry belonging to orig_node from the
636 * batadv_tt_global_entry list 624 * batadv_tt_global_entry list
637 * 625 *
638 * returns it with an increased refcounter, NULL if not found 626 * returns it with an increased refcounter, NULL if not found
639 */ 627 */
640 static struct batadv_tt_orig_list_entry * 628 static struct batadv_tt_orig_list_entry *
641 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry, 629 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
642 const struct batadv_orig_node *orig_node) 630 const struct batadv_orig_node *orig_node)
643 { 631 {
644 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL; 632 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
645 const struct hlist_head *head; 633 const struct hlist_head *head;
646 struct hlist_node *node; 634 struct hlist_node *node;
647 635
648 rcu_read_lock(); 636 rcu_read_lock();
649 head = &entry->orig_list; 637 head = &entry->orig_list;
650 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) { 638 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
651 if (tmp_orig_entry->orig_node != orig_node) 639 if (tmp_orig_entry->orig_node != orig_node)
652 continue; 640 continue;
653 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount)) 641 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
654 continue; 642 continue;
655 643
656 orig_entry = tmp_orig_entry; 644 orig_entry = tmp_orig_entry;
657 break; 645 break;
658 } 646 }
659 rcu_read_unlock(); 647 rcu_read_unlock();
660 648
661 return orig_entry; 649 return orig_entry;
662 } 650 }
663 651
664 /* find out if an orig_node is already in the list of a tt_global_entry. 652 /* find out if an orig_node is already in the list of a tt_global_entry.
665 * returns true if found, false otherwise 653 * returns true if found, false otherwise
666 */ 654 */
667 static bool 655 static bool
668 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry, 656 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
669 const struct batadv_orig_node *orig_node) 657 const struct batadv_orig_node *orig_node)
670 { 658 {
671 struct batadv_tt_orig_list_entry *orig_entry; 659 struct batadv_tt_orig_list_entry *orig_entry;
672 bool found = false; 660 bool found = false;
673 661
674 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node); 662 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
675 if (orig_entry) { 663 if (orig_entry) {
676 found = true; 664 found = true;
677 batadv_tt_orig_list_entry_free_ref(orig_entry); 665 batadv_tt_orig_list_entry_free_ref(orig_entry);
678 } 666 }
679 667
680 return found; 668 return found;
681 } 669 }
682 670
683 static void 671 static void
684 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, 672 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
685 struct batadv_orig_node *orig_node, int ttvn) 673 struct batadv_orig_node *orig_node, int ttvn)
686 { 674 {
687 struct batadv_tt_orig_list_entry *orig_entry; 675 struct batadv_tt_orig_list_entry *orig_entry;
688 676
689 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); 677 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
690 if (orig_entry) { 678 if (orig_entry) {
691 /* refresh the ttvn: the current value could be a bogus one that 679 /* refresh the ttvn: the current value could be a bogus one that
692 * was added during a "temporary client detection" 680 * was added during a "temporary client detection"
693 */ 681 */
694 orig_entry->ttvn = ttvn; 682 orig_entry->ttvn = ttvn;
695 goto out; 683 goto out;
696 } 684 }
697 685
698 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC); 686 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
699 if (!orig_entry) 687 if (!orig_entry)
700 goto out; 688 goto out;
701 689
702 INIT_HLIST_NODE(&orig_entry->list); 690 INIT_HLIST_NODE(&orig_entry->list);
703 atomic_inc(&orig_node->refcount); 691 atomic_inc(&orig_node->refcount);
704 atomic_inc(&orig_node->tt_size); 692 atomic_inc(&orig_node->tt_size);
705 orig_entry->orig_node = orig_node; 693 orig_entry->orig_node = orig_node;
706 orig_entry->ttvn = ttvn; 694 orig_entry->ttvn = ttvn;
707 atomic_set(&orig_entry->refcount, 2); 695 atomic_set(&orig_entry->refcount, 2);
708 696
709 spin_lock_bh(&tt_global->list_lock); 697 spin_lock_bh(&tt_global->list_lock);
710 hlist_add_head_rcu(&orig_entry->list, 698 hlist_add_head_rcu(&orig_entry->list,
711 &tt_global->orig_list); 699 &tt_global->orig_list);
712 spin_unlock_bh(&tt_global->list_lock); 700 spin_unlock_bh(&tt_global->list_lock);
713 out: 701 out:
714 if (orig_entry) 702 if (orig_entry)
715 batadv_tt_orig_list_entry_free_ref(orig_entry); 703 batadv_tt_orig_list_entry_free_ref(orig_entry);
716 } 704 }
717 705
718 /* caller must hold orig_node refcount */ 706 /* caller must hold orig_node refcount */
719 int batadv_tt_global_add(struct batadv_priv *bat_priv, 707 int batadv_tt_global_add(struct batadv_priv *bat_priv,
720 struct batadv_orig_node *orig_node, 708 struct batadv_orig_node *orig_node,
721 const unsigned char *tt_addr, uint8_t flags, 709 const unsigned char *tt_addr, uint8_t flags,
722 uint8_t ttvn) 710 uint8_t ttvn)
723 { 711 {
724 struct batadv_tt_global_entry *tt_global_entry = NULL; 712 struct batadv_tt_global_entry *tt_global_entry = NULL;
725 int ret = 0; 713 int ret = 0;
726 int hash_added; 714 int hash_added;
727 struct batadv_tt_common_entry *common; 715 struct batadv_tt_common_entry *common;
728 716
729 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr); 717 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
730 718
731 if (!tt_global_entry) { 719 if (!tt_global_entry) {
732 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC); 720 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
733 if (!tt_global_entry) 721 if (!tt_global_entry)
734 goto out; 722 goto out;
735 723
736 common = &tt_global_entry->common; 724 common = &tt_global_entry->common;
737 memcpy(common->addr, tt_addr, ETH_ALEN); 725 memcpy(common->addr, tt_addr, ETH_ALEN);
738 726
739 common->flags = flags; 727 common->flags = flags;
740 tt_global_entry->roam_at = 0; 728 tt_global_entry->roam_at = 0;
741 atomic_set(&common->refcount, 2); 729 atomic_set(&common->refcount, 2);
742 common->added_at = jiffies; 730 common->added_at = jiffies;
743 731
744 INIT_HLIST_HEAD(&tt_global_entry->orig_list); 732 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
745 spin_lock_init(&tt_global_entry->list_lock); 733 spin_lock_init(&tt_global_entry->list_lock);
746 734
747 hash_added = batadv_hash_add(bat_priv->tt.global_hash, 735 hash_added = batadv_hash_add(bat_priv->tt.global_hash,
748 batadv_compare_tt, 736 batadv_compare_tt,
749 batadv_choose_orig, common, 737 batadv_choose_orig, common,
750 &common->hash_entry); 738 &common->hash_entry);
751 739
752 if (unlikely(hash_added != 0)) { 740 if (unlikely(hash_added != 0)) {
753 /* remove the reference for the hash */ 741 /* remove the reference for the hash */
754 batadv_tt_global_entry_free_ref(tt_global_entry); 742 batadv_tt_global_entry_free_ref(tt_global_entry);
755 goto out_remove; 743 goto out_remove;
756 } 744 }
757 } else { 745 } else {
758 /* If there is already a global entry, we can use this one for 746 /* If there is already a global entry, we can use this one for
759 * our processing. 747 * our processing.
760 * But if we are trying to add a temporary client we can exit 748 * But if we are trying to add a temporary client we can exit
761 * directly because the temporary information should never 749 * directly because the temporary information should never
762 * override any already known client state (whatever it is) 750 * override any already known client state (whatever it is)
763 */ 751 */
764 if (flags & BATADV_TT_CLIENT_TEMP) 752 if (flags & BATADV_TT_CLIENT_TEMP)
765 goto out; 753 goto out;
766 754
767 /* if the client was temporarily added before receiving the first 755 /* if the client was temporarily added before receiving the first
768 * OGM announcing it, we have to clear the TEMP flag 756 * OGM announcing it, we have to clear the TEMP flag
769 */ 757 */
770 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP; 758 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;
771 759
772 /* If the BATADV_TT_CLIENT_ROAM flag is set, there is only 760 /* If the BATADV_TT_CLIENT_ROAM flag is set, there is only
773 * one originator left in the list and we previously received a 761 * one originator left in the list and we previously received a
774 * delete + roaming change for this originator. 762 * delete + roaming change for this originator.
775 * 763 *
776 * We should first delete the old originator before adding the 764 * We should first delete the old originator before adding the
777 * new one. 765 * new one.
778 */ 766 */
779 if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) { 767 if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
780 batadv_tt_global_del_orig_list(tt_global_entry); 768 batadv_tt_global_del_orig_list(tt_global_entry);
781 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM; 769 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
782 tt_global_entry->roam_at = 0; 770 tt_global_entry->roam_at = 0;
783 } 771 }
784 } 772 }
785 /* add the new orig_entry (if needed) or update it */ 773 /* add the new orig_entry (if needed) or update it */
786 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn); 774 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
787 775
788 batadv_dbg(BATADV_DBG_TT, bat_priv, 776 batadv_dbg(BATADV_DBG_TT, bat_priv,
789 "Creating new global tt entry: %pM (via %pM)\n", 777 "Creating new global tt entry: %pM (via %pM)\n",
790 tt_global_entry->common.addr, orig_node->orig); 778 tt_global_entry->common.addr, orig_node->orig);
791 779
792 out_remove: 780 out_remove:
793 /* remove address from local hash if present */ 781 /* remove address from local hash if present */
794 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr, 782 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
795 "global tt received", 783 "global tt received",
796 flags & BATADV_TT_CLIENT_ROAM); 784 flags & BATADV_TT_CLIENT_ROAM);
797 ret = 1; 785 ret = 1;
798 out: 786 out:
799 if (tt_global_entry) 787 if (tt_global_entry)
800 batadv_tt_global_entry_free_ref(tt_global_entry); 788 batadv_tt_global_entry_free_ref(tt_global_entry);
801 return ret; 789 return ret;
802 } 790 }
803 791
804 /* print all orig nodes who announce the address for this global entry. 792 /* print all orig nodes who announce the address for this global entry.
805 * it is assumed that the caller holds rcu_read_lock(); 793 * it is assumed that the caller holds rcu_read_lock();
806 */ 794 */
807 static void 795 static void
808 batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry, 796 batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
809 struct seq_file *seq) 797 struct seq_file *seq)
810 { 798 {
811 struct hlist_head *head; 799 struct hlist_head *head;
812 struct hlist_node *node; 800 struct hlist_node *node;
813 struct batadv_tt_orig_list_entry *orig_entry; 801 struct batadv_tt_orig_list_entry *orig_entry;
814 struct batadv_tt_common_entry *tt_common_entry; 802 struct batadv_tt_common_entry *tt_common_entry;
815 uint16_t flags; 803 uint16_t flags;
816 uint8_t last_ttvn; 804 uint8_t last_ttvn;
817 805
818 tt_common_entry = &tt_global_entry->common; 806 tt_common_entry = &tt_global_entry->common;
819 807
820 head = &tt_global_entry->orig_list; 808 head = &tt_global_entry->orig_list;
821 809
822 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 810 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
823 flags = tt_common_entry->flags; 811 flags = tt_common_entry->flags;
824 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn); 812 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
825 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n", 813 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
826 tt_global_entry->common.addr, orig_entry->ttvn, 814 tt_global_entry->common.addr, orig_entry->ttvn,
827 orig_entry->orig_node->orig, last_ttvn, 815 orig_entry->orig_node->orig, last_ttvn,
828 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'), 816 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
829 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'), 817 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
830 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.')); 818 (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
831 } 819 }
832 } 820 }
833 821
834 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) 822 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
835 { 823 {
836 struct net_device *net_dev = (struct net_device *)seq->private; 824 struct net_device *net_dev = (struct net_device *)seq->private;
837 struct batadv_priv *bat_priv = netdev_priv(net_dev); 825 struct batadv_priv *bat_priv = netdev_priv(net_dev);
838 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 826 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
839 struct batadv_tt_common_entry *tt_common_entry; 827 struct batadv_tt_common_entry *tt_common_entry;
840 struct batadv_tt_global_entry *tt_global; 828 struct batadv_tt_global_entry *tt_global;
841 struct batadv_hard_iface *primary_if; 829 struct batadv_hard_iface *primary_if;
842 struct hlist_node *node; 830 struct hlist_node *node;
843 struct hlist_head *head; 831 struct hlist_head *head;
844 uint32_t i; 832 uint32_t i;
845 int ret = 0;
846 833
847 primary_if = batadv_primary_if_get_selected(bat_priv); 834 primary_if = batadv_seq_print_text_primary_if_get(seq);
848 if (!primary_if) { 835 if (!primary_if)
849 ret = seq_printf(seq,
850 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
851 net_dev->name);
852 goto out; 836 goto out;
853 }
854 837
855 if (primary_if->if_status != BATADV_IF_ACTIVE) {
856 ret = seq_printf(seq,
857 "BATMAN mesh %s disabled - primary interface not active\n",
858 net_dev->name);
859 goto out;
860 }
861
862 seq_printf(seq, 838 seq_printf(seq,
863 "Globally announced TT entries received via the mesh %s\n", 839 "Globally announced TT entries received via the mesh %s\n",
864 net_dev->name); 840 net_dev->name);
865 seq_printf(seq, " %-13s %s %-15s %s %s\n", 841 seq_printf(seq, " %-13s %s %-15s %s %s\n",
866 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags"); 842 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
867 843
868 for (i = 0; i < hash->size; i++) { 844 for (i = 0; i < hash->size; i++) {
869 head = &hash->table[i]; 845 head = &hash->table[i];
870 846
871 rcu_read_lock(); 847 rcu_read_lock();
872 hlist_for_each_entry_rcu(tt_common_entry, node, 848 hlist_for_each_entry_rcu(tt_common_entry, node,
873 head, hash_entry) { 849 head, hash_entry) {
874 tt_global = container_of(tt_common_entry, 850 tt_global = container_of(tt_common_entry,
875 struct batadv_tt_global_entry, 851 struct batadv_tt_global_entry,
876 common); 852 common);
877 batadv_tt_global_print_entry(tt_global, seq); 853 batadv_tt_global_print_entry(tt_global, seq);
878 } 854 }
879 rcu_read_unlock(); 855 rcu_read_unlock();
880 } 856 }
881 out: 857 out:
882 if (primary_if) 858 if (primary_if)
883 batadv_hardif_free_ref(primary_if); 859 batadv_hardif_free_ref(primary_if);
884 return ret; 860 return 0;
885 } 861 }
886 862
887 /* deletes the orig list of a tt_global_entry */ 863 /* deletes the orig list of a tt_global_entry */
888 static void 864 static void
889 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) 865 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
890 { 866 {
891 struct hlist_head *head; 867 struct hlist_head *head;
892 struct hlist_node *node, *safe; 868 struct hlist_node *node, *safe;
893 struct batadv_tt_orig_list_entry *orig_entry; 869 struct batadv_tt_orig_list_entry *orig_entry;
894 870
895 spin_lock_bh(&tt_global_entry->list_lock); 871 spin_lock_bh(&tt_global_entry->list_lock);
896 head = &tt_global_entry->orig_list; 872 head = &tt_global_entry->orig_list;
897 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 873 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
898 hlist_del_rcu(node); 874 hlist_del_rcu(node);
899 batadv_tt_orig_list_entry_free_ref(orig_entry); 875 batadv_tt_orig_list_entry_free_ref(orig_entry);
900 } 876 }
901 spin_unlock_bh(&tt_global_entry->list_lock); 877 spin_unlock_bh(&tt_global_entry->list_lock);
902 878
903 } 879 }
904 880
905 static void 881 static void
906 batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv, 882 batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
907 struct batadv_tt_global_entry *tt_global_entry, 883 struct batadv_tt_global_entry *tt_global_entry,
908 struct batadv_orig_node *orig_node, 884 struct batadv_orig_node *orig_node,
909 const char *message) 885 const char *message)
910 { 886 {
911 struct hlist_head *head; 887 struct hlist_head *head;
912 struct hlist_node *node, *safe; 888 struct hlist_node *node, *safe;
913 struct batadv_tt_orig_list_entry *orig_entry; 889 struct batadv_tt_orig_list_entry *orig_entry;
914 890
915 spin_lock_bh(&tt_global_entry->list_lock); 891 spin_lock_bh(&tt_global_entry->list_lock);
916 head = &tt_global_entry->orig_list; 892 head = &tt_global_entry->orig_list;
917 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) { 893 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
918 if (orig_entry->orig_node == orig_node) { 894 if (orig_entry->orig_node == orig_node) {
919 batadv_dbg(BATADV_DBG_TT, bat_priv, 895 batadv_dbg(BATADV_DBG_TT, bat_priv,
920 "Deleting %pM from global tt entry %pM: %s\n", 896 "Deleting %pM from global tt entry %pM: %s\n",
921 orig_node->orig, 897 orig_node->orig,
922 tt_global_entry->common.addr, message); 898 tt_global_entry->common.addr, message);
923 hlist_del_rcu(node); 899 hlist_del_rcu(node);
924 batadv_tt_orig_list_entry_free_ref(orig_entry); 900 batadv_tt_orig_list_entry_free_ref(orig_entry);
925 } 901 }
926 } 902 }
927 spin_unlock_bh(&tt_global_entry->list_lock); 903 spin_unlock_bh(&tt_global_entry->list_lock);
928 } 904 }
929 905
930 static void 906 static void
931 batadv_tt_global_del_struct(struct batadv_priv *bat_priv, 907 batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
932 struct batadv_tt_global_entry *tt_global_entry, 908 struct batadv_tt_global_entry *tt_global_entry,
933 const char *message) 909 const char *message)
934 { 910 {
935 batadv_dbg(BATADV_DBG_TT, bat_priv, 911 batadv_dbg(BATADV_DBG_TT, bat_priv,
936 "Deleting global tt entry %pM: %s\n", 912 "Deleting global tt entry %pM: %s\n",
937 tt_global_entry->common.addr, message); 913 tt_global_entry->common.addr, message);
938 914
939 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, 915 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
940 batadv_choose_orig, tt_global_entry->common.addr); 916 batadv_choose_orig, tt_global_entry->common.addr);
941 batadv_tt_global_entry_free_ref(tt_global_entry); 917 batadv_tt_global_entry_free_ref(tt_global_entry);
942 918
943 } 919 }
944 920
945 /* If the client is to be deleted, we check if it is the last originator entry 921 /* If the client is to be deleted, we check if it is the last originator entry
946 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the 922 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
947 * timer, otherwise we simply remove the originator scheduled for deletion. 923 * timer, otherwise we simply remove the originator scheduled for deletion.
948 */ 924 */
949 static void 925 static void
950 batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, 926 batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
951 struct batadv_tt_global_entry *tt_global_entry, 927 struct batadv_tt_global_entry *tt_global_entry,
952 struct batadv_orig_node *orig_node, 928 struct batadv_orig_node *orig_node,
953 const char *message) 929 const char *message)
954 { 930 {
955 bool last_entry = true; 931 bool last_entry = true;
956 struct hlist_head *head; 932 struct hlist_head *head;
957 struct hlist_node *node; 933 struct hlist_node *node;
958 struct batadv_tt_orig_list_entry *orig_entry; 934 struct batadv_tt_orig_list_entry *orig_entry;
959 935
960 /* no local entry exists, case 1: 936 /* no local entry exists, case 1:
961 * Check if this is the last one or if other entries exist. 937 * Check if this is the last one or if other entries exist.
962 */ 938 */
963 939
964 rcu_read_lock(); 940 rcu_read_lock();
965 head = &tt_global_entry->orig_list; 941 head = &tt_global_entry->orig_list;
966 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 942 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
967 if (orig_entry->orig_node != orig_node) { 943 if (orig_entry->orig_node != orig_node) {
968 last_entry = false; 944 last_entry = false;
969 break; 945 break;
970 } 946 }
971 } 947 }
972 rcu_read_unlock(); 948 rcu_read_unlock();
973 949
974 if (last_entry) { 950 if (last_entry) {
975 /* it's the last one, mark for roaming. */ 951 /* it's the last one, mark for roaming. */
976 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM; 952 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
977 tt_global_entry->roam_at = jiffies; 953 tt_global_entry->roam_at = jiffies;
978 } else 954 } else
979 /* there is another entry, we can simply delete this 955 /* there is another entry, we can simply delete this
980 * one and can still use the other one. 956 * one and can still use the other one.
981 */ 957 */
982 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry, 958 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
983 orig_node, message); 959 orig_node, message);
984 } 960 }
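
The roaming branch above boils down to one check: is the originator being removed the only one still announcing this client? Below is a minimal userspace sketch of that decision, using hypothetical types and a plain array in place of the RCU-protected orig_list; it is not the kernel code, only the control flow it implements.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the kernel structures */
struct orig { int id; };
struct tt_global { struct orig *origs[8]; int n; bool roam; long roam_at; };

/* true when 'victim' is the only originator left announcing the client */
static bool is_last_entry(const struct tt_global *tt, const struct orig *victim)
{
	for (int i = 0; i < tt->n; i++)
		if (tt->origs[i] != victim)
			return false;	/* some other originator still announces it */
	return true;
}

static void del_roaming(struct tt_global *tt, struct orig *victim, long now)
{
	if (is_last_entry(tt, victim)) {
		/* last announcer: keep the entry, mark it ROAM and start the timer */
		tt->roam = true;
		tt->roam_at = now;
	} else {
		/* other announcers exist: simply drop this originator */
		int j = 0;

		for (int i = 0; i < tt->n; i++)
			if (tt->origs[i] != victim)
				tt->origs[j++] = tt->origs[i];
		tt->n = j;
	}
}

int main(void)
{
	struct orig a = { 1 }, b = { 2 };
	struct tt_global tt = { .origs = { &a, &b }, .n = 2 };

	del_roaming(&tt, &a, 100);	/* b remains, so a is just removed */
	printf("entries=%d roam=%d\n", tt.n, tt.roam);

	del_roaming(&tt, &b, 200);	/* b is the last one: entry marked ROAM */
	printf("entries=%d roam=%d roam_at=%ld\n", tt.n, tt.roam, tt.roam_at);
	return 0;
}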
985 961
986 962
987 963
988 static void batadv_tt_global_del(struct batadv_priv *bat_priv, 964 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
989 struct batadv_orig_node *orig_node, 965 struct batadv_orig_node *orig_node,
990 const unsigned char *addr, 966 const unsigned char *addr,
991 const char *message, bool roaming) 967 const char *message, bool roaming)
992 { 968 {
993 struct batadv_tt_global_entry *tt_global_entry = NULL; 969 struct batadv_tt_global_entry *tt_global_entry = NULL;
994 struct batadv_tt_local_entry *local_entry = NULL; 970 struct batadv_tt_local_entry *local_entry = NULL;
995 971
996 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); 972 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
997 if (!tt_global_entry) 973 if (!tt_global_entry)
998 goto out; 974 goto out;
999 975
1000 if (!roaming) { 976 if (!roaming) {
1001 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry, 977 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
1002 orig_node, message); 978 orig_node, message);
1003 979
1004 if (hlist_empty(&tt_global_entry->orig_list)) 980 if (hlist_empty(&tt_global_entry->orig_list))
1005 batadv_tt_global_del_struct(bat_priv, tt_global_entry, 981 batadv_tt_global_del_struct(bat_priv, tt_global_entry,
1006 message); 982 message);
1007 983
1008 goto out; 984 goto out;
1009 } 985 }
1010 986
1011 /* if we are deleting a global entry due to a roam 987 /* if we are deleting a global entry due to a roam
1012 * event, there are two possibilities: 988 * event, there are two possibilities:
1013 * 1) the client roamed from node A to node B => if there 989 * 1) the client roamed from node A to node B => if there
1014 * is only one originator left for this client, we mark 990 * is only one originator left for this client, we mark
1015 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we 991 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
1016 * wait for node B to claim it. In case of timeout 992 * wait for node B to claim it. In case of timeout
1017 * the entry is purged. 993 * the entry is purged.
1018 * 994 *
1019 * If there are other originators left, we directly delete 995 * If there are other originators left, we directly delete
1020 * the originator. 996 * the originator.
1021 * 2) the client roamed to us => we can directly delete 997 * 2) the client roamed to us => we can directly delete
1022 * the global entry, since it is useless now. 998 * the global entry, since it is useless now.
1023 */ 999 */
1024 local_entry = batadv_tt_local_hash_find(bat_priv, 1000 local_entry = batadv_tt_local_hash_find(bat_priv,
1025 tt_global_entry->common.addr); 1001 tt_global_entry->common.addr);
1026 if (local_entry) { 1002 if (local_entry) {
1027 /* local entry exists, case 2: client roamed to us. */ 1003 /* local entry exists, case 2: client roamed to us. */
1028 batadv_tt_global_del_orig_list(tt_global_entry); 1004 batadv_tt_global_del_orig_list(tt_global_entry);
1029 batadv_tt_global_del_struct(bat_priv, tt_global_entry, message); 1005 batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
1030 } else 1006 } else
1031 /* no local entry exists, case 1: check for roaming */ 1007 /* no local entry exists, case 1: check for roaming */
1032 batadv_tt_global_del_roaming(bat_priv, tt_global_entry, 1008 batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
1033 orig_node, message); 1009 orig_node, message);
1034 1010
1035 1011
1036 out: 1012 out:
1037 if (tt_global_entry) 1013 if (tt_global_entry)
1038 batadv_tt_global_entry_free_ref(tt_global_entry); 1014 batadv_tt_global_entry_free_ref(tt_global_entry);
1039 if (local_entry) 1015 if (local_entry)
1040 batadv_tt_local_entry_free_ref(local_entry); 1016 batadv_tt_local_entry_free_ref(local_entry);
1041 } 1017 }
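
batadv_tt_global_del() picks one of three actions from two facts: whether the deletion was triggered by a roam event and whether a local entry for the same client exists. A compact sketch of that dispatch, with hypothetical names standing in for the helpers called above:

#include <stdbool.h>
#include <stdio.h>

/* possible outcomes of deleting a client from the global table */
enum tt_del_action {
	TT_DEL_ORIG_ONLY,	/* drop one originator (and the entry if it ends up empty) */
	TT_DEL_WHOLE_ENTRY,	/* case 2: the client roamed to us, the entry is useless */
	TT_DEL_ROAM_CHECK,	/* case 1: maybe mark the last announcer as ROAM */
};

/* pick the action from the two facts batadv_tt_global_del() looks at */
static enum tt_del_action pick_del_action(bool roaming, bool local_entry_exists)
{
	if (!roaming)
		return TT_DEL_ORIG_ONLY;
	return local_entry_exists ? TT_DEL_WHOLE_ENTRY : TT_DEL_ROAM_CHECK;
}

int main(void)
{
	printf("plain delete : %d\n", pick_del_action(false, false));
	printf("roamed to us : %d\n", pick_del_action(true, true));
	printf("roamed away  : %d\n", pick_del_action(true, false));
	return 0;
}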
1042 1018
1043 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv, 1019 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1044 struct batadv_orig_node *orig_node, 1020 struct batadv_orig_node *orig_node,
1045 const char *message) 1021 const char *message)
1046 { 1022 {
1047 struct batadv_tt_global_entry *tt_global; 1023 struct batadv_tt_global_entry *tt_global;
1048 struct batadv_tt_common_entry *tt_common_entry; 1024 struct batadv_tt_common_entry *tt_common_entry;
1049 uint32_t i; 1025 uint32_t i;
1050 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1026 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1051 struct hlist_node *node, *safe; 1027 struct hlist_node *node, *safe;
1052 struct hlist_head *head; 1028 struct hlist_head *head;
1053 spinlock_t *list_lock; /* protects write access to the hash lists */ 1029 spinlock_t *list_lock; /* protects write access to the hash lists */
1054 1030
1055 if (!hash) 1031 if (!hash)
1056 return; 1032 return;
1057 1033
1058 for (i = 0; i < hash->size; i++) { 1034 for (i = 0; i < hash->size; i++) {
1059 head = &hash->table[i]; 1035 head = &hash->table[i];
1060 list_lock = &hash->list_locks[i]; 1036 list_lock = &hash->list_locks[i];
1061 1037
1062 spin_lock_bh(list_lock); 1038 spin_lock_bh(list_lock);
1063 hlist_for_each_entry_safe(tt_common_entry, node, safe, 1039 hlist_for_each_entry_safe(tt_common_entry, node, safe,
1064 head, hash_entry) { 1040 head, hash_entry) {
1065 tt_global = container_of(tt_common_entry, 1041 tt_global = container_of(tt_common_entry,
1066 struct batadv_tt_global_entry, 1042 struct batadv_tt_global_entry,
1067 common); 1043 common);
1068 1044
1069 batadv_tt_global_del_orig_entry(bat_priv, tt_global, 1045 batadv_tt_global_del_orig_entry(bat_priv, tt_global,
1070 orig_node, message); 1046 orig_node, message);
1071 1047
1072 if (hlist_empty(&tt_global->orig_list)) { 1048 if (hlist_empty(&tt_global->orig_list)) {
1073 batadv_dbg(BATADV_DBG_TT, bat_priv, 1049 batadv_dbg(BATADV_DBG_TT, bat_priv,
1074 "Deleting global tt entry %pM: %s\n", 1050 "Deleting global tt entry %pM: %s\n",
1075 tt_global->common.addr, message); 1051 tt_global->common.addr, message);
1076 hlist_del_rcu(node); 1052 hlist_del_rcu(node);
1077 batadv_tt_global_entry_free_ref(tt_global); 1053 batadv_tt_global_entry_free_ref(tt_global);
1078 } 1054 }
1079 } 1055 }
1080 spin_unlock_bh(list_lock); 1056 spin_unlock_bh(list_lock);
1081 } 1057 }
1082 orig_node->tt_initialised = false; 1058 orig_node->tt_initialised = false;
1083 } 1059 }
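
batadv_tt_global_del_orig() deletes list members while it is still walking the list, which is only safe because hlist_for_each_entry_safe() caches the next pointer before the current node can be freed. A self-contained userspace sketch of the same pattern on an ordinary singly linked list (no kernel list API, no locking):

#include <stdio.h>
#include <stdlib.h>

struct node { int orig_id; struct node *next; };

/* delete every node announced by 'orig_id' while walking the list; like the
 * safe iterator above, the next pointer is saved before the node is freed
 */
static void del_orig_entries(struct node **head, int orig_id)
{
	struct node **prev = head;
	struct node *cur = *head, *safe;

	for (; cur; cur = safe) {
		safe = cur->next;		/* cache next before freeing */
		if (cur->orig_id == orig_id) {
			*prev = safe;		/* unlink */
			free(cur);
		} else {
			prev = &cur->next;
		}
	}
}

static struct node *push(struct node *head, int id)
{
	struct node *n = malloc(sizeof(*n));

	n->orig_id = id;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL;

	head = push(head, 1);
	head = push(head, 2);
	head = push(head, 1);

	del_orig_entries(&head, 1);
	for (struct node *n = head; n; n = n->next)
		printf("remaining orig %d\n", n->orig_id);	/* only 2 remains */

	while (head) {				/* tidy up */
		struct node *n = head;
		head = head->next;
		free(n);
	}
	return 0;
}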
1084 1060
1085 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global, 1061 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
1086 char **msg) 1062 char **msg)
1087 { 1063 {
1088 bool purge = false; 1064 bool purge = false;
1089 unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT; 1065 unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
1090 unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT; 1066 unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;
1091 1067
1092 if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) && 1068 if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
1093 batadv_has_timed_out(tt_global->roam_at, roam_timeout)) { 1069 batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
1094 purge = true; 1070 purge = true;
1095 *msg = "Roaming timeout\n"; 1071 *msg = "Roaming timeout\n";
1096 } 1072 }
1097 1073
1098 if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) && 1074 if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
1099 batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) { 1075 batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
1100 purge = true; 1076 purge = true;
1101 *msg = "Temporary client timeout\n"; 1077 *msg = "Temporary client timeout\n";
1102 } 1078 }
1103 1079
1104 return purge; 1080 return purge;
1105 } 1081 }
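
The purge predicate keeps an entry unless one of two timers has expired: the roaming timer started when the last originator disappeared, or the lifetime of a temporary client. A userspace sketch with plain millisecond counters and made-up flag values and timeouts (the real constants are BATADV_TT_CLIENT_ROAM_TIMEOUT and BATADV_TT_CLIENT_TEMP_TIMEOUT):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_ROAM  0x01		/* placeholder flag values, not the kernel ones */
#define FLAG_TEMP  0x02

/* userspace stand-in for batadv_has_timed_out(): stamp + timeout is in the past */
static bool has_timed_out(uint64_t now_ms, uint64_t stamp_ms, uint64_t timeout_ms)
{
	return now_ms > stamp_ms + timeout_ms;
}

/* mirror of the purge decision: either timer selects the entry for deletion */
static bool to_purge(uint16_t flags, uint64_t roam_at, uint64_t added_at,
		     uint64_t now, const char **msg)
{
	const uint64_t roam_timeout = 60000;	/* assumed values, in ms */
	const uint64_t temp_timeout = 10000;

	if ((flags & FLAG_ROAM) && has_timed_out(now, roam_at, roam_timeout)) {
		*msg = "Roaming timeout";
		return true;
	}
	if ((flags & FLAG_TEMP) && has_timed_out(now, added_at, temp_timeout)) {
		*msg = "Temporary client timeout";
		return true;
	}
	return false;
}

int main(void)
{
	const char *msg = NULL;

	if (to_purge(FLAG_TEMP, 0, 1000, 20000, &msg))
		printf("purge: %s\n", msg);
	if (!to_purge(FLAG_ROAM, 19000, 0, 20000, &msg))
		printf("roaming entry kept\n");
	return 0;
}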
1106 1082
1107 static void batadv_tt_global_purge(struct batadv_priv *bat_priv) 1083 static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
1108 { 1084 {
1109 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1085 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1110 struct hlist_head *head; 1086 struct hlist_head *head;
1111 struct hlist_node *node, *node_tmp; 1087 struct hlist_node *node, *node_tmp;
1112 spinlock_t *list_lock; /* protects write access to the hash lists */ 1088 spinlock_t *list_lock; /* protects write access to the hash lists */
1113 uint32_t i; 1089 uint32_t i;
1114 char *msg = NULL; 1090 char *msg = NULL;
1115 struct batadv_tt_common_entry *tt_common; 1091 struct batadv_tt_common_entry *tt_common;
1116 struct batadv_tt_global_entry *tt_global; 1092 struct batadv_tt_global_entry *tt_global;
1117 1093
1118 for (i = 0; i < hash->size; i++) { 1094 for (i = 0; i < hash->size; i++) {
1119 head = &hash->table[i]; 1095 head = &hash->table[i];
1120 list_lock = &hash->list_locks[i]; 1096 list_lock = &hash->list_locks[i];
1121 1097
1122 spin_lock_bh(list_lock); 1098 spin_lock_bh(list_lock);
1123 hlist_for_each_entry_safe(tt_common, node, node_tmp, head, 1099 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
1124 hash_entry) { 1100 hash_entry) {
1125 tt_global = container_of(tt_common, 1101 tt_global = container_of(tt_common,
1126 struct batadv_tt_global_entry, 1102 struct batadv_tt_global_entry,
1127 common); 1103 common);
1128 1104
1129 if (!batadv_tt_global_to_purge(tt_global, &msg)) 1105 if (!batadv_tt_global_to_purge(tt_global, &msg))
1130 continue; 1106 continue;
1131 1107
1132 batadv_dbg(BATADV_DBG_TT, bat_priv, 1108 batadv_dbg(BATADV_DBG_TT, bat_priv,
1133 "Deleting global tt entry (%pM): %s\n", 1109 "Deleting global tt entry (%pM): %s\n",
1134 tt_global->common.addr, msg); 1110 tt_global->common.addr, msg);
1135 1111
1136 hlist_del_rcu(node); 1112 hlist_del_rcu(node);
1137 1113
1138 batadv_tt_global_entry_free_ref(tt_global); 1114 batadv_tt_global_entry_free_ref(tt_global);
1139 } 1115 }
1140 spin_unlock_bh(list_lock); 1116 spin_unlock_bh(list_lock);
1141 } 1117 }
1142 } 1118 }
1143 1119
1144 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv) 1120 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1145 { 1121 {
1146 struct batadv_hashtable *hash; 1122 struct batadv_hashtable *hash;
1147 spinlock_t *list_lock; /* protects write access to the hash lists */ 1123 spinlock_t *list_lock; /* protects write access to the hash lists */
1148 struct batadv_tt_common_entry *tt_common_entry; 1124 struct batadv_tt_common_entry *tt_common_entry;
1149 struct batadv_tt_global_entry *tt_global; 1125 struct batadv_tt_global_entry *tt_global;
1150 struct hlist_node *node, *node_tmp; 1126 struct hlist_node *node, *node_tmp;
1151 struct hlist_head *head; 1127 struct hlist_head *head;
1152 uint32_t i; 1128 uint32_t i;
1153 1129
1154 if (!bat_priv->tt.global_hash) 1130 if (!bat_priv->tt.global_hash)
1155 return; 1131 return;
1156 1132
1157 hash = bat_priv->tt.global_hash; 1133 hash = bat_priv->tt.global_hash;
1158 1134
1159 for (i = 0; i < hash->size; i++) { 1135 for (i = 0; i < hash->size; i++) {
1160 head = &hash->table[i]; 1136 head = &hash->table[i];
1161 list_lock = &hash->list_locks[i]; 1137 list_lock = &hash->list_locks[i];
1162 1138
1163 spin_lock_bh(list_lock); 1139 spin_lock_bh(list_lock);
1164 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, 1140 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1165 head, hash_entry) { 1141 head, hash_entry) {
1166 hlist_del_rcu(node); 1142 hlist_del_rcu(node);
1167 tt_global = container_of(tt_common_entry, 1143 tt_global = container_of(tt_common_entry,
1168 struct batadv_tt_global_entry, 1144 struct batadv_tt_global_entry,
1169 common); 1145 common);
1170 batadv_tt_global_entry_free_ref(tt_global); 1146 batadv_tt_global_entry_free_ref(tt_global);
1171 } 1147 }
1172 spin_unlock_bh(list_lock); 1148 spin_unlock_bh(list_lock);
1173 } 1149 }
1174 1150
1175 batadv_hash_destroy(hash); 1151 batadv_hash_destroy(hash);
1176 1152
1177 bat_priv->tt.global_hash = NULL; 1153 bat_priv->tt.global_hash = NULL;
1178 } 1154 }
1179 1155
1180 static bool 1156 static bool
1181 _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry, 1157 _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
1182 struct batadv_tt_global_entry *tt_global_entry) 1158 struct batadv_tt_global_entry *tt_global_entry)
1183 { 1159 {
1184 bool ret = false; 1160 bool ret = false;
1185 1161
1186 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI && 1162 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
1187 tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI) 1163 tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
1188 ret = true; 1164 ret = true;
1189 1165
1190 return ret; 1166 return ret;
1191 } 1167 }
1192 1168
1193 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, 1169 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1194 const uint8_t *src, 1170 const uint8_t *src,
1195 const uint8_t *addr) 1171 const uint8_t *addr)
1196 { 1172 {
1197 struct batadv_tt_local_entry *tt_local_entry = NULL; 1173 struct batadv_tt_local_entry *tt_local_entry = NULL;
1198 struct batadv_tt_global_entry *tt_global_entry = NULL; 1174 struct batadv_tt_global_entry *tt_global_entry = NULL;
1199 struct batadv_orig_node *orig_node = NULL; 1175 struct batadv_orig_node *orig_node = NULL;
1200 struct batadv_neigh_node *router = NULL; 1176 struct batadv_neigh_node *router = NULL;
1201 struct hlist_head *head; 1177 struct hlist_head *head;
1202 struct hlist_node *node; 1178 struct hlist_node *node;
1203 struct batadv_tt_orig_list_entry *orig_entry; 1179 struct batadv_tt_orig_list_entry *orig_entry;
1204 int best_tq; 1180 int best_tq;
1205 1181
1206 if (src && atomic_read(&bat_priv->ap_isolation)) { 1182 if (src && atomic_read(&bat_priv->ap_isolation)) {
1207 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src); 1183 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
1208 if (!tt_local_entry) 1184 if (!tt_local_entry)
1209 goto out; 1185 goto out;
1210 } 1186 }
1211 1187
1212 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); 1188 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
1213 if (!tt_global_entry) 1189 if (!tt_global_entry)
1214 goto out; 1190 goto out;
1215 1191
1216 /* check whether the clients should not communicate due to AP 1192 /* check whether the clients should not communicate due to AP
1217 * isolation 1193 * isolation
1218 */ 1194 */
1219 if (tt_local_entry && 1195 if (tt_local_entry &&
1220 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) 1196 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
1221 goto out; 1197 goto out;
1222 1198
1223 best_tq = 0; 1199 best_tq = 0;
1224 1200
1225 rcu_read_lock(); 1201 rcu_read_lock();
1226 head = &tt_global_entry->orig_list; 1202 head = &tt_global_entry->orig_list;
1227 hlist_for_each_entry_rcu(orig_entry, node, head, list) { 1203 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1228 router = batadv_orig_node_get_router(orig_entry->orig_node); 1204 router = batadv_orig_node_get_router(orig_entry->orig_node);
1229 if (!router) 1205 if (!router)
1230 continue; 1206 continue;
1231 1207
1232 if (router->tq_avg > best_tq) { 1208 if (router->tq_avg > best_tq) {
1233 orig_node = orig_entry->orig_node; 1209 orig_node = orig_entry->orig_node;
1234 best_tq = router->tq_avg; 1210 best_tq = router->tq_avg;
1235 } 1211 }
1236 batadv_neigh_node_free_ref(router); 1212 batadv_neigh_node_free_ref(router);
1237 } 1213 }
1238 /* found anything? */ 1214 /* found anything? */
1239 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) 1215 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1240 orig_node = NULL; 1216 orig_node = NULL;
1241 rcu_read_unlock(); 1217 rcu_read_unlock();
1242 out: 1218 out:
1243 if (tt_global_entry) 1219 if (tt_global_entry)
1244 batadv_tt_global_entry_free_ref(tt_global_entry); 1220 batadv_tt_global_entry_free_ref(tt_global_entry);
1245 if (tt_local_entry) 1221 if (tt_local_entry)
1246 batadv_tt_local_entry_free_ref(tt_local_entry); 1222 batadv_tt_local_entry_free_ref(tt_local_entry);
1247 1223
1248 return orig_node; 1224 return orig_node;
1249 } 1225 }
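
batadv_transtable_search() resolves a destination MAC to the announcing originator whose router reports the best TQ (transmit quality). Stripped of the RCU walk and the reference counting, the selection is a plain maximum search; a sketch with hypothetical candidates:

#include <stdio.h>

/* pick the originator whose router advertises the best TQ */
struct candidate { const char *name; int tq_avg; };

static const struct candidate *best_by_tq(const struct candidate *c, int n)
{
	const struct candidate *best = NULL;
	int best_tq = 0;

	for (int i = 0; i < n; i++) {
		if (c[i].tq_avg > best_tq) {
			best = &c[i];
			best_tq = c[i].tq_avg;
		}
	}
	return best;	/* NULL when no candidate has a usable router */
}

int main(void)
{
	struct candidate origs[] = {
		{ "orig A", 120 }, { "orig B", 230 }, { "orig C", 180 },
	};
	const struct candidate *best = best_by_tq(origs, 3);

	if (best)
		printf("forwarding via %s (tq %d)\n", best->name, best->tq_avg);
	return 0;
}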
1250 1226
1252 /* Calculates the checksum of the table announced by the given orig_node */ 1228 /* Calculates the checksum of the table announced by the given orig_node */
1252 static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv, 1228 static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1253 struct batadv_orig_node *orig_node) 1229 struct batadv_orig_node *orig_node)
1254 { 1230 {
1255 uint16_t total = 0, total_one; 1231 uint16_t total = 0, total_one;
1256 struct batadv_hashtable *hash = bat_priv->tt.global_hash; 1232 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1257 struct batadv_tt_common_entry *tt_common; 1233 struct batadv_tt_common_entry *tt_common;
1258 struct batadv_tt_global_entry *tt_global; 1234 struct batadv_tt_global_entry *tt_global;
1259 struct hlist_node *node; 1235 struct hlist_node *node;
1260 struct hlist_head *head; 1236 struct hlist_head *head;
1261 uint32_t i; 1237 uint32_t i;
1262 int j; 1238 int j;
1263 1239
1264 for (i = 0; i < hash->size; i++) { 1240 for (i = 0; i < hash->size; i++) {
1265 head = &hash->table[i]; 1241 head = &hash->table[i];
1266 1242
1267 rcu_read_lock(); 1243 rcu_read_lock();
1268 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { 1244 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1269 tt_global = container_of(tt_common, 1245 tt_global = container_of(tt_common,
1270 struct batadv_tt_global_entry, 1246 struct batadv_tt_global_entry,
1271 common); 1247 common);
1272 /* Roaming clients are in the global table for 1248 /* Roaming clients are in the global table for
1273 * consistency only. They must not be 1249 * consistency only. They must not be
1274 * taken into account while computing the 1250 * taken into account while computing the
1275 * global crc 1251 * global crc
1276 */ 1252 */
1277 if (tt_common->flags & BATADV_TT_CLIENT_ROAM) 1253 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1278 continue; 1254 continue;
1279 /* Temporary clients have not been announced yet, so 1255 /* Temporary clients have not been announced yet, so
1280 * they have to be skipped while computing the global 1256 * they have to be skipped while computing the global
1281 * crc 1257 * crc
1282 */ 1258 */
1283 if (tt_common->flags & BATADV_TT_CLIENT_TEMP) 1259 if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
1284 continue; 1260 continue;
1285 1261
1286 /* find out if this global entry is announced by this 1262 /* find out if this global entry is announced by this
1287 * originator 1263 * originator
1288 */ 1264 */
1289 if (!batadv_tt_global_entry_has_orig(tt_global, 1265 if (!batadv_tt_global_entry_has_orig(tt_global,
1290 orig_node)) 1266 orig_node))
1291 continue; 1267 continue;
1292 1268
1293 total_one = 0; 1269 total_one = 0;
1294 for (j = 0; j < ETH_ALEN; j++) 1270 for (j = 0; j < ETH_ALEN; j++)
1295 total_one = crc16_byte(total_one, 1271 total_one = crc16_byte(total_one,
1296 tt_common->addr[j]); 1272 tt_common->addr[j]);
1297 total ^= total_one; 1273 total ^= total_one;
1298 } 1274 }
1299 rcu_read_unlock(); 1275 rcu_read_unlock();
1300 } 1276 }
1301 1277
1302 return total; 1278 return total;
1303 } 1279 }
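
The table checksum is built from per-entry CRC16 values that are XORed together, so the result does not depend on the order in which the hash buckets are visited. The sketch below models crc16_byte() with a bitwise reflected-0x8005 (0xA001) update, which is assumed to match lib/crc16 but is only illustrative; the point is the XOR accumulation:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* bitwise CRC-16 update; assumed equivalent of the kernel's crc16_byte() */
static uint16_t crc16_byte_sketch(uint16_t crc, uint8_t data)
{
	crc ^= data;
	for (int i = 0; i < 8; i++)
		crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	return crc;
}

/* per-entry CRCs are XORed together, so the table checksum is independent
 * of the order in which the entries are visited
 */
static uint16_t table_crc(const uint8_t addrs[][ETH_ALEN], int n)
{
	uint16_t total = 0;

	for (int i = 0; i < n; i++) {
		uint16_t total_one = 0;

		for (int j = 0; j < ETH_ALEN; j++)
			total_one = crc16_byte_sketch(total_one, addrs[i][j]);
		total ^= total_one;
	}
	return total;
}

int main(void)
{
	const uint8_t table_a[][ETH_ALEN] = {
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
		{ 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
	};
	const uint8_t table_b[][ETH_ALEN] = {	/* same entries, swapped */
		{ 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
		{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};

	printf("crc a=0x%04x b=0x%04x\n", table_crc(table_a, 2),
	       table_crc(table_b, 2));	/* identical despite the different order */
	return 0;
}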
1304 1280
1305 /* Calculates the checksum of the local table */ 1281 /* Calculates the checksum of the local table */
1306 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv) 1282 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1307 { 1283 {
1308 uint16_t total = 0, total_one; 1284 uint16_t total = 0, total_one;
1309 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 1285 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1310 struct batadv_tt_common_entry *tt_common; 1286 struct batadv_tt_common_entry *tt_common;
1311 struct hlist_node *node; 1287 struct hlist_node *node;
1312 struct hlist_head *head; 1288 struct hlist_head *head;
1313 uint32_t i; 1289 uint32_t i;
1314 int j; 1290 int j;
1315 1291
1316 for (i = 0; i < hash->size; i++) { 1292 for (i = 0; i < hash->size; i++) {
1317 head = &hash->table[i]; 1293 head = &hash->table[i];
1318 1294
1319 rcu_read_lock(); 1295 rcu_read_lock();
1320 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) { 1296 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1321 /* not yet committed clients must not be taken into 1297 /* not yet committed clients must not be taken into
1322 * account while computing the CRC 1298 * account while computing the CRC
1323 */ 1299 */
1324 if (tt_common->flags & BATADV_TT_CLIENT_NEW) 1300 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
1325 continue; 1301 continue;
1326 total_one = 0; 1302 total_one = 0;
1327 for (j = 0; j < ETH_ALEN; j++) 1303 for (j = 0; j < ETH_ALEN; j++)
1328 total_one = crc16_byte(total_one, 1304 total_one = crc16_byte(total_one,
1329 tt_common->addr[j]); 1305 tt_common->addr[j]);
1330 total ^= total_one; 1306 total ^= total_one;
1331 } 1307 }
1332 rcu_read_unlock(); 1308 rcu_read_unlock();
1333 } 1309 }
1334 1310
1335 return total; 1311 return total;
1336 } 1312 }
1337 1313
1338 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv) 1314 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1339 { 1315 {
1340 struct batadv_tt_req_node *node, *safe; 1316 struct batadv_tt_req_node *node, *safe;
1341 1317
1342 spin_lock_bh(&bat_priv->tt.req_list_lock); 1318 spin_lock_bh(&bat_priv->tt.req_list_lock);
1343 1319
1344 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { 1320 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1345 list_del(&node->list); 1321 list_del(&node->list);
1346 kfree(node); 1322 kfree(node);
1347 } 1323 }
1348 1324
1349 spin_unlock_bh(&bat_priv->tt.req_list_lock); 1325 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1350 } 1326 }
1351 1327
1352 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv, 1328 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
1353 struct batadv_orig_node *orig_node, 1329 struct batadv_orig_node *orig_node,
1354 const unsigned char *tt_buff, 1330 const unsigned char *tt_buff,
1355 uint8_t tt_num_changes) 1331 uint8_t tt_num_changes)
1356 { 1332 {
1357 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes); 1333 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1358 1334
1359 /* Replace the old buffer only if I received something in the 1335 /* Replace the old buffer only if I received something in the
1360 * last OGM (the OGM could carry no changes) 1336 * last OGM (the OGM could carry no changes)
1361 */ 1337 */
1362 spin_lock_bh(&orig_node->tt_buff_lock); 1338 spin_lock_bh(&orig_node->tt_buff_lock);
1363 if (tt_buff_len > 0) { 1339 if (tt_buff_len > 0) {
1364 kfree(orig_node->tt_buff); 1340 kfree(orig_node->tt_buff);
1365 orig_node->tt_buff_len = 0; 1341 orig_node->tt_buff_len = 0;
1366 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); 1342 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1367 if (orig_node->tt_buff) { 1343 if (orig_node->tt_buff) {
1368 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); 1344 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1369 orig_node->tt_buff_len = tt_buff_len; 1345 orig_node->tt_buff_len = tt_buff_len;
1370 } 1346 }
1371 } 1347 }
1372 spin_unlock_bh(&orig_node->tt_buff_lock); 1348 spin_unlock_bh(&orig_node->tt_buff_lock);
1373 } 1349 }
1374 1350
1375 static void batadv_tt_req_purge(struct batadv_priv *bat_priv) 1351 static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1376 { 1352 {
1377 struct batadv_tt_req_node *node, *safe; 1353 struct batadv_tt_req_node *node, *safe;
1378 1354
1379 spin_lock_bh(&bat_priv->tt.req_list_lock); 1355 spin_lock_bh(&bat_priv->tt.req_list_lock);
1380 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { 1356 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1381 if (batadv_has_timed_out(node->issued_at, 1357 if (batadv_has_timed_out(node->issued_at,
1382 BATADV_TT_REQUEST_TIMEOUT)) { 1358 BATADV_TT_REQUEST_TIMEOUT)) {
1383 list_del(&node->list); 1359 list_del(&node->list);
1384 kfree(node); 1360 kfree(node);
1385 } 1361 }
1386 } 1362 }
1387 spin_unlock_bh(&bat_priv->tt.req_list_lock); 1363 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1388 } 1364 }
1389 1365
1390 /* returns the pointer to the new tt_req_node struct if no request 1366 /* returns the pointer to the new tt_req_node struct if no request
1391 * has already been issued for this orig_node, NULL otherwise 1367 * has already been issued for this orig_node, NULL otherwise
1392 */ 1368 */
1393 static struct batadv_tt_req_node * 1369 static struct batadv_tt_req_node *
1394 batadv_new_tt_req_node(struct batadv_priv *bat_priv, 1370 batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1395 struct batadv_orig_node *orig_node) 1371 struct batadv_orig_node *orig_node)
1396 { 1372 {
1397 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL; 1373 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1398 1374
1399 spin_lock_bh(&bat_priv->tt.req_list_lock); 1375 spin_lock_bh(&bat_priv->tt.req_list_lock);
1400 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) { 1376 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
1401 if (batadv_compare_eth(tt_req_node_tmp, orig_node) && 1377 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1402 !batadv_has_timed_out(tt_req_node_tmp->issued_at, 1378 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1403 BATADV_TT_REQUEST_TIMEOUT)) 1379 BATADV_TT_REQUEST_TIMEOUT))
1404 goto unlock; 1380 goto unlock;
1405 } 1381 }
1406 1382
1407 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC); 1383 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1408 if (!tt_req_node) 1384 if (!tt_req_node)
1409 goto unlock; 1385 goto unlock;
1410 1386
1411 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN); 1387 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1412 tt_req_node->issued_at = jiffies; 1388 tt_req_node->issued_at = jiffies;
1413 1389
1414 list_add(&tt_req_node->list, &bat_priv->tt.req_list); 1390 list_add(&tt_req_node->list, &bat_priv->tt.req_list);
1415 unlock: 1391 unlock:
1416 spin_unlock_bh(&bat_priv->tt.req_list_lock); 1392 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1417 return tt_req_node; 1393 return tt_req_node;
1418 } 1394 }
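
batadv_new_tt_req_node() suppresses duplicate TT_REQUESTs: a new node is created only if no fresh request for the same originator is already pending. A userspace sketch with a fixed array instead of the locked req_list and an assumed timeout value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define REQ_TIMEOUT_MS 3000	/* assumed value, standing in for BATADV_TT_REQUEST_TIMEOUT */

struct req { uint8_t addr[ETH_ALEN]; uint64_t issued_at; bool used; };

/* issue a request only if no fresh one is already pending for this address */
static struct req *new_req(struct req *pool, int n, const uint8_t *addr,
			   uint64_t now)
{
	struct req *free_slot = NULL;

	for (int i = 0; i < n; i++) {
		if (!pool[i].used) {
			free_slot = &pool[i];
			continue;
		}
		if (!memcmp(pool[i].addr, addr, ETH_ALEN) &&
		    now - pool[i].issued_at < REQ_TIMEOUT_MS)
			return NULL;	/* a request is already in flight */
	}
	if (!free_slot)
		return NULL;		/* table full */

	memcpy(free_slot->addr, addr, ETH_ALEN);
	free_slot->issued_at = now;
	free_slot->used = true;
	return free_slot;
}

int main(void)
{
	static struct req pool[4];	/* zero-initialized: all slots unused */
	const uint8_t dst[ETH_ALEN] = { 0x02, 1, 2, 3, 4, 5 };

	printf("first : %s\n", new_req(pool, 4, dst, 1000) ? "sent" : "suppressed");
	printf("second: %s\n", new_req(pool, 4, dst, 2000) ? "sent" : "suppressed");
	printf("later : %s\n", new_req(pool, 4, dst, 9000) ? "sent" : "suppressed");
	return 0;
}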
1419 1395
1420 /* data_ptr is useless here, but has to be kept to match the prototype */ 1396 /* data_ptr is useless here, but has to be kept to match the prototype */
1421 static int batadv_tt_local_valid_entry(const void *entry_ptr, 1397 static int batadv_tt_local_valid_entry(const void *entry_ptr,
1422 const void *data_ptr) 1398 const void *data_ptr)
1423 { 1399 {
1424 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 1400 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1425 1401
1426 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW) 1402 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
1427 return 0; 1403 return 0;
1428 return 1; 1404 return 1;
1429 } 1405 }
1430 1406
1431 static int batadv_tt_global_valid(const void *entry_ptr, 1407 static int batadv_tt_global_valid(const void *entry_ptr,
1432 const void *data_ptr) 1408 const void *data_ptr)
1433 { 1409 {
1434 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr; 1410 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1435 const struct batadv_tt_global_entry *tt_global_entry; 1411 const struct batadv_tt_global_entry *tt_global_entry;
1436 const struct batadv_orig_node *orig_node = data_ptr; 1412 const struct batadv_orig_node *orig_node = data_ptr;
1437 1413
1438 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM || 1414 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
1439 tt_common_entry->flags & BATADV_TT_CLIENT_TEMP) 1415 tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
1440 return 0; 1416 return 0;
1441 1417
1442 tt_global_entry = container_of(tt_common_entry, 1418 tt_global_entry = container_of(tt_common_entry,
1443 struct batadv_tt_global_entry, 1419 struct batadv_tt_global_entry,
1444 common); 1420 common);
1445 1421
1446 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node); 1422 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1447 } 1423 }
1448 1424
1449 static struct sk_buff * 1425 static struct sk_buff *
1450 batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, 1426 batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1451 struct batadv_hashtable *hash, 1427 struct batadv_hashtable *hash,
1452 struct batadv_hard_iface *primary_if, 1428 struct batadv_hard_iface *primary_if,
1453 int (*valid_cb)(const void *, const void *), 1429 int (*valid_cb)(const void *, const void *),
1454 void *cb_data) 1430 void *cb_data)
1455 { 1431 {
1456 struct batadv_tt_common_entry *tt_common_entry; 1432 struct batadv_tt_common_entry *tt_common_entry;
1457 struct batadv_tt_query_packet *tt_response; 1433 struct batadv_tt_query_packet *tt_response;
1458 struct batadv_tt_change *tt_change; 1434 struct batadv_tt_change *tt_change;
1459 struct hlist_node *node; 1435 struct hlist_node *node;
1460 struct hlist_head *head; 1436 struct hlist_head *head;
1461 struct sk_buff *skb = NULL; 1437 struct sk_buff *skb = NULL;
1462 uint16_t tt_tot, tt_count; 1438 uint16_t tt_tot, tt_count;
1463 ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet); 1439 ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
1464 uint32_t i; 1440 uint32_t i;
1465 size_t len; 1441 size_t len;
1466 1442
1467 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { 1443 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1468 tt_len = primary_if->soft_iface->mtu - tt_query_size; 1444 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1469 tt_len -= tt_len % sizeof(struct batadv_tt_change); 1445 tt_len -= tt_len % sizeof(struct batadv_tt_change);
1470 } 1446 }
1471 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1447 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1472 1448
1473 len = tt_query_size + tt_len; 1449 len = tt_query_size + tt_len;
1474 skb = dev_alloc_skb(len + ETH_HLEN); 1450 skb = dev_alloc_skb(len + ETH_HLEN);
1475 if (!skb) 1451 if (!skb)
1476 goto out; 1452 goto out;
1477 1453
1478 skb_reserve(skb, ETH_HLEN); 1454 skb_reserve(skb, ETH_HLEN);
1479 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len); 1455 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
1480 tt_response->ttvn = ttvn; 1456 tt_response->ttvn = ttvn;
1481 1457
1482 tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size); 1458 tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
1483 tt_count = 0; 1459 tt_count = 0;
1484 1460
1485 rcu_read_lock(); 1461 rcu_read_lock();
1486 for (i = 0; i < hash->size; i++) { 1462 for (i = 0; i < hash->size; i++) {
1487 head = &hash->table[i]; 1463 head = &hash->table[i];
1488 1464
1489 hlist_for_each_entry_rcu(tt_common_entry, node, 1465 hlist_for_each_entry_rcu(tt_common_entry, node,
1490 head, hash_entry) { 1466 head, hash_entry) {
1491 if (tt_count == tt_tot) 1467 if (tt_count == tt_tot)
1492 break; 1468 break;
1493 1469
1494 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) 1470 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
1495 continue; 1471 continue;
1496 1472
1497 memcpy(tt_change->addr, tt_common_entry->addr, 1473 memcpy(tt_change->addr, tt_common_entry->addr,
1498 ETH_ALEN); 1474 ETH_ALEN);
1499 tt_change->flags = BATADV_NO_FLAGS; 1475 tt_change->flags = BATADV_NO_FLAGS;
1500 1476
1501 tt_count++; 1477 tt_count++;
1502 tt_change++; 1478 tt_change++;
1503 } 1479 }
1504 } 1480 }
1505 rcu_read_unlock(); 1481 rcu_read_unlock();
1506 1482
1507 /* store in the message the number of entries we have successfully 1483 /* store in the message the number of entries we have successfully
1508 * copied 1484 * copied
1509 */ 1485 */
1510 tt_response->tt_data = htons(tt_count); 1486 tt_response->tt_data = htons(tt_count);
1511 1487
1512 out: 1488 out:
1513 return skb; 1489 return skb;
1514 } 1490 }
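
Before the response skb is built, the requested payload is clamped to the interface MTU and rounded down to a whole number of tt_change records, so a partially copied record can never be sent. The arithmetic in isolation, with illustrative sizes rather than the real packet structures:

#include <stdint.h>
#include <stdio.h>

/* sizes are illustrative, not the real packet formats */
#define TT_QUERY_HDR_LEN 24
#define TT_CHANGE_LEN     8

/* clamp the requested TT payload to the MTU and round it down to a whole
 * number of tt_change records, as done before building the skb
 */
static uint16_t clamp_tt_len(uint16_t tt_len, unsigned int mtu)
{
	if (TT_QUERY_HDR_LEN + tt_len > mtu) {
		tt_len = mtu - TT_QUERY_HDR_LEN;
		tt_len -= tt_len % TT_CHANGE_LEN;
	}
	return tt_len;
}

int main(void)
{
	uint16_t tt_len = clamp_tt_len(4000, 1500);

	printf("payload %u bytes -> %u entries\n",
	       (unsigned)tt_len, (unsigned)(tt_len / TT_CHANGE_LEN));
	return 0;
}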
1515 1491
1516 static int batadv_send_tt_request(struct batadv_priv *bat_priv, 1492 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1517 struct batadv_orig_node *dst_orig_node, 1493 struct batadv_orig_node *dst_orig_node,
1518 uint8_t ttvn, uint16_t tt_crc, 1494 uint8_t ttvn, uint16_t tt_crc,
1519 bool full_table) 1495 bool full_table)
1520 { 1496 {
1521 struct sk_buff *skb = NULL; 1497 struct sk_buff *skb = NULL;
1522 struct batadv_tt_query_packet *tt_request; 1498 struct batadv_tt_query_packet *tt_request;
1523 struct batadv_neigh_node *neigh_node = NULL; 1499 struct batadv_neigh_node *neigh_node = NULL;
1524 struct batadv_hard_iface *primary_if; 1500 struct batadv_hard_iface *primary_if;
1525 struct batadv_tt_req_node *tt_req_node = NULL; 1501 struct batadv_tt_req_node *tt_req_node = NULL;
1526 int ret = 1; 1502 int ret = 1;
1527 size_t tt_req_len; 1503 size_t tt_req_len;
1528 1504
1529 primary_if = batadv_primary_if_get_selected(bat_priv); 1505 primary_if = batadv_primary_if_get_selected(bat_priv);
1530 if (!primary_if) 1506 if (!primary_if)
1531 goto out; 1507 goto out;
1532 1508
1533 /* The new tt_req will be issued only if I'm not waiting for a 1509 /* The new tt_req will be issued only if I'm not waiting for a
1534 * reply from the same orig_node yet 1510 * reply from the same orig_node yet
1535 */ 1511 */
1536 tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node); 1512 tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
1537 if (!tt_req_node) 1513 if (!tt_req_node)
1538 goto out; 1514 goto out;
1539 1515
1540 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN); 1516 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
1541 if (!skb) 1517 if (!skb)
1542 goto out; 1518 goto out;
1543 1519
1544 skb_reserve(skb, ETH_HLEN); 1520 skb_reserve(skb, ETH_HLEN);
1545 1521
1546 tt_req_len = sizeof(*tt_request); 1522 tt_req_len = sizeof(*tt_request);
1547 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len); 1523 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
1548 1524
1549 tt_request->header.packet_type = BATADV_TT_QUERY; 1525 tt_request->header.packet_type = BATADV_TT_QUERY;
1550 tt_request->header.version = BATADV_COMPAT_VERSION; 1526 tt_request->header.version = BATADV_COMPAT_VERSION;
1551 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1527 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1552 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); 1528 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1553 tt_request->header.ttl = BATADV_TTL; 1529 tt_request->header.ttl = BATADV_TTL;
1554 tt_request->ttvn = ttvn; 1530 tt_request->ttvn = ttvn;
1555 tt_request->tt_data = htons(tt_crc); 1531 tt_request->tt_data = htons(tt_crc);
1556 tt_request->flags = BATADV_TT_REQUEST; 1532 tt_request->flags = BATADV_TT_REQUEST;
1557 1533
1558 if (full_table) 1534 if (full_table)
1559 tt_request->flags |= BATADV_TT_FULL_TABLE; 1535 tt_request->flags |= BATADV_TT_FULL_TABLE;
1560 1536
1561 neigh_node = batadv_orig_node_get_router(dst_orig_node); 1537 neigh_node = batadv_orig_node_get_router(dst_orig_node);
1562 if (!neigh_node) 1538 if (!neigh_node)
1563 goto out; 1539 goto out;
1564 1540
1565 batadv_dbg(BATADV_DBG_TT, bat_priv, 1541 batadv_dbg(BATADV_DBG_TT, bat_priv,
1566 "Sending TT_REQUEST to %pM via %pM [%c]\n", 1542 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1567 dst_orig_node->orig, neigh_node->addr, 1543 dst_orig_node->orig, neigh_node->addr,
1568 (full_table ? 'F' : '.')); 1544 (full_table ? 'F' : '.'));
1569 1545
1570 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX); 1546 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
1571 1547
1572 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1548 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1573 ret = 0; 1549 ret = 0;
1574 1550
1575 out: 1551 out:
1576 if (neigh_node) 1552 if (neigh_node)
1577 batadv_neigh_node_free_ref(neigh_node); 1553 batadv_neigh_node_free_ref(neigh_node);
1578 if (primary_if) 1554 if (primary_if)
1579 batadv_hardif_free_ref(primary_if); 1555 batadv_hardif_free_ref(primary_if);
1580 if (ret) 1556 if (ret)
1581 kfree_skb(skb); 1557 kfree_skb(skb);
1582 if (ret && tt_req_node) { 1558 if (ret && tt_req_node) {
1583 spin_lock_bh(&bat_priv->tt.req_list_lock); 1559 spin_lock_bh(&bat_priv->tt.req_list_lock);
1584 list_del(&tt_req_node->list); 1560 list_del(&tt_req_node->list);
1585 spin_unlock_bh(&bat_priv->tt.req_list_lock); 1561 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1586 kfree(tt_req_node); 1562 kfree(tt_req_node);
1587 } 1563 }
1588 return ret; 1564 return ret;
1589 } 1565 }
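
The error handling above follows the usual goto-out pattern: whatever was acquired on the way down is released exactly once, and the pending tt_req_node is removed again if the request never made it onto the wire, so a retry is allowed immediately. A simplified userspace sketch of that shape (hypothetical conditions, a malloc'ed buffer standing in for the skb):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* every resource taken on the way down is released exactly once at "out",
 * and the pending-request marker is kept only when the packet was really
 * handed to the lower layer
 */
static int send_request(bool have_primary_if, bool alloc_ok, bool have_router)
{
	int ret = 1;			/* assume failure until the send happens */
	char *skb = NULL;
	bool req_pending = false;

	if (!have_primary_if)
		goto out;

	req_pending = true;		/* stands in for the tt_req_node */

	skb = alloc_ok ? malloc(64) : NULL;
	if (!skb)
		goto out;

	if (!have_router)
		goto out;

	printf("request sent\n");
	free(skb);			/* the real send path consumes the skb */
	skb = NULL;
	ret = 0;
out:
	if (ret)
		free(skb);		/* never handed off: drop the buffer */
	if (ret && req_pending)
		printf("pending marker removed, retry allowed\n");
	return ret;
}

int main(void)
{
	send_request(true, true, true);		/* success path */
	send_request(true, true, false);	/* no route: cleanup runs */
	return 0;
}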
1590 1566
1591 static bool 1567 static bool
1592 batadv_send_other_tt_response(struct batadv_priv *bat_priv, 1568 batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1593 struct batadv_tt_query_packet *tt_request) 1569 struct batadv_tt_query_packet *tt_request)
1594 { 1570 {
1595 struct batadv_orig_node *req_dst_orig_node = NULL; 1571 struct batadv_orig_node *req_dst_orig_node = NULL;
1596 struct batadv_orig_node *res_dst_orig_node = NULL; 1572 struct batadv_orig_node *res_dst_orig_node = NULL;
1597 struct batadv_neigh_node *neigh_node = NULL; 1573 struct batadv_neigh_node *neigh_node = NULL;
1598 struct batadv_hard_iface *primary_if = NULL; 1574 struct batadv_hard_iface *primary_if = NULL;
1599 uint8_t orig_ttvn, req_ttvn, ttvn; 1575 uint8_t orig_ttvn, req_ttvn, ttvn;
1600 int ret = false; 1576 int ret = false;
1601 unsigned char *tt_buff; 1577 unsigned char *tt_buff;
1602 bool full_table; 1578 bool full_table;
1603 uint16_t tt_len, tt_tot; 1579 uint16_t tt_len, tt_tot;
1604 struct sk_buff *skb = NULL; 1580 struct sk_buff *skb = NULL;
1605 struct batadv_tt_query_packet *tt_response; 1581 struct batadv_tt_query_packet *tt_response;
1606 uint8_t *packet_pos; 1582 uint8_t *packet_pos;
1607 size_t len; 1583 size_t len;
1608 1584
1609 batadv_dbg(BATADV_DBG_TT, bat_priv, 1585 batadv_dbg(BATADV_DBG_TT, bat_priv,
1610 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n", 1586 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1611 tt_request->src, tt_request->ttvn, tt_request->dst, 1587 tt_request->src, tt_request->ttvn, tt_request->dst,
1612 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1588 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1613 1589
1614 /* Let's get the orig node of the REAL destination */ 1590 /* Let's get the orig node of the REAL destination */
1615 req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst); 1591 req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
1616 if (!req_dst_orig_node) 1592 if (!req_dst_orig_node)
1617 goto out; 1593 goto out;
1618 1594
1619 res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); 1595 res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1620 if (!res_dst_orig_node) 1596 if (!res_dst_orig_node)
1621 goto out; 1597 goto out;
1622 1598
1623 neigh_node = batadv_orig_node_get_router(res_dst_orig_node); 1599 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1624 if (!neigh_node) 1600 if (!neigh_node)
1625 goto out; 1601 goto out;
1626 1602
1627 primary_if = batadv_primary_if_get_selected(bat_priv); 1603 primary_if = batadv_primary_if_get_selected(bat_priv);
1628 if (!primary_if) 1604 if (!primary_if)
1629 goto out; 1605 goto out;
1630 1606
1631 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1607 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1632 req_ttvn = tt_request->ttvn; 1608 req_ttvn = tt_request->ttvn;
1633 1609
1634 /* I don't have the requested data */ 1610 /* I don't have the requested data */
1635 if (orig_ttvn != req_ttvn || 1611 if (orig_ttvn != req_ttvn ||
1636 tt_request->tt_data != htons(req_dst_orig_node->tt_crc)) 1612 tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1637 goto out; 1613 goto out;
1638 1614
1639 /* If the full table has been explicitly requested */ 1615 /* If the full table has been explicitly requested */
1640 if (tt_request->flags & BATADV_TT_FULL_TABLE || 1616 if (tt_request->flags & BATADV_TT_FULL_TABLE ||
1641 !req_dst_orig_node->tt_buff) 1617 !req_dst_orig_node->tt_buff)
1642 full_table = true; 1618 full_table = true;
1643 else 1619 else
1644 full_table = false; 1620 full_table = false;
1645 1621
1646 /* In this version, fragmentation is not implemented, so 1622 /* In this version, fragmentation is not implemented, so
1647 * I'll send only one packet with as many TT entries as I can 1623 * I'll send only one packet with as many TT entries as I can
1648 */ 1624 */
1649 if (!full_table) { 1625 if (!full_table) {
1650 spin_lock_bh(&req_dst_orig_node->tt_buff_lock); 1626 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1651 tt_len = req_dst_orig_node->tt_buff_len; 1627 tt_len = req_dst_orig_node->tt_buff_len;
1652 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1628 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1653 1629
1654 len = sizeof(*tt_response) + tt_len; 1630 len = sizeof(*tt_response) + tt_len;
1655 skb = dev_alloc_skb(len + ETH_HLEN); 1631 skb = dev_alloc_skb(len + ETH_HLEN);
1656 if (!skb) 1632 if (!skb)
1657 goto unlock; 1633 goto unlock;
1658 1634
1659 skb_reserve(skb, ETH_HLEN); 1635 skb_reserve(skb, ETH_HLEN);
1660 packet_pos = skb_put(skb, len); 1636 packet_pos = skb_put(skb, len);
1661 tt_response = (struct batadv_tt_query_packet *)packet_pos; 1637 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1662 tt_response->ttvn = req_ttvn; 1638 tt_response->ttvn = req_ttvn;
1663 tt_response->tt_data = htons(tt_tot); 1639 tt_response->tt_data = htons(tt_tot);
1664 1640
1665 tt_buff = skb->data + sizeof(*tt_response); 1641 tt_buff = skb->data + sizeof(*tt_response);
1666 /* Copy the last orig_node's OGM buffer */ 1642 /* Copy the last orig_node's OGM buffer */
1667 memcpy(tt_buff, req_dst_orig_node->tt_buff, 1643 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1668 req_dst_orig_node->tt_buff_len); 1644 req_dst_orig_node->tt_buff_len);
1669 1645
1670 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); 1646 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1671 } else { 1647 } else {
1672 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size); 1648 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
1673 tt_len *= sizeof(struct batadv_tt_change); 1649 tt_len *= sizeof(struct batadv_tt_change);
1674 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1650 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1675 1651
1676 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1652 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1677 bat_priv->tt.global_hash, 1653 bat_priv->tt.global_hash,
1678 primary_if, 1654 primary_if,
1679 batadv_tt_global_valid, 1655 batadv_tt_global_valid,
1680 req_dst_orig_node); 1656 req_dst_orig_node);
1681 if (!skb) 1657 if (!skb)
1682 goto out; 1658 goto out;
1683 1659
1684 tt_response = (struct batadv_tt_query_packet *)skb->data; 1660 tt_response = (struct batadv_tt_query_packet *)skb->data;
1685 } 1661 }
1686 1662
1687 tt_response->header.packet_type = BATADV_TT_QUERY; 1663 tt_response->header.packet_type = BATADV_TT_QUERY;
1688 tt_response->header.version = BATADV_COMPAT_VERSION; 1664 tt_response->header.version = BATADV_COMPAT_VERSION;
1689 tt_response->header.ttl = BATADV_TTL; 1665 tt_response->header.ttl = BATADV_TTL;
1690 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN); 1666 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1691 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1667 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1692 tt_response->flags = BATADV_TT_RESPONSE; 1668 tt_response->flags = BATADV_TT_RESPONSE;
1693 1669
1694 if (full_table) 1670 if (full_table)
1695 tt_response->flags |= BATADV_TT_FULL_TABLE; 1671 tt_response->flags |= BATADV_TT_FULL_TABLE;
1696 1672
1697 batadv_dbg(BATADV_DBG_TT, bat_priv, 1673 batadv_dbg(BATADV_DBG_TT, bat_priv,
1698 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n", 1674 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1699 res_dst_orig_node->orig, neigh_node->addr, 1675 res_dst_orig_node->orig, neigh_node->addr,
1700 req_dst_orig_node->orig, req_ttvn); 1676 req_dst_orig_node->orig, req_ttvn);
1701 1677
1702 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); 1678 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1703 1679
1704 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1680 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1705 ret = true; 1681 ret = true;
1706 goto out; 1682 goto out;
1707 1683
1708 unlock: 1684 unlock:
1709 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock); 1685 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1710 1686
1711 out: 1687 out:
1712 if (res_dst_orig_node) 1688 if (res_dst_orig_node)
1713 batadv_orig_node_free_ref(res_dst_orig_node); 1689 batadv_orig_node_free_ref(res_dst_orig_node);
1714 if (req_dst_orig_node) 1690 if (req_dst_orig_node)
1715 batadv_orig_node_free_ref(req_dst_orig_node); 1691 batadv_orig_node_free_ref(req_dst_orig_node);
1716 if (neigh_node) 1692 if (neigh_node)
1717 batadv_neigh_node_free_ref(neigh_node); 1693 batadv_neigh_node_free_ref(neigh_node);
1718 if (primary_if) 1694 if (primary_if)
1719 batadv_hardif_free_ref(primary_if); 1695 batadv_hardif_free_ref(primary_if);
1720 if (!ret) 1696 if (!ret)
1721 kfree_skb(skb); 1697 kfree_skb(skb);
1722 return ret; 1698 return ret;
1723 1699
1724 } 1700 }
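
Answering on behalf of another node is only possible when the requested ttvn and CRC match what we track for that node; after that, the saved OGM changeset is preferred and the full table is sent only when explicitly requested or when no changeset buffer exists. A sketch of that decision as a pure function:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum tt_reply { TT_REPLY_NONE, TT_REPLY_CHANGESET, TT_REPLY_FULL };

/* mirror of the checks above: the request can only be answered when the
 * requested ttvn/crc match what we track for the destination; the full
 * table is sent when explicitly asked for or when no OGM buffer is saved
 */
static enum tt_reply pick_reply(uint8_t orig_ttvn, uint8_t req_ttvn,
				uint16_t orig_crc, uint16_t req_crc,
				bool full_requested, bool have_tt_buff)
{
	if (orig_ttvn != req_ttvn || orig_crc != req_crc)
		return TT_REPLY_NONE;		/* I don't have the requested data */
	if (full_requested || !have_tt_buff)
		return TT_REPLY_FULL;
	return TT_REPLY_CHANGESET;
}

int main(void)
{
	printf("%d\n", pick_reply(5, 5, 0xbeef, 0xbeef, false, true));	/* changeset */
	printf("%d\n", pick_reply(5, 5, 0xbeef, 0xbeef, true, true));	/* full */
	printf("%d\n", pick_reply(5, 4, 0xbeef, 0xbeef, false, true));	/* none */
	return 0;
}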
1725 1701
1726 static bool 1702 static bool
1727 batadv_send_my_tt_response(struct batadv_priv *bat_priv, 1703 batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1728 struct batadv_tt_query_packet *tt_request) 1704 struct batadv_tt_query_packet *tt_request)
1729 { 1705 {
1730 struct batadv_orig_node *orig_node = NULL; 1706 struct batadv_orig_node *orig_node = NULL;
1731 struct batadv_neigh_node *neigh_node = NULL; 1707 struct batadv_neigh_node *neigh_node = NULL;
1732 struct batadv_hard_iface *primary_if = NULL; 1708 struct batadv_hard_iface *primary_if = NULL;
1733 uint8_t my_ttvn, req_ttvn, ttvn; 1709 uint8_t my_ttvn, req_ttvn, ttvn;
1734 int ret = false; 1710 int ret = false;
1735 unsigned char *tt_buff; 1711 unsigned char *tt_buff;
1736 bool full_table; 1712 bool full_table;
1737 uint16_t tt_len, tt_tot; 1713 uint16_t tt_len, tt_tot;
1738 struct sk_buff *skb = NULL; 1714 struct sk_buff *skb = NULL;
1739 struct batadv_tt_query_packet *tt_response; 1715 struct batadv_tt_query_packet *tt_response;
1740 uint8_t *packet_pos; 1716 uint8_t *packet_pos;
1741 size_t len; 1717 size_t len;
1742 1718
1743 batadv_dbg(BATADV_DBG_TT, bat_priv, 1719 batadv_dbg(BATADV_DBG_TT, bat_priv,
1744 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n", 1720 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1745 tt_request->src, tt_request->ttvn, 1721 tt_request->src, tt_request->ttvn,
1746 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1722 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1747 1723
1748 1724
1749 my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); 1725 my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1750 req_ttvn = tt_request->ttvn; 1726 req_ttvn = tt_request->ttvn;
1751 1727
1752 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src); 1728 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1753 if (!orig_node) 1729 if (!orig_node)
1754 goto out; 1730 goto out;
1755 1731
1756 neigh_node = batadv_orig_node_get_router(orig_node); 1732 neigh_node = batadv_orig_node_get_router(orig_node);
1757 if (!neigh_node) 1733 if (!neigh_node)
1758 goto out; 1734 goto out;
1759 1735
1760 primary_if = batadv_primary_if_get_selected(bat_priv); 1736 primary_if = batadv_primary_if_get_selected(bat_priv);
1761 if (!primary_if) 1737 if (!primary_if)
1762 goto out; 1738 goto out;
1763 1739
1764 /* If the full table has been explicitly requested or the gap 1740 /* If the full table has been explicitly requested or the gap
1765 * is too big, send the whole local translation table 1741 * is too big, send the whole local translation table
1766 */ 1742 */
1767 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn || 1743 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1768 !bat_priv->tt.last_changeset) 1744 !bat_priv->tt.last_changeset)
1769 full_table = true; 1745 full_table = true;
1770 else 1746 else
1771 full_table = false; 1747 full_table = false;
1772 1748
1773 /* In this version, fragmentation is not implemented, so 1749 /* In this version, fragmentation is not implemented, so
1774 * I'll send only one packet with as many TT entries as I can 1750 * I'll send only one packet with as many TT entries as I can
1775 */ 1751 */
1776 if (!full_table) { 1752 if (!full_table) {
1777 spin_lock_bh(&bat_priv->tt.last_changeset_lock); 1753 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
1778 tt_len = bat_priv->tt.last_changeset_len; 1754 tt_len = bat_priv->tt.last_changeset_len;
1779 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1755 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1780 1756
1781 len = sizeof(*tt_response) + tt_len; 1757 len = sizeof(*tt_response) + tt_len;
1782 skb = dev_alloc_skb(len + ETH_HLEN); 1758 skb = dev_alloc_skb(len + ETH_HLEN);
1783 if (!skb) 1759 if (!skb)
1784 goto unlock; 1760 goto unlock;
1785 1761
1786 skb_reserve(skb, ETH_HLEN); 1762 skb_reserve(skb, ETH_HLEN);
1787 packet_pos = skb_put(skb, len); 1763 packet_pos = skb_put(skb, len);
1788 tt_response = (struct batadv_tt_query_packet *)packet_pos; 1764 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1789 tt_response->ttvn = req_ttvn; 1765 tt_response->ttvn = req_ttvn;
1790 tt_response->tt_data = htons(tt_tot); 1766 tt_response->tt_data = htons(tt_tot);
1791 1767
1792 tt_buff = skb->data + sizeof(*tt_response); 1768 tt_buff = skb->data + sizeof(*tt_response);
1793 memcpy(tt_buff, bat_priv->tt.last_changeset, 1769 memcpy(tt_buff, bat_priv->tt.last_changeset,
1794 bat_priv->tt.last_changeset_len); 1770 bat_priv->tt.last_changeset_len);
1795 spin_unlock_bh(&bat_priv->tt.last_changeset_lock); 1771 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1796 } else { 1772 } else {
1797 tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num); 1773 tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
1798 tt_len *= sizeof(struct batadv_tt_change); 1774 tt_len *= sizeof(struct batadv_tt_change);
1799 ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); 1775 ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1800 1776
1801 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1777 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1802 bat_priv->tt.local_hash, 1778 bat_priv->tt.local_hash,
1803 primary_if, 1779 primary_if,
1804 batadv_tt_local_valid_entry, 1780 batadv_tt_local_valid_entry,
1805 NULL); 1781 NULL);
1806 if (!skb) 1782 if (!skb)
1807 goto out; 1783 goto out;
1808 1784
1809 tt_response = (struct batadv_tt_query_packet *)skb->data; 1785 tt_response = (struct batadv_tt_query_packet *)skb->data;
1810 } 1786 }
1811 1787
1812 tt_response->header.packet_type = BATADV_TT_QUERY; 1788 tt_response->header.packet_type = BATADV_TT_QUERY;
1813 tt_response->header.version = BATADV_COMPAT_VERSION; 1789 tt_response->header.version = BATADV_COMPAT_VERSION;
1814 tt_response->header.ttl = BATADV_TTL; 1790 tt_response->header.ttl = BATADV_TTL;
1815 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN); 1791 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1816 memcpy(tt_response->dst, tt_request->src, ETH_ALEN); 1792 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1817 tt_response->flags = BATADV_TT_RESPONSE; 1793 tt_response->flags = BATADV_TT_RESPONSE;
1818 1794
1819 if (full_table) 1795 if (full_table)
1820 tt_response->flags |= BATADV_TT_FULL_TABLE; 1796 tt_response->flags |= BATADV_TT_FULL_TABLE;
1821 1797
1822 batadv_dbg(BATADV_DBG_TT, bat_priv, 1798 batadv_dbg(BATADV_DBG_TT, bat_priv,
1823 "Sending TT_RESPONSE to %pM via %pM [%c]\n", 1799 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1824 orig_node->orig, neigh_node->addr, 1800 orig_node->orig, neigh_node->addr,
1825 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1801 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1826 1802
1827 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX); 1803 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1828 1804
1829 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1805 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1830 ret = true; 1806 ret = true;
1831 goto out; 1807 goto out;
1832 1808
1833 unlock: 1809 unlock:
1834 spin_unlock_bh(&bat_priv->tt.last_changeset_lock); 1810 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1835 out: 1811 out:
1836 if (orig_node) 1812 if (orig_node)
1837 batadv_orig_node_free_ref(orig_node); 1813 batadv_orig_node_free_ref(orig_node);
1838 if (neigh_node) 1814 if (neigh_node)
1839 batadv_neigh_node_free_ref(neigh_node); 1815 batadv_neigh_node_free_ref(neigh_node);
1840 if (primary_if) 1816 if (primary_if)
1841 batadv_hardif_free_ref(primary_if); 1817 batadv_hardif_free_ref(primary_if);
1842 if (!ret) 1818 if (!ret)
1843 kfree_skb(skb); 1819 kfree_skb(skb);
1844 /* This packet was for me, so it doesn't need to be re-routed */ 1820 /* This packet was for me, so it doesn't need to be re-routed */
1845 return true; 1821 return true;
1846 } 1822 }
1847 1823
1848 bool batadv_send_tt_response(struct batadv_priv *bat_priv, 1824 bool batadv_send_tt_response(struct batadv_priv *bat_priv,
1849 struct batadv_tt_query_packet *tt_request) 1825 struct batadv_tt_query_packet *tt_request)
1850 { 1826 {
1851 if (batadv_is_my_mac(tt_request->dst)) { 1827 if (batadv_is_my_mac(tt_request->dst)) {
1852 /* don't answer backbone gws! */ 1828 /* don't answer backbone gws! */
1853 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) 1829 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1854 return true; 1830 return true;
1855 1831
1856 return batadv_send_my_tt_response(bat_priv, tt_request); 1832 return batadv_send_my_tt_response(bat_priv, tt_request);
1857 } else { 1833 } else {
1858 return batadv_send_other_tt_response(bat_priv, tt_request); 1834 return batadv_send_other_tt_response(bat_priv, tt_request);
1859 } 1835 }
1860 } 1836 }
1861 1837
1862 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv, 1838 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
1863 struct batadv_orig_node *orig_node, 1839 struct batadv_orig_node *orig_node,
1864 struct batadv_tt_change *tt_change, 1840 struct batadv_tt_change *tt_change,
1865 uint16_t tt_num_changes, uint8_t ttvn) 1841 uint16_t tt_num_changes, uint8_t ttvn)
1866 { 1842 {
1867 int i; 1843 int i;
1868 int roams; 1844 int roams;
1869 1845
1870 for (i = 0; i < tt_num_changes; i++) { 1846 for (i = 0; i < tt_num_changes; i++) {
1871 if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) { 1847 if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
1872 roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM; 1848 roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
1873 batadv_tt_global_del(bat_priv, orig_node, 1849 batadv_tt_global_del(bat_priv, orig_node,
1874 (tt_change + i)->addr, 1850 (tt_change + i)->addr,
1875 "tt removed by changes", 1851 "tt removed by changes",
1876 roams); 1852 roams);
1877 } else { 1853 } else {
1878 if (!batadv_tt_global_add(bat_priv, orig_node, 1854 if (!batadv_tt_global_add(bat_priv, orig_node,
1879 (tt_change + i)->addr, 1855 (tt_change + i)->addr,
1880 (tt_change + i)->flags, ttvn)) 1856 (tt_change + i)->flags, ttvn))
1881 /* If a problem occurs while storing a 1857 /* If a problem occurs while storing a
1882 * global_entry, stop the update 1858 * global_entry, stop the update
1883 * procedure without committing the 1859 * procedure without committing the
1884 * ttvn change, so that no corrupted 1860 * ttvn change, so that no corrupted
1885 * data is sent in a tt_request 1861 * data is sent in a tt_request
1886 */ 1862 */
1887 return; 1863 return;
1888 } 1864 }
1889 } 1865 }
1890 orig_node->tt_initialised = true; 1866 orig_node->tt_initialised = true;
1891 } 1867 }
1892 1868
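The loop in _batadv_tt_update_changes() above applies each attached batadv_tt_change in order: entries flagged BATADV_TT_CLIENT_DEL are removed from the global table, everything else is added, and the walk stops at the first failed add so the new ttvn is never committed on top of a half-applied diff. Below is a minimal userspace sketch of that pattern; the struct, flag value and helper names are simplified placeholders, not the batman-adv ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_DEL 0x01                      /* placeholder for BATADV_TT_CLIENT_DEL */

struct change {                            /* simplified stand-in for batadv_tt_change */
	uint16_t flags;
	uint8_t addr[6];
};

/* pretend table backends; the real code updates the global translation table */
static void del_entry(const uint8_t *addr) { printf("del %02x..\n", addr[0]); }
static bool add_entry(const uint8_t *addr) { printf("add %02x..\n", addr[0]); return true; }

/* apply the change list in order; stop at the first failed add so the
 * caller does not commit the new table version on top of a partial update
 */
static bool apply_changes(const struct change *chg, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		if (chg[i].flags & FLAG_DEL) {
			del_entry(chg[i].addr);
			continue;
		}
		if (!add_entry(chg[i].addr))
			return false;
	}
	return true;
}

int main(void)
{
	struct change list[2] = {
		{ .flags = 0,        .addr = { 0xaa, 1, 2, 3, 4, 5 } },
		{ .flags = FLAG_DEL, .addr = { 0xbb, 1, 2, 3, 4, 5 } },
	};

	return apply_changes(list, 2) ? 0 : 1;
}
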
1893 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, 1869 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
1894 struct batadv_tt_query_packet *tt_response) 1870 struct batadv_tt_query_packet *tt_response)
1895 { 1871 {
1896 struct batadv_orig_node *orig_node = NULL; 1872 struct batadv_orig_node *orig_node = NULL;
1897 1873
1898 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src); 1874 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1899 if (!orig_node) 1875 if (!orig_node)
1900 goto out; 1876 goto out;
1901 1877
1902 /* Purge the old table first. */ 1878 /* Purge the old table first. */
1903 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table"); 1879 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1904 1880
1905 _batadv_tt_update_changes(bat_priv, orig_node, 1881 _batadv_tt_update_changes(bat_priv, orig_node,
1906 (struct batadv_tt_change *)(tt_response + 1), 1882 (struct batadv_tt_change *)(tt_response + 1),
1907 ntohs(tt_response->tt_data), 1883 ntohs(tt_response->tt_data),
1908 tt_response->ttvn); 1884 tt_response->ttvn);
1909 1885
1910 spin_lock_bh(&orig_node->tt_buff_lock); 1886 spin_lock_bh(&orig_node->tt_buff_lock);
1911 kfree(orig_node->tt_buff); 1887 kfree(orig_node->tt_buff);
1912 orig_node->tt_buff_len = 0; 1888 orig_node->tt_buff_len = 0;
1913 orig_node->tt_buff = NULL; 1889 orig_node->tt_buff = NULL;
1914 spin_unlock_bh(&orig_node->tt_buff_lock); 1890 spin_unlock_bh(&orig_node->tt_buff_lock);
1915 1891
1916 atomic_set(&orig_node->last_ttvn, tt_response->ttvn); 1892 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1917 1893
1918 out: 1894 out:
1919 if (orig_node) 1895 if (orig_node)
1920 batadv_orig_node_free_ref(orig_node); 1896 batadv_orig_node_free_ref(orig_node);
1921 } 1897 }
1922 1898
1923 static void batadv_tt_update_changes(struct batadv_priv *bat_priv, 1899 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
1924 struct batadv_orig_node *orig_node, 1900 struct batadv_orig_node *orig_node,
1925 uint16_t tt_num_changes, uint8_t ttvn, 1901 uint16_t tt_num_changes, uint8_t ttvn,
1926 struct batadv_tt_change *tt_change) 1902 struct batadv_tt_change *tt_change)
1927 { 1903 {
1928 _batadv_tt_update_changes(bat_priv, orig_node, tt_change, 1904 _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
1929 tt_num_changes, ttvn); 1905 tt_num_changes, ttvn);
1930 1906
1931 batadv_tt_save_orig_buffer(bat_priv, orig_node, 1907 batadv_tt_save_orig_buffer(bat_priv, orig_node,
1932 (unsigned char *)tt_change, tt_num_changes); 1908 (unsigned char *)tt_change, tt_num_changes);
1933 atomic_set(&orig_node->last_ttvn, ttvn); 1909 atomic_set(&orig_node->last_ttvn, ttvn);
1934 } 1910 }
1935 1911
1936 bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr) 1912 bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1937 { 1913 {
1938 struct batadv_tt_local_entry *tt_local_entry = NULL; 1914 struct batadv_tt_local_entry *tt_local_entry = NULL;
1939 bool ret = false; 1915 bool ret = false;
1940 1916
1941 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr); 1917 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1942 if (!tt_local_entry) 1918 if (!tt_local_entry)
1943 goto out; 1919 goto out;
1944 /* Check if the client has been logically deleted (but is kept for 1920 /* Check if the client has been logically deleted (but is kept for
1945 * consistency purposes) 1921 * consistency purposes)
1946 */ 1922 */
1947 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING) 1923 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
1948 goto out; 1924 goto out;
1949 ret = true; 1925 ret = true;
1950 out: 1926 out:
1951 if (tt_local_entry) 1927 if (tt_local_entry)
1952 batadv_tt_local_entry_free_ref(tt_local_entry); 1928 batadv_tt_local_entry_free_ref(tt_local_entry);
1953 return ret; 1929 return ret;
1954 } 1930 }
1955 1931
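batadv_is_my_client() above follows the file's usual lookup idiom: take a counted reference from the hash, run the checks, and fall through a single out: label that releases whatever reference was actually taken. The userspace sketch below shows that goto-out cleanup shape; the toy reference count and stubbed lookup stand in for the kernel's atomic refcounting and hash search.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* toy refcounted entry; the kernel code uses atomic reference counts */
struct entry {
	int refcount;
	bool pending;
};

static void entry_put(struct entry *e)
{
	if (--e->refcount == 0)
		free(e);
}

/* lookup stub: the real code searches a hash table and takes a reference */
static struct entry *entry_get(const unsigned char *addr)
{
	(void)addr;
	return NULL;                       /* pretend the client is unknown */
}

/* same shape as batadv_is_my_client(): one exit point drops whatever
 * reference was taken, no matter which check bailed out
 */
static bool is_my_client(const unsigned char *addr)
{
	struct entry *e;
	bool ret = false;

	e = entry_get(addr);
	if (!e)
		goto out;
	if (e->pending)                    /* logically deleted, ignore it */
		goto out;
	ret = true;
out:
	if (e)
		entry_put(e);
	return ret;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	printf("is my client: %d\n", is_my_client(mac));
	return 0;
}
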
1956 void batadv_handle_tt_response(struct batadv_priv *bat_priv, 1932 void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1957 struct batadv_tt_query_packet *tt_response) 1933 struct batadv_tt_query_packet *tt_response)
1958 { 1934 {
1959 struct batadv_tt_req_node *node, *safe; 1935 struct batadv_tt_req_node *node, *safe;
1960 struct batadv_orig_node *orig_node = NULL; 1936 struct batadv_orig_node *orig_node = NULL;
1961 struct batadv_tt_change *tt_change; 1937 struct batadv_tt_change *tt_change;
1962 1938
1963 batadv_dbg(BATADV_DBG_TT, bat_priv, 1939 batadv_dbg(BATADV_DBG_TT, bat_priv,
1964 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n", 1940 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1965 tt_response->src, tt_response->ttvn, 1941 tt_response->src, tt_response->ttvn,
1966 ntohs(tt_response->tt_data), 1942 ntohs(tt_response->tt_data),
1967 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.')); 1943 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1968 1944
1969 /* we should have never asked a backbone gw */ 1945 /* we should have never asked a backbone gw */
1970 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src)) 1946 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1971 goto out; 1947 goto out;
1972 1948
1973 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src); 1949 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1974 if (!orig_node) 1950 if (!orig_node)
1975 goto out; 1951 goto out;
1976 1952
1977 if (tt_response->flags & BATADV_TT_FULL_TABLE) { 1953 if (tt_response->flags & BATADV_TT_FULL_TABLE) {
1978 batadv_tt_fill_gtable(bat_priv, tt_response); 1954 batadv_tt_fill_gtable(bat_priv, tt_response);
1979 } else { 1955 } else {
1980 tt_change = (struct batadv_tt_change *)(tt_response + 1); 1956 tt_change = (struct batadv_tt_change *)(tt_response + 1);
1981 batadv_tt_update_changes(bat_priv, orig_node, 1957 batadv_tt_update_changes(bat_priv, orig_node,
1982 ntohs(tt_response->tt_data), 1958 ntohs(tt_response->tt_data),
1983 tt_response->ttvn, tt_change); 1959 tt_response->ttvn, tt_change);
1984 } 1960 }
1985 1961
1986 /* Delete the tt_req_node from pending tt_requests list */ 1962 /* Delete the tt_req_node from pending tt_requests list */
1987 spin_lock_bh(&bat_priv->tt.req_list_lock); 1963 spin_lock_bh(&bat_priv->tt.req_list_lock);
1988 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { 1964 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1989 if (!batadv_compare_eth(node->addr, tt_response->src)) 1965 if (!batadv_compare_eth(node->addr, tt_response->src))
1990 continue; 1966 continue;
1991 list_del(&node->list); 1967 list_del(&node->list);
1992 kfree(node); 1968 kfree(node);
1993 } 1969 }
1994 spin_unlock_bh(&bat_priv->tt.req_list_lock); 1970 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1995 1971
1996 /* Recalculate the CRC for this orig_node and store it */ 1972 /* Recalculate the CRC for this orig_node and store it */
1997 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); 1973 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
1998 /* Roaming phase is over: tables are in sync again. I can 1974 /* Roaming phase is over: tables are in sync again. I can
1999 * unset the flag 1975 * unset the flag
2000 */ 1976 */
2001 orig_node->tt_poss_change = false; 1977 orig_node->tt_poss_change = false;
2002 out: 1978 out:
2003 if (orig_node) 1979 if (orig_node)
2004 batadv_orig_node_free_ref(orig_node); 1980 batadv_orig_node_free_ref(orig_node);
2005 } 1981 }
2006 1982
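When a response arrives, the matching entries are removed from tt.req_list with the _safe variant of the list walk because each node is freed inside the loop. The sketch below shows the same "unlink before freeing" requirement on a plain singly linked list; the kernel macro does the equivalent for struct list_head by caching the next pointer up front.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct req_node {
	unsigned char addr[6];
	struct req_node *next;
};

/* drop every pending request that matches addr; the next pointer is read
 * before the node is freed, which is what list_for_each_entry_safe()
 * guarantees for the kernel list
 */
static void drop_matching(struct req_node **head, const unsigned char *addr)
{
	struct req_node **pp = head;
	struct req_node *node;

	while ((node = *pp) != NULL) {
		if (memcmp(node->addr, addr, 6) == 0) {
			*pp = node->next;          /* unlink first */
			free(node);                /* then release */
			continue;
		}
		pp = &node->next;
	}
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 7 };
	struct req_node *n = calloc(1, sizeof(*n));
	struct req_node *head = n;

	if (!n)
		return 1;
	memcpy(n->addr, mac, 6);
	drop_matching(&head, mac);
	printf("list empty: %d\n", head == NULL);
	return 0;
}
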
2007 int batadv_tt_init(struct batadv_priv *bat_priv) 1983 int batadv_tt_init(struct batadv_priv *bat_priv)
2008 { 1984 {
2009 int ret; 1985 int ret;
2010 1986
2011 ret = batadv_tt_local_init(bat_priv); 1987 ret = batadv_tt_local_init(bat_priv);
2012 if (ret < 0) 1988 if (ret < 0)
2013 return ret; 1989 return ret;
2014 1990
2015 ret = batadv_tt_global_init(bat_priv); 1991 ret = batadv_tt_global_init(bat_priv);
2016 if (ret < 0) 1992 if (ret < 0)
2017 return ret; 1993 return ret;
2018 1994
2019 batadv_tt_start_timer(bat_priv); 1995 batadv_tt_start_timer(bat_priv);
2020 1996
2021 return 1; 1997 return 1;
2022 } 1998 }
2023 1999
2024 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv) 2000 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
2025 { 2001 {
2026 struct batadv_tt_roam_node *node, *safe; 2002 struct batadv_tt_roam_node *node, *safe;
2027 2003
2028 spin_lock_bh(&bat_priv->tt.roam_list_lock); 2004 spin_lock_bh(&bat_priv->tt.roam_list_lock);
2029 2005
2030 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { 2006 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
2031 list_del(&node->list); 2007 list_del(&node->list);
2032 kfree(node); 2008 kfree(node);
2033 } 2009 }
2034 2010
2035 spin_unlock_bh(&bat_priv->tt.roam_list_lock); 2011 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2036 } 2012 }
2037 2013
2038 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) 2014 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
2039 { 2015 {
2040 struct batadv_tt_roam_node *node, *safe; 2016 struct batadv_tt_roam_node *node, *safe;
2041 2017
2042 spin_lock_bh(&bat_priv->tt.roam_list_lock); 2018 spin_lock_bh(&bat_priv->tt.roam_list_lock);
2043 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) { 2019 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
2044 if (!batadv_has_timed_out(node->first_time, 2020 if (!batadv_has_timed_out(node->first_time,
2045 BATADV_ROAMING_MAX_TIME)) 2021 BATADV_ROAMING_MAX_TIME))
2046 continue; 2022 continue;
2047 2023
2048 list_del(&node->list); 2024 list_del(&node->list);
2049 kfree(node); 2025 kfree(node);
2050 } 2026 }
2051 spin_unlock_bh(&bat_priv->tt.roam_list_lock); 2027 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2052 } 2028 }
2053 2029
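batadv_tt_roam_purge() above drops every roam_list entry whose first_time is older than BATADV_ROAMING_MAX_TIME; batadv_has_timed_out() is a thin wrapper around jiffies arithmetic. A userspace sketch of the same age test, using wall-clock seconds in place of jiffies:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* seconds since the epoch, standing in for the kernel's jiffies counter */
static time_t now_s(void)
{
	return time(NULL);
}

/* true once more than timeout_s seconds have passed since first_time */
static bool has_timed_out(time_t first_time, time_t timeout_s)
{
	return now_s() - first_time > timeout_s;
}

int main(void)
{
	time_t first_time = now_s();

	printf("timed out yet: %d\n", has_timed_out(first_time, 60));
	return 0;
}
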
2054 /* This function checks whether the client has already reached the 2030 /* This function checks whether the client has already reached the
2055 * maximum number of possible roaming phases. If so, the ROAMING_ADV 2031 * maximum number of possible roaming phases. If so, the ROAMING_ADV
2056 * will not be sent. 2032 * will not be sent.
2057 * 2033 *
2058 * returns true if the ROAMING_ADV can be sent, false otherwise 2034 * returns true if the ROAMING_ADV can be sent, false otherwise
2059 */ 2035 */
2060 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, 2036 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
2061 uint8_t *client) 2037 uint8_t *client)
2062 { 2038 {
2063 struct batadv_tt_roam_node *tt_roam_node; 2039 struct batadv_tt_roam_node *tt_roam_node;
2064 bool ret = false; 2040 bool ret = false;
2065 2041
2066 spin_lock_bh(&bat_priv->tt.roam_list_lock); 2042 spin_lock_bh(&bat_priv->tt.roam_list_lock);
2067 /* A new roaming entry is created only if this client is not 2043 /* A new roaming entry is created only if this client is not
2068 * already being tracked in the roam_list 2044 * already being tracked in the roam_list
2069 */ 2045 */
2070 list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) { 2046 list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
2071 if (!batadv_compare_eth(tt_roam_node->addr, client)) 2047 if (!batadv_compare_eth(tt_roam_node->addr, client))
2072 continue; 2048 continue;
2073 2049
2074 if (batadv_has_timed_out(tt_roam_node->first_time, 2050 if (batadv_has_timed_out(tt_roam_node->first_time,
2075 BATADV_ROAMING_MAX_TIME)) 2051 BATADV_ROAMING_MAX_TIME))
2076 continue; 2052 continue;
2077 2053
2078 if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter)) 2054 if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
2079 /* Sorry, you roamed too many times! */ 2055 /* Sorry, you roamed too many times! */
2080 goto unlock; 2056 goto unlock;
2081 ret = true; 2057 ret = true;
2082 break; 2058 break;
2083 } 2059 }
2084 2060
2085 if (!ret) { 2061 if (!ret) {
2086 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC); 2062 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
2087 if (!tt_roam_node) 2063 if (!tt_roam_node)
2088 goto unlock; 2064 goto unlock;
2089 2065
2090 tt_roam_node->first_time = jiffies; 2066 tt_roam_node->first_time = jiffies;
2091 atomic_set(&tt_roam_node->counter, 2067 atomic_set(&tt_roam_node->counter,
2092 BATADV_ROAMING_MAX_COUNT - 1); 2068 BATADV_ROAMING_MAX_COUNT - 1);
2093 memcpy(tt_roam_node->addr, client, ETH_ALEN); 2069 memcpy(tt_roam_node->addr, client, ETH_ALEN);
2094 2070
2095 list_add(&tt_roam_node->list, &bat_priv->tt.roam_list); 2071 list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
2096 ret = true; 2072 ret = true;
2097 } 2073 }
2098 2074
2099 unlock: 2075 unlock:
2100 spin_unlock_bh(&bat_priv->tt.roam_list_lock); 2076 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2101 return ret; 2077 return ret;
2102 } 2078 }
2103 2079
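The function above gives each client a roaming budget: an existing roam_list entry only grants another ROAMING_ADV while batadv_atomic_dec_not_zero() can still decrement its counter, and a fresh entry starts with BATADV_ROAMING_MAX_COUNT - 1 left. A single-threaded sketch of that budget follows; the real counter is an atomic_t protected by roam_list_lock, and entries also expire after BATADV_ROAMING_MAX_TIME, which is left out here.

#include <stdbool.h>
#include <stdio.h>

#define ROAM_MAX_COUNT 5   /* placeholder for BATADV_ROAMING_MAX_COUNT */

struct roam_budget {
	bool in_use;
	int counter;
};

/* decrement only if the counter is still positive; mirrors
 * batadv_atomic_dec_not_zero() without the atomicity
 */
static bool dec_not_zero(int *counter)
{
	if (*counter == 0)
		return false;
	(*counter)--;
	return true;
}

static bool check_roam_count(struct roam_budget *b)
{
	if (!b->in_use) {
		/* first roaming event in this window: start a new budget */
		b->in_use = true;
		b->counter = ROAM_MAX_COUNT - 1;
		return true;
	}
	/* subsequent events spend the remaining budget */
	return dec_not_zero(&b->counter);
}

int main(void)
{
	struct roam_budget b = { 0 };
	int i, allowed = 0;

	for (i = 0; i < 10; i++)
		if (check_roam_count(&b))
			allowed++;

	printf("%d of 10 roaming events allowed\n", allowed);  /* prints 5 */
	return 0;
}
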
2104 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client, 2080 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2105 struct batadv_orig_node *orig_node) 2081 struct batadv_orig_node *orig_node)
2106 { 2082 {
2107 struct batadv_neigh_node *neigh_node = NULL; 2083 struct batadv_neigh_node *neigh_node = NULL;
2108 struct sk_buff *skb = NULL; 2084 struct sk_buff *skb = NULL;
2109 struct batadv_roam_adv_packet *roam_adv_packet; 2085 struct batadv_roam_adv_packet *roam_adv_packet;
2110 int ret = 1; 2086 int ret = 1;
2111 struct batadv_hard_iface *primary_if; 2087 struct batadv_hard_iface *primary_if;
2112 size_t len = sizeof(*roam_adv_packet); 2088 size_t len = sizeof(*roam_adv_packet);
2113 2089
2114 /* before going on we have to check whether the client has 2090 /* before going on we have to check whether the client has
2115 * already roamed to us too many times 2091 * already roamed to us too many times
2116 */ 2092 */
2117 if (!batadv_tt_check_roam_count(bat_priv, client)) 2093 if (!batadv_tt_check_roam_count(bat_priv, client))
2118 goto out; 2094 goto out;
2119 2095
2120 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN); 2096 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
2121 if (!skb) 2097 if (!skb)
2122 goto out; 2098 goto out;
2123 2099
2124 skb_reserve(skb, ETH_HLEN); 2100 skb_reserve(skb, ETH_HLEN);
2125 2101
2126 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len); 2102 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
2127 2103
2128 roam_adv_packet->header.packet_type = BATADV_ROAM_ADV; 2104 roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
2129 roam_adv_packet->header.version = BATADV_COMPAT_VERSION; 2105 roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
2130 roam_adv_packet->header.ttl = BATADV_TTL; 2106 roam_adv_packet->header.ttl = BATADV_TTL;
2131 roam_adv_packet->reserved = 0; 2107 roam_adv_packet->reserved = 0;
2132 primary_if = batadv_primary_if_get_selected(bat_priv); 2108 primary_if = batadv_primary_if_get_selected(bat_priv);
2133 if (!primary_if) 2109 if (!primary_if)
2134 goto out; 2110 goto out;
2135 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN); 2111 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
2136 batadv_hardif_free_ref(primary_if); 2112 batadv_hardif_free_ref(primary_if);
2137 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); 2113 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
2138 memcpy(roam_adv_packet->client, client, ETH_ALEN); 2114 memcpy(roam_adv_packet->client, client, ETH_ALEN);
2139 2115
2140 neigh_node = batadv_orig_node_get_router(orig_node); 2116 neigh_node = batadv_orig_node_get_router(orig_node);
2141 if (!neigh_node) 2117 if (!neigh_node)
2142 goto out; 2118 goto out;
2143 2119
2144 batadv_dbg(BATADV_DBG_TT, bat_priv, 2120 batadv_dbg(BATADV_DBG_TT, bat_priv,
2145 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n", 2121 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2146 orig_node->orig, client, neigh_node->addr); 2122 orig_node->orig, client, neigh_node->addr);
2147 2123
2148 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX); 2124 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
2149 2125
2150 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 2126 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
2151 ret = 0; 2127 ret = 0;
2152 2128
2153 out: 2129 out:
2154 if (neigh_node) 2130 if (neigh_node)
2155 batadv_neigh_node_free_ref(neigh_node); 2131 batadv_neigh_node_free_ref(neigh_node);
2156 if (ret) 2132 if (ret)
2157 kfree_skb(skb); 2133 kfree_skb(skb);
2158 return; 2134 return;
2159 } 2135 }
2160 2136
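batadv_send_roam_adv() above reserves ETH_HLEN of headroom in the skb, appends the advert with skb_put(), and then fills in the header fields plus the src/dst/client MAC addresses. The userspace sketch below does the equivalent layout into a flat buffer; the struct and the constant values are illustrative stand-ins, not the real packed batadv_roam_adv_packet from packet.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

/* illustrative layout only; the real struct batadv_roam_adv_packet is
 * defined in packet.h and packed for the wire
 */
struct roam_adv {
	uint8_t packet_type;
	uint8_t version;
	uint8_t ttl;
	uint8_t reserved;
	uint8_t dst[ETH_ALEN];
	uint8_t src[ETH_ALEN];
	uint8_t client[ETH_ALEN];
};

int main(void)
{
	uint8_t frame[128];
	const uint8_t my_mac[ETH_ALEN]     = { 0x02, 0, 0, 0, 0, 1 };
	const uint8_t orig_mac[ETH_ALEN]   = { 0x02, 0, 0, 0, 0, 2 };
	const uint8_t client_mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 3 };
	struct roam_adv adv;

	adv.packet_type = 0x05;            /* placeholder, not the real type value */
	adv.version = 14;                  /* placeholder compat version */
	adv.ttl = 50;
	adv.reserved = 0;
	memcpy(adv.src, my_mac, ETH_ALEN);
	memcpy(adv.dst, orig_mac, ETH_ALEN);
	memcpy(adv.client, client_mac, ETH_ALEN);

	/* leave headroom for the ethernet header, as skb_reserve(skb, ETH_HLEN)
	 * does, then place the payload where skb_put() would append it
	 */
	memcpy(frame + ETH_HLEN, &adv, sizeof(adv));

	printf("%zu payload bytes placed after %d bytes of headroom\n",
	       sizeof(adv), ETH_HLEN);
	return 0;
}
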
2161 static void batadv_tt_purge(struct work_struct *work) 2137 static void batadv_tt_purge(struct work_struct *work)
2162 { 2138 {
2163 struct delayed_work *delayed_work; 2139 struct delayed_work *delayed_work;
2164 struct batadv_priv_tt *priv_tt; 2140 struct batadv_priv_tt *priv_tt;
2165 struct batadv_priv *bat_priv; 2141 struct batadv_priv *bat_priv;
2166 2142
2167 delayed_work = container_of(work, struct delayed_work, work); 2143 delayed_work = container_of(work, struct delayed_work, work);
2168 priv_tt = container_of(delayed_work, struct batadv_priv_tt, work); 2144 priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
2169 bat_priv = container_of(priv_tt, struct batadv_priv, tt); 2145 bat_priv = container_of(priv_tt, struct batadv_priv, tt);
2170 2146
2171 batadv_tt_local_purge(bat_priv); 2147 batadv_tt_local_purge(bat_priv);
2172 batadv_tt_global_purge(bat_priv); 2148 batadv_tt_global_purge(bat_priv);
2173 batadv_tt_req_purge(bat_priv); 2149 batadv_tt_req_purge(bat_priv);
2174 batadv_tt_roam_purge(bat_priv); 2150 batadv_tt_roam_purge(bat_priv);
2175 2151
2176 batadv_tt_start_timer(bat_priv); 2152 batadv_tt_start_timer(bat_priv);
2177 } 2153 }
2178 2154
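batadv_tt_purge() above recovers its batadv_priv purely from the work pointer, through a chain of container_of() casts: work_struct to delayed_work, delayed_work to batadv_priv_tt, batadv_priv_tt to batadv_priv. The sketch below reproduces that pointer arithmetic in userspace with offsetof; the struct layouts are trimmed stand-ins, not the real batman-adv types.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct delayed_work {
	struct work_struct work;
	long timer;
};

struct priv_tt {               /* stands in for struct batadv_priv_tt */
	struct delayed_work work;
	int local_changes;
};

struct priv {                  /* stands in for struct batadv_priv */
	int mesh_enable;
	struct priv_tt tt;
};

int main(void)
{
	struct priv p = { .mesh_enable = 1 };
	struct work_struct *w = &p.tt.work.work;   /* what the workqueue hands back */
	struct delayed_work *dw;
	struct priv_tt *tt;
	struct priv *recovered;

	dw = container_of(w, struct delayed_work, work);
	tt = container_of(dw, struct priv_tt, work);
	recovered = container_of(tt, struct priv, tt);

	printf("recovered the right priv: %d\n", recovered == &p);
	return 0;
}
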
2179 void batadv_tt_free(struct batadv_priv *bat_priv) 2155 void batadv_tt_free(struct batadv_priv *bat_priv)
2180 { 2156 {
2181 cancel_delayed_work_sync(&bat_priv->tt.work); 2157 cancel_delayed_work_sync(&bat_priv->tt.work);
2182 2158
2183 batadv_tt_local_table_free(bat_priv); 2159 batadv_tt_local_table_free(bat_priv);
2184 batadv_tt_global_table_free(bat_priv); 2160 batadv_tt_global_table_free(bat_priv);
2185 batadv_tt_req_list_free(bat_priv); 2161 batadv_tt_req_list_free(bat_priv);
2186 batadv_tt_changes_list_free(bat_priv); 2162 batadv_tt_changes_list_free(bat_priv);
2187 batadv_tt_roam_list_free(bat_priv); 2163 batadv_tt_roam_list_free(bat_priv);
2188 2164
2189 kfree(bat_priv->tt.last_changeset); 2165 kfree(bat_priv->tt.last_changeset);
2190 } 2166 }
2191 2167
2192 /* This function enables or disables the specified flags for all the entries 2168 /* This function enables or disables the specified flags for all the entries
2193 * in the given hash table and returns the number of modified entries 2169 * in the given hash table and returns the number of modified entries
2194 */ 2170 */
2195 static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash, 2171 static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2196 uint16_t flags, bool enable) 2172 uint16_t flags, bool enable)
2197 { 2173 {
2198 uint32_t i; 2174 uint32_t i;
2199 uint16_t changed_num = 0; 2175 uint16_t changed_num = 0;
2200 struct hlist_head *head; 2176 struct hlist_head *head;
2201 struct hlist_node *node; 2177 struct hlist_node *node;
2202 struct batadv_tt_common_entry *tt_common_entry; 2178 struct batadv_tt_common_entry *tt_common_entry;
2203 2179
2204 if (!hash) 2180 if (!hash)
2205 goto out; 2181 goto out;
2206 2182
2207 for (i = 0; i < hash->size; i++) { 2183 for (i = 0; i < hash->size; i++) {
2208 head = &hash->table[i]; 2184 head = &hash->table[i];
2209 2185
2210 rcu_read_lock(); 2186 rcu_read_lock();
2211 hlist_for_each_entry_rcu(tt_common_entry, node, 2187 hlist_for_each_entry_rcu(tt_common_entry, node,
2212 head, hash_entry) { 2188 head, hash_entry) {
2213 if (enable) { 2189 if (enable) {
2214 if ((tt_common_entry->flags & flags) == flags) 2190 if ((tt_common_entry->flags & flags) == flags)
2215 continue; 2191 continue;
2216 tt_common_entry->flags |= flags; 2192 tt_common_entry->flags |= flags;
2217 } else { 2193 } else {
2218 if (!(tt_common_entry->flags & flags)) 2194 if (!(tt_common_entry->flags & flags))
2219 continue; 2195 continue;
2220 tt_common_entry->flags &= ~flags; 2196 tt_common_entry->flags &= ~flags;
2221 } 2197 }
2222 changed_num++; 2198 changed_num++;
2223 } 2199 }
2224 rcu_read_unlock(); 2200 rcu_read_unlock();
2225 } 2201 }
2226 out: 2202 out:
2227 return changed_num; 2203 return changed_num;
2228 } 2204 }
2229 2205
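batadv_tt_set_flags() only counts an entry when the operation really changes it: on enable it skips entries that already carry all of the requested bits, on disable it skips entries that carry none of them. The caller depends on that count, for example to add the de-flagged BATADV_TT_CLIENT_NEW entries back into local_entry_num. Below is a flat-array sketch of the same accounting, with the hash buckets and the RCU walk left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t set_flags(uint16_t *entries, int num, uint16_t flags,
			  bool enable)
{
	uint16_t changed = 0;
	int i;

	for (i = 0; i < num; i++) {
		if (enable) {
			if ((entries[i] & flags) == flags)
				continue;       /* nothing to do */
			entries[i] |= flags;
		} else {
			if (!(entries[i] & flags))
				continue;       /* nothing to do */
			entries[i] &= ~flags;
		}
		changed++;
	}
	return changed;
}

int main(void)
{
	uint16_t table[4] = { 0x0000, 0x0010, 0x0010, 0x0001 };

	/* clear flag 0x0010; only the two entries carrying it are counted */
	printf("changed: %u\n", set_flags(table, 4, 0x0010, false));
	return 0;
}
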
2230 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */ 2206 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2231 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) 2207 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2232 { 2208 {
2233 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 2209 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2234 struct batadv_tt_common_entry *tt_common; 2210 struct batadv_tt_common_entry *tt_common;
2235 struct batadv_tt_local_entry *tt_local; 2211 struct batadv_tt_local_entry *tt_local;
2236 struct hlist_node *node, *node_tmp; 2212 struct hlist_node *node, *node_tmp;
2237 struct hlist_head *head; 2213 struct hlist_head *head;
2238 spinlock_t *list_lock; /* protects write access to the hash lists */ 2214 spinlock_t *list_lock; /* protects write access to the hash lists */
2239 uint32_t i; 2215 uint32_t i;
2240 2216
2241 if (!hash) 2217 if (!hash)
2242 return; 2218 return;
2243 2219
2244 for (i = 0; i < hash->size; i++) { 2220 for (i = 0; i < hash->size; i++) {
2245 head = &hash->table[i]; 2221 head = &hash->table[i];
2246 list_lock = &hash->list_locks[i]; 2222 list_lock = &hash->list_locks[i];
2247 2223
2248 spin_lock_bh(list_lock); 2224 spin_lock_bh(list_lock);
2249 hlist_for_each_entry_safe(tt_common, node, node_tmp, head, 2225 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
2250 hash_entry) { 2226 hash_entry) {
2251 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING)) 2227 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
2252 continue; 2228 continue;
2253 2229
2254 batadv_dbg(BATADV_DBG_TT, bat_priv, 2230 batadv_dbg(BATADV_DBG_TT, bat_priv,
2255 "Deleting local tt entry (%pM): pending\n", 2231 "Deleting local tt entry (%pM): pending\n",
2256 tt_common->addr); 2232 tt_common->addr);
2257 2233
2258 atomic_dec(&bat_priv->tt.local_entry_num); 2234 atomic_dec(&bat_priv->tt.local_entry_num);
2259 hlist_del_rcu(node); 2235 hlist_del_rcu(node);
2260 tt_local = container_of(tt_common, 2236 tt_local = container_of(tt_common,
2261 struct batadv_tt_local_entry, 2237 struct batadv_tt_local_entry,
2262 common); 2238 common);
2263 batadv_tt_local_entry_free_ref(tt_local); 2239 batadv_tt_local_entry_free_ref(tt_local);
2264 } 2240 }
2265 spin_unlock_bh(list_lock); 2241 spin_unlock_bh(list_lock);
2266 } 2242 }
2267 2243
2268 } 2244 }
2269 2245
2270 static int batadv_tt_commit_changes(struct batadv_priv *bat_priv, 2246 static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2271 unsigned char **packet_buff, 2247 unsigned char **packet_buff,
2272 int *packet_buff_len, int packet_min_len) 2248 int *packet_buff_len, int packet_min_len)
2273 { 2249 {
2274 uint16_t changed_num = 0; 2250 uint16_t changed_num = 0;
2275 2251
2276 if (atomic_read(&bat_priv->tt.local_changes) < 1) 2252 if (atomic_read(&bat_priv->tt.local_changes) < 1)
2277 return -ENOENT; 2253 return -ENOENT;
2278 2254
2279 changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash, 2255 changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
2280 BATADV_TT_CLIENT_NEW, false); 2256 BATADV_TT_CLIENT_NEW, false);
2281 2257
2282 /* all reset entries have to be counted as local entries */ 2258 /* all reset entries have to be counted as local entries */
2283 atomic_add(changed_num, &bat_priv->tt.local_entry_num); 2259 atomic_add(changed_num, &bat_priv->tt.local_entry_num);
2284 batadv_tt_local_purge_pending_clients(bat_priv); 2260 batadv_tt_local_purge_pending_clients(bat_priv);
2285 bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv); 2261 bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
2286 2262
2287 /* Increment the TTVN only once per OGM interval */ 2263 /* Increment the TTVN only once per OGM interval */
2288 atomic_inc(&bat_priv->tt.vn); 2264 atomic_inc(&bat_priv->tt.vn);
2289 batadv_dbg(BATADV_DBG_TT, bat_priv, 2265 batadv_dbg(BATADV_DBG_TT, bat_priv,
2290 "Local changes committed, updating to ttvn %u\n", 2266 "Local changes committed, updating to ttvn %u\n",
2291 (uint8_t)atomic_read(&bat_priv->tt.vn)); 2267 (uint8_t)atomic_read(&bat_priv->tt.vn));
2292 bat_priv->tt.poss_change = false; 2268 bat_priv->tt.poss_change = false;
2293 2269
2294 /* reset the sending counter */ 2270 /* reset the sending counter */
2295 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX); 2271 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2296 2272
2297 return batadv_tt_changes_fill_buff(bat_priv, packet_buff, 2273 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2298 packet_buff_len, packet_min_len); 2274 packet_buff_len, packet_min_len);
2299 } 2275 }
2300 2276
2301 /* when calling this function (hard_iface == primary_if) has to be true */ 2277 /* when calling this function (hard_iface == primary_if) has to be true */
2302 int batadv_tt_append_diff(struct batadv_priv *bat_priv, 2278 int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2303 unsigned char **packet_buff, int *packet_buff_len, 2279 unsigned char **packet_buff, int *packet_buff_len,
2304 int packet_min_len) 2280 int packet_min_len)
2305 { 2281 {
2306 int tt_num_changes; 2282 int tt_num_changes;
2307 2283
2308 /* if at least one change happened */ 2284 /* if at least one change happened */
2309 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff, 2285 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2310 packet_buff_len, 2286 packet_buff_len,
2311 packet_min_len); 2287 packet_min_len);
2312 2288
2313 /* if the changes have been sent often enough */ 2289 /* if the changes have been sent often enough */
2314 if ((tt_num_changes < 0) && 2290 if ((tt_num_changes < 0) &&
2315 (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) { 2291 (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
2316 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, 2292 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2317 packet_min_len, packet_min_len); 2293 packet_min_len, packet_min_len);
2318 tt_num_changes = 0; 2294 tt_num_changes = 0;
2319 } 2295 }
2320 2296
2321 return tt_num_changes; 2297 return tt_num_changes;
2322 } 2298 }
2323 2299
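batadv_tt_append_diff() keeps the last committed diff attached to outgoing OGMs until ogm_append_cnt runs out: when batadv_tt_commit_changes() reports nothing new (a negative return) and batadv_atomic_dec_not_zero() can no longer spend the budget, the buffer is shrunk back to the bare packet and zero changes are advertised. The small sketch below is one reading of that send budget, with the buffer handling reduced to return values; it is not a drop-in replacement for the code above.

#include <stdio.h>

#define OGM_APPEND_MAX 3   /* placeholder for BATADV_TT_OGM_APPEND_MAX */

static int append_cnt;

/* new_changes < 0 means "nothing new was committed this interval" */
static int append_diff(int new_changes)
{
	if (new_changes >= 0) {
		/* fresh diff: the commit path resets the budget */
		append_cnt = OGM_APPEND_MAX;
		return new_changes;
	}

	/* no fresh diff: keep resending the previous one while budget remains */
	if (append_cnt > 0) {
		append_cnt--;
		return -1;       /* caller keeps the old buffer as-is */
	}

	return 0;                /* budget exhausted: attach nothing */
}

int main(void)
{
	int i;

	printf("%d\n", append_diff(2));      /* commit two changes */
	for (i = 0; i < 5; i++)
		printf("%d\n", append_diff(-1));
	return 0;
}
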
2324 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src, 2300 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
2325 uint8_t *dst) 2301 uint8_t *dst)
2326 { 2302 {
2327 struct batadv_tt_local_entry *tt_local_entry = NULL; 2303 struct batadv_tt_local_entry *tt_local_entry = NULL;
2328 struct batadv_tt_global_entry *tt_global_entry = NULL; 2304 struct batadv_tt_global_entry *tt_global_entry = NULL;
2329 bool ret = false; 2305 bool ret = false;
2330 2306
2331 if (!atomic_read(&bat_priv->ap_isolation)) 2307 if (!atomic_read(&bat_priv->ap_isolation))
2332 goto out; 2308 goto out;
2333 2309
2334 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst); 2310 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2335 if (!tt_local_entry) 2311 if (!tt_local_entry)
2336 goto out; 2312 goto out;
2337 2313
2338 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src); 2314 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2339 if (!tt_global_entry) 2315 if (!tt_global_entry)
2340 goto out; 2316 goto out;
2341 2317
2342 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry)) 2318 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2343 goto out; 2319 goto out;
2344 2320
2345 ret = true; 2321 ret = true;
2346 2322
2347 out: 2323 out:
2348 if (tt_global_entry) 2324 if (tt_global_entry)
2349 batadv_tt_global_entry_free_ref(tt_global_entry); 2325 batadv_tt_global_entry_free_ref(tt_global_entry);
2350 if (tt_local_entry) 2326 if (tt_local_entry)
2351 batadv_tt_local_entry_free_ref(tt_local_entry); 2327 batadv_tt_local_entry_free_ref(tt_local_entry);
2352 return ret; 2328 return ret;
2353 } 2329 }
2354 2330
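batadv_is_ap_isolated() only reports isolation when the ap_isolation switch is on, the destination resolves to one of our local clients, the source is a known global client, and the per-pair helper _batadv_is_ap_isolated() (defined elsewhere in this file, not shown in this hunk) agrees. The sketch below mirrors that decision chain; the pair policy is a stub whose "both endpoints are Wi-Fi clients" rule is an assumption made for illustration only, and the reference counting of the real lookups is omitted.

#include <stdbool.h>
#include <stdio.h>

struct entry { bool is_wifi; };

/* stand-ins for the lookups; return NULL when the client is unknown */
static struct entry *local_find(const unsigned char *mac)  { (void)mac; return NULL; }
static struct entry *global_find(const unsigned char *mac) { (void)mac; return NULL; }

/* pair policy stub; assumed rule, the real helper lives elsewhere */
static bool pair_is_isolated(const struct entry *l, const struct entry *g)
{
	return l->is_wifi && g->is_wifi;
}

static bool is_ap_isolated(bool ap_isolation, const unsigned char *src,
			   const unsigned char *dst)
{
	struct entry *local, *global;

	if (!ap_isolation)
		return false;

	local = local_find(dst);          /* destination must be a local client */
	if (!local)
		return false;

	global = global_find(src);        /* source must be a known global client */
	if (!global)
		return false;

	return pair_is_isolated(local, global);
}

int main(void)
{
	unsigned char a[6] = { 0 }, b[6] = { 0 };

	printf("isolated: %d\n", is_ap_isolated(true, a, b));
	return 0;
}
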
2355 void batadv_tt_update_orig(struct batadv_priv *bat_priv, 2331 void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2356 struct batadv_orig_node *orig_node, 2332 struct batadv_orig_node *orig_node,
2357 const unsigned char *tt_buff, uint8_t tt_num_changes, 2333 const unsigned char *tt_buff, uint8_t tt_num_changes,
2358 uint8_t ttvn, uint16_t tt_crc) 2334 uint8_t ttvn, uint16_t tt_crc)
2359 { 2335 {
2360 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 2336 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2361 bool full_table = true; 2337 bool full_table = true;
2362 struct batadv_tt_change *tt_change; 2338 struct batadv_tt_change *tt_change;
2363 2339
2364 /* don't care about a backbone gateway's updates. */ 2340 /* don't care about a backbone gateway's updates. */
2365 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) 2341 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2366 return; 2342 return;
2367 2343
2368 /* orig table not initialised AND first diff is in the OGM OR the ttvn 2344 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2369 * increased by one -> we can apply the attached changes 2345 * increased by one -> we can apply the attached changes
2370 */ 2346 */
2371 if ((!orig_node->tt_initialised && ttvn == 1) || 2347 if ((!orig_node->tt_initialised && ttvn == 1) ||
2372 ttvn - orig_ttvn == 1) { 2348 ttvn - orig_ttvn == 1) {
2373 /* the OGM could not contain the changes due to their size or 2349 /* the OGM could not contain the changes due to their size or
2374 * because they have already been sent BATADV_TT_OGM_APPEND_MAX 2350 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
2375 * times. 2351 * times.
2376 * In this case send a tt request 2352 * In this case send a tt request
2377 */ 2353 */
2378 if (!tt_num_changes) { 2354 if (!tt_num_changes) {
2379 full_table = false; 2355 full_table = false;
2380 goto request_table; 2356 goto request_table;
2381 } 2357 }
2382 2358
2383 tt_change = (struct batadv_tt_change *)tt_buff; 2359 tt_change = (struct batadv_tt_change *)tt_buff;
2384 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes, 2360 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
2385 ttvn, tt_change); 2361 ttvn, tt_change);
2386 2362
2387 /* Even if we received the precomputed crc with the OGM, we 2363 /* Even if we received the precomputed crc with the OGM, we
2388 * prefer to recompute it to spot any possible inconsistency 2364 * prefer to recompute it to spot any possible inconsistency
2389 * in the global table 2365 * in the global table
2390 */ 2366 */
2391 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node); 2367 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2392 2368
2393 /* The ttvn alone is not enough to guarantee consistency 2369 /* The ttvn alone is not enough to guarantee consistency
2394 * because a single value could represent different states 2370 * because a single value could represent different states
2395 * (due to the wrap around). Thus a node has to check whether 2371 * (due to the wrap around). Thus a node has to check whether
2396 * the resulting table (after applying the changes) is still 2372 * the resulting table (after applying the changes) is still
2397 * consistent or not. E.g. a node could disconnect while its 2373 * consistent or not. E.g. a node could disconnect while its
2398 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case 2374 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2399 * checking the CRC value is mandatory to detect the 2375 * checking the CRC value is mandatory to detect the
2400 * inconsistency 2376 * inconsistency
2401 */ 2377 */
2402 if (orig_node->tt_crc != tt_crc) 2378 if (orig_node->tt_crc != tt_crc)
2403 goto request_table; 2379 goto request_table;
2404 2380
2405 /* Roaming phase is over: tables are in sync again. I can 2381 /* Roaming phase is over: tables are in sync again. I can
2406 * unset the flag 2382 * unset the flag
2407 */ 2383 */
2408 orig_node->tt_poss_change = false; 2384 orig_node->tt_poss_change = false;
2409 } else { 2385 } else {
2410 /* if we missed more than one change or our tables are not 2386 /* if we missed more than one change or our tables are not
2411 * in sync anymore -> request fresh tt data 2387 * in sync anymore -> request fresh tt data
2412 */ 2388 */
2413 if (!orig_node->tt_initialised || ttvn != orig_ttvn || 2389 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2414 orig_node->tt_crc != tt_crc) { 2390 orig_node->tt_crc != tt_crc) {
2415 request_table: 2391 request_table:
2416 batadv_dbg(BATADV_DBG_TT, bat_priv, 2392 batadv_dbg(BATADV_DBG_TT, bat_priv,
2417 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n", 2393 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2418 orig_node->orig, ttvn, orig_ttvn, tt_crc, 2394 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2419 orig_node->tt_crc, tt_num_changes); 2395 orig_node->tt_crc, tt_num_changes);
2420 batadv_send_tt_request(bat_priv, orig_node, ttvn, 2396 batadv_send_tt_request(bat_priv, orig_node, ttvn,
2421 tt_crc, full_table); 2397 tt_crc, full_table);
2422 return; 2398 return;
2423 } 2399 }
2424 } 2400 }
2425 } 2401 }
2426 2402
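The long comment above is the reason for the extra CRC comparison: the translation table version number is an 8-bit counter, so after a full wrap the very same ttvn value can describe a completely different table. A tiny demonstration of that aliasing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ttvn = 42;
	int i;

	/* 256 commits later the 8-bit version number is back where it
	 * started, even though the table went through 256 revisions;
	 * this is the aliasing the CRC check is there to catch
	 */
	for (i = 0; i < 256; i++)
		ttvn++;

	printf("ttvn after 256 increments: %d\n", ttvn);   /* prints 42 */
	return 0;
}
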
2427 /* returns true if we know that the client has moved from its old 2403 /* returns true if we know that the client has moved from its old
2428 * originator to another one. The entry is still kept for consistency 2404 * originator to another one. The entry is still kept for consistency
2429 * purposes 2405 * purposes
2430 */ 2406 */
2431 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, 2407 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2432 uint8_t *addr) 2408 uint8_t *addr)
2433 { 2409 {
2434 struct batadv_tt_global_entry *tt_global_entry; 2410 struct batadv_tt_global_entry *tt_global_entry;
2435 bool ret = false; 2411 bool ret = false;
2436 2412
2437 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr); 2413 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2438 if (!tt_global_entry) 2414 if (!tt_global_entry)
2439 goto out; 2415 goto out;
2440 2416
2441 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM; 2417 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2442 batadv_tt_global_entry_free_ref(tt_global_entry); 2418 batadv_tt_global_entry_free_ref(tt_global_entry);
2443 out: 2419 out:
2444 return ret; 2420 return ret;
2445 } 2421 }
2446 2422
2447 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, 2423 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
2448 struct batadv_orig_node *orig_node, 2424 struct batadv_orig_node *orig_node,
2449 const unsigned char *addr) 2425 const unsigned char *addr)
2450 { 2426 {
2451 bool ret = false; 2427 bool ret = false;
2452 2428
2453 if (!batadv_tt_global_add(bat_priv, orig_node, addr, 2429 if (!batadv_tt_global_add(bat_priv, orig_node, addr,
2454 BATADV_TT_CLIENT_TEMP, 2430 BATADV_TT_CLIENT_TEMP,
2455 atomic_read(&orig_node->last_ttvn))) 2431 atomic_read(&orig_node->last_ttvn)))
2456 goto out; 2432 goto out;
2457 2433
2458 batadv_dbg(BATADV_DBG_TT, bat_priv, 2434 batadv_dbg(BATADV_DBG_TT, bat_priv,
2459 "Added temporary global client (addr: %pM orig: %pM)\n", 2435 "Added temporary global client (addr: %pM orig: %pM)\n",
2460 addr, orig_node->orig); 2436 addr, orig_node->orig);
2461 ret = true; 2437 ret = true;
2462 out: 2438 out:
2463 return ret; 2439 return ret;
2464 } 2440 }
2465 2441