Commit e300d314664ef6746e697d5b581f85114ab1f843
Committed by
Antonio Quartulli
1 parent
b8cbd81d09
batman-adv: refine API calls for unicast transmissions of SKBs
With this patch the functions batadv_send_skb_unicast() and batadv_send_skb_unicast_4addr() are further refined into batadv_send_skb_via_tt(), batadv_send_skb_via_tt_4addr() and batadv_send_skb_via_gw(). This way we avoid any "guessing" about where to send a packet in the unicast forwarding methods and let the callers decide. This is going to be useful for the upcoming multicast related patches in particular. Further, the return values were polished a little to use the more appropriate NET_XMIT_* defines. Signed-off-by: Linus Lüssing <linus.luessing@web.de> Acked-by: Antonio Quartulli <antonio@meshcoding.com> Signed-off-by: Marek Lindner <lindner_marek@yahoo.de> Signed-off-by: Antonio Quartulli <antonio@meshcoding.com>
Showing 4 changed files with 108 additions and 48 deletions Inline Diff
net/batman-adv/distributed-arp-table.c
1 | /* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors: | 1 | /* Copyright (C) 2011-2013 B.A.T.M.A.N. contributors: |
2 | * | 2 | * |
3 | * Antonio Quartulli | 3 | * Antonio Quartulli |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of version 2 of the GNU General Public | 6 | * modify it under the terms of version 2 of the GNU General Public |
7 | * License as published by the Free Software Foundation. | 7 | * License as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but | 9 | * This program is distributed in the hope that it will be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * General Public License for more details. | 12 | * General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
17 | * 02110-1301, USA | 17 | * 02110-1301, USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/if_ether.h> | 20 | #include <linux/if_ether.h> |
21 | #include <linux/if_arp.h> | 21 | #include <linux/if_arp.h> |
22 | #include <linux/if_vlan.h> | 22 | #include <linux/if_vlan.h> |
23 | #include <net/arp.h> | 23 | #include <net/arp.h> |
24 | 24 | ||
25 | #include "main.h" | 25 | #include "main.h" |
26 | #include "hash.h" | 26 | #include "hash.h" |
27 | #include "distributed-arp-table.h" | 27 | #include "distributed-arp-table.h" |
28 | #include "hard-interface.h" | 28 | #include "hard-interface.h" |
29 | #include "originator.h" | 29 | #include "originator.h" |
30 | #include "send.h" | 30 | #include "send.h" |
31 | #include "types.h" | 31 | #include "types.h" |
32 | #include "translation-table.h" | 32 | #include "translation-table.h" |
33 | 33 | ||
34 | static void batadv_dat_purge(struct work_struct *work); | 34 | static void batadv_dat_purge(struct work_struct *work); |
35 | 35 | ||
/**
 * batadv_dat_start_timer - initialise the DAT periodic worker
 * @bat_priv: the bat priv with all the soft interface information
 *
 * (Re)arms the delayed work that periodically purges stale entries from the
 * local DAT cache (see batadv_dat_purge(), which re-invokes this function).
 *
 * NOTE(review): the work item is re-initialised with INIT_DELAYED_WORK on
 * every rearm rather than once at setup time — presumably harmless while the
 * work is not pending, but worth confirming against the workqueue API rules.
 */
static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
	/* fixed purge period: 10000 ms = 10 seconds */
	queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
			   msecs_to_jiffies(10000));
}
46 | 46 | ||
47 | /** | 47 | /** |
48 | * batadv_dat_entry_free_ref - decrement the dat_entry refcounter and possibly | 48 | * batadv_dat_entry_free_ref - decrement the dat_entry refcounter and possibly |
49 | * free it | 49 | * free it |
50 | * @dat_entry: the entry to free | 50 | * @dat_entry: the entry to free |
51 | */ | 51 | */ |
52 | static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry) | 52 | static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry) |
53 | { | 53 | { |
54 | if (atomic_dec_and_test(&dat_entry->refcount)) | 54 | if (atomic_dec_and_test(&dat_entry->refcount)) |
55 | kfree_rcu(dat_entry, rcu); | 55 | kfree_rcu(dat_entry, rcu); |
56 | } | 56 | } |
57 | 57 | ||
58 | /** | 58 | /** |
59 | * batadv_dat_to_purge - check whether a dat_entry has to be purged or not | 59 | * batadv_dat_to_purge - check whether a dat_entry has to be purged or not |
60 | * @dat_entry: the entry to check | 60 | * @dat_entry: the entry to check |
61 | * | 61 | * |
62 | * Returns true if the entry has to be purged now, false otherwise. | 62 | * Returns true if the entry has to be purged now, false otherwise. |
63 | */ | 63 | */ |
64 | static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry) | 64 | static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry) |
65 | { | 65 | { |
66 | return batadv_has_timed_out(dat_entry->last_update, | 66 | return batadv_has_timed_out(dat_entry->last_update, |
67 | BATADV_DAT_ENTRY_TIMEOUT); | 67 | BATADV_DAT_ENTRY_TIMEOUT); |
68 | } | 68 | } |
69 | 69 | ||
70 | /** | 70 | /** |
71 | * __batadv_dat_purge - delete entries from the DAT local storage | 71 | * __batadv_dat_purge - delete entries from the DAT local storage |
72 | * @bat_priv: the bat priv with all the soft interface information | 72 | * @bat_priv: the bat priv with all the soft interface information |
73 | * @to_purge: function in charge to decide whether an entry has to be purged or | 73 | * @to_purge: function in charge to decide whether an entry has to be purged or |
74 | * not. This function takes the dat_entry as argument and has to | 74 | * not. This function takes the dat_entry as argument and has to |
75 | * returns a boolean value: true is the entry has to be deleted, | 75 | * returns a boolean value: true is the entry has to be deleted, |
76 | * false otherwise | 76 | * false otherwise |
77 | * | 77 | * |
78 | * Loops over each entry in the DAT local storage and deletes it if and only if | 78 | * Loops over each entry in the DAT local storage and deletes it if and only if |
79 | * the to_purge function passed as argument returns true. | 79 | * the to_purge function passed as argument returns true. |
80 | */ | 80 | */ |
81 | static void __batadv_dat_purge(struct batadv_priv *bat_priv, | 81 | static void __batadv_dat_purge(struct batadv_priv *bat_priv, |
82 | bool (*to_purge)(struct batadv_dat_entry *)) | 82 | bool (*to_purge)(struct batadv_dat_entry *)) |
83 | { | 83 | { |
84 | spinlock_t *list_lock; /* protects write access to the hash lists */ | 84 | spinlock_t *list_lock; /* protects write access to the hash lists */ |
85 | struct batadv_dat_entry *dat_entry; | 85 | struct batadv_dat_entry *dat_entry; |
86 | struct hlist_node *node_tmp; | 86 | struct hlist_node *node_tmp; |
87 | struct hlist_head *head; | 87 | struct hlist_head *head; |
88 | uint32_t i; | 88 | uint32_t i; |
89 | 89 | ||
90 | if (!bat_priv->dat.hash) | 90 | if (!bat_priv->dat.hash) |
91 | return; | 91 | return; |
92 | 92 | ||
93 | for (i = 0; i < bat_priv->dat.hash->size; i++) { | 93 | for (i = 0; i < bat_priv->dat.hash->size; i++) { |
94 | head = &bat_priv->dat.hash->table[i]; | 94 | head = &bat_priv->dat.hash->table[i]; |
95 | list_lock = &bat_priv->dat.hash->list_locks[i]; | 95 | list_lock = &bat_priv->dat.hash->list_locks[i]; |
96 | 96 | ||
97 | spin_lock_bh(list_lock); | 97 | spin_lock_bh(list_lock); |
98 | hlist_for_each_entry_safe(dat_entry, node_tmp, head, | 98 | hlist_for_each_entry_safe(dat_entry, node_tmp, head, |
99 | hash_entry) { | 99 | hash_entry) { |
100 | /* if a helper function has been passed as parameter, | 100 | /* if a helper function has been passed as parameter, |
101 | * ask it if the entry has to be purged or not | 101 | * ask it if the entry has to be purged or not |
102 | */ | 102 | */ |
103 | if (to_purge && !to_purge(dat_entry)) | 103 | if (to_purge && !to_purge(dat_entry)) |
104 | continue; | 104 | continue; |
105 | 105 | ||
106 | hlist_del_rcu(&dat_entry->hash_entry); | 106 | hlist_del_rcu(&dat_entry->hash_entry); |
107 | batadv_dat_entry_free_ref(dat_entry); | 107 | batadv_dat_entry_free_ref(dat_entry); |
108 | } | 108 | } |
109 | spin_unlock_bh(list_lock); | 109 | spin_unlock_bh(list_lock); |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | /** | 113 | /** |
114 | * batadv_dat_purge - periodic task that deletes old entries from the local DAT | 114 | * batadv_dat_purge - periodic task that deletes old entries from the local DAT |
115 | * hash table | 115 | * hash table |
116 | * @work: kernel work struct | 116 | * @work: kernel work struct |
117 | */ | 117 | */ |
118 | static void batadv_dat_purge(struct work_struct *work) | 118 | static void batadv_dat_purge(struct work_struct *work) |
119 | { | 119 | { |
120 | struct delayed_work *delayed_work; | 120 | struct delayed_work *delayed_work; |
121 | struct batadv_priv_dat *priv_dat; | 121 | struct batadv_priv_dat *priv_dat; |
122 | struct batadv_priv *bat_priv; | 122 | struct batadv_priv *bat_priv; |
123 | 123 | ||
124 | delayed_work = container_of(work, struct delayed_work, work); | 124 | delayed_work = container_of(work, struct delayed_work, work); |
125 | priv_dat = container_of(delayed_work, struct batadv_priv_dat, work); | 125 | priv_dat = container_of(delayed_work, struct batadv_priv_dat, work); |
126 | bat_priv = container_of(priv_dat, struct batadv_priv, dat); | 126 | bat_priv = container_of(priv_dat, struct batadv_priv, dat); |
127 | 127 | ||
128 | __batadv_dat_purge(bat_priv, batadv_dat_to_purge); | 128 | __batadv_dat_purge(bat_priv, batadv_dat_to_purge); |
129 | batadv_dat_start_timer(bat_priv); | 129 | batadv_dat_start_timer(bat_priv); |
130 | } | 130 | } |
131 | 131 | ||
/**
 * batadv_compare_dat - comparing function used in the local DAT hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Returns 1 if the two entries are the same, 0 otherwise.
 *
 * NOTE(review): only the first sizeof(__be32) bytes are compared — i.e. the
 * ip field, apparently relying on it being the first member of struct
 * batadv_dat_entry (verify against the struct layout). batadv_hash_dat()
 * additionally mixes in the vid; confirm whether entries with the same ip but
 * different vid should also be considered distinct here.
 */
static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_dat_entry,
					 hash_entry);

	return (memcmp(data1, data2, sizeof(__be32)) == 0 ? 1 : 0);
}
146 | 146 | ||
147 | /** | 147 | /** |
148 | * batadv_arp_hw_src - extract the hw_src field from an ARP packet | 148 | * batadv_arp_hw_src - extract the hw_src field from an ARP packet |
149 | * @skb: ARP packet | 149 | * @skb: ARP packet |
150 | * @hdr_size: size of the possible header before the ARP packet | 150 | * @hdr_size: size of the possible header before the ARP packet |
151 | * | 151 | * |
152 | * Returns the value of the hw_src field in the ARP packet. | 152 | * Returns the value of the hw_src field in the ARP packet. |
153 | */ | 153 | */ |
154 | static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size) | 154 | static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size) |
155 | { | 155 | { |
156 | uint8_t *addr; | 156 | uint8_t *addr; |
157 | 157 | ||
158 | addr = (uint8_t *)(skb->data + hdr_size); | 158 | addr = (uint8_t *)(skb->data + hdr_size); |
159 | addr += ETH_HLEN + sizeof(struct arphdr); | 159 | addr += ETH_HLEN + sizeof(struct arphdr); |
160 | 160 | ||
161 | return addr; | 161 | return addr; |
162 | } | 162 | } |
163 | 163 | ||
164 | /** | 164 | /** |
165 | * batadv_arp_ip_src - extract the ip_src field from an ARP packet | 165 | * batadv_arp_ip_src - extract the ip_src field from an ARP packet |
166 | * @skb: ARP packet | 166 | * @skb: ARP packet |
167 | * @hdr_size: size of the possible header before the ARP packet | 167 | * @hdr_size: size of the possible header before the ARP packet |
168 | * | 168 | * |
169 | * Returns the value of the ip_src field in the ARP packet. | 169 | * Returns the value of the ip_src field in the ARP packet. |
170 | */ | 170 | */ |
171 | static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size) | 171 | static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size) |
172 | { | 172 | { |
173 | return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN); | 173 | return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN); |
174 | } | 174 | } |
175 | 175 | ||
176 | /** | 176 | /** |
177 | * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet | 177 | * batadv_arp_hw_dst - extract the hw_dst field from an ARP packet |
178 | * @skb: ARP packet | 178 | * @skb: ARP packet |
179 | * @hdr_size: size of the possible header before the ARP packet | 179 | * @hdr_size: size of the possible header before the ARP packet |
180 | * | 180 | * |
181 | * Returns the value of the hw_dst field in the ARP packet. | 181 | * Returns the value of the hw_dst field in the ARP packet. |
182 | */ | 182 | */ |
183 | static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size) | 183 | static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size) |
184 | { | 184 | { |
185 | return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4; | 185 | return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4; |
186 | } | 186 | } |
187 | 187 | ||
188 | /** | 188 | /** |
189 | * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet | 189 | * batadv_arp_ip_dst - extract the ip_dst field from an ARP packet |
190 | * @skb: ARP packet | 190 | * @skb: ARP packet |
191 | * @hdr_size: size of the possible header before the ARP packet | 191 | * @hdr_size: size of the possible header before the ARP packet |
192 | * | 192 | * |
193 | * Returns the value of the ip_dst field in the ARP packet. | 193 | * Returns the value of the ip_dst field in the ARP packet. |
194 | */ | 194 | */ |
195 | static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size) | 195 | static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size) |
196 | { | 196 | { |
197 | return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4); | 197 | return *(__be32 *)(batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN * 2 + 4); |
198 | } | 198 | } |
199 | 199 | ||
200 | /** | 200 | /** |
201 | * batadv_hash_dat - compute the hash value for an IP address | 201 | * batadv_hash_dat - compute the hash value for an IP address |
202 | * @data: data to hash | 202 | * @data: data to hash |
203 | * @size: size of the hash table | 203 | * @size: size of the hash table |
204 | * | 204 | * |
205 | * Returns the selected index in the hash table for the given data. | 205 | * Returns the selected index in the hash table for the given data. |
206 | */ | 206 | */ |
207 | static uint32_t batadv_hash_dat(const void *data, uint32_t size) | 207 | static uint32_t batadv_hash_dat(const void *data, uint32_t size) |
208 | { | 208 | { |
209 | uint32_t hash = 0; | 209 | uint32_t hash = 0; |
210 | const struct batadv_dat_entry *dat = data; | 210 | const struct batadv_dat_entry *dat = data; |
211 | 211 | ||
212 | hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip)); | 212 | hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip)); |
213 | hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid)); | 213 | hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid)); |
214 | 214 | ||
215 | hash += (hash << 3); | 215 | hash += (hash << 3); |
216 | hash ^= (hash >> 11); | 216 | hash ^= (hash >> 11); |
217 | hash += (hash << 15); | 217 | hash += (hash << 15); |
218 | 218 | ||
219 | return hash % size; | 219 | return hash % size; |
220 | } | 220 | } |
221 | 221 | ||
222 | /** | 222 | /** |
223 | * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash | 223 | * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash |
224 | * table | 224 | * table |
225 | * @bat_priv: the bat priv with all the soft interface information | 225 | * @bat_priv: the bat priv with all the soft interface information |
226 | * @ip: search key | 226 | * @ip: search key |
227 | * @vid: VLAN identifier | 227 | * @vid: VLAN identifier |
228 | * | 228 | * |
229 | * Returns the dat_entry if found, NULL otherwise. | 229 | * Returns the dat_entry if found, NULL otherwise. |
230 | */ | 230 | */ |
231 | static struct batadv_dat_entry * | 231 | static struct batadv_dat_entry * |
232 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip, | 232 | batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip, |
233 | unsigned short vid) | 233 | unsigned short vid) |
234 | { | 234 | { |
235 | struct hlist_head *head; | 235 | struct hlist_head *head; |
236 | struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL; | 236 | struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL; |
237 | struct batadv_hashtable *hash = bat_priv->dat.hash; | 237 | struct batadv_hashtable *hash = bat_priv->dat.hash; |
238 | uint32_t index; | 238 | uint32_t index; |
239 | 239 | ||
240 | if (!hash) | 240 | if (!hash) |
241 | return NULL; | 241 | return NULL; |
242 | 242 | ||
243 | to_find.ip = ip; | 243 | to_find.ip = ip; |
244 | to_find.vid = vid; | 244 | to_find.vid = vid; |
245 | 245 | ||
246 | index = batadv_hash_dat(&to_find, hash->size); | 246 | index = batadv_hash_dat(&to_find, hash->size); |
247 | head = &hash->table[index]; | 247 | head = &hash->table[index]; |
248 | 248 | ||
249 | rcu_read_lock(); | 249 | rcu_read_lock(); |
250 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { | 250 | hlist_for_each_entry_rcu(dat_entry, head, hash_entry) { |
251 | if (dat_entry->ip != ip) | 251 | if (dat_entry->ip != ip) |
252 | continue; | 252 | continue; |
253 | 253 | ||
254 | if (!atomic_inc_not_zero(&dat_entry->refcount)) | 254 | if (!atomic_inc_not_zero(&dat_entry->refcount)) |
255 | continue; | 255 | continue; |
256 | 256 | ||
257 | dat_entry_tmp = dat_entry; | 257 | dat_entry_tmp = dat_entry; |
258 | break; | 258 | break; |
259 | } | 259 | } |
260 | rcu_read_unlock(); | 260 | rcu_read_unlock(); |
261 | 261 | ||
262 | return dat_entry_tmp; | 262 | return dat_entry_tmp; |
263 | } | 263 | } |
264 | 264 | ||
/**
 * batadv_dat_entry_add - add a new dat entry or update it if already exists
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: ipv4 to add/edit
 * @mac_addr: mac address to assign to the given ipv4
 * @vid: VLAN identifier
 */
static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
				 uint8_t *mac_addr, unsigned short vid)
{
	struct batadv_dat_entry *dat_entry;
	int hash_added;

	/* hash_find returns the entry with a reference held for us */
	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
	/* if this entry is already known, just update it */
	if (dat_entry) {
		/* refresh the MAC only when it actually changed */
		if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
			memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
		dat_entry->last_update = jiffies;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "Entry updated: %pI4 %pM (vid: %d)\n",
			   &dat_entry->ip, dat_entry->mac_addr,
			   BATADV_PRINT_VID(vid));
		goto out;
	}

	/* GFP_ATOMIC: presumably reachable from non-sleepable (packet
	 * processing) context — confirm against the callers
	 */
	dat_entry = kmalloc(sizeof(*dat_entry), GFP_ATOMIC);
	if (!dat_entry)
		goto out;

	dat_entry->ip = ip;
	dat_entry->vid = vid;
	memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
	dat_entry->last_update = jiffies;
	/* two references: one owned by the hash table, one local that is
	 * dropped again at the out label below
	 */
	atomic_set(&dat_entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
				     batadv_hash_dat, dat_entry,
				     &dat_entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_dat_entry_free_ref(dat_entry);
		goto out;
	}

	batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
		   &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));

out:
	/* drop the local reference (from hash_find or the initial refcount
	 * of 2 above)
	 */
	if (dat_entry)
		batadv_dat_entry_free_ref(dat_entry);
}
318 | 318 | ||
319 | #ifdef CONFIG_BATMAN_ADV_DEBUG | 319 | #ifdef CONFIG_BATMAN_ADV_DEBUG |
320 | 320 | ||
321 | /** | 321 | /** |
322 | * batadv_dbg_arp - print a debug message containing all the ARP packet details | 322 | * batadv_dbg_arp - print a debug message containing all the ARP packet details |
323 | * @bat_priv: the bat priv with all the soft interface information | 323 | * @bat_priv: the bat priv with all the soft interface information |
324 | * @skb: ARP packet | 324 | * @skb: ARP packet |
325 | * @type: ARP type | 325 | * @type: ARP type |
326 | * @hdr_size: size of the possible header before the ARP packet | 326 | * @hdr_size: size of the possible header before the ARP packet |
327 | * @msg: message to print together with the debugging information | 327 | * @msg: message to print together with the debugging information |
328 | */ | 328 | */ |
329 | static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | 329 | static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, |
330 | uint16_t type, int hdr_size, char *msg) | 330 | uint16_t type, int hdr_size, char *msg) |
331 | { | 331 | { |
332 | struct batadv_unicast_4addr_packet *unicast_4addr_packet; | 332 | struct batadv_unicast_4addr_packet *unicast_4addr_packet; |
333 | struct batadv_bcast_packet *bcast_pkt; | 333 | struct batadv_bcast_packet *bcast_pkt; |
334 | uint8_t *orig_addr; | 334 | uint8_t *orig_addr; |
335 | __be32 ip_src, ip_dst; | 335 | __be32 ip_src, ip_dst; |
336 | 336 | ||
337 | if (msg) | 337 | if (msg) |
338 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg); | 338 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "%s\n", msg); |
339 | 339 | ||
340 | ip_src = batadv_arp_ip_src(skb, hdr_size); | 340 | ip_src = batadv_arp_ip_src(skb, hdr_size); |
341 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | 341 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); |
342 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 342 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
343 | "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n", | 343 | "ARP MSG = [src: %pM-%pI4 dst: %pM-%pI4]\n", |
344 | batadv_arp_hw_src(skb, hdr_size), &ip_src, | 344 | batadv_arp_hw_src(skb, hdr_size), &ip_src, |
345 | batadv_arp_hw_dst(skb, hdr_size), &ip_dst); | 345 | batadv_arp_hw_dst(skb, hdr_size), &ip_dst); |
346 | 346 | ||
347 | if (hdr_size == 0) | 347 | if (hdr_size == 0) |
348 | return; | 348 | return; |
349 | 349 | ||
350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; | 350 | unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data; |
351 | 351 | ||
352 | switch (unicast_4addr_packet->u.header.packet_type) { | 352 | switch (unicast_4addr_packet->u.header.packet_type) { |
353 | case BATADV_UNICAST: | 353 | case BATADV_UNICAST: |
354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 354 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
355 | "* encapsulated within a UNICAST packet\n"); | 355 | "* encapsulated within a UNICAST packet\n"); |
356 | break; | 356 | break; |
357 | case BATADV_UNICAST_4ADDR: | 357 | case BATADV_UNICAST_4ADDR: |
358 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 358 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
359 | "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n", | 359 | "* encapsulated within a UNICAST_4ADDR packet (src: %pM)\n", |
360 | unicast_4addr_packet->src); | 360 | unicast_4addr_packet->src); |
361 | switch (unicast_4addr_packet->subtype) { | 361 | switch (unicast_4addr_packet->subtype) { |
362 | case BATADV_P_DAT_DHT_PUT: | 362 | case BATADV_P_DAT_DHT_PUT: |
363 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n"); | 363 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_PUT\n"); |
364 | break; | 364 | break; |
365 | case BATADV_P_DAT_DHT_GET: | 365 | case BATADV_P_DAT_DHT_GET: |
366 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n"); | 366 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DAT_DHT_GET\n"); |
367 | break; | 367 | break; |
368 | case BATADV_P_DAT_CACHE_REPLY: | 368 | case BATADV_P_DAT_CACHE_REPLY: |
369 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 369 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
370 | "* type: DAT_CACHE_REPLY\n"); | 370 | "* type: DAT_CACHE_REPLY\n"); |
371 | break; | 371 | break; |
372 | case BATADV_P_DATA: | 372 | case BATADV_P_DATA: |
373 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n"); | 373 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: DATA\n"); |
374 | break; | 374 | break; |
375 | default: | 375 | default: |
376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", | 376 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n", |
377 | unicast_4addr_packet->u.header.packet_type); | 377 | unicast_4addr_packet->u.header.packet_type); |
378 | } | 378 | } |
379 | break; | 379 | break; |
380 | case BATADV_BCAST: | 380 | case BATADV_BCAST: |
381 | bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet; | 381 | bcast_pkt = (struct batadv_bcast_packet *)unicast_4addr_packet; |
382 | orig_addr = bcast_pkt->orig; | 382 | orig_addr = bcast_pkt->orig; |
383 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 383 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
384 | "* encapsulated within a BCAST packet (src: %pM)\n", | 384 | "* encapsulated within a BCAST packet (src: %pM)\n", |
385 | orig_addr); | 385 | orig_addr); |
386 | break; | 386 | break; |
387 | default: | 387 | default: |
388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 388 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
389 | "* encapsulated within an unknown packet type (0x%x)\n", | 389 | "* encapsulated within an unknown packet type (0x%x)\n", |
390 | unicast_4addr_packet->u.header.packet_type); | 390 | unicast_4addr_packet->u.header.packet_type); |
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
394 | #else | 394 | #else |
395 | 395 | ||
396 | static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, | 396 | static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb, |
397 | uint16_t type, int hdr_size, char *msg) | 397 | uint16_t type, int hdr_size, char *msg) |
398 | { | 398 | { |
399 | } | 399 | } |
400 | 400 | ||
401 | #endif /* CONFIG_BATMAN_ADV_DEBUG */ | 401 | #endif /* CONFIG_BATMAN_ADV_DEBUG */ |
402 | 402 | ||
403 | /** | 403 | /** |
404 | * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate | 404 | * batadv_is_orig_node_eligible - check whether a node can be a DHT candidate |
405 | * @res: the array with the already selected candidates | 405 | * @res: the array with the already selected candidates |
406 | * @select: number of already selected candidates | 406 | * @select: number of already selected candidates |
407 | * @tmp_max: address of the currently evaluated node | 407 | * @tmp_max: address of the currently evaluated node |
408 | * @max: current round max address | 408 | * @max: current round max address |
409 | * @last_max: address of the last selected candidate | 409 | * @last_max: address of the last selected candidate |
410 | * @candidate: orig_node under evaluation | 410 | * @candidate: orig_node under evaluation |
411 | * @max_orig_node: last selected candidate | 411 | * @max_orig_node: last selected candidate |
412 | * | 412 | * |
413 | * Returns true if the node has been elected as next candidate or false | 413 | * Returns true if the node has been elected as next candidate or false |
414 | * otherwise. | 414 | * otherwise. |
415 | */ | 415 | */ |
416 | static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, | 416 | static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, |
417 | int select, batadv_dat_addr_t tmp_max, | 417 | int select, batadv_dat_addr_t tmp_max, |
418 | batadv_dat_addr_t max, | 418 | batadv_dat_addr_t max, |
419 | batadv_dat_addr_t last_max, | 419 | batadv_dat_addr_t last_max, |
420 | struct batadv_orig_node *candidate, | 420 | struct batadv_orig_node *candidate, |
421 | struct batadv_orig_node *max_orig_node) | 421 | struct batadv_orig_node *max_orig_node) |
422 | { | 422 | { |
423 | bool ret = false; | 423 | bool ret = false; |
424 | int j; | 424 | int j; |
425 | 425 | ||
426 | /* check if orig node candidate is running DAT */ | 426 | /* check if orig node candidate is running DAT */ |
427 | if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT)) | 427 | if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT)) |
428 | goto out; | 428 | goto out; |
429 | 429 | ||
430 | /* Check if this node has already been selected... */ | 430 | /* Check if this node has already been selected... */ |
431 | for (j = 0; j < select; j++) | 431 | for (j = 0; j < select; j++) |
432 | if (res[j].orig_node == candidate) | 432 | if (res[j].orig_node == candidate) |
433 | break; | 433 | break; |
434 | /* ..and possibly skip it */ | 434 | /* ..and possibly skip it */ |
435 | if (j < select) | 435 | if (j < select) |
436 | goto out; | 436 | goto out; |
437 | /* sanity check: has it already been selected? This should not happen */ | 437 | /* sanity check: has it already been selected? This should not happen */ |
438 | if (tmp_max > last_max) | 438 | if (tmp_max > last_max) |
439 | goto out; | 439 | goto out; |
440 | /* check if during this iteration an originator with a closer dht | 440 | /* check if during this iteration an originator with a closer dht |
441 | * address has already been found | 441 | * address has already been found |
442 | */ | 442 | */ |
443 | if (tmp_max < max) | 443 | if (tmp_max < max) |
444 | goto out; | 444 | goto out; |
445 | /* this is an hash collision with the temporary selected node. Choose | 445 | /* this is an hash collision with the temporary selected node. Choose |
446 | * the one with the lowest address | 446 | * the one with the lowest address |
447 | */ | 447 | */ |
448 | if ((tmp_max == max) && max_orig_node && | 448 | if ((tmp_max == max) && max_orig_node && |
449 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) | 449 | (batadv_compare_eth(candidate->orig, max_orig_node->orig) > 0)) |
450 | goto out; | 450 | goto out; |
451 | 451 | ||
452 | ret = true; | 452 | ret = true; |
453 | out: | 453 | out: |
454 | return ret; | 454 | return ret; |
455 | } | 455 | } |
456 | 456 | ||
/**
 * batadv_choose_next_candidate - select the next DHT candidate
 * @bat_priv: the bat priv with all the soft interface information
 * @cands: candidates array
 * @select: number of candidates already present in the array
 * @ip_key: key to look up in the DHT
 * @last_max: pointer where the address of the selected candidate will be saved
 *
 * Scans the whole originator hash and stores in cands[select] the eligible
 * node whose DHT address is closest (from the left, on the address ring) to
 * @ip_key without being closer than the previously selected candidate.
 * A reference on the chosen orig_node is held and handed over to the caller
 * via cands[select].orig_node.
 */
static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
					 struct batadv_dat_candidate *cands,
					 int select, batadv_dat_addr_t ip_key,
					 batadv_dat_addr_t *last_max)
{
	batadv_dat_addr_t max = 0, tmp_max = 0;
	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	int i;

	/* if no node is eligible as candidate, leave the candidate type as
	 * NOT_FOUND
	 */
	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;

	/* iterate over the originator list and find the node with the closest
	 * dat_address which has not been selected yet
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			/* the dht space is a ring using unsigned addresses */
			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
				  ip_key;

			if (!batadv_is_orig_node_eligible(cands, select,
							  tmp_max, max,
							  *last_max, orig_node,
							  max_orig_node))
				continue;

			/* hold a reference on the new best candidate; skip it
			 * if it is already being freed (refcount hit zero)
			 */
			if (!atomic_inc_not_zero(&orig_node->refcount))
				continue;

			max = tmp_max;
			/* drop the reference on the previous best candidate */
			if (max_orig_node)
				batadv_orig_node_free_ref(max_orig_node);
			max_orig_node = orig_node;
		}
		rcu_read_unlock();
	}
	if (max_orig_node) {
		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
		cands[select].orig_node = max_orig_node;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
			   select, max_orig_node->orig, max_orig_node->dat_addr,
			   max);
	}
	*last_max = max;
}
519 | 519 | ||
520 | /** | 520 | /** |
521 | * batadv_dat_select_candidates - select the nodes which the DHT message has to | 521 | * batadv_dat_select_candidates - select the nodes which the DHT message has to |
522 | * be sent to | 522 | * be sent to |
523 | * @bat_priv: the bat priv with all the soft interface information | 523 | * @bat_priv: the bat priv with all the soft interface information |
524 | * @ip_dst: ipv4 to look up in the DHT | 524 | * @ip_dst: ipv4 to look up in the DHT |
525 | * | 525 | * |
526 | * An originator O is selected if and only if its DHT_ID value is one of three | 526 | * An originator O is selected if and only if its DHT_ID value is one of three |
527 | * closest values (from the LEFT, with wrap around if needed) then the hash | 527 | * closest values (from the LEFT, with wrap around if needed) then the hash |
528 | * value of the key. ip_dst is the key. | 528 | * value of the key. ip_dst is the key. |
529 | * | 529 | * |
530 | * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM. | 530 | * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM. |
531 | */ | 531 | */ |
532 | static struct batadv_dat_candidate * | 532 | static struct batadv_dat_candidate * |
533 | batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | 533 | batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) |
534 | { | 534 | { |
535 | int select; | 535 | int select; |
536 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; | 536 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; |
537 | struct batadv_dat_candidate *res; | 537 | struct batadv_dat_candidate *res; |
538 | 538 | ||
539 | if (!bat_priv->orig_hash) | 539 | if (!bat_priv->orig_hash) |
540 | return NULL; | 540 | return NULL; |
541 | 541 | ||
542 | res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC); | 542 | res = kmalloc(BATADV_DAT_CANDIDATES_NUM * sizeof(*res), GFP_ATOMIC); |
543 | if (!res) | 543 | if (!res) |
544 | return NULL; | 544 | return NULL; |
545 | 545 | ||
546 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, | 546 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, |
547 | BATADV_DAT_ADDR_MAX); | 547 | BATADV_DAT_ADDR_MAX); |
548 | 548 | ||
549 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 549 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
550 | "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, | 550 | "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, |
551 | ip_key); | 551 | ip_key); |
552 | 552 | ||
553 | for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++) | 553 | for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++) |
554 | batadv_choose_next_candidate(bat_priv, res, select, ip_key, | 554 | batadv_choose_next_candidate(bat_priv, res, select, ip_key, |
555 | &last_max); | 555 | &last_max); |
556 | 556 | ||
557 | return res; | 557 | return res; |
558 | } | 558 | } |
559 | 559 | ||
560 | /** | 560 | /** |
561 | * batadv_dat_send_data - send a payload to the selected candidates | 561 | * batadv_dat_send_data - send a payload to the selected candidates |
562 | * @bat_priv: the bat priv with all the soft interface information | 562 | * @bat_priv: the bat priv with all the soft interface information |
563 | * @skb: payload to send | 563 | * @skb: payload to send |
564 | * @ip: the DHT key | 564 | * @ip: the DHT key |
565 | * @packet_subtype: unicast4addr packet subtype to use | 565 | * @packet_subtype: unicast4addr packet subtype to use |
566 | * | 566 | * |
567 | * This function copies the skb with pskb_copy() and is sent as unicast packet | 567 | * This function copies the skb with pskb_copy() and is sent as unicast packet |
568 | * to each of the selected candidates. | 568 | * to each of the selected candidates. |
569 | * | 569 | * |
570 | * Returns true if the packet is sent to at least one candidate, false | 570 | * Returns true if the packet is sent to at least one candidate, false |
571 | * otherwise. | 571 | * otherwise. |
572 | */ | 572 | */ |
573 | static bool batadv_dat_send_data(struct batadv_priv *bat_priv, | 573 | static bool batadv_dat_send_data(struct batadv_priv *bat_priv, |
574 | struct sk_buff *skb, __be32 ip, | 574 | struct sk_buff *skb, __be32 ip, |
575 | int packet_subtype) | 575 | int packet_subtype) |
576 | { | 576 | { |
577 | int i; | 577 | int i; |
578 | bool ret = false; | 578 | bool ret = false; |
579 | int send_status; | 579 | int send_status; |
580 | struct batadv_neigh_node *neigh_node = NULL; | 580 | struct batadv_neigh_node *neigh_node = NULL; |
581 | struct sk_buff *tmp_skb; | 581 | struct sk_buff *tmp_skb; |
582 | struct batadv_dat_candidate *cand; | 582 | struct batadv_dat_candidate *cand; |
583 | 583 | ||
584 | cand = batadv_dat_select_candidates(bat_priv, ip); | 584 | cand = batadv_dat_select_candidates(bat_priv, ip); |
585 | if (!cand) | 585 | if (!cand) |
586 | goto out; | 586 | goto out; |
587 | 587 | ||
588 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip); | 588 | batadv_dbg(BATADV_DBG_DAT, bat_priv, "DHT_SEND for %pI4\n", &ip); |
589 | 589 | ||
590 | for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) { | 590 | for (i = 0; i < BATADV_DAT_CANDIDATES_NUM; i++) { |
591 | if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND) | 591 | if (cand[i].type == BATADV_DAT_CANDIDATE_NOT_FOUND) |
592 | continue; | 592 | continue; |
593 | 593 | ||
594 | neigh_node = batadv_orig_node_get_router(cand[i].orig_node); | 594 | neigh_node = batadv_orig_node_get_router(cand[i].orig_node); |
595 | if (!neigh_node) | 595 | if (!neigh_node) |
596 | goto free_orig; | 596 | goto free_orig; |
597 | 597 | ||
598 | tmp_skb = pskb_copy(skb, GFP_ATOMIC); | 598 | tmp_skb = pskb_copy(skb, GFP_ATOMIC); |
599 | if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb, | 599 | if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb, |
600 | cand[i].orig_node, | 600 | cand[i].orig_node, |
601 | packet_subtype)) { | 601 | packet_subtype)) { |
602 | kfree_skb(tmp_skb); | 602 | kfree_skb(tmp_skb); |
603 | goto free_neigh; | 603 | goto free_neigh; |
604 | } | 604 | } |
605 | 605 | ||
606 | send_status = batadv_send_skb_packet(tmp_skb, | 606 | send_status = batadv_send_skb_packet(tmp_skb, |
607 | neigh_node->if_incoming, | 607 | neigh_node->if_incoming, |
608 | neigh_node->addr); | 608 | neigh_node->addr); |
609 | if (send_status == NET_XMIT_SUCCESS) { | 609 | if (send_status == NET_XMIT_SUCCESS) { |
610 | /* count the sent packet */ | 610 | /* count the sent packet */ |
611 | switch (packet_subtype) { | 611 | switch (packet_subtype) { |
612 | case BATADV_P_DAT_DHT_GET: | 612 | case BATADV_P_DAT_DHT_GET: |
613 | batadv_inc_counter(bat_priv, | 613 | batadv_inc_counter(bat_priv, |
614 | BATADV_CNT_DAT_GET_TX); | 614 | BATADV_CNT_DAT_GET_TX); |
615 | break; | 615 | break; |
616 | case BATADV_P_DAT_DHT_PUT: | 616 | case BATADV_P_DAT_DHT_PUT: |
617 | batadv_inc_counter(bat_priv, | 617 | batadv_inc_counter(bat_priv, |
618 | BATADV_CNT_DAT_PUT_TX); | 618 | BATADV_CNT_DAT_PUT_TX); |
619 | break; | 619 | break; |
620 | } | 620 | } |
621 | 621 | ||
622 | /* packet sent to a candidate: return true */ | 622 | /* packet sent to a candidate: return true */ |
623 | ret = true; | 623 | ret = true; |
624 | } | 624 | } |
625 | free_neigh: | 625 | free_neigh: |
626 | batadv_neigh_node_free_ref(neigh_node); | 626 | batadv_neigh_node_free_ref(neigh_node); |
627 | free_orig: | 627 | free_orig: |
628 | batadv_orig_node_free_ref(cand[i].orig_node); | 628 | batadv_orig_node_free_ref(cand[i].orig_node); |
629 | } | 629 | } |
630 | 630 | ||
631 | out: | 631 | out: |
632 | kfree(cand); | 632 | kfree(cand); |
633 | return ret; | 633 | return ret; |
634 | } | 634 | } |
635 | 635 | ||
636 | /** | 636 | /** |
637 | * batadv_dat_tvlv_container_update - update the dat tvlv container after dat | 637 | * batadv_dat_tvlv_container_update - update the dat tvlv container after dat |
638 | * setting change | 638 | * setting change |
639 | * @bat_priv: the bat priv with all the soft interface information | 639 | * @bat_priv: the bat priv with all the soft interface information |
640 | */ | 640 | */ |
641 | static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv) | 641 | static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv) |
642 | { | 642 | { |
643 | char dat_mode; | 643 | char dat_mode; |
644 | 644 | ||
645 | dat_mode = atomic_read(&bat_priv->distributed_arp_table); | 645 | dat_mode = atomic_read(&bat_priv->distributed_arp_table); |
646 | 646 | ||
647 | switch (dat_mode) { | 647 | switch (dat_mode) { |
648 | case 0: | 648 | case 0: |
649 | batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1); | 649 | batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1); |
650 | break; | 650 | break; |
651 | case 1: | 651 | case 1: |
652 | batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1, | 652 | batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1, |
653 | NULL, 0); | 653 | NULL, 0); |
654 | break; | 654 | break; |
655 | } | 655 | } |
656 | } | 656 | } |
657 | 657 | ||
/**
 * batadv_dat_status_update - update the dat tvlv container after dat
 * setting change
 * @net_dev: the soft interface net device
 */
void batadv_dat_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);

	batadv_dat_tvlv_container_update(bat_priv);
}
668 | 668 | ||
/**
 * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the orig_node of the ogm
 * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
 * @tvlv_value: tvlv buffer containing the DAT data
 * @tvlv_value_len: tvlv buffer length
 */
static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
					   struct batadv_orig_node *orig,
					   uint8_t flags,
					   void *tvlv_value,
					   uint16_t tvlv_value_len)
{
	/* CIFNOTFND means the DAT container was not found in the OGM: the
	 * originator does not support DAT
	 */
	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
	else
		orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
}
688 | 688 | ||
/**
 * batadv_dat_hash_free - free the local DAT hash table
 * @bat_priv: the bat priv with all the soft interface information
 */
static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
{
	if (!bat_priv->dat.hash)
		return;

	/* purge all entries (and release their references) first */
	__batadv_dat_purge(bat_priv, NULL);

	batadv_hash_destroy(bat_priv->dat.hash);

	bat_priv->dat.hash = NULL;
}
704 | 704 | ||
/**
 * batadv_dat_init - initialise the DAT internals
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success (or when already initialised), -ENOMEM if the DAT
 * hash table could not be allocated.
 */
int batadv_dat_init(struct batadv_priv *bat_priv)
{
	/* already initialised: nothing to do */
	if (bat_priv->dat.hash)
		return 0;

	bat_priv->dat.hash = batadv_hash_new(1024);

	if (!bat_priv->dat.hash)
		return -ENOMEM;

	batadv_dat_start_timer(bat_priv);

	batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
				     NULL, BATADV_TVLV_DAT, 1,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
	batadv_dat_tvlv_container_update(bat_priv);
	return 0;
}
727 | 727 | ||
/**
 * batadv_dat_free - free the DAT internals
 * @bat_priv: the bat priv with all the soft interface information
 */
void batadv_dat_free(struct batadv_priv *bat_priv)
{
	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);

	/* wait for a possibly pending purge work before freeing the hash */
	cancel_delayed_work_sync(&bat_priv->dat.work);

	batadv_dat_hash_free(bat_priv);
}
741 | 741 | ||
/**
 * batadv_dat_cache_seq_print_text - print the local DAT hash table
 * @seq: seq file to print on
 * @offset: not used
 *
 * Returns always 0.
 */
int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->dat.hash;
	struct batadv_dat_entry *dat_entry;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	unsigned long last_seen_jiffies;
	int last_seen_msecs, last_seen_secs, last_seen_mins;
	uint32_t i;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
	seq_printf(seq, " %-7s %-9s %4s %11s\n", "IPv4",
		   "MAC", "VID", "last-seen");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
			/* split the age of the entry into min:sec */
			last_seen_jiffies = jiffies - dat_entry->last_update;
			last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
			last_seen_mins = last_seen_msecs / 60000;
			last_seen_msecs = last_seen_msecs % 60000;
			last_seen_secs = last_seen_msecs / 1000;

			seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
				   &dat_entry->ip, dat_entry->mac_addr,
				   BATADV_PRINT_VID(dat_entry->vid),
				   last_seen_mins, last_seen_secs);
		}
		rcu_read_unlock();
	}

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return 0;
}
791 | 791 | ||
/**
 * batadv_arp_get_type - parse an ARP packet and gets the type
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: packet to analyse
 * @hdr_size: size of the possible header before the ARP packet in the skb
 *
 * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
 */
static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
				    struct sk_buff *skb, int hdr_size)
{
	struct arphdr *arphdr;
	struct ethhdr *ethhdr;
	__be32 ip_src, ip_dst;
	uint8_t *hw_src, *hw_dst;
	uint16_t type = 0;

	/* pull the ethernet header */
	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
		goto out;

	ethhdr = (struct ethhdr *)(skb->data + hdr_size);

	if (ethhdr->h_proto != htons(ETH_P_ARP))
		goto out;

	/* pull the ARP payload (must happen before dereferencing arphdr) */
	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN +
				    arp_hdr_len(skb->dev))))
		goto out;

	arphdr = (struct arphdr *)(skb->data + hdr_size + ETH_HLEN);

	/* check whether the ARP packet carries a valid IP information */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		goto out;

	if (arphdr->ar_pro != htons(ETH_P_IP))
		goto out;

	if (arphdr->ar_hln != ETH_ALEN)
		goto out;

	/* IPv4 protocol addresses are 4 bytes long */
	if (arphdr->ar_pln != 4)
		goto out;

	/* Check for bad reply/request. If the ARP message is not sane, DAT
	 * will simply ignore it
	 */
	ip_src = batadv_arp_ip_src(skb, hdr_size);
	ip_dst = batadv_arp_ip_dst(skb, hdr_size);
	if (ipv4_is_loopback(ip_src) || ipv4_is_multicast(ip_src) ||
	    ipv4_is_loopback(ip_dst) || ipv4_is_multicast(ip_dst) ||
	    ipv4_is_zeronet(ip_src) || ipv4_is_lbcast(ip_src) ||
	    ipv4_is_zeronet(ip_dst) || ipv4_is_lbcast(ip_dst))
		goto out;

	hw_src = batadv_arp_hw_src(skb, hdr_size);
	if (is_zero_ether_addr(hw_src) || is_multicast_ether_addr(hw_src))
		goto out;

	/* don't care about the destination MAC address in ARP requests */
	if (arphdr->ar_op != htons(ARPOP_REQUEST)) {
		hw_dst = batadv_arp_hw_dst(skb, hdr_size);
		if (is_zero_ether_addr(hw_dst) ||
		    is_multicast_ether_addr(hw_dst))
			goto out;
	}

	type = ntohs(arphdr->ar_op);
out:
	return type;
}
865 | 865 | ||
866 | /** | 866 | /** |
867 | * batadv_dat_get_vid - extract the VLAN identifier from skb if any | 867 | * batadv_dat_get_vid - extract the VLAN identifier from skb if any |
868 | * @skb: the buffer containing the packet to extract the VID from | 868 | * @skb: the buffer containing the packet to extract the VID from |
869 | * @hdr_size: the size of the batman-adv header encapsulating the packet | 869 | * @hdr_size: the size of the batman-adv header encapsulating the packet |
870 | * | 870 | * |
871 | * If the packet embedded in the skb is vlan tagged this function returns the | 871 | * If the packet embedded in the skb is vlan tagged this function returns the |
872 | * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned. | 872 | * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned. |
873 | */ | 873 | */ |
874 | static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size) | 874 | static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size) |
875 | { | 875 | { |
876 | unsigned short vid; | 876 | unsigned short vid; |
877 | 877 | ||
878 | vid = batadv_get_vid(skb, *hdr_size); | 878 | vid = batadv_get_vid(skb, *hdr_size); |
879 | 879 | ||
880 | /* ARP parsing functions jump forward of hdr_size + ETH_HLEN. | 880 | /* ARP parsing functions jump forward of hdr_size + ETH_HLEN. |
881 | * If the header contained in the packet is a VLAN one (which is longer) | 881 | * If the header contained in the packet is a VLAN one (which is longer) |
882 | * hdr_size is updated so that the functions will still skip the | 882 | * hdr_size is updated so that the functions will still skip the |
883 | * correct amount of bytes. | 883 | * correct amount of bytes. |
884 | */ | 884 | */ |
885 | if (vid & BATADV_VLAN_HAS_TAG) | 885 | if (vid & BATADV_VLAN_HAS_TAG) |
886 | *hdr_size += VLAN_HLEN; | 886 | *hdr_size += VLAN_HLEN; |
887 | 887 | ||
888 | return vid; | 888 | return vid; |
889 | } | 889 | } |
890 | 890 | ||
/**
 * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
 * answer using DAT
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: packet to check
 *
 * Returns true if the message has been sent to the dht candidates, false
 * otherwise. In case of a positive return value the message has to be enqueued
 * to permit the fallback.
 */
bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
					   struct sk_buff *skb)
{
	uint16_t type = 0;
	__be32 ip_dst, ip_src;
	uint8_t *hw_src;
	bool ret = false;
	struct batadv_dat_entry *dat_entry = NULL;
	struct sk_buff *skb_new;
	int hdr_size = 0;
	unsigned short vid;

	if (!atomic_read(&bat_priv->distributed_arp_table))
		goto out;

	/* may grow hdr_size by VLAN_HLEN when the frame is tagged */
	vid = batadv_dat_get_vid(skb, &hdr_size);

	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
	/* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
	 * message to the selected DHT candidates
	 */
	if (type != ARPOP_REQUEST)
		goto out;

	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
		       "Parsing outgoing ARP REQUEST");

	ip_src = batadv_arp_ip_src(skb, hdr_size);
	hw_src = batadv_arp_hw_src(skb, hdr_size);
	ip_dst = batadv_arp_ip_dst(skb, hdr_size);

	/* learn the sender's IP/MAC mapping from the request itself */
	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);

	/* hash_find takes a reference on the entry; released at 'out' */
	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
	if (dat_entry) {
		/* If the ARP request is destined for a local client the local
		 * client will answer itself. DAT would only generate a
		 * duplicate packet.
		 *
		 * Moreover, if the soft-interface is enslaved into a bridge, an
		 * additional DAT answer may trigger kernel warnings about
		 * a packet coming from the wrong port.
		 */
		if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
					BATADV_NO_FLAGS)) {
			ret = true;
			goto out;
		}

		/* forge an ARP reply on behalf of the cached target */
		skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
				     bat_priv->soft_iface, ip_dst, hw_src,
				     dat_entry->mac_addr, hw_src);
		if (!skb_new)
			goto out;

		if (vid & BATADV_VLAN_HAS_TAG)
			skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
						  vid & VLAN_VID_MASK);

		/* inject the forged reply into the local stack as if it had
		 * been received on the soft-interface; account it as rx
		 */
		skb_reset_mac_header(skb_new);
		skb_new->protocol = eth_type_trans(skb_new,
						   bat_priv->soft_iface);
		bat_priv->stats.rx_packets++;
		bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
		bat_priv->soft_iface->last_rx = jiffies;

		netif_rx(skb_new);
		batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
		ret = true;
	} else {
		/* Send the request to the DHT */
		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
					   BATADV_P_DAT_DHT_GET);
	}
out:
	if (dat_entry)
		batadv_dat_entry_free_ref(dat_entry);
	return ret;
}
980 | 980 | ||
/**
 * batadv_dat_snoop_incoming_arp_request - snoop the ARP request and try to
 * answer using the local DAT storage
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: packet to check
 * @hdr_size: size of the encapsulation header
 *
 * Returns true if the request has been answered, false otherwise. On a true
 * return the caller's @skb has been consumed (freed here).
 */
bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
					   struct sk_buff *skb, int hdr_size)
{
	uint16_t type;
	__be32 ip_src, ip_dst;
	uint8_t *hw_src;
	struct sk_buff *skb_new;
	struct batadv_dat_entry *dat_entry = NULL;
	bool ret = false;
	unsigned short vid;
	int err;

	if (!atomic_read(&bat_priv->distributed_arp_table))
		goto out;

	/* may grow hdr_size by VLAN_HLEN when the frame is tagged */
	vid = batadv_dat_get_vid(skb, &hdr_size);

	type = batadv_arp_get_type(bat_priv, skb, hdr_size);
	if (type != ARPOP_REQUEST)
		goto out;

	hw_src = batadv_arp_hw_src(skb, hdr_size);
	ip_src = batadv_arp_ip_src(skb, hdr_size);
	ip_dst = batadv_arp_ip_dst(skb, hdr_size);

	batadv_dbg_arp(bat_priv, skb, type, hdr_size,
		       "Parsing incoming ARP REQUEST");

	/* learn the requester's IP/MAC mapping */
	batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);

	/* answer only if the local cache knows the requested address;
	 * hash_find takes a reference, released at 'out'
	 */
	dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
	if (!dat_entry)
		goto out;

	skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
			     bat_priv->soft_iface, ip_dst, hw_src,
			     dat_entry->mac_addr, hw_src);

	if (!skb_new)
		goto out;

	if (vid & BATADV_VLAN_HAS_TAG)
		skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
					  vid & VLAN_VID_MASK);

	/* To preserve backwards compatibility, the node has to choose the
	 * outgoing format based on the incoming request packet type. The
	 * assumption is that a node not using the 4addr packet format doesn't
	 * support it.
	 */
	if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
		err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
						   BATADV_P_DAT_CACHE_REPLY,
						   vid);
	else
		err = batadv_send_skb_via_tt(bat_priv, skb_new, vid);

	/* the send functions return NET_XMIT_* codes; anything but DROP
	 * means the cached reply went out
	 */
	if (err != NET_XMIT_DROP) {
		batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
		ret = true;
	}
out:
	if (dat_entry)
		batadv_dat_entry_free_ref(dat_entry);
	if (ret)
		kfree_skb(skb);
	return ret;
}
1057 | 1057 | ||
1058 | /** | 1058 | /** |
1059 | * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT | 1059 | * batadv_dat_snoop_outgoing_arp_reply - snoop the ARP reply and fill the DHT |
1060 | * @bat_priv: the bat priv with all the soft interface information | 1060 | * @bat_priv: the bat priv with all the soft interface information |
1061 | * @skb: packet to check | 1061 | * @skb: packet to check |
1062 | */ | 1062 | */ |
1063 | void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, | 1063 | void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, |
1064 | struct sk_buff *skb) | 1064 | struct sk_buff *skb) |
1065 | { | 1065 | { |
1066 | uint16_t type; | 1066 | uint16_t type; |
1067 | __be32 ip_src, ip_dst; | 1067 | __be32 ip_src, ip_dst; |
1068 | uint8_t *hw_src, *hw_dst; | 1068 | uint8_t *hw_src, *hw_dst; |
1069 | int hdr_size = 0; | 1069 | int hdr_size = 0; |
1070 | unsigned short vid; | 1070 | unsigned short vid; |
1071 | 1071 | ||
1072 | if (!atomic_read(&bat_priv->distributed_arp_table)) | 1072 | if (!atomic_read(&bat_priv->distributed_arp_table)) |
1073 | return; | 1073 | return; |
1074 | 1074 | ||
1075 | vid = batadv_dat_get_vid(skb, &hdr_size); | 1075 | vid = batadv_dat_get_vid(skb, &hdr_size); |
1076 | 1076 | ||
1077 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); | 1077 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); |
1078 | if (type != ARPOP_REPLY) | 1078 | if (type != ARPOP_REPLY) |
1079 | return; | 1079 | return; |
1080 | 1080 | ||
1081 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, | 1081 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, |
1082 | "Parsing outgoing ARP REPLY"); | 1082 | "Parsing outgoing ARP REPLY"); |
1083 | 1083 | ||
1084 | hw_src = batadv_arp_hw_src(skb, hdr_size); | 1084 | hw_src = batadv_arp_hw_src(skb, hdr_size); |
1085 | ip_src = batadv_arp_ip_src(skb, hdr_size); | 1085 | ip_src = batadv_arp_ip_src(skb, hdr_size); |
1086 | hw_dst = batadv_arp_hw_dst(skb, hdr_size); | 1086 | hw_dst = batadv_arp_hw_dst(skb, hdr_size); |
1087 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | 1087 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); |
1088 | 1088 | ||
1089 | batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); | 1089 | batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); |
1090 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); | 1090 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); |
1091 | 1091 | ||
1092 | /* Send the ARP reply to the candidates for both the IP addresses that | 1092 | /* Send the ARP reply to the candidates for both the IP addresses that |
1093 | * the node obtained from the ARP reply | 1093 | * the node obtained from the ARP reply |
1094 | */ | 1094 | */ |
1095 | batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT); | 1095 | batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT); |
1096 | batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT); | 1096 | batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT); |
1097 | } | 1097 | } |
1098 | /** | 1098 | /** |
1099 | * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local | 1099 | * batadv_dat_snoop_incoming_arp_reply - snoop the ARP reply and fill the local |
1100 | * DAT storage only | 1100 | * DAT storage only |
1101 | * @bat_priv: the bat priv with all the soft interface information | 1101 | * @bat_priv: the bat priv with all the soft interface information |
1102 | * @skb: packet to check | 1102 | * @skb: packet to check |
1103 | * @hdr_size: size of the encapsulation header | 1103 | * @hdr_size: size of the encapsulation header |
1104 | */ | 1104 | */ |
1105 | bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, | 1105 | bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, |
1106 | struct sk_buff *skb, int hdr_size) | 1106 | struct sk_buff *skb, int hdr_size) |
1107 | { | 1107 | { |
1108 | uint16_t type; | 1108 | uint16_t type; |
1109 | __be32 ip_src, ip_dst; | 1109 | __be32 ip_src, ip_dst; |
1110 | uint8_t *hw_src, *hw_dst; | 1110 | uint8_t *hw_src, *hw_dst; |
1111 | bool ret = false; | 1111 | bool ret = false; |
1112 | unsigned short vid; | 1112 | unsigned short vid; |
1113 | 1113 | ||
1114 | if (!atomic_read(&bat_priv->distributed_arp_table)) | 1114 | if (!atomic_read(&bat_priv->distributed_arp_table)) |
1115 | goto out; | 1115 | goto out; |
1116 | 1116 | ||
1117 | vid = batadv_dat_get_vid(skb, &hdr_size); | 1117 | vid = batadv_dat_get_vid(skb, &hdr_size); |
1118 | 1118 | ||
1119 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); | 1119 | type = batadv_arp_get_type(bat_priv, skb, hdr_size); |
1120 | if (type != ARPOP_REPLY) | 1120 | if (type != ARPOP_REPLY) |
1121 | goto out; | 1121 | goto out; |
1122 | 1122 | ||
1123 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, | 1123 | batadv_dbg_arp(bat_priv, skb, type, hdr_size, |
1124 | "Parsing incoming ARP REPLY"); | 1124 | "Parsing incoming ARP REPLY"); |
1125 | 1125 | ||
1126 | hw_src = batadv_arp_hw_src(skb, hdr_size); | 1126 | hw_src = batadv_arp_hw_src(skb, hdr_size); |
1127 | ip_src = batadv_arp_ip_src(skb, hdr_size); | 1127 | ip_src = batadv_arp_ip_src(skb, hdr_size); |
1128 | hw_dst = batadv_arp_hw_dst(skb, hdr_size); | 1128 | hw_dst = batadv_arp_hw_dst(skb, hdr_size); |
1129 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); | 1129 | ip_dst = batadv_arp_ip_dst(skb, hdr_size); |
1130 | 1130 | ||
1131 | /* Update our internal cache with both the IP addresses the node got | 1131 | /* Update our internal cache with both the IP addresses the node got |
1132 | * within the ARP reply | 1132 | * within the ARP reply |
1133 | */ | 1133 | */ |
1134 | batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); | 1134 | batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid); |
1135 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); | 1135 | batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid); |
1136 | 1136 | ||
1137 | /* if this REPLY is directed to a client of mine, let's deliver the | 1137 | /* if this REPLY is directed to a client of mine, let's deliver the |
1138 | * packet to the interface | 1138 | * packet to the interface |
1139 | */ | 1139 | */ |
1140 | ret = !batadv_is_my_client(bat_priv, hw_dst, vid); | 1140 | ret = !batadv_is_my_client(bat_priv, hw_dst, vid); |
1141 | out: | 1141 | out: |
1142 | if (ret) | 1142 | if (ret) |
1143 | kfree_skb(skb); | 1143 | kfree_skb(skb); |
1144 | /* if ret == false -> packet has to be delivered to the interface */ | 1144 | /* if ret == false -> packet has to be delivered to the interface */ |
1145 | return ret; | 1145 | return ret; |
1146 | } | 1146 | } |
1147 | 1147 | ||
1148 | /** | 1148 | /** |
1149 | * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped | 1149 | * batadv_dat_drop_broadcast_packet - check if an ARP request has to be dropped |
1150 | * (because the node has already obtained the reply via DAT) or not | 1150 | * (because the node has already obtained the reply via DAT) or not |
1151 | * @bat_priv: the bat priv with all the soft interface information | 1151 | * @bat_priv: the bat priv with all the soft interface information |
1152 | * @forw_packet: the broadcast packet | 1152 | * @forw_packet: the broadcast packet |
1153 | * | 1153 | * |
1154 | * Returns true if the node can drop the packet, false otherwise. | 1154 | * Returns true if the node can drop the packet, false otherwise. |
1155 | */ | 1155 | */ |
1156 | bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, | 1156 | bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv, |
1157 | struct batadv_forw_packet *forw_packet) | 1157 | struct batadv_forw_packet *forw_packet) |
1158 | { | 1158 | { |
1159 | uint16_t type; | 1159 | uint16_t type; |
1160 | __be32 ip_dst; | 1160 | __be32 ip_dst; |
1161 | struct batadv_dat_entry *dat_entry = NULL; | 1161 | struct batadv_dat_entry *dat_entry = NULL; |
1162 | bool ret = false; | 1162 | bool ret = false; |
1163 | int hdr_size = sizeof(struct batadv_bcast_packet); | 1163 | int hdr_size = sizeof(struct batadv_bcast_packet); |
1164 | unsigned short vid; | 1164 | unsigned short vid; |
1165 | 1165 | ||
1166 | if (!atomic_read(&bat_priv->distributed_arp_table)) | 1166 | if (!atomic_read(&bat_priv->distributed_arp_table)) |
1167 | goto out; | 1167 | goto out; |
1168 | 1168 | ||
1169 | /* If this packet is an ARP_REQUEST and the node already has the | 1169 | /* If this packet is an ARP_REQUEST and the node already has the |
1170 | * information that it is going to ask, then the packet can be dropped | 1170 | * information that it is going to ask, then the packet can be dropped |
1171 | */ | 1171 | */ |
1172 | if (forw_packet->num_packets) | 1172 | if (forw_packet->num_packets) |
1173 | goto out; | 1173 | goto out; |
1174 | 1174 | ||
1175 | vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size); | 1175 | vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size); |
1176 | 1176 | ||
1177 | type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size); | 1177 | type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size); |
1178 | if (type != ARPOP_REQUEST) | 1178 | if (type != ARPOP_REQUEST) |
1179 | goto out; | 1179 | goto out; |
1180 | 1180 | ||
1181 | ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size); | 1181 | ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size); |
1182 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid); | 1182 | dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid); |
1183 | /* check if the node already got this entry */ | 1183 | /* check if the node already got this entry */ |
1184 | if (!dat_entry) { | 1184 | if (!dat_entry) { |
1185 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 1185 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
1186 | "ARP Request for %pI4: fallback\n", &ip_dst); | 1186 | "ARP Request for %pI4: fallback\n", &ip_dst); |
1187 | goto out; | 1187 | goto out; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 1190 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
1191 | "ARP Request for %pI4: fallback prevented\n", &ip_dst); | 1191 | "ARP Request for %pI4: fallback prevented\n", &ip_dst); |
1192 | ret = true; | 1192 | ret = true; |
1193 | 1193 | ||
1194 | out: | 1194 | out: |
1195 | if (dat_entry) | 1195 | if (dat_entry) |
1196 | batadv_dat_entry_free_ref(dat_entry); | 1196 | batadv_dat_entry_free_ref(dat_entry); |
1197 | return ret; | 1197 | return ret; |
1198 | } | 1198 | } |
1199 | 1199 |
net/batman-adv/send.c
1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: | 1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: |
2 | * | 2 | * |
3 | * Marek Lindner, Simon Wunderlich | 3 | * Marek Lindner, Simon Wunderlich |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of version 2 of the GNU General Public | 6 | * modify it under the terms of version 2 of the GNU General Public |
7 | * License as published by the Free Software Foundation. | 7 | * License as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but | 9 | * This program is distributed in the hope that it will be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * General Public License for more details. | 12 | * General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
17 | * 02110-1301, USA | 17 | * 02110-1301, USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "distributed-arp-table.h" | 21 | #include "distributed-arp-table.h" |
22 | #include "send.h" | 22 | #include "send.h" |
23 | #include "routing.h" | 23 | #include "routing.h" |
24 | #include "translation-table.h" | 24 | #include "translation-table.h" |
25 | #include "soft-interface.h" | 25 | #include "soft-interface.h" |
26 | #include "hard-interface.h" | 26 | #include "hard-interface.h" |
27 | #include "gateway_common.h" | 27 | #include "gateway_common.h" |
28 | #include "gateway_client.h" | 28 | #include "gateway_client.h" |
29 | #include "originator.h" | 29 | #include "originator.h" |
30 | #include "network-coding.h" | 30 | #include "network-coding.h" |
31 | #include "fragmentation.h" | 31 | #include "fragmentation.h" |
32 | 32 | ||
33 | static void batadv_send_outstanding_bcast_packet(struct work_struct *work); | 33 | static void batadv_send_outstanding_bcast_packet(struct work_struct *work); |
34 | 34 | ||
35 | /* send out an already prepared packet to the given address via the | 35 | /* send out an already prepared packet to the given address via the |
36 | * specified batman interface | 36 | * specified batman interface |
37 | */ | 37 | */ |
38 | int batadv_send_skb_packet(struct sk_buff *skb, | 38 | int batadv_send_skb_packet(struct sk_buff *skb, |
39 | struct batadv_hard_iface *hard_iface, | 39 | struct batadv_hard_iface *hard_iface, |
40 | const uint8_t *dst_addr) | 40 | const uint8_t *dst_addr) |
41 | { | 41 | { |
42 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); | 42 | struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); |
43 | struct ethhdr *ethhdr; | 43 | struct ethhdr *ethhdr; |
44 | 44 | ||
45 | if (hard_iface->if_status != BATADV_IF_ACTIVE) | 45 | if (hard_iface->if_status != BATADV_IF_ACTIVE) |
46 | goto send_skb_err; | 46 | goto send_skb_err; |
47 | 47 | ||
48 | if (unlikely(!hard_iface->net_dev)) | 48 | if (unlikely(!hard_iface->net_dev)) |
49 | goto send_skb_err; | 49 | goto send_skb_err; |
50 | 50 | ||
51 | if (!(hard_iface->net_dev->flags & IFF_UP)) { | 51 | if (!(hard_iface->net_dev->flags & IFF_UP)) { |
52 | pr_warn("Interface %s is not up - can't send packet via that interface!\n", | 52 | pr_warn("Interface %s is not up - can't send packet via that interface!\n", |
53 | hard_iface->net_dev->name); | 53 | hard_iface->net_dev->name); |
54 | goto send_skb_err; | 54 | goto send_skb_err; |
55 | } | 55 | } |
56 | 56 | ||
57 | /* push to the ethernet header. */ | 57 | /* push to the ethernet header. */ |
58 | if (batadv_skb_head_push(skb, ETH_HLEN) < 0) | 58 | if (batadv_skb_head_push(skb, ETH_HLEN) < 0) |
59 | goto send_skb_err; | 59 | goto send_skb_err; |
60 | 60 | ||
61 | skb_reset_mac_header(skb); | 61 | skb_reset_mac_header(skb); |
62 | 62 | ||
63 | ethhdr = eth_hdr(skb); | 63 | ethhdr = eth_hdr(skb); |
64 | memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); | 64 | memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN); |
65 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); | 65 | memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN); |
66 | ethhdr->h_proto = htons(ETH_P_BATMAN); | 66 | ethhdr->h_proto = htons(ETH_P_BATMAN); |
67 | 67 | ||
68 | skb_set_network_header(skb, ETH_HLEN); | 68 | skb_set_network_header(skb, ETH_HLEN); |
69 | skb->protocol = htons(ETH_P_BATMAN); | 69 | skb->protocol = htons(ETH_P_BATMAN); |
70 | 70 | ||
71 | skb->dev = hard_iface->net_dev; | 71 | skb->dev = hard_iface->net_dev; |
72 | 72 | ||
73 | /* Save a clone of the skb to use when decoding coded packets */ | 73 | /* Save a clone of the skb to use when decoding coded packets */ |
74 | batadv_nc_skb_store_for_decoding(bat_priv, skb); | 74 | batadv_nc_skb_store_for_decoding(bat_priv, skb); |
75 | 75 | ||
76 | /* dev_queue_xmit() returns a negative result on error. However on | 76 | /* dev_queue_xmit() returns a negative result on error. However on |
77 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP | 77 | * congestion and traffic shaping, it drops and returns NET_XMIT_DROP |
78 | * (which is > 0). This will not be treated as an error. | 78 | * (which is > 0). This will not be treated as an error. |
79 | */ | 79 | */ |
80 | return dev_queue_xmit(skb); | 80 | return dev_queue_xmit(skb); |
81 | send_skb_err: | 81 | send_skb_err: |
82 | kfree_skb(skb); | 82 | kfree_skb(skb); |
83 | return NET_XMIT_DROP; | 83 | return NET_XMIT_DROP; |
84 | } | 84 | } |
85 | 85 | ||
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of MAC header. If the packet originated from this
 * host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_nodes refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	/* drop the reference taken by batadv_find_router() */
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
143 | 143 | ||
/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->header.version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->header.packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->header.ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
176 | 176 | ||
/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
191 | 191 | ||
/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
	memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
235 | 235 | ||
/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype,
				   struct batadv_orig_node *orig_node,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	/* the orig_node reference handed in by the caller is consumed here,
	 * whether the transmission succeeded or not
	 */
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
304 | |||
/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;

	/* a NULL lookup result is handled by batadv_send_skb_unicast() */
	orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
					     ethhdr->h_dest, vid);
	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
334 | |||
/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	/* a NULL gateway is handled by batadv_send_skb_unicast() */
	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
308 | 355 | ||
/* ask the active routing algorithm to schedule an OGM transmission on the
 * given hard interface; interfaces pending activation are switched to
 * active here first (see comment below), inactive or to-be-removed ones
 * are skipped
 */
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
328 | 375 | ||
/* free a forwarding packet together with the references it holds: its
 * queued skb (if any) and its incoming hard interface reference (if set)
 */
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}
337 | 384 | ||
/* enqueue a broadcast forw_packet on the bat_priv broadcast list (under
 * forw_bcast_list_lock) and arm its delayed work to run at send_time
 */
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
352 | 399 | ||
/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	/* bcast_queue_left limits the number of queued broadcasts; it is
	 * re-incremented on every error path below
	 */
	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
417 | 464 | ||
/* delayed-work callback: remove the broadcast forw_packet from the queue,
 * send a clone of its skb on every hard interface attached to the same
 * soft interface, and requeue itself (5 msec later) until each interface
 * has seen its share of rebroadcasts or the mesh is deactivating
 */
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}
473 | 520 | ||
/* delayed-work callback: dequeue an OGM forw_packet and hand it to the
 * routing algorithm's emit hook; own OGMs reschedule the next one, foreign
 * OGMs give their slot back to batman_queue_left
 */
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queues wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}
507 | 554 | ||
508 | void | 555 | void |
509 | batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | 556 | batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, |
510 | const struct batadv_hard_iface *hard_iface) | 557 | const struct batadv_hard_iface *hard_iface) |
511 | { | 558 | { |
512 | struct batadv_forw_packet *forw_packet; | 559 | struct batadv_forw_packet *forw_packet; |
513 | struct hlist_node *safe_tmp_node; | 560 | struct hlist_node *safe_tmp_node; |
514 | bool pending; | 561 | bool pending; |
515 | 562 | ||
516 | if (hard_iface) | 563 | if (hard_iface) |
517 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 564 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
518 | "purge_outstanding_packets(): %s\n", | 565 | "purge_outstanding_packets(): %s\n", |
519 | hard_iface->net_dev->name); | 566 | hard_iface->net_dev->name); |
520 | else | 567 | else |
521 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 568 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
522 | "purge_outstanding_packets()\n"); | 569 | "purge_outstanding_packets()\n"); |
523 | 570 | ||
524 | /* free bcast list */ | 571 | /* free bcast list */ |
525 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 572 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
526 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, | 573 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
527 | &bat_priv->forw_bcast_list, list) { | 574 | &bat_priv->forw_bcast_list, list) { |
528 | /* if purge_outstanding_packets() was called with an argument | 575 | /* if purge_outstanding_packets() was called with an argument |
529 | * we delete only packets belonging to the given interface | 576 | * we delete only packets belonging to the given interface |
530 | */ | 577 | */ |
531 | if ((hard_iface) && | 578 | if ((hard_iface) && |
532 | (forw_packet->if_incoming != hard_iface)) | 579 | (forw_packet->if_incoming != hard_iface)) |
533 | continue; | 580 | continue; |
534 | 581 | ||
535 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 582 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
536 | 583 | ||
537 | /* batadv_send_outstanding_bcast_packet() will lock the list to | 584 | /* batadv_send_outstanding_bcast_packet() will lock the list to |
538 | * delete the item from the list | 585 | * delete the item from the list |
539 | */ | 586 | */ |
540 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); | 587 | pending = cancel_delayed_work_sync(&forw_packet->delayed_work); |
541 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); | 588 | spin_lock_bh(&bat_priv->forw_bcast_list_lock); |
542 | 589 | ||
543 | if (pending) { | 590 | if (pending) { |
544 | hlist_del(&forw_packet->list); | 591 | hlist_del(&forw_packet->list); |
545 | batadv_forw_packet_free(forw_packet); | 592 | batadv_forw_packet_free(forw_packet); |
546 | } | 593 | } |
547 | } | 594 | } |
548 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); | 595 | spin_unlock_bh(&bat_priv->forw_bcast_list_lock); |
549 | 596 | ||
550 | /* free batman packet list */ | 597 | /* free batman packet list */ |
551 | spin_lock_bh(&bat_priv->forw_bat_list_lock); | 598 | spin_lock_bh(&bat_priv->forw_bat_list_lock); |
552 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, | 599 | hlist_for_each_entry_safe(forw_packet, safe_tmp_node, |
553 | &bat_priv->forw_bat_list, list) { | 600 | &bat_priv->forw_bat_list, list) { |
554 | /* if purge_outstanding_packets() was called with an argument | 601 | /* if purge_outstanding_packets() was called with an argument |
555 | * we delete only packets belonging to the given interface | 602 | * we delete only packets belonging to the given interface |
556 | */ | 603 | */ |
557 | if ((hard_iface) && | 604 | if ((hard_iface) && |
558 | (forw_packet->if_incoming != hard_iface)) | 605 | (forw_packet->if_incoming != hard_iface)) |
559 | continue; | 606 | continue; |
560 | 607 | ||
561 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); | 608 | spin_unlock_bh(&bat_priv->forw_bat_list_lock); |
562 | 609 | ||
563 | /* send_outstanding_bat_packet() will lock the list to | 610 | /* send_outstanding_bat_packet() will lock the list to |
564 | * delete the item from the list | 611 | * delete the item from the list |
565 | */ | 612 | */ |
net/batman-adv/send.h
1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: | 1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: |
2 | * | 2 | * |
3 | * Marek Lindner, Simon Wunderlich | 3 | * Marek Lindner, Simon Wunderlich |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of version 2 of the GNU General Public | 6 | * modify it under the terms of version 2 of the GNU General Public |
7 | * License as published by the Free Software Foundation. | 7 | * License as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but | 9 | * This program is distributed in the hope that it will be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * General Public License for more details. | 12 | * General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
17 | * 02110-1301, USA | 17 | * 02110-1301, USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef _NET_BATMAN_ADV_SEND_H_ | 20 | #ifndef _NET_BATMAN_ADV_SEND_H_ |
21 | #define _NET_BATMAN_ADV_SEND_H_ | 21 | #define _NET_BATMAN_ADV_SEND_H_ |
22 | 22 | ||
23 | int batadv_send_skb_packet(struct sk_buff *skb, | 23 | int batadv_send_skb_packet(struct sk_buff *skb, |
24 | struct batadv_hard_iface *hard_iface, | 24 | struct batadv_hard_iface *hard_iface, |
25 | const uint8_t *dst_addr); | 25 | const uint8_t *dst_addr); |
26 | int batadv_send_skb_to_orig(struct sk_buff *skb, | 26 | int batadv_send_skb_to_orig(struct sk_buff *skb, |
27 | struct batadv_orig_node *orig_node, | 27 | struct batadv_orig_node *orig_node, |
28 | struct batadv_hard_iface *recv_if); | 28 | struct batadv_hard_iface *recv_if); |
29 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface); | 29 | void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface); |
30 | int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, | 30 | int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv, |
31 | const struct sk_buff *skb, | 31 | const struct sk_buff *skb, |
32 | unsigned long delay); | 32 | unsigned long delay); |
33 | void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work); | 33 | void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work); |
34 | void | 34 | void |
35 | batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, | 35 | batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, |
36 | const struct batadv_hard_iface *hard_iface); | 36 | const struct batadv_hard_iface *hard_iface); |
37 | bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, | 37 | bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, |
38 | struct sk_buff *skb, | 38 | struct sk_buff *skb, |
39 | struct batadv_orig_node *orig_node, | 39 | struct batadv_orig_node *orig_node, |
40 | int packet_subtype); | 40 | int packet_subtype); |
41 | int batadv_send_skb_generic_unicast(struct batadv_priv *bat_priv, | 41 | int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, |
42 | struct sk_buff *skb, int packet_type, | 42 | struct sk_buff *skb, int packet_type, |
43 | int packet_subtype, | 43 | int packet_subtype, unsigned short vid); |
44 | unsigned short vid); | 44 | int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, |
45 | unsigned short vid); | ||
45 | 46 | ||
46 | /** | 47 | /** |
47 | * batadv_send_unicast_skb - send the skb encapsulated in a unicast packet | 48 | * batadv_send_skb_via_tt - send an skb via TT lookup |
48 | * @bat_priv: the bat priv with all the soft interface information | 49 | * @bat_priv: the bat priv with all the soft interface information |
49 | * @skb: the payload to send | 50 | * @skb: the payload to send |
50 | * @vid: the vid to be used to search the translation table | 51 | * @vid: the vid to be used to search the translation table |
51 | * | 52 | * |
52 | * Returns 1 in case of error or 0 otherwise. | 53 | * Look up the recipient node for the destination address in the ethernet |
54 | * header via the translation table. Wrap the given skb into a batman-adv | ||
55 | * unicast header. Then send this frame to the according destination node. | ||
56 | * | ||
57 | * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. | ||
53 | */ | 58 | */ |
54 | static inline int batadv_send_skb_unicast(struct batadv_priv *bat_priv, | 59 | static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv, |
55 | struct sk_buff *skb, | 60 | struct sk_buff *skb, |
56 | unsigned short vid) | 61 | unsigned short vid) |
57 | { | 62 | { |
58 | return batadv_send_skb_generic_unicast(bat_priv, skb, BATADV_UNICAST, | 63 | return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0, |
59 | 0, vid); | 64 | vid); |
60 | } | 65 | } |
61 | 66 | ||
62 | /** | 67 | /** |
63 | * batadv_send_4addr_unicast_skb - send the skb encapsulated in a unicast 4addr | 68 | * batadv_send_skb_via_tt_4addr - send an skb via TT lookup |
64 | * packet | ||
65 | * @bat_priv: the bat priv with all the soft interface information | 69 | * @bat_priv: the bat priv with all the soft interface information |
66 | * @skb: the payload to send | 70 | * @skb: the payload to send |
67 | * @packet_subtype: the unicast 4addr packet subtype to use | 71 | * @packet_subtype: the unicast 4addr packet subtype to use |
68 | * @vid: the vid to be used to search the translation table | 72 | * @vid: the vid to be used to search the translation table |
69 | * | 73 | * |
70 | * Returns 1 in case of error or 0 otherwise. | 74 | * Look up the recipient node for the destination address in the ethernet |
75 | * header via the translation table. Wrap the given skb into a batman-adv | ||
76 | * unicast-4addr header. Then send this frame to the according destination | ||
77 | * node. | ||
78 | * | ||
79 | * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. | ||
71 | */ | 80 | */ |
72 | static inline int batadv_send_skb_unicast_4addr(struct batadv_priv *bat_priv, | 81 | static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv, |
73 | struct sk_buff *skb, | 82 | struct sk_buff *skb, |
74 | int packet_subtype, | 83 | int packet_subtype, |
75 | unsigned short vid) | 84 | unsigned short vid) |
76 | { | 85 | { |
77 | return batadv_send_skb_generic_unicast(bat_priv, skb, | 86 | return batadv_send_skb_via_tt_generic(bat_priv, skb, |
78 | BATADV_UNICAST_4ADDR, | 87 | BATADV_UNICAST_4ADDR, |
79 | packet_subtype, vid); | 88 | packet_subtype, vid); |
80 | } | 89 | } |
81 | 90 | ||
82 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ | 91 | #endif /* _NET_BATMAN_ADV_SEND_H_ */ |
net/batman-adv/soft-interface.c
1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: | 1 | /* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors: |
2 | * | 2 | * |
3 | * Marek Lindner, Simon Wunderlich | 3 | * Marek Lindner, Simon Wunderlich |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or | 5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of version 2 of the GNU General Public | 6 | * modify it under the terms of version 2 of the GNU General Public |
7 | * License as published by the Free Software Foundation. | 7 | * License as published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but | 9 | * This program is distributed in the hope that it will be useful, but |
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * General Public License for more details. | 12 | * General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | 16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
17 | * 02110-1301, USA | 17 | * 02110-1301, USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include "main.h" | 20 | #include "main.h" |
21 | #include "soft-interface.h" | 21 | #include "soft-interface.h" |
22 | #include "hard-interface.h" | 22 | #include "hard-interface.h" |
23 | #include "distributed-arp-table.h" | 23 | #include "distributed-arp-table.h" |
24 | #include "routing.h" | 24 | #include "routing.h" |
25 | #include "send.h" | 25 | #include "send.h" |
26 | #include "debugfs.h" | 26 | #include "debugfs.h" |
27 | #include "translation-table.h" | 27 | #include "translation-table.h" |
28 | #include "hash.h" | 28 | #include "hash.h" |
29 | #include "gateway_common.h" | 29 | #include "gateway_common.h" |
30 | #include "gateway_client.h" | 30 | #include "gateway_client.h" |
31 | #include "sysfs.h" | 31 | #include "sysfs.h" |
32 | #include "originator.h" | 32 | #include "originator.h" |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/ethtool.h> | 34 | #include <linux/ethtool.h> |
35 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
36 | #include <linux/if_vlan.h> | 36 | #include <linux/if_vlan.h> |
37 | #include "bridge_loop_avoidance.h" | 37 | #include "bridge_loop_avoidance.h" |
38 | #include "network-coding.h" | 38 | #include "network-coding.h" |
39 | 39 | ||
40 | 40 | ||
41 | static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); | 41 | static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); |
42 | static void batadv_get_drvinfo(struct net_device *dev, | 42 | static void batadv_get_drvinfo(struct net_device *dev, |
43 | struct ethtool_drvinfo *info); | 43 | struct ethtool_drvinfo *info); |
44 | static u32 batadv_get_msglevel(struct net_device *dev); | 44 | static u32 batadv_get_msglevel(struct net_device *dev); |
45 | static void batadv_set_msglevel(struct net_device *dev, u32 value); | 45 | static void batadv_set_msglevel(struct net_device *dev, u32 value); |
46 | static u32 batadv_get_link(struct net_device *dev); | 46 | static u32 batadv_get_link(struct net_device *dev); |
47 | static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data); | 47 | static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data); |
48 | static void batadv_get_ethtool_stats(struct net_device *dev, | 48 | static void batadv_get_ethtool_stats(struct net_device *dev, |
49 | struct ethtool_stats *stats, u64 *data); | 49 | struct ethtool_stats *stats, u64 *data); |
50 | static int batadv_get_sset_count(struct net_device *dev, int stringset); | 50 | static int batadv_get_sset_count(struct net_device *dev, int stringset); |
51 | 51 | ||
52 | static const struct ethtool_ops batadv_ethtool_ops = { | 52 | static const struct ethtool_ops batadv_ethtool_ops = { |
53 | .get_settings = batadv_get_settings, | 53 | .get_settings = batadv_get_settings, |
54 | .get_drvinfo = batadv_get_drvinfo, | 54 | .get_drvinfo = batadv_get_drvinfo, |
55 | .get_msglevel = batadv_get_msglevel, | 55 | .get_msglevel = batadv_get_msglevel, |
56 | .set_msglevel = batadv_set_msglevel, | 56 | .set_msglevel = batadv_set_msglevel, |
57 | .get_link = batadv_get_link, | 57 | .get_link = batadv_get_link, |
58 | .get_strings = batadv_get_strings, | 58 | .get_strings = batadv_get_strings, |
59 | .get_ethtool_stats = batadv_get_ethtool_stats, | 59 | .get_ethtool_stats = batadv_get_ethtool_stats, |
60 | .get_sset_count = batadv_get_sset_count, | 60 | .get_sset_count = batadv_get_sset_count, |
61 | }; | 61 | }; |
62 | 62 | ||
63 | int batadv_skb_head_push(struct sk_buff *skb, unsigned int len) | 63 | int batadv_skb_head_push(struct sk_buff *skb, unsigned int len) |
64 | { | 64 | { |
65 | int result; | 65 | int result; |
66 | 66 | ||
67 | /* TODO: We must check if we can release all references to non-payload | 67 | /* TODO: We must check if we can release all references to non-payload |
68 | * data using skb_header_release in our skbs to allow skb_cow_header to | 68 | * data using skb_header_release in our skbs to allow skb_cow_header to |
69 | * work optimally. This means that those skbs are not allowed to read | 69 | * work optimally. This means that those skbs are not allowed to read |
70 | * or write any data which is before the current position of skb->data | 70 | * or write any data which is before the current position of skb->data |
71 | * after that call and thus allow other skbs with the same data buffer | 71 | * after that call and thus allow other skbs with the same data buffer |
72 | * to write freely in that area. | 72 | * to write freely in that area. |
73 | */ | 73 | */ |
74 | result = skb_cow_head(skb, len); | 74 | result = skb_cow_head(skb, len); |
75 | if (result < 0) | 75 | if (result < 0) |
76 | return result; | 76 | return result; |
77 | 77 | ||
78 | skb_push(skb, len); | 78 | skb_push(skb, len); |
79 | return 0; | 79 | return 0; |
80 | } | 80 | } |
81 | 81 | ||
82 | static int batadv_interface_open(struct net_device *dev) | 82 | static int batadv_interface_open(struct net_device *dev) |
83 | { | 83 | { |
84 | netif_start_queue(dev); | 84 | netif_start_queue(dev); |
85 | return 0; | 85 | return 0; |
86 | } | 86 | } |
87 | 87 | ||
88 | static int batadv_interface_release(struct net_device *dev) | 88 | static int batadv_interface_release(struct net_device *dev) |
89 | { | 89 | { |
90 | netif_stop_queue(dev); | 90 | netif_stop_queue(dev); |
91 | return 0; | 91 | return 0; |
92 | } | 92 | } |
93 | 93 | ||
94 | static struct net_device_stats *batadv_interface_stats(struct net_device *dev) | 94 | static struct net_device_stats *batadv_interface_stats(struct net_device *dev) |
95 | { | 95 | { |
96 | struct batadv_priv *bat_priv = netdev_priv(dev); | 96 | struct batadv_priv *bat_priv = netdev_priv(dev); |
97 | struct net_device_stats *stats = &bat_priv->stats; | 97 | struct net_device_stats *stats = &bat_priv->stats; |
98 | 98 | ||
99 | stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX); | 99 | stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX); |
100 | stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES); | 100 | stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES); |
101 | stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED); | 101 | stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED); |
102 | stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX); | 102 | stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX); |
103 | stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES); | 103 | stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES); |
104 | return stats; | 104 | return stats; |
105 | } | 105 | } |
106 | 106 | ||
107 | static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) | 107 | static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) |
108 | { | 108 | { |
109 | struct batadv_priv *bat_priv = netdev_priv(dev); | 109 | struct batadv_priv *bat_priv = netdev_priv(dev); |
110 | struct sockaddr *addr = p; | 110 | struct sockaddr *addr = p; |
111 | uint8_t old_addr[ETH_ALEN]; | 111 | uint8_t old_addr[ETH_ALEN]; |
112 | 112 | ||
113 | if (!is_valid_ether_addr(addr->sa_data)) | 113 | if (!is_valid_ether_addr(addr->sa_data)) |
114 | return -EADDRNOTAVAIL; | 114 | return -EADDRNOTAVAIL; |
115 | 115 | ||
116 | memcpy(old_addr, dev->dev_addr, ETH_ALEN); | 116 | memcpy(old_addr, dev->dev_addr, ETH_ALEN); |
117 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); | 117 | memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); |
118 | 118 | ||
119 | /* only modify transtable if it has been initialized before */ | 119 | /* only modify transtable if it has been initialized before */ |
120 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { | 120 | if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { |
121 | batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS, | 121 | batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS, |
122 | "mac address changed", false); | 122 | "mac address changed", false); |
123 | batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS, | 123 | batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS, |
124 | BATADV_NULL_IFINDEX); | 124 | BATADV_NULL_IFINDEX); |
125 | } | 125 | } |
126 | 126 | ||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) | 130 | static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) |
131 | { | 131 | { |
132 | /* check ranges */ | 132 | /* check ranges */ |
133 | if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev))) | 133 | if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev))) |
134 | return -EINVAL; | 134 | return -EINVAL; |
135 | 135 | ||
136 | dev->mtu = new_mtu; | 136 | dev->mtu = new_mtu; |
137 | 137 | ||
138 | return 0; | 138 | return 0; |
139 | } | 139 | } |
140 | 140 | ||
141 | /** | 141 | /** |
142 | * batadv_interface_set_rx_mode - set the rx mode of a device | 142 | * batadv_interface_set_rx_mode - set the rx mode of a device |
143 | * @dev: registered network device to modify | 143 | * @dev: registered network device to modify |
144 | * | 144 | * |
145 | * We do not actually need to set any rx filters for the virtual batman | 145 | * We do not actually need to set any rx filters for the virtual batman |
146 | * soft interface. However a dummy handler enables a user to set static | 146 | * soft interface. However a dummy handler enables a user to set static |
147 | * multicast listeners for instance. | 147 | * multicast listeners for instance. |
148 | */ | 148 | */ |
149 | static void batadv_interface_set_rx_mode(struct net_device *dev) | 149 | static void batadv_interface_set_rx_mode(struct net_device *dev) |
150 | { | 150 | { |
151 | } | 151 | } |
152 | 152 | ||
153 | static int batadv_interface_tx(struct sk_buff *skb, | 153 | static int batadv_interface_tx(struct sk_buff *skb, |
154 | struct net_device *soft_iface) | 154 | struct net_device *soft_iface) |
155 | { | 155 | { |
156 | struct ethhdr *ethhdr; | 156 | struct ethhdr *ethhdr; |
157 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 157 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
158 | struct batadv_hard_iface *primary_if = NULL; | 158 | struct batadv_hard_iface *primary_if = NULL; |
159 | struct batadv_bcast_packet *bcast_packet; | 159 | struct batadv_bcast_packet *bcast_packet; |
160 | __be16 ethertype = htons(ETH_P_BATMAN); | 160 | __be16 ethertype = htons(ETH_P_BATMAN); |
161 | static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, | 161 | static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, |
162 | 0x00, 0x00}; | 162 | 0x00, 0x00}; |
163 | static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, | 163 | static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00, |
164 | 0x00, 0x00}; | 164 | 0x00, 0x00}; |
165 | struct vlan_ethhdr *vhdr; | 165 | struct vlan_ethhdr *vhdr; |
166 | unsigned int header_len = 0; | 166 | unsigned int header_len = 0; |
167 | int data_len = skb->len, ret; | 167 | int data_len = skb->len, ret; |
168 | unsigned long brd_delay = 1; | 168 | unsigned long brd_delay = 1; |
169 | bool do_bcast = false; | 169 | bool do_bcast = false; |
170 | unsigned short vid; | 170 | unsigned short vid; |
171 | uint32_t seqno; | 171 | uint32_t seqno; |
172 | 172 | ||
173 | if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) | 173 | if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) |
174 | goto dropped; | 174 | goto dropped; |
175 | 175 | ||
176 | soft_iface->trans_start = jiffies; | 176 | soft_iface->trans_start = jiffies; |
177 | vid = batadv_get_vid(skb, 0); | 177 | vid = batadv_get_vid(skb, 0); |
178 | ethhdr = (struct ethhdr *)skb->data; | 178 | ethhdr = (struct ethhdr *)skb->data; |
179 | 179 | ||
180 | switch (ntohs(ethhdr->h_proto)) { | 180 | switch (ntohs(ethhdr->h_proto)) { |
181 | case ETH_P_8021Q: | 181 | case ETH_P_8021Q: |
182 | vhdr = (struct vlan_ethhdr *)skb->data; | 182 | vhdr = (struct vlan_ethhdr *)skb->data; |
183 | 183 | ||
184 | if (vhdr->h_vlan_encapsulated_proto != ethertype) | 184 | if (vhdr->h_vlan_encapsulated_proto != ethertype) |
185 | break; | 185 | break; |
186 | 186 | ||
187 | /* fall through */ | 187 | /* fall through */ |
188 | case ETH_P_BATMAN: | 188 | case ETH_P_BATMAN: |
189 | goto dropped; | 189 | goto dropped; |
190 | } | 190 | } |
191 | 191 | ||
192 | if (batadv_bla_tx(bat_priv, skb, vid)) | 192 | if (batadv_bla_tx(bat_priv, skb, vid)) |
193 | goto dropped; | 193 | goto dropped; |
194 | 194 | ||
195 | /* skb->data might have been reallocated by batadv_bla_tx() */ | 195 | /* skb->data might have been reallocated by batadv_bla_tx() */ |
196 | ethhdr = (struct ethhdr *)skb->data; | 196 | ethhdr = (struct ethhdr *)skb->data; |
197 | 197 | ||
198 | /* Register the client MAC in the transtable */ | 198 | /* Register the client MAC in the transtable */ |
199 | if (!is_multicast_ether_addr(ethhdr->h_source)) | 199 | if (!is_multicast_ether_addr(ethhdr->h_source)) |
200 | batadv_tt_local_add(soft_iface, ethhdr->h_source, vid, | 200 | batadv_tt_local_add(soft_iface, ethhdr->h_source, vid, |
201 | skb->skb_iif); | 201 | skb->skb_iif); |
202 | 202 | ||
203 | /* don't accept stp packets. STP does not help in meshes. | 203 | /* don't accept stp packets. STP does not help in meshes. |
204 | * better use the bridge loop avoidance ... | 204 | * better use the bridge loop avoidance ... |
205 | * | 205 | * |
206 | * The same goes for ECTP sent at least by some Cisco Switches, | 206 | * The same goes for ECTP sent at least by some Cisco Switches, |
207 | * it might confuse the mesh when used with bridge loop avoidance. | 207 | * it might confuse the mesh when used with bridge loop avoidance. |
208 | */ | 208 | */ |
209 | if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) | 209 | if (batadv_compare_eth(ethhdr->h_dest, stp_addr)) |
210 | goto dropped; | 210 | goto dropped; |
211 | 211 | ||
212 | if (batadv_compare_eth(ethhdr->h_dest, ectp_addr)) | 212 | if (batadv_compare_eth(ethhdr->h_dest, ectp_addr)) |
213 | goto dropped; | 213 | goto dropped; |
214 | 214 | ||
215 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 215 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
216 | do_bcast = true; | 216 | do_bcast = true; |
217 | 217 | ||
218 | switch (atomic_read(&bat_priv->gw_mode)) { | 218 | switch (atomic_read(&bat_priv->gw_mode)) { |
219 | case BATADV_GW_MODE_SERVER: | 219 | case BATADV_GW_MODE_SERVER: |
220 | /* gateway servers should not send dhcp | 220 | /* gateway servers should not send dhcp |
221 | * requests into the mesh | 221 | * requests into the mesh |
222 | */ | 222 | */ |
223 | ret = batadv_gw_is_dhcp_target(skb, &header_len); | 223 | ret = batadv_gw_is_dhcp_target(skb, &header_len); |
224 | if (ret) | 224 | if (ret) |
225 | goto dropped; | 225 | goto dropped; |
226 | break; | 226 | break; |
227 | case BATADV_GW_MODE_CLIENT: | 227 | case BATADV_GW_MODE_CLIENT: |
228 | /* gateway clients should send dhcp requests | 228 | /* gateway clients should send dhcp requests |
229 | * via unicast to their gateway | 229 | * via unicast to their gateway |
230 | */ | 230 | */ |
231 | ret = batadv_gw_is_dhcp_target(skb, &header_len); | 231 | ret = batadv_gw_is_dhcp_target(skb, &header_len); |
232 | if (ret) | 232 | if (ret) |
233 | do_bcast = false; | 233 | do_bcast = false; |
234 | break; | 234 | break; |
235 | case BATADV_GW_MODE_OFF: | 235 | case BATADV_GW_MODE_OFF: |
236 | default: | 236 | default: |
237 | break; | 237 | break; |
238 | } | 238 | } |
239 | 239 | ||
240 | /* reminder: ethhdr might have become unusable from here on | 240 | /* reminder: ethhdr might have become unusable from here on |
241 | * (batadv_gw_is_dhcp_target() might have reallocated skb data) | 241 | * (batadv_gw_is_dhcp_target() might have reallocated skb data) |
242 | */ | 242 | */ |
243 | } | 243 | } |
244 | 244 | ||
245 | batadv_skb_set_priority(skb, 0); | 245 | batadv_skb_set_priority(skb, 0); |
246 | 246 | ||
247 | /* ethernet packet should be broadcasted */ | 247 | /* ethernet packet should be broadcasted */ |
248 | if (do_bcast) { | 248 | if (do_bcast) { |
249 | primary_if = batadv_primary_if_get_selected(bat_priv); | 249 | primary_if = batadv_primary_if_get_selected(bat_priv); |
250 | if (!primary_if) | 250 | if (!primary_if) |
251 | goto dropped; | 251 | goto dropped; |
252 | 252 | ||
253 | /* in case of ARP request, we do not immediately broadcasti the | 253 | /* in case of ARP request, we do not immediately broadcasti the |
254 | * packet, instead we first wait for DAT to try to retrieve the | 254 | * packet, instead we first wait for DAT to try to retrieve the |
255 | * correct ARP entry | 255 | * correct ARP entry |
256 | */ | 256 | */ |
257 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) | 257 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) |
258 | brd_delay = msecs_to_jiffies(ARP_REQ_DELAY); | 258 | brd_delay = msecs_to_jiffies(ARP_REQ_DELAY); |
259 | 259 | ||
260 | if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) | 260 | if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) |
261 | goto dropped; | 261 | goto dropped; |
262 | 262 | ||
263 | bcast_packet = (struct batadv_bcast_packet *)skb->data; | 263 | bcast_packet = (struct batadv_bcast_packet *)skb->data; |
264 | bcast_packet->header.version = BATADV_COMPAT_VERSION; | 264 | bcast_packet->header.version = BATADV_COMPAT_VERSION; |
265 | bcast_packet->header.ttl = BATADV_TTL; | 265 | bcast_packet->header.ttl = BATADV_TTL; |
266 | 266 | ||
267 | /* batman packet type: broadcast */ | 267 | /* batman packet type: broadcast */ |
268 | bcast_packet->header.packet_type = BATADV_BCAST; | 268 | bcast_packet->header.packet_type = BATADV_BCAST; |
269 | bcast_packet->reserved = 0; | 269 | bcast_packet->reserved = 0; |
270 | 270 | ||
271 | /* hw address of first interface is the orig mac because only | 271 | /* hw address of first interface is the orig mac because only |
272 | * this mac is known throughout the mesh | 272 | * this mac is known throughout the mesh |
273 | */ | 273 | */ |
274 | memcpy(bcast_packet->orig, | 274 | memcpy(bcast_packet->orig, |
275 | primary_if->net_dev->dev_addr, ETH_ALEN); | 275 | primary_if->net_dev->dev_addr, ETH_ALEN); |
276 | 276 | ||
277 | /* set broadcast sequence number */ | 277 | /* set broadcast sequence number */ |
278 | seqno = atomic_inc_return(&bat_priv->bcast_seqno); | 278 | seqno = atomic_inc_return(&bat_priv->bcast_seqno); |
279 | bcast_packet->seqno = htonl(seqno); | 279 | bcast_packet->seqno = htonl(seqno); |
280 | 280 | ||
281 | batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay); | 281 | batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay); |
282 | 282 | ||
283 | /* a copy is stored in the bcast list, therefore removing | 283 | /* a copy is stored in the bcast list, therefore removing |
284 | * the original skb. | 284 | * the original skb. |
285 | */ | 285 | */ |
286 | kfree_skb(skb); | 286 | kfree_skb(skb); |
287 | 287 | ||
288 | /* unicast packet */ | 288 | /* unicast packet */ |
289 | } else { | 289 | } else { |
290 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { | 290 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { |
291 | ret = batadv_gw_out_of_range(bat_priv, skb); | 291 | ret = batadv_gw_out_of_range(bat_priv, skb); |
292 | if (ret) | 292 | if (ret) |
293 | goto dropped; | 293 | goto dropped; |
294 | } | 294 | } |
295 | 295 | ||
296 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) | 296 | if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb)) |
297 | goto dropped; | 297 | goto dropped; |
298 | 298 | ||
299 | batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb); | 299 | batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb); |
300 | 300 | ||
301 | ret = batadv_send_skb_unicast(bat_priv, skb, vid); | 301 | if (is_multicast_ether_addr(ethhdr->h_dest)) |
302 | if (ret != 0) | 302 | ret = batadv_send_skb_via_gw(bat_priv, skb, vid); |
303 | else | ||
304 | ret = batadv_send_skb_via_tt(bat_priv, skb, vid); | ||
305 | |||
306 | if (ret == NET_XMIT_DROP) | ||
303 | goto dropped_freed; | 307 | goto dropped_freed; |
304 | } | 308 | } |
305 | 309 | ||
306 | batadv_inc_counter(bat_priv, BATADV_CNT_TX); | 310 | batadv_inc_counter(bat_priv, BATADV_CNT_TX); |
307 | batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len); | 311 | batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len); |
308 | goto end; | 312 | goto end; |
309 | 313 | ||
310 | dropped: | 314 | dropped: |
311 | kfree_skb(skb); | 315 | kfree_skb(skb); |
312 | dropped_freed: | 316 | dropped_freed: |
313 | batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); | 317 | batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED); |
314 | end: | 318 | end: |
315 | if (primary_if) | 319 | if (primary_if) |
316 | batadv_hardif_free_ref(primary_if); | 320 | batadv_hardif_free_ref(primary_if); |
317 | return NETDEV_TX_OK; | 321 | return NETDEV_TX_OK; |
318 | } | 322 | } |
319 | 323 | ||
320 | void batadv_interface_rx(struct net_device *soft_iface, | 324 | void batadv_interface_rx(struct net_device *soft_iface, |
321 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, | 325 | struct sk_buff *skb, struct batadv_hard_iface *recv_if, |
322 | int hdr_size, struct batadv_orig_node *orig_node) | 326 | int hdr_size, struct batadv_orig_node *orig_node) |
323 | { | 327 | { |
324 | struct batadv_header *batadv_header = (struct batadv_header *)skb->data; | 328 | struct batadv_header *batadv_header = (struct batadv_header *)skb->data; |
325 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 329 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
326 | __be16 ethertype = htons(ETH_P_BATMAN); | 330 | __be16 ethertype = htons(ETH_P_BATMAN); |
327 | struct vlan_ethhdr *vhdr; | 331 | struct vlan_ethhdr *vhdr; |
328 | struct ethhdr *ethhdr; | 332 | struct ethhdr *ethhdr; |
329 | unsigned short vid; | 333 | unsigned short vid; |
330 | bool is_bcast; | 334 | bool is_bcast; |
331 | 335 | ||
332 | is_bcast = (batadv_header->packet_type == BATADV_BCAST); | 336 | is_bcast = (batadv_header->packet_type == BATADV_BCAST); |
333 | 337 | ||
334 | /* check if enough space is available for pulling, and pull */ | 338 | /* check if enough space is available for pulling, and pull */ |
335 | if (!pskb_may_pull(skb, hdr_size)) | 339 | if (!pskb_may_pull(skb, hdr_size)) |
336 | goto dropped; | 340 | goto dropped; |
337 | 341 | ||
338 | skb_pull_rcsum(skb, hdr_size); | 342 | skb_pull_rcsum(skb, hdr_size); |
339 | skb_reset_mac_header(skb); | 343 | skb_reset_mac_header(skb); |
340 | 344 | ||
341 | vid = batadv_get_vid(skb, hdr_size); | 345 | vid = batadv_get_vid(skb, hdr_size); |
342 | ethhdr = eth_hdr(skb); | 346 | ethhdr = eth_hdr(skb); |
343 | 347 | ||
344 | switch (ntohs(ethhdr->h_proto)) { | 348 | switch (ntohs(ethhdr->h_proto)) { |
345 | case ETH_P_8021Q: | 349 | case ETH_P_8021Q: |
346 | vhdr = (struct vlan_ethhdr *)skb->data; | 350 | vhdr = (struct vlan_ethhdr *)skb->data; |
347 | 351 | ||
348 | if (vhdr->h_vlan_encapsulated_proto != ethertype) | 352 | if (vhdr->h_vlan_encapsulated_proto != ethertype) |
349 | break; | 353 | break; |
350 | 354 | ||
351 | /* fall through */ | 355 | /* fall through */ |
352 | case ETH_P_BATMAN: | 356 | case ETH_P_BATMAN: |
353 | goto dropped; | 357 | goto dropped; |
354 | } | 358 | } |
355 | 359 | ||
356 | /* skb->dev & skb->pkt_type are set here */ | 360 | /* skb->dev & skb->pkt_type are set here */ |
357 | if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) | 361 | if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) |
358 | goto dropped; | 362 | goto dropped; |
359 | skb->protocol = eth_type_trans(skb, soft_iface); | 363 | skb->protocol = eth_type_trans(skb, soft_iface); |
360 | 364 | ||
361 | /* should not be necessary anymore as we use skb_pull_rcsum() | 365 | /* should not be necessary anymore as we use skb_pull_rcsum() |
362 | * TODO: please verify this and remove this TODO | 366 | * TODO: please verify this and remove this TODO |
363 | * -- Dec 21st 2009, Simon Wunderlich | 367 | * -- Dec 21st 2009, Simon Wunderlich |
364 | */ | 368 | */ |
365 | 369 | ||
366 | /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ | 370 | /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ |
367 | 371 | ||
368 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); | 372 | batadv_inc_counter(bat_priv, BATADV_CNT_RX); |
369 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, | 373 | batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, |
370 | skb->len + ETH_HLEN); | 374 | skb->len + ETH_HLEN); |
371 | 375 | ||
372 | soft_iface->last_rx = jiffies; | 376 | soft_iface->last_rx = jiffies; |
373 | 377 | ||
374 | /* Let the bridge loop avoidance check the packet. If will | 378 | /* Let the bridge loop avoidance check the packet. If will |
375 | * not handle it, we can safely push it up. | 379 | * not handle it, we can safely push it up. |
376 | */ | 380 | */ |
377 | if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) | 381 | if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) |
378 | goto out; | 382 | goto out; |
379 | 383 | ||
380 | if (orig_node) | 384 | if (orig_node) |
381 | batadv_tt_add_temporary_global_entry(bat_priv, orig_node, | 385 | batadv_tt_add_temporary_global_entry(bat_priv, orig_node, |
382 | ethhdr->h_source, vid); | 386 | ethhdr->h_source, vid); |
383 | 387 | ||
384 | if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest, | 388 | if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest, |
385 | vid)) | 389 | vid)) |
386 | goto dropped; | 390 | goto dropped; |
387 | 391 | ||
388 | netif_rx(skb); | 392 | netif_rx(skb); |
389 | goto out; | 393 | goto out; |
390 | 394 | ||
391 | dropped: | 395 | dropped: |
392 | kfree_skb(skb); | 396 | kfree_skb(skb); |
393 | out: | 397 | out: |
394 | return; | 398 | return; |
395 | } | 399 | } |
396 | 400 | ||
397 | /** | 401 | /** |
398 | * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and | 402 | * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and |
399 | * possibly free it | 403 | * possibly free it |
400 | * @softif_vlan: the vlan object to release | 404 | * @softif_vlan: the vlan object to release |
401 | */ | 405 | */ |
402 | void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) | 406 | void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) |
403 | { | 407 | { |
404 | if (atomic_dec_and_test(&softif_vlan->refcount)) | 408 | if (atomic_dec_and_test(&softif_vlan->refcount)) |
405 | kfree_rcu(softif_vlan, rcu); | 409 | kfree_rcu(softif_vlan, rcu); |
406 | } | 410 | } |
407 | 411 | ||
408 | /** | 412 | /** |
409 | * batadv_softif_vlan_get - get the vlan object for a specific vid | 413 | * batadv_softif_vlan_get - get the vlan object for a specific vid |
410 | * @bat_priv: the bat priv with all the soft interface information | 414 | * @bat_priv: the bat priv with all the soft interface information |
411 | * @vid: the identifier of the vlan object to retrieve | 415 | * @vid: the identifier of the vlan object to retrieve |
412 | * | 416 | * |
413 | * Returns the private data of the vlan matching the vid passed as argument or | 417 | * Returns the private data of the vlan matching the vid passed as argument or |
414 | * NULL otherwise. The refcounter of the returned object is incremented by 1. | 418 | * NULL otherwise. The refcounter of the returned object is incremented by 1. |
415 | */ | 419 | */ |
416 | struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, | 420 | struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, |
417 | unsigned short vid) | 421 | unsigned short vid) |
418 | { | 422 | { |
419 | struct batadv_softif_vlan *vlan_tmp, *vlan = NULL; | 423 | struct batadv_softif_vlan *vlan_tmp, *vlan = NULL; |
420 | 424 | ||
421 | rcu_read_lock(); | 425 | rcu_read_lock(); |
422 | hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) { | 426 | hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) { |
423 | if (vlan_tmp->vid != vid) | 427 | if (vlan_tmp->vid != vid) |
424 | continue; | 428 | continue; |
425 | 429 | ||
426 | if (!atomic_inc_not_zero(&vlan_tmp->refcount)) | 430 | if (!atomic_inc_not_zero(&vlan_tmp->refcount)) |
427 | continue; | 431 | continue; |
428 | 432 | ||
429 | vlan = vlan_tmp; | 433 | vlan = vlan_tmp; |
430 | break; | 434 | break; |
431 | } | 435 | } |
432 | rcu_read_unlock(); | 436 | rcu_read_unlock(); |
433 | 437 | ||
434 | return vlan; | 438 | return vlan; |
435 | } | 439 | } |
436 | 440 | ||
/**
 * batadv_softif_create_vlan - allocate the needed resources for a new vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier
 *
 * Returns 0 on success, a negative error otherwise (-EEXIST if a vlan object
 * for @vid already exists, -ENOMEM on allocation failure, or the sysfs error).
 */
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;
	int err;

	/* refuse duplicates; the lookup took a reference that must be dropped
	 * again here.
	 * NOTE(review): this existence check and the list insertion below are
	 * not performed under softif_vlan_list_lock, so two concurrent callers
	 * could both pass the check - confirm callers serialize (e.g. rtnl)
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		batadv_softif_vlan_free_ref(vlan);
		return -EEXIST;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	/* the initial reference is owned by the softif_vlan_list */
	atomic_set(&vlan->refcount, 1);

	atomic_set(&vlan->ap_isolation, 0);

	/* expose the per-vlan knobs via sysfs before publishing the object */
	err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
	if (err) {
		kfree(vlan);
		return err;
	}

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX);

	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	return 0;
}
483 | 487 | ||
/**
 * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
 * @bat_priv: the bat priv with all the soft interface information
 * @vlan: the object to remove
 *
 * Drops the reference owned by the softif_vlan_list; a caller still holding
 * its own reference (e.g. from batadv_softif_vlan_get()) must release it
 * separately.
 */
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
				       struct batadv_softif_vlan *vlan)
{
	/* unlink first so new RCU lookups can no longer find the vlan */
	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
	hlist_del_rcu(&vlan->list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	batadv_sysfs_del_vlan(bat_priv, vlan);

	/* explicitly remove the associated TT local entry because it is marked
	 * with the NOPURGE flag
	 */
	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
			       vlan->vid, "vlan interface destroyed", false);

	/* release the list's reference */
	batadv_softif_vlan_free_ref(vlan);
}
506 | 510 | ||
507 | /** | 511 | /** |
508 | * batadv_interface_add_vid - ndo_add_vid API implementation | 512 | * batadv_interface_add_vid - ndo_add_vid API implementation |
509 | * @dev: the netdev of the mesh interface | 513 | * @dev: the netdev of the mesh interface |
510 | * @vid: identifier of the new vlan | 514 | * @vid: identifier of the new vlan |
511 | * | 515 | * |
512 | * Set up all the internal structures for handling the new vlan on top of the | 516 | * Set up all the internal structures for handling the new vlan on top of the |
513 | * mesh interface | 517 | * mesh interface |
514 | * | 518 | * |
515 | * Returns 0 on success or a negative error code in case of failure. | 519 | * Returns 0 on success or a negative error code in case of failure. |
516 | */ | 520 | */ |
517 | static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, | 521 | static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, |
518 | unsigned short vid) | 522 | unsigned short vid) |
519 | { | 523 | { |
520 | struct batadv_priv *bat_priv = netdev_priv(dev); | 524 | struct batadv_priv *bat_priv = netdev_priv(dev); |
521 | 525 | ||
522 | /* only 802.1Q vlans are supported. | 526 | /* only 802.1Q vlans are supported. |
523 | * batman-adv does not know how to handle other types | 527 | * batman-adv does not know how to handle other types |
524 | */ | 528 | */ |
525 | if (proto != htons(ETH_P_8021Q)) | 529 | if (proto != htons(ETH_P_8021Q)) |
526 | return -EINVAL; | 530 | return -EINVAL; |
527 | 531 | ||
528 | vid |= BATADV_VLAN_HAS_TAG; | 532 | vid |= BATADV_VLAN_HAS_TAG; |
529 | 533 | ||
530 | return batadv_softif_create_vlan(bat_priv, vid); | 534 | return batadv_softif_create_vlan(bat_priv, vid); |
531 | } | 535 | } |
532 | 536 | ||
533 | /** | 537 | /** |
534 | * batadv_interface_kill_vid - ndo_kill_vid API implementation | 538 | * batadv_interface_kill_vid - ndo_kill_vid API implementation |
535 | * @dev: the netdev of the mesh interface | 539 | * @dev: the netdev of the mesh interface |
536 | * @vid: identifier of the deleted vlan | 540 | * @vid: identifier of the deleted vlan |
537 | * | 541 | * |
538 | * Destroy all the internal structures used to handle the vlan identified by vid | 542 | * Destroy all the internal structures used to handle the vlan identified by vid |
539 | * on top of the mesh interface | 543 | * on top of the mesh interface |
540 | * | 544 | * |
541 | * Returns 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q | 545 | * Returns 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q |
542 | * or -ENOENT if the specified vlan id wasn't registered. | 546 | * or -ENOENT if the specified vlan id wasn't registered. |
543 | */ | 547 | */ |
544 | static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto, | 548 | static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto, |
545 | unsigned short vid) | 549 | unsigned short vid) |
546 | { | 550 | { |
547 | struct batadv_priv *bat_priv = netdev_priv(dev); | 551 | struct batadv_priv *bat_priv = netdev_priv(dev); |
548 | struct batadv_softif_vlan *vlan; | 552 | struct batadv_softif_vlan *vlan; |
549 | 553 | ||
550 | /* only 802.1Q vlans are supported. batman-adv does not know how to | 554 | /* only 802.1Q vlans are supported. batman-adv does not know how to |
551 | * handle other types | 555 | * handle other types |
552 | */ | 556 | */ |
553 | if (proto != htons(ETH_P_8021Q)) | 557 | if (proto != htons(ETH_P_8021Q)) |
554 | return -EINVAL; | 558 | return -EINVAL; |
555 | 559 | ||
556 | vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); | 560 | vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); |
557 | if (!vlan) | 561 | if (!vlan) |
558 | return -ENOENT; | 562 | return -ENOENT; |
559 | 563 | ||
560 | batadv_softif_destroy_vlan(bat_priv, vlan); | 564 | batadv_softif_destroy_vlan(bat_priv, vlan); |
561 | 565 | ||
562 | /* finally free the vlan object */ | 566 | /* finally free the vlan object */ |
563 | batadv_softif_vlan_free_ref(vlan); | 567 | batadv_softif_vlan_free_ref(vlan); |
564 | 568 | ||
565 | return 0; | 569 | return 0; |
566 | } | 570 | } |
567 | 571 | ||
/* batman-adv network devices have devices nesting below it and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest. Without distinct lock classes,
 * lockdep would presumably flag the nested xmit/addr-list locking as
 * recursive - NOTE(review): confirm against the lockdep documentation.
 */
static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;
574 | 578 | ||
/**
 * batadv_set_lockdep_class_one - Set lockdep class for a single tx queue
 * @dev: device which owns the tx queue
 * @txq: tx queue to modify
 * @_unused: always NULL
 *
 * Callback for netdev_for_each_tx_queue(); see batadv_set_lockdep_class().
 */
static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}
587 | 591 | ||
/**
 * batadv_set_lockdep_class - Set txq and addr_list lockdep class
 * @dev: network device to modify
 *
 * Moves the address-list lock and every tx queue lock of @dev into the
 * batman-adv specific lockdep classes declared above.
 */
static void batadv_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}
597 | 601 | ||
/**
 * batadv_softif_destroy_finish - cleans up the remains of a softif
 * @work: work queue item
 *
 * Free the parts of the soft interface which can not be removed under
 * rtnl lock (to prevent deadlock situations).
 */
static void batadv_softif_destroy_finish(struct work_struct *work)
{
	struct batadv_softif_vlan *vlan;
	struct batadv_priv *bat_priv;
	struct net_device *soft_iface;

	/* recover the bat_priv this cleanup work was scheduled for */
	bat_priv = container_of(work, struct batadv_priv,
				cleanup_work);
	soft_iface = bat_priv->soft_iface;

	/* destroy the "untagged" VLAN */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		/* drop the reference taken by batadv_softif_vlan_get() */
		batadv_softif_vlan_free_ref(vlan);
	}

	batadv_sysfs_del_meshif(soft_iface);

	/* unregister_netdevice() requires the rtnl lock, which is taken here
	 * in the work context
	 */
	rtnl_lock();
	unregister_netdevice(soft_iface);
	rtnl_unlock();
}
628 | 632 | ||
629 | /** | 633 | /** |
630 | * batadv_softif_init_late - late stage initialization of soft interface | 634 | * batadv_softif_init_late - late stage initialization of soft interface |
631 | * @dev: registered network device to modify | 635 | * @dev: registered network device to modify |
632 | * | 636 | * |
633 | * Returns error code on failures | 637 | * Returns error code on failures |
634 | */ | 638 | */ |
635 | static int batadv_softif_init_late(struct net_device *dev) | 639 | static int batadv_softif_init_late(struct net_device *dev) |
636 | { | 640 | { |
637 | struct batadv_priv *bat_priv; | 641 | struct batadv_priv *bat_priv; |
638 | uint32_t random_seqno; | 642 | uint32_t random_seqno; |
639 | int ret; | 643 | int ret; |
640 | size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM; | 644 | size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM; |
641 | 645 | ||
642 | batadv_set_lockdep_class(dev); | 646 | batadv_set_lockdep_class(dev); |
643 | 647 | ||
644 | bat_priv = netdev_priv(dev); | 648 | bat_priv = netdev_priv(dev); |
645 | bat_priv->soft_iface = dev; | 649 | bat_priv->soft_iface = dev; |
646 | INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish); | 650 | INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish); |
647 | 651 | ||
648 | /* batadv_interface_stats() needs to be available as soon as | 652 | /* batadv_interface_stats() needs to be available as soon as |
649 | * register_netdevice() has been called | 653 | * register_netdevice() has been called |
650 | */ | 654 | */ |
651 | bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); | 655 | bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); |
652 | if (!bat_priv->bat_counters) | 656 | if (!bat_priv->bat_counters) |
653 | return -ENOMEM; | 657 | return -ENOMEM; |
654 | 658 | ||
655 | atomic_set(&bat_priv->aggregated_ogms, 1); | 659 | atomic_set(&bat_priv->aggregated_ogms, 1); |
656 | atomic_set(&bat_priv->bonding, 0); | 660 | atomic_set(&bat_priv->bonding, 0); |
657 | #ifdef CONFIG_BATMAN_ADV_BLA | 661 | #ifdef CONFIG_BATMAN_ADV_BLA |
658 | atomic_set(&bat_priv->bridge_loop_avoidance, 0); | 662 | atomic_set(&bat_priv->bridge_loop_avoidance, 0); |
659 | #endif | 663 | #endif |
660 | #ifdef CONFIG_BATMAN_ADV_DAT | 664 | #ifdef CONFIG_BATMAN_ADV_DAT |
661 | atomic_set(&bat_priv->distributed_arp_table, 1); | 665 | atomic_set(&bat_priv->distributed_arp_table, 1); |
662 | #endif | 666 | #endif |
663 | atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); | 667 | atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF); |
664 | atomic_set(&bat_priv->gw_sel_class, 20); | 668 | atomic_set(&bat_priv->gw_sel_class, 20); |
665 | atomic_set(&bat_priv->gw.bandwidth_down, 100); | 669 | atomic_set(&bat_priv->gw.bandwidth_down, 100); |
666 | atomic_set(&bat_priv->gw.bandwidth_up, 20); | 670 | atomic_set(&bat_priv->gw.bandwidth_up, 20); |
667 | atomic_set(&bat_priv->orig_interval, 1000); | 671 | atomic_set(&bat_priv->orig_interval, 1000); |
668 | atomic_set(&bat_priv->hop_penalty, 30); | 672 | atomic_set(&bat_priv->hop_penalty, 30); |
669 | #ifdef CONFIG_BATMAN_ADV_DEBUG | 673 | #ifdef CONFIG_BATMAN_ADV_DEBUG |
670 | atomic_set(&bat_priv->log_level, 0); | 674 | atomic_set(&bat_priv->log_level, 0); |
671 | #endif | 675 | #endif |
672 | atomic_set(&bat_priv->fragmentation, 1); | 676 | atomic_set(&bat_priv->fragmentation, 1); |
673 | atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN); | 677 | atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN); |
674 | atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN); | 678 | atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN); |
675 | 679 | ||
676 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); | 680 | atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); |
677 | atomic_set(&bat_priv->bcast_seqno, 1); | 681 | atomic_set(&bat_priv->bcast_seqno, 1); |
678 | atomic_set(&bat_priv->tt.vn, 0); | 682 | atomic_set(&bat_priv->tt.vn, 0); |
679 | atomic_set(&bat_priv->tt.local_changes, 0); | 683 | atomic_set(&bat_priv->tt.local_changes, 0); |
680 | atomic_set(&bat_priv->tt.ogm_append_cnt, 0); | 684 | atomic_set(&bat_priv->tt.ogm_append_cnt, 0); |
681 | #ifdef CONFIG_BATMAN_ADV_BLA | 685 | #ifdef CONFIG_BATMAN_ADV_BLA |
682 | atomic_set(&bat_priv->bla.num_requests, 0); | 686 | atomic_set(&bat_priv->bla.num_requests, 0); |
683 | #endif | 687 | #endif |
684 | bat_priv->tt.last_changeset = NULL; | 688 | bat_priv->tt.last_changeset = NULL; |
685 | bat_priv->tt.last_changeset_len = 0; | 689 | bat_priv->tt.last_changeset_len = 0; |
686 | 690 | ||
687 | /* randomize initial seqno to avoid collision */ | 691 | /* randomize initial seqno to avoid collision */ |
688 | get_random_bytes(&random_seqno, sizeof(random_seqno)); | 692 | get_random_bytes(&random_seqno, sizeof(random_seqno)); |
689 | atomic_set(&bat_priv->frag_seqno, random_seqno); | 693 | atomic_set(&bat_priv->frag_seqno, random_seqno); |
690 | 694 | ||
691 | bat_priv->primary_if = NULL; | 695 | bat_priv->primary_if = NULL; |
692 | bat_priv->num_ifaces = 0; | 696 | bat_priv->num_ifaces = 0; |
693 | 697 | ||
694 | batadv_nc_init_bat_priv(bat_priv); | 698 | batadv_nc_init_bat_priv(bat_priv); |
695 | 699 | ||
696 | ret = batadv_algo_select(bat_priv, batadv_routing_algo); | 700 | ret = batadv_algo_select(bat_priv, batadv_routing_algo); |
697 | if (ret < 0) | 701 | if (ret < 0) |
698 | goto free_bat_counters; | 702 | goto free_bat_counters; |
699 | 703 | ||
700 | ret = batadv_debugfs_add_meshif(dev); | 704 | ret = batadv_debugfs_add_meshif(dev); |
701 | if (ret < 0) | 705 | if (ret < 0) |
702 | goto free_bat_counters; | 706 | goto free_bat_counters; |
703 | 707 | ||
704 | ret = batadv_mesh_init(dev); | 708 | ret = batadv_mesh_init(dev); |
705 | if (ret < 0) | 709 | if (ret < 0) |
706 | goto unreg_debugfs; | 710 | goto unreg_debugfs; |
707 | 711 | ||
708 | return 0; | 712 | return 0; |
709 | 713 | ||
710 | unreg_debugfs: | 714 | unreg_debugfs: |
711 | batadv_debugfs_del_meshif(dev); | 715 | batadv_debugfs_del_meshif(dev); |
712 | free_bat_counters: | 716 | free_bat_counters: |
713 | free_percpu(bat_priv->bat_counters); | 717 | free_percpu(bat_priv->bat_counters); |
714 | bat_priv->bat_counters = NULL; | 718 | bat_priv->bat_counters = NULL; |
715 | 719 | ||
716 | return ret; | 720 | return ret; |
717 | } | 721 | } |
718 | 722 | ||
719 | /** | 723 | /** |
720 | * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface | 724 | * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface |
721 | * @dev: batadv_soft_interface used as master interface | 725 | * @dev: batadv_soft_interface used as master interface |
722 | * @slave_dev: net_device which should become the slave interface | 726 | * @slave_dev: net_device which should become the slave interface |
723 | * | 727 | * |
724 | * Return 0 if successful or error otherwise. | 728 | * Return 0 if successful or error otherwise. |
725 | */ | 729 | */ |
726 | static int batadv_softif_slave_add(struct net_device *dev, | 730 | static int batadv_softif_slave_add(struct net_device *dev, |
727 | struct net_device *slave_dev) | 731 | struct net_device *slave_dev) |
728 | { | 732 | { |
729 | struct batadv_hard_iface *hard_iface; | 733 | struct batadv_hard_iface *hard_iface; |
730 | int ret = -EINVAL; | 734 | int ret = -EINVAL; |
731 | 735 | ||
732 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); | 736 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); |
733 | if (!hard_iface || hard_iface->soft_iface != NULL) | 737 | if (!hard_iface || hard_iface->soft_iface != NULL) |
734 | goto out; | 738 | goto out; |
735 | 739 | ||
736 | ret = batadv_hardif_enable_interface(hard_iface, dev->name); | 740 | ret = batadv_hardif_enable_interface(hard_iface, dev->name); |
737 | 741 | ||
738 | out: | 742 | out: |
739 | if (hard_iface) | 743 | if (hard_iface) |
740 | batadv_hardif_free_ref(hard_iface); | 744 | batadv_hardif_free_ref(hard_iface); |
741 | return ret; | 745 | return ret; |
742 | } | 746 | } |
743 | 747 | ||
744 | /** | 748 | /** |
745 | * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface | 749 | * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface |
746 | * @dev: batadv_soft_interface used as master interface | 750 | * @dev: batadv_soft_interface used as master interface |
747 | * @slave_dev: net_device which should be removed from the master interface | 751 | * @slave_dev: net_device which should be removed from the master interface |
748 | * | 752 | * |
749 | * Return 0 if successful or error otherwise. | 753 | * Return 0 if successful or error otherwise. |
750 | */ | 754 | */ |
751 | static int batadv_softif_slave_del(struct net_device *dev, | 755 | static int batadv_softif_slave_del(struct net_device *dev, |
752 | struct net_device *slave_dev) | 756 | struct net_device *slave_dev) |
753 | { | 757 | { |
754 | struct batadv_hard_iface *hard_iface; | 758 | struct batadv_hard_iface *hard_iface; |
755 | int ret = -EINVAL; | 759 | int ret = -EINVAL; |
756 | 760 | ||
757 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); | 761 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); |
758 | 762 | ||
759 | if (!hard_iface || hard_iface->soft_iface != dev) | 763 | if (!hard_iface || hard_iface->soft_iface != dev) |
760 | goto out; | 764 | goto out; |
761 | 765 | ||
762 | batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP); | 766 | batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP); |
763 | ret = 0; | 767 | ret = 0; |
764 | 768 | ||
765 | out: | 769 | out: |
766 | if (hard_iface) | 770 | if (hard_iface) |
767 | batadv_hardif_free_ref(hard_iface); | 771 | batadv_hardif_free_ref(hard_iface); |
768 | return ret; | 772 | return ret; |
769 | } | 773 | } |
770 | 774 | ||
/* net_device callbacks of a batman-adv soft (mesh) interface */
static const struct net_device_ops batadv_netdev_ops = {
	.ndo_init = batadv_softif_init_late,
	.ndo_open = batadv_interface_open,
	.ndo_stop = batadv_interface_release,
	.ndo_get_stats = batadv_interface_stats,
	.ndo_vlan_rx_add_vid = batadv_interface_add_vid,
	.ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
	.ndo_set_mac_address = batadv_interface_set_mac_addr,
	.ndo_change_mtu = batadv_interface_change_mtu,
	.ndo_set_rx_mode = batadv_interface_set_rx_mode,
	.ndo_start_xmit = batadv_interface_tx,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_add_slave = batadv_softif_slave_add,
	.ndo_del_slave = batadv_softif_slave_del,
};
786 | 790 | ||
/**
 * batadv_softif_free - Deconstructor of batadv_soft_interface
 * @dev: Device to cleanup and remove
 */
static void batadv_softif_free(struct net_device *dev)
{
	/* tear down debugfs entries and the mesh state before the netdev
	 * memory goes away
	 */
	batadv_debugfs_del_meshif(dev);
	batadv_mesh_free(dev);

	/* some scheduled RCU callbacks need the bat_priv struct to accomplish
	 * their tasks. Wait for them all to be finished before freeing the
	 * netdev and its private data (bat_priv)
	 */
	rcu_barrier();

	free_netdev(dev);
}
804 | 808 | ||
805 | /** | 809 | /** |
806 | * batadv_softif_init_early - early stage initialization of soft interface | 810 | * batadv_softif_init_early - early stage initialization of soft interface |
807 | * @dev: registered network device to modify | 811 | * @dev: registered network device to modify |
808 | */ | 812 | */ |
809 | static void batadv_softif_init_early(struct net_device *dev) | 813 | static void batadv_softif_init_early(struct net_device *dev) |
810 | { | 814 | { |
811 | struct batadv_priv *priv = netdev_priv(dev); | 815 | struct batadv_priv *priv = netdev_priv(dev); |
812 | 816 | ||
813 | ether_setup(dev); | 817 | ether_setup(dev); |
814 | 818 | ||
815 | dev->netdev_ops = &batadv_netdev_ops; | 819 | dev->netdev_ops = &batadv_netdev_ops; |
816 | dev->destructor = batadv_softif_free; | 820 | dev->destructor = batadv_softif_free; |
817 | dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | 821 | dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
818 | dev->tx_queue_len = 0; | 822 | dev->tx_queue_len = 0; |
819 | 823 | ||
820 | /* can't call min_mtu, because the needed variables | 824 | /* can't call min_mtu, because the needed variables |
821 | * have not been initialized yet | 825 | * have not been initialized yet |
822 | */ | 826 | */ |
823 | dev->mtu = ETH_DATA_LEN; | 827 | dev->mtu = ETH_DATA_LEN; |
824 | /* reserve more space in the skbuff for our header */ | 828 | /* reserve more space in the skbuff for our header */ |
825 | dev->hard_header_len = batadv_max_header_len(); | 829 | dev->hard_header_len = batadv_max_header_len(); |
826 | 830 | ||
827 | /* generate random address */ | 831 | /* generate random address */ |
828 | eth_hw_addr_random(dev); | 832 | eth_hw_addr_random(dev); |
829 | 833 | ||
830 | SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops); | 834 | SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops); |
831 | 835 | ||
832 | memset(priv, 0, sizeof(*priv)); | 836 | memset(priv, 0, sizeof(*priv)); |
833 | } | 837 | } |
834 | 838 | ||
835 | struct net_device *batadv_softif_create(const char *name) | 839 | struct net_device *batadv_softif_create(const char *name) |
836 | { | 840 | { |
837 | struct net_device *soft_iface; | 841 | struct net_device *soft_iface; |
838 | int ret; | 842 | int ret; |
839 | 843 | ||
840 | soft_iface = alloc_netdev(sizeof(struct batadv_priv), name, | 844 | soft_iface = alloc_netdev(sizeof(struct batadv_priv), name, |
841 | batadv_softif_init_early); | 845 | batadv_softif_init_early); |
842 | if (!soft_iface) | 846 | if (!soft_iface) |
843 | return NULL; | 847 | return NULL; |
844 | 848 | ||
845 | soft_iface->rtnl_link_ops = &batadv_link_ops; | 849 | soft_iface->rtnl_link_ops = &batadv_link_ops; |
846 | 850 | ||
847 | ret = register_netdevice(soft_iface); | 851 | ret = register_netdevice(soft_iface); |
848 | if (ret < 0) { | 852 | if (ret < 0) { |
849 | pr_err("Unable to register the batman interface '%s': %i\n", | 853 | pr_err("Unable to register the batman interface '%s': %i\n", |
850 | name, ret); | 854 | name, ret); |
851 | free_netdev(soft_iface); | 855 | free_netdev(soft_iface); |
852 | return NULL; | 856 | return NULL; |
853 | } | 857 | } |
854 | 858 | ||
855 | return soft_iface; | 859 | return soft_iface; |
856 | } | 860 | } |
857 | 861 | ||
858 | /** | 862 | /** |
859 | * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs | 863 | * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs |
860 | * @soft_iface: the to-be-removed batman-adv interface | 864 | * @soft_iface: the to-be-removed batman-adv interface |
861 | */ | 865 | */ |
862 | void batadv_softif_destroy_sysfs(struct net_device *soft_iface) | 866 | void batadv_softif_destroy_sysfs(struct net_device *soft_iface) |
863 | { | 867 | { |
864 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); | 868 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
865 | 869 | ||
866 | queue_work(batadv_event_workqueue, &bat_priv->cleanup_work); | 870 | queue_work(batadv_event_workqueue, &bat_priv->cleanup_work); |
867 | } | 871 | } |
868 | 872 | ||
/**
 * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
 * @soft_iface: the to-be-removed batman-adv interface
 * @head: list pointer
 *
 * Detaches all hard interfaces still slaved to @soft_iface, removes the
 * sysfs entries and queues the netdev for unregistration.
 * NOTE(review): the plain walk of batadv_hardif_list is presumably safe
 * because rtnl_link_ops->dellink runs under rtnl_lock — confirm.
 */
static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
					  struct list_head *head)
{
	struct batadv_hard_iface *hard_iface;

	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface == soft_iface)
			batadv_hardif_disable_interface(hard_iface,
							BATADV_IF_CLEANUP_KEEP);
	}

	batadv_sysfs_del_meshif(soft_iface);
	unregister_netdevice_queue(soft_iface, head);
}
888 | 892 | ||
889 | int batadv_softif_is_valid(const struct net_device *net_dev) | 893 | int batadv_softif_is_valid(const struct net_device *net_dev) |
890 | { | 894 | { |
891 | if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) | 895 | if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) |
892 | return 1; | 896 | return 1; |
893 | 897 | ||
894 | return 0; | 898 | return 0; |
895 | } | 899 | } |
896 | 900 | ||
/* rtnl_link glue: lets "ip link add ... type batadv" create soft interfaces
 * and have them torn down via batadv_softif_destroy_netlink().
 */
struct rtnl_link_ops batadv_link_ops __read_mostly = {
	.kind = "batadv",
	.priv_size = sizeof(struct batadv_priv),
	.setup = batadv_softif_init_early,
	.dellink = batadv_softif_destroy_netlink,
};
903 | 907 | ||
904 | /* ethtool */ | 908 | /* ethtool */ |
905 | static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 909 | static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
906 | { | 910 | { |
907 | cmd->supported = 0; | 911 | cmd->supported = 0; |
908 | cmd->advertising = 0; | 912 | cmd->advertising = 0; |
909 | ethtool_cmd_speed_set(cmd, SPEED_10); | 913 | ethtool_cmd_speed_set(cmd, SPEED_10); |
910 | cmd->duplex = DUPLEX_FULL; | 914 | cmd->duplex = DUPLEX_FULL; |
911 | cmd->port = PORT_TP; | 915 | cmd->port = PORT_TP; |
912 | cmd->phy_address = 0; | 916 | cmd->phy_address = 0; |
913 | cmd->transceiver = XCVR_INTERNAL; | 917 | cmd->transceiver = XCVR_INTERNAL; |
914 | cmd->autoneg = AUTONEG_DISABLE; | 918 | cmd->autoneg = AUTONEG_DISABLE; |
915 | cmd->maxtxpkt = 0; | 919 | cmd->maxtxpkt = 0; |
916 | cmd->maxrxpkt = 0; | 920 | cmd->maxrxpkt = 0; |
917 | 921 | ||
918 | return 0; | 922 | return 0; |
919 | } | 923 | } |
920 | 924 | ||
/* Fill in the ethtool driver identification strings for this interface. */
static void batadv_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
	strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
}
929 | 933 | ||
/* Message level is not supported; the negative errno is deliberately
 * returned through the u32 ethtool interface.
 */
static u32 batadv_get_msglevel(struct net_device *dev)
{
	return -EOPNOTSUPP;
}
934 | 938 | ||
/* Intentional no-op: message level cannot be configured. */
static void batadv_set_msglevel(struct net_device *dev, u32 value)
{
}
938 | 942 | ||
/* A soft interface has no physical carrier; always report link up. */
static u32 batadv_get_link(struct net_device *dev)
{
	return 1;
}
943 | 947 | ||
/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
 * Declare each description string in struct.name[] to get fixed sized buffer
 * and compile time checking for strings longer than ETH_GSTRING_LEN.
 *
 * The entry order must match the counter indices summed by
 * batadv_get_ethtool_stats() (0 .. BATADV_CNT_NUM-1): the whole table is
 * memcpy'd verbatim in batadv_get_strings().
 */
static const struct {
	const char name[ETH_GSTRING_LEN];
} batadv_counters_strings[] = {
	{ "tx" },
	{ "tx_bytes" },
	{ "tx_dropped" },
	{ "rx" },
	{ "rx_bytes" },
	{ "forward" },
	{ "forward_bytes" },
	{ "mgmt_tx" },
	{ "mgmt_tx_bytes" },
	{ "mgmt_rx" },
	{ "mgmt_rx_bytes" },
	{ "frag_tx" },
	{ "frag_tx_bytes" },
	{ "frag_rx" },
	{ "frag_rx_bytes" },
	{ "frag_fwd" },
	{ "frag_fwd_bytes" },
	{ "tt_request_tx" },
	{ "tt_request_rx" },
	{ "tt_response_tx" },
	{ "tt_response_rx" },
	{ "tt_roam_adv_tx" },
	{ "tt_roam_adv_rx" },
#ifdef CONFIG_BATMAN_ADV_DAT
	{ "dat_get_tx" },
	{ "dat_get_rx" },
	{ "dat_put_tx" },
	{ "dat_put_rx" },
	{ "dat_cached_reply_tx" },
#endif
#ifdef CONFIG_BATMAN_ADV_NC
	{ "nc_code" },
	{ "nc_code_bytes" },
	{ "nc_recode" },
	{ "nc_recode_bytes" },
	{ "nc_buffer" },
	{ "nc_decode" },
	{ "nc_decode_bytes" },
	{ "nc_decode_failed" },
	{ "nc_sniffed" },
#endif
};
993 | 997 | ||
994 | static void batadv_get_strings(struct net_device *dev, uint32_t stringset, | 998 | static void batadv_get_strings(struct net_device *dev, uint32_t stringset, |
995 | uint8_t *data) | 999 | uint8_t *data) |
996 | { | 1000 | { |
997 | if (stringset == ETH_SS_STATS) | 1001 | if (stringset == ETH_SS_STATS) |
998 | memcpy(data, batadv_counters_strings, | 1002 | memcpy(data, batadv_counters_strings, |
999 | sizeof(batadv_counters_strings)); | 1003 | sizeof(batadv_counters_strings)); |
1000 | } | 1004 | } |
1001 | 1005 | ||
1002 | static void batadv_get_ethtool_stats(struct net_device *dev, | 1006 | static void batadv_get_ethtool_stats(struct net_device *dev, |
1003 | struct ethtool_stats *stats, | 1007 | struct ethtool_stats *stats, |
1004 | uint64_t *data) | 1008 | uint64_t *data) |
1005 | { | 1009 | { |
1006 | struct batadv_priv *bat_priv = netdev_priv(dev); | 1010 | struct batadv_priv *bat_priv = netdev_priv(dev); |
1007 | int i; | 1011 | int i; |
1008 | 1012 | ||
1009 | for (i = 0; i < BATADV_CNT_NUM; i++) | 1013 | for (i = 0; i < BATADV_CNT_NUM; i++) |
1010 | data[i] = batadv_sum_counter(bat_priv, i); | 1014 | data[i] = batadv_sum_counter(bat_priv, i); |
1011 | } | 1015 | } |
1012 | 1016 | ||
1013 | static int batadv_get_sset_count(struct net_device *dev, int stringset) | 1017 | static int batadv_get_sset_count(struct net_device *dev, int stringset) |
1014 | { | 1018 | { |
1015 | if (stringset == ETH_SS_STATS) | 1019 | if (stringset == ETH_SS_STATS) |
1016 | return BATADV_CNT_NUM; | 1020 | return BATADV_CNT_NUM; |
1017 | 1021 | ||
1018 | return -EOPNOTSUPP; | 1022 | return -EOPNOTSUPP; |
1019 | } | 1023 | } |
1020 | 1024 |