Commit 6b86bd62a505a4a9739474f00f8088395b7a80ba
Committed by
John W. Linville
1 parent
85a9994a0a
Exists in
master
and in
7 other branches
mac80211: mesh: move some code to make it static
There's no need to have table functions in one file and all their users in another; move the functions to the right file and make them static. Also move a static variable to the beginning of the file to make it easier to find. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Showing 3 changed files with 53 additions and 54 deletions Inline Diff
net/mac80211/mesh.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2008, 2009 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * Javier Cardona <javier@cozybit.com> | 4 | * Javier Cardona <javier@cozybit.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <asm/unaligned.h> | 12 | #include <asm/unaligned.h> |
13 | #include "ieee80211_i.h" | 13 | #include "ieee80211_i.h" |
14 | #include "mesh.h" | 14 | #include "mesh.h" |
15 | 15 | ||
16 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) | 16 | #define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ) |
17 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) | 17 | #define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ) |
18 | #define IEEE80211_MESH_RANN_INTERVAL (1 * HZ) | 18 | #define IEEE80211_MESH_RANN_INTERVAL (1 * HZ) |
19 | 19 | ||
20 | #define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 | 20 | #define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01 |
21 | #define MESHCONF_CAPAB_FORWARDING 0x08 | 21 | #define MESHCONF_CAPAB_FORWARDING 0x08 |
22 | 22 | ||
23 | #define TMR_RUNNING_HK 0 | 23 | #define TMR_RUNNING_HK 0 |
24 | #define TMR_RUNNING_MP 1 | 24 | #define TMR_RUNNING_MP 1 |
25 | #define TMR_RUNNING_MPR 2 | 25 | #define TMR_RUNNING_MPR 2 |
26 | 26 | ||
27 | int mesh_allocated; | 27 | int mesh_allocated; |
28 | static struct kmem_cache *rm_cache; | 28 | static struct kmem_cache *rm_cache; |
29 | 29 | ||
/*
 * Set up global mesh state shared by all mesh interfaces: the mesh
 * path table and the slab cache backing recent-multicast-cache (RMC)
 * entries.  Counterpart of ieee80211s_stop().
 */
void ieee80211s_init(void)
{
	mesh_pathtbl_init();
	mesh_allocated = 1;
	/* NOTE(review): the kmem_cache_create() result is not checked;
	 * a NULL rm_cache would only surface later in mesh_rmc_check().
	 * Presumably init-time allocation cannot fail here - TODO confirm. */
	rm_cache = kmem_cache_create("mesh_rmc", sizeof(struct rmc_entry),
				     0, 0, NULL);
}
37 | 37 | ||
/*
 * Tear down global mesh state: unregister the mesh path table and
 * destroy the RMC entry cache.  Counterpart of ieee80211s_init().
 */
void ieee80211s_stop(void)
{
	mesh_pathtbl_unregister();
	kmem_cache_destroy(rm_cache);
}
43 | 43 | ||
44 | static void ieee80211_mesh_housekeeping_timer(unsigned long data) | 44 | static void ieee80211_mesh_housekeeping_timer(unsigned long data) |
45 | { | 45 | { |
46 | struct ieee80211_sub_if_data *sdata = (void *) data; | 46 | struct ieee80211_sub_if_data *sdata = (void *) data; |
47 | struct ieee80211_local *local = sdata->local; | 47 | struct ieee80211_local *local = sdata->local; |
48 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 48 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
49 | 49 | ||
50 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); | 50 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
51 | 51 | ||
52 | if (local->quiescing) { | 52 | if (local->quiescing) { |
53 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); | 53 | set_bit(TMR_RUNNING_HK, &ifmsh->timers_running); |
54 | return; | 54 | return; |
55 | } | 55 | } |
56 | 56 | ||
57 | ieee80211_queue_work(&local->hw, &sdata->work); | 57 | ieee80211_queue_work(&local->hw, &sdata->work); |
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
61 | * mesh_matches_local - check if the config of a mesh point matches ours | 61 | * mesh_matches_local - check if the config of a mesh point matches ours |
62 | * | 62 | * |
63 | * @ie: information elements of a management frame from the mesh peer | 63 | * @ie: information elements of a management frame from the mesh peer |
64 | * @sdata: local mesh subif | 64 | * @sdata: local mesh subif |
65 | * | 65 | * |
66 | * This function checks if the mesh configuration of a mesh point matches the | 66 | * This function checks if the mesh configuration of a mesh point matches the |
67 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. | 67 | * local mesh configuration, i.e. if both nodes belong to the same mesh network. |
68 | */ | 68 | */ |
69 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) | 69 | bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) |
70 | { | 70 | { |
71 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 71 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * As support for each feature is added, check for matching | 74 | * As support for each feature is added, check for matching |
75 | * - On mesh config capabilities | 75 | * - On mesh config capabilities |
76 | * - Power Save Support En | 76 | * - Power Save Support En |
77 | * - Sync support enabled | 77 | * - Sync support enabled |
78 | * - Sync support active | 78 | * - Sync support active |
79 | * - Sync support required from peer | 79 | * - Sync support required from peer |
80 | * - MDA enabled | 80 | * - MDA enabled |
81 | * - Power management control on fc | 81 | * - Power management control on fc |
82 | */ | 82 | */ |
83 | if (ifmsh->mesh_id_len == ie->mesh_id_len && | 83 | if (ifmsh->mesh_id_len == ie->mesh_id_len && |
84 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && | 84 | memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && |
85 | (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && | 85 | (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && |
86 | (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && | 86 | (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && |
87 | (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && | 87 | (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && |
88 | (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && | 88 | (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && |
89 | (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)) | 89 | (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)) |
90 | return true; | 90 | return true; |
91 | 91 | ||
92 | return false; | 92 | return false; |
93 | } | 93 | } |
94 | 94 | ||
95 | /** | 95 | /** |
96 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links | 96 | * mesh_peer_accepts_plinks - check if an mp is willing to establish peer links |
97 | * | 97 | * |
98 | * @ie: information elements of a management frame from the mesh peer | 98 | * @ie: information elements of a management frame from the mesh peer |
99 | */ | 99 | */ |
100 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) | 100 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) |
101 | { | 101 | { |
102 | return (ie->mesh_config->meshconf_cap & | 102 | return (ie->mesh_config->meshconf_cap & |
103 | MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; | 103 | MESHCONF_CAPAB_ACCEPT_PLINKS) != 0; |
104 | } | 104 | } |
105 | 105 | ||
106 | /** | 106 | /** |
107 | * mesh_accept_plinks_update: update accepting_plink in local mesh beacons | 107 | * mesh_accept_plinks_update: update accepting_plink in local mesh beacons |
108 | * | 108 | * |
109 | * @sdata: mesh interface in which mesh beacons are going to be updated | 109 | * @sdata: mesh interface in which mesh beacons are going to be updated |
110 | */ | 110 | */ |
111 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) | 111 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) |
112 | { | 112 | { |
113 | bool free_plinks; | 113 | bool free_plinks; |
114 | 114 | ||
115 | /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, | 115 | /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, |
116 | * the mesh interface might be able to establish plinks with peers that | 116 | * the mesh interface might be able to establish plinks with peers that |
117 | * are already on the table but are not on PLINK_ESTAB state. However, | 117 | * are already on the table but are not on PLINK_ESTAB state. However, |
118 | * in general the mesh interface is not accepting peer link requests | 118 | * in general the mesh interface is not accepting peer link requests |
119 | * from new peers, and that must be reflected in the beacon | 119 | * from new peers, and that must be reflected in the beacon |
120 | */ | 120 | */ |
121 | free_plinks = mesh_plink_availables(sdata); | 121 | free_plinks = mesh_plink_availables(sdata); |
122 | 122 | ||
123 | if (free_plinks != sdata->u.mesh.accepting_plinks) | 123 | if (free_plinks != sdata->u.mesh.accepting_plinks) |
124 | ieee80211_mesh_housekeeping_timer((unsigned long) sdata); | 124 | ieee80211_mesh_housekeeping_timer((unsigned long) sdata); |
125 | } | 125 | } |
126 | 126 | ||
127 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) | 127 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata) |
128 | { | 128 | { |
129 | int i; | 129 | int i; |
130 | 130 | ||
131 | sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); | 131 | sdata->u.mesh.rmc = kmalloc(sizeof(struct mesh_rmc), GFP_KERNEL); |
132 | if (!sdata->u.mesh.rmc) | 132 | if (!sdata->u.mesh.rmc) |
133 | return -ENOMEM; | 133 | return -ENOMEM; |
134 | sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; | 134 | sdata->u.mesh.rmc->idx_mask = RMC_BUCKETS - 1; |
135 | for (i = 0; i < RMC_BUCKETS; i++) | 135 | for (i = 0; i < RMC_BUCKETS; i++) |
136 | INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list); | 136 | INIT_LIST_HEAD(&sdata->u.mesh.rmc->bucket[i].list); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
140 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) | 140 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata) |
141 | { | 141 | { |
142 | struct mesh_rmc *rmc = sdata->u.mesh.rmc; | 142 | struct mesh_rmc *rmc = sdata->u.mesh.rmc; |
143 | struct rmc_entry *p, *n; | 143 | struct rmc_entry *p, *n; |
144 | int i; | 144 | int i; |
145 | 145 | ||
146 | if (!sdata->u.mesh.rmc) | 146 | if (!sdata->u.mesh.rmc) |
147 | return; | 147 | return; |
148 | 148 | ||
149 | for (i = 0; i < RMC_BUCKETS; i++) | 149 | for (i = 0; i < RMC_BUCKETS; i++) |
150 | list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) { | 150 | list_for_each_entry_safe(p, n, &rmc->bucket[i].list, list) { |
151 | list_del(&p->list); | 151 | list_del(&p->list); |
152 | kmem_cache_free(rm_cache, p); | 152 | kmem_cache_free(rm_cache, p); |
153 | } | 153 | } |
154 | 154 | ||
155 | kfree(rmc); | 155 | kfree(rmc); |
156 | sdata->u.mesh.rmc = NULL; | 156 | sdata->u.mesh.rmc = NULL; |
157 | } | 157 | } |
158 | 158 | ||
/**
 * mesh_rmc_check - Check frame in recent multicast cache and add if absent.
 *
 * @sa: source address
 * @mesh_hdr: mesh_header
 *
 * Returns: 0 if the frame is not in the cache, nonzero otherwise.
 *
 * Checks using the source address and the mesh sequence number if we have
 * received this frame lately. If the frame is not in the cache, it is added to
 * it.
 */
int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
		struct ieee80211_sub_if_data *sdata)
{
	struct mesh_rmc *rmc = sdata->u.mesh.rmc;
	u32 seqnum = 0;
	int entries = 0;
	u8 idx;
	struct rmc_entry *p, *n;

	/* Don't care about endianness since only match matters */
	memcpy(&seqnum, &mesh_hdr->seqnum, sizeof(mesh_hdr->seqnum));
	/* bucket selection, by contrast, must be stable across hosts */
	idx = le32_to_cpu(mesh_hdr->seqnum) & rmc->idx_mask;
	/* Walk the bucket: evict expired entries and cap the bucket at
	 * RMC_QUEUE_MAX_LEN while scanning for a (seqnum, sa) match. */
	list_for_each_entry_safe(p, n, &rmc->bucket[idx].list, list) {
		++entries;
		if (time_after(jiffies, p->exp_time) ||
		    (entries == RMC_QUEUE_MAX_LEN)) {
			list_del(&p->list);
			kmem_cache_free(rm_cache, p);
			--entries;
		} else if ((seqnum == p->seqnum) &&
			   (memcmp(sa, p->sa, ETH_ALEN) == 0))
			/* duplicate: frame was seen recently */
			return -1;
	}

	/* Not in the cache: remember it.  On allocation failure the frame
	 * is still reported as "not seen" so it gets processed. */
	p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
	if (!p) {
		printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
		return 0;
	}
	p->seqnum = seqnum;
	p->exp_time = jiffies + RMC_TIMEOUT;
	memcpy(p->sa, sa, ETH_ALEN);
	list_add(&p->list, &rmc->bucket[idx].list);
	return 0;
}
206 | 206 | ||
/*
 * mesh_mgmt_ies_add - append mesh information elements to a management frame
 *
 * @skb: frame to append to; caller must have reserved sufficient tailroom
 *       (only the trailing vendor-IE copy checks tailroom explicitly)
 * @sdata: mesh interface supplying the local configuration
 *
 * Appends, in order: Supported Rates (max 8), Extended Supported Rates
 * (remainder, if any), DS Parameter Set (2.4 GHz band only), Mesh ID,
 * Mesh Configuration, and finally any user-provided IEs.
 */
void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	u8 *pos;
	int len, i, rate;
	u8 neighbors;

	/* Supported Rates IE carries at most 8 rates */
	sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
	len = sband->n_bitrates;
	if (len > 8)
		len = 8;
	pos = skb_put(skb, len + 2);
	*pos++ = WLAN_EID_SUPP_RATES;
	*pos++ = len;
	for (i = 0; i < len; i++) {
		rate = sband->bitrates[i].bitrate;
		/* bitrate is in units of 100 kbps; IE wants 500 kbps units */
		*pos++ = (u8) (rate / 5);
	}

	/* remaining rates go into the Extended Supported Rates IE */
	if (sband->n_bitrates > len) {
		pos = skb_put(skb, sband->n_bitrates - len + 2);
		*pos++ = WLAN_EID_EXT_SUPP_RATES;
		*pos++ = sband->n_bitrates - len;
		for (i = len; i < sband->n_bitrates; i++) {
			rate = sband->bitrates[i].bitrate;
			*pos++ = (u8) (rate / 5);
		}
	}

	/* DS Parameter Set (current channel) only exists on 2.4 GHz */
	if (sband->band == IEEE80211_BAND_2GHZ) {
		pos = skb_put(skb, 2 + 1);
		*pos++ = WLAN_EID_DS_PARAMS;
		*pos++ = 1;
		*pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
	}

	/* Mesh ID (may be zero-length) */
	pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
	*pos++ = WLAN_EID_MESH_ID;
	*pos++ = sdata->u.mesh.mesh_id_len;
	if (sdata->u.mesh.mesh_id_len)
		memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);

	/* Mesh Configuration IE, one field per byte below */
	pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
	*pos++ = WLAN_EID_MESH_CONFIG;
	*pos++ = sizeof(struct ieee80211_meshconf_ie);

	/* Active path selection protocol ID */
	*pos++ = sdata->u.mesh.mesh_pp_id;

	/* Active path selection metric ID */
	*pos++ = sdata->u.mesh.mesh_pm_id;

	/* Congestion control mode identifier */
	*pos++ = sdata->u.mesh.mesh_cc_id;

	/* Synchronization protocol identifier */
	*pos++ = sdata->u.mesh.mesh_sp_id;

	/* Authentication Protocol identifier */
	*pos++ = sdata->u.mesh.mesh_auth_id;

	/* Mesh Formation Info - number of neighbors */
	neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
	/* Number of neighbor mesh STAs or 15 whichever is smaller */
	neighbors = (neighbors > 15) ? 15 : neighbors;
	*pos++ = neighbors << 1;

	/* Mesh capability */
	sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
	*pos = MESHCONF_CAPAB_FORWARDING;
	*pos++ |= sdata->u.mesh.accepting_plinks ?
	    MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
	*pos++ = 0x00;

	/* user-supplied vendor IEs, copied only if they fit */
	if (sdata->u.mesh.ie) {
		int len = sdata->u.mesh.ie_len;
		const u8 *data = sdata->u.mesh.ie;
		if (skb_tailroom(skb) > len)
			memcpy(skb_put(skb, len), data, len);
	}
}
289 | 289 | ||
290 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl) | ||
291 | { | ||
292 | /* Use last four bytes of hw addr and interface index as hash index */ | ||
293 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) | ||
294 | & tbl->hash_mask; | ||
295 | } | ||
296 | |||
297 | struct mesh_table *mesh_table_alloc(int size_order) | ||
298 | { | ||
299 | int i; | ||
300 | struct mesh_table *newtbl; | ||
301 | |||
302 | newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL); | ||
303 | if (!newtbl) | ||
304 | return NULL; | ||
305 | |||
306 | newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * | ||
307 | (1 << size_order), GFP_KERNEL); | ||
308 | |||
309 | if (!newtbl->hash_buckets) { | ||
310 | kfree(newtbl); | ||
311 | return NULL; | ||
312 | } | ||
313 | |||
314 | newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * | ||
315 | (1 << size_order), GFP_KERNEL); | ||
316 | if (!newtbl->hashwlock) { | ||
317 | kfree(newtbl->hash_buckets); | ||
318 | kfree(newtbl); | ||
319 | return NULL; | ||
320 | } | ||
321 | |||
322 | newtbl->size_order = size_order; | ||
323 | newtbl->hash_mask = (1 << size_order) - 1; | ||
324 | atomic_set(&newtbl->entries, 0); | ||
325 | get_random_bytes(&newtbl->hash_rnd, | ||
326 | sizeof(newtbl->hash_rnd)); | ||
327 | for (i = 0; i <= newtbl->hash_mask; i++) | ||
328 | spin_lock_init(&newtbl->hashwlock[i]); | ||
329 | |||
330 | return newtbl; | ||
331 | } | ||
332 | |||
333 | 290 | ||
334 | static void ieee80211_mesh_path_timer(unsigned long data) | 291 | static void ieee80211_mesh_path_timer(unsigned long data) |
335 | { | 292 | { |
336 | struct ieee80211_sub_if_data *sdata = | 293 | struct ieee80211_sub_if_data *sdata = |
337 | (struct ieee80211_sub_if_data *) data; | 294 | (struct ieee80211_sub_if_data *) data; |
338 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 295 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
339 | struct ieee80211_local *local = sdata->local; | 296 | struct ieee80211_local *local = sdata->local; |
340 | 297 | ||
341 | if (local->quiescing) { | 298 | if (local->quiescing) { |
342 | set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); | 299 | set_bit(TMR_RUNNING_MP, &ifmsh->timers_running); |
343 | return; | 300 | return; |
344 | } | 301 | } |
345 | 302 | ||
346 | ieee80211_queue_work(&local->hw, &sdata->work); | 303 | ieee80211_queue_work(&local->hw, &sdata->work); |
347 | } | 304 | } |
348 | 305 | ||
349 | static void ieee80211_mesh_path_root_timer(unsigned long data) | 306 | static void ieee80211_mesh_path_root_timer(unsigned long data) |
350 | { | 307 | { |
351 | struct ieee80211_sub_if_data *sdata = | 308 | struct ieee80211_sub_if_data *sdata = |
352 | (struct ieee80211_sub_if_data *) data; | 309 | (struct ieee80211_sub_if_data *) data; |
353 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 310 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
354 | struct ieee80211_local *local = sdata->local; | 311 | struct ieee80211_local *local = sdata->local; |
355 | 312 | ||
356 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | 313 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); |
357 | 314 | ||
358 | if (local->quiescing) { | 315 | if (local->quiescing) { |
359 | set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); | 316 | set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running); |
360 | return; | 317 | return; |
361 | } | 318 | } |
362 | 319 | ||
363 | ieee80211_queue_work(&local->hw, &sdata->work); | 320 | ieee80211_queue_work(&local->hw, &sdata->work); |
364 | } | 321 | } |
365 | 322 | ||
366 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) | 323 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) |
367 | { | 324 | { |
368 | if (ifmsh->mshcfg.dot11MeshHWMPRootMode) | 325 | if (ifmsh->mshcfg.dot11MeshHWMPRootMode) |
369 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | 326 | set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); |
370 | else { | 327 | else { |
371 | clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); | 328 | clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); |
372 | /* stop running timer */ | 329 | /* stop running timer */ |
373 | del_timer_sync(&ifmsh->mesh_path_root_timer); | 330 | del_timer_sync(&ifmsh->mesh_path_root_timer); |
374 | } | 331 | } |
375 | } | 332 | } |
376 | 333 | ||
377 | /** | 334 | /** |
378 | * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame | 335 | * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame |
379 | * @hdr: 802.11 frame header | 336 | * @hdr: 802.11 frame header |
380 | * @fc: frame control field | 337 | * @fc: frame control field |
381 | * @meshda: destination address in the mesh | 338 | * @meshda: destination address in the mesh |
382 | * @meshsa: source address address in the mesh. Same as TA, as frame is | 339 | * @meshsa: source address address in the mesh. Same as TA, as frame is |
383 | * locally originated. | 340 | * locally originated. |
384 | * | 341 | * |
385 | * Return the length of the 802.11 (does not include a mesh control header) | 342 | * Return the length of the 802.11 (does not include a mesh control header) |
386 | */ | 343 | */ |
387 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, | 344 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, |
388 | const u8 *meshda, const u8 *meshsa) | 345 | const u8 *meshda, const u8 *meshsa) |
389 | { | 346 | { |
390 | if (is_multicast_ether_addr(meshda)) { | 347 | if (is_multicast_ether_addr(meshda)) { |
391 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); | 348 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); |
392 | /* DA TA SA */ | 349 | /* DA TA SA */ |
393 | memcpy(hdr->addr1, meshda, ETH_ALEN); | 350 | memcpy(hdr->addr1, meshda, ETH_ALEN); |
394 | memcpy(hdr->addr2, meshsa, ETH_ALEN); | 351 | memcpy(hdr->addr2, meshsa, ETH_ALEN); |
395 | memcpy(hdr->addr3, meshsa, ETH_ALEN); | 352 | memcpy(hdr->addr3, meshsa, ETH_ALEN); |
396 | return 24; | 353 | return 24; |
397 | } else { | 354 | } else { |
398 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | | 355 | *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | |
399 | IEEE80211_FCTL_TODS); | 356 | IEEE80211_FCTL_TODS); |
400 | /* RA TA DA SA */ | 357 | /* RA TA DA SA */ |
401 | memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ | 358 | memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */ |
402 | memcpy(hdr->addr2, meshsa, ETH_ALEN); | 359 | memcpy(hdr->addr2, meshsa, ETH_ALEN); |
403 | memcpy(hdr->addr3, meshda, ETH_ALEN); | 360 | memcpy(hdr->addr3, meshda, ETH_ALEN); |
404 | memcpy(hdr->addr4, meshsa, ETH_ALEN); | 361 | memcpy(hdr->addr4, meshsa, ETH_ALEN); |
405 | return 30; | 362 | return 30; |
406 | } | 363 | } |
407 | } | 364 | } |
408 | 365 | ||
409 | /** | 366 | /** |
410 | * ieee80211_new_mesh_header - create a new mesh header | 367 | * ieee80211_new_mesh_header - create a new mesh header |
411 | * @meshhdr: uninitialized mesh header | 368 | * @meshhdr: uninitialized mesh header |
412 | * @sdata: mesh interface to be used | 369 | * @sdata: mesh interface to be used |
413 | * @addr4or5: 1st address in the ae header, which may correspond to address 4 | 370 | * @addr4or5: 1st address in the ae header, which may correspond to address 4 |
414 | * (if addr6 is NULL) or address 5 (if addr6 is present). It may | 371 | * (if addr6 is NULL) or address 5 (if addr6 is present). It may |
415 | * be NULL. | 372 | * be NULL. |
416 | * @addr6: 2nd address in the ae header, which corresponds to addr6 of the | 373 | * @addr6: 2nd address in the ae header, which corresponds to addr6 of the |
417 | * mesh frame | 374 | * mesh frame |
418 | * | 375 | * |
419 | * Return the header length. | 376 | * Return the header length. |
420 | */ | 377 | */ |
421 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 378 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
422 | struct ieee80211_sub_if_data *sdata, char *addr4or5, | 379 | struct ieee80211_sub_if_data *sdata, char *addr4or5, |
423 | char *addr6) | 380 | char *addr6) |
424 | { | 381 | { |
425 | int aelen = 0; | 382 | int aelen = 0; |
426 | BUG_ON(!addr4or5 && addr6); | 383 | BUG_ON(!addr4or5 && addr6); |
427 | memset(meshhdr, 0, sizeof(*meshhdr)); | 384 | memset(meshhdr, 0, sizeof(*meshhdr)); |
428 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; | 385 | meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL; |
429 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); | 386 | put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum); |
430 | sdata->u.mesh.mesh_seqnum++; | 387 | sdata->u.mesh.mesh_seqnum++; |
431 | if (addr4or5 && !addr6) { | 388 | if (addr4or5 && !addr6) { |
432 | meshhdr->flags |= MESH_FLAGS_AE_A4; | 389 | meshhdr->flags |= MESH_FLAGS_AE_A4; |
433 | aelen += ETH_ALEN; | 390 | aelen += ETH_ALEN; |
434 | memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); | 391 | memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); |
435 | } else if (addr4or5 && addr6) { | 392 | } else if (addr4or5 && addr6) { |
436 | meshhdr->flags |= MESH_FLAGS_AE_A5_A6; | 393 | meshhdr->flags |= MESH_FLAGS_AE_A5_A6; |
437 | aelen += 2 * ETH_ALEN; | 394 | aelen += 2 * ETH_ALEN; |
438 | memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); | 395 | memcpy(meshhdr->eaddr1, addr4or5, ETH_ALEN); |
439 | memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); | 396 | memcpy(meshhdr->eaddr2, addr6, ETH_ALEN); |
440 | } | 397 | } |
441 | return 6 + aelen; | 398 | return 6 + aelen; |
442 | } | 399 | } |
443 | 400 | ||
/*
 * Periodic mesh housekeeping: expire inactive peers and stale mesh
 * paths, refresh the beacon if plink availability changed, and rearm
 * the housekeeping timer.
 */
static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
			   struct ieee80211_if_mesh *ifmsh)
{
	bool free_plinks;

#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	printk(KERN_DEBUG "%s: running mesh housekeeping\n",
	       sdata->name);
#endif

	/* drop stations and paths that have been inactive too long */
	ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
	mesh_path_expire(sdata);

	/* re-advertise plink acceptance in beacons if it changed */
	free_plinks = mesh_plink_availables(sdata);
	if (free_plinks != sdata->u.mesh.accepting_plinks)
		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);

	/* rearm for the next housekeeping round */
	mod_timer(&ifmsh->housekeeping_timer,
		  round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}
464 | 421 | ||
/*
 * Transmit a root frame (root path announcement) and rearm the root
 * timer for the next interval.
 */
static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	mesh_path_tx_root_frame(sdata);
	mod_timer(&ifmsh->mesh_path_root_timer,
		  round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
}
473 | 430 | ||
474 | #ifdef CONFIG_PM | 431 | #ifdef CONFIG_PM |
/*
 * ieee80211_mesh_quiesce - stop mesh timers for suspend
 *
 * Synchronously stops the three mesh timers. For each timer that was
 * still pending, the matching TMR_RUNNING_* bit is set so that
 * ieee80211_mesh_restart() re-arms exactly the timers that were
 * active at suspend time.
 */
void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	/* use atomic bitops in case both timers fire at the same time */

	if (del_timer_sync(&ifmsh->housekeeping_timer))
		set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
	if (del_timer_sync(&ifmsh->mesh_path_timer))
		set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
	if (del_timer_sync(&ifmsh->mesh_path_root_timer))
		set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
}
488 | 445 | ||
/*
 * ieee80211_mesh_restart - re-arm mesh timers after resume
 *
 * Counterpart of ieee80211_mesh_quiesce(): re-arms only the timers
 * whose TMR_RUNNING_* bit was recorded at suspend, then re-evaluates
 * root mode.
 */
void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
		add_timer(&ifmsh->housekeeping_timer);
	if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
		add_timer(&ifmsh->mesh_path_timer);
	if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running))
		add_timer(&ifmsh->mesh_path_root_timer);
	ieee80211_mesh_root_setup(ifmsh);
}
501 | #endif | 458 | #endif |
502 | 459 | ||
/*
 * ieee80211_start_mesh - bring up a mesh interface
 *
 * Widens the RX filter (other-BSS frames, allmulti so multicast can
 * be forwarded), initializes the mesh protocol identifiers, queues
 * the first housekeeping run, sets up root mode and enables
 * beaconing with the default mesh beacon interval.
 */
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;

	local->fif_other_bss++;
	/* mesh ifaces must set allmulti to forward mcast traffic */
	atomic_inc(&local->iff_allmultis);
	ieee80211_configure_filter(local);

	ifmsh->mesh_cc_id = 0;	/* Disabled */
	ifmsh->mesh_sp_id = 0;	/* Neighbor Offset */
	ifmsh->mesh_auth_id = 0;	/* Disabled */
	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
	ieee80211_mesh_root_setup(ifmsh);
	ieee80211_queue_work(&local->hw, &sdata->work);
	sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
						BSS_CHANGED_BEACON_ENABLED |
						BSS_CHANGED_BEACON_INT);
}
524 | 481 | ||
/*
 * ieee80211_stop_mesh - tear down a mesh interface
 *
 * Clears the mesh ID, disables beaconing, flushes all stations,
 * stops the housekeeping and root timers and the deferred work, then
 * undoes the RX filter changes made in ieee80211_start_mesh().
 *
 * NOTE(review): mesh_path_timer is not stopped here — confirm it
 * cannot still be pending at this point.
 */
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	ifmsh->mesh_id_len = 0;
	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
	sta_info_flush(local, NULL);

	del_timer_sync(&sdata->u.mesh.housekeeping_timer);
	del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
	/*
	 * If the timer fired while we waited for it, it will have
	 * requeued the work. Now the work will be running again
	 * but will not rearm the timer again because it checks
	 * whether the interface is running, which, at this point,
	 * it no longer is.
	 */
	cancel_work_sync(&sdata->work);

	local->fif_other_bss--;
	atomic_dec(&local->iff_allmultis);
	ieee80211_configure_filter(local);
}
549 | 506 | ||
/*
 * ieee80211_mesh_rx_bcn_presp - process a received beacon or probe response
 *
 * Parses the frame's information elements, validates the channel and
 * the sender's mesh configuration against ours, and updates the mesh
 * neighbour state for matching peers.
 */
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
					u16 stype,
					struct ieee80211_mgmt *mgmt,
					size_t len,
					struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee802_11_elems elems;
	struct ieee80211_channel *channel;
	u32 supp_rates = 0;
	size_t baselen;
	int freq;
	enum ieee80211_band band = rx_status->band;

	/* ignore ProbeResp to foreign address */
	if (stype == IEEE80211_STYPE_PROBE_RESP &&
	    compare_ether_addr(mgmt->da, sdata->vif.addr))
		return;

	/* frames shorter than the fixed part carry no IEs */
	baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
	if (baselen > len)
		return;

	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
			       &elems);

	/* ignore beacons from secure mesh peers if our security is off */
	if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE)
		return;

	/* prefer the DS params channel over the RX channel when present */
	if (elems.ds_params && elems.ds_params_len == 1)
		freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
	else
		freq = rx_status->freq;

	channel = ieee80211_get_channel(local->hw.wiphy, freq);

	if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
		return;

	/* only peers advertising a matching mesh profile are neighbours */
	if (elems.mesh_id && elems.mesh_config &&
	    mesh_matches_local(&elems, sdata)) {
		supp_rates = ieee80211_sta_get_rates(local, &elems, band);
		mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
	}
}
596 | 553 | ||
597 | static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | 554 | static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, |
598 | struct ieee80211_mgmt *mgmt, | 555 | struct ieee80211_mgmt *mgmt, |
599 | size_t len, | 556 | size_t len, |
600 | struct ieee80211_rx_status *rx_status) | 557 | struct ieee80211_rx_status *rx_status) |
601 | { | 558 | { |
602 | switch (mgmt->u.action.category) { | 559 | switch (mgmt->u.action.category) { |
603 | case WLAN_CATEGORY_MESH_ACTION: | 560 | case WLAN_CATEGORY_MESH_ACTION: |
604 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); | 561 | mesh_rx_plink_frame(sdata, mgmt, len, rx_status); |
605 | break; | 562 | break; |
606 | case WLAN_CATEGORY_MESH_PATH_SEL: | 563 | case WLAN_CATEGORY_MESH_PATH_SEL: |
607 | mesh_rx_path_sel_frame(sdata, mgmt, len); | 564 | mesh_rx_path_sel_frame(sdata, mgmt, len); |
608 | break; | 565 | break; |
609 | } | 566 | } |
610 | } | 567 | } |
611 | 568 | ||
612 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 569 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
613 | struct sk_buff *skb) | 570 | struct sk_buff *skb) |
614 | { | 571 | { |
615 | struct ieee80211_rx_status *rx_status; | 572 | struct ieee80211_rx_status *rx_status; |
616 | struct ieee80211_mgmt *mgmt; | 573 | struct ieee80211_mgmt *mgmt; |
617 | u16 stype; | 574 | u16 stype; |
618 | 575 | ||
619 | rx_status = IEEE80211_SKB_RXCB(skb); | 576 | rx_status = IEEE80211_SKB_RXCB(skb); |
620 | mgmt = (struct ieee80211_mgmt *) skb->data; | 577 | mgmt = (struct ieee80211_mgmt *) skb->data; |
621 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; | 578 | stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; |
622 | 579 | ||
623 | switch (stype) { | 580 | switch (stype) { |
624 | case IEEE80211_STYPE_PROBE_RESP: | 581 | case IEEE80211_STYPE_PROBE_RESP: |
625 | case IEEE80211_STYPE_BEACON: | 582 | case IEEE80211_STYPE_BEACON: |
626 | ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, | 583 | ieee80211_mesh_rx_bcn_presp(sdata, stype, mgmt, skb->len, |
627 | rx_status); | 584 | rx_status); |
628 | break; | 585 | break; |
629 | case IEEE80211_STYPE_ACTION: | 586 | case IEEE80211_STYPE_ACTION: |
630 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); | 587 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); |
631 | break; | 588 | break; |
632 | } | 589 | } |
633 | } | 590 | } |
634 | 591 | ||
/*
 * ieee80211_mesh_work - deferred mesh tasks
 *
 * Runs from the interface work item: starts pending HWMP path
 * discovery (rate-limited by dot11MeshHWMPpreqMinInterval), grows
 * the mesh path and mesh portal tables on demand, and services the
 * housekeeping and root-announcement flags set by the timers.
 */
void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	if (ifmsh->preq_queue_len &&
	    time_after(jiffies,
		       ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
		mesh_path_start_discovery(sdata);

	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
		mesh_mpath_table_grow();

	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
		mesh_mpp_table_grow();

	if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
		ieee80211_mesh_housekeeping(sdata, ifmsh);

	if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
		ieee80211_mesh_rootpath(sdata);
}
656 | 613 | ||
657 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | 614 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) |
658 | { | 615 | { |
659 | struct ieee80211_sub_if_data *sdata; | 616 | struct ieee80211_sub_if_data *sdata; |
660 | 617 | ||
661 | rcu_read_lock(); | 618 | rcu_read_lock(); |
662 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 619 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
663 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 620 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
664 | ieee80211_queue_work(&local->hw, &sdata->work); | 621 | ieee80211_queue_work(&local->hw, &sdata->work); |
665 | rcu_read_unlock(); | 622 | rcu_read_unlock(); |
666 | } | 623 | } |
667 | 624 | ||
/*
 * ieee80211_mesh_init_sdata - initialize per-interface mesh state
 *
 * Sets up the three mesh timers, the RMC, the PREQ queue and its
 * lock, and default counters.  The global mesh structures are
 * allocated lazily when the first mesh interface is created.
 */
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;

	setup_timer(&ifmsh->housekeeping_timer,
		    ieee80211_mesh_housekeeping_timer,
		    (unsigned long) sdata);

	ifmsh->accepting_plinks = true;
	ifmsh->preq_id = 0;
	ifmsh->sn = 0;
	atomic_set(&ifmsh->mpaths, 0);
	mesh_rmc_init(sdata);
	ifmsh->last_preq = jiffies;
	/* Allocate all mesh structures when creating the first mesh interface. */
	if (!mesh_allocated)
		ieee80211s_init();
	setup_timer(&ifmsh->mesh_path_timer,
		    ieee80211_mesh_path_timer,
		    (unsigned long) sdata);
	setup_timer(&ifmsh->mesh_path_root_timer,
		    ieee80211_mesh_path_root_timer,
		    (unsigned long) sdata);
	INIT_LIST_HEAD(&ifmsh->preq_queue.list);
	spin_lock_init(&ifmsh->mesh_preq_queue_lock);
}
694 | 651 |
net/mac80211/mesh.h
1 | /* | 1 | /* |
2 | * Copyright (c) 2008, 2009 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Authors: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * Javier Cardona <javier@cozybit.com> | 4 | * Javier Cardona <javier@cozybit.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef IEEE80211S_H | 11 | #ifndef IEEE80211S_H |
12 | #define IEEE80211S_H | 12 | #define IEEE80211S_H |
13 | 13 | ||
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/jhash.h> | 15 | #include <linux/jhash.h> |
16 | #include <asm/unaligned.h> | 16 | #include <asm/unaligned.h> |
17 | #include "ieee80211_i.h" | 17 | #include "ieee80211_i.h" |
18 | 18 | ||
19 | 19 | ||
20 | /* Data structures */ | 20 | /* Data structures */ |
21 | 21 | ||
/**
 * enum mesh_path_flags - mac80211 mesh path flags
 *
 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
 *	number
 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
 *	modified
 * @MESH_PATH_RESOLVED: the mesh path has been resolved
 *
 * MESH_PATH_RESOLVED is used by the mesh path timer to
 * decide when to stop or cancel the mesh path discovery.
 */
enum mesh_path_flags {
	MESH_PATH_ACTIVE = BIT(0),
	MESH_PATH_RESOLVING = BIT(1),
	MESH_PATH_SN_VALID = BIT(2),
	MESH_PATH_FIXED = BIT(3),
	MESH_PATH_RESOLVED = BIT(4),
};
45 | 45 | ||
/**
 * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks
 *
 * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
 * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
 *	to grow.
 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
 *	grow
 * @MESH_WORK_ROOT: the mesh root station needs to send a frame
 */
enum mesh_deferred_task_flags {
	MESH_WORK_HOUSEKEEPING,
	MESH_WORK_GROW_MPATH_TABLE,
	MESH_WORK_GROW_MPP_TABLE,
	MESH_WORK_ROOT,
};
64 | 64 | ||
/**
 * struct mesh_path - mac80211 mesh path structure
 *
 * @dst: mesh path destination mac address
 * @mpp: mesh proxy mac address (used for MPP or MAP)
 * @sdata: mesh subif
 * @next_hop: mesh neighbor to which frames for this destination will be
 *	forwarded
 * @timer: mesh path discovery timer
 * @frame_queue: pending queue for frames sent to this destination while the
 *	path is unresolved
 * @rcu: RCU head — presumably used to defer freeing of this structure,
 *	see the note below; TODO confirm against the free path
 * @sn: target sequence number
 * @metric: current metric to this destination
 * @hop_count: hops to destination
 * @exp_time: in jiffies, when the path will expire or when it expired
 * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery
 *	retry
 * @discovery_retries: number of discovery retries
 * @flags: mesh path flags, as specified on &enum mesh_path_flags
 * @state_lock: mesh path state lock
 *
 * The combination of dst and sdata is unique in the mesh path table. Since the
 * next_hop STA is only protected by RCU as well, deleting the STA must also
 * remove/substitute the mesh_path structure and wait until that is no longer
 * reachable before destroying the STA completely.
 */
struct mesh_path {
	u8 dst[ETH_ALEN];
	u8 mpp[ETH_ALEN];	/* used for MPP or MAP */
	struct ieee80211_sub_if_data *sdata;
	struct sta_info *next_hop;
	struct timer_list timer;
	struct sk_buff_head frame_queue;
	struct rcu_head rcu;
	u32 sn;
	u32 metric;
	u8 hop_count;
	unsigned long exp_time;
	u32 discovery_timeout;
	u8 discovery_retries;
	enum mesh_path_flags flags;
	spinlock_t state_lock;
};
108 | 108 | ||
/**
 * struct mesh_table - mesh path / mesh portal hash table
 *
 * @hash_buckets: array of hash buckets of the table
 * @hashwlock: array of locks to protect write operations, one per bucket
 * @hash_mask: 2^size_order - 1, used to compute hash idx
 * @hash_rnd: random value used for hash computations
 * @entries: number of entries in the table
 * @free_node: function to free nodes of the table
 * @copy_node: function to copy nodes of the table
 * @size_order: determines size of the table, there will be 2^size_order hash
 *	buckets
 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
 *	reached, the table will grow
 */
struct mesh_table {
	/* Number of buckets will be 2^N */
	struct hlist_head *hash_buckets;
	spinlock_t *hashwlock;		/* One per bucket, for add/del */
	unsigned int hash_mask;		/* (2^size_order) - 1 */
	__u32 hash_rnd;			/* Used for hash generation */
	atomic_t entries;		/* Up to MAX_MESH_NEIGHBOURS */
	void (*free_node) (struct hlist_node *p, bool free_leafs);
	int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
	int size_order;
	int mean_chain_len;
};
136 | 136 | ||
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
#define RMC_QUEUE_MAX_LEN 4
#define RMC_TIMEOUT (3 * HZ)

/**
 * struct rmc_entry - entry in the Recent Multicast Cache
 *
 * @list: list linkage within the cache bucket
 * @seqnum: mesh sequence number of the frame
 * @exp_time: expiration time of the entry, in jiffies
 * @sa: source address of the frame
 *
 * The Recent Multicast Cache keeps track of the latest multicast frames that
 * have been received by a mesh interface and discards received multicast frames
 * that are found in the cache.
 */
struct rmc_entry {
	struct list_head list;
	u32 seqnum;
	unsigned long exp_time;
	u8 sa[ETH_ALEN];
};
160 | 160 | ||
/**
 * struct mesh_rmc - per-interface Recent Multicast Cache
 *
 * @bucket: hash buckets holding &struct rmc_entry entries
 * @idx_mask: mask applied to a hash value to select a bucket
 */
struct mesh_rmc {
	struct rmc_entry bucket[RMC_BUCKETS];
	u32 idx_mask;
};
165 | 165 | ||
166 | 166 | ||
#define MESH_DEFAULT_BEACON_INTERVAL 1000 	/* in 1024 us units */

/* how long before an unused mesh path is expired */
#define MESH_PATH_EXPIRE (600 * HZ)

/* Default maximum number of plinks per interface */
#define MESH_MAX_PLINKS		256

/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS		1024

/* Pending ANA approval */
#define MESH_PATH_SEL_ACTION	0

/* PERR reason codes */
/* NOTE(review): PEER_RCODE_UNSPECIFIED looks like a typo for
 * PERR_RCODE_UNSPECIFIED; renaming would touch every user of the
 * macro, so it is only flagged here. */
#define PEER_RCODE_UNSPECIFIED	11
#define PERR_RCODE_NO_ROUTE	12
#define PERR_RCODE_DEST_UNREACH	13
184 | 184 | ||
185 | /* Public interfaces */ | 185 | /* Public interfaces */ |
186 | /* Various */ | 186 | /* Various */ |
187 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, | 187 | int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc, |
188 | const u8 *da, const u8 *sa); | 188 | const u8 *da, const u8 *sa); |
189 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, | 189 | int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr, |
190 | struct ieee80211_sub_if_data *sdata, char *addr4or5, | 190 | struct ieee80211_sub_if_data *sdata, char *addr4or5, |
191 | char *addr6); | 191 | char *addr6); |
192 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, | 192 | int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, |
193 | struct ieee80211_sub_if_data *sdata); | 193 | struct ieee80211_sub_if_data *sdata); |
194 | bool mesh_matches_local(struct ieee802_11_elems *ie, | 194 | bool mesh_matches_local(struct ieee802_11_elems *ie, |
195 | struct ieee80211_sub_if_data *sdata); | 195 | struct ieee80211_sub_if_data *sdata); |
196 | void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); | 196 | void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); |
197 | void mesh_mgmt_ies_add(struct sk_buff *skb, | 197 | void mesh_mgmt_ies_add(struct sk_buff *skb, |
198 | struct ieee80211_sub_if_data *sdata); | 198 | struct ieee80211_sub_if_data *sdata); |
199 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); | 199 | void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); |
200 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); | 200 | int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); |
201 | void ieee80211s_init(void); | 201 | void ieee80211s_init(void); |
202 | void ieee80211s_update_metric(struct ieee80211_local *local, | 202 | void ieee80211s_update_metric(struct ieee80211_local *local, |
203 | struct sta_info *stainfo, struct sk_buff *skb); | 203 | struct sta_info *stainfo, struct sk_buff *skb); |
204 | void ieee80211s_stop(void); | 204 | void ieee80211s_stop(void); |
205 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 205 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
206 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 206 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
207 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 207 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
208 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); | 208 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); |
209 | 209 | ||
210 | /* Mesh paths */ | 210 | /* Mesh paths */ |
211 | int mesh_nexthop_lookup(struct sk_buff *skb, | 211 | int mesh_nexthop_lookup(struct sk_buff *skb, |
212 | struct ieee80211_sub_if_data *sdata); | 212 | struct ieee80211_sub_if_data *sdata); |
213 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); | 213 | void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata); |
214 | struct mesh_path *mesh_path_lookup(u8 *dst, | 214 | struct mesh_path *mesh_path_lookup(u8 *dst, |
215 | struct ieee80211_sub_if_data *sdata); | 215 | struct ieee80211_sub_if_data *sdata); |
216 | struct mesh_path *mpp_path_lookup(u8 *dst, | 216 | struct mesh_path *mpp_path_lookup(u8 *dst, |
217 | struct ieee80211_sub_if_data *sdata); | 217 | struct ieee80211_sub_if_data *sdata); |
218 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); | 218 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata); |
219 | struct mesh_path *mesh_path_lookup_by_idx(int idx, | 219 | struct mesh_path *mesh_path_lookup_by_idx(int idx, |
220 | struct ieee80211_sub_if_data *sdata); | 220 | struct ieee80211_sub_if_data *sdata); |
221 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); | 221 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop); |
222 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); | 222 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata); |
223 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata); | 223 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata); |
224 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, | 224 | void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata, |
225 | struct ieee80211_mgmt *mgmt, size_t len); | 225 | struct ieee80211_mgmt *mgmt, size_t len); |
226 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); | 226 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); |
227 | /* Mesh plinks */ | 227 | /* Mesh plinks */ |
228 | void mesh_neighbour_update(u8 *hw_addr, u32 rates, | 228 | void mesh_neighbour_update(u8 *hw_addr, u32 rates, |
229 | struct ieee80211_sub_if_data *sdata, | 229 | struct ieee80211_sub_if_data *sdata, |
230 | struct ieee802_11_elems *ie); | 230 | struct ieee802_11_elems *ie); |
231 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); | 231 | bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); |
232 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); | 232 | void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); |
233 | void mesh_plink_broken(struct sta_info *sta); | 233 | void mesh_plink_broken(struct sta_info *sta); |
234 | void mesh_plink_deactivate(struct sta_info *sta); | 234 | void mesh_plink_deactivate(struct sta_info *sta); |
235 | int mesh_plink_open(struct sta_info *sta); | 235 | int mesh_plink_open(struct sta_info *sta); |
236 | void mesh_plink_block(struct sta_info *sta); | 236 | void mesh_plink_block(struct sta_info *sta); |
237 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, | 237 | void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, |
238 | struct ieee80211_mgmt *mgmt, size_t len, | 238 | struct ieee80211_mgmt *mgmt, size_t len, |
239 | struct ieee80211_rx_status *rx_status); | 239 | struct ieee80211_rx_status *rx_status); |
240 | 240 | ||
241 | /* Private interfaces */ | 241 | /* Private interfaces */ |
242 | /* Mesh tables */ | 242 | /* Mesh tables */ |
243 | struct mesh_table *mesh_table_alloc(int size_order); | ||
244 | void mesh_table_free(struct mesh_table *tbl, bool free_leafs); | ||
245 | void mesh_mpath_table_grow(void); | 243 | void mesh_mpath_table_grow(void); |
246 | void mesh_mpp_table_grow(void); | 244 | void mesh_mpp_table_grow(void); |
247 | u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, | ||
248 | struct mesh_table *tbl); | ||
249 | /* Mesh paths */ | 245 | /* Mesh paths */ |
250 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, | 246 | int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, |
251 | const u8 *ra, struct ieee80211_sub_if_data *sdata); | 247 | const u8 *ra, struct ieee80211_sub_if_data *sdata); |
252 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); | 248 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta); |
253 | void mesh_path_flush_pending(struct mesh_path *mpath); | 249 | void mesh_path_flush_pending(struct mesh_path *mpath); |
254 | void mesh_path_tx_pending(struct mesh_path *mpath); | 250 | void mesh_path_tx_pending(struct mesh_path *mpath); |
255 | int mesh_pathtbl_init(void); | 251 | int mesh_pathtbl_init(void); |
256 | void mesh_pathtbl_unregister(void); | 252 | void mesh_pathtbl_unregister(void); |
257 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); | 253 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); |
258 | void mesh_path_timer(unsigned long data); | 254 | void mesh_path_timer(unsigned long data); |
259 | void mesh_path_flush_by_nexthop(struct sta_info *sta); | 255 | void mesh_path_flush_by_nexthop(struct sta_info *sta); |
260 | void mesh_path_discard_frame(struct sk_buff *skb, | 256 | void mesh_path_discard_frame(struct sk_buff *skb, |
261 | struct ieee80211_sub_if_data *sdata); | 257 | struct ieee80211_sub_if_data *sdata); |
262 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); | 258 | void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); |
263 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); | 259 | void mesh_path_restart(struct ieee80211_sub_if_data *sdata); |
264 | void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); | 260 | void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata); |
265 | 261 | ||
266 | extern int mesh_paths_generation; | 262 | extern int mesh_paths_generation; |
267 | 263 | ||
268 | #ifdef CONFIG_MAC80211_MESH | 264 | #ifdef CONFIG_MAC80211_MESH |
269 | extern int mesh_allocated; | 265 | extern int mesh_allocated; |
270 | 266 | ||
271 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) | 267 | static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata) |
272 | { | 268 | { |
273 | return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - | 269 | return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks - |
274 | atomic_read(&sdata->u.mesh.mshstats.estab_plinks); | 270 | atomic_read(&sdata->u.mesh.mshstats.estab_plinks); |
275 | } | 271 | } |
276 | 272 | ||
/* True if a new peer link may be accepted: bounded both by the
 * per-interface free-link count and by the remaining room in the
 * global station table (MESH_MAX_PLINKS minus stations in use).
 */
static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
{
	return (min_t(long, mesh_plink_free_count(sdata),
		   MESH_MAX_PLINKS - sdata->local->num_sta)) > 0;
}
282 | 278 | ||
/* Mark a mesh path as resolved and usable for forwarding.
 * NOTE(review): callers appear to hold mpath->state_lock when flipping
 * these flags elsewhere in this code — confirm before lock-free use.
 */
static inline void mesh_path_activate(struct mesh_path *mpath)
{
	mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED;
}
287 | 283 | ||
/* True if this mesh interface runs the HWMP path selection protocol. */
static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
}
292 | 288 | ||
/* Walk every entry of mesh table @x: @i iterates the buckets, @node is
 * the mpath_node cursor, @p the hlist scratch pointer.  Uses the RCU
 * list walker, so the caller must be in an RCU read-side section.
 */
#define for_each_mesh_entry(x, p, node, i) \
	for (i = 0; i <= x->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
296 | 292 | ||
297 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); | 293 | void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); |
298 | 294 | ||
299 | void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); | 295 | void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); |
300 | void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); | 296 | void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); |
301 | void mesh_plink_quiesce(struct sta_info *sta); | 297 | void mesh_plink_quiesce(struct sta_info *sta); |
302 | void mesh_plink_restart(struct sta_info *sta); | 298 | void mesh_plink_restart(struct sta_info *sta); |
303 | #else | 299 | #else |
304 | #define mesh_allocated 0 | 300 | #define mesh_allocated 0 |
305 | static inline void | 301 | static inline void |
306 | ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} | 302 | ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} |
307 | static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) | 303 | static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) |
308 | {} | 304 | {} |
309 | static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) | 305 | static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata) |
310 | {} | 306 | {} |
311 | static inline void mesh_plink_quiesce(struct sta_info *sta) {} | 307 | static inline void mesh_plink_quiesce(struct sta_info *sta) {} |
312 | static inline void mesh_plink_restart(struct sta_info *sta) {} | 308 | static inline void mesh_plink_restart(struct sta_info *sta) {} |
313 | static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) | 309 | static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) |
314 | { return false; } | 310 | { return false; } |
315 | #endif | 311 | #endif |
316 | 312 | ||
317 | #endif /* IEEE80211S_H */ | 313 | #endif /* IEEE80211S_H */ |
318 | 314 |
net/mac80211/mesh_pathtbl.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2008, 2009 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/etherdevice.h> | 10 | #include <linux/etherdevice.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
17 | #include "ieee80211_i.h" | 17 | #include "ieee80211_i.h" |
18 | #include "mesh.h" | 18 | #include "mesh.h" |
19 | 19 | ||
20 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ | 20 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ |
21 | #define INIT_PATHS_SIZE_ORDER 2 | 21 | #define INIT_PATHS_SIZE_ORDER 2 |
22 | 22 | ||
23 | /* Keep the mean chain length below this constant */ | 23 | /* Keep the mean chain length below this constant */ |
24 | #define MEAN_CHAIN_LEN 2 | 24 | #define MEAN_CHAIN_LEN 2 |
25 | 25 | ||
26 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ | 26 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ |
27 | time_after(jiffies, mpath->exp_time) && \ | 27 | time_after(jiffies, mpath->exp_time) && \ |
28 | !(mpath->flags & MESH_PATH_FIXED)) | 28 | !(mpath->flags & MESH_PATH_FIXED)) |
29 | 29 | ||
/* Hash bucket entry wrapping a mesh_path pointer. */
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
38 | 38 | ||
39 | static struct mesh_table *mesh_paths; | 39 | static struct mesh_table *mesh_paths; |
40 | static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ | 40 | static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ |
41 | 41 | ||
42 | int mesh_paths_generation; | 42 | int mesh_paths_generation; |
43 | |||
44 | /* This lock will have the grow table function as writer and add / delete nodes | ||
45 | * as readers. When reading the table (i.e. doing lookups) we are well protected | ||
46 | * by RCU | ||
47 | */ | ||
48 | static DEFINE_RWLOCK(pathtbl_resize_lock); | ||
49 | |||
50 | |||
/*
 * mesh_table_alloc - allocate a new mesh path hash table
 *
 * @size_order: the table gets 2^size_order hash buckets
 *
 * Returns the new table, or NULL on allocation failure.  Each bucket
 * has its own write lock, and the hash seed is randomized per table.
 */
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
	if (!newtbl)
		return NULL;

	/* bucket heads must start out empty, hence the zeroed allocation */
	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_KERNEL);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	/* one spinlock per bucket; initialized in the loop below */
	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_KERNEL);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	/* random seed so hash distribution differs between tables */
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}
86 | |||
/* Free the bucket array, the per-bucket locks and the table itself.
 * All entries must already have been removed by the caller.
 */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
49 | 93 | ||
/*
 * mesh_table_free - empty and free a mesh path table
 *
 * @tbl: table to tear down
 * @free_leafs: passed through to the table's free_node() callback;
 *	controls whether the pointed-to leaves are released too (false
 *	when another table still references the same mesh_path objects)
 */
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		/* empty each bucket under its own lock */
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}
67 | 111 | ||
/*
 * mesh_table_grow - copy all entries of @oldtbl into the larger @newtbl
 *
 * Returns 0 on success, -EAGAIN when the old table has not yet exceeded
 * its mean chain length (no need to grow), or -ENOMEM when copying an
 * entry failed; in the latter case everything already copied into
 * @newtbl is freed again (nodes only, leaves are kept).
 */
static int mesh_table_grow(struct mesh_table *oldtbl,
		struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	/* only grow once mean chain length has actually been exceeded */
	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	/* new table inherits the old table's callbacks and tuning */
	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	/* undo the partial copy; free_leafs=0 so shared leaves survive */
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
99 | 143 | ||
/* Map a destination address plus its owning interface to a bucket
 * index in @tbl, keyed with the table's random seed.
 */
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
		struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
100 | 151 | ||
101 | /* This lock will have the grow table function as writer and add / delete nodes | ||
102 | * as readers. When reading the table (i.e. doing lookups) we are well protected | ||
103 | * by RCU | ||
104 | */ | ||
105 | static DEFINE_RWLOCK(pathtbl_resize_lock); | ||
106 | 152 | ||
107 | /** | 153 | /** |
108 | * | 154 | * |
109 | * mesh_path_assign_nexthop - update mesh path next hop | 155 | * mesh_path_assign_nexthop - update mesh path next hop |
110 | * | 156 | * |
111 | * @mpath: mesh path to update | 157 | * @mpath: mesh path to update |
112 | * @sta: next hop to assign | 158 | * @sta: next hop to assign |
113 | * | 159 | * |
114 | * Locking: mpath->state_lock must be held when calling this function | 160 | * Locking: mpath->state_lock must be held when calling this function |
115 | */ | 161 | */ |
116 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | 162 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) |
117 | { | 163 | { |
118 | struct sk_buff *skb; | 164 | struct sk_buff *skb; |
119 | struct ieee80211_hdr *hdr; | 165 | struct ieee80211_hdr *hdr; |
120 | struct sk_buff_head tmpq; | 166 | struct sk_buff_head tmpq; |
121 | unsigned long flags; | 167 | unsigned long flags; |
122 | 168 | ||
123 | rcu_assign_pointer(mpath->next_hop, sta); | 169 | rcu_assign_pointer(mpath->next_hop, sta); |
124 | 170 | ||
125 | __skb_queue_head_init(&tmpq); | 171 | __skb_queue_head_init(&tmpq); |
126 | 172 | ||
127 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); | 173 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); |
128 | 174 | ||
129 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { | 175 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { |
130 | hdr = (struct ieee80211_hdr *) skb->data; | 176 | hdr = (struct ieee80211_hdr *) skb->data; |
131 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); | 177 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); |
132 | __skb_queue_tail(&tmpq, skb); | 178 | __skb_queue_tail(&tmpq, skb); |
133 | } | 179 | } |
134 | 180 | ||
135 | skb_queue_splice(&tmpq, &mpath->frame_queue); | 181 | skb_queue_splice(&tmpq, &mpath->frame_queue); |
136 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); | 182 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); |
137 | } | 183 | } |
138 | 184 | ||
139 | 185 | ||
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		/* match on both destination and owning interface */
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				/* re-check under state_lock before clearing
				 * ACTIVE, in case the path was refreshed
				 * concurrently */
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
175 | 221 | ||
/* Same lookup as mesh_path_lookup(), but against the mpp_paths table
 * (the MPP & MAP proxy-path table).  Must be called within a read rcu
 * section.
 */
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				/* double-checked under state_lock, as in
				 * mesh_path_lookup() */
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
202 | 248 | ||
203 | 249 | ||
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;	/* running index over entries matching @sdata */

	for_each_mesh_entry(mesh_paths, p, node, i) {
		/* NULL sdata means "count entries of every interface" */
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				/* re-check under state_lock before
				 * clearing ACTIVE */
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
236 | 282 | ||
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @addr: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	/* reserve a slot against the per-interface path limit up front;
	 * must be released again on every error path below */
	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	/* read lock keeps the table from being resized underneath us;
	 * the per-bucket lock below serializes against other writers */
	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);

	/* refuse duplicates for the same (dst, sdata) pair */
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	/* if mean chain length is exceeded, schedule a table grow */
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* growing takes the resize lock as writer, so defer it
		 * to the workqueue */
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	/* release the slot reserved at the top */
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
326 | 372 | ||
327 | void mesh_mpath_table_grow(void) | 373 | void mesh_mpath_table_grow(void) |
328 | { | 374 | { |
329 | struct mesh_table *oldtbl, *newtbl; | 375 | struct mesh_table *oldtbl, *newtbl; |
330 | 376 | ||
331 | rcu_read_lock(); | 377 | rcu_read_lock(); |
332 | newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1); | 378 | newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1); |
333 | if (!newtbl) | 379 | if (!newtbl) |
334 | return; | 380 | return; |
335 | write_lock_bh(&pathtbl_resize_lock); | 381 | write_lock_bh(&pathtbl_resize_lock); |
336 | oldtbl = mesh_paths; | 382 | oldtbl = mesh_paths; |
337 | if (mesh_table_grow(mesh_paths, newtbl) < 0) { | 383 | if (mesh_table_grow(mesh_paths, newtbl) < 0) { |
338 | rcu_read_unlock(); | 384 | rcu_read_unlock(); |
339 | __mesh_table_free(newtbl); | 385 | __mesh_table_free(newtbl); |
340 | write_unlock_bh(&pathtbl_resize_lock); | 386 | write_unlock_bh(&pathtbl_resize_lock); |
341 | return; | 387 | return; |
342 | } | 388 | } |
343 | rcu_read_unlock(); | 389 | rcu_read_unlock(); |
344 | rcu_assign_pointer(mesh_paths, newtbl); | 390 | rcu_assign_pointer(mesh_paths, newtbl); |
345 | write_unlock_bh(&pathtbl_resize_lock); | 391 | write_unlock_bh(&pathtbl_resize_lock); |
346 | 392 | ||
347 | synchronize_rcu(); | 393 | synchronize_rcu(); |
348 | mesh_table_free(oldtbl, false); | 394 | mesh_table_free(oldtbl, false); |
349 | } | 395 | } |
350 | 396 | ||
351 | void mesh_mpp_table_grow(void) | 397 | void mesh_mpp_table_grow(void) |
352 | { | 398 | { |
353 | struct mesh_table *oldtbl, *newtbl; | 399 | struct mesh_table *oldtbl, *newtbl; |
354 | 400 | ||
355 | rcu_read_lock(); | 401 | rcu_read_lock(); |
356 | newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1); | 402 | newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1); |
357 | if (!newtbl) | 403 | if (!newtbl) |
358 | return; | 404 | return; |
359 | write_lock_bh(&pathtbl_resize_lock); | 405 | write_lock_bh(&pathtbl_resize_lock); |
360 | oldtbl = mpp_paths; | 406 | oldtbl = mpp_paths; |
361 | if (mesh_table_grow(mpp_paths, newtbl) < 0) { | 407 | if (mesh_table_grow(mpp_paths, newtbl) < 0) { |
362 | rcu_read_unlock(); | 408 | rcu_read_unlock(); |
363 | __mesh_table_free(newtbl); | 409 | __mesh_table_free(newtbl); |
364 | write_unlock_bh(&pathtbl_resize_lock); | 410 | write_unlock_bh(&pathtbl_resize_lock); |
365 | return; | 411 | return; |
366 | } | 412 | } |
367 | rcu_read_unlock(); | 413 | rcu_read_unlock(); |
368 | rcu_assign_pointer(mpp_paths, newtbl); | 414 | rcu_assign_pointer(mpp_paths, newtbl); |
369 | write_unlock_bh(&pathtbl_resize_lock); | 415 | write_unlock_bh(&pathtbl_resize_lock); |
370 | 416 | ||
371 | synchronize_rcu(); | 417 | synchronize_rcu(); |
372 | mesh_table_free(oldtbl, false); | 418 | mesh_table_free(oldtbl, false); |
373 | } | 419 | } |
374 | 420 | ||
/**
 * mpp_path_add - add a mesh proxy path (destination reached via proxy @mpp)
 *
 * @dst: address of the proxied destination (ETH_ALEN)
 * @mpp: address of the mesh proxy (next mesh hop towards @dst)
 * @sdata: local mesh subif the path belongs to
 *
 * Returns: 0 on success, -ENOTSUPP for self/multicast destinations,
 * -ENOMEM on allocation failure, -EEXIST if the entry is already present.
 *
 * Allocations use GFP_ATOMIC because this runs from the rx path.
 * Lock order: pathtbl_resize_lock (read) -> per-bucket hashwlock.
 */
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	/* allocate both pieces before taking any locks */
	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);

	/* reject duplicates for the same (sdata, dst) pair */
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	/* request a table grow once the mean chain length is exceeded */
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* resizing must happen in process context: defer to the
		 * interface work */
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
447 | 493 | ||
448 | 494 | ||
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		/* only touch active, non-fixed paths whose next hop is the
		 * broken peer; fixed paths were pinned by the user */
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			/* bump the sequence number so the PERR supersedes
			 * older routing information for this destination */
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			/* broadcast the path error to all neighbours;
			 * must be sent outside the state_lock */
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
		spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
485 | 531 | ||
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta - mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	/* NOTE(review): unlike mesh_plink_broken(), this walk takes neither
	 * rcu_read_lock() nor pathtbl_resize_lock; it appears to rely on the
	 * caller's context for table stability -- confirm this is safe
	 * against a concurrent table resize. */
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}
510 | 556 | ||
511 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) | 557 | void mesh_path_flush(struct ieee80211_sub_if_data *sdata) |
512 | { | 558 | { |
513 | struct mesh_path *mpath; | 559 | struct mesh_path *mpath; |
514 | struct mpath_node *node; | 560 | struct mpath_node *node; |
515 | struct hlist_node *p; | 561 | struct hlist_node *p; |
516 | int i; | 562 | int i; |
517 | 563 | ||
518 | for_each_mesh_entry(mesh_paths, p, node, i) { | 564 | for_each_mesh_entry(mesh_paths, p, node, i) { |
519 | mpath = node->mpath; | 565 | mpath = node->mpath; |
520 | if (mpath->sdata == sdata) | 566 | if (mpath->sdata == sdata) |
521 | mesh_path_del(mpath->dst, mpath->sdata); | 567 | mesh_path_del(mpath->dst, mpath->sdata); |
522 | } | 568 | } |
523 | } | 569 | } |
524 | 570 | ||
/*
 * RCU callback: final teardown of a path entry after the grace period.
 * The timer is stopped synchronously before the mpath memory is freed so
 * a concurrently running timer handler cannot touch freed memory.
 */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
535 | 581 | ||
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful, -ENXIO if no matching entry exists.
 *
 * Lock order: pathtbl_resize_lock (read) -> bucket hashwlock ->
 * mpath->state_lock.  The node itself is reclaimed via call_rcu() after
 * all current readers are done.
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			/* mark as resolving so concurrent users stop
			 * treating this entry as a usable route */
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	/* NOTE(review): the generation counter is bumped even when nothing
	 * was deleted (-ENXIO); harmless for iteration consistency, but
	 * confirm it is intentional. */
	mesh_paths_generation++;
	spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
579 | 625 | ||
580 | /** | 626 | /** |
581 | * mesh_path_tx_pending - sends pending frames in a mesh path queue | 627 | * mesh_path_tx_pending - sends pending frames in a mesh path queue |
582 | * | 628 | * |
583 | * @mpath: mesh path to activate | 629 | * @mpath: mesh path to activate |
584 | * | 630 | * |
585 | * Locking: the state_lock of the mpath structure must NOT be held when calling | 631 | * Locking: the state_lock of the mpath structure must NOT be held when calling |
586 | * this function. | 632 | * this function. |
587 | */ | 633 | */ |
588 | void mesh_path_tx_pending(struct mesh_path *mpath) | 634 | void mesh_path_tx_pending(struct mesh_path *mpath) |
589 | { | 635 | { |
590 | if (mpath->flags & MESH_PATH_ACTIVE) | 636 | if (mpath->flags & MESH_PATH_ACTIVE) |
591 | ieee80211_add_pending_skbs(mpath->sdata->local, | 637 | ieee80211_add_pending_skbs(mpath->sdata->local, |
592 | &mpath->frame_queue); | 638 | &mpath->frame_queue); |
593 | } | 639 | } |
594 | 640 | ||
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	/* addr4 != own address means we were forwarding, not originating:
	 * report the broken route back to the previous hop */
	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
631 | 677 | ||
632 | /** | 678 | /** |
633 | * mesh_path_flush_pending - free the pending queue of a mesh path | 679 | * mesh_path_flush_pending - free the pending queue of a mesh path |
634 | * | 680 | * |
635 | * @mpath: mesh path whose queue has to be freed | 681 | * @mpath: mesh path whose queue has to be freed |
636 | * | 682 | * |
637 | * Locking: the function must me called withing a rcu_read_lock region | 683 | * Locking: the function must me called withing a rcu_read_lock region |
638 | */ | 684 | */ |
639 | void mesh_path_flush_pending(struct mesh_path *mpath) | 685 | void mesh_path_flush_pending(struct mesh_path *mpath) |
640 | { | 686 | { |
641 | struct sk_buff *skb; | 687 | struct sk_buff *skb; |
642 | 688 | ||
643 | while ((skb = skb_dequeue(&mpath->frame_queue)) && | 689 | while ((skb = skb_dequeue(&mpath->frame_queue)) && |
644 | (mpath->flags & MESH_PATH_ACTIVE)) | 690 | (mpath->flags & MESH_PATH_ACTIVE)) |
645 | mesh_path_discard_frame(skb, mpath->sdata); | 691 | mesh_path_discard_frame(skb, mpath->sdata); |
646 | } | 692 | } |
647 | 693 | ||
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	/* reset routing metrics; MESH_PATH_FIXED prevents the path from
	 * expiring or being replaced by the path selection protocol */
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	/* flush queued frames only after dropping state_lock --
	 * mesh_path_tx_pending() must not be called with it held */
	mesh_path_tx_pending(mpath);
}
669 | 715 | ||
670 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) | 716 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) |
671 | { | 717 | { |
672 | struct mesh_path *mpath; | 718 | struct mesh_path *mpath; |
673 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); | 719 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); |
674 | mpath = node->mpath; | 720 | mpath = node->mpath; |
675 | hlist_del_rcu(p); | 721 | hlist_del_rcu(p); |
676 | if (free_leafs) | 722 | if (free_leafs) |
677 | kfree(mpath); | 723 | kfree(mpath); |
678 | kfree(node); | 724 | kfree(node); |
679 | } | 725 | } |
680 | 726 | ||
681 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | 727 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) |
682 | { | 728 | { |
683 | struct mesh_path *mpath; | 729 | struct mesh_path *mpath; |
684 | struct mpath_node *node, *new_node; | 730 | struct mpath_node *node, *new_node; |
685 | u32 hash_idx; | 731 | u32 hash_idx; |
686 | 732 | ||
687 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); | 733 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
688 | if (new_node == NULL) | 734 | if (new_node == NULL) |
689 | return -ENOMEM; | 735 | return -ENOMEM; |
690 | 736 | ||
691 | node = hlist_entry(p, struct mpath_node, list); | 737 | node = hlist_entry(p, struct mpath_node, list); |
692 | mpath = node->mpath; | 738 | mpath = node->mpath; |
693 | new_node->mpath = mpath; | 739 | new_node->mpath = mpath; |
694 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); | 740 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); |
695 | hlist_add_head(&new_node->list, | 741 | hlist_add_head(&new_node->list, |
696 | &newtbl->hash_buckets[hash_idx]); | 742 | &newtbl->hash_buckets[hash_idx]); |
697 | return 0; | 743 | return 0; |
698 | } | 744 | } |
699 | 745 | ||
700 | int mesh_pathtbl_init(void) | 746 | int mesh_pathtbl_init(void) |
701 | { | 747 | { |
702 | mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 748 | mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
703 | if (!mesh_paths) | 749 | if (!mesh_paths) |
704 | return -ENOMEM; | 750 | return -ENOMEM; |
705 | mesh_paths->free_node = &mesh_path_node_free; | 751 | mesh_paths->free_node = &mesh_path_node_free; |
706 | mesh_paths->copy_node = &mesh_path_node_copy; | 752 | mesh_paths->copy_node = &mesh_path_node_copy; |
707 | mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; | 753 | mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; |
708 | 754 | ||
709 | mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 755 | mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
710 | if (!mpp_paths) { | 756 | if (!mpp_paths) { |
711 | mesh_table_free(mesh_paths, true); | 757 | mesh_table_free(mesh_paths, true); |
712 | return -ENOMEM; | 758 | return -ENOMEM; |
713 | } | 759 | } |
714 | mpp_paths->free_node = &mesh_path_node_free; | 760 | mpp_paths->free_node = &mesh_path_node_free; |
715 | mpp_paths->copy_node = &mesh_path_node_copy; | 761 | mpp_paths->copy_node = &mesh_path_node_copy; |
716 | mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; | 762 | mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; |
717 | 763 | ||
718 | return 0; | 764 | return 0; |
719 | } | 765 | } |
720 | 766 | ||
/*
 * mesh_path_expire - delete stale paths belonging to @sdata
 *
 * Walks the whole table and removes every path that is neither being
 * resolved nor user-fixed and whose exp_time lies more than
 * MESH_PATH_EXPIRE in the past.  Called periodically from the mesh
 * housekeeping work.
 */
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock_bh(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
			time_after(jiffies,
			 mpath->exp_time + MESH_PATH_EXPIRE)) {
			/* drop state_lock first: mesh_path_del() takes the
			 * bucket lock and re-takes state_lock itself */
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock_bh(&pathtbl_resize_lock);
}
745 | 791 | ||
746 | void mesh_pathtbl_unregister(void) | 792 | void mesh_pathtbl_unregister(void) |