Commit f5e50cd0757cc97cd1caded0d3f07ff09b5319e4
Committed by John W. Linville
1 parent: ece1a2e7e8
Exists in master and in 6 other branches
mac80211: Improve mpath state locking
No need to take the mpath state lock when an mpath is removed. Also, no
need to take the lock when reading mpath flags.

Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Showing 2 changed files with 7 additions and 11 deletions
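The rule the commit message states is visible in the unchanged lookup code below: mpath->flags is read without the state lock (the MPATH_EXPIRED() test), and the lock is only taken when the flags are about to be modified, with the condition re-checked under the lock. As a rough illustration only, here is a self-contained user-space sketch of that double-checked pattern; struct path, path_expired() and the pthread spinlock are hypothetical stand-ins, not mac80211 code:

#include <pthread.h>
#include <stdbool.h>

struct path {
        unsigned int flags;             /* read locklessly, written under lock */
        unsigned long exp_time;
        pthread_spinlock_t state_lock;
};

#define PATH_ACTIVE 0x1

static bool path_expired(const struct path *p, unsigned long now)
{
        /* stand-in for MPATH_EXPIRED(): active and past its expiry time */
        return (p->flags & PATH_ACTIVE) && now > p->exp_time;
}

static void path_check_expiry(struct path *p, unsigned long now)
{
        /* cheap lockless read first ... */
        if (path_expired(p, now)) {
                pthread_spin_lock(&p->state_lock);
                /* ... then re-check under the lock before modifying;
                 * a stale read costs at most one redundant check */
                if (path_expired(p, now))
                        p->flags &= ~PATH_ACTIVE;
                pthread_spin_unlock(&p->state_lock);
        }
}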
net/mac80211/mesh.h
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Authors:    Luis Carlos Cobo <luisca@cozybit.com>
 *             Javier Cardona <javier@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef IEEE80211S_H
#define IEEE80211S_H

#include <linux/types.h>
#include <linux/jhash.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"


/* Data structures */

/**
 * enum mesh_path_flags - mac80211 mesh path flags
 *
 *
 *
 * @MESH_PATH_ACTIVE: the mesh path can be used for forwarding
 * @MESH_PATH_RESOLVING: the discovery process is running for this mesh path
 * @MESH_PATH_SN_VALID: the mesh path contains a valid destination sequence
 *      number
 * @MESH_PATH_FIXED: the mesh path has been manually set and should not be
 *      modified
 * @MESH_PATH_RESOLVED: the mesh path has been resolved
 *
 * MESH_PATH_RESOLVED is used by the mesh path timer to
 * decide when to stop or cancel the mesh path discovery.
 */
enum mesh_path_flags {
        MESH_PATH_ACTIVE = BIT(0),
        MESH_PATH_RESOLVING = BIT(1),
        MESH_PATH_SN_VALID = BIT(2),
        MESH_PATH_FIXED = BIT(3),
        MESH_PATH_RESOLVED = BIT(4),
};

/**
 * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks
 *
 *
 *
 * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
 * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
 *      to grow.
 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
 *      grow
 * @MESH_WORK_ROOT: the mesh root station needs to send a frame
 */
enum mesh_deferred_task_flags {
        MESH_WORK_HOUSEKEEPING,
        MESH_WORK_GROW_MPATH_TABLE,
        MESH_WORK_GROW_MPP_TABLE,
        MESH_WORK_ROOT,
};

/**
 * struct mesh_path - mac80211 mesh path structure
 *
 * @dst: mesh path destination mac address
 * @sdata: mesh subif
 * @next_hop: mesh neighbor to which frames for this destination will be
 *      forwarded
 * @timer: mesh path discovery timer
 * @frame_queue: pending queue for frames sent to this destination while the
 *      path is unresolved
 * @sn: target sequence number
 * @metric: current metric to this destination
 * @hop_count: hops to destination
 * @exp_time: in jiffies, when the path will expire or when it expired
 * @discovery_timeout: timeout (lapse in jiffies) used for the last discovery
 *      retry
 * @discovery_retries: number of discovery retries
 * @flags: mesh path flags, as specified on &enum mesh_path_flags
- * @state_lock: mesh path state lock
+ * @state_lock: mesh path state lock used to protect changes to the
+ *      mpath itself. No need to take this lock when adding or removing
+ *      an mpath to a hash bucket on a path table.
 * @is_gate: the destination station of this path is a mesh gate
 *
 *
 * The combination of dst and sdata is unique in the mesh path table. Since the
 * next_hop STA is only protected by RCU as well, deleting the STA must also
 * remove/substitute the mesh_path structure and wait until that is no longer
 * reachable before destroying the STA completely.
 */
struct mesh_path {
        u8 dst[ETH_ALEN];
        u8 mpp[ETH_ALEN];       /* used for MPP or MAP */
        struct ieee80211_sub_if_data *sdata;
        struct sta_info __rcu *next_hop;
        struct timer_list timer;
        struct sk_buff_head frame_queue;
        struct rcu_head rcu;
        u32 sn;
        u32 metric;
        u8 hop_count;
        unsigned long exp_time;
        u32 discovery_timeout;
        u8 discovery_retries;
        enum mesh_path_flags flags;
        spinlock_t state_lock;
        bool is_gate;
};

/**
 * struct mesh_table
 *
 * @hash_buckets: array of hash buckets of the table
 * @hashwlock: array of locks to protect write operations, one per bucket
 * @hash_mask: 2^size_order - 1, used to compute hash idx
 * @hash_rnd: random value used for hash computations
 * @entries: number of entries in the table
 * @free_node: function to free nodes of the table
 * @copy_node: function to copy nodes of the table
 * @size_order: determines size of the table, there will be 2^size_order hash
 *      buckets
 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
 *      reached, the table will grow
 * @known_gates: list of known mesh gates and their mpaths by the station. The
 *      gate's mpath may or may not be resolved and active.
 *
 * rcu_head: RCU head to free the table
 */
struct mesh_table {
        /* Number of buckets will be 2^N */
        struct hlist_head *hash_buckets;
        spinlock_t *hashwlock;          /* One per bucket, for add/del */
        unsigned int hash_mask;         /* (2^size_order) - 1 */
        __u32 hash_rnd;                 /* Used for hash generation */
        atomic_t entries;               /* Up to MAX_MESH_NEIGHBOURS */
        void (*free_node) (struct hlist_node *p, bool free_leafs);
        int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
        int size_order;
        int mean_chain_len;
        struct hlist_head *known_gates;
        spinlock_t gates_lock;

        struct rcu_head rcu_head;
};

/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS             256
#define RMC_QUEUE_MAX_LEN       4
#define RMC_TIMEOUT             (3 * HZ)

/**
 * struct rmc_entry - entry in the Recent Multicast Cache
 *
 * @seqnum: mesh sequence number of the frame
 * @exp_time: expiration time of the entry, in jiffies
 * @sa: source address of the frame
 *
 * The Recent Multicast Cache keeps track of the latest multicast frames that
 * have been received by a mesh interface and discards received multicast frames
 * that are found in the cache.
 */
struct rmc_entry {
        struct list_head list;
        u32 seqnum;
        unsigned long exp_time;
        u8 sa[ETH_ALEN];
};

struct mesh_rmc {
        struct rmc_entry bucket[RMC_BUCKETS];
        u32 idx_mask;
};

#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)

#define MESH_DEFAULT_BEACON_INTERVAL 1000       /* in 1024 us units */

#define MESH_PATH_EXPIRE (600 * HZ)

/* Default maximum number of plinks per interface */
#define MESH_MAX_PLINKS 256

/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS 1024

/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
                const u8 *da, const u8 *sa);
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
                struct ieee80211_sub_if_data *sdata, char *addr4or5,
                char *addr6);
int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
                struct ieee80211_sub_if_data *sdata);
bool mesh_matches_local(struct ieee802_11_elems *ie,
                struct ieee80211_sub_if_data *sdata);
void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
void mesh_mgmt_ies_add(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_meshconf_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_meshid_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_rsn_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_vendor_ies(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_srates_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_ext_srates_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
int mesh_add_ds_params_ie(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
                struct sta_info *stainfo, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);

/* Mesh paths */
int mesh_nexthop_lookup(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata);
struct mesh_path *mesh_path_lookup(u8 *dst,
                struct ieee80211_sub_if_data *sdata);
struct mesh_path *mpp_path_lookup(u8 *dst,
                struct ieee80211_sub_if_data *sdata);
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata);
struct mesh_path *mesh_path_lookup_by_idx(int idx,
                struct ieee80211_sub_if_data *sdata);
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_mgmt *mgmt, size_t len);
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);

int mesh_path_add_gate(struct mesh_path *mpath);
int mesh_path_send_to_gates(struct mesh_path *mpath);
int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(u8 *hw_addr, u32 rates,
                struct ieee80211_sub_if_data *sdata,
                struct ieee802_11_elems *ie);
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
void mesh_plink_broken(struct sta_info *sta);
void mesh_plink_deactivate(struct sta_info *sta);
int mesh_plink_open(struct sta_info *sta);
void mesh_plink_block(struct sta_info *sta);
void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
                struct ieee80211_mgmt *mgmt, size_t len,
                struct ieee80211_rx_status *rx_status);

/* Private interfaces */
/* Mesh tables */
void mesh_mpath_table_grow(void);
void mesh_mpp_table_grow(void);
/* Mesh paths */
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
                const u8 *ra, struct ieee80211_sub_if_data *sdata);
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
int mesh_pathtbl_init(void);
void mesh_pathtbl_unregister(void);
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_path_discard_frame(struct sk_buff *skb,
                struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);

bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
extern int mesh_paths_generation;

#ifdef CONFIG_MAC80211_MESH
extern int mesh_allocated;

static inline int mesh_plink_free_count(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.mshcfg.dot11MeshMaxPeerLinks -
               atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
}

static inline bool mesh_plink_availables(struct ieee80211_sub_if_data *sdata)
{
        return (min_t(long, mesh_plink_free_count(sdata),
                MESH_MAX_PLINKS - sdata->local->num_sta)) > 0;
}

static inline void mesh_path_activate(struct mesh_path *mpath)
{
        mpath->flags |= MESH_PATH_ACTIVE | MESH_PATH_RESOLVED;
}

static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
}

void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);

void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
void mesh_plink_quiesce(struct sta_info *sta);
void mesh_plink_restart(struct sta_info *sta);
#else
#define mesh_allocated 0
static inline void
ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{}
static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
{}
static inline void mesh_plink_quiesce(struct sta_info *sta) {}
static inline void mesh_plink_restart(struct sta_info *sta) {}
static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
{ return false; }
#endif

#endif /* IEEE80211S_H */
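The struct mesh_table documentation above relies on two arithmetic facts: with 2^size_order buckets, a bucket index is just the hash masked with hash_mask, and the table grows once the mean chain length reaches mean_chain_len. A small user-space sketch of those two calculations under the same assumptions (struct tbl_geometry, bucket_idx and needs_grow are hypothetical helpers, not kernel API):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the sizing fields of struct mesh_table. */
struct tbl_geometry {
        int size_order;         /* table has 2^size_order buckets */
        unsigned int hash_mask; /* (1 << size_order) - 1 */
        int mean_chain_len;     /* growth threshold; MEAN_CHAIN_LEN is 2 */
};

/* Bucket index is a mask, not a modulo; this is only correct because
 * the bucket count is a power of two. */
static unsigned int bucket_idx(uint32_t hash, const struct tbl_geometry *g)
{
        return hash & g->hash_mask;
}

/* Same condition mesh_table_grow() checks before copying entries:
 * grow once entries >= mean_chain_len * number of buckets, i.e. once
 * the average chain length would exceed the threshold. */
static bool needs_grow(int entries, const struct tbl_geometry *g)
{
        return entries >= g->mean_chain_len * (g->hash_mask + 1);
}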
net/mac80211/mesh_pathtbl.c
1 | /* | 1 | /* |
2 | * Copyright (c) 2008, 2009 open80211s Ltd. | 2 | * Copyright (c) 2008, 2009 open80211s Ltd. |
3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> | 3 | * Author: Luis Carlos Cobo <luisca@cozybit.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/etherdevice.h> | 10 | #include <linux/etherdevice.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <net/mac80211.h> | 16 | #include <net/mac80211.h> |
17 | #include "ieee80211_i.h" | 17 | #include "ieee80211_i.h" |
18 | #include "mesh.h" | 18 | #include "mesh.h" |
19 | 19 | ||
20 | #ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG | 20 | #ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG |
21 | #define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) | 21 | #define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args) |
22 | #else | 22 | #else |
23 | #define mpath_dbg(fmt, args...) do { (void)(0); } while (0) | 23 | #define mpath_dbg(fmt, args...) do { (void)(0); } while (0) |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ | 26 | /* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */ |
27 | #define INIT_PATHS_SIZE_ORDER 2 | 27 | #define INIT_PATHS_SIZE_ORDER 2 |
28 | 28 | ||
29 | /* Keep the mean chain length below this constant */ | 29 | /* Keep the mean chain length below this constant */ |
30 | #define MEAN_CHAIN_LEN 2 | 30 | #define MEAN_CHAIN_LEN 2 |
31 | 31 | ||
32 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ | 32 | #define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \ |
33 | time_after(jiffies, mpath->exp_time) && \ | 33 | time_after(jiffies, mpath->exp_time) && \ |
34 | !(mpath->flags & MESH_PATH_FIXED)) | 34 | !(mpath->flags & MESH_PATH_FIXED)) |
35 | 35 | ||
36 | struct mpath_node { | 36 | struct mpath_node { |
37 | struct hlist_node list; | 37 | struct hlist_node list; |
38 | struct rcu_head rcu; | 38 | struct rcu_head rcu; |
39 | /* This indirection allows two different tables to point to the same | 39 | /* This indirection allows two different tables to point to the same |
40 | * mesh_path structure, useful when resizing | 40 | * mesh_path structure, useful when resizing |
41 | */ | 41 | */ |
42 | struct mesh_path *mpath; | 42 | struct mesh_path *mpath; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | static struct mesh_table __rcu *mesh_paths; | 45 | static struct mesh_table __rcu *mesh_paths; |
46 | static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ | 46 | static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ |
47 | 47 | ||
48 | int mesh_paths_generation; | 48 | int mesh_paths_generation; |
49 | 49 | ||
50 | /* This lock will have the grow table function as writer and add / delete nodes | 50 | /* This lock will have the grow table function as writer and add / delete nodes |
51 | * as readers. When reading the table (i.e. doing lookups) we are well protected | 51 | * as readers. When reading the table (i.e. doing lookups) we are well protected |
52 | * by RCU | 52 | * by RCU |
53 | */ | 53 | */ |
54 | static DEFINE_RWLOCK(pathtbl_resize_lock); | 54 | static DEFINE_RWLOCK(pathtbl_resize_lock); |
55 | 55 | ||
56 | 56 | ||
57 | static inline struct mesh_table *resize_dereference_mesh_paths(void) | 57 | static inline struct mesh_table *resize_dereference_mesh_paths(void) |
58 | { | 58 | { |
59 | return rcu_dereference_protected(mesh_paths, | 59 | return rcu_dereference_protected(mesh_paths, |
60 | lockdep_is_held(&pathtbl_resize_lock)); | 60 | lockdep_is_held(&pathtbl_resize_lock)); |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline struct mesh_table *resize_dereference_mpp_paths(void) | 63 | static inline struct mesh_table *resize_dereference_mpp_paths(void) |
64 | { | 64 | { |
65 | return rcu_dereference_protected(mpp_paths, | 65 | return rcu_dereference_protected(mpp_paths, |
66 | lockdep_is_held(&pathtbl_resize_lock)); | 66 | lockdep_is_held(&pathtbl_resize_lock)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); | 69 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * CAREFUL -- "tbl" must not be an expression, | 72 | * CAREFUL -- "tbl" must not be an expression, |
73 | * in particular not an rcu_dereference(), since | 73 | * in particular not an rcu_dereference(), since |
74 | * it's used twice. So it is illegal to do | 74 | * it's used twice. So it is illegal to do |
75 | * for_each_mesh_entry(rcu_dereference(...), ...) | 75 | * for_each_mesh_entry(rcu_dereference(...), ...) |
76 | */ | 76 | */ |
77 | #define for_each_mesh_entry(tbl, p, node, i) \ | 77 | #define for_each_mesh_entry(tbl, p, node, i) \ |
78 | for (i = 0; i <= tbl->hash_mask; i++) \ | 78 | for (i = 0; i <= tbl->hash_mask; i++) \ |
79 | hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) | 79 | hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) |
80 | 80 | ||
81 | 81 | ||
82 | static struct mesh_table *mesh_table_alloc(int size_order) | 82 | static struct mesh_table *mesh_table_alloc(int size_order) |
83 | { | 83 | { |
84 | int i; | 84 | int i; |
85 | struct mesh_table *newtbl; | 85 | struct mesh_table *newtbl; |
86 | 86 | ||
87 | newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); | 87 | newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC); |
88 | if (!newtbl) | 88 | if (!newtbl) |
89 | return NULL; | 89 | return NULL; |
90 | 90 | ||
91 | newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * | 91 | newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * |
92 | (1 << size_order), GFP_ATOMIC); | 92 | (1 << size_order), GFP_ATOMIC); |
93 | 93 | ||
94 | if (!newtbl->hash_buckets) { | 94 | if (!newtbl->hash_buckets) { |
95 | kfree(newtbl); | 95 | kfree(newtbl); |
96 | return NULL; | 96 | return NULL; |
97 | } | 97 | } |
98 | 98 | ||
99 | newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * | 99 | newtbl->hashwlock = kmalloc(sizeof(spinlock_t) * |
100 | (1 << size_order), GFP_ATOMIC); | 100 | (1 << size_order), GFP_ATOMIC); |
101 | if (!newtbl->hashwlock) { | 101 | if (!newtbl->hashwlock) { |
102 | kfree(newtbl->hash_buckets); | 102 | kfree(newtbl->hash_buckets); |
103 | kfree(newtbl); | 103 | kfree(newtbl); |
104 | return NULL; | 104 | return NULL; |
105 | } | 105 | } |
106 | 106 | ||
107 | newtbl->size_order = size_order; | 107 | newtbl->size_order = size_order; |
108 | newtbl->hash_mask = (1 << size_order) - 1; | 108 | newtbl->hash_mask = (1 << size_order) - 1; |
109 | atomic_set(&newtbl->entries, 0); | 109 | atomic_set(&newtbl->entries, 0); |
110 | get_random_bytes(&newtbl->hash_rnd, | 110 | get_random_bytes(&newtbl->hash_rnd, |
111 | sizeof(newtbl->hash_rnd)); | 111 | sizeof(newtbl->hash_rnd)); |
112 | for (i = 0; i <= newtbl->hash_mask; i++) | 112 | for (i = 0; i <= newtbl->hash_mask; i++) |
113 | spin_lock_init(&newtbl->hashwlock[i]); | 113 | spin_lock_init(&newtbl->hashwlock[i]); |
114 | spin_lock_init(&newtbl->gates_lock); | 114 | spin_lock_init(&newtbl->gates_lock); |
115 | 115 | ||
116 | return newtbl; | 116 | return newtbl; |
117 | } | 117 | } |
118 | 118 | ||
119 | static void __mesh_table_free(struct mesh_table *tbl) | 119 | static void __mesh_table_free(struct mesh_table *tbl) |
120 | { | 120 | { |
121 | kfree(tbl->hash_buckets); | 121 | kfree(tbl->hash_buckets); |
122 | kfree(tbl->hashwlock); | 122 | kfree(tbl->hashwlock); |
123 | kfree(tbl); | 123 | kfree(tbl); |
124 | } | 124 | } |
125 | 125 | ||
126 | static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) | 126 | static void mesh_table_free(struct mesh_table *tbl, bool free_leafs) |
127 | { | 127 | { |
128 | struct hlist_head *mesh_hash; | 128 | struct hlist_head *mesh_hash; |
129 | struct hlist_node *p, *q; | 129 | struct hlist_node *p, *q; |
130 | struct mpath_node *gate; | 130 | struct mpath_node *gate; |
131 | int i; | 131 | int i; |
132 | 132 | ||
133 | mesh_hash = tbl->hash_buckets; | 133 | mesh_hash = tbl->hash_buckets; |
134 | for (i = 0; i <= tbl->hash_mask; i++) { | 134 | for (i = 0; i <= tbl->hash_mask; i++) { |
135 | spin_lock_bh(&tbl->hashwlock[i]); | 135 | spin_lock_bh(&tbl->hashwlock[i]); |
136 | hlist_for_each_safe(p, q, &mesh_hash[i]) { | 136 | hlist_for_each_safe(p, q, &mesh_hash[i]) { |
137 | tbl->free_node(p, free_leafs); | 137 | tbl->free_node(p, free_leafs); |
138 | atomic_dec(&tbl->entries); | 138 | atomic_dec(&tbl->entries); |
139 | } | 139 | } |
140 | spin_unlock_bh(&tbl->hashwlock[i]); | 140 | spin_unlock_bh(&tbl->hashwlock[i]); |
141 | } | 141 | } |
142 | if (free_leafs) { | 142 | if (free_leafs) { |
143 | spin_lock_bh(&tbl->gates_lock); | 143 | spin_lock_bh(&tbl->gates_lock); |
144 | hlist_for_each_entry_safe(gate, p, q, | 144 | hlist_for_each_entry_safe(gate, p, q, |
145 | tbl->known_gates, list) { | 145 | tbl->known_gates, list) { |
146 | hlist_del(&gate->list); | 146 | hlist_del(&gate->list); |
147 | kfree(gate); | 147 | kfree(gate); |
148 | } | 148 | } |
149 | kfree(tbl->known_gates); | 149 | kfree(tbl->known_gates); |
150 | spin_unlock_bh(&tbl->gates_lock); | 150 | spin_unlock_bh(&tbl->gates_lock); |
151 | } | 151 | } |
152 | 152 | ||
153 | __mesh_table_free(tbl); | 153 | __mesh_table_free(tbl); |
154 | } | 154 | } |
155 | 155 | ||
156 | static int mesh_table_grow(struct mesh_table *oldtbl, | 156 | static int mesh_table_grow(struct mesh_table *oldtbl, |
157 | struct mesh_table *newtbl) | 157 | struct mesh_table *newtbl) |
158 | { | 158 | { |
159 | struct hlist_head *oldhash; | 159 | struct hlist_head *oldhash; |
160 | struct hlist_node *p, *q; | 160 | struct hlist_node *p, *q; |
161 | int i; | 161 | int i; |
162 | 162 | ||
163 | if (atomic_read(&oldtbl->entries) | 163 | if (atomic_read(&oldtbl->entries) |
164 | < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1)) | 164 | < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1)) |
165 | return -EAGAIN; | 165 | return -EAGAIN; |
166 | 166 | ||
167 | newtbl->free_node = oldtbl->free_node; | 167 | newtbl->free_node = oldtbl->free_node; |
168 | newtbl->mean_chain_len = oldtbl->mean_chain_len; | 168 | newtbl->mean_chain_len = oldtbl->mean_chain_len; |
169 | newtbl->copy_node = oldtbl->copy_node; | 169 | newtbl->copy_node = oldtbl->copy_node; |
170 | newtbl->known_gates = oldtbl->known_gates; | 170 | newtbl->known_gates = oldtbl->known_gates; |
171 | atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); | 171 | atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries)); |
172 | 172 | ||
173 | oldhash = oldtbl->hash_buckets; | 173 | oldhash = oldtbl->hash_buckets; |
174 | for (i = 0; i <= oldtbl->hash_mask; i++) | 174 | for (i = 0; i <= oldtbl->hash_mask; i++) |
175 | hlist_for_each(p, &oldhash[i]) | 175 | hlist_for_each(p, &oldhash[i]) |
176 | if (oldtbl->copy_node(p, newtbl) < 0) | 176 | if (oldtbl->copy_node(p, newtbl) < 0) |
177 | goto errcopy; | 177 | goto errcopy; |
178 | 178 | ||
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | errcopy: | 181 | errcopy: |
182 | for (i = 0; i <= newtbl->hash_mask; i++) { | 182 | for (i = 0; i <= newtbl->hash_mask; i++) { |
183 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) | 183 | hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) |
184 | oldtbl->free_node(p, 0); | 184 | oldtbl->free_node(p, 0); |
185 | } | 185 | } |
186 | return -ENOMEM; | 186 | return -ENOMEM; |
187 | } | 187 | } |
188 | 188 | ||
189 | static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, | 189 | static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, |
190 | struct mesh_table *tbl) | 190 | struct mesh_table *tbl) |
191 | { | 191 | { |
192 | /* Use last four bytes of hw addr and interface index as hash index */ | 192 | /* Use last four bytes of hw addr and interface index as hash index */ |
193 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) | 193 | return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd) |
194 | & tbl->hash_mask; | 194 | & tbl->hash_mask; |
195 | } | 195 | } |
196 | 196 | ||
197 | 197 | ||
198 | /** | 198 | /** |
199 | * | 199 | * |
200 | * mesh_path_assign_nexthop - update mesh path next hop | 200 | * mesh_path_assign_nexthop - update mesh path next hop |
201 | * | 201 | * |
202 | * @mpath: mesh path to update | 202 | * @mpath: mesh path to update |
203 | * @sta: next hop to assign | 203 | * @sta: next hop to assign |
204 | * | 204 | * |
205 | * Locking: mpath->state_lock must be held when calling this function | 205 | * Locking: mpath->state_lock must be held when calling this function |
206 | */ | 206 | */ |
207 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) | 207 | void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) |
208 | { | 208 | { |
209 | struct sk_buff *skb; | 209 | struct sk_buff *skb; |
210 | struct ieee80211_hdr *hdr; | 210 | struct ieee80211_hdr *hdr; |
211 | struct sk_buff_head tmpq; | 211 | struct sk_buff_head tmpq; |
212 | unsigned long flags; | 212 | unsigned long flags; |
213 | 213 | ||
214 | rcu_assign_pointer(mpath->next_hop, sta); | 214 | rcu_assign_pointer(mpath->next_hop, sta); |
215 | 215 | ||
216 | __skb_queue_head_init(&tmpq); | 216 | __skb_queue_head_init(&tmpq); |
217 | 217 | ||
218 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); | 218 | spin_lock_irqsave(&mpath->frame_queue.lock, flags); |
219 | 219 | ||
220 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { | 220 | while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { |
221 | hdr = (struct ieee80211_hdr *) skb->data; | 221 | hdr = (struct ieee80211_hdr *) skb->data; |
222 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); | 222 | memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); |
223 | __skb_queue_tail(&tmpq, skb); | 223 | __skb_queue_tail(&tmpq, skb); |
224 | } | 224 | } |
225 | 225 | ||
226 | skb_queue_splice(&tmpq, &mpath->frame_queue); | 226 | skb_queue_splice(&tmpq, &mpath->frame_queue); |
227 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); | 227 | spin_unlock_irqrestore(&mpath->frame_queue.lock, flags); |
228 | } | 228 | } |
229 | 229 | ||
230 | static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, | 230 | static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, |
231 | struct mesh_path *gate_mpath) | 231 | struct mesh_path *gate_mpath) |
232 | { | 232 | { |
233 | struct ieee80211_hdr *hdr; | 233 | struct ieee80211_hdr *hdr; |
234 | struct ieee80211s_hdr *mshdr; | 234 | struct ieee80211s_hdr *mshdr; |
235 | int mesh_hdrlen, hdrlen; | 235 | int mesh_hdrlen, hdrlen; |
236 | char *next_hop; | 236 | char *next_hop; |
237 | 237 | ||
238 | hdr = (struct ieee80211_hdr *) skb->data; | 238 | hdr = (struct ieee80211_hdr *) skb->data; |
239 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 239 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
240 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 240 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
241 | 241 | ||
242 | if (!(mshdr->flags & MESH_FLAGS_AE)) { | 242 | if (!(mshdr->flags & MESH_FLAGS_AE)) { |
243 | /* size of the fixed part of the mesh header */ | 243 | /* size of the fixed part of the mesh header */ |
244 | mesh_hdrlen = 6; | 244 | mesh_hdrlen = 6; |
245 | 245 | ||
246 | /* make room for the two extended addresses */ | 246 | /* make room for the two extended addresses */ |
247 | skb_push(skb, 2 * ETH_ALEN); | 247 | skb_push(skb, 2 * ETH_ALEN); |
248 | memmove(skb->data, hdr, hdrlen + mesh_hdrlen); | 248 | memmove(skb->data, hdr, hdrlen + mesh_hdrlen); |
249 | 249 | ||
250 | hdr = (struct ieee80211_hdr *) skb->data; | 250 | hdr = (struct ieee80211_hdr *) skb->data; |
251 | 251 | ||
252 | /* we preserve the previous mesh header and only add | 252 | /* we preserve the previous mesh header and only add |
253 | * the new addreses */ | 253 | * the new addreses */ |
254 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); | 254 | mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); |
255 | mshdr->flags = MESH_FLAGS_AE_A5_A6; | 255 | mshdr->flags = MESH_FLAGS_AE_A5_A6; |
256 | memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); | 256 | memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN); |
257 | memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); | 257 | memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* update next hop */ | 260 | /* update next hop */ |
261 | hdr = (struct ieee80211_hdr *) skb->data; | 261 | hdr = (struct ieee80211_hdr *) skb->data; |
262 | rcu_read_lock(); | 262 | rcu_read_lock(); |
263 | next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; | 263 | next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr; |
264 | memcpy(hdr->addr1, next_hop, ETH_ALEN); | 264 | memcpy(hdr->addr1, next_hop, ETH_ALEN); |
265 | rcu_read_unlock(); | 265 | rcu_read_unlock(); |
266 | memcpy(hdr->addr3, dst_addr, ETH_ALEN); | 266 | memcpy(hdr->addr3, dst_addr, ETH_ALEN); |
267 | } | 267 | } |
268 | 268 | ||
269 | /** | 269 | /** |
270 | * | 270 | * |
271 | * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another | 271 | * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another |
272 | * | 272 | * |
273 | * This function is used to transfer or copy frames from an unresolved mpath to | 273 | * This function is used to transfer or copy frames from an unresolved mpath to |
274 | * a gate mpath. The function also adds the Address Extension field and | 274 | * a gate mpath. The function also adds the Address Extension field and |
275 | * updates the next hop. | 275 | * updates the next hop. |
276 | * | 276 | * |
277 | * If a frame already has an Address Extension field, only the next hop and | 277 | * If a frame already has an Address Extension field, only the next hop and |
278 | * destination addresses are updated. | 278 | * destination addresses are updated. |
279 | * | 279 | * |
280 | * The gate mpath must be an active mpath with a valid mpath->next_hop. | 280 | * The gate mpath must be an active mpath with a valid mpath->next_hop. |
281 | * | 281 | * |
282 | * @mpath: An active mpath the frames will be sent to (i.e. the gate) | 282 | * @mpath: An active mpath the frames will be sent to (i.e. the gate) |
283 | * @from_mpath: The failed mpath | 283 | * @from_mpath: The failed mpath |
284 | * @copy: When true, copy all the frames to the new mpath queue. When false, | 284 | * @copy: When true, copy all the frames to the new mpath queue. When false, |
285 | * move them. | 285 | * move them. |
286 | */ | 286 | */ |
287 | static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, | 287 | static void mesh_path_move_to_queue(struct mesh_path *gate_mpath, |
288 | struct mesh_path *from_mpath, | 288 | struct mesh_path *from_mpath, |
289 | bool copy) | 289 | bool copy) |
290 | { | 290 | { |
291 | struct sk_buff *skb, *cp_skb = NULL; | 291 | struct sk_buff *skb, *cp_skb = NULL; |
292 | struct sk_buff_head gateq, failq; | 292 | struct sk_buff_head gateq, failq; |
293 | unsigned long flags; | 293 | unsigned long flags; |
294 | int num_skbs; | 294 | int num_skbs; |
295 | 295 | ||
296 | BUG_ON(gate_mpath == from_mpath); | 296 | BUG_ON(gate_mpath == from_mpath); |
297 | BUG_ON(!gate_mpath->next_hop); | 297 | BUG_ON(!gate_mpath->next_hop); |
298 | 298 | ||
299 | __skb_queue_head_init(&gateq); | 299 | __skb_queue_head_init(&gateq); |
300 | __skb_queue_head_init(&failq); | 300 | __skb_queue_head_init(&failq); |
301 | 301 | ||
302 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); | 302 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); |
303 | skb_queue_splice_init(&from_mpath->frame_queue, &failq); | 303 | skb_queue_splice_init(&from_mpath->frame_queue, &failq); |
304 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); | 304 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); |
305 | 305 | ||
306 | num_skbs = skb_queue_len(&failq); | 306 | num_skbs = skb_queue_len(&failq); |
307 | 307 | ||
308 | while (num_skbs--) { | 308 | while (num_skbs--) { |
309 | skb = __skb_dequeue(&failq); | 309 | skb = __skb_dequeue(&failq); |
310 | if (copy) { | 310 | if (copy) { |
311 | cp_skb = skb_copy(skb, GFP_ATOMIC); | 311 | cp_skb = skb_copy(skb, GFP_ATOMIC); |
312 | if (cp_skb) | 312 | if (cp_skb) |
313 | __skb_queue_tail(&failq, cp_skb); | 313 | __skb_queue_tail(&failq, cp_skb); |
314 | } | 314 | } |
315 | 315 | ||
316 | prepare_for_gate(skb, gate_mpath->dst, gate_mpath); | 316 | prepare_for_gate(skb, gate_mpath->dst, gate_mpath); |
317 | __skb_queue_tail(&gateq, skb); | 317 | __skb_queue_tail(&gateq, skb); |
318 | } | 318 | } |
319 | 319 | ||
320 | spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); | 320 | spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); |
321 | skb_queue_splice(&gateq, &gate_mpath->frame_queue); | 321 | skb_queue_splice(&gateq, &gate_mpath->frame_queue); |
322 | mpath_dbg("Mpath queue for gate %pM has %d frames\n", | 322 | mpath_dbg("Mpath queue for gate %pM has %d frames\n", |
323 | gate_mpath->dst, | 323 | gate_mpath->dst, |
324 | skb_queue_len(&gate_mpath->frame_queue)); | 324 | skb_queue_len(&gate_mpath->frame_queue)); |
325 | spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); | 325 | spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags); |
326 | 326 | ||
327 | if (!copy) | 327 | if (!copy) |
328 | return; | 328 | return; |
329 | 329 | ||
330 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); | 330 | spin_lock_irqsave(&from_mpath->frame_queue.lock, flags); |
331 | skb_queue_splice(&failq, &from_mpath->frame_queue); | 331 | skb_queue_splice(&failq, &from_mpath->frame_queue); |
332 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); | 332 | spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags); |
333 | } | 333 | } |
334 | 334 | ||
335 | 335 | ||
336 | /** | 336 | /** |
337 | * mesh_path_lookup - look up a path in the mesh path table | 337 | * mesh_path_lookup - look up a path in the mesh path table |
338 | * @dst: hardware address (ETH_ALEN length) of destination | 338 | * @dst: hardware address (ETH_ALEN length) of destination |
339 | * @sdata: local subif | 339 | * @sdata: local subif |
340 | * | 340 | * |
341 | * Returns: pointer to the mesh path structure, or NULL if not found | 341 | * Returns: pointer to the mesh path structure, or NULL if not found |
342 | * | 342 | * |
343 | * Locking: must be called within a read rcu section. | 343 | * Locking: must be called within a read rcu section. |
344 | */ | 344 | */ |
345 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | 345 | struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
346 | { | 346 | { |
347 | struct mesh_path *mpath; | 347 | struct mesh_path *mpath; |
348 | struct hlist_node *n; | 348 | struct hlist_node *n; |
349 | struct hlist_head *bucket; | 349 | struct hlist_head *bucket; |
350 | struct mesh_table *tbl; | 350 | struct mesh_table *tbl; |
351 | struct mpath_node *node; | 351 | struct mpath_node *node; |
352 | 352 | ||
353 | tbl = rcu_dereference(mesh_paths); | 353 | tbl = rcu_dereference(mesh_paths); |
354 | 354 | ||
355 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 355 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
356 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 356 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
357 | mpath = node->mpath; | 357 | mpath = node->mpath; |
358 | if (mpath->sdata == sdata && | 358 | if (mpath->sdata == sdata && |
359 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | 359 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
360 | if (MPATH_EXPIRED(mpath)) { | 360 | if (MPATH_EXPIRED(mpath)) { |
361 | spin_lock_bh(&mpath->state_lock); | 361 | spin_lock_bh(&mpath->state_lock); |
362 | if (MPATH_EXPIRED(mpath)) | 362 | if (MPATH_EXPIRED(mpath)) |
363 | mpath->flags &= ~MESH_PATH_ACTIVE; | 363 | mpath->flags &= ~MESH_PATH_ACTIVE; |
364 | spin_unlock_bh(&mpath->state_lock); | 364 | spin_unlock_bh(&mpath->state_lock); |
365 | } | 365 | } |
366 | return mpath; | 366 | return mpath; |
367 | } | 367 | } |
368 | } | 368 | } |
369 | return NULL; | 369 | return NULL; |
370 | } | 370 | } |
371 | 371 | ||
372 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) | 372 | struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata) |
373 | { | 373 | { |
374 | struct mesh_path *mpath; | 374 | struct mesh_path *mpath; |
375 | struct hlist_node *n; | 375 | struct hlist_node *n; |
376 | struct hlist_head *bucket; | 376 | struct hlist_head *bucket; |
377 | struct mesh_table *tbl; | 377 | struct mesh_table *tbl; |
378 | struct mpath_node *node; | 378 | struct mpath_node *node; |
379 | 379 | ||
380 | tbl = rcu_dereference(mpp_paths); | 380 | tbl = rcu_dereference(mpp_paths); |
381 | 381 | ||
382 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; | 382 | bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)]; |
383 | hlist_for_each_entry_rcu(node, n, bucket, list) { | 383 | hlist_for_each_entry_rcu(node, n, bucket, list) { |
384 | mpath = node->mpath; | 384 | mpath = node->mpath; |
385 | if (mpath->sdata == sdata && | 385 | if (mpath->sdata == sdata && |
386 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { | 386 | memcmp(dst, mpath->dst, ETH_ALEN) == 0) { |
387 | if (MPATH_EXPIRED(mpath)) { | 387 | if (MPATH_EXPIRED(mpath)) { |
388 | spin_lock_bh(&mpath->state_lock); | 388 | spin_lock_bh(&mpath->state_lock); |
389 | if (MPATH_EXPIRED(mpath)) | 389 | if (MPATH_EXPIRED(mpath)) |
390 | mpath->flags &= ~MESH_PATH_ACTIVE; | 390 | mpath->flags &= ~MESH_PATH_ACTIVE; |
391 | spin_unlock_bh(&mpath->state_lock); | 391 | spin_unlock_bh(&mpath->state_lock); |
392 | } | 392 | } |
393 | return mpath; | 393 | return mpath; |
394 | } | 394 | } |
395 | } | 395 | } |
396 | return NULL; | 396 | return NULL; |
397 | } | 397 | } |
398 | 398 | ||
399 | 399 | ||
400 | /** | 400 | /** |
401 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index | 401 | * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index |
402 | * @idx: index | 402 | * @idx: index |
403 | * @sdata: local subif, or NULL for all entries | 403 | * @sdata: local subif, or NULL for all entries |
404 | * | 404 | * |
405 | * Returns: pointer to the mesh path structure, or NULL if not found. | 405 | * Returns: pointer to the mesh path structure, or NULL if not found. |
406 | * | 406 | * |
407 | * Locking: must be called within a read rcu section. | 407 | * Locking: must be called within a read rcu section. |
408 | */ | 408 | */ |
409 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) | 409 | struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) |
410 | { | 410 | { |
411 | struct mesh_table *tbl = rcu_dereference(mesh_paths); | 411 | struct mesh_table *tbl = rcu_dereference(mesh_paths); |
412 | struct mpath_node *node; | 412 | struct mpath_node *node; |
413 | struct hlist_node *p; | 413 | struct hlist_node *p; |
414 | int i; | 414 | int i; |
415 | int j = 0; | 415 | int j = 0; |
416 | 416 | ||
417 | for_each_mesh_entry(tbl, p, node, i) { | 417 | for_each_mesh_entry(tbl, p, node, i) { |
418 | if (sdata && node->mpath->sdata != sdata) | 418 | if (sdata && node->mpath->sdata != sdata) |
419 | continue; | 419 | continue; |
420 | if (j++ == idx) { | 420 | if (j++ == idx) { |
421 | if (MPATH_EXPIRED(node->mpath)) { | 421 | if (MPATH_EXPIRED(node->mpath)) { |
422 | spin_lock_bh(&node->mpath->state_lock); | 422 | spin_lock_bh(&node->mpath->state_lock); |
423 | if (MPATH_EXPIRED(node->mpath)) | 423 | if (MPATH_EXPIRED(node->mpath)) |
424 | node->mpath->flags &= ~MESH_PATH_ACTIVE; | 424 | node->mpath->flags &= ~MESH_PATH_ACTIVE; |
425 | spin_unlock_bh(&node->mpath->state_lock); | 425 | spin_unlock_bh(&node->mpath->state_lock); |
426 | } | 426 | } |
427 | return node->mpath; | 427 | return node->mpath; |
428 | } | 428 | } |
429 | } | 429 | } |
430 | 430 | ||
431 | return NULL; | 431 | return NULL; |
432 | } | 432 | } |
433 | 433 | ||
434 | static void mesh_gate_node_reclaim(struct rcu_head *rp) | 434 | static void mesh_gate_node_reclaim(struct rcu_head *rp) |
435 | { | 435 | { |
436 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 436 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
437 | kfree(node); | 437 | kfree(node); |
438 | } | 438 | } |
439 | 439 | ||
440 | /** | 440 | /** |
441 | * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates | 441 | * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates |
442 | * @tbl: table which contains the known_gates list | 442 | * @tbl: table which contains the known_gates list |
443 | * @mpath: mpath to known mesh gate | 443 | * @mpath: mpath to known mesh gate |
444 | * | 444 | * |
445 | * Returns: 0 on success | 445 | * Returns: 0 on success |
446 | * | 446 | * |
447 | */ | 447 | */ |
448 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) | 448 | static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) |
449 | { | 449 | { |
450 | struct mpath_node *gate, *new_gate; | 450 | struct mpath_node *gate, *new_gate; |
451 | struct hlist_node *n; | 451 | struct hlist_node *n; |
452 | int err; | 452 | int err; |
453 | 453 | ||
454 | rcu_read_lock(); | 454 | rcu_read_lock(); |
455 | tbl = rcu_dereference(tbl); | 455 | tbl = rcu_dereference(tbl); |
456 | 456 | ||
457 | hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) | 457 | hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) |
458 | if (gate->mpath == mpath) { | 458 | if (gate->mpath == mpath) { |
459 | err = -EEXIST; | 459 | err = -EEXIST; |
460 | goto err_rcu; | 460 | goto err_rcu; |
461 | } | 461 | } |
462 | 462 | ||
463 | new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC); | 463 | new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
464 | if (!new_gate) { | 464 | if (!new_gate) { |
465 | err = -ENOMEM; | 465 | err = -ENOMEM; |
466 | goto err_rcu; | 466 | goto err_rcu; |
467 | } | 467 | } |
468 | 468 | ||
469 | mpath->is_gate = true; | 469 | mpath->is_gate = true; |
470 | mpath->sdata->u.mesh.num_gates++; | 470 | mpath->sdata->u.mesh.num_gates++; |
471 | new_gate->mpath = mpath; | 471 | new_gate->mpath = mpath; |
472 | spin_lock_bh(&tbl->gates_lock); | 472 | spin_lock_bh(&tbl->gates_lock); |
473 | hlist_add_head_rcu(&new_gate->list, tbl->known_gates); | 473 | hlist_add_head_rcu(&new_gate->list, tbl->known_gates); |
474 | spin_unlock_bh(&tbl->gates_lock); | 474 | spin_unlock_bh(&tbl->gates_lock); |
475 | rcu_read_unlock(); | 475 | rcu_read_unlock(); |
476 | mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", | 476 | mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n", |
477 | mpath->sdata->name, mpath->dst, | 477 | mpath->sdata->name, mpath->dst, |
478 | mpath->sdata->u.mesh.num_gates); | 478 | mpath->sdata->u.mesh.num_gates); |
479 | return 0; | 479 | return 0; |
480 | err_rcu: | 480 | err_rcu: |
481 | rcu_read_unlock(); | 481 | rcu_read_unlock(); |
482 | return err; | 482 | return err; |
483 | } | 483 | } |
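
mesh_gate_add() walks known_gates under RCU to reject duplicates with -EEXIST, allocates the new node with GFP_ATOMIC, and publishes it at the list head while holding gates_lock. A rough userspace analogue of that insert-if-absent shape (hypothetical names; the RCU read side is elided and a single mutex serializes everything, compile with -pthread):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct gate {
    struct gate *next;
    const void *mpath;                     /* opaque path handle */
};

static struct gate *known_gates;           /* models tbl->known_gates */
static pthread_mutex_t gates_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert-if-absent, as mesh_gate_add() does: -EEXIST on duplicates,
 * -ENOMEM on allocation failure, new entries published at the head. */
static int gate_add(const void *mpath)
{
    struct gate *g, *new_gate;
    int err = 0;

    pthread_mutex_lock(&gates_lock);       /* the kernel scans under RCU instead */
    for (g = known_gates; g; g = g->next)
        if (g->mpath == mpath) {
            err = -EEXIST;
            goto out;
        }

    new_gate = calloc(1, sizeof(*new_gate));
    if (!new_gate) {
        err = -ENOMEM;
        goto out;
    }
    new_gate->mpath = mpath;
    new_gate->next = known_gates;
    known_gates = new_gate;                /* kernel: hlist_add_head_rcu() */
out:
    pthread_mutex_unlock(&gates_lock);
    return err;
}

int main(void)
{
    int x;
    return gate_add(&x) || gate_add(&x) != -EEXIST;  /* second add must fail */
}
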
484 | 484 | ||
485 | /** | 485 | /** |
486 | * mesh_gate_del - remove a mesh gate from the list of known gates | 486 | * mesh_gate_del - remove a mesh gate from the list of known gates |
487 | * @tbl: table which holds our list of known gates | 487 | * @tbl: table which holds our list of known gates |
488 | * @mpath: gate mpath | 488 | * @mpath: gate mpath |
489 | * | 489 | * |
490 | * Returns: 0 on success | 490 | * Returns: 0 on success |
491 | * | 491 | * |
492 | * Locking: must be called inside an rcu_read_lock() section | 492 | * Locking: must be called inside an rcu_read_lock() section |
493 | */ | 493 | */ |
494 | static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) | 494 | static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) |
495 | { | 495 | { |
496 | struct mpath_node *gate; | 496 | struct mpath_node *gate; |
497 | struct hlist_node *p, *q; | 497 | struct hlist_node *p, *q; |
498 | 498 | ||
499 | tbl = rcu_dereference(tbl); | 499 | tbl = rcu_dereference(tbl); |
500 | 500 | ||
501 | hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) | 501 | hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) |
502 | if (gate->mpath == mpath) { | 502 | if (gate->mpath == mpath) { |
503 | spin_lock_bh(&tbl->gates_lock); | 503 | spin_lock_bh(&tbl->gates_lock); |
504 | hlist_del_rcu(&gate->list); | 504 | hlist_del_rcu(&gate->list); |
505 | call_rcu(&gate->rcu, mesh_gate_node_reclaim); | 505 | call_rcu(&gate->rcu, mesh_gate_node_reclaim); |
506 | spin_unlock_bh(&tbl->gates_lock); | 506 | spin_unlock_bh(&tbl->gates_lock); |
507 | mpath->sdata->u.mesh.num_gates--; | 507 | mpath->sdata->u.mesh.num_gates--; |
508 | mpath->is_gate = false; | 508 | mpath->is_gate = false; |
509 | mpath_dbg("Mesh path (%s): Deleted gate: %pM. " | 509 | mpath_dbg("Mesh path (%s): Deleted gate: %pM. " |
510 | "%d known gates\n", mpath->sdata->name, | 510 | "%d known gates\n", mpath->sdata->name, |
511 | mpath->dst, mpath->sdata->u.mesh.num_gates); | 511 | mpath->dst, mpath->sdata->u.mesh.num_gates); |
512 | break; | 512 | break; |
513 | } | 513 | } |
514 | 514 | ||
515 | return 0; | 515 | return 0; |
516 | } | 516 | } |
517 | 517 | ||
518 | /** | 518 | /** |
519 | * | 519 | * |
520 | * mesh_path_add_gate - add the given mpath, a path to a mesh gate, to our path table | 520 | * mesh_path_add_gate - add the given mpath, a path to a mesh gate, to our path table |
521 | * @mpath: gate path to add to table | 521 | * @mpath: gate path to add to table |
522 | */ | 522 | */ |
523 | int mesh_path_add_gate(struct mesh_path *mpath) | 523 | int mesh_path_add_gate(struct mesh_path *mpath) |
524 | { | 524 | { |
525 | return mesh_gate_add(mesh_paths, mpath); | 525 | return mesh_gate_add(mesh_paths, mpath); |
526 | } | 526 | } |
527 | 527 | ||
528 | /** | 528 | /** |
529 | * mesh_gate_num - number of gates known to this interface | 529 | * mesh_gate_num - number of gates known to this interface |
530 | * @sdata: subif data | 530 | * @sdata: subif data |
531 | */ | 531 | */ |
532 | int mesh_gate_num(struct ieee80211_sub_if_data *sdata) | 532 | int mesh_gate_num(struct ieee80211_sub_if_data *sdata) |
533 | { | 533 | { |
534 | return sdata->u.mesh.num_gates; | 534 | return sdata->u.mesh.num_gates; |
535 | } | 535 | } |
536 | 536 | ||
537 | /** | 537 | /** |
538 | * mesh_path_add - allocate and add a new path to the mesh path table | 538 | * mesh_path_add - allocate and add a new path to the mesh path table |
539 | * @dst: destination address of the path (ETH_ALEN length) | 539 | * @dst: destination address of the path (ETH_ALEN length) |
540 | * @sdata: local subif | 540 | * @sdata: local subif |
541 | * | 541 | * |
542 | * Returns: 0 on success | 542 | * Returns: 0 on success |
543 | * | 543 | * |
544 | * State: the initial state of the new path is set to 0 | 544 | * State: the initial state of the new path is set to 0 |
545 | */ | 545 | */ |
546 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | 546 | int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) |
547 | { | 547 | { |
548 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 548 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
549 | struct ieee80211_local *local = sdata->local; | 549 | struct ieee80211_local *local = sdata->local; |
550 | struct mesh_table *tbl; | 550 | struct mesh_table *tbl; |
551 | struct mesh_path *mpath, *new_mpath; | 551 | struct mesh_path *mpath, *new_mpath; |
552 | struct mpath_node *node, *new_node; | 552 | struct mpath_node *node, *new_node; |
553 | struct hlist_head *bucket; | 553 | struct hlist_head *bucket; |
554 | struct hlist_node *n; | 554 | struct hlist_node *n; |
555 | int grow = 0; | 555 | int grow = 0; |
556 | int err = 0; | 556 | int err = 0; |
557 | u32 hash_idx; | 557 | u32 hash_idx; |
558 | 558 | ||
559 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) | 559 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) |
560 | /* never add ourselves as neighbours */ | 560 | /* never add ourselves as neighbours */ |
561 | return -ENOTSUPP; | 561 | return -ENOTSUPP; |
562 | 562 | ||
563 | if (is_multicast_ether_addr(dst)) | 563 | if (is_multicast_ether_addr(dst)) |
564 | return -ENOTSUPP; | 564 | return -ENOTSUPP; |
565 | 565 | ||
566 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) | 566 | if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0) |
567 | return -ENOSPC; | 567 | return -ENOSPC; |
568 | 568 | ||
569 | err = -ENOMEM; | 569 | err = -ENOMEM; |
570 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); | 570 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
571 | if (!new_mpath) | 571 | if (!new_mpath) |
572 | goto err_path_alloc; | 572 | goto err_path_alloc; |
573 | 573 | ||
574 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); | 574 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
575 | if (!new_node) | 575 | if (!new_node) |
576 | goto err_node_alloc; | 576 | goto err_node_alloc; |
577 | 577 | ||
578 | read_lock_bh(&pathtbl_resize_lock); | 578 | read_lock_bh(&pathtbl_resize_lock); |
579 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 579 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
580 | new_mpath->sdata = sdata; | 580 | new_mpath->sdata = sdata; |
581 | new_mpath->flags = 0; | 581 | new_mpath->flags = 0; |
582 | skb_queue_head_init(&new_mpath->frame_queue); | 582 | skb_queue_head_init(&new_mpath->frame_queue); |
583 | new_node->mpath = new_mpath; | 583 | new_node->mpath = new_mpath; |
584 | new_mpath->timer.data = (unsigned long) new_mpath; | 584 | new_mpath->timer.data = (unsigned long) new_mpath; |
585 | new_mpath->timer.function = mesh_path_timer; | 585 | new_mpath->timer.function = mesh_path_timer; |
586 | new_mpath->exp_time = jiffies; | 586 | new_mpath->exp_time = jiffies; |
587 | spin_lock_init(&new_mpath->state_lock); | 587 | spin_lock_init(&new_mpath->state_lock); |
588 | init_timer(&new_mpath->timer); | 588 | init_timer(&new_mpath->timer); |
589 | 589 | ||
590 | tbl = resize_dereference_mesh_paths(); | 590 | tbl = resize_dereference_mesh_paths(); |
591 | 591 | ||
592 | hash_idx = mesh_table_hash(dst, sdata, tbl); | 592 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
593 | bucket = &tbl->hash_buckets[hash_idx]; | 593 | bucket = &tbl->hash_buckets[hash_idx]; |
594 | 594 | ||
595 | spin_lock_bh(&tbl->hashwlock[hash_idx]); | 595 | spin_lock_bh(&tbl->hashwlock[hash_idx]); |
596 | 596 | ||
597 | err = -EEXIST; | 597 | err = -EEXIST; |
598 | hlist_for_each_entry(node, n, bucket, list) { | 598 | hlist_for_each_entry(node, n, bucket, list) { |
599 | mpath = node->mpath; | 599 | mpath = node->mpath; |
600 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | 600 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
601 | goto err_exists; | 601 | goto err_exists; |
602 | } | 602 | } |
603 | 603 | ||
604 | hlist_add_head_rcu(&new_node->list, bucket); | 604 | hlist_add_head_rcu(&new_node->list, bucket); |
605 | if (atomic_inc_return(&tbl->entries) >= | 605 | if (atomic_inc_return(&tbl->entries) >= |
606 | tbl->mean_chain_len * (tbl->hash_mask + 1)) | 606 | tbl->mean_chain_len * (tbl->hash_mask + 1)) |
607 | grow = 1; | 607 | grow = 1; |
608 | 608 | ||
609 | mesh_paths_generation++; | 609 | mesh_paths_generation++; |
610 | 610 | ||
611 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); | 611 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
612 | read_unlock_bh(&pathtbl_resize_lock); | 612 | read_unlock_bh(&pathtbl_resize_lock); |
613 | if (grow) { | 613 | if (grow) { |
614 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); | 614 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
615 | ieee80211_queue_work(&local->hw, &sdata->work); | 615 | ieee80211_queue_work(&local->hw, &sdata->work); |
616 | } | 616 | } |
617 | return 0; | 617 | return 0; |
618 | 618 | ||
619 | err_exists: | 619 | err_exists: |
620 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); | 620 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
621 | read_unlock_bh(&pathtbl_resize_lock); | 621 | read_unlock_bh(&pathtbl_resize_lock); |
622 | kfree(new_node); | 622 | kfree(new_node); |
623 | err_node_alloc: | 623 | err_node_alloc: |
624 | kfree(new_mpath); | 624 | kfree(new_mpath); |
625 | err_path_alloc: | 625 | err_path_alloc: |
626 | atomic_dec(&sdata->u.mesh.mpaths); | 626 | atomic_dec(&sdata->u.mesh.mpaths); |
627 | return err; | 627 | return err; |
628 | } | 628 | } |
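
The grow trigger in mesh_path_add() fires once the entry count reaches mean_chain_len times the bucket count, i.e. once the average hash chain reaches mean_chain_len; the resize itself is deferred to the workqueue via MESH_WORK_GROW_MPATH_TABLE. A small sketch of the arithmetic (the values below are illustrative, not taken from a real table):

#include <stdio.h>

/* Grow heuristic from mesh_path_add()/mpp_path_add(): queue a resize
 * once entries >= mean_chain_len * (hash_mask + 1). */
int main(void)
{
    int size_order = 4;                       /* 2^4 = 16 hash buckets */
    int hash_mask = (1 << size_order) - 1;    /* 15 */
    int mean_chain_len = 2;                   /* illustrative chain-length target */
    int entries;

    for (entries = 1; entries <= 64; entries++)
        if (entries >= mean_chain_len * (hash_mask + 1)) {
            printf("grow queued at entry %d (avg chain length %d)\n",
                   entries, entries / (hash_mask + 1));
            break;
        }
    return 0;
}
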
629 | 629 | ||
630 | static void mesh_table_free_rcu(struct rcu_head *rcu) | 630 | static void mesh_table_free_rcu(struct rcu_head *rcu) |
631 | { | 631 | { |
632 | struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); | 632 | struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head); |
633 | 633 | ||
634 | mesh_table_free(tbl, false); | 634 | mesh_table_free(tbl, false); |
635 | } | 635 | } |
636 | 636 | ||
637 | void mesh_mpath_table_grow(void) | 637 | void mesh_mpath_table_grow(void) |
638 | { | 638 | { |
639 | struct mesh_table *oldtbl, *newtbl; | 639 | struct mesh_table *oldtbl, *newtbl; |
640 | 640 | ||
641 | write_lock_bh(&pathtbl_resize_lock); | 641 | write_lock_bh(&pathtbl_resize_lock); |
642 | oldtbl = resize_dereference_mesh_paths(); | 642 | oldtbl = resize_dereference_mesh_paths(); |
643 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | 643 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); |
644 | if (!newtbl) | 644 | if (!newtbl) |
645 | goto out; | 645 | goto out; |
646 | if (mesh_table_grow(oldtbl, newtbl) < 0) { | 646 | if (mesh_table_grow(oldtbl, newtbl) < 0) { |
647 | __mesh_table_free(newtbl); | 647 | __mesh_table_free(newtbl); |
648 | goto out; | 648 | goto out; |
649 | } | 649 | } |
650 | rcu_assign_pointer(mesh_paths, newtbl); | 650 | rcu_assign_pointer(mesh_paths, newtbl); |
651 | 651 | ||
652 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); | 652 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
653 | 653 | ||
654 | out: | 654 | out: |
655 | write_unlock_bh(&pathtbl_resize_lock); | 655 | write_unlock_bh(&pathtbl_resize_lock); |
656 | } | 656 | } |
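
The resize follows the classic RCU swap: build the bigger table off to the side, make it visible with rcu_assign_pointer(), and free the old one only after every pre-existing reader has left its read-side section (the kernel defers that with call_rcu()). A userspace analogue using liburcu, assuming that library is available (link with -lurcu); synchronize_rcu() stands in for the call_rcu() deferral:

#include <urcu.h>          /* userspace RCU: rcu_assign_pointer() etc. */
#include <stdlib.h>
#include <stdio.h>

struct table {
    int size_order;        /* table has 2^size_order buckets */
    /* ... buckets would live here ... */
};

static struct table *paths;   /* RCU-protected, models mesh_paths */

/* Writer side; assumed externally serialized, like pathtbl_resize_lock. */
static void table_grow(void)
{
    struct table *oldtbl = paths;
    struct table *newtbl = calloc(1, sizeof(*newtbl));

    if (!newtbl)
        return;
    newtbl->size_order = oldtbl->size_order + 1;
    /* ... rehash old entries into newtbl, as mesh_table_grow() does ... */

    rcu_assign_pointer(paths, newtbl);  /* new readers see the new table */
    synchronize_rcu();                  /* wait out readers of oldtbl */
    free(oldtbl);                       /* the kernel defers this via call_rcu() */
}

int main(void)
{
    rcu_register_thread();
    paths = calloc(1, sizeof(*paths));
    table_grow();
    printf("size_order is now %d\n", paths->size_order);
    rcu_unregister_thread();
    return 0;
}
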
657 | 657 | ||
658 | void mesh_mpp_table_grow(void) | 658 | void mesh_mpp_table_grow(void) |
659 | { | 659 | { |
660 | struct mesh_table *oldtbl, *newtbl; | 660 | struct mesh_table *oldtbl, *newtbl; |
661 | 661 | ||
662 | write_lock_bh(&pathtbl_resize_lock); | 662 | write_lock_bh(&pathtbl_resize_lock); |
663 | oldtbl = resize_dereference_mpp_paths(); | 663 | oldtbl = resize_dereference_mpp_paths(); |
664 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); | 664 | newtbl = mesh_table_alloc(oldtbl->size_order + 1); |
665 | if (!newtbl) | 665 | if (!newtbl) |
666 | goto out; | 666 | goto out; |
667 | if (mesh_table_grow(oldtbl, newtbl) < 0) { | 667 | if (mesh_table_grow(oldtbl, newtbl) < 0) { |
668 | __mesh_table_free(newtbl); | 668 | __mesh_table_free(newtbl); |
669 | goto out; | 669 | goto out; |
670 | } | 670 | } |
671 | rcu_assign_pointer(mpp_paths, newtbl); | 671 | rcu_assign_pointer(mpp_paths, newtbl); |
672 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); | 672 | call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu); |
673 | 673 | ||
674 | out: | 674 | out: |
675 | write_unlock_bh(&pathtbl_resize_lock); | 675 | write_unlock_bh(&pathtbl_resize_lock); |
676 | } | 676 | } |
677 | 677 | ||
678 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | 678 | int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) |
679 | { | 679 | { |
680 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 680 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
681 | struct ieee80211_local *local = sdata->local; | 681 | struct ieee80211_local *local = sdata->local; |
682 | struct mesh_table *tbl; | 682 | struct mesh_table *tbl; |
683 | struct mesh_path *mpath, *new_mpath; | 683 | struct mesh_path *mpath, *new_mpath; |
684 | struct mpath_node *node, *new_node; | 684 | struct mpath_node *node, *new_node; |
685 | struct hlist_head *bucket; | 685 | struct hlist_head *bucket; |
686 | struct hlist_node *n; | 686 | struct hlist_node *n; |
687 | int grow = 0; | 687 | int grow = 0; |
688 | int err = 0; | 688 | int err = 0; |
689 | u32 hash_idx; | 689 | u32 hash_idx; |
690 | 690 | ||
691 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) | 691 | if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0) |
692 | /* never add ourselves as neighbours */ | 692 | /* never add ourselves as neighbours */ |
693 | return -ENOTSUPP; | 693 | return -ENOTSUPP; |
694 | 694 | ||
695 | if (is_multicast_ether_addr(dst)) | 695 | if (is_multicast_ether_addr(dst)) |
696 | return -ENOTSUPP; | 696 | return -ENOTSUPP; |
697 | 697 | ||
698 | err = -ENOMEM; | 698 | err = -ENOMEM; |
699 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); | 699 | new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC); |
700 | if (!new_mpath) | 700 | if (!new_mpath) |
701 | goto err_path_alloc; | 701 | goto err_path_alloc; |
702 | 702 | ||
703 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); | 703 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
704 | if (!new_node) | 704 | if (!new_node) |
705 | goto err_node_alloc; | 705 | goto err_node_alloc; |
706 | 706 | ||
707 | read_lock_bh(&pathtbl_resize_lock); | 707 | read_lock_bh(&pathtbl_resize_lock); |
708 | memcpy(new_mpath->dst, dst, ETH_ALEN); | 708 | memcpy(new_mpath->dst, dst, ETH_ALEN); |
709 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | 709 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); |
710 | new_mpath->sdata = sdata; | 710 | new_mpath->sdata = sdata; |
711 | new_mpath->flags = 0; | 711 | new_mpath->flags = 0; |
712 | skb_queue_head_init(&new_mpath->frame_queue); | 712 | skb_queue_head_init(&new_mpath->frame_queue); |
713 | new_node->mpath = new_mpath; | 713 | new_node->mpath = new_mpath; |
714 | init_timer(&new_mpath->timer); | 714 | init_timer(&new_mpath->timer); |
715 | new_mpath->exp_time = jiffies; | 715 | new_mpath->exp_time = jiffies; |
716 | spin_lock_init(&new_mpath->state_lock); | 716 | spin_lock_init(&new_mpath->state_lock); |
717 | 717 | ||
718 | tbl = resize_dereference_mpp_paths(); | 718 | tbl = resize_dereference_mpp_paths(); |
719 | 719 | ||
720 | hash_idx = mesh_table_hash(dst, sdata, tbl); | 720 | hash_idx = mesh_table_hash(dst, sdata, tbl); |
721 | bucket = &tbl->hash_buckets[hash_idx]; | 721 | bucket = &tbl->hash_buckets[hash_idx]; |
722 | 722 | ||
723 | spin_lock_bh(&tbl->hashwlock[hash_idx]); | 723 | spin_lock_bh(&tbl->hashwlock[hash_idx]); |
724 | 724 | ||
725 | err = -EEXIST; | 725 | err = -EEXIST; |
726 | hlist_for_each_entry(node, n, bucket, list) { | 726 | hlist_for_each_entry(node, n, bucket, list) { |
727 | mpath = node->mpath; | 727 | mpath = node->mpath; |
728 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) | 728 | if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0) |
729 | goto err_exists; | 729 | goto err_exists; |
730 | } | 730 | } |
731 | 731 | ||
732 | hlist_add_head_rcu(&new_node->list, bucket); | 732 | hlist_add_head_rcu(&new_node->list, bucket); |
733 | if (atomic_inc_return(&tbl->entries) >= | 733 | if (atomic_inc_return(&tbl->entries) >= |
734 | tbl->mean_chain_len * (tbl->hash_mask + 1)) | 734 | tbl->mean_chain_len * (tbl->hash_mask + 1)) |
735 | grow = 1; | 735 | grow = 1; |
736 | 736 | ||
737 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); | 737 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
738 | read_unlock_bh(&pathtbl_resize_lock); | 738 | read_unlock_bh(&pathtbl_resize_lock); |
739 | if (grow) { | 739 | if (grow) { |
740 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); | 740 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
741 | ieee80211_queue_work(&local->hw, &sdata->work); | 741 | ieee80211_queue_work(&local->hw, &sdata->work); |
742 | } | 742 | } |
743 | return 0; | 743 | return 0; |
744 | 744 | ||
745 | err_exists: | 745 | err_exists: |
746 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); | 746 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
747 | read_unlock_bh(&pathtbl_resize_lock); | 747 | read_unlock_bh(&pathtbl_resize_lock); |
748 | kfree(new_node); | 748 | kfree(new_node); |
749 | err_node_alloc: | 749 | err_node_alloc: |
750 | kfree(new_mpath); | 750 | kfree(new_mpath); |
751 | err_path_alloc: | 751 | err_path_alloc: |
752 | return err; | 752 | return err; |
753 | } | 753 | } |
754 | 754 | ||
755 | 755 | ||
756 | /** | 756 | /** |
757 | * mesh_plink_broken - deactivates paths and sends a PERR when a link breaks | 757 | * mesh_plink_broken - deactivates paths and sends a PERR when a link breaks |
758 | * | 758 | * |
759 | * @sta: broken peer link | 759 | * @sta: broken peer link |
760 | * | 760 | * |
761 | * This function must be called from the rate control algorithm if enough | 761 | * This function must be called from the rate control algorithm if enough |
762 | * delivery errors suggest that a peer link is no longer usable. | 762 | * delivery errors suggest that a peer link is no longer usable. |
763 | */ | 763 | */ |
764 | void mesh_plink_broken(struct sta_info *sta) | 764 | void mesh_plink_broken(struct sta_info *sta) |
765 | { | 765 | { |
766 | struct mesh_table *tbl; | 766 | struct mesh_table *tbl; |
767 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 767 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
768 | struct mesh_path *mpath; | 768 | struct mesh_path *mpath; |
769 | struct mpath_node *node; | 769 | struct mpath_node *node; |
770 | struct hlist_node *p; | 770 | struct hlist_node *p; |
771 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 771 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
772 | int i; | 772 | int i; |
773 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); | 773 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE); |
774 | 774 | ||
775 | rcu_read_lock(); | 775 | rcu_read_lock(); |
776 | tbl = rcu_dereference(mesh_paths); | 776 | tbl = rcu_dereference(mesh_paths); |
777 | for_each_mesh_entry(tbl, p, node, i) { | 777 | for_each_mesh_entry(tbl, p, node, i) { |
778 | mpath = node->mpath; | 778 | mpath = node->mpath; |
779 | spin_lock_bh(&mpath->state_lock); | ||
780 | if (rcu_dereference(mpath->next_hop) == sta && | 779 | if (rcu_dereference(mpath->next_hop) == sta && |
781 | mpath->flags & MESH_PATH_ACTIVE && | 780 | mpath->flags & MESH_PATH_ACTIVE && |
782 | !(mpath->flags & MESH_PATH_FIXED)) { | 781 | !(mpath->flags & MESH_PATH_FIXED)) { |
782 | spin_lock_bh(&mpath->state_lock); | ||
783 | mpath->flags &= ~MESH_PATH_ACTIVE; | 783 | mpath->flags &= ~MESH_PATH_ACTIVE; |
784 | ++mpath->sn; | 784 | ++mpath->sn; |
785 | spin_unlock_bh(&mpath->state_lock); | 785 | spin_unlock_bh(&mpath->state_lock); |
786 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, | 786 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, |
787 | mpath->dst, cpu_to_le32(mpath->sn), | 787 | mpath->dst, cpu_to_le32(mpath->sn), |
788 | reason, bcast, sdata); | 788 | reason, bcast, sdata); |
789 | } else | 789 | } |
790 | spin_unlock_bh(&mpath->state_lock); | ||
791 | } | 790 | } |
792 | rcu_read_unlock(); | 791 | rcu_read_unlock(); |
793 | } | 792 | } |
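
This function is the main beneficiary of the commit: the flags test now runs without state_lock, the lock is held only across the actual flag and sequence-number update, and the comparatively heavy PERR transmission happens after unlock. A compact userspace sketch of that "read locklessly, lock only to modify" discipline (hypothetical names; pthread mutex in place of the spinlock, compile with -pthread):

#include <pthread.h>
#include <stdio.h>

#define PATH_ACTIVE 0x01               /* models MESH_PATH_ACTIVE */
#define PATH_FIXED  0x08               /* models MESH_PATH_FIXED */

struct path {
    pthread_mutex_t state_lock;
    unsigned int flags;
    unsigned int sn;                   /* destination sequence number */
};

/* Models the loop body of mesh_plink_broken() after this commit: flags
 * are tested without the lock, the lock covers only the modification,
 * and the slow notification runs after unlock. */
static void link_broken(struct path *p)
{
    if ((p->flags & PATH_ACTIVE) && !(p->flags & PATH_FIXED)) {
        pthread_mutex_lock(&p->state_lock);
        p->flags &= ~PATH_ACTIVE;
        ++p->sn;
        pthread_mutex_unlock(&p->state_lock);
        /* models mesh_path_error_tx(): deliberately outside the lock */
        printf("send PERR for path, sn=%u\n", p->sn);
    }
}

int main(void)
{
    struct path p = { PTHREAD_MUTEX_INITIALIZER, PATH_ACTIVE, 0 };
    link_broken(&p);
    return 0;
}
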
794 | 793 | ||
795 | /** | 794 | /** |
796 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches | 795 | * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches |
797 | * | 796 | * |
798 | * @sta: mesh peer to match | 797 | * @sta: mesh peer to match |
799 | * | 798 | * |
800 | * RCU notes: this function is called when a mesh plink transitions from | 799 | * RCU notes: this function is called when a mesh plink transitions from |
801 | * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that | 800 | * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that |
802 | * allows path creation. This will happen before the sta can be freed (because | 801 | * allows path creation. This will happen before the sta can be freed (because |
803 | * sta_info_destroy() calls this) so any reader in an RCU read block will be | 802 | * sta_info_destroy() calls this) so any reader in an RCU read block will be |
804 | * protected against the plink disappearing. | 803 | * protected against the plink disappearing. |
805 | */ | 804 | */ |
806 | void mesh_path_flush_by_nexthop(struct sta_info *sta) | 805 | void mesh_path_flush_by_nexthop(struct sta_info *sta) |
807 | { | 806 | { |
808 | struct mesh_table *tbl; | 807 | struct mesh_table *tbl; |
809 | struct mesh_path *mpath; | 808 | struct mesh_path *mpath; |
810 | struct mpath_node *node; | 809 | struct mpath_node *node; |
811 | struct hlist_node *p; | 810 | struct hlist_node *p; |
812 | int i; | 811 | int i; |
813 | 812 | ||
814 | rcu_read_lock(); | 813 | rcu_read_lock(); |
815 | tbl = rcu_dereference(mesh_paths); | 814 | tbl = rcu_dereference(mesh_paths); |
816 | for_each_mesh_entry(tbl, p, node, i) { | 815 | for_each_mesh_entry(tbl, p, node, i) { |
817 | mpath = node->mpath; | 816 | mpath = node->mpath; |
818 | if (rcu_dereference(mpath->next_hop) == sta) | 817 | if (rcu_dereference(mpath->next_hop) == sta) |
819 | mesh_path_del(mpath->dst, mpath->sdata); | 818 | mesh_path_del(mpath->dst, mpath->sdata); |
820 | } | 819 | } |
821 | rcu_read_unlock(); | 820 | rcu_read_unlock(); |
822 | } | 821 | } |
823 | 822 | ||
824 | static void mesh_path_flush(struct ieee80211_sub_if_data *sdata) | 823 | static void mesh_path_flush(struct ieee80211_sub_if_data *sdata) |
825 | { | 824 | { |
826 | struct mesh_table *tbl; | 825 | struct mesh_table *tbl; |
827 | struct mesh_path *mpath; | 826 | struct mesh_path *mpath; |
828 | struct mpath_node *node; | 827 | struct mpath_node *node; |
829 | struct hlist_node *p; | 828 | struct hlist_node *p; |
830 | int i; | 829 | int i; |
831 | 830 | ||
832 | rcu_read_lock(); | 831 | rcu_read_lock(); |
833 | tbl = rcu_dereference(mesh_paths); | 832 | tbl = rcu_dereference(mesh_paths); |
834 | for_each_mesh_entry(tbl, p, node, i) { | 833 | for_each_mesh_entry(tbl, p, node, i) { |
835 | mpath = node->mpath; | 834 | mpath = node->mpath; |
836 | if (mpath->sdata == sdata) | 835 | if (mpath->sdata == sdata) |
837 | mesh_path_del(mpath->dst, mpath->sdata); | 836 | mesh_path_del(mpath->dst, mpath->sdata); |
838 | } | 837 | } |
839 | rcu_read_unlock(); | 838 | rcu_read_unlock(); |
840 | } | 839 | } |
841 | 840 | ||
842 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 841 | static void mesh_path_node_reclaim(struct rcu_head *rp) |
843 | { | 842 | { |
844 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 843 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
845 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; | 844 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
846 | 845 | ||
847 | del_timer_sync(&node->mpath->timer); | 846 | del_timer_sync(&node->mpath->timer); |
848 | atomic_dec(&sdata->u.mesh.mpaths); | 847 | atomic_dec(&sdata->u.mesh.mpaths); |
849 | kfree(node->mpath); | 848 | kfree(node->mpath); |
850 | kfree(node); | 849 | kfree(node); |
851 | } | 850 | } |
852 | 851 | ||
853 | static void mpp_path_flush(struct ieee80211_sub_if_data *sdata) | 852 | static void mpp_path_flush(struct ieee80211_sub_if_data *sdata) |
854 | { | 853 | { |
855 | struct mesh_table *tbl; | 854 | struct mesh_table *tbl; |
856 | struct mesh_path *mpath; | 855 | struct mesh_path *mpath; |
857 | struct mpath_node *node; | 856 | struct mpath_node *node; |
858 | struct hlist_node *p; | 857 | struct hlist_node *p; |
859 | int i; | 858 | int i; |
860 | 859 | ||
861 | read_lock_bh(&pathtbl_resize_lock); | 860 | read_lock_bh(&pathtbl_resize_lock); |
862 | tbl = rcu_dereference_protected(mpp_paths, | 861 | tbl = rcu_dereference_protected(mpp_paths, |
863 | lockdep_is_held(&pathtbl_resize_lock)); | 862 | lockdep_is_held(&pathtbl_resize_lock)); |
864 | for_each_mesh_entry(tbl, p, node, i) { | 863 | for_each_mesh_entry(tbl, p, node, i) { |
865 | mpath = node->mpath; | 864 | mpath = node->mpath; |
866 | if (mpath->sdata != sdata) | 865 | if (mpath->sdata != sdata) |
867 | continue; | 866 | continue; |
868 | spin_lock_bh(&tbl->hashwlock[i]); | 867 | spin_lock_bh(&tbl->hashwlock[i]); |
869 | spin_lock_bh(&mpath->state_lock); | 868 | hlist_del_rcu(&node->list); |
870 | call_rcu(&node->rcu, mesh_path_node_reclaim); | 869 | call_rcu(&node->rcu, mesh_path_node_reclaim); |
871 | atomic_dec(&tbl->entries); | 870 | atomic_dec(&tbl->entries); |
872 | spin_unlock_bh(&tbl->hashwlock[i]); | 871 | spin_unlock_bh(&tbl->hashwlock[i]); |
873 | } | 872 | } |
874 | read_unlock_bh(&pathtbl_resize_lock); | 873 | read_unlock_bh(&pathtbl_resize_lock); |
875 | } | 874 | } |
876 | 875 | ||
877 | /** | 876 | /** |
878 | * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface | 877 | * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface |
879 | * | 878 | * |
880 | * This function deletes both mesh paths and mesh portal paths. | 879 | * This function deletes both mesh paths and mesh portal paths. |
881 | * | 880 | * |
882 | * @sdata: interface data to match | 881 | * @sdata: interface data to match |
883 | * | 882 | * |
884 | */ | 883 | */ |
885 | void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) | 884 | void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) |
886 | { | 885 | { |
887 | mesh_path_flush(sdata); | 886 | mesh_path_flush(sdata); |
888 | mpp_path_flush(sdata); | 887 | mpp_path_flush(sdata); |
889 | } | 888 | } |
890 | 889 | ||
891 | /** | 890 | /** |
892 | * mesh_path_del - delete a mesh path from the table | 891 | * mesh_path_del - delete a mesh path from the table |
893 | * | 892 | * |
894 | * @addr: dst address (ETH_ALEN length) | 893 | * @addr: dst address (ETH_ALEN length) |
895 | * @sdata: local subif | 894 | * @sdata: local subif |
896 | * | 895 | * |
897 | * Returns: 0 if successful | 896 | * Returns: 0 if successful |
898 | */ | 897 | */ |
899 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) | 898 | int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) |
900 | { | 899 | { |
901 | struct mesh_table *tbl; | 900 | struct mesh_table *tbl; |
902 | struct mesh_path *mpath; | 901 | struct mesh_path *mpath; |
903 | struct mpath_node *node; | 902 | struct mpath_node *node; |
904 | struct hlist_head *bucket; | 903 | struct hlist_head *bucket; |
905 | struct hlist_node *n; | 904 | struct hlist_node *n; |
906 | int hash_idx; | 905 | int hash_idx; |
907 | int err = 0; | 906 | int err = 0; |
908 | 907 | ||
909 | read_lock_bh(&pathtbl_resize_lock); | 908 | read_lock_bh(&pathtbl_resize_lock); |
910 | tbl = resize_dereference_mesh_paths(); | 909 | tbl = resize_dereference_mesh_paths(); |
911 | hash_idx = mesh_table_hash(addr, sdata, tbl); | 910 | hash_idx = mesh_table_hash(addr, sdata, tbl); |
912 | bucket = &tbl->hash_buckets[hash_idx]; | 911 | bucket = &tbl->hash_buckets[hash_idx]; |
913 | 912 | ||
914 | spin_lock_bh(&tbl->hashwlock[hash_idx]); | 913 | spin_lock_bh(&tbl->hashwlock[hash_idx]); |
915 | hlist_for_each_entry(node, n, bucket, list) { | 914 | hlist_for_each_entry(node, n, bucket, list) { |
916 | mpath = node->mpath; | 915 | mpath = node->mpath; |
917 | if (mpath->sdata == sdata && | 916 | if (mpath->sdata == sdata && |
918 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { | 917 | memcmp(addr, mpath->dst, ETH_ALEN) == 0) { |
919 | spin_lock_bh(&mpath->state_lock); | 918 | spin_lock_bh(&mpath->state_lock); |
920 | if (mpath->is_gate) | 919 | if (mpath->is_gate) |
921 | mesh_gate_del(tbl, mpath); | 920 | mesh_gate_del(tbl, mpath); |
922 | mpath->flags |= MESH_PATH_RESOLVING; | 921 | mpath->flags |= MESH_PATH_RESOLVING; |
923 | hlist_del_rcu(&node->list); | 922 | hlist_del_rcu(&node->list); |
924 | call_rcu(&node->rcu, mesh_path_node_reclaim); | 923 | call_rcu(&node->rcu, mesh_path_node_reclaim); |
925 | atomic_dec(&tbl->entries); | 924 | atomic_dec(&tbl->entries); |
926 | spin_unlock_bh(&mpath->state_lock); | 925 | spin_unlock_bh(&mpath->state_lock); |
927 | goto enddel; | 926 | goto enddel; |
928 | } | 927 | } |
929 | } | 928 | } |
930 | 929 | ||
931 | err = -ENXIO; | 930 | err = -ENXIO; |
932 | enddel: | 931 | enddel: |
933 | mesh_paths_generation++; | 932 | mesh_paths_generation++; |
934 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); | 933 | spin_unlock_bh(&tbl->hashwlock[hash_idx]); |
935 | read_unlock_bh(&pathtbl_resize_lock); | 934 | read_unlock_bh(&pathtbl_resize_lock); |
936 | return err; | 935 | return err; |
937 | } | 936 | } |
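
Deletion unlinks the node with hlist_del_rcu() and hands the actual freeing to call_rcu(), so a reader that looked the path up just before the unlink can keep using it until its read-side section ends; mesh_path_node_reclaim() then stops the timer and frees both node and mpath. A userspace analogue of that deferred reclaim with liburcu's call_rcu() (assumed available, link with -lurcu), reduced to a single RCU-protected pointer:

#include <urcu.h>
#include <stdlib.h>

struct path {
    struct rcu_head rcu;    /* liburcu's deferred-free handle */
    int dst;
};

static struct path *current_path;   /* RCU-protected pointer */

/* Models mesh_path_node_reclaim(): runs after a grace period, when no
 * reader can still hold a reference taken before the unlink. */
static void path_reclaim(struct rcu_head *rp)
{
    struct path *p = caa_container_of(rp, struct path, rcu);
    free(p);
}

/* Models the unlink in mesh_path_del(): unpublish first, free later. */
static void path_del(void)
{
    struct path *p = current_path;

    rcu_assign_pointer(current_path, NULL);  /* kernel: hlist_del_rcu() */
    call_rcu(&p->rcu, path_reclaim);         /* deferred kfree() */
}

int main(void)
{
    rcu_register_thread();
    current_path = calloc(1, sizeof(*current_path));
    path_del();
    rcu_barrier();           /* flush pending callbacks before exit */
    rcu_unregister_thread();
    return 0;
}
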
938 | 937 | ||
939 | /** | 938 | /** |
940 | * mesh_path_tx_pending - sends pending frames in a mesh path queue | 939 | * mesh_path_tx_pending - sends pending frames in a mesh path queue |
941 | * | 940 | * |
942 | * @mpath: mesh path to activate | 941 | * @mpath: mesh path to activate |
943 | * | 942 | * |
944 | * Locking: the state_lock of the mpath structure must NOT be held when calling | 943 | * Locking: the state_lock of the mpath structure must NOT be held when calling |
945 | * this function. | 944 | * this function. |
946 | */ | 945 | */ |
947 | void mesh_path_tx_pending(struct mesh_path *mpath) | 946 | void mesh_path_tx_pending(struct mesh_path *mpath) |
948 | { | 947 | { |
949 | if (mpath->flags & MESH_PATH_ACTIVE) | 948 | if (mpath->flags & MESH_PATH_ACTIVE) |
950 | ieee80211_add_pending_skbs(mpath->sdata->local, | 949 | ieee80211_add_pending_skbs(mpath->sdata->local, |
951 | &mpath->frame_queue); | 950 | &mpath->frame_queue); |
952 | } | 951 | } |
953 | 952 | ||
954 | /** | 953 | /** |
955 | * mesh_path_send_to_gates - sends pending frames to all known mesh gates | 954 | * mesh_path_send_to_gates - sends pending frames to all known mesh gates |
956 | * | 955 | * |
957 | * @mpath: mesh path whose queue will be emptied | 956 | * @mpath: mesh path whose queue will be emptied |
958 | * | 957 | * |
959 | * If there is only one gate, the frames are transferred from the failed mpath | 958 | * If there is only one gate, the frames are transferred from the failed mpath |
960 | * queue to that gate's queue. If there is more than one gate, the frames | 959 | * queue to that gate's queue. If there is more than one gate, the frames |
961 | * are copied from each gate to the next. After frames are copied, the | 960 | * are copied from each gate to the next. After frames are copied, the |
962 | * mpath queues are emptied onto the transmission queue. | 961 | * mpath queues are emptied onto the transmission queue. |
963 | */ | 962 | */ |
964 | int mesh_path_send_to_gates(struct mesh_path *mpath) | 963 | int mesh_path_send_to_gates(struct mesh_path *mpath) |
965 | { | 964 | { |
966 | struct ieee80211_sub_if_data *sdata = mpath->sdata; | 965 | struct ieee80211_sub_if_data *sdata = mpath->sdata; |
967 | struct hlist_node *n; | 966 | struct hlist_node *n; |
968 | struct mesh_table *tbl; | 967 | struct mesh_table *tbl; |
969 | struct mesh_path *from_mpath = mpath; | 968 | struct mesh_path *from_mpath = mpath; |
970 | struct mpath_node *gate = NULL; | 969 | struct mpath_node *gate = NULL; |
971 | bool copy = false; | 970 | bool copy = false; |
972 | struct hlist_head *known_gates; | 971 | struct hlist_head *known_gates; |
973 | 972 | ||
974 | rcu_read_lock(); | 973 | rcu_read_lock(); |
975 | tbl = rcu_dereference(mesh_paths); | 974 | tbl = rcu_dereference(mesh_paths); |
976 | known_gates = tbl->known_gates; | 975 | known_gates = tbl->known_gates; |
977 | rcu_read_unlock(); | 976 | rcu_read_unlock(); |
978 | 977 | ||
979 | if (!known_gates) | 978 | if (!known_gates) |
980 | return -EHOSTUNREACH; | 979 | return -EHOSTUNREACH; |
981 | 980 | ||
982 | hlist_for_each_entry_rcu(gate, n, known_gates, list) { | 981 | hlist_for_each_entry_rcu(gate, n, known_gates, list) { |
983 | if (gate->mpath->sdata != sdata) | 982 | if (gate->mpath->sdata != sdata) |
984 | continue; | 983 | continue; |
985 | 984 | ||
986 | if (gate->mpath->flags & MESH_PATH_ACTIVE) { | 985 | if (gate->mpath->flags & MESH_PATH_ACTIVE) { |
987 | mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); | 986 | mpath_dbg("Forwarding to %pM\n", gate->mpath->dst); |
988 | mesh_path_move_to_queue(gate->mpath, from_mpath, copy); | 987 | mesh_path_move_to_queue(gate->mpath, from_mpath, copy); |
989 | from_mpath = gate->mpath; | 988 | from_mpath = gate->mpath; |
990 | copy = true; | 989 | copy = true; |
991 | } else { | 990 | } else { |
992 | mpath_dbg("Not forwarding %p\n", gate->mpath); | 991 | mpath_dbg("Not forwarding %p\n", gate->mpath); |
993 | mpath_dbg("flags %x\n", gate->mpath->flags); | 992 | mpath_dbg("flags %x\n", gate->mpath->flags); |
994 | } | 993 | } |
995 | } | 994 | } |
996 | 995 | ||
997 | hlist_for_each_entry_rcu(gate, n, known_gates, list) | 996 | hlist_for_each_entry_rcu(gate, n, known_gates, list) |
998 | if (gate->mpath->sdata == sdata) { | 997 | if (gate->mpath->sdata == sdata) { |
999 | mpath_dbg("Sending to %pM\n", gate->mpath->dst); | 998 | mpath_dbg("Sending to %pM\n", gate->mpath->dst); |
1000 | mesh_path_tx_pending(gate->mpath); | 999 | mesh_path_tx_pending(gate->mpath); |
1001 | } | 1000 | } |
1002 | 1001 | ||
1003 | return (from_mpath == mpath) ? -EHOSTUNREACH : 0; | 1002 | return (from_mpath == mpath) ? -EHOSTUNREACH : 0; |
1004 | } | 1003 | } |
1005 | 1004 | ||
1006 | /** | 1005 | /** |
1007 | * mesh_path_discard_frame - discard a frame whose path could not be resolved | 1006 | * mesh_path_discard_frame - discard a frame whose path could not be resolved |
1008 | * | 1007 | * |
1009 | * @skb: frame to discard | 1008 | * @skb: frame to discard |
1010 | * @sdata: network subif the frame was to be sent through | 1009 | * @sdata: network subif the frame was to be sent through |
1011 | * | 1010 | * |
1012 | * If the frame was being forwarded from another MP, a PERR frame will be sent | 1011 | * If the frame was being forwarded from another MP, a PERR frame will be sent |
1013 | * to the precursor. The precursor's address (i.e. the previous hop) was saved | 1012 | * to the precursor. The precursor's address (i.e. the previous hop) was saved |
1014 | * in addr1 of the frame-to-be-forwarded, and would only be overwritten once | 1013 | * in addr1 of the frame-to-be-forwarded, and would only be overwritten once |
1015 | * the destination is successfully resolved. | 1014 | * the destination is successfully resolved. |
1016 | * | 1015 | * |
1017 | * Locking: the function must be called within an rcu_read_lock region | 1016 | * Locking: the function must be called within an rcu_read_lock region |
1018 | */ | 1017 | */ |
1019 | void mesh_path_discard_frame(struct sk_buff *skb, | 1018 | void mesh_path_discard_frame(struct sk_buff *skb, |
1020 | struct ieee80211_sub_if_data *sdata) | 1019 | struct ieee80211_sub_if_data *sdata) |
1021 | { | 1020 | { |
1022 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 1021 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
1023 | struct mesh_path *mpath; | 1022 | struct mesh_path *mpath; |
1024 | u32 sn = 0; | 1023 | u32 sn = 0; |
1025 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); | 1024 | __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); |
1026 | 1025 | ||
1027 | if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { | 1026 | if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) { |
1028 | u8 *ra, *da; | 1027 | u8 *ra, *da; |
1029 | 1028 | ||
1030 | da = hdr->addr3; | 1029 | da = hdr->addr3; |
1031 | ra = hdr->addr1; | 1030 | ra = hdr->addr1; |
1032 | rcu_read_lock(); | 1031 | rcu_read_lock(); |
1033 | mpath = mesh_path_lookup(da, sdata); | 1032 | mpath = mesh_path_lookup(da, sdata); |
1034 | if (mpath) { | 1033 | if (mpath) { |
1035 | spin_lock_bh(&mpath->state_lock); | 1034 | spin_lock_bh(&mpath->state_lock); |
1036 | sn = ++mpath->sn; | 1035 | sn = ++mpath->sn; |
1037 | spin_unlock_bh(&mpath->state_lock); | 1036 | spin_unlock_bh(&mpath->state_lock); |
1038 | } | 1037 | } |
1039 | rcu_read_unlock(); | 1038 | rcu_read_unlock(); |
1040 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, | 1039 | mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data, |
1041 | cpu_to_le32(sn), reason, ra, sdata); | 1040 | cpu_to_le32(sn), reason, ra, sdata); |
1042 | } | 1041 | } |
1043 | 1042 | ||
1044 | kfree_skb(skb); | 1043 | kfree_skb(skb); |
1045 | sdata->u.mesh.mshstats.dropped_frames_no_route++; | 1044 | sdata->u.mesh.mshstats.dropped_frames_no_route++; |
1046 | } | 1045 | } |
1047 | 1046 | ||
1048 | /** | 1047 | /** |
1049 | * mesh_path_flush_pending - free the pending queue of a mesh path | 1048 | * mesh_path_flush_pending - free the pending queue of a mesh path |
1050 | * | 1049 | * |
1051 | * @mpath: mesh path whose queue has to be freed | 1050 | * @mpath: mesh path whose queue has to be freed |
1052 | * | 1051 | * |
1053 | * Locking: the function must be called within an rcu_read_lock region | 1052 | * Locking: the function must be called within an rcu_read_lock region |
1054 | */ | 1053 | */ |
1055 | void mesh_path_flush_pending(struct mesh_path *mpath) | 1054 | void mesh_path_flush_pending(struct mesh_path *mpath) |
1056 | { | 1055 | { |
1057 | struct sk_buff *skb; | 1056 | struct sk_buff *skb; |
1058 | 1057 | ||
1059 | while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) | 1058 | while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL) |
1060 | mesh_path_discard_frame(skb, mpath->sdata); | 1059 | mesh_path_discard_frame(skb, mpath->sdata); |
1061 | } | 1060 | } |
1062 | 1061 | ||
1063 | /** | 1062 | /** |
1064 | * mesh_path_fix_nexthop - force a specific next hop for a mesh path | 1063 | * mesh_path_fix_nexthop - force a specific next hop for a mesh path |
1065 | * | 1064 | * |
1066 | * @mpath: the mesh path to modify | 1065 | * @mpath: the mesh path to modify |
1067 | * @next_hop: the next hop to force | 1066 | * @next_hop: the next hop to force |
1068 | * | 1067 | * |
1069 | * Locking: this function takes mpath->state_lock itself, so the caller must not hold it | 1068 | * Locking: this function takes mpath->state_lock itself, so the caller must not hold it |
1070 | */ | 1069 | */ |
1071 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) | 1070 | void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop) |
1072 | { | 1071 | { |
1073 | spin_lock_bh(&mpath->state_lock); | 1072 | spin_lock_bh(&mpath->state_lock); |
1074 | mesh_path_assign_nexthop(mpath, next_hop); | 1073 | mesh_path_assign_nexthop(mpath, next_hop); |
1075 | mpath->sn = 0xffff; | 1074 | mpath->sn = 0xffff; |
1076 | mpath->metric = 0; | 1075 | mpath->metric = 0; |
1077 | mpath->hop_count = 0; | 1076 | mpath->hop_count = 0; |
1078 | mpath->exp_time = 0; | 1077 | mpath->exp_time = 0; |
1079 | mpath->flags |= MESH_PATH_FIXED; | 1078 | mpath->flags |= MESH_PATH_FIXED; |
1080 | mesh_path_activate(mpath); | 1079 | mesh_path_activate(mpath); |
1081 | spin_unlock_bh(&mpath->state_lock); | 1080 | spin_unlock_bh(&mpath->state_lock); |
1082 | mesh_path_tx_pending(mpath); | 1081 | mesh_path_tx_pending(mpath); |
1083 | } | 1082 | } |
1084 | 1083 | ||
1085 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) | 1084 | static void mesh_path_node_free(struct hlist_node *p, bool free_leafs) |
1086 | { | 1085 | { |
1087 | struct mesh_path *mpath; | 1086 | struct mesh_path *mpath; |
1088 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); | 1087 | struct mpath_node *node = hlist_entry(p, struct mpath_node, list); |
1089 | mpath = node->mpath; | 1088 | mpath = node->mpath; |
1090 | hlist_del_rcu(p); | 1089 | hlist_del_rcu(p); |
1091 | if (free_leafs) { | 1090 | if (free_leafs) { |
1092 | del_timer_sync(&mpath->timer); | 1091 | del_timer_sync(&mpath->timer); |
1093 | kfree(mpath); | 1092 | kfree(mpath); |
1094 | } | 1093 | } |
1095 | kfree(node); | 1094 | kfree(node); |
1096 | } | 1095 | } |
1097 | 1096 | ||
1098 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) | 1097 | static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl) |
1099 | { | 1098 | { |
1100 | struct mesh_path *mpath; | 1099 | struct mesh_path *mpath; |
1101 | struct mpath_node *node, *new_node; | 1100 | struct mpath_node *node, *new_node; |
1102 | u32 hash_idx; | 1101 | u32 hash_idx; |
1103 | 1102 | ||
1104 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); | 1103 | new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC); |
1105 | if (new_node == NULL) | 1104 | if (new_node == NULL) |
1106 | return -ENOMEM; | 1105 | return -ENOMEM; |
1107 | 1106 | ||
1108 | node = hlist_entry(p, struct mpath_node, list); | 1107 | node = hlist_entry(p, struct mpath_node, list); |
1109 | mpath = node->mpath; | 1108 | mpath = node->mpath; |
1110 | new_node->mpath = mpath; | 1109 | new_node->mpath = mpath; |
1111 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); | 1110 | hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl); |
1112 | hlist_add_head(&new_node->list, | 1111 | hlist_add_head(&new_node->list, |
1113 | &newtbl->hash_buckets[hash_idx]); | 1112 | &newtbl->hash_buckets[hash_idx]); |
1114 | return 0; | 1113 | return 0; |
1115 | } | 1114 | } |
1116 | 1115 | ||
1117 | int mesh_pathtbl_init(void) | 1116 | int mesh_pathtbl_init(void) |
1118 | { | 1117 | { |
1119 | struct mesh_table *tbl_path, *tbl_mpp; | 1118 | struct mesh_table *tbl_path, *tbl_mpp; |
1120 | 1119 | ||
1121 | tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 1120 | tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
1122 | if (!tbl_path) | 1121 | if (!tbl_path) |
1123 | return -ENOMEM; | 1122 | return -ENOMEM; |
1124 | tbl_path->free_node = &mesh_path_node_free; | 1123 | tbl_path->free_node = &mesh_path_node_free; |
1125 | tbl_path->copy_node = &mesh_path_node_copy; | 1124 | tbl_path->copy_node = &mesh_path_node_copy; |
1126 | tbl_path->mean_chain_len = MEAN_CHAIN_LEN; | 1125 | tbl_path->mean_chain_len = MEAN_CHAIN_LEN; |
1127 | tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); | 1126 | tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1128 | INIT_HLIST_HEAD(tbl_path->known_gates); | 1127 | INIT_HLIST_HEAD(tbl_path->known_gates); |
1129 | 1128 | ||
1130 | 1129 | ||
1131 | tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); | 1130 | tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); |
1132 | if (!tbl_mpp) { | 1131 | if (!tbl_mpp) { |
1133 | mesh_table_free(tbl_path, true); | 1132 | mesh_table_free(tbl_path, true); |
1134 | return -ENOMEM; | 1133 | return -ENOMEM; |
1135 | } | 1134 | } |
1136 | tbl_mpp->free_node = &mesh_path_node_free; | 1135 | tbl_mpp->free_node = &mesh_path_node_free; |
1137 | tbl_mpp->copy_node = &mesh_path_node_copy; | 1136 | tbl_mpp->copy_node = &mesh_path_node_copy; |
1138 | tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; | 1137 | tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; |
1139 | tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); | 1138 | tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC); |
1140 | INIT_HLIST_HEAD(tbl_mpp->known_gates); | 1139 | INIT_HLIST_HEAD(tbl_mpp->known_gates); |
1141 | 1140 | ||
1142 | /* Need no locking since this is during init */ | 1141 | /* Need no locking since this is during init */ |
1143 | RCU_INIT_POINTER(mesh_paths, tbl_path); | 1142 | RCU_INIT_POINTER(mesh_paths, tbl_path); |
1144 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); | 1143 | RCU_INIT_POINTER(mpp_paths, tbl_mpp); |
1145 | 1144 | ||
1146 | return 0; | 1145 | return 0; |
1147 | } | 1146 | } |
1148 | 1147 | ||
1149 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | 1148 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
1150 | { | 1149 | { |
1151 | struct mesh_table *tbl; | 1150 | struct mesh_table *tbl; |
1152 | struct mesh_path *mpath; | 1151 | struct mesh_path *mpath; |
1153 | struct mpath_node *node; | 1152 | struct mpath_node *node; |
1154 | struct hlist_node *p; | 1153 | struct hlist_node *p; |
1155 | int i; | 1154 | int i; |
1156 | 1155 | ||
1157 | rcu_read_lock(); | 1156 | rcu_read_lock(); |
1158 | tbl = rcu_dereference(mesh_paths); | 1157 | tbl = rcu_dereference(mesh_paths); |
1159 | for_each_mesh_entry(tbl, p, node, i) { | 1158 | for_each_mesh_entry(tbl, p, node, i) { |
1160 | if (node->mpath->sdata != sdata) | 1159 | if (node->mpath->sdata != sdata) |
1161 | continue; | 1160 | continue; |
1162 | mpath = node->mpath; | 1161 | mpath = node->mpath; |
1163 | spin_lock_bh(&mpath->state_lock); | ||
1164 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 1162 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
1165 | (!(mpath->flags & MESH_PATH_FIXED)) && | 1163 | (!(mpath->flags & MESH_PATH_FIXED)) && |
1166 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { | 1164 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
1167 | spin_unlock_bh(&mpath->state_lock); | ||
1168 | mesh_path_del(mpath->dst, mpath->sdata); | 1165 | mesh_path_del(mpath->dst, mpath->sdata); |
1169 | } else | ||
1170 | spin_unlock_bh(&mpath->state_lock); | ||
1171 | } | ||
1172 | rcu_read_unlock(); | 1166 | rcu_read_unlock(); |
1173 | } | 1167 | } |
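
The expiry test relies on time_after(), which stays correct across jiffies wraparound by doing the comparison as a signed subtraction rather than a plain '>'. A tiny demonstration of why that matters (plain C, no kernel headers; the macro body mirrors the kernel's include/linux/jiffies.h definition minus the typecheck):

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a is after b", as the kernel's time_after() does
 * for jiffies: compare via signed subtraction, not plain '>'. */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    unsigned long exp_time = ULONG_MAX - 15;  /* just before wraparound */
    unsigned long now = 16;                   /* counter has wrapped */

    printf("plain '>':    %d\n", now > exp_time);            /* 0: wrong   */
    printf("time_after(): %d\n", time_after(now, exp_time)); /* 1: correct */
    return 0;
}
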
1174 | 1168 | ||
1175 | void mesh_pathtbl_unregister(void) | 1169 | void mesh_pathtbl_unregister(void) |
1176 | { | 1170 | { |
1177 | /* no need for locking during exit path */ | 1171 | /* no need for locking during exit path */ |
1178 | mesh_table_free(rcu_dereference_raw(mesh_paths), true); | 1172 | mesh_table_free(rcu_dereference_raw(mesh_paths), true); |
1179 | mesh_table_free(rcu_dereference_raw(mpp_paths), true); | 1173 | mesh_table_free(rcu_dereference_raw(mpp_paths), true); |
1180 | } | 1174 | } |