Commit 6b86bd62a505a4a9739474f00f8088395b7a80ba

Authored by Johannes Berg
Committed by John W. Linville
1 parent 85a9994a0a

mac80211: mesh: move some code to make it static

There's no need to have the table functions in one
file and all their users in another; move the functions
to the right file and make them static. Also move
a static variable to the beginning of the file to
make it easier to find.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
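For context on the "make it static" part: at file scope in C, static gives a function internal linkage, so it is callable only from within its own translation unit and needs no prototype in a shared header. That is exactly what the diff below does with the mesh table helpers. A minimal sketch with hypothetical names (not code from this commit):

#include <stdio.h>

/* Internal linkage: callable only within this translation unit, so no
 * prototype for it belongs in a shared header. */
static int table_hash(int key)
{
	return key & 0xf;	/* trivial stand-in for a real hash */
}

/* External linkage: the file's public entry point. */
int table_lookup(int key)
{
	return table_hash(key);
}

int main(void)
{
	printf("%d\n", table_lookup(42));	/* 42 & 0xf == 10 */
	return 0;
}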

Showing 3 changed files with 53 additions and 54 deletions

net/mac80211/mesh.c
@@ -287,49 +287,6 @@
 	}
 }
 
-u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
-{
-	/* Use last four bytes of hw addr and interface index as hash index */
-	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
-		& tbl->hash_mask;
-}
-
-struct mesh_table *mesh_table_alloc(int size_order)
-{
-	int i;
-	struct mesh_table *newtbl;
-
-	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
-	if (!newtbl)
-		return NULL;
-
-	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
-			(1 << size_order), GFP_KERNEL);
-
-	if (!newtbl->hash_buckets) {
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
-			(1 << size_order), GFP_KERNEL);
-	if (!newtbl->hashwlock) {
-		kfree(newtbl->hash_buckets);
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->size_order = size_order;
-	newtbl->hash_mask = (1 << size_order) - 1;
-	atomic_set(&newtbl->entries, 0);
-	get_random_bytes(&newtbl->hash_rnd,
-			sizeof(newtbl->hash_rnd));
-	for (i = 0; i <= newtbl->hash_mask; i++)
-		spin_lock_init(&newtbl->hashwlock[i]);
-
-	return newtbl;
-}
-
 
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
net/mac80211/mesh.h
@@ -240,12 +240,8 @@
 
 /* Private interfaces */
 /* Mesh tables */
-struct mesh_table *mesh_table_alloc(int size_order);
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
 void mesh_mpath_table_grow(void);
 void mesh_mpp_table_grow(void);
-u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
-		struct mesh_table *tbl);
 /* Mesh paths */
 int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
 		const u8 *ra, struct ieee80211_sub_if_data *sdata);
net/mac80211/mesh_pathtbl.c
@@ -40,6 +40,50 @@
 static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
 
 int mesh_paths_generation;
+
+/* This lock will have the grow table function as writer and add / delete nodes
+ * as readers. When reading the table (i.e. doing lookups) we are well protected
+ * by RCU
+ */
+static DEFINE_RWLOCK(pathtbl_resize_lock);
+
+
+static struct mesh_table *mesh_table_alloc(int size_order)
+{
+	int i;
+	struct mesh_table *newtbl;
+
+	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
+	if (!newtbl)
+		return NULL;
+
+	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
+			(1 << size_order), GFP_KERNEL);
+
+	if (!newtbl->hash_buckets) {
+		kfree(newtbl);
+		return NULL;
+	}
+
+	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
+			(1 << size_order), GFP_KERNEL);
+	if (!newtbl->hashwlock) {
+		kfree(newtbl->hash_buckets);
+		kfree(newtbl);
+		return NULL;
+	}
+
+	newtbl->size_order = size_order;
+	newtbl->hash_mask = (1 << size_order) - 1;
+	atomic_set(&newtbl->entries, 0);
+	get_random_bytes(&newtbl->hash_rnd,
+			sizeof(newtbl->hash_rnd));
+	for (i = 0; i <= newtbl->hash_mask; i++)
+		spin_lock_init(&newtbl->hashwlock[i]);
+
+	return newtbl;
+}
+
 static void __mesh_table_free(struct mesh_table *tbl)
 {
 	kfree(tbl->hash_buckets);
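The rwlock comment carried along in the hunk above encodes a three-tier scheme: table growth takes pathtbl_resize_lock as writer; per-entry add/delete take it as reader, excluding growth but not each other, and rely on the per-bucket hashwlock spinlocks to serialize writers hitting the same bucket; lookups take neither lock and rely on RCU. A sketch of that discipline, with hypothetical *_sketch helpers and the actual list manipulation elided:

static int path_add_sketch(struct mesh_table *tbl, u32 idx)
{
	read_lock(&pathtbl_resize_lock);	/* exclude a concurrent grow */
	spin_lock(&tbl->hashwlock[idx]);	/* serialize writers on this bucket */
	/* ... hlist_add_head_rcu() the new node into bucket idx ... */
	spin_unlock(&tbl->hashwlock[idx]);
	read_unlock(&pathtbl_resize_lock);
	return 0;
}

static void table_grow_sketch(void)
{
	write_lock(&pathtbl_resize_lock);	/* exclude all adds and deletes */
	/* ... copy entries into a larger table and publish it with
	 * rcu_assign_pointer() ... */
	write_unlock(&pathtbl_resize_lock);
}

static void path_lookup_sketch(struct mesh_table *tbl, u32 idx)
{
	rcu_read_lock();	/* lookups take no table locks at all */
	/* ... hlist_for_each_entry_rcu() walk of bucket idx ... */
	rcu_read_unlock();
}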
@@ -47,7 +91,7 @@
 	kfree(tbl);
 }
 
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
 {
 	struct hlist_head *mesh_hash;
 	struct hlist_node *p, *q;
@@ -66,7 +110,7 @@
 }
 
 static int mesh_table_grow(struct mesh_table *oldtbl,
-		struct mesh_table *newtbl)
+			   struct mesh_table *newtbl)
 {
 	struct hlist_head *oldhash;
 	struct hlist_node *p, *q;
 
@@ -97,12 +141,14 @@
 	return -ENOMEM;
 }
 
+static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
+			   struct mesh_table *tbl)
+{
+	/* Use last four bytes of hw addr and interface index as hash index */
+	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
+		& tbl->hash_mask;
+}
 
-/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
- */
-static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 /**
  *
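The relocated mesh_table_hash() reads bytes 2..5 of the 6-byte hardware address as a u32 (its last four bytes, in host byte order), mixes them with the interface index and the table's random seed via jhash_2words(), and masks the result into the power-of-two bucket range, since hash_mask is (1 << size_order) - 1. A self-contained userspace sketch of the same bucket-selection pattern; simple_mix() is a hypothetical stand-in for the kernel's jhash_2words(), which is not available outside the kernel:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's jhash_2words(). */
static uint32_t simple_mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = seed ^ a;

	h = (h ^ (h >> 16)) * 0x45d9f3bu;
	h ^= b;
	h = (h ^ (h >> 16)) * 0x45d9f3bu;
	return h ^ (h >> 16);
}

static uint32_t bucket_for(const uint8_t addr[6], uint32_t ifindex,
			   uint32_t seed, uint32_t hash_mask)
{
	uint32_t last4;

	/* Last four bytes of the hw address, as in mesh_table_hash();
	 * memcpy avoids the unaligned pointer cast of the original. */
	memcpy(&last4, addr + 2, sizeof(last4));
	return simple_mix(last4, ifindex, seed) & hash_mask;
}

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1b, 0x44, 0x11, 0x3a, 0xb7 };

	/* hash_mask == (1 << size_order) - 1; size_order 5 gives 32 buckets */
	printf("bucket %u of 32\n", bucket_for(addr, 3, 0xdeadbeefu, 31));
	return 0;
}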