Blame view

lib/rhashtable.c 24.3 KB
7e1e77636   Thomas Graf   lib: Resizable, S...
1
2
3
  /*
   * Resizable, Scalable, Concurrent Hash Table
   *
02fd97c3d   Herbert Xu   rhashtable: Allow...
4
   * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
a5ec68e3b   Thomas Graf   rhashtable: Use a...
5
   * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
7e1e77636   Thomas Graf   lib: Resizable, S...
6
7
   * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
   *
7e1e77636   Thomas Graf   lib: Resizable, S...
8
   * Code partially derived from nft_hash
02fd97c3d   Herbert Xu   rhashtable: Allow...
9
10
   * Rewritten with rehash code from br_multicast plus single list
   * pointer as suggested by Josh Triplett
7e1e77636   Thomas Graf   lib: Resizable, S...
11
12
13
14
15
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License version 2 as
   * published by the Free Software Foundation.
   */
07ee0722b   Herbert Xu   rhashtable: Add c...
16
  #include <linux/atomic.h>
7e1e77636   Thomas Graf   lib: Resizable, S...
17
18
19
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/log2.h>
5beb5c90c   Eric Dumazet   rhashtable: use c...
20
  #include <linux/sched.h>
7e1e77636   Thomas Graf   lib: Resizable, S...
21
22
23
  #include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/mm.h>
87545899b   Daniel Borkmann   net: replace rema...
24
  #include <linux/jhash.h>
7e1e77636   Thomas Graf   lib: Resizable, S...
25
26
  #include <linux/random.h>
  #include <linux/rhashtable.h>
61d7b0977   Stephen Rothwell   rhashtable: using...
27
  #include <linux/err.h>
6d7954130   Hauke Mehrtens   rhashtable: add m...
28
  #include <linux/export.h>
7e1e77636   Thomas Graf   lib: Resizable, S...
29
30
  
  #define HASH_DEFAULT_SIZE	64UL
c2e213cff   Herbert Xu   rhashtable: Intro...
31
  #define HASH_MIN_SIZE		4U
4cf0b354d   Florian Westphal   rhashtable: avoid...
32
  #define BUCKET_LOCKS_PER_CPU	32UL
97defe1ec   Thomas Graf   rhashtable: Per b...
33

988dfbd79   Herbert Xu   rhashtable: Move ...
34
/* Compute the bucket index of @he in @tbl using the table's own
 * parameters (hash function, key offset/len, per-table hash_rnd).
 */
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
a03eaec0d   Thomas Graf   rhashtable: Dump ...
40
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

/* Lockdep helper: non-zero when ht->mutex is held (always true when
 * lock debugging is disabled at runtime).
 */
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

/* Lockdep helper: non-zero when the spinlock covering bucket @hash of
 * @tbl is held.
 */
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
b9ecfdaa1   Herbert Xu   rhashtable: Allow...
59
60
/* Allocate and initialise the per-bucket spinlock array for @tbl.
 *
 * The lock count scales with the possible-CPU count (forced down to 2
 * under CONFIG_PROVE_LOCKING to bound lockdep state, and capped at 64),
 * multiplied by ht->p.locks_mul, rounded up to a power of two, and
 * never more than half the number of buckets.
 *
 * Returns 0 on success or -ENOMEM.  On success tbl->locks_mask lets a
 * hash value be mapped to a lock with a simple AND.
 */
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	/* spinlock_t can be zero-sized (!SMP, !DEBUG); skip the array then. */
	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		/* Large arrays go to vmalloc when sleeping is allowed. */
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		/* Atomic callers: fail fast and quietly rather than retry. */
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	/* size is a power of two, so size - 1 is a valid bit mask. */
	tbl->locks_mask = size - 1;

	return 0;
}
  
  static void bucket_table_free(const struct bucket_table *tbl)
  {
  	if (tbl)
  		kvfree(tbl->locks);
  
  	kvfree(tbl);
  }
9d901bc05   Herbert Xu   rhashtable: Free ...
102
103
104
105
  static void bucket_table_free_rcu(struct rcu_head *head)
  {
  	bucket_table_free(container_of(head, struct bucket_table, rcu));
  }
97defe1ec   Thomas Graf   rhashtable: Per b...
106
/* Allocate a bucket table with @nbuckets buckets plus its bucket locks.
 *
 * Small requests (and all atomic ones) use kzalloc; an oversized or
 * failed GFP_KERNEL request falls back to vzalloc.  Returns NULL on
 * failure.
 */
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	/* Fresh hash seed for every table, i.e. on every resize. */
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	/* Terminate each chain with its per-bucket nulls marker. */
	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}
b824478b2   Herbert Xu   rhashtable: Add m...
135
136
137
138
139
140
141
142
143
144
145
146
  static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
  						  struct bucket_table *tbl)
  {
  	struct bucket_table *new_tbl;
  
  	do {
  		new_tbl = tbl;
  		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
  	} while (tbl);
  
  	return new_tbl;
  }
299e5c32a   Thomas Graf   rhashtable: Use '...
147
/* Migrate one entry from bucket @old_hash of the old table into the
 * newest table on the future_tbl chain.  The caller holds the old
 * bucket's lock.
 *
 * The scan walks to the LAST entry of the old chain (the one whose
 * ->next is the nulls marker) and moves that one, so the chain is
 * consumed from the tail.
 *
 * Returns 0 when an entry was moved, -ENOENT once the bucket is empty.
 */
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	/* Find the tail entry; pprev ends up addressing its link. */
	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	/* Nested annotation: the old bucket lock is already held. */
	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	/* Publish the entry in the new table before unlinking it from
	 * the old chain, so concurrent readers never lose sight of it.
	 */
	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}
97defe1ec   Thomas Graf   rhashtable: Per b...
189

299e5c32a   Thomas Graf   rhashtable: Use '...
190
191
/* Drain bucket @old_hash of the old table under its bucket lock, then
 * advance old_tbl->rehash to record that this bucket is done (the
 * insert slow path compares tbl->rehash against the bucket index to
 * pick the owning table).
 */
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	/* rhashtable_rehash_one() returns -ENOENT once the chain is empty. */
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}
b824478b2   Herbert Xu   rhashtable: Add m...
203
204
205
/* Attach @new_tbl as the successor of @old_tbl so that new insertions
 * start landing in it.  Returns -EEXIST when another resize already
 * installed a successor, 0 otherwise.
 */
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}
  
/* Complete a previously attached rehash: migrate every bucket into the
 * future table, publish it as ht->tbl, invalidate walkers parked on the
 * old table, and free the old table after an RCU grace period.
 *
 * Returns -EAGAIN when yet another future table is already queued
 * behind the one just installed (caller should rehash again), else 0.
 */
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Walkers still referencing the old table must restart. */
	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
  
  /**
   * rhashtable_expand - Expand hash table while allowing concurrent lookups
   * @ht:		the hash table to expand
7e1e77636   Thomas Graf   lib: Resizable, S...
258
   *
aa34a6cb0   Herbert Xu   rhashtable: Add a...
259
   * A secondary bucket array is allocated and the hash entries are migrated.
7e1e77636   Thomas Graf   lib: Resizable, S...
260
261
262
263
   *
   * This function may only be called in a context where it is safe to call
   * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
   *
97defe1ec   Thomas Graf   rhashtable: Per b...
264
265
266
267
268
   * The caller must ensure that no concurrent resizing occurs by holding
   * ht->mutex.
   *
   * It is valid to have concurrent insertions and deletions protected by per
   * bucket locks or concurrent RCU protected lookups and traversals.
7e1e77636   Thomas Graf   lib: Resizable, S...
269
   */
b824478b2   Herbert Xu   rhashtable: Add m...
270
  static int rhashtable_expand(struct rhashtable *ht)
7e1e77636   Thomas Graf   lib: Resizable, S...
271
272
  {
  	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
b824478b2   Herbert Xu   rhashtable: Add m...
273
  	int err;
7e1e77636   Thomas Graf   lib: Resizable, S...
274
275
  
  	ASSERT_RHT_MUTEX(ht);
b824478b2   Herbert Xu   rhashtable: Add m...
276
  	old_tbl = rhashtable_last_table(ht, old_tbl);
b9ecfdaa1   Herbert Xu   rhashtable: Allow...
277
  	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
7e1e77636   Thomas Graf   lib: Resizable, S...
278
279
  	if (new_tbl == NULL)
  		return -ENOMEM;
b824478b2   Herbert Xu   rhashtable: Add m...
280
281
282
283
284
  	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
  	if (err)
  		bucket_table_free(new_tbl);
  
  	return err;
7e1e77636   Thomas Graf   lib: Resizable, S...
285
  }
7e1e77636   Thomas Graf   lib: Resizable, S...
286
287
288
289
  
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	/* Target 150% of the element count, clamped to min_size, so the
	 * new table is not immediately above the 75% grow threshold.
	 */
	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	/* Do not shrink while another rehash is still pending. */
	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
7e1e77636   Thomas Graf   lib: Resizable, S...
329

97defe1ec   Thomas Graf   rhashtable: Per b...
330
331
332
333
/* Deferred resize worker.  Runs under ht->mutex: grows when the newest
 * table is above 75% load, shrinks below 30% if automatic shrinking is
 * enabled, then performs the actual migration.  Reschedules itself
 * while more rehashing remains (-EAGAIN from rhashtable_rehash_table).
 */
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	/* Size decisions are made against the newest table on the chain. */
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
ca26893f0   Herbert Xu   rhashtable: Add r...
353
354
/* Called from the insert slow path when a chain exceeded its elasticity
 * or the table is overloaded: allocate a replacement table atomically
 * (double the size above 75% load, same size for a forced rehash) and
 * attach it.
 *
 * Returns 0 on success or when someone else already attached a rehash;
 * -EBUSY when a rehash is requested on a table that is no longer
 * current; -ENOMEM when the atomic allocation failed (an async retry
 * in process context is scheduled in that case).
 */
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		/* -EEXIST means another CPU attached one; that's fine. */
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
ccd57b1bd   Herbert Xu   rhashtable: Add i...
398

ca26893f0   Herbert Xu   rhashtable: Add r...
399
400
401
  static void *rhashtable_lookup_one(struct rhashtable *ht,
  				   struct bucket_table *tbl, unsigned int hash,
  				   const void *key, struct rhash_head *obj)
02fd97c3d   Herbert Xu   rhashtable: Allow...
402
  {
ca26893f0   Herbert Xu   rhashtable: Add r...
403
404
405
406
407
  	struct rhashtable_compare_arg arg = {
  		.ht = ht,
  		.key = key,
  	};
  	struct rhash_head __rcu **pprev;
02fd97c3d   Herbert Xu   rhashtable: Allow...
408
  	struct rhash_head *head;
ca26893f0   Herbert Xu   rhashtable: Add r...
409
  	int elasticity;
02fd97c3d   Herbert Xu   rhashtable: Allow...
410

ca26893f0   Herbert Xu   rhashtable: Add r...
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
  	elasticity = ht->elasticity;
  	pprev = &tbl->buckets[hash];
  	rht_for_each(head, tbl, hash) {
  		struct rhlist_head *list;
  		struct rhlist_head *plist;
  
  		elasticity--;
  		if (!key ||
  		    (ht->p.obj_cmpfn ?
  		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
  		     rhashtable_compare(&arg, rht_obj(ht, head))))
  			continue;
  
  		if (!ht->rhlist)
  			return rht_obj(ht, head);
  
  		list = container_of(obj, struct rhlist_head, rhead);
  		plist = container_of(head, struct rhlist_head, rhead);
  
  		RCU_INIT_POINTER(list->next, plist);
  		head = rht_dereference_bucket(head->next, tbl, hash);
  		RCU_INIT_POINTER(list->rhead.next, head);
  		rcu_assign_pointer(*pprev, obj);
  
  		return NULL;
5ca8cc5bf   Pablo Neira Ayuso   rhashtable: add r...
436
  	}
02fd97c3d   Herbert Xu   rhashtable: Allow...
437

ca26893f0   Herbert Xu   rhashtable: Add r...
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
  	if (elasticity <= 0)
  		return ERR_PTR(-EAGAIN);
  
  	return ERR_PTR(-ENOENT);
  }
  
/* Insert @obj at the head of bucket @hash in @tbl, given @data, the
 * result of rhashtable_lookup_one().  Caller holds the bucket lock.
 *
 * Returns NULL once the object is inserted, the next table on the
 * future_tbl chain when the insert must be retried there, or an
 * ERR_PTR: -EEXIST (duplicate key on a plain table), -E2BIG (above
 * max size), -EAGAIN (above 100% load, force a rehash first).
 */
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	/* A non-error, non-NULL lookup result is an existing match. */
	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	/* Propagate anything other than "not found"/"rehash needed". */
	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	/* A newer table exists: the insert must happen there instead. */
	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
02fd97c3d   Herbert Xu   rhashtable: Allow...
490

ca26893f0   Herbert Xu   rhashtable: Add r...
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
/* One attempt of the insertion slow path.  Finds and locks the bucket
 * in the oldest table that still owns it (tbl->rehash > hash means the
 * bucket has already been migrated), tries the insert there, and chases
 * future_tbl with nested bucket locks as long as rhashtable_insert_one()
 * redirects us.  On -EAGAIN an immediate atomic rehash is attempted.
 */
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	/* Retry in each newer table; the first lock stays held. */
	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}
  
  void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
  			     struct rhash_head *obj)
  {
  	void *data;
  
  	do {
  		rcu_read_lock();
  		data = rhashtable_try_insert(ht, key, obj);
  		rcu_read_unlock();
  	} while (PTR_ERR(data) == -EAGAIN);
  
  	return data;
02fd97c3d   Herbert Xu   rhashtable: Allow...
557
558
  }
  EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
f2dba9c6f   Herbert Xu   rhashtable: Intro...
559
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	/* Register on the current table's walker list so a concurrent
	 * rehash can invalidate this iterator (walker->tbl = NULL).
	 */
	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
f2dba9c6f   Herbert Xu   rhashtable: Intro...
592
593
594
595
596
597
598
599
600
  
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	/* walker.tbl is NULL if a rehash already unhooked us. */
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  
/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occured.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	/* Leave the walker list for the duration of the walk. */
	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	/* A rehash cleared walker.tbl: rewind onto the current table. */
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);
  
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occured.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	/* iter->p non-NULL means we are resuming mid-bucket: step past
	 * the entry returned last time.  In rhlist mode try the inner
	 * per-key list first before advancing along the bucket chain.
	 */
	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		/* iter->skip counts entries already returned from this
		 * bucket, so a walk restarted via walk_stop/walk_start
		 * resumes at the right position.
		 */
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				/* Each rhash_head heads a list of
				 * rhlist_head entries; skip within it.
				 */
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		/* Chains are terminated by a nulls marker, not NULL. */
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	/* A resize installed a successor table: restart the walk there
	 * and let the caller know via -EAGAIN.
	 */
	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
  
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	/* If the table is still being rehashed, park the walker on the
	 * table's walker list so the resize code can move it along.
	 * Otherwise the table is fully rehashed (rehash >= size): drop
	 * our reference, which makes the next rhashtable_walk_start
	 * return -EAGAIN.
	 */
	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
488fb86ee   Herbert Xu   rhashtable: Make ...
743
  static size_t rounded_hashtable_size(const struct rhashtable_params *params)
7e1e77636   Thomas Graf   lib: Resizable, S...
744
  {
940001762   Ying Xue   lib/rhashtable: a...
745
  	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
e2e21c1c5   Herbert Xu   rhashtable: Remov...
746
  		   (unsigned long)params->min_size);
7e1e77636   Thomas Graf   lib: Resizable, S...
747
  }
31ccde2da   Herbert Xu   rhashtable: Allow...
748
749
750
751
/* Adapt jhash2(), which hashes an array of u32 words, to the rhashtable
 * hashfn signature.  Used when the key length is a multiple of
 * sizeof(u32); in that case rhashtable_init() converts ht->key_len to a
 * word count, so @length here is in u32 words, not bytes.
 */
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
7e1e77636   Thomas Graf   lib: Resizable, S...
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	/* Either a fixed key length (default hashing) or a caller
	 * supplied obj_hashfn is required; obj_hashfn in turn needs a
	 * matching obj_cmpfn for lookups.
	 */
	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	/* Table sizes are kept as powers of two. */
	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	/* Hard entry cap defaults to twice the maximum table size. */
	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	/* With no caller hashfn, use jhash; when the key length is a
	 * whole number of u32 words, switch to the faster jhash2 and
	 * express key_len in words instead of bytes.
	 */
	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	/* Deferred worker performs asynchronous resizes/rehashes. */
	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
  
  /**
ca26893f0   Herbert Xu   rhashtable: Add r...
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
   * rhltable_init - initialize a new hash list table
   * @hlt:	hash list table to be initialized
   * @params:	configuration parameters
   *
   * Initializes a new hash list table.
   *
   * See documentation for rhashtable_init.
   */
  int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
  {
  	int err;
  
  	/* No rhlist NULLs marking for now. */
  	if (params->nulls_base)
  		return -EINVAL;
  
  	err = rhashtable_init(&hlt->ht, params);
  	hlt->ht.rhlist = true;
  	return err;
  }
  EXPORT_SYMBOL_GPL(rhltable_init);
  
  static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
  				void (*free_fn)(void *ptr, void *arg),
  				void *arg)
  {
  	struct rhlist_head *list;
  
  	if (!ht->rhlist) {
  		free_fn(rht_obj(ht, obj), arg);
  		return;
  	}
  
  	list = container_of(obj, struct rhlist_head, rhead);
  	do {
  		obj = &list->rhead;
  		list = rht_dereference(list->next, ht);
  		free_fn(rht_obj(ht, obj), arg);
  	} while (list);
  }
  
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to releasal resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occurs in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	/* May sleep: waits for a pending deferred resize/rehash. */
	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			/* Chains end in a nulls marker, not NULL, and
			 * @next is loaded before @pos is handed to
			 * free_fn so the element may be freed safely.
			 */
			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
  
  void rhashtable_destroy(struct rhashtable *ht)
  {
  	return rhashtable_free_and_destroy(ht, NULL, NULL);
  }
7e1e77636   Thomas Graf   lib: Resizable, S...
956
  EXPORT_SYMBOL_GPL(rhashtable_destroy);