lib/rhashtable.c

/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

union nested_table {
	union nested_table __rcu *table;
	struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_raw(ntbl->table);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl->locks);
	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      unsigned int shifted,
					      unsigned int nhash)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && shifted) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
					    (i << shifted) | nhash);
	}

	rcu_assign_pointer(*prev, ntbl);

	return ntbl;
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				0, 0)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	else
		tbl = kvzalloc(size, gfp);

	size = nbuckets;

	if (tbl == NULL && gfp != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}
	if (tbl == NULL)
		return NULL;

	tbl->size = size;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;
	int err;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!(err = rhashtable_rehash_one(ht, old_hash)))
		;

	if (err == -ENOENT) {
		old_tbl->rehash++;
		err = 0;
	}
	spin_unlock_bh(old_bucket_lock);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
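
/*
 * Worked sizing example: with nelems = 100, roundup_pow_of_two(100 * 3 / 2) =
 * roundup_pow_of_two(150) = 256, so a table larger than 256 buckets shrinks
 * to 256 (clamped to ht->p.min_size).  The deferred worker below only calls
 * this when ht->p.automatic_shrinking is set and the table has dropped below
 * 30% utilisation (rht_shrink_below_30()).
 */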

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err)
		err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct rhash_head __rcu **pprev;
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	pprev = rht_bucket_insert(ht, tbl, hash);
	if (!pprev)
		return ERR_PTR(-ENOMEM);

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
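
/*
 * Illustrative sketch of a typical loop over the walker API above; the
 * example_walk() name is hypothetical and the element type is left opaque.
 * On -EAGAIN the iterator has been rewound, so objects may be seen twice,
 * as documented at rhashtable_walk_enter().
 */
static void __maybe_unused example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* -EAGAIN here only means "rewound" */

	while ((obj = rhashtable_walk_next(&iter))) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize: restart from slot 0 */
			break;
		}
		/* Use obj here; do not sleep between walk_start and walk_stop. */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}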

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
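
/*
 * Illustrative sketch of how a caller might combine the configuration shown
 * above with the insert/lookup fast paths from <linux/rhashtable.h>.
 * struct example_obj, example_params and example_use() are hypothetical
 * names; error handling is kept minimal.
 */
struct example_obj {
	int			key;
	struct rhash_head	node;
};

static const struct rhashtable_params example_params = {
	.head_offset = offsetof(struct example_obj, node),
	.key_offset = offsetof(struct example_obj, key),
	.key_len = sizeof(int),
	.automatic_shrinking = true,
};

static int __maybe_unused example_use(void)
{
	struct example_obj *obj, *found;
	struct rhashtable ht;
	int err;

	err = rhashtable_init(&ht, &example_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = 1;

	/* Insert, look the element up again by key, then remove it. */
	err = rhashtable_insert_fast(&ht, &obj->node, example_params);
	if (!err) {
		found = rhashtable_lookup_fast(&ht, &obj->key, example_params);
		WARN_ON(found != obj);
		err = rhashtable_remove_fast(&ht, &obj->node, example_params);
	}

	kfree(obj);
	rhashtable_destroy(&ht);
	return err;
}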

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;

	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
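
/*
 * Illustrative sketch of the rhltable variant initialised by rhltable_init()
 * above, which chains entries sharing a key instead of rejecting duplicates.
 * struct example_dup, dup_params and example_rhltable() are hypothetical
 * names.
 */
struct example_dup {
	int			key;
	struct rhlist_head	list;
};

static const struct rhashtable_params dup_params = {
	.head_offset = offsetof(struct example_dup, list),
	.key_offset = offsetof(struct example_dup, key),
	.key_len = sizeof(int),
};

static void __maybe_unused example_rhltable(struct rhltable *hlt,
					    struct example_dup *a,
					    struct example_dup *b)
{
	struct example_dup *dup;
	struct rhlist_head *pos;
	int key = a->key;

	/* Both entries may carry the same key; the rhltable chains them. */
	if (rhltable_insert(hlt, &a->list, dup_params) ||
	    rhltable_insert(hlt, &b->list, dup_params))
		return;

	rcu_read_lock();
	rhl_for_each_entry_rcu(dup, pos, rhltable_lookup(hlt, &key, dup_params),
			       list)
		pr_debug("found key %d\n", dup->key);
	rcu_read_unlock();
}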

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
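
/*
 * Illustrative sketch of tearing down a table whose elements were kmalloc'ed,
 * using the free_fn callback of rhashtable_free_and_destroy() above;
 * example_free() and example_teardown() are hypothetical names.
 */
static void example_free(void *ptr, void *arg)
{
	kfree(ptr);	/* ptr is the element itself, i.e. rht_obj(ht, obj) */
}

static void __maybe_unused example_teardown(struct rhashtable *ht)
{
	/* Frees every remaining element, then the bucket array. */
	rhashtable_free_and_destroy(ht, example_free, NULL);
}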

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	static struct rhash_head __rcu *rhnull =
		(struct rhash_head __rcu *)NULLS_MARKER(0);
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return &rhnull;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;
	unsigned int shifted;
	unsigned int nhash;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	nhash = index;
	shifted = tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift) ? shifted : 0, nhash);

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		nhash |= index << shifted;
		shifted += shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift) ? shifted : 0,
					  nhash);
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);