Blame view
lib/rhashtable.c
29.5 KB
d2912cb15 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */
07ee0722b rhashtable: Add c... |
13 |
#include <linux/atomic.h> |
7e1e77636 lib: Resizable, S... |
14 15 16 |
#include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> |
5beb5c90c rhashtable: use c... |
17 |
#include <linux/sched.h> |
b2d091031 sched/headers: Pr... |
18 |
#include <linux/rculist.h> |
7e1e77636 lib: Resizable, S... |
19 20 21 |
#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> |
87545899b net: replace rema... |
22 |
#include <linux/jhash.h> |
7e1e77636 lib: Resizable, S... |
23 24 |
#include <linux/random.h> #include <linux/rhashtable.h> |
61d7b0977 rhashtable: using... |
25 |
#include <linux/err.h> |
6d7954130 rhashtable: add m... |
26 |
#include <linux/export.h> |
7e1e77636 lib: Resizable, S... |
27 28 |
/* Bucket-table sizing bounds; both are powers of two as the hash is masked. */
#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
|
da20420f8 rhashtable: Add n... |
31 32 |
/*
 * One slot of the directory used by a "nested" bucket table (allocated
 * when a flat allocation failed): either a pointer to the next directory
 * level, or — at the leaf level — an actual bucket head.
 */
union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};
988dfbd79 rhashtable: Move ... |
35 |
/* Compute the bucket index in @tbl for the object containing @he. */
static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}
a03eaec0d rhashtable: Dump ... |
41 |
#ifdef CONFIG_PROVE_LOCKING |
a03eaec0d rhashtable: Dump ... |
42 |
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) |
a03eaec0d rhashtable: Dump ... |
43 44 45 46 47 48 49 50 51 |
int lockdep_rht_mutex_is_held(struct rhashtable *ht) { return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; } EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) { |
8f0db0180 rhashtable: use b... |
52 53 54 55 |
if (!debug_locks) return 1; if (unlikely(tbl->nest)) return 1; |
ca0b709d1 rhashtable: use B... |
56 |
return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); |
a03eaec0d rhashtable: Dump ... |
57 58 59 60 |
} EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); #else #define ASSERT_RHT_MUTEX(HT) |
a03eaec0d rhashtable: Dump ... |
61 |
#endif |
4a3084aaa rhashtable: Drop ... |
62 63 64 65 66 67 68 69 |
/* Return the top level of a nested table's directory. */
static inline union nested_table *nested_table_top(
	const struct bucket_table *tbl)
{
	/* The top-level bucket entry does not need RCU protection
	 * because it's set at the same time as tbl->nest.
	 */
	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}
da20420f8 rhashtable: Add n... |
70 71 72 73 74 |
/* Recursively free one subtree of a nested table's directory.
 * @size is the number of buckets still covered by this subtree.
 */
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_protected(ntbl->table, 1);
	if (!ntbl)
		return;

	if (size > len) {
		/* Not a leaf page yet: descend into each populated child. */
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

/* Free the whole directory of a nested bucket table (not @tbl itself). */
static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = nested_table_top(tbl);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}
97defe1ec rhashtable: Per b... |
101 102 |
/* Free a bucket table, including any nested directory it owns. */
static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}
9d901bc05 rhashtable: Free ... |
107 108 109 110 |
/* RCU callback: free the old table once all readers are done with it. */
static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}
da20420f8 rhashtable: Add n... |
111 112 |
/* Populate (or find already-populated) one directory slot of a nested
 * table.  @leaf says whether the new page holds bucket heads (which must
 * be initialised to nulls markers) or further directory pointers.
 * Returns the page now installed at *@prev, or NULL on allocation failure.
 */
static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

/* Allocate a "nested" bucket table: a small header plus a lazily grown
 * page directory.  Used as fallback when a flat table can't be allocated.
 */
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	/* Too small to be worth nesting: one page would already fit it. */
	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = kzalloc(size, gfp);
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}
97defe1ec rhashtable: Per b... |
161 |
/* Allocate and initialise a bucket table of @nbuckets buckets.
 * Falls back to a nested table when the flat allocation fails and the
 * caller can't sleep/retry (i.e. gfp is not plain GFP_KERNEL).
 */
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		/* Nested table: no flat bucket array to initialise below. */
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}
b824478b2 rhashtable: Add m... |
192 193 194 195 196 197 198 199 200 201 202 203 |
/* Follow the future_tbl chain from @tbl and return the newest table. */
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
8f0db0180 rhashtable: use b... |
204 |
/* Move one entry from bucket @old_hash of the old table into the newest
 * table.  Called with the old bucket's bit-lock held via @bkt.
 * Returns 0 on success, -ENOENT when the bucket is empty, or -EAGAIN
 * when the destination is a nested table (caller must restart rehash).
 */
static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	/* Walk to the last entry of the chain; entries are moved from the
	 * tail so that concurrent lookups in the old table stay coherent.
	 */
	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

	/* Unlink the moved entry from the old chain. */
	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock. */
		rht_assign_locked(bkt, next);

out:
	return err;
}
97defe1ec rhashtable: Per b... |
252 |
|
da20420f8 rhashtable: Add n... |
253 |
/* Rehash every entry of bucket @old_hash into the newest table.
 * Returns 0 when the bucket has been fully drained.
 */
static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	int err;

	/* A NULL bucket in a nested table was never populated. */
	if (!bkt)
		return 0;
	rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	/* -ENOENT just means the chain is now empty: success. */
	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt);

	return err;
}
b824478b2 rhashtable: Add m... |
273 274 275 |
/* Link @new_tbl as the successor of @old_tbl, or fail with -EEXIST if a
 * rehash is already pending.
 */
static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

/* Perform a pending rehash: move every bucket, publish the new table, and
 * free the old one after an RCU grace period.  Returns -EAGAIN if yet
 * another table was attached meanwhile, so the worker should loop.
 */
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
da20420f8 rhashtable: Add n... |
325 326 327 |
/* Allocate a table of @size buckets and attach it as @old_tbl's successor.
 * Caller must hold ht->mutex.
 */
static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
7e1e77636 lib: Resizable, S... |
342 343 344 345 |
/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	/* Target 2/3 occupancy so the shrunk table won't immediately grow. */
	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}
7e1e77636 lib: Resizable, S... |
376 |
|
97defe1ec rhashtable: Per b... |
377 378 379 380 |
/* Deferred work: grow, shrink, or flatten (un-nest) the table as needed,
 * then carry out any pending rehash.  Reschedules itself on failure.
 */
static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		/* Retry replacing a nested table with a flat one. */
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
ca26893f0 rhashtable: Add r... |
408 409 |
/* Slow-path helper: an insert hit the elasticity limit or ran out of
 * room, so try to attach a bigger table synchronously (atomic context).
 * Returns 0 if the insert may be retried, or a negative error.
 */
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}
ccd57b1bd rhashtable: Add i... |
453 |
|
ca26893f0 rhashtable: Add r... |
454 |
/* Search bucket @hash for @key while holding its bit-lock via @bkt.
 * For rhlist tables a match absorbs @obj into the matching list and
 * returns NULL; for plain tables a match returns the existing object.
 * Returns ERR_PTR(-ENOENT) when not found, or ERR_PTR(-EAGAIN) when the
 * chain exceeded RHT_ELASTICITY (caller should trigger a rehash).
 */
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		/* rhlist: splice @obj in front of the matching sub-list. */
		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}
ce9b362bf rhashtable: Resto... |
504 505 506 507 |
/* Insert @obj at the head of bucket @hash, given @data from the preceding
 * rhashtable_lookup_one().  Bucket bit-lock is held via @bkt.
 * Returns NULL on success, the future table if the insert must be retried
 * there, or an ERR_PTR (-EEXIST duplicate, -E2BIG full, -EAGAIN rehash).
 */
static struct bucket_table *rhashtable_insert_one(
	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
	void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	/* A rehash is in flight: insert into the newest table instead. */
	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}
02fd97c3d rhashtable: Allow... |
552 |
|
ca26893f0 rhashtable: Add r... |
553 554 555 556 557 |
/* One attempt at a slow-path insert under rcu_read_lock(): locate the
 * right bucket (chasing future tables), lock it, look up, then insert.
 * Returns the existing object on duplicate, NULL on success, or an
 * ERR_PTR; -EAGAIN means the caller should retry after a rehash.
 */
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

/* Public slow path: retry rhashtable_try_insert() until the table has
 * room or a hard error occurs.
 */
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
f2dba9c6f rhashtable: Intro... |
607 |
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	/* Register the walker so a rehash can invalidate iter->walker.tbl. */
	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
f2dba9c6f rhashtable: Intro... |
643 644 645 646 647 |
/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occured.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	/* Detach from the walker list; RCU now pins the table instead. */
	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		/* A rehash invalidated our table: restart from scratch. */
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
f2dba9c6f rhashtable: Intro... |
738 739 |
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		/* Skip the entries already returned from this bucket. */
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		/* A rehash produced a new table: restart the walk there. */
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		/* Advance within the current bucket: on an rhltable first
		 * try the next entry of the current rhlist bundle, else
		 * step to the next rhash_head in the chain.
		 */
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	/* If a previous walk call already delivered an object, report it
	 * again without touching the iterator state.
	 */
	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	/* Re-link the walker onto the table's walker list under ht->lock so
	 * a concurrent resize can find it and update iter->walker.tbl
	 * (NOTE(review): the resize side is not visible in this chunk -
	 * confirm against rhashtable_rehash_table()).
	 */
	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
488fb86ee rhashtable: Make ... |
910 |
static size_t rounded_hashtable_size(const struct rhashtable_params *params) |
7e1e77636 lib: Resizable, S... |
911 |
{ |
107d01f5b lib/rhashtable: c... |
912 913 914 915 916 917 918 919 920 921 |
size_t retsize; if (params->nelem_hint) retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), (unsigned long)params->min_size); else retsize = max(HASH_DEFAULT_SIZE, (unsigned long)params->min_size); return retsize; |
7e1e77636 lib: Resizable, S... |
922 |
} |
31ccde2da rhashtable: Allow... |
923 924 925 926 |
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) { return jhash2(key, length, seed); } |
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	/* Either a fixed key length or an obj_hashfn is mandatory, and a
	 * custom obj_hashfn is only usable with a matching obj_cmpfn.
	 */
	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	/* Take a private copy of the params; the adjustments below are
	 * applied to ht->p, never to the caller's structure.
	 */
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	/* Default to jhash; switch to the word-wise jhash2 variant when the
	 * key length is a whole number of u32 words (key_len is then kept
	 * in units of u32 words).
	 */
	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* An rhltable is an rhashtable with the rhlist flag set, which
	 * makes each bucket entry the head of a list of objects sharing
	 * the same key.
	 */
	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

/* Hand one table entry to free_fn; for rhltables this releases every
 * rhlist_head bundled behind the given rhash_head.
 */
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		/* Fetch the next link before free_fn may free this one. */
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}
/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occurs in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	/* Make sure no deferred resize/rehash work is still running. */
	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			/* 'next' is fetched before free_fn may free 'pos';
			 * rht_is_a_nulls() marks the end of the chain.
			 */
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	/* Also tear down any future table left behind by an interrupted
	 * resize before freeing the current one.
	 */
	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

/**
 * rhashtable_destroy - destroy the hash table without freeing elements
 * @ht: the hash table to destroy
 *
 * Convenience wrapper around rhashtable_free_and_destroy() that passes no
 * per-element callback.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
/* Look up the bucket pointer for 'hash' in a nested (multi-level) bucket
 * table. The top level covers (1 << tbl->nest) slots; each further level
 * is one page worth of pointers, indexed by successive 'shift'-bit chunks
 * of the hash. Returns NULL when an intermediate table has not been
 * allocated.
 */
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;

}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);
/* As __rht_bucket_nested(), but for unallocated buckets returns a pointer
 * to a static nulls marker instead of NULL, so callers can treat the
 * bucket as an empty chain.
 */
struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	/* NOTE(review): rhnull is initialized lazily without
	 * synchronization; looks benign because every racing initializer
	 * stores the same nulls value - confirm.
	 */
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);
/* Like __rht_bucket_nested(), but for insertion: allocate any missing
 * intermediate nested tables on the way down. Returns NULL if a required
 * nested table could not be set up.
 */
struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	hash >>= tbl->nest;
	/* The last argument appears to tell nested_table_alloc() whether
	 * the level being allocated is a leaf level (buckets rather than
	 * pointers to further tables) - NOTE(review): inferred from usage,
	 * confirm against nested_table_alloc() (not visible in this chunk).
	 */
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;

}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);