Blame view
lib/rhashtable.c
29.4 KB
d2912cb15 treewide: Replace... |
1 |
// SPDX-License-Identifier: GPL-2.0-only |
7e1e77636 lib: Resizable, S... |
2 3 4 |
/* * Resizable, Scalable, Concurrent Hash Table * |
02fd97c3d rhashtable: Allow... |
5 |
* Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au> |
a5ec68e3b rhashtable: Use a... |
6 |
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> |
7e1e77636 lib: Resizable, S... |
7 8 |
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * |
7e1e77636 lib: Resizable, S... |
9 |
* Code partially derived from nft_hash |
02fd97c3d rhashtable: Allow... |
10 11 |
* Rewritten with rehash code from br_multicast plus single list * pointer as suggested by Josh Triplett |
7e1e77636 lib: Resizable, S... |
12 |
*/ |
07ee0722b rhashtable: Add c... |
13 |
#include <linux/atomic.h> |
7e1e77636 lib: Resizable, S... |
14 15 16 |
#include <linux/kernel.h> #include <linux/init.h> #include <linux/log2.h> |
5beb5c90c rhashtable: use c... |
17 |
#include <linux/sched.h> |
b2d091031 sched/headers: Pr... |
18 |
#include <linux/rculist.h> |
7e1e77636 lib: Resizable, S... |
19 20 21 |
#include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mm.h> |
87545899b net: replace rema... |
22 |
#include <linux/jhash.h> |
7e1e77636 lib: Resizable, S... |
23 24 |
#include <linux/random.h> #include <linux/rhashtable.h> |
61d7b0977 rhashtable: using... |
25 |
#include <linux/err.h> |
6d7954130 rhashtable: add m... |
26 |
#include <linux/export.h> |
7e1e77636 lib: Resizable, S... |
27 28 |
#define HASH_DEFAULT_SIZE 64UL |
c2e213cff rhashtable: Intro... |
29 |
#define HASH_MIN_SIZE 4U |
97defe1ec rhashtable: Per b... |
30 |
|
da20420f8 rhashtable: Add n... |
31 32 |
union nested_table { union nested_table __rcu *table; |
ba6306e3f rhashtable: Remov... |
33 |
struct rhash_lock_head *bucket; |
da20420f8 rhashtable: Add n... |
34 |
}; |
988dfbd79 rhashtable: Move ... |
35 |
static u32 head_hashfn(struct rhashtable *ht, |
8d24c0b43 rhashtable: Do ha... |
36 37 |
const struct bucket_table *tbl, const struct rhash_head *he) |
7e1e77636 lib: Resizable, S... |
38 |
{ |
02fd97c3d rhashtable: Allow... |
39 |
return rht_head_hashfn(ht, tbl, he, ht->p); |
7e1e77636 lib: Resizable, S... |
40 |
} |
#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	/* When lockdep is disabled, report "held" so checks don't fire. */
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	/* Nested tables use the top-level bucket lock; can't check here. */
	if (unlikely(tbl->nest))
		return 1;
	/* Bucket locks are bit 0 spin locks embedded in the bucket pointer. */
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
da20420f8 rhashtable: Add n... |
62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 |
static void nested_table_free(union nested_table *ntbl, unsigned int size) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); const unsigned int len = 1 << shift; unsigned int i; ntbl = rcu_dereference_raw(ntbl->table); if (!ntbl) return; if (size > len) { size >>= shift; for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); } kfree(ntbl); } static void nested_bucket_table_free(const struct bucket_table *tbl) { unsigned int size = tbl->size >> tbl->nest; unsigned int len = 1 << tbl->nest; union nested_table *ntbl; unsigned int i; ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]); for (i = 0; i < len; i++) nested_table_free(ntbl + i, size); kfree(ntbl); } |
97defe1ec rhashtable: Per b... |
95 96 |
static void bucket_table_free(const struct bucket_table *tbl) { |
da20420f8 rhashtable: Add n... |
97 98 |
if (tbl->nest) nested_bucket_table_free(tbl); |
97defe1ec rhashtable: Per b... |
99 100 |
kvfree(tbl); } |
9d901bc05 rhashtable: Free ... |
101 102 103 104 |
static void bucket_table_free_rcu(struct rcu_head *head) { bucket_table_free(container_of(head, struct bucket_table, rcu)); } |
da20420f8 rhashtable: Add n... |
105 106 |
static union nested_table *nested_table_alloc(struct rhashtable *ht, union nested_table __rcu **prev, |
5af68ef73 rhashtable: simpl... |
107 |
bool leaf) |
da20420f8 rhashtable: Add n... |
108 109 110 111 112 113 114 115 116 |
{ union nested_table *ntbl; int i; ntbl = rcu_dereference(*prev); if (ntbl) return ntbl; ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); |
5af68ef73 rhashtable: simpl... |
117 118 |
if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) |
9b4f64a22 rhashtable: simpl... |
119 |
INIT_RHT_NULLS_HEAD(ntbl[i].bucket); |
da20420f8 rhashtable: Add n... |
120 |
} |
e9458a4e3 rhashtable: Fix c... |
121 |
if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL) |
7a41c294c rhashtable: use c... |
122 123 124 125 |
return ntbl; /* Raced with another thread. */ kfree(ntbl); return rcu_dereference(*prev); |
da20420f8 rhashtable: Add n... |
126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 |
} static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size_t nbuckets, gfp_t gfp) { const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); struct bucket_table *tbl; size_t size; if (nbuckets < (1 << (shift + 1))) return NULL; size = sizeof(*tbl) + sizeof(tbl->buckets[0]); tbl = kzalloc(size, gfp); if (!tbl) return NULL; if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, |
5af68ef73 rhashtable: simpl... |
146 |
false)) { |
da20420f8 rhashtable: Add n... |
147 148 149 150 151 152 153 154 |
kfree(tbl); return NULL; } tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; return tbl; } |
97defe1ec rhashtable: Per b... |
155 |
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, |
b9ecfdaa1 rhashtable: Allow... |
156 157 |
size_t nbuckets, gfp_t gfp) |
7e1e77636 lib: Resizable, S... |
158 |
{ |
eb6d1abf1 rhashtable: bette... |
159 |
struct bucket_table *tbl = NULL; |
8f0db0180 rhashtable: use b... |
160 |
size_t size; |
f89bd6f87 rhashtable: Suppo... |
161 |
int i; |
149212f07 rhashtable: add l... |
162 |
static struct lock_class_key __key; |
7e1e77636 lib: Resizable, S... |
163 |
|
c252aa3e8 rhashtable: use s... |
164 |
tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); |
da20420f8 rhashtable: Add n... |
165 166 |
size = nbuckets; |
2d22ecf6d lib/rhashtable: g... |
167 |
if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) { |
da20420f8 rhashtable: Add n... |
168 169 170 |
tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); nbuckets = 0; } |
2d22ecf6d lib/rhashtable: g... |
171 |
|
7e1e77636 lib: Resizable, S... |
172 173 |
if (tbl == NULL) return NULL; |
149212f07 rhashtable: add l... |
174 |
lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0); |
da20420f8 rhashtable: Add n... |
175 |
tbl->size = size; |
7e1e77636 lib: Resizable, S... |
176 |
|
4feb7c7a4 rhashtable: don't... |
177 |
rcu_head_init(&tbl->rcu); |
eddee5ba3 rhashtable: Fix w... |
178 |
INIT_LIST_HEAD(&tbl->walkers); |
d48ad080e rhashtable: use g... |
179 |
tbl->hash_rnd = get_random_u32(); |
5269b53da rhashtable: Move ... |
180 |
|
f89bd6f87 rhashtable: Suppo... |
181 |
for (i = 0; i < nbuckets; i++) |
9b4f64a22 rhashtable: simpl... |
182 |
INIT_RHT_NULLS_HEAD(tbl->buckets[i]); |
f89bd6f87 rhashtable: Suppo... |
183 |
|
97defe1ec rhashtable: Per b... |
184 |
return tbl; |
7e1e77636 lib: Resizable, S... |
185 |
} |
b824478b2 rhashtable: Add m... |
186 187 188 189 190 191 192 193 194 195 196 197 |
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht, struct bucket_table *tbl) { struct bucket_table *new_tbl; do { new_tbl = tbl; tbl = rht_dereference_rcu(tbl->future_tbl, ht); } while (tbl); return new_tbl; } |
8f0db0180 rhashtable: use b... |
198 |
static int rhashtable_rehash_one(struct rhashtable *ht, |
ba6306e3f rhashtable: Remov... |
199 |
struct rhash_lock_head **bkt, |
8f0db0180 rhashtable: use b... |
200 |
unsigned int old_hash) |
a5ec68e3b rhashtable: Use a... |
201 |
{ |
aa34a6cb0 rhashtable: Add a... |
202 |
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
c0690016a rhashtable: clean... |
203 |
struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); |
da20420f8 rhashtable: Add n... |
204 |
int err = -EAGAIN; |
aa34a6cb0 rhashtable: Add a... |
205 |
struct rhash_head *head, *next, *entry; |
e4edbe3c1 rhashtable: fix s... |
206 |
struct rhash_head __rcu **pprev = NULL; |
299e5c32a rhashtable: Use '... |
207 |
unsigned int new_hash; |
aa34a6cb0 rhashtable: Add a... |
208 |
|
da20420f8 rhashtable: Add n... |
209 210 211 212 |
if (new_tbl->nest) goto out; err = -ENOENT; |
adc6a3ab1 rhashtable: move ... |
213 214 |
rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash), old_tbl, old_hash) { |
aa34a6cb0 rhashtable: Add a... |
215 216 217 218 219 |
err = 0; next = rht_dereference_bucket(entry->next, old_tbl, old_hash); if (rht_is_a_nulls(next)) break; |
a5ec68e3b rhashtable: Use a... |
220 |
|
aa34a6cb0 rhashtable: Add a... |
221 222 |
pprev = &entry->next; } |
a5ec68e3b rhashtable: Use a... |
223 |
|
aa34a6cb0 rhashtable: Add a... |
224 225 |
if (err) goto out; |
97defe1ec rhashtable: Per b... |
226 |
|
aa34a6cb0 rhashtable: Add a... |
227 |
new_hash = head_hashfn(ht, new_tbl, entry); |
7e1e77636 lib: Resizable, S... |
228 |
|
149212f07 rhashtable: add l... |
229 |
rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING); |
7e1e77636 lib: Resizable, S... |
230 |
|
adc6a3ab1 rhashtable: move ... |
231 |
head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash); |
97defe1ec rhashtable: Per b... |
232 |
|
7def0f952 lib: fix data rac... |
233 |
RCU_INIT_POINTER(entry->next, head); |
a5ec68e3b rhashtable: Use a... |
234 |
|
149212f07 rhashtable: add l... |
235 |
rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry); |
97defe1ec rhashtable: Per b... |
236 |
|
8f0db0180 rhashtable: use b... |
237 238 239 240 |
if (pprev) rcu_assign_pointer(*pprev, next); else /* Need to preserved the bit lock. */ |
f4712b46a rhashtable: repla... |
241 |
rht_assign_locked(bkt, next); |
7e1e77636 lib: Resizable, S... |
242 |
|
aa34a6cb0 rhashtable: Add a... |
243 244 245 |
out: return err; } |
97defe1ec rhashtable: Per b... |
246 |
|
da20420f8 rhashtable: Add n... |
247 |
static int rhashtable_rehash_chain(struct rhashtable *ht, |
299e5c32a rhashtable: Use '... |
248 |
unsigned int old_hash) |
aa34a6cb0 rhashtable: Add a... |
249 250 |
{ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
ba6306e3f rhashtable: Remov... |
251 |
struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); |
da20420f8 rhashtable: Add n... |
252 |
int err; |
aa34a6cb0 rhashtable: Add a... |
253 |
|
8f0db0180 rhashtable: use b... |
254 255 |
if (!bkt) return 0; |
149212f07 rhashtable: add l... |
256 |
rht_lock(old_tbl, bkt); |
a5ec68e3b rhashtable: Use a... |
257 |
|
8f0db0180 rhashtable: use b... |
258 |
while (!(err = rhashtable_rehash_one(ht, bkt, old_hash))) |
aa34a6cb0 rhashtable: Add a... |
259 |
; |
da20420f8 rhashtable: Add n... |
260 |
|
4feb7c7a4 rhashtable: don't... |
261 |
if (err == -ENOENT) |
da20420f8 rhashtable: Add n... |
262 |
err = 0; |
149212f07 rhashtable: add l... |
263 |
rht_unlock(old_tbl, bkt); |
da20420f8 rhashtable: Add n... |
264 265 |
return err; |
97defe1ec rhashtable: Per b... |
266 |
} |
b824478b2 rhashtable: Add m... |
267 268 269 |
static int rhashtable_rehash_attach(struct rhashtable *ht, struct bucket_table *old_tbl, struct bucket_table *new_tbl) |
97defe1ec rhashtable: Per b... |
270 |
{ |
aa34a6cb0 rhashtable: Add a... |
271 272 |
/* Make insertions go into the new, empty table right away. Deletions * and lookups will be attempted in both tables until we synchronize. |
0ad66449a rhashtable: use c... |
273 274 |
* As cmpxchg() provides strong barriers, we do not need * rcu_assign_pointer(). |
aa34a6cb0 rhashtable: Add a... |
275 |
*/ |
aa34a6cb0 rhashtable: Add a... |
276 |
|
e9458a4e3 rhashtable: Fix c... |
277 278 |
if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL, new_tbl) != NULL) |
0ad66449a rhashtable: use c... |
279 |
return -EEXIST; |
b824478b2 rhashtable: Add m... |
280 281 282 283 284 285 286 287 288 |
return 0; } static int rhashtable_rehash_table(struct rhashtable *ht) { struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); struct bucket_table *new_tbl; struct rhashtable_walker *walker; |
299e5c32a rhashtable: Use '... |
289 |
unsigned int old_hash; |
da20420f8 rhashtable: Add n... |
290 |
int err; |
b824478b2 rhashtable: Add m... |
291 292 293 294 |
new_tbl = rht_dereference(old_tbl->future_tbl, ht); if (!new_tbl) return 0; |
da20420f8 rhashtable: Add n... |
295 296 297 298 |
for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { err = rhashtable_rehash_chain(ht, old_hash); if (err) return err; |
ae6da1f50 rhashtable: add s... |
299 |
cond_resched(); |
da20420f8 rhashtable: Add n... |
300 |
} |
aa34a6cb0 rhashtable: Add a... |
301 302 303 |
/* Publish the new table pointer. */ rcu_assign_pointer(ht->tbl, new_tbl); |
ba7c95ea3 rhashtable: Fix s... |
304 |
spin_lock(&ht->lock); |
eddee5ba3 rhashtable: Fix w... |
305 306 |
list_for_each_entry(walker, &old_tbl->walkers, list) walker->tbl = NULL; |
aa34a6cb0 rhashtable: Add a... |
307 308 309 |
/* Wait for readers. All new readers will see the new * table, and thus no references to the old table will * remain. |
4feb7c7a4 rhashtable: don't... |
310 311 312 |
* We do this inside the locked region so that * rhashtable_walk_stop() can use rcu_head_after_call_rcu() * to check if it should not re-link the table. |
aa34a6cb0 rhashtable: Add a... |
313 |
*/ |
9d901bc05 rhashtable: Free ... |
314 |
call_rcu(&old_tbl->rcu, bucket_table_free_rcu); |
4feb7c7a4 rhashtable: don't... |
315 |
spin_unlock(&ht->lock); |
b824478b2 rhashtable: Add m... |
316 317 |
return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; |
7e1e77636 lib: Resizable, S... |
318 |
} |
da20420f8 rhashtable: Add n... |
319 320 321 |
static int rhashtable_rehash_alloc(struct rhashtable *ht, struct bucket_table *old_tbl, unsigned int size) |
7e1e77636 lib: Resizable, S... |
322 |
{ |
da20420f8 rhashtable: Add n... |
323 |
struct bucket_table *new_tbl; |
b824478b2 rhashtable: Add m... |
324 |
int err; |
7e1e77636 lib: Resizable, S... |
325 326 |
ASSERT_RHT_MUTEX(ht); |
da20420f8 rhashtable: Add n... |
327 |
new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); |
7e1e77636 lib: Resizable, S... |
328 329 |
if (new_tbl == NULL) return -ENOMEM; |
b824478b2 rhashtable: Add m... |
330 331 332 333 334 |
err = rhashtable_rehash_attach(ht, old_tbl, new_tbl); if (err) bucket_table_free(new_tbl); return err; |
7e1e77636 lib: Resizable, S... |
335 |
} |
7e1e77636 lib: Resizable, S... |
336 337 338 339 |
/** * rhashtable_shrink - Shrink hash table while allowing concurrent lookups * @ht: the hash table to shrink |
7e1e77636 lib: Resizable, S... |
340 |
* |
18093d1c0 rhashtable: Shrin... |
341 342 |
* This function shrinks the hash table to fit, i.e., the smallest * size would not cause it to expand right away automatically. |
7e1e77636 lib: Resizable, S... |
343 |
* |
97defe1ec rhashtable: Per b... |
344 345 346 |
* The caller must ensure that no concurrent resizing occurs by holding * ht->mutex. * |
7e1e77636 lib: Resizable, S... |
347 348 |
* The caller must ensure that no concurrent table mutations take place. * It is however valid to have concurrent lookups if they are RCU protected. |
97defe1ec rhashtable: Per b... |
349 350 351 |
* * It is valid to have concurrent insertions and deletions protected by per * bucket locks or concurrent RCU protected lookups and traversals. |
7e1e77636 lib: Resizable, S... |
352 |
*/ |
b824478b2 rhashtable: Add m... |
353 |
static int rhashtable_shrink(struct rhashtable *ht) |
7e1e77636 lib: Resizable, S... |
354 |
{ |
da20420f8 rhashtable: Add n... |
355 |
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); |
12311959e rhashtable: fix s... |
356 357 |
unsigned int nelems = atomic_read(&ht->nelems); unsigned int size = 0; |
7e1e77636 lib: Resizable, S... |
358 |
|
12311959e rhashtable: fix s... |
359 360 |
if (nelems) size = roundup_pow_of_two(nelems * 3 / 2); |
18093d1c0 rhashtable: Shrin... |
361 362 363 364 365 |
if (size < ht->p.min_size) size = ht->p.min_size; if (old_tbl->size <= size) return 0; |
b824478b2 rhashtable: Add m... |
366 367 |
if (rht_dereference(old_tbl->future_tbl, ht)) return -EEXIST; |
da20420f8 rhashtable: Add n... |
368 |
return rhashtable_rehash_alloc(ht, old_tbl, size); |
7e1e77636 lib: Resizable, S... |
369 |
} |
7e1e77636 lib: Resizable, S... |
370 |
|
97defe1ec rhashtable: Per b... |
371 372 373 374 |
static void rht_deferred_worker(struct work_struct *work) { struct rhashtable *ht; struct bucket_table *tbl; |
b824478b2 rhashtable: Add m... |
375 |
int err = 0; |
97defe1ec rhashtable: Per b... |
376 |
|
57699a40b rhashtable: Fix r... |
377 |
ht = container_of(work, struct rhashtable, run_work); |
97defe1ec rhashtable: Per b... |
378 |
mutex_lock(&ht->mutex); |
28134a53d rhashtable: Fix p... |
379 |
|
97defe1ec rhashtable: Per b... |
380 |
tbl = rht_dereference(ht->tbl, ht); |
b824478b2 rhashtable: Add m... |
381 |
tbl = rhashtable_last_table(ht, tbl); |
97defe1ec rhashtable: Per b... |
382 |
|
a5b6846f9 rhashtable: kill ... |
383 |
if (rht_grow_above_75(ht, tbl)) |
da20420f8 rhashtable: Add n... |
384 |
err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); |
b5e2c150a rhashtable: Disab... |
385 |
else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) |
da20420f8 rhashtable: Add n... |
386 387 388 |
err = rhashtable_shrink(ht); else if (tbl->nest) err = rhashtable_rehash_alloc(ht, tbl, tbl->size); |
b824478b2 rhashtable: Add m... |
389 |
|
408f13ef3 rhashtable: Still... |
390 391 392 393 394 395 |
if (!err || err == -EEXIST) { int nerr; nerr = rhashtable_rehash_table(ht); err = err ?: nerr; } |
b824478b2 rhashtable: Add m... |
396 |
|
97defe1ec rhashtable: Per b... |
397 |
mutex_unlock(&ht->mutex); |
b824478b2 rhashtable: Add m... |
398 399 400 |
if (err) schedule_work(&ht->run_work); |
97defe1ec rhashtable: Per b... |
401 |
} |
ca26893f0 rhashtable: Add r... |
402 403 |
static int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl) |
ccd57b1bd rhashtable: Add i... |
404 405 406 |
{ struct bucket_table *old_tbl; struct bucket_table *new_tbl; |
ccd57b1bd rhashtable: Add i... |
407 408 409 410 |
unsigned int size; int err; old_tbl = rht_dereference_rcu(ht->tbl, ht); |
ccd57b1bd rhashtable: Add i... |
411 412 |
size = tbl->size; |
3cf92222a rhashtable: Preve... |
413 |
err = -EBUSY; |
ccd57b1bd rhashtable: Add i... |
414 415 |
if (rht_grow_above_75(ht, tbl)) size *= 2; |
a87b9ebf1 rhashtable: Do no... |
416 417 |
/* Do not schedule more than one rehash */ else if (old_tbl != tbl) |
3cf92222a rhashtable: Preve... |
418 419 420 |
goto fail; err = -ENOMEM; |
ccd57b1bd rhashtable: Add i... |
421 |
|
93f976b51 lib/rhashtable: s... |
422 |
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN); |
3cf92222a rhashtable: Preve... |
423 424 |
if (new_tbl == NULL) goto fail; |
ccd57b1bd rhashtable: Add i... |
425 426 427 428 429 430 431 432 433 434 |
err = rhashtable_rehash_attach(ht, tbl, new_tbl); if (err) { bucket_table_free(new_tbl); if (err == -EEXIST) err = 0; } else schedule_work(&ht->run_work); return err; |
3cf92222a rhashtable: Preve... |
435 436 437 |
fail: /* Do not fail the insert if someone else did a rehash. */ |
c0690016a rhashtable: clean... |
438 |
if (likely(rcu_access_pointer(tbl->future_tbl))) |
3cf92222a rhashtable: Preve... |
439 440 441 442 443 444 445 |
return 0; /* Schedule async rehash to retry allocation in process context. */ if (err == -ENOMEM) schedule_work(&ht->run_work); return err; |
ccd57b1bd rhashtable: Add i... |
446 |
} |
ccd57b1bd rhashtable: Add i... |
447 |
|
ca26893f0 rhashtable: Add r... |
448 |
static void *rhashtable_lookup_one(struct rhashtable *ht, |
ba6306e3f rhashtable: Remov... |
449 |
struct rhash_lock_head **bkt, |
ca26893f0 rhashtable: Add r... |
450 451 |
struct bucket_table *tbl, unsigned int hash, const void *key, struct rhash_head *obj) |
02fd97c3d rhashtable: Allow... |
452 |
{ |
ca26893f0 rhashtable: Add r... |
453 454 455 456 |
struct rhashtable_compare_arg arg = { .ht = ht, .key = key, }; |
e4edbe3c1 rhashtable: fix s... |
457 |
struct rhash_head __rcu **pprev = NULL; |
02fd97c3d rhashtable: Allow... |
458 |
struct rhash_head *head; |
ca26893f0 rhashtable: Add r... |
459 |
int elasticity; |
02fd97c3d rhashtable: Allow... |
460 |
|
5f8ddeab1 rhashtable: remov... |
461 |
elasticity = RHT_ELASTICITY; |
adc6a3ab1 rhashtable: move ... |
462 |
rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { |
ca26893f0 rhashtable: Add r... |
463 464 465 466 467 468 469 |
struct rhlist_head *list; struct rhlist_head *plist; elasticity--; if (!key || (ht->p.obj_cmpfn ? ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : |
d3dcf8eb6 rhashtable: Fix r... |
470 471 |
rhashtable_compare(&arg, rht_obj(ht, head)))) { pprev = &head->next; |
ca26893f0 rhashtable: Add r... |
472 |
continue; |
d3dcf8eb6 rhashtable: Fix r... |
473 |
} |
ca26893f0 rhashtable: Add r... |
474 475 476 477 478 479 480 481 482 483 |
if (!ht->rhlist) return rht_obj(ht, head); list = container_of(obj, struct rhlist_head, rhead); plist = container_of(head, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, plist); head = rht_dereference_bucket(head->next, tbl, hash); RCU_INIT_POINTER(list->rhead.next, head); |
8f0db0180 rhashtable: use b... |
484 485 486 487 |
if (pprev) rcu_assign_pointer(*pprev, obj); else /* Need to preserve the bit lock */ |
f4712b46a rhashtable: repla... |
488 |
rht_assign_locked(bkt, obj); |
ca26893f0 rhashtable: Add r... |
489 490 |
return NULL; |
5ca8cc5bf rhashtable: add r... |
491 |
} |
02fd97c3d rhashtable: Allow... |
492 |
|
ca26893f0 rhashtable: Add r... |
493 494 495 496 497 498 499 |
if (elasticity <= 0) return ERR_PTR(-EAGAIN); return ERR_PTR(-ENOENT); } static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, |
ba6306e3f rhashtable: Remov... |
500 |
struct rhash_lock_head **bkt, |
ca26893f0 rhashtable: Add r... |
501 502 503 504 505 506 507 508 509 510 |
struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, void *data) { struct bucket_table *new_tbl; struct rhash_head *head; if (!IS_ERR_OR_NULL(data)) return ERR_PTR(-EEXIST); |
07ee0722b rhashtable: Add c... |
511 |
|
ca26893f0 rhashtable: Add r... |
512 513 |
if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT) return ERR_CAST(data); |
ccd57b1bd rhashtable: Add i... |
514 |
|
c0690016a rhashtable: clean... |
515 |
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); |
ca26893f0 rhashtable: Add r... |
516 517 518 519 520 521 522 523 524 525 526 |
if (new_tbl) return new_tbl; if (PTR_ERR(data) != -ENOENT) return ERR_CAST(data); if (unlikely(rht_grow_above_max(ht, tbl))) return ERR_PTR(-E2BIG); if (unlikely(rht_grow_above_100(ht, tbl))) return ERR_PTR(-EAGAIN); |
02fd97c3d rhashtable: Allow... |
527 |
|
adc6a3ab1 rhashtable: move ... |
528 |
head = rht_ptr(bkt, tbl, hash); |
02fd97c3d rhashtable: Allow... |
529 530 |
RCU_INIT_POINTER(obj->next, head); |
ca26893f0 rhashtable: Add r... |
531 532 533 534 535 536 |
if (ht->rhlist) { struct rhlist_head *list; list = container_of(obj, struct rhlist_head, rhead); RCU_INIT_POINTER(list->next, NULL); } |
02fd97c3d rhashtable: Allow... |
537 |
|
8f0db0180 rhashtable: use b... |
538 539 540 |
/* bkt is always the head of the list, so it holds * the lock, which we need to preserve */ |
f4712b46a rhashtable: repla... |
541 |
rht_assign_locked(bkt, obj); |
02fd97c3d rhashtable: Allow... |
542 543 |
atomic_inc(&ht->nelems); |
ca26893f0 rhashtable: Add r... |
544 545 |
if (rht_grow_above_75(ht, tbl)) schedule_work(&ht->run_work); |
02fd97c3d rhashtable: Allow... |
546 |
|
ca26893f0 rhashtable: Add r... |
547 548 |
return NULL; } |
02fd97c3d rhashtable: Allow... |
549 |
|
ca26893f0 rhashtable: Add r... |
550 551 552 553 554 |
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key, struct rhash_head *obj) { struct bucket_table *new_tbl; struct bucket_table *tbl; |
ba6306e3f rhashtable: Remov... |
555 |
struct rhash_lock_head **bkt; |
ca26893f0 rhashtable: Add r... |
556 |
unsigned int hash; |
ca26893f0 rhashtable: Add r... |
557 |
void *data; |
4feb7c7a4 rhashtable: don't... |
558 |
new_tbl = rcu_dereference(ht->tbl); |
ca26893f0 rhashtable: Add r... |
559 |
|
4feb7c7a4 rhashtable: don't... |
560 |
do { |
ca26893f0 rhashtable: Add r... |
561 562 |
tbl = new_tbl; hash = rht_head_hashfn(ht, tbl, obj, ht->p); |
8f0db0180 rhashtable: use b... |
563 564 565 566 567 568 569 570 571 |
if (rcu_access_pointer(tbl->future_tbl)) /* Failure is OK */ bkt = rht_bucket_var(tbl, hash); else bkt = rht_bucket_insert(ht, tbl, hash); if (bkt == NULL) { new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); data = ERR_PTR(-EAGAIN); } else { |
149212f07 rhashtable: add l... |
572 |
rht_lock(tbl, bkt); |
8f0db0180 rhashtable: use b... |
573 574 575 576 577 578 |
data = rhashtable_lookup_one(ht, bkt, tbl, hash, key, obj); new_tbl = rhashtable_insert_one(ht, bkt, tbl, hash, obj, data); if (PTR_ERR(new_tbl) != -EEXIST) data = ERR_CAST(new_tbl); |
149212f07 rhashtable: add l... |
579 |
rht_unlock(tbl, bkt); |
8f0db0180 rhashtable: use b... |
580 |
} |
4feb7c7a4 rhashtable: don't... |
581 |
} while (!IS_ERR_OR_NULL(new_tbl)); |
ca26893f0 rhashtable: Add r... |
582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 |
if (PTR_ERR(data) == -EAGAIN) data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: -EAGAIN); return data; } void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, struct rhash_head *obj) { void *data; do { rcu_read_lock(); data = rhashtable_try_insert(ht, key, obj); rcu_read_unlock(); } while (PTR_ERR(data) == -EAGAIN); return data; |
02fd97c3d rhashtable: Allow... |
602 603 |
} EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
f2dba9c6f rhashtable: Intro... |
604 |
/** |
246779dd0 rhashtable: Remov... |
605 |
* rhashtable_walk_enter - Initialise an iterator |
f2dba9c6f rhashtable: Intro... |
606 607 608 609 610 611 612 613 614 615 616 617 618 |
* @ht: Table to walk over * @iter: Hash table Iterator * * This function prepares a hash table walk. * * Note that if you restart a walk after rhashtable_walk_stop you * may see the same object twice. Also, you may miss objects if * there are removals in between rhashtable_walk_stop and the next * call to rhashtable_walk_start. * * For a completely stable walk you should construct your own data * structure outside the hash table. * |
82266e98d rhashtable: Revis... |
619 620 621 |
* This function may be called from any process context, including * non-preemptable context, but cannot be called from softirq or * hardirq context. |
f2dba9c6f rhashtable: Intro... |
622 |
* |
246779dd0 rhashtable: Remov... |
623 |
* You must call rhashtable_walk_exit after this function returns. |
f2dba9c6f rhashtable: Intro... |
624 |
*/ |
246779dd0 rhashtable: Remov... |
625 |
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) |
f2dba9c6f rhashtable: Intro... |
626 627 628 629 630 |
{ iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; |
2db54b475 rhashtable: Add r... |
631 |
iter->end_of_table = 0; |
f2dba9c6f rhashtable: Intro... |
632 |
|
c6ff52682 rhashtable: Fix w... |
633 |
spin_lock(&ht->lock); |
246779dd0 rhashtable: Remov... |
634 |
iter->walker.tbl = |
179ccc0a7 rhashtable: Kill ... |
635 |
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); |
246779dd0 rhashtable: Remov... |
636 |
list_add(&iter->walker.list, &iter->walker.tbl->walkers); |
c6ff52682 rhashtable: Fix w... |
637 |
spin_unlock(&ht->lock); |
f2dba9c6f rhashtable: Intro... |
638 |
} |
246779dd0 rhashtable: Remov... |
639 |
EXPORT_SYMBOL_GPL(rhashtable_walk_enter); |
f2dba9c6f rhashtable: Intro... |
640 641 642 643 644 |
/** * rhashtable_walk_exit - Free an iterator * @iter: Hash table Iterator * |
6c4128f65 rhashtable: Remov... |
645 |
* This function frees resources allocated by rhashtable_walk_enter. |
f2dba9c6f rhashtable: Intro... |
646 647 648 |
*/ void rhashtable_walk_exit(struct rhashtable_iter *iter) { |
c6ff52682 rhashtable: Fix w... |
649 |
spin_lock(&iter->ht->lock); |
246779dd0 rhashtable: Remov... |
650 651 |
if (iter->walker.tbl) list_del(&iter->walker.list); |
c6ff52682 rhashtable: Fix w... |
652 |
spin_unlock(&iter->ht->lock); |
f2dba9c6f rhashtable: Intro... |
653 654 655 656 |
} EXPORT_SYMBOL_GPL(rhashtable_walk_exit); /** |
97a6ec4ac rhashtable: Chang... |
657 |
* rhashtable_walk_start_check - Start a hash table walk |
f2dba9c6f rhashtable: Intro... |
658 659 |
* @iter: Hash table iterator * |
0647169cf rhashtable: Docum... |
660 661 662 |
* Start a hash table walk at the current iterator position. Note that we take * the RCU lock in all cases including when we return an error. So you must * always call rhashtable_walk_stop to clean up. |
f2dba9c6f rhashtable: Intro... |
663 664 665 666 667 668 |
* * Returns zero if successful. * * Returns -EAGAIN if resize event occured. Note that the iterator * will rewind back to the beginning and you may use it immediately * by calling rhashtable_walk_next. |
97a6ec4ac rhashtable: Chang... |
669 670 671 672 |
* * rhashtable_walk_start is defined as an inline variant that returns * void. This is preferred in cases where the caller would ignore * resize events and always continue. |
f2dba9c6f rhashtable: Intro... |
673 |
*/ |
97a6ec4ac rhashtable: Chang... |
674 |
int rhashtable_walk_start_check(struct rhashtable_iter *iter) |
db4374f48 rhashtable: Annot... |
675 |
__acquires(RCU) |
f2dba9c6f rhashtable: Intro... |
676 |
{ |
eddee5ba3 rhashtable: Fix w... |
677 |
struct rhashtable *ht = iter->ht; |
5d240a893 rhashtable: impro... |
678 |
bool rhlist = ht->rhlist; |
eddee5ba3 rhashtable: Fix w... |
679 |
|
c6ff52682 rhashtable: Fix w... |
680 |
rcu_read_lock(); |
eddee5ba3 rhashtable: Fix w... |
681 |
|
c6ff52682 rhashtable: Fix w... |
682 |
spin_lock(&ht->lock); |
246779dd0 rhashtable: Remov... |
683 684 |
if (iter->walker.tbl) list_del(&iter->walker.list); |
c6ff52682 rhashtable: Fix w... |
685 |
spin_unlock(&ht->lock); |
eddee5ba3 rhashtable: Fix w... |
686 |
|
5d240a893 rhashtable: impro... |
687 688 689 |
if (iter->end_of_table) return 0; if (!iter->walker.tbl) { |
246779dd0 rhashtable: Remov... |
690 |
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); |
b41cc04b6 rhashtable: reset... |
691 692 |
iter->slot = 0; iter->skip = 0; |
f2dba9c6f rhashtable: Intro... |
693 694 |
return -EAGAIN; } |
5d240a893 rhashtable: impro... |
695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 |
if (iter->p && !rhlist) { /* * We need to validate that 'p' is still in the table, and * if so, update 'skip' */ struct rhash_head *p; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { skip++; if (p == iter->p) { iter->skip = skip; goto found; } } iter->p = NULL; } else if (iter->p && rhlist) { /* Need to validate that 'list' is still in the table, and * if so, update 'skip' and 'p'. */ struct rhash_head *p; struct rhlist_head *list; int skip = 0; rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { for (list = container_of(p, struct rhlist_head, rhead); list; list = rcu_dereference(list->next)) { skip++; if (list == iter->list) { iter->p = p; |
c643ecf35 lib: rhashtable: ... |
724 |
iter->skip = skip; |
5d240a893 rhashtable: impro... |
725 726 727 728 729 730 731 |
goto found; } } } iter->p = NULL; } found: |
f2dba9c6f rhashtable: Intro... |
732 733 |
return 0; } |
97a6ec4ac rhashtable: Chang... |
734 |
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check); |
f2dba9c6f rhashtable: Intro... |
735 736 |
/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter: Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		/* 'skip' is how many entries of this slot were already
		 * returned to the caller; re-count past them to resume.
		 */
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				/* rhltable: each rhash_head is the head of a
				 * sub-list of duplicates; skip counts across
				 * the sub-list elements, not the heads.
				 */
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	/* A concurrent resize may have moved entries into future_tbl;
	 * restart the walk there and tell the caller via -EAGAIN.
	 */
	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}
/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		/* Fast path: step within the current bucket chain (and,
		 * for rhltables, within the current duplicate sub-list).
		 */
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter: Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one in the table. */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond that last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	/* Re-attach the walker to the table under ht->lock so a later
	 * rhashtable_walk_start() can resume from it.
	 */
	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
488fb86ee rhashtable: Make ... |
907 |
static size_t rounded_hashtable_size(const struct rhashtable_params *params) |
7e1e77636 lib: Resizable, S... |
908 |
{ |
107d01f5b lib/rhashtable: c... |
909 910 911 912 913 914 915 916 917 918 |
size_t retsize; if (params->nelem_hint) retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3), (unsigned long)params->min_size); else retsize = max(HASH_DEFAULT_SIZE, (unsigned long)params->min_size); return retsize; |
7e1e77636 lib: Resizable, S... |
919 |
} |
/* Adapter giving jhash2() the generic hashfn signature.  Installed by
 * rhashtable_init() when the key length is a multiple of sizeof(u32),
 * with ht->key_len pre-divided down to a word count.
 */
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}
/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	/* A key length or an object hash function is mandatory, and a
	 * custom object hash requires a matching compare function.
	 */
	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		/* Word-aligned keys can use the faster jhash2(); key_len
		 * then becomes a count of u32 words instead of bytes.
		 */
		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	/* rhlist marks the table as storing duplicate-key sub-lists. */
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

/* Invoke free_fn on one table entry; for an rhltable this walks the
 * whole duplicate sub-list hanging off the bucket entry.
 */
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		/* Read the next pointer before free_fn releases 'obj'. */
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to releasal resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occurs in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	/* No deferred resize may run while we tear the table down. */
	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			/* 'next' is fetched before freeing 'pos' because
			 * free_fn may release the object holding the link.
			 */
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	/* An interrupted resize may have left a chained future table. */
	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
/* Look up a bucket in a nested (multi-level) bucket table.  The top level
 * consumes tbl->nest hash bits; every further level consumes 'shift' bits
 * (one page worth of pointers per level).  Returns NULL when the slot's
 * sub-table was never allocated.
 */
struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
					     unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;
	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

/* As __rht_bucket_nested(), but a missing sub-table maps to a shared
 * static empty-bucket (nulls-marked) slot instead of NULL, so callers
 * may always dereference the result.
 */
struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
					   unsigned int hash)
{
	/* Lazily nulls-initialised; the write is idempotent. */
	static struct rhash_lock_head *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

/* Insert-side variant: allocates any missing intermediate nested tables
 * on the way down.  Returns NULL only when allocation fails.
 */
struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
	hash >>= tbl->nest;
	/* The last argument flags the leaf level, where the union entries
	 * are buckets rather than further tables.
	 */
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);