Blame view
include/linux/rculist.h
23.5 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
82524746c rcu: split list.h... |
2 3 4 5 6 7 8 9 10 |
#ifndef _LINUX_RCULIST_H #define _LINUX_RCULIST_H #ifdef __KERNEL__ /* * RCU-protected list version */ #include <linux/list.h> |
10aa9d2cf rculist.h: use th... |
11 |
#include <linux/rcupdate.h> |
82524746c rcu: split list.h... |
12 13 |
/*
 * Why is there no list_empty_rcu()? Because list_empty() serves this
 * purpose. The list_empty() function fetches the RCU-protected pointer
 * and compares it to the address of the list head, but neither dereferences
 * this pointer itself nor provides this pointer to the caller. Therefore,
 * it is not necessary to use rcu_dereference(), so that list_empty() can
 * be used anywhere you would want to use a list_empty_rcu().
 */

/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	/*
	 * WRITE_ONCE() keeps the compiler from tearing or fusing these
	 * stores, which concurrent RCU readers could otherwise observe.
	 */
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}

/*
 * return the ->next pointer of a list_head in an rcu safe
 * way, we must not access it directly
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	new->next = next;
	new->prev = prev;
	/*
	 * Publication point: rcu_assign_pointer() orders the
	 * initialization of @new above before making it reachable from
	 * @prev, so readers never see a half-initialized entry.
	 */
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/* Only ->prev is poisoned: readers may still follow ->next. */
	entry->prev = LIST_POISON2;
}

/**
6beeac76f mmu-notifiers: ad... |
134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 |
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		/* Zero (not poison) ->pprev so hlist_unhashed() is true. */
		n->pprev = NULL;
	}
}

/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	/* Publish @new only after its link fields are initialized. */
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* Poison ->prev only; readers may still traverse @old's ->next. */
	old->prev = LIST_POISON2;
}

/**
7d86dccf2 list: Introduces ... |
180 |
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @prev: points to the last element of the existing list
 * @next: points to the first element of the existing list
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list. In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created. But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * "first" and "last" tracking list, so initialize it. RCU readers
	 * have access to this list, so we must use INIT_LIST_HEAD_RCU()
	 * instead of INIT_LIST_HEAD().
	 */
	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list. Any new readers will see
	 * an empty list.
	 */
	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers. Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */
	last->next = next;
	/* Publication of the spliced run happens at this single store. */
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}

/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list: the RCU-protected list to splice
 * @head: the place in the existing list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}
72c6a9870 rculist.h: introd... |
265 266 267 268 |
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)
72c6a9870 rculist.h: introd... |
276 |
|
27fdb35fe doc: Fix various ... |
277 |
/*
 * Where are list_empty_rcu() and list_first_entry_rcu()?
 *
 * Implementing those functions following their counterparts list_empty() and
 * list_first_entry() is not advisable because they lead to subtle race
 * conditions as the following snippet shows:
 *
 * if (!list_empty_rcu(mylist)) {
 *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
 *	do_something(bar);
 * }
 *
 * The list may not be empty when list_empty_rcu checks it, but it may be when
 * list_first_entry_rcu rereads the ->next pointer.
 *
 * Rereading the ->next pointer is not a problem for list_empty() and
 * list_first_entry() because they would be protected by a lock that blocks
 * writers.
 *
 * See list_first_or_null_rcu for an alternative.
 */

/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	/* Read ->next exactly once so the empty check and the result agree. */ \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
72c6a9870 rculist.h: introd... |
316 |
|
82524746c rcu: split list.h... |
317 |
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head: the head for the list.
 * @ptr: the list head to take the next element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	/* Single READ_ONCE(): the end-of-list test and result use one value. */ \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		&pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
82524746c rcu: split list.h... |
352 |
|
82524746c rcu: split list.h... |
353 |
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends). Another
 * example is when items are added to the list, but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu(), but requires some implicit RCU
 * read-side guarding. One example is running within a special
 * exception-time environment where preemption is disabled and where
 * lockdep cannot be invoked (in which case updaters must use RCU-sched,
 * as in synchronize_sched(), call_rcu_sched(), and friends). Another
 * example is when items are added to the list, but never deleted.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
82524746c rcu: split list.h... |
404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 |
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison only ->pprev; readers may still follow ->next. */
	n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	/* Publish @new only after its link fields are initialized. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}

/*
 * return the first or the next element in an RCU protected hlist
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	/* Publish @n at the head only after ->next/->pprev are set. */
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}

/**
1602f49b5 Merge git://git.k... |
485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 |
* hlist_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_add_head_rcu() * or hlist_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_add_tail_rcu(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *i, *last = NULL; |
48ac34666 hlist_add_tail_rc... |
507 508 |
/* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; i; i = i->next) |
1602f49b5 Merge git://git.k... |
509 510 511 512 513 514 515 516 517 518 519 520 |
last = i; if (last) { n->next = last->next; n->pprev = &last->next; rcu_assign_pointer(hlist_next_rcu(last), n); } else { hlist_add_head_rcu(n, h); } } /** |
82524746c rcu: split list.h... |
521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 |
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	/* Publish @n by pointing its predecessor's ->next at it. */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}

/**
1d023284c list: fix order o... |
548 |
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	/* Publish @n after @prev only once its link fields are set. */
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}
67bdbffd6 rculist: avoid __... |
574 575 |
/*
 * __hlist_for_each_rcu - iterate over the raw hlist_node pointers of an
 * RCU-protected hlist; must be guarded by an RCU read-side critical section.
 */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))
1cc523271 seq_file: add RCU... |
578 |
|
82524746c rcu: split list.h... |
579 580 |
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member)			\
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
82524746c rcu: split list.h... |
595 |
|
5c578aedc IPv6: convert add... |
596 |
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)		\
	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))

/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
4f70ecca9 net: rcu fixes |
632 633 |
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))
5c578aedc IPv6: convert add... |
644 |
|
4f70ecca9 net: rcu fixes |
645 646 |
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos: the type * to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))
4f70ecca9 net: rcu fixes |
656 |
|
97ede29e8 tipc: convert nam... |
657 658 659 660 661 662 663 |
/** * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point * @pos: the type * to use as a loop cursor. * @member: the name of the hlist_node within the struct. */ #define hlist_for_each_entry_from_rcu(pos, member) \ for (; pos; \ |
f517700cc rculist: Fix anot... |
664 665 |
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \ &(pos)->member)), typeof(*(pos)), member)) |
5c578aedc IPv6: convert add... |
666 |
|
82524746c rcu: split list.h... |
667 668 |
#endif /* __KERNEL__ */ #endif |