/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

/*
 * Per-srcu_struct state for Tiny SRCU.  Unlike Tree SRCU there is no
 * per-CPU data: readers are counted in a single two-element nesting
 * array indexed by the current grace-period phase (srcu_idx).
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	short srcu_idx;			/* Current reader array element. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

/* Workqueue handler that drives Tiny SRCU grace periods (see srcutiny.c). */
void srcu_drive_gp(struct work_struct *wp);
/*
 * Static initializer for a Tiny SRCU srcu_struct.  The second argument
 * exists only for API compatibility with Tree SRCU's initializer, which
 * needs a per-CPU data pointer; Tiny SRCU ignores it.
 */
#define __SRCU_STRUCT_INIT(name, __ignored)				\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}

/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
/* Wait for a full SRCU grace period on @ssp (implemented in srcutiny.c). */
void synchronize_srcu(struct srcu_struct *ssp);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	/*
	 * READ_ONCE/WRITE_ONCE guard against load/store tearing: the
	 * grace-period machinery may flip srcu_idx and read the nesting
	 * counters concurrently.
	 */
	idx = READ_ONCE(ssp->srcu_idx);
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
	return idx;
}
/*
 * Tiny SRCU grace periods are already cheap, so the "expedited" variant
 * is simply the normal one.
 */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
/*
 * Wait for all pending SRCU callbacks on @ssp.  In Tiny SRCU a full
 * grace period suffices, since callbacks are invoked from the same
 * single workqueue handler that drives the grace period.
 * NOTE(review): the callback-drain claim follows srcu_drive_gp()'s
 * single-handler design — confirm against srcutiny.c.
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
115a1a528
|
72 |
/* Defined here to avoid size increase for non-torture kernels. */ |
aacb5d91a
|
73 |
static inline void srcu_torture_stats_print(struct srcu_struct *ssp, |
115a1a528
|
74 75 76 |
char *tt, char *tf) { int idx; |
aacb5d91a
|
77 |
idx = READ_ONCE(ssp->srcu_idx) & 0x1; |
115a1a528
|
78 79 80 |
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) ", tt, tf, idx, |
aacb5d91a
|
81 82 |
READ_ONCE(ssp->srcu_lock_nesting[!idx]), READ_ONCE(ssp->srcu_lock_nesting[idx])); |
115a1a528
|
83 |
} |
#endif /* _LINUX_SRCU_TINY_H */