Blame view
include/linux/percpu-rwsem.h
4.16 KB
b24413180
|
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
62ac665ff
|
2 3 |
#ifndef _LINUX_PERCPU_RWSEM_H #define _LINUX_PERCPU_RWSEM_H |
9390ef0c8
|
4 |
#include <linux/atomic.h> |
62ac665ff
|
5 |
#include <linux/percpu.h> |
52b94129f
|
6 |
#include <linux/rcuwait.h> |
7f26482a8
|
7 |
#include <linux/wait.h> |
001dac627
|
8 |
#include <linux/rcu_sync.h> |
8ebe34731
|
9 |
#include <linux/lockdep.h> |
62ac665ff
|
10 11 |
/*
 * A reader/writer semaphore optimized for read-mostly use: readers pay only
 * a per-CPU counter increment on the fast path (see percpu_down_read());
 * writers go through the out-of-line slow path (percpu_down_write()).
 */
struct percpu_rw_semaphore {
	struct rcu_sync		rss;		/* fast/slow-path switch; readers test rcu_sync_is_idle() */
	unsigned int __percpu	*read_count;	/* per-CPU reader count, inc/dec on the read side */
	struct rcuwait		writer;		/* writer waiting for readers; woken in percpu_up_read() */
	wait_queue_head_t	waiters;	/* NOTE(review): not referenced in this header — presumably the slow-path wait queue; confirm in percpu-rwsem.c */
	atomic_t		block;		/* NOTE(review): not referenced in this header — presumably writer-blocking state; confirm in percpu-rwsem.c */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lockdep tracking, see __PERCPU_RWSEM_DEP_MAP_INIT() */
#endif
};
1751060e2
|
21 22 23 24 25 |
/* Expand to a dep_map initializer only when lockdep is configured in. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
#else
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
#endif

/*
 * Statically define and initialize a percpu_rw_semaphore, together with its
 * backing per-CPU reader counter (__percpu_rwsem_rc_##name). @is_static is
 * spliced in front of the variable definition so the same macro serves both
 * the static and non-static DEFINE_* wrappers below.
 */
#define __DEFINE_PERCPU_RWSEM(name, is_static)				\
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
is_static struct percpu_rw_semaphore name = {				\
	.rss = __RCU_SYNC_INITIALIZER(name.rss),			\
	.read_count = &__percpu_rwsem_rc_##name,			\
	.writer = __RCUWAIT_INITIALIZER(name.writer),			\
	.waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters),		\
	.block = ATOMIC_INIT(0),					\
	__PERCPU_RWSEM_DEP_MAP_INIT(name)				\
}
1751060e2
|
36 |
|
3f2947b78
|
37 38 39 40 |
/* Define an (extern-visible) percpu rw-semaphore at file scope. */
#define DEFINE_PERCPU_RWSEM(name)		\
	__DEFINE_PERCPU_RWSEM(name, /* not static */)
/* Same, but with internal (static) linkage. */
#define DEFINE_STATIC_PERCPU_RWSEM(name)	\
	__DEFINE_PERCPU_RWSEM(name, static)
11d9684ca
|
41 |
|
206c98ffb
|
42 |
extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool); |
80127a396
|
43 |
|
02e525b2a
|
44 |
/*
 * percpu_down_read - acquire the semaphore for reading; may sleep.
 * @sem: the per-CPU rw-semaphore to acquire
 *
 * Fast path: while the rcu_sync machinery (sem->rss) is idle, i.e. no writer
 * is around, the acquisition is just a preemption-disabled per-CPU increment
 * of *sem->read_count. Otherwise fall through to __percpu_down_read(), which
 * provides the needed memory barrier (and may block, hence might_sleep()).
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	might_sleep();

	/* lockdep: record the read acquisition at the caller's IP. */
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	preempt_disable();
	/*
	 * We are in an RCU-sched read-side critical section, so the writer
	 * cannot both change sem->state from readers_fast and start checking
	 * counters while we are here. So if we see !sem->state, we know that
	 * the writer won't be checking until we're past the preempt_enable()
	 * and that once the synchronize_rcu() is done, the writer will see
	 * anything we did within this RCU-sched read-side critical section.
	 *
	 * NOTE(review): "sem->state" above is historical — there is no
	 * 'state' member in struct percpu_rw_semaphore; it refers to the
	 * rcu_sync state of sem->rss, which the test below gates on.
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		__percpu_down_read(sem, false); /* Unconditional memory barrier */
	/*
	 * The preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */
	preempt_enable();
}
206c98ffb
|
68 |
/*
 * percpu_down_read_trylock - try to acquire the semaphore for reading.
 * @sem: the per-CPU rw-semaphore
 *
 * Returns true on success, false if the slow path (__percpu_down_read()
 * called with try == true) refuses the acquisition. Fast path is identical
 * to percpu_down_read(). The lockdep annotation is only taken when the
 * lock was actually acquired (note trylock = 1 in rwsem_acquire_read()).
 */
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	bool ret = true;

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss)))
		this_cpu_inc(*sem->read_count);
	else
		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
	preempt_enable();
	/*
	 * The barrier() from preempt_enable() prevents the compiler from
	 * bleeding the critical section out.
	 */

	if (ret)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);

	return ret;
}
02e525b2a
|
91 |
/*
 * percpu_up_read - release the semaphore after a read-side acquisition.
 * @sem: the per-CPU rw-semaphore to release
 *
 * Fast path (no writer around): plain per-CPU decrement. Slow path: the
 * smp_mb() orders our critical section before the decrement the writer is
 * waiting to observe, then we wake the writer parked on sem->writer.
 */
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	preempt_disable();
	/*
	 * Same as in percpu_down_read().
	 */
	if (likely(rcu_sync_is_idle(&sem->rss))) {
		this_cpu_dec(*sem->read_count);
	} else {
		/*
		 * slowpath; reader will only ever wake a single blocked
		 * writer.
		 */
		smp_mb(); /* B matches C */
		/*
		 * In other words, if they see our decrement (presumably to
		 * aggregate zero, as that is the only time it matters) they
		 * will also see our critical section.
		 */
		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
	}
	preempt_enable();
}
5c1eabe68
|
116 |
|
a1fd3e24d
|
117 118 |
/* Write side: exclusive acquire/release, defined out of line (may block). */
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);

/*
 * Initialize a runtime-allocated semaphore; the name string and
 * lock_class_key feed lockdep (see the percpu_init_rwsem() wrapper below).
 * NOTE(review): returns int — presumably 0 / -ENOMEM for the per-CPU
 * allocation; confirm against the out-of-line definition.
 */
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
				const char *, struct lock_class_key *);

/* Tear down a semaphore initialized with __percpu_init_rwsem(). */
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
62ac665ff
|
124 |
|
80127a396
|
125 |
/*
 * percpu_init_rwsem - initialize @sem, giving each call site its own
 * lockdep class: the static lock_class_key lives inside the statement
 * expression, so every expansion gets a distinct key, and #sem supplies
 * the lockdep name. Evaluates to __percpu_init_rwsem()'s return value.
 */
#define percpu_init_rwsem(sem)					\
({								\
	static struct lock_class_key rwsem_key;			\
	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
})
1751060e2
|
130 131 |
/* Forward lockdep held-queries/assertions to the generic helpers. */
#define percpu_rwsem_is_held(sem)	lockdep_is_held(sem)
#define percpu_rwsem_assert_held(sem)	lockdep_assert_held(sem)
11d9684ca
|
132 |
|
55cc15650
|
133 134 135 |
/*
 * Drop only the lockdep ownership of @sem (the lock itself stays held),
 * e.g. when handing the lock off to another context.
 * NOTE(review): @read is unused here — presumably kept for signature
 * symmetry with percpu_rwsem_acquire(); verify with callers.
 */
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_release(&sem->dep_map, ip);
}

/*
 * Re-take lockdep ownership of an already-held @sem (inverse of
 * percpu_rwsem_release()). The lock_acquire() arguments are positional:
 * subclass 0, trylock = 1 (must not appear to block: the lock is already
 * held), @read, check = 1 — confirm against the lockdep API if changing.
 */
static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
					bool read, unsigned long ip)
{
	lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
62ac665ff
|
144 |
#endif |