include/linux/percpu-rwsem.h

#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>

struct percpu_rw_semaphore {
        struct rcu_sync         rss;
        unsigned int __percpu   *read_count;
        struct rw_semaphore     rw_sem;
        wait_queue_head_t       writer;
        int                     readers_block;
};

#define DEFINE_STATIC_PERCPU_RWSEM(name)                                \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);         \
static struct percpu_rw_semaphore name = {                              \
        .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),       \
        .read_count = &__percpu_rwsem_rc_##name,                       \
        .rw_sem = __RWSEM_INITIALIZER(name.rw_sem),                    \
        .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),          \
}
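
/*
 * Illustrative sketch, not part of the original header: defining a
 * file-scope per-CPU rw_semaphore with the macro above. The name
 * frob_rwsem is hypothetical.
 *
 *      DEFINE_STATIC_PERCPU_RWSEM(frob_rwsem);
 *
 * This expands to a static per-CPU reader counter plus a statically
 * initialized struct percpu_rw_semaphore wired to it, so no runtime
 * percpu_init_rwsem() call (and no error handling) is needed.
 */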

extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);

static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
{
        might_sleep();

        rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

        preempt_disable();
        /*
         * We are in an RCU-sched read-side critical section, so the writer
         * cannot both change sem->state from readers_fast and start checking
         * counters while we are here. So if we see !sem->state, we know that
         * the writer won't be checking until we're past the preempt_enable()
         * and that once the synchronize_sched() is done, the writer will see
         * anything we did within this RCU-sched read-side critical section.
         */
        __this_cpu_inc(*sem->read_count);
        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
                __percpu_down_read(sem, false); /* Unconditional memory barrier */
        barrier();
        /*
         * The barrier() prevents the compiler from
         * bleeding the critical section out.
         */
}

static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
        percpu_down_read_preempt_disable(sem);
        preempt_enable();
}

static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
        int ret = 1;

        preempt_disable();
        /*
         * Same as in percpu_down_read().
         */
        __this_cpu_inc(*sem->read_count);
        if (unlikely(!rcu_sync_is_idle(&sem->rss)))
                ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
        preempt_enable();
        /*
         * The barrier() from preempt_enable() prevents the compiler from
         * bleeding the critical section out.
         */

        if (ret)
                rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);

        return ret;
}
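
/*
 * Illustrative sketch, not part of the original header: a non-blocking
 * reader built on percpu_down_read_trylock(). frob_rwsem is the
 * hypothetical semaphore from the example above and frob_value is a
 * hypothetical integer it protects.
 *
 *      static int frob_value;
 *
 *      static int frob_peek(void)
 *      {
 *              int val;
 *
 *              if (!percpu_down_read_trylock(&frob_rwsem))
 *                      return -EBUSY;
 *              val = frob_value;
 *              percpu_up_read(&frob_rwsem);
 *              return val;
 *      }
 */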

static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
{
        /*
         * The barrier() prevents the compiler from
         * bleeding the critical section out.
         */
        barrier();
        /*
         * Same as in percpu_down_read().
         */
        if (likely(rcu_sync_is_idle(&sem->rss)))
                __this_cpu_dec(*sem->read_count);
        else
                __percpu_up_read(sem); /* Unconditional memory barrier */
        preempt_enable();

        rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}

static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
        preempt_disable();
        percpu_up_read_preempt_enable(sem);
}
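
/*
 * Illustrative sketch, not part of the original header: a plain reader
 * section. In the uncontended case percpu_down_read()/percpu_up_read()
 * cost little more than a per-CPU increment and decrement; the slow path
 * in __percpu_down_read()/__percpu_up_read() is only taken while a writer
 * is around. struct frob, frob_list and frob_sum() are hypothetical.
 *
 *      struct frob {
 *              struct list_head node;
 *              int value;
 *      };
 *
 *      static LIST_HEAD(frob_list);
 *
 *      static int frob_sum(void)
 *      {
 *              struct frob *f;
 *              int sum = 0;
 *
 *              percpu_down_read(&frob_rwsem);
 *              list_for_each_entry(f, &frob_list, node)
 *                      sum += f->value;
 *              percpu_up_read(&frob_rwsem);
 *              return sum;
 *      }
 */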

extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);
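
/*
 * Illustrative sketch, not part of the original header: the write side.
 * percpu_down_write() is the heavyweight path: it forces readers onto the
 * slow path via the rcu_sync machinery and waits for all in-flight readers
 * to drain, so it is intended for rare operations. frob_add() reuses the
 * hypothetical frob_rwsem/frob_list from the examples above.
 *
 *      static void frob_add(struct frob *f)
 *      {
 *              percpu_down_write(&frob_rwsem);
 *              list_add_tail(&f->node, &frob_list);
 *              percpu_up_write(&frob_rwsem);
 *      }
 */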

extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
                                const char *, struct lock_class_key *);
80127a396
|
107 |
|
a1fd3e24d
|
108 |
extern void percpu_free_rwsem(struct percpu_rw_semaphore *); |

#define percpu_init_rwsem(sem)                                  \
({                                                              \
        static struct lock_class_key rwsem_key;                 \
        __percpu_init_rwsem(sem, #sem, &rwsem_key);             \
})
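
/*
 * Illustrative sketch, not part of the original header: dynamic
 * initialization for a semaphore embedded in another object.
 * percpu_init_rwsem() allocates the per-CPU read_count and can fail, so
 * its return value must be checked; percpu_free_rwsem() releases the
 * per-CPU storage again. struct frob_dev and the helpers are hypothetical.
 *
 *      struct frob_dev {
 *              struct percpu_rw_semaphore rwsem;
 *      };
 *
 *      static int frob_dev_setup(struct frob_dev *fd)
 *      {
 *              return percpu_init_rwsem(&fd->rwsem);
 *      }
 *
 *      static void frob_dev_teardown(struct frob_dev *fd)
 *      {
 *              percpu_free_rwsem(&fd->rwsem);
 *      }
 */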

#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)

#define percpu_rwsem_assert_held(sem)                           \
        lockdep_assert_held(&(sem)->rw_sem)

static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_release(&sem->rw_sem.dep_map, 1, ip);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        if (!read)
                sem->rw_sem.owner = NULL;
#endif
}

static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
                                        bool read, unsigned long ip)
{
        lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
}
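
/*
 * Illustrative sketch, not part of the original header: the two helpers
 * above only adjust lockdep state (and, for writers, the rwsem owner);
 * they do not take or drop the lock itself. They are meant for code that
 * acquires the semaphore in one task and releases it in another, similar
 * in spirit to how filesystem freezing hands over sb_writers. The
 * frob_freeze()/frob_thaw() pair and struct frob_dev are hypothetical.
 *
 *      void frob_freeze(struct frob_dev *fd)
 *      {
 *              percpu_down_write(&fd->rwsem);
 *              percpu_rwsem_release(&fd->rwsem, false, _THIS_IP_);
 *      }
 *
 *      void frob_thaw(struct frob_dev *fd)
 *      {
 *              percpu_rwsem_acquire(&fd->rwsem, false, _THIS_IP_);
 *              percpu_up_write(&fd->rwsem);
 *      }
 */
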
#endif |