Blame view
kernel/rcupdate.c
4.12 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 |
/* * Read-Copy Update mechanism for mutual exclusion * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * |
01c1c660f
|
18 |
* Copyright IBM Corporation, 2001 |
1da177e4c
|
19 20 21 |
* * Authors: Dipankar Sarma <dipankar@in.ibm.com> * Manfred Spraul <manfred@colorfullife.com> |
a71fca58b
|
22 |
* |
1da177e4c
|
23 24 25 26 27 28 29 |
* Based on the original work by Paul McKenney <paulmck@us.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - |
a71fca58b
|
30 |
* http://lse.sourceforge.net/locking/rcupdate.html |
1da177e4c
|
31 32 33 34 35 36 37 38 39 40 41 |
* */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <asm/atomic.h> #include <linux/bitops.h> |
1da177e4c
|
42 43 |
#include <linux/percpu.h> #include <linux/notifier.h> |
1da177e4c
|
44 |
#include <linux/cpu.h> |
9331b3157
|
45 |
#include <linux/mutex.h> |
01c1c660f
|
46 |
#include <linux/module.h> |
d9f1bb6ad
|
47 |
#include <linux/kernel_stat.h> |
e3818b8dc
|
48 |
#include <linux/hardirq.h> |
1da177e4c
|
49 |
|
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep class keys and maps used to track the three flavors of RCU
 * read-side critical section (rcu_read_lock(), rcu_read_lock_bh(),
 * rcu_read_lock_sched()) as if they were locks, so that lockdep can
 * detect illegal uses such as sleeping inside a critical section.
 */
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif
/*
 * Nonzero once the scheduler is fully initialized (set by
 * rcu_scheduler_starting() below).  While zero, the boot-time idle
 * task may legally contain RCU read-side critical sections; lockdep
 * checks (see debug_lockdep_rcu_enabled()) are suppressed until then.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Report whether lockdep-based RCU checking is currently meaningful:
 * the scheduler must be running (rcu_scheduler_active), lockdep must
 * not have shut itself down (debug_locks), and we must not already be
 * recursing inside lockdep.  Checked in this order so the cheap flags
 * short-circuit before current->lockdep_recursion is read.
 */
int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
e3818b8dc
|
76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 |
/** * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section? * * Check for bottom half being disabled, which covers both the * CONFIG_PROVE_RCU and not cases. Note that if someone uses * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled) * will show the situation. * * Check debug_lockdep_rcu_enabled() to prevent false positives during boot. */ int rcu_read_lock_bh_held(void) { if (!debug_lockdep_rcu_enabled()) return 1; return in_softirq(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
/*
 * This function is invoked towards the end of the scheduler's initialization
 * process.  Before this is called, the idle task might contain
 * RCU read-side critical sections (during which time, this idle
 * task is booting the system).  After this function is called, the
 * idle tasks are prohibited from containing RCU read-side critical
 * sections.
 */
void rcu_scheduler_starting(void)
{
	/* We must still be single-CPU, before any context switch occurs. */
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
fbf6bfca7
|
109 110 111 112 |
/* * Awaken the corresponding synchronize_rcu() instance now that a * grace period has elapsed. */ |
4446a36ff
|
113 |
void wakeme_after_rcu(struct rcu_head *head) |
21a1ea9eb
|
114 |
{ |
01c1c660f
|
115 116 117 118 |
struct rcu_synchronize *rcu; rcu = container_of(head, struct rcu_synchronize, head); complete(&rcu->completion); |
21a1ea9eb
|
119 |
} |
#ifdef CONFIG_PROVE_RCU

/*
 * wrapper function to avoid #include problems.
 *
 * Exposes thread_group_empty(current) to code that cannot pull in the
 * sched/signal headers directly.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);

#endif /* #ifdef CONFIG_PROVE_RCU */