kernel/rcutiny.c
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"
#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
	if (--rcu_dynticks_nesting == 0)
		rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
	rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */
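/*
 * Illustrative call sequence for the pair above (a sketch, not code
 * from this file).  Only the transition of rcu_dynticks_nesting to
 * zero enters the extended quiescent state, and the matching exit
 * leaves it:
 *
 *	rcu_enter_nohz();	nesting 1 -> 0, extended QS, QS recorded
 *	rcu_exit_nohz();	nesting 0 -> 1, extended QS ends
 */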
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Irqs are disabled when this is called, to avoid confusion due to
 * interrupt handlers invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) &&
	     !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}
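/*
 * Callback-list layout assumed by the code below (an illustration
 * inferred from this file's list manipulations, not a comment from the
 * original).  ->rcucblist is a singly linked list of rcu_head
 * structures.  ->donetail references the ->next pointer of the last
 * callback whose grace period has elapsed, and ->curtail that of the
 * last callback posted:
 *
 *	rcucblist --> done1 --> done2 --> new1 --> new2 --> NULL
 *	                        ^                  ^
 *	                     donetail           curtail
 *
 * This is why rcu_qsctr_help() can end a grace period, and
 * __rcu_process_callbacks() can splice off the done sublist, in O(1)
 * time.
 */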
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}
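/*
 * Note: rcu_process_callbacks() runs as a softirq handler.  The wiring
 * lives in rcutiny_plugin.h (hence the forward declarations above),
 * presumably registering it via open_softirq(RCU_SOFTIRQ,
 * rcu_process_callbacks) and having invoke_rcu_callbacks() raise that
 * softirq when a grace period ends.
 */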
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
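/*
 * Illustrative updater pattern for synchronize_sched() (a sketch;
 * struct foo and foo_remove() are hypothetical, not part of this
 * file).  Unlink the element, wait for pre-existing readers, then
 * free it:
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *	};
 *
 *	void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		synchronize_sched();
 *		kfree(fp);
 *	}
 */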
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
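/*
 * Illustrative use of call_rcu_sched() (a sketch; struct foo and
 * foo_reclaim() are hypothetical).  The callback receives the embedded
 * rcu_head and recovers the enclosing structure via container_of():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 */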
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);