kernel/rcupdate.c

  /*
   * Read-Copy Update mechanism for mutual exclusion
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   *
   * Copyright IBM Corporation, 2001
   *
   * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   *	    Manfred Spraul <manfred@colorfullife.com>
   *
   * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
   * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
   * Papers:
   * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
   * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
   *
   * For detailed explanation of Read-Copy Update mechanism see -
   * 		http://lse.sourceforge.net/locking/rcupdate.html
   *
   */
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/spinlock.h>
  #include <linux/smp.h>
  #include <linux/interrupt.h>
  #include <linux/sched.h>
  #include <asm/atomic.h>
  #include <linux/bitops.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/cpu.h>
  #include <linux/mutex.h>
  #include <linux/module.h>
  #include <linux/kernel_stat.h>

  enum rcu_barrier {
  	RCU_BARRIER_STD,
  	RCU_BARRIER_BH,
  	RCU_BARRIER_SCHED,
  };
  static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
  static atomic_t rcu_barrier_cpu_count;
  static DEFINE_MUTEX(rcu_barrier_mutex);
  static struct completion rcu_barrier_completion;
  int rcu_scheduler_active __read_mostly;

  static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
  static struct rcu_head rcu_migrate_head[3];
  static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
  /*
   * Awaken the corresponding synchronize_rcu() instance now that a
   * grace period has elapsed.
   */
  void wakeme_after_rcu(struct rcu_head *head)
  {
  	struct rcu_synchronize *rcu;
  
  	rcu = container_of(head, struct rcu_synchronize, head);
  	complete(&rcu->completion);
  }
  
  /**
   * synchronize_rcu - wait until a grace period has elapsed.
   *
   * Control will return to the caller some time after a full grace
   * period has elapsed, in other words after all currently executing RCU
   * read-side critical sections have completed.  RCU read-side critical
   * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
   * and may be nested.
   */
  void synchronize_rcu(void)
  {
  	struct rcu_synchronize rcu;
  
  	if (rcu_blocking_is_gp())
  		return;
  	init_completion(&rcu.completion);
  	/* Will wake me after RCU finished. */
  	call_rcu(&rcu.head, wakeme_after_rcu);
  	/* Wait for it. */
  	wait_for_completion(&rcu.completion);
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu);
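  
  /*
   * Illustrative sketch (not part of this file; "gp", "new_fp" and
   * "struct foo" are hypothetical names): a typical updater publishes a
   * new version of an RCU-protected structure, waits for a grace period,
   * and only then frees the old version:
   *
   *	struct foo *old_fp = gp;
   *
   *	rcu_assign_pointer(gp, new_fp);	// publish the new version
   *	synchronize_rcu();		// wait out pre-existing readers
   *	kfree(old_fp);			// no reader can still see old_fp
   */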

  static void rcu_barrier_callback(struct rcu_head *notused)
  {
  	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  		complete(&rcu_barrier_completion);
  }
  
  /*
   * Called with preemption disabled, and from cross-cpu IRQ context.
   */
  static void rcu_barrier_func(void *type)
  {
  	int cpu = smp_processor_id();
  	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

  	atomic_inc(&rcu_barrier_cpu_count);
  	switch ((enum rcu_barrier)type) {
  	case RCU_BARRIER_STD:
  		call_rcu(head, rcu_barrier_callback);
  		break;
  	case RCU_BARRIER_BH:
  		call_rcu_bh(head, rcu_barrier_callback);
  		break;
  	case RCU_BARRIER_SCHED:
  		call_rcu_sched(head, rcu_barrier_callback);
  		break;
  	}
  }
  static inline void wait_migrated_callbacks(void)
  {
  	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
  }

  /*
   * Orchestrate the specified type of RCU barrier, waiting for all
   * RCU callbacks of the specified type to complete.
   */
  static void _rcu_barrier(enum rcu_barrier type)
  {
  	BUG_ON(in_interrupt());
  	/* Take cpucontrol mutex to protect against CPU hotplug */
  	mutex_lock(&rcu_barrier_mutex);
  	init_completion(&rcu_barrier_completion);
  	/*
  	 * Initialize rcu_barrier_cpu_count to 1, then invoke
  	 * rcu_barrier_func() on each CPU, so that each CPU also has
  	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
  	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
  	 * might complete its grace period before all of the other CPUs
  	 * did their increment, causing this function to return too
  	 * early.
  	 */
  	atomic_set(&rcu_barrier_cpu_count, 1);
  	on_each_cpu(rcu_barrier_func, (void *)type, 1);
  	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  		complete(&rcu_barrier_completion);
  	wait_for_completion(&rcu_barrier_completion);
  	mutex_unlock(&rcu_barrier_mutex);
  	wait_migrated_callbacks();
  }
  
  /**
   * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
   */
  void rcu_barrier(void)
  {
  	_rcu_barrier(RCU_BARRIER_STD);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier);
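  
  /*
   * Illustrative sketch (hypothetical names, not part of this file): a
   * module that queues callbacks with call_rcu() must wait for all of
   * them to run before it can be safely unloaded, since a pending
   * callback may still reference module code or data:
   *
   *	static void __exit foo_exit(void)
   *	{
   *		foo_unregister();	// stop queueing new callbacks
   *		rcu_barrier();		// wait for those already queued
   *		kmem_cache_destroy(foo_cachep);
   *	}
   */
  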
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
  void rcu_barrier_bh(void)
  {
  	_rcu_barrier(RCU_BARRIER_BH);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
   */
  void rcu_barrier_sched(void)
  {
  	_rcu_barrier(RCU_BARRIER_SCHED);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_sched);
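  
  /*
   * Note that each barrier waits only for callbacks queued with the
   * matching call_rcu() variant.  Illustrative sketch (hypothetical
   * names):
   *
   *	call_rcu_bh(&p->rcu_head, foo_reclaim);	// softirq-flavor callback
   *	...
   *	rcu_barrier_bh();	// waits for it; rcu_barrier() would not
   */
  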
  static void rcu_migrate_callback(struct rcu_head *notused)
  {
  	if (atomic_dec_and_test(&rcu_migrate_type_count))
  		wake_up(&rcu_migrate_wq);
  }
  static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
  		unsigned long action, void *hcpu)
  {
  	if (action == CPU_DYING) {
  		/*
  		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
  		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
  		 * returns, all online cpus have queued rcu_barrier_func(),
  		 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
  		 *
  		 * These callbacks ensure _rcu_barrier() waits for all
  		 * RCU callbacks of the specified type to complete.
  		 */
  		atomic_set(&rcu_migrate_type_count, 3);
  		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
  		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
  		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
  	} else if (action == CPU_POST_DEAD) {
  		/* rcu_migrate_head is protected by cpu_add_remove_lock */
  		wait_migrated_callbacks();
  	}
  
  	return NOTIFY_OK;
  }
  void __init rcu_init(void)
  {
  	__rcu_init();
  	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
  }
  void rcu_scheduler_starting(void)
  {
  	WARN_ON(num_online_cpus() != 1);
  	WARN_ON(nr_context_switches() > 0);
  	rcu_scheduler_active = 1;
  }