Blame view

kernel/cpu.c 47.4 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
9
10
11
12
  /* CPU control.
   * (C) 2001, 2002, 2003, 2004 Rusty Russell
   *
   * This code is licenced under the GPL.
   */
  #include <linux/proc_fs.h>
  #include <linux/smp.h>
  #include <linux/init.h>
  #include <linux/notifier.h>
  #include <linux/sched.h>
  #include <linux/unistd.h>
  #include <linux/cpu.h>
cb79295e2   Anton Vorontsov   cpu: introduce cl...
13
14
  #include <linux/oom.h>
  #include <linux/rcupdate.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
15
  #include <linux/export.h>
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
16
  #include <linux/bug.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
17
18
  #include <linux/kthread.h>
  #include <linux/stop_machine.h>
81615b624   Ingo Molnar   [PATCH] Convert k...
19
  #include <linux/mutex.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
20
  #include <linux/gfp.h>
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
21
  #include <linux/suspend.h>
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
22
  #include <linux/lockdep.h>
345527b1e   Preeti U Murthy   clockevents: Fix ...
23
  #include <linux/tick.h>
a89941816   Thomas Gleixner   hotplug: Prevent ...
24
  #include <linux/irq.h>
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
25
  #include <linux/smpboot.h>
e6d4989a9   Richard Weinberger   relayfs: Convert ...
26
  #include <linux/relay.h>
6731d4f12   Sebastian Andrzej Siewior   slab: Convert to ...
27
  #include <linux/slab.h>
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
28

bb3632c61   Todd E Brandt   PM / sleep: trace...
29
  #include <trace/events/power.h>
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
30
31
  #define CREATE_TRACE_POINTS
  #include <trace/events/cpuhp.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
32

38498a67a   Thomas Gleixner   smp: Add generic ...
33
  #include "smpboot.h"
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
34
35
36
37
  /**
   * cpuhp_cpu_state - Per cpu hotplug state storage
   * @state:	The current cpu state
   * @target:	The target state
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
38
39
   * @thread:	Pointer to the hotplug thread
   * @should_run:	Thread should execute
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
40
   * @rollback:	Perform a rollback
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
41
42
43
   * @single:	Single callback invocation
   * @bringup:	Single callback bringup or teardown selector
   * @cb_state:	The state for a single callback (install/uninstall)
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
44
45
   * @result:	Result of the operation
   * @done:	Signal completion to the issuer of the task
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
46
47
48
49
   */
  struct cpuhp_cpu_state {
  	enum cpuhp_state	state;
  	enum cpuhp_state	target;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
50
51
52
  #ifdef CONFIG_SMP
  	struct task_struct	*thread;
  	bool			should_run;
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
53
  	bool			rollback;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
54
55
  	bool			single;
  	bool			bringup;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
56
  	struct hlist_node	*node;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
57
  	enum cpuhp_state	cb_state;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
58
59
60
  	int			result;
  	struct completion	done;
  #endif
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
61
62
63
64
65
66
67
68
69
70
71
  };
  
  static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
  
  /**
   * cpuhp_step - Hotplug state machine step
   * @name:	Name of the step
   * @startup:	Startup function of the step
   * @teardown:	Teardown function of the step
   * @skip_onerr:	Do not invoke the functions on error rollback
   *		Will go away once the notifiers	are gone
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
72
   * @cant_stop:	Bringup/teardown can't be stopped at this step
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
73
74
   */
  struct cpuhp_step {
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
75
76
  	const char		*name;
  	union {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
77
78
79
80
  		int		(*single)(unsigned int cpu);
  		int		(*multi)(unsigned int cpu,
  					 struct hlist_node *node);
  	} startup;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
81
  	union {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
82
83
84
85
  		int		(*single)(unsigned int cpu);
  		int		(*multi)(unsigned int cpu,
  					 struct hlist_node *node);
  	} teardown;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
86
87
88
89
  	struct hlist_head	list;
  	bool			skip_onerr;
  	bool			cant_stop;
  	bool			multi_instance;
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
90
  };
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
91
  static DEFINE_MUTEX(cpuhp_state_mutex);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
92
  static struct cpuhp_step cpuhp_bp_states[];
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
93
  static struct cpuhp_step cpuhp_ap_states[];
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
94

a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
  static bool cpuhp_is_ap_state(enum cpuhp_state state)
  {
  	/*
  	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
  	 * purposes as that state is handled explicitly in cpu_down.
  	 */
  	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
  }
  
  static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
  {
  	struct cpuhp_step *sp;
  
  	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
  	return sp + state;
  }
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
111
112
113
114
  /**
   * cpuhp_invoke_callback _ Invoke the callbacks for a given state
   * @cpu:	The cpu for which the callback should be invoked
   * @step:	The step in the state machine
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
115
   * @bringup:	True if the bringup callback should be invoked
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
116
   *
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
117
   * Called from cpu hotplug and from the state register machinery.
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
118
   */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
119
  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
120
  				 bool bringup, struct hlist_node *node)
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
121
122
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
123
  	struct cpuhp_step *step = cpuhp_get_step(state);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
124
125
126
127
128
  	int (*cbm)(unsigned int cpu, struct hlist_node *node);
  	int (*cb)(unsigned int cpu);
  	int ret, cnt;
  
  	if (!step->multi_instance) {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
129
  		cb = bringup ? step->startup.single : step->teardown.single;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
130
131
  		if (!cb)
  			return 0;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
132
  		trace_cpuhp_enter(cpu, st->target, state, cb);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
133
  		ret = cb(cpu);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
134
  		trace_cpuhp_exit(cpu, st->state, state, ret);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
135
136
  		return ret;
  	}
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
137
  	cbm = bringup ? step->startup.multi : step->teardown.multi;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
  	if (!cbm)
  		return 0;
  
  	/* Single invocation for instance add/remove */
  	if (node) {
  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  		ret = cbm(cpu, node);
  		trace_cpuhp_exit(cpu, st->state, state, ret);
  		return ret;
  	}
  
  	/* State transition. Invoke on all instances */
  	cnt = 0;
  	hlist_for_each(node, &step->list) {
  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  		ret = cbm(cpu, node);
  		trace_cpuhp_exit(cpu, st->state, state, ret);
  		if (ret)
  			goto err;
  		cnt++;
  	}
  	return 0;
  err:
  	/* Rollback the instances if one failed */
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
162
  	cbm = !bringup ? step->startup.multi : step->teardown.multi;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
163
164
165
166
167
168
169
  	if (!cbm)
  		return ret;
  
  	hlist_for_each(node, &step->list) {
  		if (!cnt--)
  			break;
  		cbm(cpu, node);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
170
171
172
  	}
  	return ret;
  }
98a79d6a5   Rusty Russell   cpumask: centrali...
173
  #ifdef CONFIG_SMP
b3199c025   Rusty Russell   cpumask: switch o...
174
  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
aa9538777   Linus Torvalds   cpu hotplug: simp...
175
  static DEFINE_MUTEX(cpu_add_remove_lock);
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
176
177
  bool cpuhp_tasks_frozen;
  EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
178

79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
179
  /*
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
180
181
182
183
184
   * The following two APIs (cpu_maps_update_begin/done) must be used when
   * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
   * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
   * hotplug callback (un)registration performed using __register_cpu_notifier()
   * or __unregister_cpu_notifier().
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
185
186
187
188
189
   */
  void cpu_maps_update_begin(void)
  {
  	mutex_lock(&cpu_add_remove_lock);
  }
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
190
  EXPORT_SYMBOL(cpu_notifier_register_begin);
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
191
192
193
194
195
  
  void cpu_maps_update_done(void)
  {
  	mutex_unlock(&cpu_add_remove_lock);
  }
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
196
  EXPORT_SYMBOL(cpu_notifier_register_done);
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
197

5c113fbee   Daniel J Blueman   fix cpu_chain sec...
198
  static RAW_NOTIFIER_HEAD(cpu_chain);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
199

e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
200
201
202
203
  /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
   * Should always be manipulated under cpu_add_remove_lock
   */
  static int cpu_hotplug_disabled;
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
204
  #ifdef CONFIG_HOTPLUG_CPU
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
205
206
  static struct {
  	struct task_struct *active_writer;
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
207
208
209
210
  	/* wait queue to wake up the active_writer */
  	wait_queue_head_t wq;
  	/* verifies that no writer will get active while readers are active */
  	struct mutex lock;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
211
212
213
214
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
  	 */
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
215
  	atomic_t refcount;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
216
217
218
219
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	struct lockdep_map dep_map;
  #endif
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
220
221
  } cpu_hotplug = {
  	.active_writer = NULL,
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
222
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
223
  	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
224
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
a705e07b9   Joonas Lahtinen   cpu/hotplug: Use ...
225
  	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
226
  #endif
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
227
  };
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
228

a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
229
230
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
dd56af42b   Paul E. McKenney   rcu: Eliminate de...
231
232
  #define cpuhp_lock_acquire_tryread() \
  				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
233
234
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
62db99f47   Paul E. McKenney   cpu: Avoid puts_p...
235

86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
236
  void get_online_cpus(void)
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
237
  {
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
238
239
  	might_sleep();
  	if (cpu_hotplug.active_writer == current)
aa9538777   Linus Torvalds   cpu hotplug: simp...
240
  		return;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
241
  	cpuhp_lock_acquire_read();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
242
  	mutex_lock(&cpu_hotplug.lock);
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
243
  	atomic_inc(&cpu_hotplug.refcount);
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
244
  	mutex_unlock(&cpu_hotplug.lock);
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
245
  }
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
246
  EXPORT_SYMBOL_GPL(get_online_cpus);
90d45d17f   Ashok Raj   [PATCH] cpu hotpl...
247

86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
248
  void put_online_cpus(void)
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
249
  {
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
250
  	int refcount;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
251
  	if (cpu_hotplug.active_writer == current)
aa9538777   Linus Torvalds   cpu hotplug: simp...
252
  		return;
075663d19   Srivatsa S. Bhat   CPU hotplug, debu...
253

87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
254
255
256
257
258
259
  	refcount = atomic_dec_return(&cpu_hotplug.refcount);
  	if (WARN_ON(refcount < 0)) /* try to fix things up */
  		atomic_inc(&cpu_hotplug.refcount);
  
  	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
  		wake_up(&cpu_hotplug.wq);
075663d19   Srivatsa S. Bhat   CPU hotplug, debu...
260

a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
261
  	cpuhp_lock_release();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
262

a9d9baa1e   Ashok Raj   [PATCH] clean up ...
263
  }
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
264
  EXPORT_SYMBOL_GPL(put_online_cpus);
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
265

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
266
267
268
269
270
271
272
  /*
   * This ensures that the hotplug operation can begin only when the
   * refcount goes to zero.
   *
   * Note that during a cpu-hotplug operation, the new readers, if any,
   * will be blocked by the cpu_hotplug.lock
   *
d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
273
274
   * Since cpu_hotplug_begin() is always called after invoking
   * cpu_maps_update_begin(), we can be sure that only one writer is active.
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
275
276
277
278
279
280
281
282
283
284
   *
   * Note that theoretically, there is a possibility of a livelock:
   * - Refcount goes to zero, last reader wakes up the sleeping
   *   writer.
   * - Last reader unlocks the cpu_hotplug.lock.
   * - A new reader arrives at this moment, bumps up the refcount.
   * - The writer acquires the cpu_hotplug.lock finds the refcount
   *   non zero and goes to sleep again.
   *
   * However, this is very difficult to achieve in practice since
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
285
   * get_online_cpus() not an api which is called all that often.
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
286
287
   *
   */
b9d10be7a   Toshi Kani   ACPI / processor:...
288
  void cpu_hotplug_begin(void)
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
289
  {
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
290
  	DEFINE_WAIT(wait);
d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
291

87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
292
  	cpu_hotplug.active_writer = current;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
293
  	cpuhp_lock_acquire();
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
294

d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
295
296
  	for (;;) {
  		mutex_lock(&cpu_hotplug.lock);
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
297
298
299
  		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
  		if (likely(!atomic_read(&cpu_hotplug.refcount)))
  				break;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
300
301
  		mutex_unlock(&cpu_hotplug.lock);
  		schedule();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
302
  	}
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
303
  	finish_wait(&cpu_hotplug.wq, &wait);
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
304
  }
b9d10be7a   Toshi Kani   ACPI / processor:...
305
  void cpu_hotplug_done(void)
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
306
307
308
  {
  	cpu_hotplug.active_writer = NULL;
  	mutex_unlock(&cpu_hotplug.lock);
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
309
  	cpuhp_lock_release();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
310
  }
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
311

16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
312
313
314
315
316
317
318
319
320
321
  /*
   * Wait for currently running CPU hotplug operations to complete (if any) and
   * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
   * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
   * hotplug path before performing hotplug operations. So acquiring that lock
   * guarantees mutual exclusion from any currently running hotplug operations.
   */
  void cpu_hotplug_disable(void)
  {
  	cpu_maps_update_begin();
89af7ba57   Vitaly Kuznetsov   cpu-hotplug: conv...
322
  	cpu_hotplug_disabled++;
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
323
324
  	cpu_maps_update_done();
  }
32145c467   Vitaly Kuznetsov   cpu-hotplug: expo...
325
  EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
326

01b411590   Lianwei Wang   cpu/hotplug: Hand...
327
328
329
330
331
332
333
  static void __cpu_hotplug_enable(void)
  {
  	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable
  "))
  		return;
  	cpu_hotplug_disabled--;
  }
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
334
335
336
  void cpu_hotplug_enable(void)
  {
  	cpu_maps_update_begin();
01b411590   Lianwei Wang   cpu/hotplug: Hand...
337
  	__cpu_hotplug_enable();
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
338
339
  	cpu_maps_update_done();
  }
32145c467   Vitaly Kuznetsov   cpu-hotplug: expo...
340
  EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
b9d10be7a   Toshi Kani   ACPI / processor:...
341
  #endif	/* CONFIG_HOTPLUG_CPU */
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
342

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
343
  /* Need to know about CPUs going up/down? */
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
344
  int register_cpu_notifier(struct notifier_block *nb)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
345
  {
bd5349cfd   Neil Brown   [PATCH] Convert c...
346
  	int ret;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
347
  	cpu_maps_update_begin();
bd5349cfd   Neil Brown   [PATCH] Convert c...
348
  	ret = raw_notifier_chain_register(&cpu_chain, nb);
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
349
  	cpu_maps_update_done();
bd5349cfd   Neil Brown   [PATCH] Convert c...
350
  	return ret;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
351
  }
65edc68c3   Chandra Seetharaman   [PATCH] cpu hotpl...
352

71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
353
  int __register_cpu_notifier(struct notifier_block *nb)
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
354
355
356
  {
  	return raw_notifier_chain_register(&cpu_chain, nb);
  }
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
357
  static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
e9fb7631e   Akinobu Mita   cpu-hotplug: intr...
358
359
  			int *nr_calls)
  {
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
360
361
  	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
  	void *hcpu = (void *)(long)cpu;
e6bde73b0   Akinobu Mita   cpu-hotplug: retu...
362
  	int ret;
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
363
  	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
e9fb7631e   Akinobu Mita   cpu-hotplug: intr...
364
  					nr_calls);
e6bde73b0   Akinobu Mita   cpu-hotplug: retu...
365
366
  
  	return notifier_to_errno(ret);
e9fb7631e   Akinobu Mita   cpu-hotplug: intr...
367
  }
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
368
  static int cpu_notify(unsigned long val, unsigned int cpu)
e9fb7631e   Akinobu Mita   cpu-hotplug: intr...
369
  {
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
370
  	return __cpu_notify(val, cpu, -1, NULL);
e9fb7631e   Akinobu Mita   cpu-hotplug: intr...
371
  }
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
372
373
374
375
  static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
  {
  	BUG_ON(cpu_notify(val, cpu));
  }
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
  /* Notifier wrappers for transitioning to state machine */
  static int notify_prepare(unsigned int cpu)
  {
  	int nr_calls = 0;
  	int ret;
  
  	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
  	if (ret) {
  		nr_calls--;
  		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed
  ",
  				__func__, cpu);
  		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
  	}
  	return ret;
  }
  
  static int notify_online(unsigned int cpu)
  {
  	cpu_notify(CPU_ONLINE, cpu);
  	return 0;
  }
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
398
399
400
401
402
403
404
  static int bringup_wait_for_ap(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  	wait_for_completion(&st->done);
  	return st->result;
  }
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
405
406
407
408
  static int bringup_cpu(unsigned int cpu)
  {
  	struct task_struct *idle = idle_thread_get(cpu);
  	int ret;
aa877175e   Boris Ostrovsky   cpu/hotplug: Prev...
409
410
411
412
413
414
  	/*
  	 * Some architectures have to walk the irq descriptors to
  	 * setup the vector space for the cpu which comes online.
  	 * Prevent irq alloc/free across the bringup.
  	 */
  	irq_lock_sparse();
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
415
416
  	/* Arch-specific enabling code. */
  	ret = __cpu_up(cpu, idle);
aa877175e   Boris Ostrovsky   cpu/hotplug: Prev...
417
  	irq_unlock_sparse();
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
418
419
420
421
  	if (ret) {
  		cpu_notify(CPU_UP_CANCELED, cpu);
  		return ret;
  	}
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
422
  	ret = bringup_wait_for_ap(cpu);
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
423
  	BUG_ON(!cpu_online(cpu));
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
424
  	return ret;
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
425
  }
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
426
427
428
  /*
   * Hotplug state machine related functions
   */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
429
  static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
430
431
  {
  	for (st->state++; st->state < st->target; st->state++) {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
432
  		struct cpuhp_step *step = cpuhp_get_step(st->state);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
433
434
  
  		if (!step->skip_onerr)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
435
  			cpuhp_invoke_callback(cpu, st->state, true, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
436
437
438
439
  	}
  }
  
  static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
440
  				enum cpuhp_state target)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
441
442
443
444
445
  {
  	enum cpuhp_state prev_state = st->state;
  	int ret = 0;
  
  	for (; st->state > target; st->state--) {
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
446
  		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
447
448
  		if (ret) {
  			st->target = prev_state;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
449
  			undo_cpu_down(cpu, st);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
450
451
452
453
454
  			break;
  		}
  	}
  	return ret;
  }
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
455
  static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
456
457
  {
  	for (st->state--; st->state > st->target; st->state--) {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
458
  		struct cpuhp_step *step = cpuhp_get_step(st->state);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
459
460
  
  		if (!step->skip_onerr)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
461
  			cpuhp_invoke_callback(cpu, st->state, false, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
462
463
464
465
  	}
  }
  
  static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
466
  			      enum cpuhp_state target)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
467
468
469
470
471
  {
  	enum cpuhp_state prev_state = st->state;
  	int ret = 0;
  
  	while (st->state < target) {
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
472
  		st->state++;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
473
  		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
474
475
  		if (ret) {
  			st->target = prev_state;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
476
  			undo_cpu_up(cpu, st);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
477
478
479
480
481
  			break;
  		}
  	}
  	return ret;
  }
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
  /*
   * The cpu hotplug threads manage the bringup and teardown of the cpus
   */
  static void cpuhp_create(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  	init_completion(&st->done);
  }
  
  static int cpuhp_should_run(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  
  	return st->should_run;
  }
  
  /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
  static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
  {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
502
  	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
503

a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
504
  	return cpuhp_down_callbacks(cpu, st, target);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
505
506
507
508
509
  }
  
  /* Execute the online startup callbacks. Used to be CPU_ONLINE */
  static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
  {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
510
  	return cpuhp_up_callbacks(cpu, st, st->target);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
  }
  
  /*
   * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
   * callbacks when a state gets [un]installed at runtime.
   */
  static void cpuhp_thread_fun(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  	int ret = 0;
  
  	/*
  	 * Paired with the mb() in cpuhp_kick_ap_work and
  	 * cpuhp_invoke_ap_callback, so the work set is consistent visible.
  	 */
  	smp_mb();
  	if (!st->should_run)
  		return;
  
  	st->should_run = false;
  
  	/* Single callback invocation for [un]install ? */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
533
  	if (st->single) {
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
534
535
  		if (st->cb_state < CPUHP_AP_ONLINE) {
  			local_irq_disable();
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
536
  			ret = cpuhp_invoke_callback(cpu, st->cb_state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
537
  						    st->bringup, st->node);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
538
539
  			local_irq_enable();
  		} else {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
540
  			ret = cpuhp_invoke_callback(cpu, st->cb_state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
541
  						    st->bringup, st->node);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
542
  		}
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
543
544
  	} else if (st->rollback) {
  		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
545
  		undo_cpu_down(cpu, st);
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
546
547
548
549
550
551
  		/*
  		 * This is a momentary workaround to keep the notifier users
  		 * happy. Will go away once we got rid of the notifiers.
  		 */
  		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
  		st->rollback = false;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
552
  	} else {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
553
  		/* Cannot happen .... */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
554
  		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
555

4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
556
557
558
559
560
561
562
563
564
565
566
  		/* Regular hotplug work */
  		if (st->state < st->target)
  			ret = cpuhp_ap_online(cpu, st);
  		else if (st->state > st->target)
  			ret = cpuhp_ap_offline(cpu, st);
  	}
  	st->result = ret;
  	complete(&st->done);
  }
  
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Nothing to invoke on an offline CPU; treat it as success. */
	if (!cpu_online(cpu))
		return 0;

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	/* Queue a single-state callback request for the AP hotplug thread. */
	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	/* Wait for the thread to run the callback and return its result. */
	wait_for_completion(&st->done);
	return st->result;
}
  
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	/* Multi-state request: clear the result, mark it as non-single. */
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}
  
/*
 * Kick the AP hotplug thread of @cpu, wait for it to complete the
 * requested state transitions and return the result it reported.
 */
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}
  
/* Descriptor for the per-cpu "cpuhp/%u" hotplug threads (self parking). */
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

/* Register the per-cpu hotplug threads and unpark the boot CPU's one. */
void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
638
  EXPORT_SYMBOL(register_cpu_notifier);
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
639
  EXPORT_SYMBOL(__register_cpu_notifier);
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
640
/* Remove @nb from the cpu notifier chain, serialized by the cpu maps lock. */
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
647
/*
 * Unlocked variant of unregister_cpu_notifier(). NOTE(review): by analogy
 * with the locked version, the caller presumably must already hold the cpu
 * maps lock (cpu_maps_update_begin()) -- confirm against callers.
 */
void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
56eaecc8e   Michal Hocko   hotplug: Make reg...
652
  #ifdef CONFIG_HOTPLUG_CPU
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
653
654
655
656
657
658
659
660
661
662
663
664
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
b728ca060   Kirill Tkhai   sched: Rework che...
692
/* Warn about runnable tasks still bound to @dead_cpu after it went down. */
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* Only runnable tasks are interesting. */
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading to do not warn about a task,
		 * which was running on this cpu in the past, and
		 * it's just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
984581728   Thomas Gleixner   cpu/hotplug: Spli...
716
717
718
719
720
721
722
723
724
725
726
727
728
729
/*
 * Run the CPU_DOWN_PREPARE notifiers for @cpu. On failure, replay the
 * already invoked notifiers with CPU_DOWN_FAILED so they can undo their
 * preparation, then propagate the error.
 */
static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		/* Exclude the notifier which failed from the rollback. */
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
	}
	return err;
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
730
/* Take this CPU down. Runs on the dying CPU via stop_machine(). */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	/* Never step below CPUHP_AP_OFFLINE in this context. */
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
984581728   Thomas Gleixner   cpu/hotplug: Spli...
758
/*
 * Bring @cpu from CPUHP_TEARDOWN_CPU down to a dead state: park its
 * threads, run take_cpu_down() on it via stop_machine(), wait for the
 * idle task to report the CPU dead and finally let the arch kill it.
 */
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
804

984581728   Thomas Gleixner   cpu/hotplug: Spli...
805
806
807
/* Invoke the CPU_DEAD notifiers and warn about tasks left on the dead CPU. */
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}
71f87b2fc   Thomas Gleixner   cpu/hotplug: Plug...
811
812
813
814
815
816
/* smp_call_function callback: complete st->done on behalf of the dead CPU. */
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}
e69aab131   Thomas Gleixner   cpu/hotplug: Make...
817
818
819
820
821
/*
 * Report this CPU dead. Runs on the dying CPU itself (see this_cpu_ptr
 * and rcu_report_dead(smp_processor_id()) below).
 */
void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
831
832
833
834
835
836
837
  #else
  #define notify_down_prepare	NULL
  #define takedown_cpu		NULL
  #define notify_dead		NULL
  #endif
  
  #ifdef CONFIG_HOTPLUG_CPU
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
838

984581728   Thomas Gleixner   cpu/hotplug: Spli...
839
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	/* Never take the last online CPU down. */
	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	/* Teardown failed mid-way: roll back via the AP hotplug thread. */
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
896
  static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
897
  {
9ea09af3b   Heiko Carstens   stop_machine: int...
898
  	int err;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
899

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
900
  	cpu_maps_update_begin();
e761b7725   Max Krasnyansky   cpu hotplug, sche...
901
902
  
  	if (cpu_hotplug_disabled) {
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
903
  		err = -EBUSY;
e761b7725   Max Krasnyansky   cpu hotplug, sche...
904
905
  		goto out;
  	}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
906
  	err = _cpu_down(cpu, 0, target);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
907

e761b7725   Max Krasnyansky   cpu hotplug, sche...
908
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
909
  	cpu_maps_update_done();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
910
911
  	return err;
  }
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
912
913
914
915
/* Take @cpu fully offline (target state CPUHP_OFFLINE). */
int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
917
  #endif /*CONFIG_HOTPLUG_CPU*/
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
918
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	/* Do not step past CPUHP_AP_ONLINE from this context. */
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}
949338e35   Thomas Gleixner   cpu/hotplug: Move...
935
936
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	unsigned int cpu = smp_processor_id();

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;

	/* Unpark the stopper thread and the hotplug thread of this cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE)
		__cpuhp_kick_ap_work(st);
	else
		complete(&st->done);
}
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
962
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpu_hotplug_done();
	return ret;
}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
1018
/*
 * Common entry point for onlining a CPU: validate the cpu id, make sure
 * its memory node is online, then call _cpu_up() under the cpu maps lock
 * unless hotplug is currently disabled.
 */
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	/* Bring the CPU's memory node online before onlining the CPU. */
	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
1048
1049
1050
1051
1052
  
/* Bring @cpu fully online (target state CPUHP_ONLINE). */
int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1054

f3de4be9d   Rafael J. Wysocki   PM: Fix dependenc...
1055
  #ifdef CONFIG_PM_SLEEP_SMP
e0b582ec5   Rusty Russell   cpumask: convert ...
1056
  static cpumask_var_t frozen_cpus;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1057

d391e5522   James Morse   cpu/hotplug: Allo...
1058
/*
 * Take every online CPU except @primary down for suspend/hibernation.
 * If @primary is not online, the first online CPU is kept instead.
 * Successfully offlined CPUs are recorded in frozen_cpus so that
 * enable_nonboot_cpus() can bring them back up on resume.
 */
int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
d0af9eed5   Suresh Siddha   x86, pat/mtrr: Re...
1103
1104
1105
1106
1107
1108
1109
/*
 * Arch hooks bracketing the re-online loop in enable_nonboot_cpus().
 * The default implementations do nothing; architectures may override.
 */
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
1110
/* Bring the CPUs recorded in frozen_cpus back online after resume. */
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		/* Keep going: try to online the remaining frozen CPUs. */
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
e0b582ec5   Rusty Russell   cpumask: convert ...
1142

d7268a31c   Fenghua Yu   CPU: Add right qu...
1143
  static int __init alloc_frozen_cpus(void)
e0b582ec5   Rusty Russell   cpumask: convert ...
1144
1145
1146
1147
1148
1149
  {
  	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
  		return -ENOMEM;
  	return 0;
  }
  core_initcall(alloc_frozen_cpus);
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1150
1151
  
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
d7268a31c   Fenghua Yu   CPU: Add right qu...
1184
  static int __init cpu_hotplug_pm_sync_init(void)
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1185
  {
6e32d479d   Fenghua Yu   kernel/cpu.c: Add...
1186
1187
1188
1189
1190
  	/*
  	 * cpu_hotplug_pm_callback has higher priority than x86
  	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
  	 * to disable cpu hotplug to avoid cpu hotplug race.
  	 */
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1191
1192
1193
1194
  	pm_notifier(cpu_hotplug_pm_callback, 0);
  	return 0;
  }
  core_initcall(cpu_hotplug_pm_sync_init);
f3de4be9d   Rafael J. Wysocki   PM: Fix dependenc...
1195
  #endif /* CONFIG_PM_SLEEP_SMP */
68f4f1ec0   Max Krasnyansky   sched: Move cpu m...
1196
1197
  
  #endif /* CONFIG_SMP */
b8d317d10   Mike Travis   cpumask: make cpu...
1198

cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1199
1200
1201
1202
  /* Boot processor state steps */
  static struct cpuhp_step cpuhp_bp_states[] = {
  	[CPUHP_OFFLINE] = {
  		.name			= "offline",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1203
1204
  		.startup.single		= NULL,
  		.teardown.single	= NULL,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1205
1206
1207
  	},
  #ifdef CONFIG_SMP
  	[CPUHP_CREATE_THREADS]= {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1208
  		.name			= "threads:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1209
1210
  		.startup.single		= smpboot_create_threads,
  		.teardown.single	= NULL,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1211
  		.cant_stop		= true,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1212
  	},
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1213
  	[CPUHP_PERF_PREPARE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1214
1215
1216
  		.name			= "perf:prepare",
  		.startup.single		= perf_event_init_cpu,
  		.teardown.single	= perf_event_exit_cpu,
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1217
  	},
7ee681b25   Thomas Gleixner   workqueue: Conver...
1218
  	[CPUHP_WORKQUEUE_PREP] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1219
1220
1221
  		.name			= "workqueue:prepare",
  		.startup.single		= workqueue_prepare_cpu,
  		.teardown.single	= NULL,
7ee681b25   Thomas Gleixner   workqueue: Conver...
1222
  	},
27590dc17   Thomas Gleixner   hrtimer: Convert ...
1223
  	[CPUHP_HRTIMERS_PREPARE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1224
1225
1226
  		.name			= "hrtimers:prepare",
  		.startup.single		= hrtimers_prepare_cpu,
  		.teardown.single	= hrtimers_dead_cpu,
27590dc17   Thomas Gleixner   hrtimer: Convert ...
1227
  	},
31487f832   Richard Weinberger   smp/cfd: Convert ...
1228
  	[CPUHP_SMPCFD_PREPARE] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1229
  		.name			= "smpcfd:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1230
1231
  		.startup.single		= smpcfd_prepare_cpu,
  		.teardown.single	= smpcfd_dead_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1232
  	},
e6d4989a9   Richard Weinberger   relayfs: Convert ...
1233
1234
1235
1236
1237
  	[CPUHP_RELAY_PREPARE] = {
  		.name			= "relay:prepare",
  		.startup.single		= relay_prepare_cpu,
  		.teardown.single	= NULL,
  	},
6731d4f12   Sebastian Andrzej Siewior   slab: Convert to ...
1238
1239
1240
1241
  	[CPUHP_SLAB_PREPARE] = {
  		.name			= "slab:prepare",
  		.startup.single		= slab_prepare_cpu,
  		.teardown.single	= slab_dead_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1242
  	},
4df837425   Thomas Gleixner   rcu: Convert rcut...
1243
  	[CPUHP_RCUTREE_PREP] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1244
  		.name			= "RCU/tree:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1245
1246
  		.startup.single		= rcutree_prepare_cpu,
  		.teardown.single	= rcutree_dead_cpu,
4df837425   Thomas Gleixner   rcu: Convert rcut...
1247
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1248
1249
1250
1251
  	/*
  	 * Preparatory and dead notifiers. Will be replaced once the notifiers
  	 * are converted to states.
  	 */
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1252
1253
  	[CPUHP_NOTIFY_PREPARE] = {
  		.name			= "notify:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1254
1255
  		.startup.single		= notify_prepare,
  		.teardown.single	= notify_dead,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1256
  		.skip_onerr		= true,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1257
  		.cant_stop		= true,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1258
  	},
4fae16dff   Richard Cochran   timers/core: Corr...
1259
1260
1261
1262
1263
1264
  	/*
  	 * On the tear-down path, timers_dead_cpu() must be invoked
  	 * before blk_mq_queue_reinit_notify() from notify_dead(),
  	 * otherwise a RCU stall occurs.
  	 */
  	[CPUHP_TIMERS_DEAD] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1265
1266
1267
  		.name			= "timers:dead",
  		.startup.single		= NULL,
  		.teardown.single	= timers_dead_cpu,
4fae16dff   Richard Cochran   timers/core: Corr...
1268
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1269
  	/* Kicks the plugged cpu into life */
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1270
1271
  	[CPUHP_BRINGUP_CPU] = {
  		.name			= "cpu:bringup",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1272
1273
  		.startup.single		= bringup_cpu,
  		.teardown.single	= NULL,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1274
  		.cant_stop		= true,
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1275
  	},
31487f832   Richard Weinberger   smp/cfd: Convert ...
1276
  	[CPUHP_AP_SMPCFD_DYING] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1277
  		.name			= "smpcfd:dying",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1278
1279
  		.startup.single		= NULL,
  		.teardown.single	= smpcfd_dying_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1280
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1281
1282
1283
1284
  	/*
  	 * Handled on controll processor until the plugged processor manages
  	 * this itself.
  	 */
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1285
1286
  	[CPUHP_TEARDOWN_CPU] = {
  		.name			= "cpu:teardown",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1287
1288
  		.startup.single		= NULL,
  		.teardown.single	= takedown_cpu,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1289
  		.cant_stop		= true,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1290
  	},
a7c734140   Thomas Gleixner   cpu/hotplug: Keep...
1291
1292
  #else
  	[CPUHP_BRINGUP_CPU] = { },
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1293
  #endif
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1294
  };
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1295
1296
1297
  /* Application processor state steps */
  static struct cpuhp_step cpuhp_ap_states[] = {
  #ifdef CONFIG_SMP
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
  	/* Final state before CPU kills itself */
  	[CPUHP_AP_IDLE_DEAD] = {
  		.name			= "idle:dead",
  	},
  	/*
  	 * Last state before CPU enters the idle loop to die. Transient state
  	 * for synchronization.
  	 */
  	[CPUHP_AP_OFFLINE] = {
  		.name			= "ap:offline",
  		.cant_stop		= true,
  	},
9cf7243d5   Thomas Gleixner   sched: Make set_c...
1310
1311
1312
  	/* First state is scheduler control. Interrupts are disabled */
  	[CPUHP_AP_SCHED_STARTING] = {
  		.name			= "sched:starting",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1313
1314
  		.startup.single		= sched_cpu_starting,
  		.teardown.single	= sched_cpu_dying,
9cf7243d5   Thomas Gleixner   sched: Make set_c...
1315
  	},
4df837425   Thomas Gleixner   rcu: Convert rcut...
1316
  	[CPUHP_AP_RCUTREE_DYING] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1317
  		.name			= "RCU/tree:dying",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1318
1319
  		.startup.single		= NULL,
  		.teardown.single	= rcutree_dying_cpu,
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1320
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1321
1322
1323
1324
1325
1326
  	/* Entry state on starting. Interrupts enabled from here on. Transient
  	 * state for synchronsization */
  	[CPUHP_AP_ONLINE] = {
  		.name			= "ap:online",
  	},
  	/* Handle smpboot threads park/unpark */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1327
  	[CPUHP_AP_SMPBOOT_THREADS] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1328
  		.name			= "smpboot/threads:online",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1329
1330
  		.startup.single		= smpboot_unpark_threads,
  		.teardown.single	= NULL,
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1331
  	},
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1332
  	[CPUHP_AP_PERF_ONLINE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1333
1334
1335
  		.name			= "perf:online",
  		.startup.single		= perf_event_init_cpu,
  		.teardown.single	= perf_event_exit_cpu,
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1336
  	},
7ee681b25   Thomas Gleixner   workqueue: Conver...
1337
  	[CPUHP_AP_WORKQUEUE_ONLINE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1338
1339
1340
  		.name			= "workqueue:online",
  		.startup.single		= workqueue_online_cpu,
  		.teardown.single	= workqueue_offline_cpu,
7ee681b25   Thomas Gleixner   workqueue: Conver...
1341
  	},
4df837425   Thomas Gleixner   rcu: Convert rcut...
1342
  	[CPUHP_AP_RCUTREE_ONLINE] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1343
  		.name			= "RCU/tree:online",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1344
1345
  		.startup.single		= rcutree_online_cpu,
  		.teardown.single	= rcutree_offline_cpu,
4df837425   Thomas Gleixner   rcu: Convert rcut...
1346
  	},
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1347

d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1348
1349
1350
1351
  	/*
  	 * Online/down_prepare notifiers. Will be removed once the notifiers
  	 * are converted to states.
  	 */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1352
1353
  	[CPUHP_AP_NOTIFY_ONLINE] = {
  		.name			= "notify:online",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1354
1355
  		.startup.single		= notify_online,
  		.teardown.single	= notify_down_prepare,
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
1356
  		.skip_onerr		= true,
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1357
  	},
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1358
  #endif
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1359
1360
1361
  	/*
  	 * The dynamically registered state space is here
  	 */
aaddd7d1c   Thomas Gleixner   sched/hotplug: Ma...
1362
1363
1364
1365
  #ifdef CONFIG_SMP
  	/* Last state is scheduler control setting the cpu active */
  	[CPUHP_AP_ACTIVE] = {
  		.name			= "sched:active",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1366
1367
  		.startup.single		= sched_cpu_activate,
  		.teardown.single	= sched_cpu_deactivate,
aaddd7d1c   Thomas Gleixner   sched/hotplug: Ma...
1368
1369
  	},
  #endif
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1370
  	/* CPU is fully up and running. */
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1371
1372
  	[CPUHP_ONLINE] = {
  		.name			= "online",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1373
1374
  		.startup.single		= NULL,
  		.teardown.single	= NULL,
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1375
1376
  	},
  };
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1377
1378
1379
1380
1381
1382
1383
  /* Sanity check for callbacks */
  static int cpuhp_cb_check(enum cpuhp_state state)
  {
  	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
  		return -EINVAL;
  	return 0;
  }
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1384
1385
1386
  static void cpuhp_store_callbacks(enum cpuhp_state state,
  				  const char *name,
  				  int (*startup)(unsigned int cpu),
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1387
1388
  				  int (*teardown)(unsigned int cpu),
  				  bool multi_instance)
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1389
1390
1391
1392
1393
1394
  {
  	/* (Un)Install the callbacks for further cpu hotplug operations */
  	struct cpuhp_step *sp;
  
  	mutex_lock(&cpuhp_state_mutex);
  	sp = cpuhp_get_step(state);
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1395
1396
  	sp->startup.single = startup;
  	sp->teardown.single = teardown;
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1397
  	sp->name = name;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1398
1399
  	sp->multi_instance = multi_instance;
  	INIT_HLIST_HEAD(&sp->list);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1400
1401
1402
1403
1404
  	mutex_unlock(&cpuhp_state_mutex);
  }
  
  static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
  {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1405
  	return cpuhp_get_step(state)->teardown.single;
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1406
  }
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1407
1408
1409
1410
  /*
   * Call the startup/teardown function for a step either on the AP or
   * on the current CPU.
   */
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1411
1412
  static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
  			    struct hlist_node *node)
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1413
  {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
1414
  	struct cpuhp_step *sp = cpuhp_get_step(state);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1415
  	int ret;
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1416
1417
  	if ((bringup && !sp->startup.single) ||
  	    (!bringup && !sp->teardown.single))
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1418
  		return 0;
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1419
1420
1421
1422
  	/*
  	 * The non AP bound callbacks can fail on bringup. On teardown
  	 * e.g. module removal we crash for now.
  	 */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1423
1424
  #ifdef CONFIG_SMP
  	if (cpuhp_is_ap_state(state))
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1425
  		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1426
  	else
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1427
  		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1428
  #else
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1429
  	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1430
  #endif
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
  	BUG_ON(ret && !bringup);
  	return ret;
  }
  
  /*
   * Called from __cpuhp_setup_state on a recoverable failure.
   *
   * Note: The teardown callbacks for rollback are not allowed to fail!
   */
  static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1441
  				   struct hlist_node *node)
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1442
1443
  {
  	int cpu;
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
  	/* Roll back the already executed steps on the other cpus */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpu >= failedcpu)
  			break;
  
  		/* Did we invoke the startup call on that cpu ? */
  		if (cpustate >= state)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1454
  			cpuhp_issue_call(cpu, state, false, node);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
  	}
  }
  
  /*
   * Returns a free for dynamic slot assignment of the Online state. The states
   * are protected by the cpuhp_slot_states mutex and an empty slot is identified
   * by having no name assigned.
   */
  static int cpuhp_reserve_state(enum cpuhp_state state)
  {
  	enum cpuhp_state i;
  
  	mutex_lock(&cpuhp_state_mutex);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1468
1469
  	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
  		if (cpuhp_ap_states[i].name)
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1470
  			continue;
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1471
  		cpuhp_ap_states[i].name = "Reserved";
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1472
1473
1474
1475
1476
1477
1478
1479
  		mutex_unlock(&cpuhp_state_mutex);
  		return i;
  	}
  	mutex_unlock(&cpuhp_state_mutex);
  	WARN(1, "No more dynamic states available for CPU hotplug
  ");
  	return -ENOSPC;
  }
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
  int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
  			       bool invoke)
  {
  	struct cpuhp_step *sp;
  	int cpu;
  	int ret;
  
  	sp = cpuhp_get_step(state);
  	if (sp->multi_instance == false)
  		return -EINVAL;
  
  	get_online_cpus();
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1492
  	if (!invoke || !sp->startup.multi)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
  		goto add_node;
  
  	/*
  	 * Try to call the startup callback for each present cpu
  	 * depending on the hotplug state of the cpu.
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate < state)
  			continue;
  
  		ret = cpuhp_issue_call(cpu, state, true, node);
  		if (ret) {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1508
  			if (sp->teardown.multi)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
  				cpuhp_rollback_install(cpu, state, node);
  			goto err;
  		}
  	}
  add_node:
  	ret = 0;
  	mutex_lock(&cpuhp_state_mutex);
  	hlist_add_head(node, &sp->list);
  	mutex_unlock(&cpuhp_state_mutex);
  
  err:
  	put_online_cpus();
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
  /**
   * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
   * @state:	The state to setup
   * @invoke:	If true, the startup function is invoked for cpus where
   *		cpu state >= @state
   * @startup:	startup callback function
   * @teardown:	teardown callback function
   *
   * Returns 0 if successful, otherwise a proper error code
   */
  int __cpuhp_setup_state(enum cpuhp_state state,
  			const char *name, bool invoke,
  			int (*startup)(unsigned int cpu),
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1537
1538
  			int (*teardown)(unsigned int cpu),
  			bool multi_instance)
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
  {
  	int cpu, ret = 0;
  	int dyn_state = 0;
  
  	if (cpuhp_cb_check(state) || !name)
  		return -EINVAL;
  
  	get_online_cpus();
  
  	/* currently assignments for the ONLINE state are possible */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
1549
  	if (state == CPUHP_AP_ONLINE_DYN) {
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1550
1551
1552
1553
1554
1555
  		dyn_state = 1;
  		ret = cpuhp_reserve_state(state);
  		if (ret < 0)
  			goto out;
  		state = ret;
  	}
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1556
  	cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
  
  	if (!invoke || !startup)
  		goto out;
  
  	/*
  	 * Try to call the startup callback for each present cpu
  	 * depending on the hotplug state of the cpu.
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate < state)
  			continue;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1571
  		ret = cpuhp_issue_call(cpu, state, true, NULL);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1572
  		if (ret) {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
1573
  			if (teardown)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1574
1575
  				cpuhp_rollback_install(cpu, state, NULL);
  			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
  			goto out;
  		}
  	}
  out:
  	put_online_cpus();
  	if (!ret && dyn_state)
  		return state;
  	return ret;
  }
  EXPORT_SYMBOL(__cpuhp_setup_state);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
  int __cpuhp_state_remove_instance(enum cpuhp_state state,
  				  struct hlist_node *node, bool invoke)
  {
  	struct cpuhp_step *sp = cpuhp_get_step(state);
  	int cpu;
  
  	BUG_ON(cpuhp_cb_check(state));
  
  	if (!sp->multi_instance)
  		return -EINVAL;
  
  	get_online_cpus();
  	if (!invoke || !cpuhp_get_teardown_cb(state))
  		goto remove;
  	/*
  	 * Call the teardown callback for each present cpu depending
  	 * on the hotplug state of the cpu. This function is not
  	 * allowed to fail currently!
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate >= state)
  			cpuhp_issue_call(cpu, state, false, node);
  	}
  
  remove:
  	mutex_lock(&cpuhp_state_mutex);
  	hlist_del(node);
  	mutex_unlock(&cpuhp_state_mutex);
  	put_online_cpus();
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
  /**
   * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
   * @state:	The state to remove
   * @invoke:	If true, the teardown function is invoked for cpus where
   *		cpu state >= @state
   *
   * The teardown callback is currently not allowed to fail. Think
   * about module removal!
   */
  void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
  {
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1633
  	struct cpuhp_step *sp = cpuhp_get_step(state);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1634
1635
1636
1637
1638
  	int cpu;
  
  	BUG_ON(cpuhp_cb_check(state));
  
  	get_online_cpus();
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1639
1640
1641
1642
1643
1644
1645
  	if (sp->multi_instance) {
  		WARN(!hlist_empty(&sp->list),
  		     "Error: Removing state %d which has instances left.
  ",
  		     state);
  		goto remove;
  	}
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
1646
  	if (!invoke || !cpuhp_get_teardown_cb(state))
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
  		goto remove;
  
  	/*
  	 * Call the teardown callback for each present cpu depending
  	 * on the hotplug state of the cpu. This function is not
  	 * allowed to fail currently!
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate >= state)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1659
  			cpuhp_issue_call(cpu, state, false, NULL);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1660
1661
  	}
  remove:
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1662
  	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1663
1664
1665
  	put_online_cpus();
  }
  EXPORT_SYMBOL(__cpuhp_remove_state);
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
  #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
  static ssize_t show_cpuhp_state(struct device *dev,
  				struct device_attribute *attr, char *buf)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  
  	return sprintf(buf, "%d
  ", st->state);
  }
  static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
  static ssize_t write_cpuhp_target(struct device *dev,
  				  struct device_attribute *attr,
  				  const char *buf, size_t count)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  	struct cpuhp_step *sp;
  	int target, ret;
  
  	ret = kstrtoint(buf, 10, &target);
  	if (ret)
  		return ret;
  
  #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
  	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
  		return -EINVAL;
  #else
  	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
  		return -EINVAL;
  #endif
  
  	ret = lock_device_hotplug_sysfs();
  	if (ret)
  		return ret;
  
  	mutex_lock(&cpuhp_state_mutex);
  	sp = cpuhp_get_step(target);
  	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
  	mutex_unlock(&cpuhp_state_mutex);
  	if (ret)
  		return ret;
  
  	if (st->state < target)
  		ret = do_cpu_up(dev->id, target);
  	else
  		ret = do_cpu_down(dev->id, target);
  
  	unlock_device_hotplug();
  	return ret ? ret : count;
  }
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1715
1716
1717
1718
1719
1720
1721
1722
  static ssize_t show_cpuhp_target(struct device *dev,
  				 struct device_attribute *attr, char *buf)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  
  	return sprintf(buf, "%d
  ", st->target);
  }
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1723
  static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
  
  static struct attribute *cpuhp_cpu_attrs[] = {
  	&dev_attr_state.attr,
  	&dev_attr_target.attr,
  	NULL
  };
  
  static struct attribute_group cpuhp_cpu_attr_group = {
  	.attrs = cpuhp_cpu_attrs,
  	.name = "hotplug",
  	NULL
  };
  
  static ssize_t show_cpuhp_states(struct device *dev,
  				 struct device_attribute *attr, char *buf)
  {
  	ssize_t cur, res = 0;
  	int i;
  
  	mutex_lock(&cpuhp_state_mutex);
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1744
  	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
  		struct cpuhp_step *sp = cpuhp_get_step(i);
  
  		if (sp->name) {
  			cur = sprintf(buf, "%3d: %s
  ", i, sp->name);
  			buf += cur;
  			res += cur;
  		}
  	}
  	mutex_unlock(&cpuhp_state_mutex);
  	return res;
  }
  static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
  
  static struct attribute *cpuhp_cpu_root_attrs[] = {
  	&dev_attr_states.attr,
  	NULL
  };
  
  static struct attribute_group cpuhp_cpu_root_attr_group = {
  	.attrs = cpuhp_cpu_root_attrs,
  	.name = "hotplug",
  	NULL
  };
  
  static int __init cpuhp_sysfs_init(void)
  {
  	int cpu, ret;
  
  	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
  				 &cpuhp_cpu_root_attr_group);
  	if (ret)
  		return ret;
  
  	for_each_possible_cpu(cpu) {
  		struct device *dev = get_cpu_device(cpu);
  
  		if (!dev)
  			continue;
  		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
  		if (ret)
  			return ret;
  	}
  	return 0;
  }
  device_initcall(cpuhp_sysfs_init);
  #endif
e56b3bc79   Linus Torvalds   cpu masks: optimi...
1792
1793
1794
1795
  /*
   * cpu_bit_bitmap[] is a special, "compressed" data structure that
   * represents all NR_CPUS bits binary values of 1<<nr.
   *
e0b582ec5   Rusty Russell   cpumask: convert ...
1796
   * It is used by cpumask_of() to get a constant address to a CPU
e56b3bc79   Linus Torvalds   cpu masks: optimi...
1797
1798
   * mask value that has a single bit set only.
   */
b8d317d10   Mike Travis   cpumask: make cpu...
1799

e56b3bc79   Linus Torvalds   cpu masks: optimi...
1800
  /* cpu_bit_bitmap[0] is empty - so we can back into it */
4d51985e4   Michael Rodriguez   kernel/cpu.c: fix...
1801
  #define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
e56b3bc79   Linus Torvalds   cpu masks: optimi...
1802
1803
1804
  #define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
  #define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
  #define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
b8d317d10   Mike Travis   cpumask: make cpu...
1805

e56b3bc79   Linus Torvalds   cpu masks: optimi...
1806
1807
1808
1809
1810
1811
1812
  const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
  
  	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
  	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
  #if BITS_PER_LONG > 32
  	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
  	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
b8d317d10   Mike Travis   cpumask: make cpu...
1813
1814
  #endif
  };
e56b3bc79   Linus Torvalds   cpu masks: optimi...
1815
  EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
2d3854a37   Rusty Russell   cpumask: introduc...
1816
1817
1818
  
  const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
  EXPORT_SYMBOL(cpu_all_bits);
b3199c025   Rusty Russell   cpumask: switch o...
1819
1820
  
  #ifdef CONFIG_INIT_ALL_POSSIBLE
4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1821
  struct cpumask __cpu_possible_mask __read_mostly
c4c54dd1c   Rasmus Villemoes   kernel/cpu.c: cha...
1822
  	= {CPU_BITS_ALL};
b3199c025   Rusty Russell   cpumask: switch o...
1823
  #else
4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1824
  struct cpumask __cpu_possible_mask __read_mostly;
b3199c025   Rusty Russell   cpumask: switch o...
1825
  #endif
4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1826
  EXPORT_SYMBOL(__cpu_possible_mask);
b3199c025   Rusty Russell   cpumask: switch o...
1827

4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1828
1829
  struct cpumask __cpu_online_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_online_mask);
b3199c025   Rusty Russell   cpumask: switch o...
1830

4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1831
1832
  struct cpumask __cpu_present_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_present_mask);
b3199c025   Rusty Russell   cpumask: switch o...
1833

4b804c85d   Rasmus Villemoes   kernel/cpu.c: exp...
1834
1835
  struct cpumask __cpu_active_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_active_mask);
3fa415206   Rusty Russell   cpumask: make set...
1836

3fa415206   Rusty Russell   cpumask: make set...
1837
1838
  void init_cpu_present(const struct cpumask *src)
  {
c4c54dd1c   Rasmus Villemoes   kernel/cpu.c: cha...
1839
  	cpumask_copy(&__cpu_present_mask, src);
3fa415206   Rusty Russell   cpumask: make set...
1840
1841
1842
1843
  }
  
  void init_cpu_possible(const struct cpumask *src)
  {
c4c54dd1c   Rasmus Villemoes   kernel/cpu.c: cha...
1844
  	cpumask_copy(&__cpu_possible_mask, src);
3fa415206   Rusty Russell   cpumask: make set...
1845
1846
1847
1848
  }
  
  void init_cpu_online(const struct cpumask *src)
  {
c4c54dd1c   Rasmus Villemoes   kernel/cpu.c: cha...
1849
  	cpumask_copy(&__cpu_online_mask, src);
3fa415206   Rusty Russell   cpumask: make set...
1850
  }
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
  
  /*
   * Activate the first processor.
   */
  void __init boot_cpu_init(void)
  {
  	int cpu = smp_processor_id();
  
  	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
  	set_cpu_online(cpu, true);
  	set_cpu_active(cpu, true);
  	set_cpu_present(cpu, true);
  	set_cpu_possible(cpu, true);
  }
  
  /*
   * Must be called _AFTER_ setting up the per_cpu areas
   */
  void __init boot_cpu_state_init(void)
  {
  	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
  }
ac9c69731   Todd Poynor   Move x86_64 idle ...
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
  
  static ATOMIC_NOTIFIER_HEAD(idle_notifier);
  
  void idle_notifier_register(struct notifier_block *n)
  {
  	atomic_notifier_chain_register(&idle_notifier, n);
  }
  EXPORT_SYMBOL_GPL(idle_notifier_register);
  
  void idle_notifier_unregister(struct notifier_block *n)
  {
  	atomic_notifier_chain_unregister(&idle_notifier, n);
  }
  EXPORT_SYMBOL_GPL(idle_notifier_unregister);
  
  void idle_notifier_call_chain(unsigned long val)
  {
  	atomic_notifier_call_chain(&idle_notifier, val, NULL);
  }
  EXPORT_SYMBOL_GPL(idle_notifier_call_chain);