Blame view

kernel/cpu.c 43.9 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
9
10
11
12
  /* CPU control.
   * (C) 2001, 2002, 2003, 2004 Rusty Russell
   *
   * This code is licenced under the GPL.
   */
  #include <linux/proc_fs.h>
  #include <linux/smp.h>
  #include <linux/init.h>
  #include <linux/notifier.h>
  #include <linux/sched.h>
  #include <linux/unistd.h>
  #include <linux/cpu.h>
cb79295e2   Anton Vorontsov   cpu: introduce cl...
13
14
  #include <linux/oom.h>
  #include <linux/rcupdate.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
15
  #include <linux/export.h>
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
16
  #include <linux/bug.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
17
18
  #include <linux/kthread.h>
  #include <linux/stop_machine.h>
81615b624   Ingo Molnar   [PATCH] Convert k...
19
  #include <linux/mutex.h>
5a0e3ad6a   Tejun Heo   include cleanup: ...
20
  #include <linux/gfp.h>
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
21
  #include <linux/suspend.h>
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
22
  #include <linux/lockdep.h>
345527b1e   Preeti U Murthy   clockevents: Fix ...
23
  #include <linux/tick.h>
a89941816   Thomas Gleixner   hotplug: Prevent ...
24
  #include <linux/irq.h>
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
25
  #include <linux/smpboot.h>
e6d4989a9   Richard Weinberger   relayfs: Convert ...
26
  #include <linux/relay.h>
6731d4f12   Sebastian Andrzej Siewior   slab: Convert to ...
27
  #include <linux/slab.h>
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
28

bb3632c61   Todd E Brandt   PM / sleep: trace...
29
  #include <trace/events/power.h>
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
30
31
  #define CREATE_TRACE_POINTS
  #include <trace/events/cpuhp.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
32

38498a67a   Thomas Gleixner   smp: Add generic ...
33
  #include "smpboot.h"
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
34
35
36
37
  /**
   * cpuhp_cpu_state - Per cpu hotplug state storage
   * @state:	The current cpu state
   * @target:	The target state
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
38
39
   * @thread:	Pointer to the hotplug thread
   * @should_run:	Thread should execute
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
40
   * @rollback:	Perform a rollback
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
41
42
43
   * @single:	Single callback invocation
   * @bringup:	Single callback bringup or teardown selector
   * @cb_state:	The state for a single callback (install/uninstall)
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
44
45
   * @result:	Result of the operation
   * @done:	Signal completion to the issuer of the task
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
46
47
48
49
   */
  struct cpuhp_cpu_state {
  	enum cpuhp_state	state;
  	enum cpuhp_state	target;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
50
51
52
  #ifdef CONFIG_SMP
  	struct task_struct	*thread;
  	bool			should_run;
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
53
  	bool			rollback;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
54
55
  	bool			single;
  	bool			bringup;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
56
  	struct hlist_node	*node;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
57
  	enum cpuhp_state	cb_state;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
58
59
60
  	int			result;
  	struct completion	done;
  #endif
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
61
62
63
64
65
66
67
68
69
70
71
  };
  
  static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
  
  /**
   * cpuhp_step - Hotplug state machine step
   * @name:	Name of the step
   * @startup:	Startup function of the step
   * @teardown:	Teardown function of the step
   * @skip_onerr:	Do not invoke the functions on error rollback
   *		Will go away once the notifiers	are gone
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
72
   * @cant_stop:	Bringup/teardown can't be stopped at this step
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
73
74
   */
  struct cpuhp_step {
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
75
76
  	const char		*name;
  	union {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
77
78
79
80
  		int		(*single)(unsigned int cpu);
  		int		(*multi)(unsigned int cpu,
  					 struct hlist_node *node);
  	} startup;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
81
  	union {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
82
83
84
85
  		int		(*single)(unsigned int cpu);
  		int		(*multi)(unsigned int cpu,
  					 struct hlist_node *node);
  	} teardown;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
86
87
88
89
  	struct hlist_head	list;
  	bool			skip_onerr;
  	bool			cant_stop;
  	bool			multi_instance;
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
90
  };
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
91
  static DEFINE_MUTEX(cpuhp_state_mutex);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
92
  static struct cpuhp_step cpuhp_bp_states[];
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
93
  static struct cpuhp_step cpuhp_ap_states[];
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
94

a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
  static bool cpuhp_is_ap_state(enum cpuhp_state state)
  {
  	/*
  	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
  	 * purposes as that state is handled explicitly in cpu_down.
  	 */
  	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
  }
  
  static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
  {
  	struct cpuhp_step *sp;
  
  	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
  	return sp + state;
  }
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
111
112
113
114
  /**
   * cpuhp_invoke_callback _ Invoke the callbacks for a given state
   * @cpu:	The cpu for which the callback should be invoked
   * @step:	The step in the state machine
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
115
   * @bringup:	True if the bringup callback should be invoked
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
116
   *
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
117
   * Called from cpu hotplug and from the state register machinery.
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
118
   */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
119
  static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
120
  				 bool bringup, struct hlist_node *node)
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
121
122
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
123
  	struct cpuhp_step *step = cpuhp_get_step(state);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
124
125
126
127
128
  	int (*cbm)(unsigned int cpu, struct hlist_node *node);
  	int (*cb)(unsigned int cpu);
  	int ret, cnt;
  
  	if (!step->multi_instance) {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
129
  		cb = bringup ? step->startup.single : step->teardown.single;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
130
131
  		if (!cb)
  			return 0;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
132
  		trace_cpuhp_enter(cpu, st->target, state, cb);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
133
  		ret = cb(cpu);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
134
  		trace_cpuhp_exit(cpu, st->state, state, ret);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
135
136
  		return ret;
  	}
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
137
  	cbm = bringup ? step->startup.multi : step->teardown.multi;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
  	if (!cbm)
  		return 0;
  
  	/* Single invocation for instance add/remove */
  	if (node) {
  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  		ret = cbm(cpu, node);
  		trace_cpuhp_exit(cpu, st->state, state, ret);
  		return ret;
  	}
  
  	/* State transition. Invoke on all instances */
  	cnt = 0;
  	hlist_for_each(node, &step->list) {
  		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
  		ret = cbm(cpu, node);
  		trace_cpuhp_exit(cpu, st->state, state, ret);
  		if (ret)
  			goto err;
  		cnt++;
  	}
  	return 0;
  err:
  	/* Rollback the instances if one failed */
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
162
  	cbm = !bringup ? step->startup.multi : step->teardown.multi;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
163
164
165
166
167
168
169
  	if (!cbm)
  		return ret;
  
  	hlist_for_each(node, &step->list) {
  		if (!cnt--)
  			break;
  		cbm(cpu, node);
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
170
171
172
  	}
  	return ret;
  }
98a79d6a5   Rusty Russell   cpumask: centrali...
173
  #ifdef CONFIG_SMP
b3199c025   Rusty Russell   cpumask: switch o...
174
  /* Serializes the updates to cpu_online_mask, cpu_present_mask */
aa9538777   Linus Torvalds   cpu hotplug: simp...
175
  static DEFINE_MUTEX(cpu_add_remove_lock);
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
176
177
  bool cpuhp_tasks_frozen;
  EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
178

79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
179
  /*
93ae4f978   Srivatsa S. Bhat   CPU hotplug: Prov...
180
181
   * The following two APIs (cpu_maps_update_begin/done) must be used when
   * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
182
183
184
185
186
187
188
189
190
191
   */
  void cpu_maps_update_begin(void)
  {
  	mutex_lock(&cpu_add_remove_lock);
  }
  
  void cpu_maps_update_done(void)
  {
  	mutex_unlock(&cpu_add_remove_lock);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
192

e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
193
194
195
196
  /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
   * Should always be manipulated under cpu_add_remove_lock
   */
  static int cpu_hotplug_disabled;
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
197
  #ifdef CONFIG_HOTPLUG_CPU
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
198
199
  static struct {
  	struct task_struct *active_writer;
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
200
201
202
203
  	/* wait queue to wake up the active_writer */
  	wait_queue_head_t wq;
  	/* verifies that no writer will get active while readers are active */
  	struct mutex lock;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
204
205
206
207
  	/*
  	 * Also blocks the new readers during
  	 * an ongoing cpu hotplug operation.
  	 */
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
208
  	atomic_t refcount;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
209
210
211
212
  
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  	struct lockdep_map dep_map;
  #endif
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
213
214
  } cpu_hotplug = {
  	.active_writer = NULL,
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
215
  	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
216
  	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
217
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
a705e07b9   Joonas Lahtinen   cpu/hotplug: Use ...
218
  	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
219
  #endif
31950eb66   Linus Torvalds   mm/init: cpu_hotp...
220
  };
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
221

a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
222
223
  /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
  #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
dd56af42b   Paul E. McKenney   rcu: Eliminate de...
224
225
  #define cpuhp_lock_acquire_tryread() \
  				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
226
227
  #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
  #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
62db99f47   Paul E. McKenney   cpu: Avoid puts_p...
228

86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
229
  void get_online_cpus(void)
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
230
  {
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
231
232
  	might_sleep();
  	if (cpu_hotplug.active_writer == current)
aa9538777   Linus Torvalds   cpu hotplug: simp...
233
  		return;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
234
  	cpuhp_lock_acquire_read();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
235
  	mutex_lock(&cpu_hotplug.lock);
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
236
  	atomic_inc(&cpu_hotplug.refcount);
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
237
  	mutex_unlock(&cpu_hotplug.lock);
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
238
  }
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
239
  EXPORT_SYMBOL_GPL(get_online_cpus);
90d45d17f   Ashok Raj   [PATCH] cpu hotpl...
240

86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
241
  void put_online_cpus(void)
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
242
  {
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
243
  	int refcount;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
244
  	if (cpu_hotplug.active_writer == current)
aa9538777   Linus Torvalds   cpu hotplug: simp...
245
  		return;
075663d19   Srivatsa S. Bhat   CPU hotplug, debu...
246

87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
247
248
249
250
251
252
  	refcount = atomic_dec_return(&cpu_hotplug.refcount);
  	if (WARN_ON(refcount < 0)) /* try to fix things up */
  		atomic_inc(&cpu_hotplug.refcount);
  
  	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
  		wake_up(&cpu_hotplug.wq);
075663d19   Srivatsa S. Bhat   CPU hotplug, debu...
253

a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
254
  	cpuhp_lock_release();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
255

a9d9baa1e   Ashok Raj   [PATCH] clean up ...
256
  }
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
257
  EXPORT_SYMBOL_GPL(put_online_cpus);
a9d9baa1e   Ashok Raj   [PATCH] clean up ...
258

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
259
260
261
262
263
264
265
  /*
   * This ensures that the hotplug operation can begin only when the
   * refcount goes to zero.
   *
   * Note that during a cpu-hotplug operation, the new readers, if any,
   * will be blocked by the cpu_hotplug.lock
   *
d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
266
267
   * Since cpu_hotplug_begin() is always called after invoking
   * cpu_maps_update_begin(), we can be sure that only one writer is active.
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
268
269
270
271
272
273
274
275
276
277
   *
   * Note that theoretically, there is a possibility of a livelock:
   * - Refcount goes to zero, last reader wakes up the sleeping
   *   writer.
   * - Last reader unlocks the cpu_hotplug.lock.
   * - A new reader arrives at this moment, bumps up the refcount.
   * - The writer acquires the cpu_hotplug.lock finds the refcount
   *   non zero and goes to sleep again.
   *
   * However, this is very difficult to achieve in practice since
86ef5c9a8   Gautham R Shenoy   cpu-hotplug: repl...
278
   * get_online_cpus() not an api which is called all that often.
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
279
280
   *
   */
b9d10be7a   Toshi Kani   ACPI / processor:...
281
  void cpu_hotplug_begin(void)
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
282
  {
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
283
  	DEFINE_WAIT(wait);
d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
284

87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
285
  	cpu_hotplug.active_writer = current;
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
286
  	cpuhp_lock_acquire();
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
287

d2ba7e2ae   Oleg Nesterov   simplify cpu_hotp...
288
289
  	for (;;) {
  		mutex_lock(&cpu_hotplug.lock);
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
290
291
292
  		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
  		if (likely(!atomic_read(&cpu_hotplug.refcount)))
  				break;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
293
294
  		mutex_unlock(&cpu_hotplug.lock);
  		schedule();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
295
  	}
87af9e7ff   David Hildenbrand   hotplugcpu: Avoid...
296
  	finish_wait(&cpu_hotplug.wq, &wait);
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
297
  }
b9d10be7a   Toshi Kani   ACPI / processor:...
298
  void cpu_hotplug_done(void)
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
299
300
301
  {
  	cpu_hotplug.active_writer = NULL;
  	mutex_unlock(&cpu_hotplug.lock);
a19423b98   Gautham R. Shenoy   CPU hotplug: Add ...
302
  	cpuhp_lock_release();
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
303
  }
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
304

16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
305
306
307
308
309
310
311
312
313
314
  /*
   * Wait for currently running CPU hotplug operations to complete (if any) and
   * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
   * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
   * hotplug path before performing hotplug operations. So acquiring that lock
   * guarantees mutual exclusion from any currently running hotplug operations.
   */
  void cpu_hotplug_disable(void)
  {
  	cpu_maps_update_begin();
89af7ba57   Vitaly Kuznetsov   cpu-hotplug: conv...
315
  	cpu_hotplug_disabled++;
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
316
317
  	cpu_maps_update_done();
  }
32145c467   Vitaly Kuznetsov   cpu-hotplug: expo...
318
  EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
319

01b411590   Lianwei Wang   cpu/hotplug: Hand...
320
321
322
323
324
325
326
  static void __cpu_hotplug_enable(void)
  {
  	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable
  "))
  		return;
  	cpu_hotplug_disabled--;
  }
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
327
328
329
  void cpu_hotplug_enable(void)
  {
  	cpu_maps_update_begin();
01b411590   Lianwei Wang   cpu/hotplug: Hand...
330
  	__cpu_hotplug_enable();
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
331
332
  	cpu_maps_update_done();
  }
32145c467   Vitaly Kuznetsov   cpu-hotplug: expo...
333
  EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
b9d10be7a   Toshi Kani   ACPI / processor:...
334
  #endif	/* CONFIG_HOTPLUG_CPU */
79a6cdeb7   Lai Jiangshan   cpuhotplug: do no...
335

ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
336
  /* Notifier wrappers for transitioning to state machine */
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
337

8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
338
339
340
341
342
343
344
  static int bringup_wait_for_ap(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  	wait_for_completion(&st->done);
  	return st->result;
  }
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
345
346
347
348
  static int bringup_cpu(unsigned int cpu)
  {
  	struct task_struct *idle = idle_thread_get(cpu);
  	int ret;
aa877175e   Boris Ostrovsky   cpu/hotplug: Prev...
349
350
351
352
353
354
  	/*
  	 * Some architectures have to walk the irq descriptors to
  	 * setup the vector space for the cpu which comes online.
  	 * Prevent irq alloc/free across the bringup.
  	 */
  	irq_lock_sparse();
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
355
356
  	/* Arch-specific enabling code. */
  	ret = __cpu_up(cpu, idle);
aa877175e   Boris Ostrovsky   cpu/hotplug: Prev...
357
  	irq_unlock_sparse();
530e9b76a   Thomas Gleixner   cpu/hotplug: Remo...
358
  	if (ret)
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
359
  		return ret;
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
360
  	ret = bringup_wait_for_ap(cpu);
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
361
  	BUG_ON(!cpu_online(cpu));
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
362
  	return ret;
ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
363
  }
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
364
365
366
  /*
   * Hotplug state machine related functions
   */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
367
  static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
368
369
  {
  	for (st->state++; st->state < st->target; st->state++) {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
370
  		struct cpuhp_step *step = cpuhp_get_step(st->state);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
371
372
  
  		if (!step->skip_onerr)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
373
  			cpuhp_invoke_callback(cpu, st->state, true, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
374
375
376
377
  	}
  }
  
  static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
378
  				enum cpuhp_state target)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
379
380
381
382
383
  {
  	enum cpuhp_state prev_state = st->state;
  	int ret = 0;
  
  	for (; st->state > target; st->state--) {
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
384
  		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
385
386
  		if (ret) {
  			st->target = prev_state;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
387
  			undo_cpu_down(cpu, st);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
388
389
390
391
392
  			break;
  		}
  	}
  	return ret;
  }
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
393
  static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
394
395
  {
  	for (st->state--; st->state > st->target; st->state--) {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
396
  		struct cpuhp_step *step = cpuhp_get_step(st->state);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
397
398
  
  		if (!step->skip_onerr)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
399
  			cpuhp_invoke_callback(cpu, st->state, false, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
400
401
402
403
  	}
  }
  
  static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
404
  			      enum cpuhp_state target)
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
405
406
407
408
409
  {
  	enum cpuhp_state prev_state = st->state;
  	int ret = 0;
  
  	while (st->state < target) {
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
410
  		st->state++;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
411
  		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
412
413
  		if (ret) {
  			st->target = prev_state;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
414
  			undo_cpu_up(cpu, st);
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
415
416
417
418
419
  			break;
  		}
  	}
  	return ret;
  }
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
  /*
   * The cpu hotplug threads manage the bringup and teardown of the cpus
   */
  static void cpuhp_create(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  	init_completion(&st->done);
  }
  
/* smpboot hook: report whether the hotplug thread has pending work */
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
  
  /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
  static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
  {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
440
  	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
441

a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
442
  	return cpuhp_down_callbacks(cpu, st, target);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
443
444
445
446
447
  }
  
  /* Execute the online startup callbacks. Used to be CPU_ONLINE */
  static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
  {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
448
  	return cpuhp_up_callbacks(cpu, st, st->target);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
  }
  
  /*
   * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
   * callbacks when a state gets [un]installed at runtime.
   */
  static void cpuhp_thread_fun(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  	int ret = 0;
  
  	/*
  	 * Paired with the mb() in cpuhp_kick_ap_work and
  	 * cpuhp_invoke_ap_callback, so the work set is consistent visible.
  	 */
  	smp_mb();
  	if (!st->should_run)
  		return;
  
  	st->should_run = false;
  
  	/* Single callback invocation for [un]install ? */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
471
  	if (st->single) {
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
472
473
  		if (st->cb_state < CPUHP_AP_ONLINE) {
  			local_irq_disable();
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
474
  			ret = cpuhp_invoke_callback(cpu, st->cb_state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
475
  						    st->bringup, st->node);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
476
477
  			local_irq_enable();
  		} else {
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
478
  			ret = cpuhp_invoke_callback(cpu, st->cb_state,
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
479
  						    st->bringup, st->node);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
480
  		}
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
481
482
  	} else if (st->rollback) {
  		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
483
  		undo_cpu_down(cpu, st);
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
484
  		st->rollback = false;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
485
  	} else {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
486
  		/* Cannot happen .... */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
487
  		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
488

4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
489
490
491
492
493
494
495
496
497
498
499
  		/* Regular hotplug work */
  		if (st->state < st->target)
  			ret = cpuhp_ap_online(cpu, st);
  		else if (st->state > st->target)
  			ret = cpuhp_ap_offline(cpu, st);
  	}
  	st->result = ret;
  	complete(&st->done);
  }
  
  /* Invoke a single callback on a remote cpu */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
500
  static int
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
501
502
  cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
  			 struct hlist_node *node)
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
503
504
505
506
507
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  	if (!cpu_online(cpu))
  		return 0;
6a4e24518   Thomas Gleixner   cpu/hotplug: Hand...
508
509
510
511
512
  	/*
  	 * If we are up and running, use the hotplug thread. For early calls
  	 * we invoke the thread function directly.
  	 */
  	if (!st->thread)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
513
  		return cpuhp_invoke_callback(cpu, state, bringup, node);
6a4e24518   Thomas Gleixner   cpu/hotplug: Hand...
514

4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
515
  	st->cb_state = state;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
516
517
  	st->single = true;
  	st->bringup = bringup;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
518
  	st->node = node;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
519

4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
520
521
522
523
524
525
526
527
528
529
530
531
  	/*
  	 * Make sure the above stores are visible before should_run becomes
  	 * true. Paired with the mb() above in cpuhp_thread_fun()
  	 */
  	smp_mb();
  	st->should_run = true;
  	wake_up_process(st->thread);
  	wait_for_completion(&st->done);
  	return st->result;
  }
  
  /* Regular hotplug invocation of the AP hotplug thread */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
532
  static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
533
  {
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
534
  	st->result = 0;
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
535
  	st->single = false;
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
536
537
538
539
540
541
542
  	/*
  	 * Make sure the above stores are visible before should_run becomes
  	 * true. Paired with the mb() above in cpuhp_thread_fun()
  	 */
  	smp_mb();
  	st->should_run = true;
  	wake_up_process(st->thread);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
543
544
545
546
547
548
549
550
551
  }
  
  static int cpuhp_kick_ap_work(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	enum cpuhp_state state = st->state;
  
  	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
  	__cpuhp_kick_ap_work(st);
4cb28ced2   Thomas Gleixner   cpu/hotplug: Crea...
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
  	wait_for_completion(&st->done);
  	trace_cpuhp_exit(cpu, st->state, state, st->result);
  	return st->result;
  }
  
/* Descriptor for the per-cpu "cpuhp/%u" hotplug threads (smpboot managed) */
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};
  
/* Register the per-cpu hotplug threads and unpark the current CPU's one */
void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
777c6e0da   Michal Hocko   hotplug: Make reg...
571
  #ifdef CONFIG_HOTPLUG_CPU
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
572
573
574
575
576
577
578
579
580
581
582
583
  /**
   * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
   * @cpu: a CPU id
   *
   * This function walks all processes, finds a valid mm struct for each one and
   * then clears a corresponding bit in mm's cpumask.  While this all sounds
   * trivial, there are various non-obvious corner cases, which this function
   * tries to solve in a safe manner.
   *
   * Also note that the function uses a somewhat relaxed locking scheme, so it may
   * be called only for an already offlined CPU.
   */
cb79295e2   Anton Vorontsov   cpu: introduce cl...
584
585
586
587
588
589
590
591
592
593
594
  void clear_tasks_mm_cpumask(int cpu)
  {
  	struct task_struct *p;
  
  	/*
  	 * This function is called after the cpu is taken down and marked
  	 * offline, so its not like new tasks will ever get this cpu set in
  	 * their mm mask. -- Peter Zijlstra
  	 * Thus, we may use rcu_read_lock() here, instead of grabbing
  	 * full-fledged tasklist_lock.
  	 */
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
595
  	WARN_ON(cpu_online(cpu));
cb79295e2   Anton Vorontsov   cpu: introduce cl...
596
597
598
  	rcu_read_lock();
  	for_each_process(p) {
  		struct task_struct *t;
e4cc2f873   Anton Vorontsov   kernel/cpu.c: doc...
599
600
601
602
  		/*
  		 * Main thread might exit, but other threads may still have
  		 * a valid mm. Find one.
  		 */
cb79295e2   Anton Vorontsov   cpu: introduce cl...
603
604
605
606
607
608
609
610
  		t = find_lock_task_mm(p);
  		if (!t)
  			continue;
  		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
  		task_unlock(t);
  	}
  	rcu_read_unlock();
  }
b728ca060   Kirill Tkhai   sched: Rework che...
611
  static inline void check_for_tasks(int dead_cpu)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
612
  {
b728ca060   Kirill Tkhai   sched: Rework che...
613
  	struct task_struct *g, *p;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
614

a75a6068d   Oleg Nesterov   cpu/hotplug: Read...
615
616
  	read_lock(&tasklist_lock);
  	for_each_process_thread(g, p) {
b728ca060   Kirill Tkhai   sched: Rework che...
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
  		if (!p->on_rq)
  			continue;
  		/*
  		 * We do the check with unlocked task_rq(p)->lock.
  		 * Order the reading to do not warn about a task,
  		 * which was running on this cpu in the past, and
  		 * it's just been woken on another cpu.
  		 */
  		rmb();
  		if (task_cpu(p) != dead_cpu)
  			continue;
  
  		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)
  ",
  			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
a75a6068d   Oleg Nesterov   cpu/hotplug: Read...
632
633
  	}
  	read_unlock(&tasklist_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
634
635
636
  }
  
  /* Take this CPU down. */
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
637
  static int take_cpu_down(void *_param)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
638
  {
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
639
640
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
641
  	int err, cpu = smp_processor_id();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
642

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
643
644
645
  	/* Ensure this CPU doesn't handle any more interrupts. */
  	err = __cpu_disable();
  	if (err < 0)
f37051364   Zwane Mwaikambo   [PATCH] i386 CPU ...
646
  		return err;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
647

a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
648
649
650
651
652
653
  	/*
  	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
  	 * do this step again.
  	 */
  	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
  	st->state--;
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
654
  	/* Invoke the former CPU_DYING callbacks */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
655
  	for (; st->state > target; st->state--)
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
656
  		cpuhp_invoke_callback(cpu, st->state, false, NULL);
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
657

52c063d1a   Thomas Gleixner   clockevents: Make...
658
659
  	/* Give up timekeeping duties */
  	tick_handover_do_timer();
14e568e78   Thomas Gleixner   stop_machine: Use...
660
  	/* Park the stopper thread */
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
661
  	stop_machine_park(cpu);
f37051364   Zwane Mwaikambo   [PATCH] i386 CPU ...
662
  	return 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
663
  }
984581728   Thomas Gleixner   cpu/hotplug: Spli...
664
  static int takedown_cpu(unsigned int cpu)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
665
  {
e69aab131   Thomas Gleixner   cpu/hotplug: Make...
666
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
984581728   Thomas Gleixner   cpu/hotplug: Spli...
667
  	int err;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
668

2a58c527b   Thomas Gleixner   cpu/hotplug: Fix ...
669
  	/* Park the smpboot threads */
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
670
  	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
2a58c527b   Thomas Gleixner   cpu/hotplug: Fix ...
671
  	smpboot_park_threads(cpu);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
672

6acce3ef8   Peter Zijlstra   sched: Remove get...
673
  	/*
a89941816   Thomas Gleixner   hotplug: Prevent ...
674
675
  	 * Prevent irq alloc/free while the dying cpu reorganizes the
  	 * interrupt affinities.
6acce3ef8   Peter Zijlstra   sched: Remove get...
676
  	 */
a89941816   Thomas Gleixner   hotplug: Prevent ...
677
  	irq_lock_sparse();
6acce3ef8   Peter Zijlstra   sched: Remove get...
678

a89941816   Thomas Gleixner   hotplug: Prevent ...
679
680
681
  	/*
  	 * So now all preempt/rcu users must observe !cpu_active().
  	 */
090e77c39   Thomas Gleixner   cpu/hotplug: Rest...
682
  	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
043215875   Rusty Russell   Hotplug CPU: don'...
683
  	if (err) {
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
684
  		/* CPU refused to die */
a89941816   Thomas Gleixner   hotplug: Prevent ...
685
  		irq_unlock_sparse();
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
686
687
  		/* Unpark the hotplug thread so we can rollback there */
  		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
984581728   Thomas Gleixner   cpu/hotplug: Spli...
688
  		return err;
8fa1d7d3b   Satoru Takeuchi   [PATCH] cpu-hotpl...
689
  	}
043215875   Rusty Russell   Hotplug CPU: don'...
690
  	BUG_ON(cpu_online(cpu));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
691

48c5ccae8   Peter Zijlstra   sched: Simplify c...
692
  	/*
ee1e714b9   Thomas Gleixner   cpu/hotplug: Remo...
693
  	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
48c5ccae8   Peter Zijlstra   sched: Simplify c...
694
695
  	 * runnable tasks from the cpu, there's only the idle task left now
  	 * that the migration thread is done doing the stop_machine thing.
51a96c778   Peter Zijlstra   cpu: Remove incor...
696
697
  	 *
  	 * Wait for the stop thread to go away.
48c5ccae8   Peter Zijlstra   sched: Simplify c...
698
  	 */
e69aab131   Thomas Gleixner   cpu/hotplug: Make...
699
700
  	wait_for_completion(&st->done);
  	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
701

a89941816   Thomas Gleixner   hotplug: Prevent ...
702
703
  	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
  	irq_unlock_sparse();
345527b1e   Preeti U Murthy   clockevents: Fix ...
704
  	hotplug_cpu__broadcast_tick_pull(cpu);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
705
706
  	/* This actually kills the CPU. */
  	__cpu_die(cpu);
a49b116dc   Thomas Gleixner   clockevents: Clea...
707
  	tick_cleanup_dead_cpu(cpu);
984581728   Thomas Gleixner   cpu/hotplug: Spli...
708
709
  	return 0;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
710

71f87b2fc   Thomas Gleixner   cpu/hotplug: Plug...
711
712
713
714
715
716
/*
 * SMP function call target: completes the dead CPU's st->done on behalf of
 * the dying CPU, which cannot call complete() itself after rcu_report_dead().
 * @arg is the dying CPU's struct cpuhp_cpu_state.
 */
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}
e69aab131   Thomas Gleixner   cpu/hotplug: Make...
717
718
719
720
721
  void cpuhp_report_idle_dead(void)
  {
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  
  	BUG_ON(st->state != CPUHP_AP_OFFLINE);
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
722
  	rcu_report_dead(smp_processor_id());
71f87b2fc   Thomas Gleixner   cpu/hotplug: Plug...
723
724
725
726
727
728
729
  	st->state = CPUHP_AP_IDLE_DEAD;
  	/*
  	 * We cannot call complete after rcu_report_dead() so we delegate it
  	 * to an online cpu.
  	 */
  	smp_call_function_single(cpumask_first(cpu_online_mask),
  				 cpuhp_complete_idle_dead, st, 0);
e69aab131   Thomas Gleixner   cpu/hotplug: Make...
730
  }
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
731
  #else
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
732
  #define takedown_cpu		NULL
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
733
734
735
  #endif
  
  #ifdef CONFIG_HOTPLUG_CPU
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
736

984581728   Thomas Gleixner   cpu/hotplug: Spli...
737
  /* Requires cpu_add_remove_lock to be held */
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
738
739
  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
  			   enum cpuhp_state target)
984581728   Thomas Gleixner   cpu/hotplug: Spli...
740
  {
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
741
742
743
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	int prev_state, ret = 0;
  	bool hasdied = false;
984581728   Thomas Gleixner   cpu/hotplug: Spli...
744
745
746
  
  	if (num_online_cpus() == 1)
  		return -EBUSY;
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
747
  	if (!cpu_present(cpu))
984581728   Thomas Gleixner   cpu/hotplug: Spli...
748
749
750
751
752
  		return -EINVAL;
  
  	cpu_hotplug_begin();
  
  	cpuhp_tasks_frozen = tasks_frozen;
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
753
  	prev_state = st->state;
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
754
  	st->target = target;
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
755
756
757
758
  	/*
  	 * If the current CPU state is in the range of the AP hotplug thread,
  	 * then we need to kick the thread.
  	 */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
759
  	if (st->state > CPUHP_TEARDOWN_CPU) {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
760
761
762
763
764
765
766
767
768
769
770
771
  		ret = cpuhp_kick_ap_work(cpu);
  		/*
  		 * The AP side has done the error rollback already. Just
  		 * return the error code..
  		 */
  		if (ret)
  			goto out;
  
  		/*
  		 * We might have stopped still in the range of the AP hotplug
  		 * thread. Nothing to do anymore.
  		 */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
772
  		if (st->state > CPUHP_TEARDOWN_CPU)
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
773
774
775
  			goto out;
  	}
  	/*
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
776
  	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
777
778
  	 * to do the further cleanups.
  	 */
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
779
  	ret = cpuhp_down_callbacks(cpu, st, target);
3b9d6da67   Sebastian Andrzej Siewior   cpu/hotplug: Fix ...
780
781
782
783
784
  	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
  		st->target = prev_state;
  		st->rollback = true;
  		cpuhp_kick_ap_work(cpu);
  	}
984581728   Thomas Gleixner   cpu/hotplug: Spli...
785

cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
786
  	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
787
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
788
  	cpu_hotplug_done();
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
789
  	return ret;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
790
  }
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
791
  static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
792
  {
9ea09af3b   Heiko Carstens   stop_machine: int...
793
  	int err;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
794

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
795
  	cpu_maps_update_begin();
e761b7725   Max Krasnyansky   cpu hotplug, sche...
796
797
  
  	if (cpu_hotplug_disabled) {
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
798
  		err = -EBUSY;
e761b7725   Max Krasnyansky   cpu hotplug, sche...
799
800
  		goto out;
  	}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
801
  	err = _cpu_down(cpu, 0, target);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
802

e761b7725   Max Krasnyansky   cpu hotplug, sche...
803
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
804
  	cpu_maps_update_done();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
805
806
  	return err;
  }
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
807
808
809
810
  int cpu_down(unsigned int cpu)
  {
  	return do_cpu_down(cpu, CPUHP_OFFLINE);
  }
b62b8ef90   Zhang Rui   force offline the...
811
  EXPORT_SYMBOL(cpu_down);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
812
  #endif /*CONFIG_HOTPLUG_CPU*/
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
813
  /**
ee1e714b9   Thomas Gleixner   cpu/hotplug: Remo...
814
   * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
815
816
   * @cpu: cpu that just started
   *
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
817
818
819
820
821
822
823
   * It must be called by the arch code on the new cpu, before the new cpu
   * enables interrupts and before the "boot" cpu returns from __cpu_up().
   */
  void notify_cpu_starting(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
0c6d4576c   Sebastian Andrzej Siewior   cpu/hotplug: Get ...
824
  	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
825
  	while (st->state < target) {
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
826
  		st->state++;
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
827
  		cpuhp_invoke_callback(cpu, st->state, true, NULL);
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
828
829
  	}
  }
949338e35   Thomas Gleixner   cpu/hotplug: Move...
830
831
  /*
   * Called from the idle task. We need to set active here, so we can kick off
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
832
833
834
   * the stopper thread and unpark the smpboot threads. If the target state is
   * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
   * cpu further.
949338e35   Thomas Gleixner   cpu/hotplug: Move...
835
   */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
836
  void cpuhp_online_idle(enum cpuhp_state state)
949338e35   Thomas Gleixner   cpu/hotplug: Move...
837
  {
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
838
839
840
841
842
843
844
845
  	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
  	unsigned int cpu = smp_processor_id();
  
  	/* Happens for the boot cpu */
  	if (state != CPUHP_AP_ONLINE_IDLE)
  		return;
  
  	st->state = CPUHP_AP_ONLINE_IDLE;
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
846

8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
847
  	/* Unpark the stopper thread and the hotplug thread of this cpu */
949338e35   Thomas Gleixner   cpu/hotplug: Move...
848
  	stop_machine_unpark(cpu);
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
849
  	kthread_unpark(st->thread);
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
850
851
852
853
854
855
  
  	/* Should we go further up ? */
  	if (st->target > CPUHP_AP_ONLINE_IDLE)
  		__cpuhp_kick_ap_work(st);
  	else
  		complete(&st->done);
949338e35   Thomas Gleixner   cpu/hotplug: Move...
856
  }
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
857
  /* Requires cpu_add_remove_lock to be held */
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
858
  static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
859
  {
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
860
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
3bb5d2ee3   Suresh Siddha   smp, idle: Alloca...
861
  	struct task_struct *idle;
2e1a3483c   Thomas Gleixner   cpu/hotplug: Spli...
862
  	int ret = 0;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
863

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
864
  	cpu_hotplug_begin();
38498a67a   Thomas Gleixner   smp: Add generic ...
865

757c989b9   Thomas Gleixner   cpu/hotplug: Make...
866
  	if (!cpu_present(cpu)) {
5e5041f35   Yasuaki Ishimatsu   ACPI / processor:...
867
868
869
  		ret = -EINVAL;
  		goto out;
  	}
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
870
871
872
873
874
  	/*
  	 * The caller of do_cpu_up might have raced with another
  	 * caller. Ignore it for now.
  	 */
  	if (st->state >= target)
38498a67a   Thomas Gleixner   smp: Add generic ...
875
  		goto out;
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
876
877
878
879
880
881
882
883
  
  	if (st->state == CPUHP_OFFLINE) {
  		/* Let it fail before we try to bring the cpu up */
  		idle = idle_thread_get(cpu);
  		if (IS_ERR(idle)) {
  			ret = PTR_ERR(idle);
  			goto out;
  		}
3bb5d2ee3   Suresh Siddha   smp, idle: Alloca...
884
  	}
38498a67a   Thomas Gleixner   smp: Add generic ...
885

ba9974624   Thomas Gleixner   cpu/hotplug: Rest...
886
  	cpuhp_tasks_frozen = tasks_frozen;
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
887
  	st->target = target;
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
888
889
890
891
  	/*
  	 * If the current CPU state is in the range of the AP hotplug thread,
  	 * then we need to kick the thread once more.
  	 */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
892
  	if (st->state > CPUHP_BRINGUP_CPU) {
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
893
894
895
896
897
898
899
900
901
902
903
  		ret = cpuhp_kick_ap_work(cpu);
  		/*
  		 * The AP side has done the error rollback already. Just
  		 * return the error code..
  		 */
  		if (ret)
  			goto out;
  	}
  
  	/*
  	 * Try to reach the target state. We max out on the BP at
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
904
  	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1cf4f629d   Thomas Gleixner   cpu/hotplug: Move...
905
906
  	 * responsible for bringing it up to the target state.
  	 */
8df3e07e7   Thomas Gleixner   cpu/hotplug: Let ...
907
  	target = min((int)target, CPUHP_BRINGUP_CPU);
a724632ca   Thomas Gleixner   cpu/hotplug: Rewo...
908
  	ret = cpuhp_up_callbacks(cpu, st, target);
38498a67a   Thomas Gleixner   smp: Add generic ...
909
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
910
  	cpu_hotplug_done();
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
911
912
  	return ret;
  }
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
913
  static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
914
915
  {
  	int err = 0;
cf23422b9   minskey guo   cpu/mem hotplug: ...
916

e0b582ec5   Rusty Russell   cpumask: convert ...
917
  	if (!cpu_possible(cpu)) {
84117da5b   Fabian Frederick   kernel/cpu.c: con...
918
919
920
  		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time
  ",
  		       cpu);
87d5e0236   Chen Gong   kernel/cpu.c: del...
921
  #if defined(CONFIG_IA64)
84117da5b   Fabian Frederick   kernel/cpu.c: con...
922
923
  		pr_err("please check additional_cpus= boot parameter
  ");
73e753a50   KAMEZAWA Hiroyuki   CPU HOTPLUG: avoi...
924
925
926
  #endif
  		return -EINVAL;
  	}
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
927

01b0f1970   Toshi Kani   cpu/mem hotplug: ...
928
929
930
  	err = try_online_node(cpu_to_node(cpu));
  	if (err)
  		return err;
cf23422b9   minskey guo   cpu/mem hotplug: ...
931

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
932
  	cpu_maps_update_begin();
e761b7725   Max Krasnyansky   cpu hotplug, sche...
933
934
  
  	if (cpu_hotplug_disabled) {
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
935
  		err = -EBUSY;
e761b7725   Max Krasnyansky   cpu hotplug, sche...
936
937
  		goto out;
  	}
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
938
  	err = _cpu_up(cpu, 0, target);
e761b7725   Max Krasnyansky   cpu hotplug, sche...
939
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
940
  	cpu_maps_update_done();
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
941
942
  	return err;
  }
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
943
944
945
946
947
  
  int cpu_up(unsigned int cpu)
  {
  	return do_cpu_up(cpu, CPUHP_ONLINE);
  }
a513f6bab   Paul E. McKenney   cpu: Export cpu_up()
948
  EXPORT_SYMBOL_GPL(cpu_up);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
949

f3de4be9d   Rafael J. Wysocki   PM: Fix dependenc...
950
  #ifdef CONFIG_PM_SLEEP_SMP
e0b582ec5   Rusty Russell   cpumask: convert ...
951
  static cpumask_var_t frozen_cpus;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
952

d391e5522   James Morse   cpu/hotplug: Allo...
953
  int freeze_secondary_cpus(int primary)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
954
  {
d391e5522   James Morse   cpu/hotplug: Allo...
955
  	int cpu, error = 0;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
956

d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
957
  	cpu_maps_update_begin();
d391e5522   James Morse   cpu/hotplug: Allo...
958
959
  	if (!cpu_online(primary))
  		primary = cpumask_first(cpu_online_mask);
9ee349ad6   Xiaotian Feng   sched: Fix set_cp...
960
961
  	/*
  	 * We take down all of the non-boot CPUs in one shot to avoid races
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
962
963
  	 * with the userspace trying to use the CPU hotplug at the same time
  	 */
e0b582ec5   Rusty Russell   cpumask: convert ...
964
  	cpumask_clear(frozen_cpus);
6ad4c1888   Peter Zijlstra   sched: Fix balanc...
965

84117da5b   Fabian Frederick   kernel/cpu.c: con...
966
967
  	pr_info("Disabling non-boot CPUs ...
  ");
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
968
  	for_each_online_cpu(cpu) {
d391e5522   James Morse   cpu/hotplug: Allo...
969
  		if (cpu == primary)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
970
  			continue;
bb3632c61   Todd E Brandt   PM / sleep: trace...
971
  		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
972
  		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
bb3632c61   Todd E Brandt   PM / sleep: trace...
973
  		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
feae3203d   Mike Travis   timers, init: Lim...
974
  		if (!error)
e0b582ec5   Rusty Russell   cpumask: convert ...
975
  			cpumask_set_cpu(cpu, frozen_cpus);
feae3203d   Mike Travis   timers, init: Lim...
976
  		else {
84117da5b   Fabian Frederick   kernel/cpu.c: con...
977
978
  			pr_err("Error taking CPU%d down: %d
  ", cpu, error);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
979
980
981
  			break;
  		}
  	}
86886e55b   Joseph Cihula   x86, intel_txt: I...
982

89af7ba57   Vitaly Kuznetsov   cpu-hotplug: conv...
983
  	if (!error)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
984
  		BUG_ON(num_online_cpus() > 1);
89af7ba57   Vitaly Kuznetsov   cpu-hotplug: conv...
985
  	else
84117da5b   Fabian Frederick   kernel/cpu.c: con...
986
987
  		pr_err("Non-boot CPUs are not disabled
  ");
89af7ba57   Vitaly Kuznetsov   cpu-hotplug: conv...
988
989
990
991
992
993
994
  
  	/*
  	 * Make sure the CPUs won't be enabled by someone else. We need to do
  	 * this even in case of failure as all disable_nonboot_cpus() users are
  	 * supposed to do enable_nonboot_cpus() on the failure path.
  	 */
  	cpu_hotplug_disabled++;
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
995
  	cpu_maps_update_done();
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
996
997
  	return error;
  }
d0af9eed5   Suresh Siddha   x86, pat/mtrr: Re...
998
999
1000
1001
1002
1003
1004
/* Arch hook invoked before re-onlining frozen CPUs; default is a no-op. */
void __weak arch_enable_nonboot_cpus_begin(void)
{
}
  
/* Arch hook invoked after re-onlining frozen CPUs; default is a no-op. */
void __weak arch_enable_nonboot_cpus_end(void)
{
}
71cf5aeeb   Mathias Krause   kernel, cpu: Remo...
1005
  void enable_nonboot_cpus(void)
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1006
1007
1008
1009
  {
  	int cpu, error;
  
  	/* Allow everyone to use the CPU hotplug again */
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
1010
  	cpu_maps_update_begin();
01b411590   Lianwei Wang   cpu/hotplug: Hand...
1011
  	__cpu_hotplug_enable();
e0b582ec5   Rusty Russell   cpumask: convert ...
1012
  	if (cpumask_empty(frozen_cpus))
1d64b9cb1   Rafael J. Wysocki   [PATCH] Fix micro...
1013
  		goto out;
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1014

84117da5b   Fabian Frederick   kernel/cpu.c: con...
1015
1016
  	pr_info("Enabling non-boot CPUs ...
  ");
d0af9eed5   Suresh Siddha   x86, pat/mtrr: Re...
1017
1018
  
  	arch_enable_nonboot_cpus_begin();
e0b582ec5   Rusty Russell   cpumask: convert ...
1019
  	for_each_cpu(cpu, frozen_cpus) {
bb3632c61   Todd E Brandt   PM / sleep: trace...
1020
  		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
af1f40457   Thomas Gleixner   cpu/hotplug: Hand...
1021
  		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
bb3632c61   Todd E Brandt   PM / sleep: trace...
1022
  		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1023
  		if (!error) {
84117da5b   Fabian Frederick   kernel/cpu.c: con...
1024
1025
  			pr_info("CPU%d is up
  ", cpu);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1026
1027
  			continue;
  		}
84117da5b   Fabian Frederick   kernel/cpu.c: con...
1028
1029
  		pr_warn("Error taking CPU%d up: %d
  ", cpu, error);
e3920fb42   Rafael J. Wysocki   [PATCH] Disable C...
1030
  	}
d0af9eed5   Suresh Siddha   x86, pat/mtrr: Re...
1031
1032
  
  	arch_enable_nonboot_cpus_end();
e0b582ec5   Rusty Russell   cpumask: convert ...
1033
  	cpumask_clear(frozen_cpus);
1d64b9cb1   Rafael J. Wysocki   [PATCH] Fix micro...
1034
  out:
d221938c0   Gautham R Shenoy   cpu-hotplug: refc...
1035
  	cpu_maps_update_done();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1036
  }
e0b582ec5   Rusty Russell   cpumask: convert ...
1037

d7268a31c   Fenghua Yu   CPU: Add right qu...
1038
  static int __init alloc_frozen_cpus(void)
e0b582ec5   Rusty Russell   cpumask: convert ...
1039
1040
1041
1042
1043
1044
  {
  	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
  		return -ENOMEM;
  	return 0;
  }
  core_initcall(alloc_frozen_cpus);
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1045
1046
  
  /*
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
   * When callbacks for CPU hotplug notifications are being executed, we must
   * ensure that the state of the system with respect to the tasks being frozen
   * or not, as reported by the notification, remains unchanged *throughout the
   * duration* of the execution of the callbacks.
   * Hence we need to prevent the freezer from racing with regular CPU hotplug.
   *
   * This synchronization is implemented by mutually excluding regular CPU
   * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
   * Hibernate notifications.
   */
  static int
  cpu_hotplug_pm_callback(struct notifier_block *nb,
  			unsigned long action, void *ptr)
  {
  	switch (action) {
  
  	case PM_SUSPEND_PREPARE:
  	case PM_HIBERNATION_PREPARE:
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
1065
  		cpu_hotplug_disable();
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1066
1067
1068
1069
  		break;
  
  	case PM_POST_SUSPEND:
  	case PM_POST_HIBERNATION:
16e53dbf1   Srivatsa S. Bhat   CPU hotplug: prov...
1070
  		cpu_hotplug_enable();
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1071
1072
1073
1074
1075
1076
1077
1078
  		break;
  
  	default:
  		return NOTIFY_DONE;
  	}
  
  	return NOTIFY_OK;
  }
d7268a31c   Fenghua Yu   CPU: Add right qu...
1079
  static int __init cpu_hotplug_pm_sync_init(void)
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1080
  {
6e32d479d   Fenghua Yu   kernel/cpu.c: Add...
1081
1082
1083
1084
1085
  	/*
  	 * cpu_hotplug_pm_callback has higher priority than x86
  	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
  	 * to disable cpu hotplug to avoid cpu hotplug race.
  	 */
79cfbdfa8   Srivatsa S. Bhat   PM / Sleep: Fix r...
1086
1087
1088
1089
  	pm_notifier(cpu_hotplug_pm_callback, 0);
  	return 0;
  }
  core_initcall(cpu_hotplug_pm_sync_init);
f3de4be9d   Rafael J. Wysocki   PM: Fix dependenc...
1090
  #endif /* CONFIG_PM_SLEEP_SMP */
68f4f1ec0   Max Krasnyansky   sched: Move cpu m...
1091
1092
  
  #endif /* CONFIG_SMP */
b8d317d10   Mike Travis   cpumask: make cpu...
1093

cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1094
1095
1096
1097
  /* Boot processor state steps */
  static struct cpuhp_step cpuhp_bp_states[] = {
  	[CPUHP_OFFLINE] = {
  		.name			= "offline",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1098
1099
  		.startup.single		= NULL,
  		.teardown.single	= NULL,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1100
1101
1102
  	},
  #ifdef CONFIG_SMP
  	[CPUHP_CREATE_THREADS]= {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1103
  		.name			= "threads:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1104
1105
  		.startup.single		= smpboot_create_threads,
  		.teardown.single	= NULL,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1106
  		.cant_stop		= true,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1107
  	},
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1108
  	[CPUHP_PERF_PREPARE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1109
1110
1111
  		.name			= "perf:prepare",
  		.startup.single		= perf_event_init_cpu,
  		.teardown.single	= perf_event_exit_cpu,
00e16c3d6   Thomas Gleixner   perf/core: Conver...
1112
  	},
7ee681b25   Thomas Gleixner   workqueue: Conver...
1113
  	[CPUHP_WORKQUEUE_PREP] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1114
1115
1116
  		.name			= "workqueue:prepare",
  		.startup.single		= workqueue_prepare_cpu,
  		.teardown.single	= NULL,
7ee681b25   Thomas Gleixner   workqueue: Conver...
1117
  	},
27590dc17   Thomas Gleixner   hrtimer: Convert ...
1118
  	[CPUHP_HRTIMERS_PREPARE] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1119
1120
1121
  		.name			= "hrtimers:prepare",
  		.startup.single		= hrtimers_prepare_cpu,
  		.teardown.single	= hrtimers_dead_cpu,
27590dc17   Thomas Gleixner   hrtimer: Convert ...
1122
  	},
31487f832   Richard Weinberger   smp/cfd: Convert ...
1123
  	[CPUHP_SMPCFD_PREPARE] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1124
  		.name			= "smpcfd:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1125
1126
  		.startup.single		= smpcfd_prepare_cpu,
  		.teardown.single	= smpcfd_dead_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1127
  	},
e6d4989a9   Richard Weinberger   relayfs: Convert ...
1128
1129
1130
1131
1132
  	[CPUHP_RELAY_PREPARE] = {
  		.name			= "relay:prepare",
  		.startup.single		= relay_prepare_cpu,
  		.teardown.single	= NULL,
  	},
6731d4f12   Sebastian Andrzej Siewior   slab: Convert to ...
1133
1134
1135
1136
  	[CPUHP_SLAB_PREPARE] = {
  		.name			= "slab:prepare",
  		.startup.single		= slab_prepare_cpu,
  		.teardown.single	= slab_dead_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1137
  	},
4df837425   Thomas Gleixner   rcu: Convert rcut...
1138
  	[CPUHP_RCUTREE_PREP] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1139
  		.name			= "RCU/tree:prepare",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1140
1141
  		.startup.single		= rcutree_prepare_cpu,
  		.teardown.single	= rcutree_dead_cpu,
4df837425   Thomas Gleixner   rcu: Convert rcut...
1142
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1143
  	/*
4fae16dff   Richard Cochran   timers/core: Corr...
1144
1145
1146
1147
1148
  	 * On the tear-down path, timers_dead_cpu() must be invoked
  	 * before blk_mq_queue_reinit_notify() from notify_dead(),
  	 * otherwise a RCU stall occurs.
  	 */
  	[CPUHP_TIMERS_DEAD] = {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1149
1150
1151
  		.name			= "timers:dead",
  		.startup.single		= NULL,
  		.teardown.single	= timers_dead_cpu,
4fae16dff   Richard Cochran   timers/core: Corr...
1152
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1153
  	/* Kicks the plugged cpu into life */
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1154
1155
  	[CPUHP_BRINGUP_CPU] = {
  		.name			= "cpu:bringup",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1156
1157
  		.startup.single		= bringup_cpu,
  		.teardown.single	= NULL,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1158
  		.cant_stop		= true,
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1159
  	},
31487f832   Richard Weinberger   smp/cfd: Convert ...
1160
  	[CPUHP_AP_SMPCFD_DYING] = {
677f66465   Thomas Gleixner   cpu/hotplug: Make...
1161
  		.name			= "smpcfd:dying",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1162
1163
  		.startup.single		= NULL,
  		.teardown.single	= smpcfd_dying_cpu,
31487f832   Richard Weinberger   smp/cfd: Convert ...
1164
  	},
d10ef6f93   Thomas Gleixner   cpu/hotplug: Docu...
1165
1166
1167
1168
	/*
	 * Handled on the control processor until the plugged processor
	 * manages this itself.
	 */
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1169
1170
  	[CPUHP_TEARDOWN_CPU] = {
  		.name			= "cpu:teardown",
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1171
1172
  		.startup.single		= NULL,
  		.teardown.single	= takedown_cpu,
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1173
  		.cant_stop		= true,
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1174
  	},
a7c734140   Thomas Gleixner   cpu/hotplug: Keep...
1175
1176
  #else
  	[CPUHP_BRINGUP_CPU] = { },
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1177
  #endif
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1178
  };
4baa0afc6   Thomas Gleixner   cpu/hotplug: Conv...
1179
1180
1181
/*
 * Application processor state steps.
 *
 * These states run on the plugged CPU itself (with the exception of the
 * transient sync states). Array order is the invocation order on bringup;
 * teardown walks it in reverse. Do not reorder entries.
 */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,	/* must not stop mid-transition */
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on.
	 * Transient state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 * (CPUHP_AP_ONLINE_DYN .. CPUHP_AP_ONLINE_DYN_END).
	 */
#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif
	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1250
1251
1252
1253
1254
1255
1256
  /* Sanity check for callbacks */
  static int cpuhp_cb_check(enum cpuhp_state state)
  {
  	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
  		return -EINVAL;
  	return 0;
  }
dc280d936   Thomas Gleixner   cpu/hotplug: Prev...
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	/* Scan the dynamic range for the first unnamed (free) slot. */
	for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
		if (!cpuhp_ap_states[i].name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
  
/*
 * cpuhp_store_callbacks - (Un)Install the callbacks for further cpu hotplug
 * operations.
 *
 * Passing NULL for @name/@startup/@teardown clears the slot (removal path).
 * For CPUHP_AP_ONLINE_DYN a free dynamic slot is reserved first; on success
 * the reserved state number is returned via @ret (> 0).
 *
 * Returns 0 or a positive dynamic state number on success, -EBUSY if the
 * slot is already occupied, or the error from cpuhp_reserve_state().
 */
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	mutex_lock(&cpuhp_state_mutex);

	if (state == CPUHP_AP_ONLINE_DYN) {
		/* Reserve a free dynamic slot; 'state' becomes the slot. */
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	/* An installed name means the slot is already taken. */
	if (name && sp->name) {
		ret = -EBUSY;
		goto out;
	}
	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
out:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}
  
  static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
  {
3c1627e99   Thomas Gleixner   cpu/hotplug: Repl...
1309
  	return cpuhp_get_step(state)->teardown.single;
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1310
  }
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1311
1312
1313
1314
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 *
 * @bringup selects startup (true) or teardown (false). A missing callback
 * is not an error; the call is simply skipped.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/* Nothing installed for the requested direction -> nothing to do. */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	/* AP states must run on the target CPU via the hotplug thread. */
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
	/* Teardown callbacks are not allowed to fail. */
	BUG_ON(ret && !bringup);
	return ret;
}
  
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Undoes the startup calls already issued on CPUs below @failedcpu.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		/* CPUs at/after the failing one never ran the startup call. */
		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
/*
 * __cpuhp_state_add_instance - Add an instance to a multi-instance state.
 * @state:	An installed multi-instance state
 * @node:	The instance's hlist node, linked into the state's list
 * @invoke:	If true, call the multi startup callback on all present CPUs
 *		that have already reached @state
 *
 * On a startup failure the already-handled CPUs are rolled back (if a
 * multi teardown callback exists) and the node is NOT added.
 * Returns 0 on success, -EINVAL if @state is not multi-instance, or the
 * startup callback's error.
 */
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	/* Block hotplug while we touch per-cpu state. */
	get_online_cpus();

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		/* CPU has not reached @state yet -> nothing to invoke. */
		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			/* Undo on the CPUs already handled, if possible. */
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto err;
		}
	}
add_node:
	ret = 0;
	mutex_lock(&cpuhp_state_mutex);
	hlist_add_head(node, &sp->list);
	mutex_unlock(&cpuhp_state_mutex);

err:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1405
1406
/**
 * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the state (must not be NULL)
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	/*
	 * For CPUHP_AP_ONLINE_DYN, cpuhp_store_callbacks() returned the
	 * reserved slot number; switch to it and treat this as success.
	 */
	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			/* Roll back and uninstall the callbacks again. */
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	put_online_cpus();
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
cf392d10b   Thomas Gleixner   cpu/hotplug: Add ...
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
/*
 * __cpuhp_state_remove_instance - Remove an instance from a multi-instance
 * state.
 * @state:	An installed multi-instance state
 * @node:	The instance's hlist node, previously added via
 *		__cpuhp_state_add_instance()
 * @invoke:	If true, call the teardown callback on all present CPUs
 *		that have reached @state
 *
 * Returns 0 on success, -EINVAL if @state is not multi-instance.
 */
int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	get_online_cpus();
	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	mutex_lock(&cpuhp_state_mutex);
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
5b7aa87e0   Thomas Gleixner   cpu/hotplug: Impl...
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (sp->multi_instance) {
		/* All instances must be removed before the state itself. */
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	/* Clear the slot so it can be reused. */
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
/* sysfs: /sys/devices/system/cpu/cpuN/hotplug/state (read-only) */
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
757c989b9   Thomas Gleixner   cpu/hotplug: Make...
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
  static ssize_t write_cpuhp_target(struct device *dev,
  				  struct device_attribute *attr,
  				  const char *buf, size_t count)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  	struct cpuhp_step *sp;
  	int target, ret;
  
  	ret = kstrtoint(buf, 10, &target);
  	if (ret)
  		return ret;
  
  #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
  	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
  		return -EINVAL;
  #else
  	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
  		return -EINVAL;
  #endif
  
  	ret = lock_device_hotplug_sysfs();
  	if (ret)
  		return ret;
  
  	mutex_lock(&cpuhp_state_mutex);
  	sp = cpuhp_get_step(target);
  	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
  	mutex_unlock(&cpuhp_state_mutex);
  	if (ret)
  		return ret;
  
  	if (st->state < target)
  		ret = do_cpu_up(dev->id, target);
  	else
  		ret = do_cpu_down(dev->id, target);
  
  	unlock_device_hotplug();
  	return ret ? ret : count;
  }
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1603
1604
1605
1606
1607
1608
1609
1610
/* sysfs: /sys/devices/system/cpu/cpuN/hotplug/target (read side) */
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
98f8cdce1   Thomas Gleixner   cpu/hotplug: Add ...
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
  
  static struct attribute *cpuhp_cpu_attrs[] = {
  	&dev_attr_state.attr,
  	&dev_attr_target.attr,
  	NULL
  };
  
  static struct attribute_group cpuhp_cpu_attr_group = {
  	.attrs = cpuhp_cpu_attrs,
  	.name = "hotplug",
  	NULL
  };
  
/*
 * sysfs: /sys/devices/system/cpu/hotplug/states (read-only).
 * Lists every named hotplug state as "<number>: <name>", one per line.
 */
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	/* Hold the mutex so names can't change while we walk the table. */
	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		/* Unnamed slots are free/unused and are skipped. */
		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
  
  static struct attribute *cpuhp_cpu_root_attrs[] = {
  	&dev_attr_states.attr,
  	NULL
  };
  
  static struct attribute_group cpuhp_cpu_root_attr_group = {
  	.attrs = cpuhp_cpu_root_attrs,
  	.name = "hotplug",
  	NULL
  };
  
/*
 * Register the hotplug sysfs groups: one global group on the cpu subsystem
 * root and one per possible CPU device.
 *
 * NOTE(review): on a mid-loop failure the already-created groups are not
 * removed; boot-time initcall failure is treated as unrecoverable here.
 */
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		/* Not every possible CPU has a registered device. */
		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif
e56b3bc79   Linus Torvalds   cpu masks: optimi...
1680
1681
1682
1683
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

/* Row n+1 holds the mask with only bit n set (within the first word). */
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
2d3854a37   Rusty Russell   cpumask: introduc...
1704
1705
1706
  
/* Constant bitmap with all NR_CPUS bits set; backs cpu_all_mask. */
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
b3199c025   Rusty Russell   cpumask: switch o...
1707
1708
  
/*
 * The global cpumasks backing cpu_possible_mask, cpu_online_mask,
 * cpu_present_mask and cpu_active_mask.
 *
 * With CONFIG_INIT_ALL_POSSIBLE the possible mask starts with all bits
 * set; otherwise the architecture populates it during early boot.
 */
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
3fa415206   Rusty Russell   cpumask: make set...
1724

3fa415206   Rusty Russell   cpumask: make set...
1725
1726
/* Initialize the present-CPUs mask from @src (early boot helper). */
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}
  
/* Initialize the possible-CPUs mask from @src (early boot helper). */
void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}
  
/* Initialize the online-CPUs mask from @src (early boot helper). */
void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
cff7d378d   Thomas Gleixner   cpu/hotplug: Conv...
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
  
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
  
  /*
   * Must be called _AFTER_ setting up the per_cpu areas
   */
  void __init boot_cpu_state_init(void)
  {
  	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
  }