kernel/cpu.c

/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <uapi/linux/sched/types.h>
#include <linux/cpuset.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/sched.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	Current CPU hotplug callback state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Remote CPU node; for multi-instance, a single entry
 *		callback for install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afresh
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail.
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
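
/*
 * Editor's note -- illustrative sketch, not part of kernel/cpu.c: this is
 * how a subsystem typically consumes the multi-instance machinery that
 * cpuhp_invoke_callback() drives above, via the <linux/cpuhotplug.h>
 * helpers cpuhp_setup_state_multi() and cpuhp_state_add_instance().
 * All "example_*" names are hypothetical.
 */
#if 0	/* example only */
static enum cpuhp_state example_hp_state;

struct example_dev {
	struct hlist_node node;		/* hooks into cpuhp_step::list */
};

static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* Per-instance bringup work for @cpu; @node identifies the device. */
	return 0;
}

static int example_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;
}

static int example_register(struct example_dev *dev)
{
	int ret;

	/* Allocates a dynamic state and records the multi callbacks. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
				      example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;
	example_hp_state = ret;

	/* Invokes example_cpu_online() on all online CPUs for this node. */
	return cpuhp_state_add_instance(example_hp_state, &dev->node);
}
#endif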

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
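
/*
 * Editor's note -- illustrative only, not part of kernel/cpu.c: every
 * writer of cpu_present_mask or of the hotplug-disable state brackets the
 * update exactly like this (see cpu_hotplug_disable() below):
 *
 *	cpu_maps_update_begin();
 *	...update cpu_present_mask / cpu_hotplug_disabled...
 *	cpu_maps_update_done();
 */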

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
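
/*
 * Editor's note -- illustrative sketch, not part of kernel/cpu.c: a typical
 * reader section. Holding cpus_read_lock() keeps cpu_online_mask stable, so
 * the loop below cannot race with a CPU coming or going.
 */
#if 0	/* example only */
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		cnt++;
	cpus_read_unlock();

	return cnt;
}
#endif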

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
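
/*
 * Editor's note -- illustrative only, not part of kernel/cpu.c: helpers
 * that rely on their caller holding the hotplug lock document and enforce
 * that contract with the assertion above, e.g.:
 *
 *	static void example_update_cpu_data(void)	// hypothetical
 *	{
 *		lockdep_assert_cpus_held();
 *		// cpu_online_mask cannot change under us here
 *	}
 */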

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
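
/*
 * Editor's note -- illustrative only, not part of kernel/cpu.c: the two
 * calls above are refcounted and must stay balanced, typically:
 *
 *	cpu_hotplug_disable();
 *	example_fragile_operation();	// hypothetical; no CPU may come or go
 *	cpu_hotplug_enable();
 */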

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
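
/*
 * Editor's note -- usage example for the early_param above: booting with
 * "nosmt" keeps secondary SMT siblings offline but still allows re-enabling
 * them later via sysfs, while "nosmt=force" disables SMT irreversibly for
 * this boot:
 *
 *	linux ... nosmt
 *	linux ... nosmt=force
 */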
  
static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires bringing the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);
	return 0;
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting that the lock is held will actually
	 * find it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle; no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_active_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);
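
/*
 * Editor's note -- illustrative sketch, not part of kernel/cpu.c: offlining
 * and re-onlining a CPU from kernel code goes through the device core
 * wrappers so sysfs state stays coherent; add_cpu() is the bringup
 * counterpart provided alongside remove_cpu().
 */
#if 0	/* example only */
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);		/* device_offline() under the hood */
	if (ret)
		return ret;

	return add_cpu(cpu);		/* device_online() under the hood */
}
#endif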

extern bool dl_cpu_busy(unsigned int cpu);

int __pause_drain_rq(struct cpumask *cpus)
{
	unsigned int cpu;
	int err = 0;

	/*
	 * Disabling preemption prevents one of the stoppers, started from
	 * sched_cpu_drain_rq(), from blocking draining for the whole cpumask.
	 */
	preempt_disable();
	for_each_cpu(cpu, cpus) {
		err = sched_cpu_drain_rq(cpu);
		if (err)
			break;
	}
	preempt_enable();

	return err;
}

void __wait_drain_rq(struct cpumask *cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpus)
		sched_cpu_drain_rq_wait(cpu);
}
683010f55   Vincent Donnefort   ANDROID: cpu/hotp...
1071
1072
1073
1074
  int pause_cpus(struct cpumask *cpus)
  {
  	int err = 0;
  	int cpu;
614afa949   Stephen Dickey   ANDROID: cpuhp/pa...
1075
1076
1077
  	u64 start_time = 0;
  
  	start_time = sched_clock();
683010f55   Vincent Donnefort   ANDROID: cpu/hotp...
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
  
  	cpu_maps_update_begin();
  
  	if (cpu_hotplug_disabled) {
  		err = -EBUSY;
  		goto err_cpu_maps_update;
  	}
  
  	/* Pausing an already inactive CPU isn't an error */
  	cpumask_and(cpus, cpus, cpu_active_mask);
  
  	for_each_cpu(cpu, cpus) {
  		if (!cpu_online(cpu) || dl_cpu_busy(cpu)) {
  			err = -EBUSY;
  			goto err_cpu_maps_update;
  		}
  	}
  
  	if (cpumask_weight(cpus) >= num_active_cpus()) {
  		err = -EBUSY;
  		goto err_cpu_maps_update;
  	}
  
  	if (cpumask_empty(cpus))
  		goto err_cpu_maps_update;
	/*
	 * Lazy migration:
	 *
	 * We do care about how fast a CPU can go idle and stay in this state.
	 * If we tried to take the cpus_write_lock() here, we would have to
	 * wait a few dozen milliseconds, as this function might schedule.
	 * However, as a first step, we can flip the active mask and migrate
	 * anything currently on the run-queue, to give the paused CPUs a
	 * chance to quickly reach an idle state. Meanwhile, there is a risk
	 * that another CPU observes an out-of-date active_mask or
	 * incompletely updates a cpuset. Both problems are resolved later in
	 * the slow path, which ensures active_mask synchronization, triggers
	 * a cpuset rebuild and migrates any task that escaped the lazy
	 * migration.
	 */
  	for_each_cpu(cpu, cpus)
  		set_cpu_active(cpu, false);
  	err = __pause_drain_rq(cpus);
  	if (err) {
  		__wait_drain_rq(cpus);
  		for_each_cpu(cpu, cpus)
  			set_cpu_active(cpu, true);
  		goto err_cpu_maps_update;
  	}
  
	/*
	 * Slow path deactivation:
	 *
	 * Now that the paused CPUs are most likely idle, we can go through a
	 * complete scheduler deactivation.
	 *
	 * Since cpu_active_mask has already been updated and
	 * cpus_write_lock() calls synchronize_rcu(), we know that all
	 * preempt-disabled and RCU users will observe the updated value.
	 */
  	cpus_write_lock();
  	__wait_drain_rq(cpus);
  	cpuhp_tasks_frozen = 0;
  
  	if (sched_cpus_deactivate_nosync(cpus)) {
  		err = -EBUSY;
  		goto err_cpus_write_unlock;
  	}
  	err = __pause_drain_rq(cpus);
  	__wait_drain_rq(cpus);
  	if (err) {
  		for_each_cpu(cpu, cpus)
  			sched_cpu_activate(cpu);
  		goto err_cpus_write_unlock;
  	}
	/*
	 * Even though it lives alongside the regular hotplug path, pause uses
	 * one of the hotplug steps (CPUHP_AP_ACTIVE). This should be
	 * reflected in the current state of the CPU.
	 */
  	for_each_cpu(cpu, cpus) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  		st->state = CPUHP_AP_ACTIVE - 1;
  		st->target = st->state;
  	}
  
  err_cpus_write_unlock:
  	cpus_write_unlock();
  err_cpu_maps_update:
  	cpu_maps_update_done();
  	trace_cpuhp_pause(cpus, start_time, 1);
  	return err;
  }
  EXPORT_SYMBOL_GPL(pause_cpus);
  
  int resume_cpus(struct cpumask *cpus)
  {
  	unsigned int cpu;
  	int err = 0;
	u64 start_time = sched_clock();
  
  	cpu_maps_update_begin();
  
  	if (cpu_hotplug_disabled) {
  		err = -EBUSY;
  		goto err_cpu_maps_update;
  	}
  
  	/* Resuming an already active CPU isn't an error */
  	cpumask_andnot(cpus, cpus, cpu_active_mask);
  
  	for_each_cpu(cpu, cpus) {
  		if (!cpu_online(cpu)) {
  			err = -EBUSY;
  			goto err_cpu_maps_update;
  		}
  	}
  
  	if (cpumask_empty(cpus))
  		goto err_cpu_maps_update;
  	for_each_cpu(cpu, cpus)
  		set_cpu_active(cpu, true);
  	trace_android_rvh_resume_cpus(cpus, &err);
  	if (err)
  		goto err_cpu_maps_update;
	/*
	 * Lazy resume: build the sched domains immediately instead of
	 * scheduling a workqueue, so that the CPU can pull load when it is
	 * sent a load-balancing kick.
	 */
  	cpuset_hotplug_workfn(NULL);
  	cpus_write_lock();
  
  	cpuhp_tasks_frozen = 0;
  
  	if (sched_cpus_activate(cpus)) {
  		err = -EBUSY;
  		goto err_cpus_write_unlock;
  	}
  
	/* See pause_cpus(). */
  	for_each_cpu(cpu, cpus) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  
  		st->state = CPUHP_ONLINE;
  		st->target = st->state;
  	}
  
  err_cpus_write_unlock:
  	cpus_write_unlock();
  err_cpu_maps_update:
  	cpu_maps_update_done();
  	trace_cpuhp_pause(cpus, start_time, 0);
  	return err;
  }
  EXPORT_SYMBOL_GPL(resume_cpus);
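
/*
 * Illustrative sketch (not part of this file): a hypothetical vendor driver
 * could pair pause_cpus()/resume_cpus() to take CPUs in and out of service
 * without a full hotplug cycle. Note that both callees modify the mask they
 * are given (pause_cpus() ANDs it with cpu_active_mask, resume_cpus() with
 * its complement), so a scratch copy must be rebuilt for each call:
 *
 *	static int example_pause_then_resume(void)
 *	{
 *		cpumask_t mask;
 *		int err;
 *
 *		cpumask_clear(&mask);
 *		cpumask_set_cpu(2, &mask);
 *		cpumask_set_cpu(3, &mask);
 *		err = pause_cpus(&mask);	// CPUs 2-3 become inactive
 *		if (err)
 *			return err;
 *
 *		cpumask_clear(&mask);
 *		cpumask_set_cpu(2, &mask);
 *		cpumask_set_cpu(3, &mask);
 *		return resume_cpus(&mask);	// CPUs 2-3 become active again
 *	}
 */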
  void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
  {
  	unsigned int cpu;
  	int error;
  
  	cpu_maps_update_begin();
  
	/*
	 * Make certain the CPU I'm about to reboot on is online.
	 *
	 * This is in line with what migrate_to_reboot_cpu() already does.
	 */
  	if (!cpu_online(primary_cpu))
  		primary_cpu = cpumask_first(cpu_online_mask);
  
  	for_each_online_cpu(cpu) {
  		if (cpu == primary_cpu)
  			continue;
  
  		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
  		if (error) {
			pr_err("Failed to offline CPU%d - error=%d\n",
				cpu, error);
  			break;
  		}
  	}
  
  	/*
  	 * Ensure all but the reboot CPU are offline.
  	 */
  	BUG_ON(num_online_cpus() > 1);
  
  	/*
  	 * Make sure the CPUs won't be enabled by someone else after this
  	 * point. Kexec will reboot to a new kernel shortly resetting
  	 * everything along the way.
  	 */
  	cpu_hotplug_disabled++;
  
  	cpu_maps_update_done();
  }
  
  #else
  #define takedown_cpu		NULL
#endif /* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
   * It must be called by the arch code on the new cpu, before the new cpu
   * enables interrupts and before the "boot" cpu returns from __cpu_up().
   */
  void notify_cpu_starting(unsigned int cpu)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
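
/*
 * Illustrative sketch (hypothetical, not taken from any particular
 * architecture): a secondary-CPU entry point is expected to call
 * notify_cpu_starting() after its low-level setup, with interrupts still
 * disabled, before marking itself online:
 *
 *	asmlinkage void example_secondary_start_kernel(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		// ... arch-specific MMU, per-cpu and timer setup ...
 *		notify_cpu_starting(cpu);	// run the STARTING callbacks
 *		set_cpu_online(cpu, true);	// lets __cpu_up() proceed
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */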

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
  	stop_machine_unpark(smp_processor_id());
	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

  static int switch_to_rt_policy(void)
  {
  	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
  	unsigned int policy = current->policy;
  
  	if (policy == SCHED_NORMAL)
  		/* Switch to SCHED_FIFO from SCHED_NORMAL. */
  		return sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
	else
		return 1;
  }
  
  static int switch_to_fair_policy(void)
  {
  	struct sched_param param = { .sched_priority = 0 };
  
  	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
  }

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of cpu_up() might have raced with another
	 * caller. Nothing to do.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}
  
  	/*
  	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;
	int switch_err;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	/*
	 * A CPU hotplug operation consists of many steps, each of which
	 * calls a callback of a core kernel subsystem. A hotplug-in
	 * operation may get preempted by other CFS tasks, delaying the
	 * whole operation. Switch the current task from SCHED_NORMAL to
	 * SCHED_FIFO so that the hotplug-in operation completes quickly
	 * under heavy load and the new CPU can start handling work sooner.
	 */
  
  	switch_err = switch_to_rt_policy();

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		goto switch_out;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
switch_out:
	if (!switch_err) {
		switch_err = switch_to_fair_policy();
		if (switch_err)
			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
				switch_err, current->comm, current->pid);
	}

	return err;
}

  /**
   * cpu_device_up - Bring up a cpu device
   * @dev: Pointer to the cpu device to online
   *
 * This function is meant to be used by the device core CPU subsystem only.
   *
   * Other subsystems should use add_cpu() instead.
   */
  int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}

  int add_cpu(unsigned int cpu)
  {
  	int ret;
  
  	lock_device_hotplug();
  	ret = device_online(get_cpu_device(cpu));
  	unlock_device_hotplug();
  
  	return ret;
  }
  EXPORT_SYMBOL_GPL(add_cpu);
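
/*
 * Illustrative sketch (hypothetical test code): add_cpu()/remove_cpu() are
 * the interface for the few legitimate in-kernel users that need to online
 * or offline a CPU through the device core:
 *
 *	static int example_cycle_cpu(unsigned int cpu)
 *	{
 *		int ret = remove_cpu(cpu);	// device_offline() under lock
 *
 *		if (ret)
 *			return ret;
 *		return add_cpu(cpu);		// device_online() under lock
 *	}
 */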
  /**
   * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
   * @sleep_cpu: The cpu we hibernated on and should be brought up.
   *
   * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake-up the CPU we hibernated on might be offline as a side effect of
   * using maxcpus= for example.
   */
  int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
  	return 0;
  }
  void bringup_nonboot_cpus(unsigned int setup_max_cpus)
  {
  	unsigned int cpu;
  
  	for_each_present_cpu(cpu) {
  		if (num_online_cpus() >= setup_max_cpus)
  			break;
  		if (!cpu_online(cpu))
			cpu_up(cpu, CPUHP_ONLINE);
	}
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with userspace trying to use CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");
  
  	/*
  	 * Make sure the CPUs won't be enabled by someone else. We need to do
  	 * this even in case of failure as all freeze_secondary_cpus() users are
  	 * supposed to do thaw_secondary_cpus() on the failure path.
  	 */
  	cpu_hotplug_disabled++;
  	cpu_maps_update_done();

	return error;
}

void __weak arch_thaw_secondary_cpus_begin(void)
{
}

void __weak arch_thaw_secondary_cpus_end(void)
{
}

void thaw_secondary_cpus(void)
{
	int cpu, error;
	struct device *cpu_device;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_thaw_secondary_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			cpu_device = get_cpu_device(cpu);
			if (!cpu_device)
				pr_err("%s: failed to get cpu%d device\n",
				       __func__, cpu);
			else
				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_thaw_secondary_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
   * When callbacks for CPU hotplug notifications are being executed, we must
   * ensure that the state of the system with respect to the tasks being frozen
   * or not, as reported by the notification, remains unchanged *throughout the
   * duration* of the execution of the callbacks.
   * Hence we need to prevent the freezer from racing with regular CPU hotplug.
   *
   * This synchronization is implemented by mutually excluding regular CPU
   * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
   * Hibernate notifications.
   */
  static int
  cpu_hotplug_pm_callback(struct notifier_block *nb,
  			unsigned long action, void *ptr)
  {
  	switch (action) {
  
  	case PM_SUSPEND_PREPARE:
  	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
  		break;
  
  	case PM_POST_SUSPEND:
  	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
  		break;
  
  	default:
  		return NOTIFY_DONE;
  	}
  
  	return NOTIFY_OK;
  }

static int __init cpu_hotplug_pm_sync_init(void)
{
  	/*
  	 * cpu_hotplug_pm_callback has higher priority than x86
  	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
  	 * to disable cpu hotplug to avoid cpu hotplug race.
  	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

  #endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise an RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name			= "timers:prepare",
		.startup.single		= timers_prepare_cpu,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= finish_cpu,
		.cant_stop		= true,
	},
  	/* Final state before CPU kills itself */
  	[CPUHP_AP_IDLE_DEAD] = {
  		.name			= "idle:dead",
  	},
  	/*
  	 * Last state before CPU enters the idle loop to die. Transient state
  	 * for synchronization.
  	 */
  	[CPUHP_AP_OFFLINE] = {
  		.name			= "ap:offline",
  		.cant_stop		= true,
  	},
  	/* First state is scheduler control. Interrupts are disabled */
  	[CPUHP_AP_SCHED_STARTING] = {
  		.name			= "sched:starting",
  		.startup.single		= sched_cpu_starting,
  		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
  	[CPUHP_AP_SMPCFD_DYING] = {
  		.name			= "smpcfd:dying",
  		.startup.single		= NULL,
  		.teardown.single	= smpcfd_dying_cpu,
  	},
	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
  	[CPUHP_TEARDOWN_CPU] = {
  		.name			= "cpu:teardown",
  		.startup.single		= NULL,
  		.teardown.single	= takedown_cpu,
  		.cant_stop		= true,
  	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= smpboot_park_threads,
	},
  	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
  		.name			= "irq/affinity:online",
  		.startup.single		= irq_affinity_online_cpu,
  		.teardown.single	= NULL,
  	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
  	[CPUHP_AP_WATCHDOG_ONLINE] = {
  		.name			= "lockup_detector:online",
  		.startup.single		= lockup_detector_online_cpu,
  		.teardown.single	= lockup_detector_offline_cpu,
  	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
  	/*
  	 * The dynamically registered state space is here
  	 */
  #ifdef CONFIG_SMP
  	/* Last state is scheduler control setting the cpu active */
  	[CPUHP_AP_ACTIVE] = {
  		.name			= "sched:active",
  		.startup.single		= sched_cpu_activate,
  		.teardown.single	= sched_cpu_deactivate,
	},
#endif
  	/* CPU is fully up and running. */
  	[CPUHP_ONLINE] = {
  		.name			= "online",
  		.startup.single		= NULL,
  		.teardown.single	= NULL,
	},
};
  /* Sanity check for callbacks */
  static int cpuhp_cb_check(enum cpuhp_state state)
  {
  	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
  		return -EINVAL;
  	return 0;
  }
  /*
   * Returns a free for dynamic slot assignment of the Online state. The states
   * are protected by the cpuhp_slot_states mutex and an empty slot is identified
   * by having no name assigned.
   */
  static int cpuhp_reserve_state(enum cpuhp_state state)
  {
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
  		end = CPUHP_BP_PREPARE_DYN_END;
  		break;
  	default:
  		return -EINVAL;
  	}
  
  	for (i = state; i <= end; i++, step++) {
  		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
  	return -ENOSPC;
  }
  
  static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
  				 int (*startup)(unsigned int cpu),
  				 int (*teardown)(unsigned int cpu),
  				 bool multi_instance)
  {
  	/* (Un)Install the callbacks for further cpu hotplug operations */
  	struct cpuhp_step *sp;
	int ret = 0;

  	/*
  	 * If name is NULL, then the state gets removed.
  	 *
  	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
  	 * the first allocation from these dynamic ranges, so the removal
  	 * would trigger a new allocation and clear the wrong (already
  	 * empty) state, leaving the callbacks of the to be cleared state
  	 * dangling, which causes wreckage on the next hotplug operation.
  	 */
  	if (name && (state == CPUHP_AP_ONLINE_DYN ||
  		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;
	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
	    (!bringup && !sp->teardown.single))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown,
	 * e.g. during module removal, we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
  	BUG_ON(ret && !bringup);
  	return ret;
  }
  
  /*
   * Called from __cpuhp_setup_state on a recoverable failure.
   *
   * Note: The teardown callbacks for rollback are not allowed to fail!
   */
  static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;
  	/* Roll back the already executed steps on the other cpus */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpu >= failedcpu)
  			break;
  
  		/* Did we invoke the startup call on that cpu ? */
  		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
  	}
  }
  int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
  					  struct hlist_node *node,
  					  bool invoke)
  {
  	struct cpuhp_step *sp;
  	int cpu;
  	int ret;
  	lockdep_assert_cpus_held();
  	sp = cpuhp_get_step(state);
  	if (sp->multi_instance == false)
  		return -EINVAL;
  	mutex_lock(&cpuhp_state_mutex);
  	if (!invoke || !sp->startup.multi)
  		goto add_node;
  
  	/*
  	 * Try to call the startup callback for each present cpu
  	 * depending on the hotplug state of the cpu.
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate < state)
  			continue;
  
  		ret = cpuhp_issue_call(cpu, state, true, node);
  		if (ret) {
  			if (sp->teardown.multi)
  				cpuhp_rollback_install(cpu, state, node);
  			goto unlock;
  		}
  	}
  add_node:
  	ret = 0;
  	hlist_add_head(node, &sp->list);
  unlock:
  	mutex_unlock(&cpuhp_state_mutex);
  	return ret;
  }
  
  int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
  			       bool invoke)
  {
  	int ret;
  
  	cpus_read_lock();
  	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
  	cpus_read_unlock();
  	return ret;
  }
  EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
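
/*
 * Illustrative sketch (hypothetical names): a multi-instance state is set up
 * once via the cpuhp_setup_state_multi() wrapper and then one hlist_node is
 * added per object, typically per device:
 *
 *	static enum cpuhp_state example_hp_online;
 *
 *	static int example_dev_init(struct example_dev *d)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					      "example/dev:online",
 *					      example_cpu_online,
 *					      example_cpu_offline);
 *		if (ret < 0)
 *			return ret;
 *		example_hp_online = ret;
 *		// Runs example_cpu_online(cpu, &d->node) on all online CPUs:
 *		return cpuhp_state_add_instance(example_hp_online, &d->node);
 *	}
 */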

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
   * @state:		The state to setup
   * @invoke:		If true, the startup function is invoked for cpus where
   *			cpu state >= @state
   * @startup:		startup callback function
   * @teardown:		teardown callback function
   * @multi_instance:	State is set up for multiple instances which get
   *			added afterwards.
 *
   * The caller needs to hold cpus read locked while calling this function.
   * Returns:
   *   On success:
   *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
   *      0 for all other states
   *   On failure: proper (negative) error code
   */
  int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
  				   const char *name, bool invoke,
  				   int (*startup)(unsigned int cpu),
  				   int (*teardown)(unsigned int cpu),
  				   bool multi_instance)
  {
  	int cpu, ret = 0;
  	bool dynstate;
  	lockdep_assert_cpus_held();
  	if (cpuhp_cb_check(state) || !name)
  		return -EINVAL;
  	mutex_lock(&cpuhp_state_mutex);
  	ret = cpuhp_store_callbacks(state, name, startup, teardown,
  				    multi_instance);
  	dynstate = state == CPUHP_AP_ONLINE_DYN;
  	if (ret > 0 && dynstate) {
  		state = ret;
  		ret = 0;
  	}
  	if (ret || !invoke || !startup)
  		goto out;
  
  	/*
  	 * Try to call the startup callback for each present cpu
  	 * depending on the hotplug state of the cpu.
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate < state)
  			continue;
  		ret = cpuhp_issue_call(cpu, state, true, NULL);
  		if (ret) {
  			if (teardown)
  				cpuhp_rollback_install(cpu, state, NULL);
  			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
  			goto out;
  		}
  	}
  out:
  	mutex_unlock(&cpuhp_state_mutex);
  	/*
  	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
  	 * dynamically allocated state in case of success.
  	 */
  	if (!ret && dynstate)
  		return state;
  	return ret;
  }
  EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
  
  int __cpuhp_setup_state(enum cpuhp_state state,
  			const char *name, bool invoke,
  			int (*startup)(unsigned int cpu),
  			int (*teardown)(unsigned int cpu),
  			bool multi_instance)
  {
  	int ret;
  
  	cpus_read_lock();
  	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
  					     teardown, multi_instance);
  	cpus_read_unlock();
  	return ret;
  }
  EXPORT_SYMBOL(__cpuhp_setup_state);
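
/*
 * Illustrative sketch (hypothetical callbacks): most users go through the
 * cpuhp_setup_state() wrapper with CPUHP_AP_ONLINE_DYN and store the
 * dynamically allocated state number for later removal:
 *
 *	static enum cpuhp_state example_state;
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *					"example/subsys:online",
 *					example_online,		// startup
 *					example_offline);	// teardown
 *		if (ret < 0)
 *			return ret;
 *		example_state = ret;	// positive dynamic state number
 *		return 0;
 *	}
 */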
  int __cpuhp_state_remove_instance(enum cpuhp_state state,
  				  struct hlist_node *node, bool invoke)
  {
  	struct cpuhp_step *sp = cpuhp_get_step(state);
  	int cpu;
  
  	BUG_ON(cpuhp_cb_check(state));
  
  	if (!sp->multi_instance)
  		return -EINVAL;
  	cpus_read_lock();
  	mutex_lock(&cpuhp_state_mutex);
  	if (!invoke || !cpuhp_get_teardown_cb(state))
  		goto remove;
  	/*
  	 * Call the teardown callback for each present cpu depending
  	 * on the hotplug state of the cpu. This function is not
  	 * allowed to fail currently!
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate >= state)
  			cpuhp_issue_call(cpu, state, false, node);
  	}
  
  remove:
  	hlist_del(node);
  	mutex_unlock(&cpuhp_state_mutex);
  	cpus_read_unlock();
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
  /**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
   * @state:	The state to remove
   * @invoke:	If true, the teardown function is invoked for cpus where
   *		cpu state >= @state
   *
   * The caller needs to hold cpus read locked while calling this function.
 *
   * The teardown callback is currently not allowed to fail. Think
   * about module removal!
   */
  void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
  {
  	struct cpuhp_step *sp = cpuhp_get_step(state);
  	int cpu;
  
  	BUG_ON(cpuhp_cb_check(state));
  	lockdep_assert_cpus_held();
  	mutex_lock(&cpuhp_state_mutex);
  	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
  		     state);
  		goto remove;
  	}
  	if (!invoke || !cpuhp_get_teardown_cb(state))
  		goto remove;
  
  	/*
  	 * Call the teardown callback for each present cpu depending
  	 * on the hotplug state of the cpu. This function is not
  	 * allowed to fail currently!
  	 */
  	for_each_present_cpu(cpu) {
  		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
  		int cpustate = st->state;
  
  		if (cpustate >= state)
  			cpuhp_issue_call(cpu, state, false, NULL);
  	}
  remove:
  	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
  	mutex_unlock(&cpuhp_state_mutex);
  }
  EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
  
  void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
  {
  	cpus_read_lock();
  	__cpuhp_remove_state_cpuslocked(state, invoke);
  	cpus_read_unlock();
  }
  EXPORT_SYMBOL(__cpuhp_remove_state);
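
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * caller pairs state installation with removal. The "demo" names are
 * hypothetical; cpuhp_setup_state() and cpuhp_remove_state() are the
 * convenience wrappers around the cpuslocked variants above.
 */
static int __maybe_unused demo_cpu_online(unsigned int cpu)
{
	/* Set up per-CPU resources here; returning 0 means success. */
	return 0;
}

static int __maybe_unused demo_cpu_offline(unsigned int cpu)
{
	/* Teardown must succeed - see the module removal warning above. */
	return 0;
}

static void __maybe_unused demo_state_lifecycle(void)
{
	int state;

	/* Reserve a dynamic state; returns the state number or a -errno. */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				  demo_cpu_online, demo_cpu_offline);
	if (state < 0)
		return;

	/* The matching removal runs demo_cpu_offline() on each online CPU. */
	cpuhp_remove_state(state);
}
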
  #ifdef CONFIG_HOTPLUG_SMT
  static void cpuhp_offline_cpu_device(unsigned int cpu)
  {
  	struct device *dev = get_cpu_device(cpu);
  
  	dev->offline = true;
  	/* Tell user space about the state change */
  	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
  }
  
  static void cpuhp_online_cpu_device(unsigned int cpu)
  {
  	struct device *dev = get_cpu_device(cpu);
  
  	dev->offline = false;
  	/* Tell user space about the state change */
  	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
  }
  
  int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
  {
  	int cpu, ret = 0;
  
  	cpu_maps_update_begin();
  	for_each_online_cpu(cpu) {
  		if (topology_is_primary_thread(cpu))
  			continue;
  		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
  		if (ret)
  			break;
  		/*
  		 * As this needs to hold the cpu maps lock it's impossible
  		 * to call device_offline() because that ends up calling
  		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in-kernel
		 * abusers of the hotplug machinery (thermal management).
  		 *
  		 * So nothing would update device:offline state. That would
  		 * leave the sysfs entry stale and prevent onlining after
  		 * smt control has been changed to 'off' again. This is
  		 * called under the sysfs hotplug lock, so it is properly
  		 * serialized against the regular offline usage.
  		 */
  		cpuhp_offline_cpu_device(cpu);
  	}
  	if (!ret)
  		cpu_smt_control = ctrlval;
  	cpu_maps_update_done();
  	return ret;
  }
  
  int cpuhp_smt_enable(void)
  {
  	int cpu, ret = 0;
  
  	cpu_maps_update_begin();
  	cpu_smt_control = CPU_SMT_ENABLED;
  	for_each_present_cpu(cpu) {
  		/* Skip online CPUs and CPUs on offline nodes */
  		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
  			continue;
  		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
  		if (ret)
  			break;
  		/* See comment in cpuhp_smt_disable() */
  		cpuhp_online_cpu_device(cpu);
  	}
  	cpu_maps_update_done();
  	return ret;
  }
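
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * cpuhp_smt_disable() above skips primary threads, so the CPUs it would
 * take down are exactly the online CPUs failing this test.
 */
static unsigned int __maybe_unused demo_count_smt_siblings(void)
{
	unsigned int cpu, count = 0;

	for_each_online_cpu(cpu) {
		/* Secondary SMT siblings are the offline candidates. */
		if (!topology_is_primary_thread(cpu))
			count++;
	}
	return count;
}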
  #endif
  #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
  static ssize_t show_cpuhp_state(struct device *dev,
  				struct device_attribute *attr, char *buf)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  
  	return sprintf(buf, "%d
  ", st->state);
  }
  static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
  static ssize_t write_cpuhp_target(struct device *dev,
  				  struct device_attribute *attr,
  				  const char *buf, size_t count)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  	struct cpuhp_step *sp;
  	int target, ret;
  
  	ret = kstrtoint(buf, 10, &target);
  	if (ret)
  		return ret;
  
  #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
  	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
  		return -EINVAL;
  #else
  	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
  		return -EINVAL;
  #endif
  
  	ret = lock_device_hotplug_sysfs();
  	if (ret)
  		return ret;
  
  	mutex_lock(&cpuhp_state_mutex);
  	sp = cpuhp_get_step(target);
  	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
  	mutex_unlock(&cpuhp_state_mutex);
  	if (ret)
  		goto out;
  
  	if (st->state < target)
  		ret = cpu_up(dev->id, target);
  	else
  		ret = cpu_down(dev->id, target);
  out:
  	unlock_device_hotplug();
  	return ret ? ret : count;
  }
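
/*
 * Usage sketch for the per-cpu "target" attribute (paths and the state
 * numbers are illustrative; the valid numbers can be read from the
 * "states" file shown further below):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Writing 0 (CPUHP_OFFLINE) tears cpu1 down; writing the CPUHP_ONLINE
 * number brings it fully up. Without CONFIG_CPU_HOTPLUG_STATE_CONTROL
 * only these two endpoints are accepted, as enforced above.
 */
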
  static ssize_t show_cpuhp_target(struct device *dev,
  				 struct device_attribute *attr, char *buf)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  
  	return sprintf(buf, "%d
  ", st->target);
  }
  static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

  static ssize_t write_cpuhp_fail(struct device *dev,
  				struct device_attribute *attr,
  				const char *buf, size_t count)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  	struct cpuhp_step *sp;
  	int fail, ret;
  
  	ret = kstrtoint(buf, 10, &fail);
  	if (ret)
  		return ret;
  	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
  		return -EINVAL;
  	/*
  	 * Cannot fail STARTING/DYING callbacks.
  	 */
  	if (cpuhp_is_atomic_state(fail))
  		return -EINVAL;
  
  	/*
  	 * Cannot fail anything that doesn't have callbacks.
  	 */
  	mutex_lock(&cpuhp_state_mutex);
  	sp = cpuhp_get_step(fail);
  	if (!sp->startup.single && !sp->teardown.single)
  		ret = -EINVAL;
  	mutex_unlock(&cpuhp_state_mutex);
  	if (ret)
  		return ret;
  
  	st->fail = fail;
  
  	return count;
  }
  
  static ssize_t show_cpuhp_fail(struct device *dev,
  			       struct device_attribute *attr, char *buf)
  {
  	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
  
  	return sprintf(buf, "%d
  ", st->fail);
  }
  
  static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
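
/*
 * Usage sketch for the fault injection attribute (the state number is
 * illustrative): arming "fail" makes the next transition through that
 * state report an error, which exercises the rollback paths.
 *
 *	# echo 42 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * Atomic STARTING/DYING states are rejected above because they cannot
 * be rolled back.
 */
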
  static struct attribute *cpuhp_cpu_attrs[] = {
  	&dev_attr_state.attr,
  	&dev_attr_target.attr,
  	&dev_attr_fail.attr,
  	NULL
  };
  static const struct attribute_group cpuhp_cpu_attr_group = {
  	.attrs = cpuhp_cpu_attrs,
  	.name = "hotplug",
  };
  
  static ssize_t show_cpuhp_states(struct device *dev,
  				 struct device_attribute *attr, char *buf)
  {
  	ssize_t cur, res = 0;
  	int i;
  
  	mutex_lock(&cpuhp_state_mutex);
  	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
  		struct cpuhp_step *sp = cpuhp_get_step(i);
  
  		if (sp->name) {
  			cur = sprintf(buf, "%3d: %s
  ", i, sp->name);
  			buf += cur;
  			res += cur;
  		}
  	}
  	mutex_unlock(&cpuhp_state_mutex);
  	return res;
  }
  static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
  
  static struct attribute *cpuhp_cpu_root_attrs[] = {
  	&dev_attr_states.attr,
  	NULL
  };
  static const struct attribute_group cpuhp_cpu_root_attr_group = {
  	.attrs = cpuhp_cpu_root_attrs,
  	.name = "hotplug",
  };
  #ifdef CONFIG_HOTPLUG_SMT
  static ssize_t
  __store_smt_control(struct device *dev, struct device_attribute *attr,
  		    const char *buf, size_t count)
  {
  	int ctrlval, ret;
  
  	if (sysfs_streq(buf, "on"))
  		ctrlval = CPU_SMT_ENABLED;
  	else if (sysfs_streq(buf, "off"))
  		ctrlval = CPU_SMT_DISABLED;
  	else if (sysfs_streq(buf, "forceoff"))
  		ctrlval = CPU_SMT_FORCE_DISABLED;
  	else
  		return -EINVAL;
  
  	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
  		return -EPERM;
  
  	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
  		return -ENODEV;
  
  	ret = lock_device_hotplug_sysfs();
  	if (ret)
  		return ret;
  
  	if (ctrlval != cpu_smt_control) {
  		switch (ctrlval) {
  		case CPU_SMT_ENABLED:
  			ret = cpuhp_smt_enable();
  			break;
  		case CPU_SMT_DISABLED:
  		case CPU_SMT_FORCE_DISABLED:
  			ret = cpuhp_smt_disable(ctrlval);
  			break;
  		}
  	}
  
  	unlock_device_hotplug();
  	return ret ? ret : count;
  }
  
  #else /* !CONFIG_HOTPLUG_SMT */
  static ssize_t
  __store_smt_control(struct device *dev, struct device_attribute *attr,
  		    const char *buf, size_t count)
  {
  	return -ENODEV;
  }
  #endif /* CONFIG_HOTPLUG_SMT */
  
  static const char *smt_states[] = {
  	[CPU_SMT_ENABLED]		= "on",
  	[CPU_SMT_DISABLED]		= "off",
  	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
  	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
  	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
  };
  
  static ssize_t
  show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
  {
  	const char *state = smt_states[cpu_smt_control];
  
  	return snprintf(buf, PAGE_SIZE - 2, "%s
  ", state);
  }
  
  static ssize_t
  store_smt_control(struct device *dev, struct device_attribute *attr,
  		  const char *buf, size_t count)
  {
  	return __store_smt_control(dev, attr, buf, count);
  }
  static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
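
/*
 * Usage sketch for the global SMT switch (illustrative):
 *
 *	# cat /sys/devices/system/cpu/smt/control
 *	on
 *	# echo off > /sys/devices/system/cpu/smt/control
 *
 * "forceoff" is a one-way latch: once it is set, further writes fail
 * with -EPERM, as checked in __store_smt_control() above.
 */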
  
  static ssize_t
  show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
  {
  	return snprintf(buf, PAGE_SIZE - 2, "%d
  ", sched_smt_active());
  }
  static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
  
  static struct attribute *cpuhp_smt_attrs[] = {
  	&dev_attr_control.attr,
  	&dev_attr_active.attr,
  	NULL
  };
  
  static const struct attribute_group cpuhp_smt_attr_group = {
  	.attrs = cpuhp_smt_attrs,
  	.name = "smt",
  };
  static int __init cpu_smt_sysfs_init(void)
  {
  	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
  				  &cpuhp_smt_attr_group);
  }
  static int __init cpuhp_sysfs_init(void)
  {
  	int cpu, ret;
  	ret = cpu_smt_sysfs_init();
  	if (ret)
  		return ret;
  	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
  				 &cpuhp_cpu_root_attr_group);
  	if (ret)
  		return ret;
  
  	for_each_possible_cpu(cpu) {
  		struct device *dev = get_cpu_device(cpu);
  
  		if (!dev)
  			continue;
  		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
  		if (ret)
  			return ret;
  	}
  	return 0;
  }
  device_initcall(cpuhp_sysfs_init);
  #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
  /*
   * cpu_bit_bitmap[] is a special, "compressed" data structure that
   * represents all NR_CPUS bits binary values of 1<<nr.
   *
   * It is used by cpumask_of() to get a constant address to a CPU
   * mask value that has a single bit set only.
   */
  /* cpu_bit_bitmap[0] is empty - so we can back into it */
  #define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
  #define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
  #define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
  #define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
  const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
  
  	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
  	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
  #if BITS_PER_LONG > 32
  	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
  	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
  #endif
  };
  EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
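
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * cpumask_of() resolves to an offset into cpu_bit_bitmap[], so callers
 * get a constant single-bit mask without building one at run time.
 */
static bool __maybe_unused demo_is_only_cpu_online(unsigned int cpu)
{
	/* True when @cpu is the only bit set in the online mask. */
	return cpumask_equal(cpu_online_mask, cpumask_of(cpu));
}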
  
  const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
  EXPORT_SYMBOL(cpu_all_bits);
  
  #ifdef CONFIG_INIT_ALL_POSSIBLE
  struct cpumask __cpu_possible_mask __read_mostly
  	= {CPU_BITS_ALL};
  #else
  struct cpumask __cpu_possible_mask __read_mostly;
  #endif
  EXPORT_SYMBOL(__cpu_possible_mask);
  struct cpumask __cpu_online_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_online_mask);
  struct cpumask __cpu_present_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_present_mask);
  struct cpumask __cpu_active_mask __read_mostly;
  EXPORT_SYMBOL(__cpu_active_mask);
  atomic_t __num_online_cpus __read_mostly;
  EXPORT_SYMBOL(__num_online_cpus);
  void init_cpu_present(const struct cpumask *src)
  {
  	cpumask_copy(&__cpu_present_mask, src);
  }
  
  void init_cpu_possible(const struct cpumask *src)
  {
  	cpumask_copy(&__cpu_possible_mask, src);
  }
  
  void init_cpu_online(const struct cpumask *src)
  {
  	cpumask_copy(&__cpu_online_mask, src);
  }
  void set_cpu_online(unsigned int cpu, bool online)
  {
  	/*
  	 * atomic_inc/dec() is required to handle the horrid abuse of this
  	 * function by the reboot and kexec code which invoke it from
  	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
  	 * regular CPU hotplug is properly serialized.
  	 *
  	 * Note, that the fact that __num_online_cpus is of type atomic_t
  	 * does not protect readers which are not serialized against
  	 * concurrent hotplug operations.
  	 */
  	if (online) {
  		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
  			atomic_inc(&__num_online_cpus);
  	} else {
  		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
  			atomic_dec(&__num_online_cpus);
  	}
  }
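
/*
 * Illustrative sketch (hypothetical reader, not in the original file):
 * as the comment above notes, the atomic counter alone does not protect
 * readers. A reader needing a stable count must hold the hotplug lock
 * for as long as it relies on the value.
 */
static void __maybe_unused demo_read_stable_online_count(void)
{
	unsigned int nr;

	cpus_read_lock();
	/* No CPU can come or go until the matching unlock. */
	nr = num_online_cpus();
	pr_debug("stable online count: %u\n", nr);
	cpus_read_unlock();
}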
  /*
   * Activate the first processor.
   */
  void __init boot_cpu_init(void)
  {
  	int cpu = smp_processor_id();
  
  	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
  	set_cpu_online(cpu, true);
  	set_cpu_active(cpu, true);
  	set_cpu_present(cpu, true);
  	set_cpu_possible(cpu, true);
  
  #ifdef CONFIG_SMP
  	__boot_cpu_id = cpu;
  #endif
  }
  
  /*
   * Must be called _AFTER_ setting up the per_cpu areas
   */
  void __init boot_cpu_hotplug_init(void)
  {
  #ifdef CONFIG_SMP
  	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
  #endif
  	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
  }
  /*
   * These are used for a global "mitigations=" cmdline option for toggling
   * optional CPU mitigations.
   */
  enum cpu_mitigations {
  	CPU_MITIGATIONS_OFF,
  	CPU_MITIGATIONS_AUTO,
  	CPU_MITIGATIONS_AUTO_NOSMT,
  };
  
  static enum cpu_mitigations cpu_mitigations __ro_after_init =
  	CPU_MITIGATIONS_AUTO;
  
  static int __init mitigations_parse_cmdline(char *arg)
  {
  	if (!strcmp(arg, "off"))
  		cpu_mitigations = CPU_MITIGATIONS_OFF;
  	else if (!strcmp(arg, "auto"))
  		cpu_mitigations = CPU_MITIGATIONS_AUTO;
  	else if (!strcmp(arg, "auto,nosmt"))
  		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
  	else
  		pr_crit("Unsupported mitigations=%s, system may still be vulnerable
  ",
  			arg);
  
  	return 0;
  }
  early_param("mitigations", mitigations_parse_cmdline);
  
  /* mitigations=off */
  bool cpu_mitigations_off(void)
  {
  	return cpu_mitigations == CPU_MITIGATIONS_OFF;
  }
  EXPORT_SYMBOL_GPL(cpu_mitigations_off);
  
  /* mitigations=auto,nosmt */
  bool cpu_mitigations_auto_nosmt(void)
  {
  	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
  }
  EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
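
/*
 * Illustrative sketch (hypothetical arch-side consumer, not in the
 * original file): mitigation selection code typically consults these
 * helpers when deciding whether to mitigate and whether SMT must go.
 */
static bool __maybe_unused demo_mitigation_wants_nosmt(void)
{
	/* "mitigations=off" implies no SMT restriction either. */
	if (cpu_mitigations_off())
		return false;

	/* "mitigations=auto,nosmt" asks arch code to disable SMT too. */
	return cpu_mitigations_auto_nosmt();
}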