kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
	/* And allows lockless put_online_cpus(). */
	atomic_t puts_pending;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	if (!mutex_trylock(&cpu_hotplug.lock)) {
		atomic_inc(&cpu_hotplug.puts_pending);
		cpuhp_lock_release();
		return;
	}

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
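
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * read-side code brackets any walk of the online mask with the
 * refcounting APIs above, so no CPU can be unplugged mid-walk.
 * do_work() is a hypothetical per-cpu operation:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_work(cpu);
 *	put_online_cpus();
 *
 * try_get_online_cpus() is the non-blocking variant; on failure the
 * caller must bail out instead of sleeping.
 */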

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (atomic_read(&cpu_hotplug.puts_pending)) {
			int delta;

			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
			cpu_hotplug.refcount -= delta;
		}
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
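
/*
 * Registration sketch (editor's illustration, not part of the original
 * file): per the comment at the top of this file, the __ variants pair
 * with cpu_notifier_register_begin/done() so that no hotplug operation
 * can slip in between initializing the already-online CPUs and
 * registering the callback. foo_prepare_cpu() and foo_cpu_notifier are
 * hypothetical:
 *
 *	cpu_notifier_register_begin();
 *
 *	for_each_online_cpu(cpu)
 *		foo_prepare_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *
 *	cpu_notifier_register_done();
 */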

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
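
/*
 * Usage note (editor's illustration): cpu_down()/cpu_up() are what the
 * sysfs "online" attribute ends up calling, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online	->  cpu_down(1)
 *	echo 1 > /sys/devices/system/cpu/cpu1/online	->  cpu_up(1)
 *
 * In-kernel callers should expect -EBUSY while hotplug is disabled
 * (e.g. during suspend/resume) and handle it:
 *
 *	err = cpu_down(1);
 *	if (err)
 *		pr_err("failed to offline CPU1: %d\n", err);
 */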

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
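
/*
 * Override sketch (editor's illustration): an architecture can supply
 * non-weak definitions to bracket the resume-time re-onlining below. As
 * of this kernel era, x86 uses these hooks (arch/x86/kernel/smpboot.c)
 * to defer MTRR/PAT init until all APs are back up, roughly:
 *
 *	void arch_enable_nonboot_cpus_begin(void)
 *	{
 *		set_mtrr_aps_delayed_init();
 *	}
 *
 *	void arch_enable_nonboot_cpus_end(void)
 *	{
 *		mtrr_aps_init();
 *	}
 */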

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure: for each
 * bit number nr it stores the NR_CPUS-bit binary value of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
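
/*
 * Indexing sketch (editor's illustration): cpumask_of(cpu) expands to
 * get_cpu_mask(cpu), which as of this kernel era (include/linux/cpumask.h)
 * points into the table above and steps the base pointer backwards so the
 * single set bit lands at position cpu:
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 */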

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}