kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback:   Perform a rollback
 * @cb_state:   The state for a single callback (install/uninstall)
 * @cb:         Single callback function (install/uninstall)
 * @result:     Result of the operation
 * @done:       Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
        struct completion       done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @skip_onerr: Do not invoke the functions on error rollback
 *              Will go away once the notifiers are gone
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
        const char      *name;
        int             (*startup)(unsigned int cpu);
        int             (*teardown)(unsigned int cpu);
        bool            skip_onerr;
        bool            cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @step:       The step in the state machine
 * @cb:         The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
                                 int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret = 0;

        if (cb) {
                trace_cpuhp_enter(cpu, st->target, step, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, step, ret);
        }
        return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
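
/*
 * Usage sketch (illustrative, not part of the original file;
 * my_count_online_cpus() is a hypothetical caller): code that must see a
 * stable cpu_online_mask brackets the traversal with the reader-side pair
 * above.
 */
#if 0
static unsigned int my_count_online_cpus(void)
{
        unsigned int cpu, n = 0;

        get_online_cpus();              /* blocks cpu_hotplug_begin() writers */
        for_each_online_cpu(cpu)        /* mask cannot change under us */
                n++;
        put_online_cpus();              /* drops refcount, may wake a writer */
        return n;
}
#endif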
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif  /* CONFIG_HOTPLUG_CPU */
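
/*
 * Usage sketch (illustrative; my_quiesced_update() is hypothetical): the
 * cpu_hotplug_disabled counter makes disable/enable nest, so callers simply
 * pair the two calls around a section that must not race with hotplug.
 */
#if 0
static void my_quiesced_update(void)
{
        cpu_hotplug_disable();  /* cpu_up()/cpu_down() now fail with -EBUSY */
        /* ... reconfigure per-cpu resources ... */
        cpu_hotplug_enable();
}
#endif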
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
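
/*
 * Usage sketch (illustrative; the my_* names are hypothetical): a legacy
 * hotplug notifier as registered through the API above. New code is expected
 * to move to the cpuhp state machine set up later in this file.
 */
#if 0
static int my_cpu_callback(struct notifier_block *nb, unsigned long action,
                           void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                pr_info("CPU%u is now online\n", cpu);
                break;
        case CPU_DOWN_PREPARE:
                pr_info("CPU%u is about to go down\n", cpu);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
        .notifier_call = my_cpu_callback,
};

/* in some init path: register_cpu_notifier(&my_cpu_notifier); */
#endif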
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);
        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
        int nr_calls = 0;
        int ret;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
        }
        return ret;
}

static int notify_online(unsigned int cpu)
{
        cpu_notify(CPU_ONLINE, cpu);
        return 0;
}
static int notify_starting(unsigned int cpu)
{
        cpu_notify(CPU_STARTING, cpu);
        return 0;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        wait_for_completion(&st->done);
        return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret) {
                cpu_notify(CPU_UP_CANCELED, cpu);
                return ret;
        }
        ret = bringup_wait_for_ap(cpu);
        BUG_ON(!cpu_online(cpu));
        return ret;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
                          struct cpuhp_step *steps)
{
        for (st->state++; st->state < st->target; st->state++) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                struct cpuhp_step *steps,
                                enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_down(cpu, st, steps);
                        break;
                }
        }
        return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
                        struct cpuhp_step *steps)
{
        for (st->state--; st->state > st->target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              struct cpuhp_step *steps,
                              enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = steps + st->state;
                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st, steps);
                        break;
                }
        }
        return ret;
}
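
/*
 * For illustration: with prev_state == CPUHP_OFFLINE and target ==
 * CPUHP_ONLINE, cpuhp_up_callbacks() invokes the startup callbacks in
 * ascending state order:
 *
 *      OFFLINE -> CREATE_THREADS -> ... -> BRINGUP_CPU -> ... -> ONLINE
 *
 * If the startup callback of some state N fails, undo_cpu_up() walks back
 * down from N-1 towards prev_state, invoking the teardown callback of every
 * step that is not marked skip_onerr.
 */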
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

        return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        int ret = 0;

        /*
         * Paired with the mb() in cpuhp_kick_ap_work and
         * cpuhp_invoke_ap_callback, so the work set is consistently visible.
         */
        smp_mb();
        if (!st->should_run)
                return;

        st->should_run = false;

        /* Single callback invocation for [un]install ? */
        if (st->cb) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
                        local_irq_disable();
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                        local_irq_enable();
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
        } else if (st->rollback) {
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                undo_cpu_down(cpu, st, cpuhp_ap_states);
                /*
                 * This is a momentary workaround to keep the notifier users
                 * happy. Will go away once we got rid of the notifiers.
                 */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                /* Regular hotplug work */
                if (st->state < st->target)
                        ret = cpuhp_ap_online(cpu, st);
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
        st->result = ret;
        complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
                                    int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        if (!cpu_online(cpu))
                return 0;

        /*
         * If we are up and running, use the hotplug thread. For early calls
         * we invoke the thread function directly.
         */
        if (!st->thread)
                return cpuhp_invoke_callback(cpu, state, cb);

        st->cb_state = state;
        st->cb = cb;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_completion(&st->done);
        return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
        st->result = 0;
        st->cb = NULL;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state state = st->state;

        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
        return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}
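
/*
 * Usage sketch (illustrative; my_arch_cpu_die() is hypothetical): an
 * architecture calls this from its dead-CPU cleanup, after the CPU has been
 * marked offline, which is exactly what the relaxed locking above relies on.
 */
#if 0
void my_arch_cpu_die(unsigned int cpu)
{
        clear_tasks_mm_cpumask(cpu);    /* cpu is guaranteed offline here */
        /* ... architecture specific teardown ... */
}
#endif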
static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so as not to warn about a task
                 * which was running on this cpu in the past and has
                 * just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}
static int notify_down_prepare(unsigned int cpu)
{
        int err, nr_calls = 0;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
        }
        return err;
}

static int notify_dying(unsigned int cpu)
{
        cpu_notify(CPU_DYING, cpu);
        return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = cpuhp_ap_states + st->state;

                cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}
static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_completion(&st->done);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        return 0;
}
static int notify_dead(unsigned int cpu)
{
        cpu_notify_nofail(CPU_DEAD, cpu);
        check_for_tasks(cpu);
        return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        rcu_report_dead(smp_processor_id());
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcu_report_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}

#else
#define notify_down_prepare     NULL
#define takedown_cpu            NULL
#define notify_dead             NULL
#define notify_dying            NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
        bool hasdied = false;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = st->state;
        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                st->target = prev_state;
                st->rollback = true;
                cpuhp_kick_ap_work(cpu);
        }

        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
        cpu_hotplug_done();
        /* This post dead nonsense must die */
        if (!ret && hasdied)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0, target);

out:
        cpu_maps_update_done();
        return err;
}

int cpu_down(unsigned int cpu)
{
        return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = cpuhp_ap_states + st->state;
                cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}
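
/*
 * Usage sketch (illustrative; my_secondary_start_kernel() is a hypothetical
 * condensation of an architecture's secondary startup path):
 */
#if 0
void my_secondary_start_kernel(void)
{
        unsigned int cpu = smp_processor_id();

        /* ... low level init: MMU, per-cpu areas, timers ... */
        notify_cpu_starting(cpu);       /* CPU_STARTING, irqs still disabled */
        set_cpu_online(cpu, true);      /* lets __cpu_up() on the BP proceed */
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#endif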
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
 * cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        unsigned int cpu = smp_processor_id();

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        st->state = CPUHP_AP_ONLINE_IDLE;

        /* Unpark the stopper thread and the hotplug thread of this cpu */
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);

        /* Should we go further up ? */
        if (st->target > CPUHP_AP_ONLINE_IDLE)
                __cpuhp_kick_ap_work(st);
        else
                complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpu_hotplug_begin();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of do_cpu_up might have raced with another
         * caller. Ignore it for now.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
        cpu_hotplug_done();
        return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

int cpu_up(unsigned int cpu)
{
        return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
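
/*
 * Usage sketch (illustrative; my_cycle_cpu() is hypothetical): cpu_down()
 * and cpu_up() are the kernel-internal equivalents of writing 0/1 to
 * /sys/devices/system/cpu/cpuN/online.
 */
#if 0
static int my_cycle_cpu(unsigned int cpu)
{
        int ret = cpu_down(cpu);        /* walk the states down to CPUHP_OFFLINE */

        if (ret)
                return ret;
        return cpu_up(cpu);             /* and back up to CPUHP_ONLINE */
}
#endif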

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name           = "offline",
                .startup        = NULL,
                .teardown       = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
                .name           = "threads:create",
                .startup        = smpboot_create_threads,
                .teardown       = NULL,
                .cant_stop      = true,
        },
        [CPUHP_PERF_PREPARE] = {
                .name           = "perf prepare",
                .startup        = perf_event_init_cpu,
                .teardown       = perf_event_exit_cpu,
        },
        [CPUHP_WORKQUEUE_PREP] = {
                .name           = "workqueue prepare",
                .startup        = workqueue_prepare_cpu,
                .teardown       = NULL,
        },
        [CPUHP_HRTIMERS_PREPARE] = {
                .name           = "hrtimers prepare",
                .startup        = hrtimers_prepare_cpu,
                .teardown       = hrtimers_dead_cpu,
        },
        [CPUHP_SMPCFD_PREPARE] = {
                .name           = "SMPCFD prepare",
                .startup        = smpcfd_prepare_cpu,
                .teardown       = smpcfd_dead_cpu,
        },
        [CPUHP_RCUTREE_PREP] = {
                .name           = "RCU-tree prepare",
                .startup        = rcutree_prepare_cpu,
                .teardown       = rcutree_dead_cpu,
        },
        /*
         * Preparatory and dead notifiers. Will be replaced once the notifiers
         * are converted to states.
         */
        [CPUHP_NOTIFY_PREPARE] = {
                .name           = "notify:prepare",
                .startup        = notify_prepare,
                .teardown       = notify_dead,
                .skip_onerr     = true,
                .cant_stop      = true,
        },
        /*
         * On the tear-down path, timers_dead_cpu() must be invoked
         * before blk_mq_queue_reinit_notify() from notify_dead(),
         * otherwise a RCU stall occurs.
         */
        [CPUHP_TIMERS_DEAD] = {
                .name           = "timers dead",
                .startup        = NULL,
                .teardown       = timers_dead_cpu,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name           = "cpu:bringup",
                .startup        = bringup_cpu,
                .teardown       = NULL,
                .cant_stop      = true,
        },
        [CPUHP_AP_SMPCFD_DYING] = {
                .startup        = NULL,
                .teardown       = smpcfd_dying_cpu,
        },
        /*
         * Handled on control processor until the plugged processor manages
         * this itself.
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name           = "cpu:teardown",
                .startup        = NULL,
                .teardown       = takedown_cpu,
                .cant_stop      = true,
        },
#else
        [CPUHP_BRINGUP_CPU] = { },
#endif
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
        /* Final state before CPU kills itself */
        [CPUHP_AP_IDLE_DEAD] = {
                .name           = "idle:dead",
        },
        /*
         * Last state before CPU enters the idle loop to die. Transient state
         * for synchronization.
         */
        [CPUHP_AP_OFFLINE] = {
                .name           = "ap:offline",
                .cant_stop      = true,
        },
        /* First state is scheduler control. Interrupts are disabled */
        [CPUHP_AP_SCHED_STARTING] = {
                .name           = "sched:starting",
                .startup        = sched_cpu_starting,
                .teardown       = sched_cpu_dying,
        },
        [CPUHP_AP_RCUTREE_DYING] = {
                .startup        = NULL,
                .teardown       = rcutree_dying_cpu,
        },
        /*
         * Low level startup/teardown notifiers. Run with interrupts
         * disabled. Will be removed once the notifiers are converted to
         * states.
         */
        [CPUHP_AP_NOTIFY_STARTING] = {
                .name           = "notify:starting",
                .startup        = notify_starting,
                .teardown       = notify_dying,
                .skip_onerr     = true,
                .cant_stop      = true,
        },
        /*
         * Entry state on starting. Interrupts enabled from here on.
         * Transient state for synchronization.
         */
        [CPUHP_AP_ONLINE] = {
                .name           = "ap:online",
        },
        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name           = "smpboot:threads",
                .startup        = smpboot_unpark_threads,
                .teardown       = NULL,
        },
        [CPUHP_AP_PERF_ONLINE] = {
                .name           = "perf online",
                .startup        = perf_event_init_cpu,
                .teardown       = perf_event_exit_cpu,
        },
        [CPUHP_AP_WORKQUEUE_ONLINE] = {
                .name           = "workqueue online",
                .startup        = workqueue_online_cpu,
                .teardown       = workqueue_offline_cpu,
        },
        [CPUHP_AP_RCUTREE_ONLINE] = {
                .name           = "RCU-tree online",
                .startup        = rcutree_online_cpu,
                .teardown       = rcutree_offline_cpu,
        },

        /*
         * Online/down_prepare notifiers. Will be removed once the notifiers
         * are converted to states.
         */
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name           = "notify:online",
                .startup        = notify_online,
                .teardown       = notify_down_prepare,
                .skip_onerr     = true,
        },
#endif
        /*
         * The dynamically registered state space is here
         */
#ifdef CONFIG_SMP
        /* Last state is scheduler control setting the cpu active */
        [CPUHP_AP_ACTIVE] = {
                .name           = "sched:active",
                .startup        = sched_cpu_activate,
                .teardown       = sched_cpu_deactivate,
        },
#endif
        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name           = "online",
                .startup        = NULL,
                .teardown       = NULL,
        },
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
                return -EINVAL;
        return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        /*
         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
         * purposes as that state is handled explicitly in cpu_down.
         */
        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        struct cpuhp_step *sp;

        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
        return sp + state;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
                                  const char *name,
                                  int (*startup)(unsigned int cpu),
                                  int (*teardown)(unsigned int cpu))
{
        /* (Un)Install the callbacks for further cpu hotplug operations */
        struct cpuhp_step *sp;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
        sp->startup = startup;
        sp->teardown = teardown;
        sp->name = name;
        mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
        return cpuhp_get_step(state)->teardown;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
                            int (*cb)(unsigned int), bool bringup)
{
        int ret;

        if (!cb)
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
         * e.g. module removal we crash for now.
         */
#ifdef CONFIG_SMP
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, cb);
        else
                ret = cpuhp_invoke_callback(cpu, state, cb);
#else
        ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
        BUG_ON(ret && !bringup);
        return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
                                   int (*teardown)(unsigned int cpu))
{
        int cpu;

        if (!teardown)
                return;

        /* Roll back the already executed steps on the other cpus */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpu >= failedcpu)
                        break;

                /* Did we invoke the startup call on that cpu ? */
                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
}

/*
 * Returns a free slot for dynamic assignment in the Online state space. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
        enum cpuhp_state i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
                if (cpuhp_ap_states[i].name)
                        continue;

                cpuhp_ap_states[i].name = "Reserved";
                mutex_unlock(&cpuhp_state_mutex);
                return i;
        }
        mutex_unlock(&cpuhp_state_mutex);
        WARN(1, "No more dynamic states available for CPU hotplug\n");
        return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:      The state to setup
 * @name:       Name of the state
 * @invoke:     If true, the startup function is invoked for cpus where
 *              cpu state >= @state
 * @startup:    startup callback function
 * @teardown:   teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
                        const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu))
{
        int cpu, ret = 0;
        int dyn_state = 0;

        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;

        get_online_cpus();

        /* currently assignments for the ONLINE state are possible */
        if (state == CPUHP_AP_ONLINE_DYN) {
                dyn_state = 1;
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
                state = ret;
        }

        cpuhp_store_callbacks(state, name, startup, teardown);

        if (!invoke || !startup)
                goto out;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, startup, true);
                if (ret) {
                        cpuhp_rollback_install(cpu, state, teardown);
                        cpuhp_store_callbacks(state, NULL, NULL, NULL);
                        goto out;
                }
        }
out:
        put_online_cpus();
        if (!ret && dyn_state)
                return state;
        return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:      The state to remove
 * @invoke:     If true, the teardown function is invoked for cpus where
 *              cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
        int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        get_online_cpus();

        if (!invoke || !teardown)
                goto remove;

        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL);
        put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
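
/*
 * Usage sketch (illustrative; the my_* names are hypothetical): typical
 * driver-side use via the cpuhp_setup_state()/cpuhp_remove_state() wrappers
 * from <linux/cpuhotplug.h>, reserving a dynamic online state.
 */
#if 0
static int my_online(unsigned int cpu)
{
        /* set up per-cpu resources for @cpu; runs for each onlining CPU */
        return 0;
}

static int my_prep_down(unsigned int cpu)
{
        /* quiesce and release the per-cpu resources of @cpu */
        return 0;
}

static int my_hp_state;

static int __init my_init(void)
{
        int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                    my_online, my_prep_down);
        if (ret < 0)
                return ret;
        my_hp_state = ret;      /* the dynamic slot that was reserved */
        return 0;
}

static void __exit my_exit(void)
{
        cpuhp_remove_state(my_hp_state);
}
#endif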
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)

static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
        int target, ret;

        ret = kstrtoint(buf, 10, &target);
        if (ret)
                return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
                return -EINVAL;
#else
        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
                return -EINVAL;
#endif

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(target);
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
                return ret;

        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);

        unlock_device_hotplug();
        return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name = "hotplug",
        NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t cur, res = 0;
        int i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
                struct cpuhp_step *sp = cpuhp_get_step(i);

                if (sp->name) {
                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
                        buf += cur;
                        res += cur;
                }
        }
        mutex_unlock(&cpuhp_state_mutex);
        return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
        .attrs = cpuhp_cpu_root_attrs,
        .name = "hotplug",
        NULL
};

static int __init cpuhp_sysfs_init(void)
{
        int cpu, ret;

        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct device *dev = get_cpu_device(cpu);

                if (!dev)
                        continue;
                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
                if (ret)
                        return ret;
        }
        return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
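
/*
 * Usage sketch (illustrative; my_cpumask_of_demo() is hypothetical):
 * cpumask_of() resolves to a pointer into cpu_bit_bitmap[], so a single-CPU
 * mask needs no per-call storage; takedown_cpu() above relies on this when
 * it calls stop_machine(take_cpu_down, NULL, cpumask_of(cpu)).
 */
#if 0
static void my_cpumask_of_demo(void)
{
        const struct cpumask *mask = cpumask_of(3);

        WARN_ON(!cpumask_test_cpu(3, mask));    /* only bit 3 is set */
        WARN_ON(cpumask_weight(mask) != 1);
}
#endif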
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();

        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}