kernel/stop_machine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005	IBM Corporation.
 * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;
	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */
	struct cpu_stop_work	stop_work;	/* for stop_cpus */

	unsigned long		caller;
	cpu_stop_fn_t		fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
	/*
	 * If @task is a stopper task, it cannot migrate and task_cpu() is
	 * stable.
	 */
	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

	if (task != stopper->thread)
		return;

	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn,
	       (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
					struct cpu_stop_work *work,
					struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
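
/*
 * Usage sketch for stop_one_cpu() (illustrative only; the callback, helper
 * and types named here are hypothetical, not part of this file): run a
 * short, non-sleeping callback on one CPU and collect its return value.
 *
 *	static int set_freq_on_cpu(void *arg)
 *	{
 *		struct freq_request *req = arg;		// hypothetical type
 *
 *		// Runs with stopper priority on the target CPU; must not sleep.
 *		return write_freq_msr(req->khz);	// hypothetical helper
 *	}
 *
 *	err = stop_one_cpu(cpu, set_freq_on_cpu, &req);
 *	// err == -ENOENT means @cpu was offline and the callback never ran.
 */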

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled. Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		newstate = READ_ONCE(msdata->state);
		if (newstate != curstate) {
			curstate = newstate;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop. Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
		rcu_momentary_dyntick_idle();
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}
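
/*
 * Note on the state machine above: every participating CPU spins in
 * multi_cpu_stop() and acknowledges each state via ack_state(); only when
 * the last CPU has acknowledged does set_state() advance to the next state.
 * All CPUs therefore move through PREPARE -> DISABLE_IRQ -> RUN -> EXIT in
 * lockstep, and by the time MULTI_STOP_RUN is reached every CPU in @cpumask
 * has interrupts disabled, so @fn (run only on the active CPUs) sees a fully
 * quiesced set of CPUs.
 */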

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, there is a
	 * possibility of one of the above stoppers being woken up by another
	 * CPU, and preempting us. This will cause us to not wake up the other
	 * stopper forever.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not on cpu2, we hold both locks.
	 *
	 * It can be falsely true but it is safe to spin until it is cleared,
	 * queue_stop_cpus_work() does everything under preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done,
		.caller = _RET_IP_,
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
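
/*
 * Usage sketch for stop_two_cpus() (illustrative only; the callback, helper
 * and types are hypothetical): run a callback while both CPUs are stopped,
 * e.g. to exchange per-CPU state.  Per the .active_cpus setting above, the
 * callback runs on @cpu1 while @cpu2 just spins with interrupts disabled.
 *
 *	static int swap_ctx(void *arg)
 *	{
 *		struct swap_args *sa = arg;	// hypothetical type
 *
 *		// Both CPUs are stopped with IRQs off; either CPU's state
 *		// can be touched without further locking.
 *		return do_swap(sa);		// hypothetical helper
 *	}
 *
 *	ret = stop_two_cpus(src_cpu, dst_cpu, swap_ctx, &sa);
 */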

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
	return cpu_stop_queue_work(cpu, work_buf);
}
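
/*
 * Usage sketch for stop_one_cpu_nowait() (illustrative only; names are
 * hypothetical): queue work from a context that must not sleep.  The work
 * buffer has to stay valid until the stopper has consumed it, so a per-CPU
 * or otherwise long-lived buffer is the usual choice.
 *
 *	static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);
 *
 *	// From a non-sleeping context:
 *	stop_one_cpu_nowait(cpu, kick_cpu_fn, NULL,
 *			    &per_cpu(kick_work, cpu));
 */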

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		work->caller = _RET_IP_;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
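
/*
 * Because stop_cpus() calls are serialized and the callback runs on every
 * targeted CPU, a callback may elect a single CPU to do the global work while
 * the others only rendezvous.  A sketch (hypothetical names; resetting the
 * counter between calls is omitted for brevity):
 *
 *	static atomic_t elected = ATOMIC_INIT(0);
 *
 *	static int rendezvous_fn(void *arg)
 *	{
 *		if (atomic_inc_return(&elected) == 1)
 *			do_global_update(arg);	// hypothetical helper
 *		return 0;
 *	}
 */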

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		stopper->caller = work->caller;
		stopper->fn = fn;
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		stopper->fn = NULL;
		stopper->caller = 0;
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot before stop_machine() has been
		 * initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
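
/*
 * Usage sketch for stop_machine() (illustrative only; names are
 * hypothetical): update global state that must never be observed
 * half-written.  With @cpus == NULL the callback runs on one CPU (the first
 * online one) while every other online CPU spins with interrupts disabled,
 * so the callback must not sleep or take sleeping locks.
 *
 *	static int apply_update(void *arg)
 *	{
 *		struct update *u = arg;		// hypothetical type
 *
 *		return do_apply_update(u);	// hypothetical, atomic context
 *	}
 *
 *	ret = stop_machine(apply_update, u, NULL);
 */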

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				  const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					    .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}
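
/*
 * A typical caller of stop_machine_from_inactive_cpu() would be architecture
 * code running on a CPU that is coming online and must rendezvous with the
 * already-active CPUs (for instance to resynchronize shared hardware state)
 * before it can be marked active; such a CPU has no proper task context and
 * cannot sleep, hence the busy-waiting above.
 */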