// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/suspend.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT	(5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0, cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->llist);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}
		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->src = smp_processor_id();
	csd->dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
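
/*
 * Usage sketch (illustrative only; the "example_" names below are
 * hypothetical and not part of this file): run a fast, non-blocking
 * callback on one remote CPU and wait for it to finish.
 */
static void __maybe_unused example_remote_read(void *info)
{
	/* Runs on the target CPU with interrupts disabled. */
	*(int *)info = raw_smp_processor_id();
}

static int __maybe_unused example_call_single(int target_cpu)
{
	int got = -1;

	/* wait == 1: returns only after example_remote_read() has run. */
	return smp_call_function_single(target_cpu, example_remote_read,
					&got, 1) ?: got;
}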

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *			             specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the function is called with one csd which has not yet been
 * processed by a previous call to smp_call_function_single_async(), the
 * function will return immediately with -EBUSY showing that the csd
 * object is still in progress.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
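
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * an object embeds its own call_single_data_t, as the comment above
 * requires, and re-arms it only after the previous IPI has completed.
 */
struct example_async_obj {
	call_single_data_t	csd;	/* pre-allocated, lives in the object */
	int			counter;
};

static void __maybe_unused example_async_func(void *info)
{
	struct example_async_obj *obj = info;

	obj->counter++;			/* runs on the remote CPU */
}

static int __maybe_unused example_kick(struct example_async_obj *obj, int cpu)
{
	obj->csd.func = example_async_func;
	obj->csd.info = obj;

	/* -EBUSY here means the previous request is still in flight. */
	return smp_call_function_single_async(cpu, &obj->csd);
}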

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd
	 * storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->src = smp_processor_id();
		csd->dst = cpu;
#endif
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);
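
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * IPI every online CPU in a mask except ourselves. Note the
 * preempt_disable() pair: smp_call_function_many() requires preemption
 * to be off, per the comment above.
 */
static void __maybe_unused example_drain(void *unused)
{
	/* fast, non-blocking work on each targeted CPU */
}

static void __maybe_unused example_call_many(const struct cpumask *targets)
{
	preempt_disable();
	smp_call_function_many(targets, example_drain, NULL, true);
	preempt_enable();
}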

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);
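
/*
 * Usage sketch (illustrative only; hypothetical caller): run a callback
 * on every online CPU except the calling one; use on_each_cpu() below
 * when the local CPU must run it too.
 */
static void __maybe_unused example_poke_others(smp_call_func_t fn, void *arg)
{
	/* wait == 1: all remote callbacks have finished when this returns. */
	smp_call_function(fn, arg, 1);
}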

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);
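
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * a cache-flush style operation that must run everywhere, including on
 * the calling CPU.
 */
static void __maybe_unused example_flush_local(void *unused)
{
	/* per-CPU flush work; runs with interrupts disabled */
}

static void __maybe_unused example_flush_all(void)
{
	on_each_cpu(example_flush_local, NULL, 1);
}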

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
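
/*
 * Usage sketch (illustrative only; hypothetical caller): restrict the
 * callback to the CPUs of one NUMA node; the local CPU is included
 * if and only if it is part of the mask.
 */
static void __maybe_unused example_on_node(int node, smp_call_func_t fn,
					   void *arg)
{
	on_each_cpu_mask(cpumask_of_node(node), fn, arg, true);
}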

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);
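
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * IPI only the CPUs whose per-CPU state says there is work, mirroring
 * how TLB-flush code avoids interrupting CPUs with nothing to do.
 */
static DEFINE_PER_CPU(bool, example_has_work);

static bool __maybe_unused example_cond(int cpu, void *info)
{
	/* called with preemption disabled; decides whether to IPI @cpu */
	return per_cpu(example_has_work, cpu);
}

static void __maybe_unused example_do_work(void *info)
{
	this_cpu_write(example_has_work, false);
}

static void __maybe_unused example_run_cond(void)
{
	on_each_cpu_cond(example_cond, example_do_work, NULL, true);
}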

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
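
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * the update-then-kick pattern described above: publish a new function
 * pointer, then guarantee no CPU still runs the old one.
 */
static void (*example_idle_fn)(void);

static void __maybe_unused example_update_idle_fn(void (*fn)(void))
{
	WRITE_ONCE(example_idle_fn, fn);
	/* After this returns, no CPU can still be inside the old fn. */
	kick_all_cpus_sync();
}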

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up every online CPU that is in an idle state, including CPUs
 * that are idle-polling. CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

#if CONFIG_SUSPEND
		if (s2idle_state == S2IDLE_STATE_ENTER || cpu_active(cpu))
#endif
			wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
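
/*
 * Usage sketch (illustrative only; the "example_" names are hypothetical):
 * unlike the IPI-based calls above, the callback here runs from a
 * workqueue, so it may sleep and can return a status code.
 */
static int __maybe_unused example_sleepy(void *data)
{
	might_sleep();			/* workqueue context: sleeping is legal */
	return raw_smp_processor_id();
}

static int __maybe_unused example_call_on(unsigned int cpu)
{
	/* phys == false: no hypervisor vCPU pinning requested. */
	return smp_call_on_cpu(cpu, example_sleepy, NULL, false);
}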