kernel/smp.c
20 KB
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK           = 0x01,
        CSD_FLAG_SYNCHRONOUS    = 0x02,
};

struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                cfd->csd = alloc_percpu(struct call_single_data);
                if (!cfd->csd) {
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                /* Fall-through to the CPU_DEAD[_FROZEN] case. */

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;

        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /*
                 * The IPIs for the smp-call-function callbacks queued by other
                 * CPUs might arrive late, either due to hardware latencies or
                 * because this CPU disabled interrupts (inside stop-machine)
                 * before the IPIs were sent. So flush out any pending callbacks
                 * explicitly (without waiting for the IPIs to arrive), to
                 * ensure that the outgoing CPU doesn't go offline with work
                 * still pending.
                 */
                flush_smp_call_function_queue(false);
                break;
#endif
        };

        return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

void __init call_function_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
        smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack case,
                 * since we're doing this from the same CPU..
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition should be visible before sending the IPI
         * handler locks the list to pull the entry off it because of
         * normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;

        WARN_ON(!irqs_disabled());

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here
                 * because we are not invoking the IPI handlers yet.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on().
         * Smp functions above are typically synchronous so they
         * better run first since some other CPUs may be busy waiting
         * for them.
         */
        irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data *csd;
        struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
        int this_cpu;
        int err;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
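
A minimal usage sketch, not part of kernel/smp.c (the callback and function names below are invented): the remote callback runs in IPI context with interrupts disabled, so it must be fast and must never sleep; with wait=1 the caller blocks until the callback has finished on the target CPU.

#include <linux/smp.h>
#include <linux/printk.h>

static void report_cpu(void *info)
{
        /* Runs on the target CPU in IPI context: keep it short, never sleep. */
        pr_info("hello from CPU %d\n", smp_processor_id());
}

static int example_call_one(int target_cpu)
{
        /* wait=1: only returns after report_cpu() has run on target_cpu. */
        return smp_call_function_single(target_cpu, report_cpu, NULL, 1);
}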

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
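
A hedged sketch of the pre-allocated @csd pattern described in the kerneldoc above (illustrative only; the structure and function names are invented). The call_single_data is embedded in a longer-lived, zero-initialized object, and the caller is responsible for not re-firing it before the previous IPI has completed.

#include <linux/smp.h>
#include <linux/jiffies.h>

struct my_poke_dev {                        /* hypothetical driver object, assumed kzalloc()'d */
        struct call_single_data csd;
        unsigned long last_poke;
};

static void my_poke_func(void *info)
{
        struct my_poke_dev *dev = info;

        dev->last_poke = jiffies;           /* runs on the remote CPU, IPI context */
}

static void my_poke_cpu(struct my_poke_dev *dev, int cpu)
{
        dev->csd.func = my_poke_func;
        dev->csd.info = dev;

        /* Asynchronous: returns immediately, usable with interrupts disabled. */
        smp_call_function_single_async(cpu, &dev->csd);
}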

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
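
Illustrative only (not in this file): because of the selection order above, a caller that just needs the work done on some CPU of a given mask can pass the whole mask and let the kernel pick the cheapest candidate.

#include <linux/smp.h>
#include <linux/cpumask.h>

static void drain_something(void *info)
{
        /* short, non-blocking work */
}

static int example_run_anywhere(const struct cpumask *allowed)
{
        /* Prefers the current CPU, then a same-node CPU, then any online
         * CPU in @allowed; wait=1 blocks until the callback has run. */
        return smp_call_function_any(allowed, drain_something, NULL, 1);
}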

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus? We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
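
A hedged sketch of a typical call site (names invented, not from this file): preemption must be disabled around the call, and the callback runs only on CPUs other than the caller.

#include <linux/smp.h>
#include <linux/cpumask.h>

static void flush_local_state(void *info)
{
        /* fast, non-blocking per-CPU work */
}

static void example_flush_others(void)
{
        preempt_disable();              /* required by smp_call_function_many() */
        smp_call_function_many(cpu_online_mask, flush_local_state, NULL, true);
        preempt_enable();

        /* The calling CPU is skipped; call flush_local_state(NULL) locally,
         * or use on_each_cpu(), if the local CPU needs the same treatment. */
}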

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
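
Since smp_call_function() is just smp_call_function_many(cpu_online_mask, ...) wrapped in preempt_disable()/preempt_enable(), a call site is a one-liner; a hedged sketch with an invented callback:

#include <linux/smp.h>

static void do_sync(void *info)
{
        /* fast, non-blocking work executed on every other online CPU */
}

static void example_sync_others(void)
{
        /* wait=1: return only after do_sync() has run on all other CPUs */
        smp_call_function(do_sync, NULL, 1);
}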

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}
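
For illustration only (not part of the file): the early_param() hooks above correspond to boot-time kernel command-line options, for example:

        nosmp              disable SMP activation entirely (equivalent to maxcpus=0)
        maxcpus=4          bring up at most 4 CPUs during boot
        nr_cpus=16         hard limit: cap nr_cpu_ids (the possible CPU id space) at 16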

void __weak smp_announce(void)
{
        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        smp_announce();
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
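
Illustrative only (invented names): unlike smp_call_function(), on_each_cpu() also runs the callback on the calling CPU (with interrupts disabled), which suits "reset a per-CPU counter everywhere" style operations.

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static void reset_hits(void *unused)
{
        this_cpu_write(example_hits, 0);   /* runs on every online CPU, including us */
}

static void example_reset_all(void)
{
        on_each_cpu(reset_hits, NULL, 1);  /* wait until every CPU has finished */
}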

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                        void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
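
A hedged sketch (invented names): running a callback on all online CPUs of one NUMA node; the local CPU is covered too, directly and with interrupts disabled, if it belongs to the mask.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void poke_node_cpu(void *info)
{
        /* short, non-blocking per-CPU work */
}

static void example_poke_node(int node)
{
        on_each_cpu_mask(cpumask_of_node(node), poke_node_cpu, NULL, true);
}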

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                        smp_call_func_t func, void *info, bool wait,
                        gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                                info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
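
A hedged illustration of the predicate pattern (names invented): IPI only the CPUs whose per-CPU state says there is something to do, letting the temporary cpumask allocation sleep because GFP_KERNEL is allowed in this context.

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(unsigned int, pending_work);

static bool cpu_has_pending(int cpu, void *info)
{
        return per_cpu(pending_work, cpu) != 0;   /* called with preemption disabled */
}

static void drain_pending(void *info)
{
        this_cpu_write(pending_work, 0);          /* IPI context on the selected CPUs */
}

static void example_drain_busy_cpus(void)
{
        on_each_cpu_cond(cpu_has_pending, drain_pending, NULL, true, GFP_KERNEL);
}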

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
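
An illustrative sketch (the pointer name is invented): the usual pattern is "publish the new value, then kick", so that once kick_all_cpus_sync() returns no CPU can still be running code reached through the old value.

#include <linux/smp.h>

static void (*example_idle_hook)(void);     /* hypothetical function pointer */

static void example_set_idle_hook(void (*new_hook)(void))
{
        example_idle_hook = new_hook;       /* publish the update first */

        /* Returns only after the dummy IPI callback has run on every CPU,
         * i.e. after every CPU has left the code that used the old hook. */
        kick_all_cpus_sync();
}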

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break every cpu out of idle, including
 * cpus that are idle-polling; cpus that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
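
A closing illustrative sketch (the setting name is invented): waking the idle CPUs is useful when a global parameter they consult on their next wakeup has changed and the caller does not want to wait for a natural wakeup.

#include <linux/smp.h>

static unsigned int example_latency_limit_us;   /* hypothetical global setting */

static void example_update_latency_limit(unsigned int new_limit)
{
        WRITE_ONCE(example_latency_limit_us, new_limit);

        /* Nudge every idle CPU so it re-evaluates the limit on wakeup;
         * busy CPUs are untouched and will pick it up on their own. */
        wake_up_all_idle_cpus();
}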