kernel/smp.c
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"
enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}
void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}
/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
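/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller uses smp_call_function_single() to run a short callback on one
 * remote CPU and wait for it. All example_* names below are hypothetical.
 */
static void example_read_remote(void *info)
{
	u64 *val = info;

	/* Runs on the target CPU, in IPI context with IRQs disabled. */
	*val = jiffies;		/* stand-in for reading a per-CPU resource */
}

static int example_query_cpu(int cpu)
{
	u64 val = 0;
	int err;

	/* wait=1: only returns after example_read_remote() ran on @cpu. */
	err = smp_call_function_single(cpu, example_read_remote, &val, 1);
	if (err)
		return err;	/* e.g. -ENXIO if @cpu is offline */

	pr_info("cpu%d value: %llu\n", cpu, val);
	return 0;
}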
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 * specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
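/*
 * Example (illustrative sketch, not part of the original file): per the
 * NOTE above, the csd is embedded in a caller-owned object, and the caller
 * must not reuse it until the previous call has run. example_* names are
 * hypothetical; a flag bit is one way to provide the required serialization.
 */
struct example_dev {
	call_single_data_t	csd;	/* must outlive the async call */
	unsigned long		kicked;
};

static void example_dev_poke(void *info)
{
	struct example_dev *ed = info;

	/*
	 * The csd was already unlocked before this callback ran (async
	 * path), so clearing the flag re-arms it for the next kick.
	 */
	clear_bit(0, &ed->kicked);
}

static void example_dev_kick(struct example_dev *ed, int cpu)
{
	/* Serialize: at most one async call in flight on this csd. */
	if (test_and_set_bit(0, &ed->kicked))
		return;

	ed->csd.func = example_dev_poke;
	ed->csd.info = ed;
	smp_call_function_single_async(cpu, &ed->csd);
}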
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
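/*
 * Example (illustrative sketch, not part of the original file): broadcast
 * to a caller-chosen set of CPUs. Preemption must be disabled around the
 * call, and the callback never runs on the calling CPU even if it is in
 * @mask (use on_each_cpu_mask() below for that). example_* names are
 * hypothetical.
 */
static void example_invalidate(void *info)
{
	/* Runs in IPI context on each targeted remote CPU. */
}

static void example_invalidate_cpus(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_invalidate, NULL, true);
	preempt_enable();
}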
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
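/*
 * Example (illustrative, not part of the original file): the simplest use
 * is a synchronous broadcast to all other online CPUs, e.g.:
 *
 *	smp_call_function(example_invalidate, NULL, 1);
 *
 * kick_all_cpus_sync() further down in this file is exactly this pattern
 * with a no-op callback.
 */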
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
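/*
 * Example (illustrative): the options above are passed on the kernel
 * command line, e.g.:
 *
 *	nosmp		- boot with the boot CPU only
 *	maxcpus=2	- bring up at most two CPUs at boot
 *	nr_cpus=8	- hard-limit nr_cpu_ids to eight
 */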
/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Final decision about SMT support */
	cpu_smt_check_topology();

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
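/*
 * Example (illustrative sketch, not part of the original file): unlike
 * smp_call_function(), this also runs the callback locally, so it suits
 * "flush everywhere" operations. example_* names are hypothetical.
 */
static void example_flush_caches(void *unused)
{
	/* Runs with IRQs disabled on every online CPU, this one included. */
}

static void example_flush_all(void)
{
	on_each_cpu(example_flush_caches, NULL, 1);
}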
/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a
 * non-atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
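/*
 * Example (illustrative sketch, not part of the original file): IPI only
 * the CPUs whose hypothetical per-CPU generation counter is stale, so
 * up-to-date CPUs are never interrupted. example_* names are hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, example_gen);

static bool example_gen_stale(int cpu, void *info)
{
	/* Called with preemption disabled; decides whether to IPI @cpu. */
	return per_cpu(example_gen, cpu) != (unsigned long)info;
}

static void example_gen_sync(void *info)
{
	/* Runs on each stale CPU (IPI context) and on this CPU if stale. */
	this_cpu_write(example_gen, (unsigned long)info);
}

static void example_sync_generation(unsigned long gen)
{
	on_each_cpu_cond(example_gen_stale, example_gen_sync,
			 (void *)gen, true, GFP_KERNEL);
}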
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes every CPU that is in an idle state, including CPUs that are
 * idle-polling; CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
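/*
 * Example (illustrative sketch, not part of the original file): unlike the
 * IPI-based helpers above, the callback here runs from a workqueue, so it
 * may sleep. example_* names are hypothetical.
 */
static int example_read_board_temp(void *data)
{
	int *temp = data;

	*temp = 42;	/* stand-in for a (possibly sleeping) firmware call */
	return 0;
}

static int example_temp_on_cpu(unsigned int cpu)
{
	int temp = 0;
	int ret;

	/* phys=true asks the hypervisor to pin the vCPU while we run. */
	ret = smp_call_on_cpu(cpu, example_read_board_temp, &temp, true);

	return ret ? ret : temp;
}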