Blame view
kernel/cpu.c
13.7 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
/* CPU control. * (C) 2001, 2002, 2003, 2004 Rusty Russell * * This code is licenced under the GPL. */ #include <linux/proc_fs.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/notifier.h> #include <linux/sched.h> #include <linux/unistd.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/stop_machine.h> |
81615b624
|
16 |
#include <linux/mutex.h> |
5a0e3ad6a
|
17 |
#include <linux/gfp.h> |
1da177e4c
|
18 |
|
98a79d6a5
|
19 |
#ifdef CONFIG_SMP |
b3199c025
|
20 |
/* Serializes the updates to cpu_online_mask, cpu_present_mask */ |
aa9538777
|
21 |
static DEFINE_MUTEX(cpu_add_remove_lock); |
1da177e4c
|
22 |
|
/*
 * The following two API's must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */

/* Enter the region in which the CPU maps may be modified. */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

/* Leave the CPU-map update region entered by cpu_maps_update_begin(). */
void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
5c113fbee
|
36 |
static RAW_NOTIFIER_HEAD(cpu_chain); |
1da177e4c
|
37 |
|
e3920fb42
|
38 39 40 41 |
/* If set, cpu_up and cpu_down will return -EBUSY and do nothing. * Should always be manipulated under cpu_add_remove_lock */ static int cpu_hotplug_disabled; |
79a6cdeb7
|
42 |
#ifdef CONFIG_HOTPLUG_CPU |
/*
 * Refcounted reader/writer gate protecting CPU hotplug operations.
 * Readers are get_online_cpus()/put_online_cpus(); the single writer
 * (the task running a hotplug operation) is recorded in active_writer
 * so its own nested reader calls do not deadlock.
 */
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
d221938c0
|
56 |
|
/*
 * Reader side of the CPU-hotplug gate: pins the current set of online
 * CPUs until the matching put_online_cpus().  May sleep.
 */
void get_online_cpus(void)
{
	might_sleep();
	/* The active hotplug writer may re-enter reader sections freely. */
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
90d45d17f
|
67 |
|
/*
 * Drop the reference taken by get_online_cpus().  The last reader to
 * leave wakes a writer sleeping in cpu_hotplug_begin(), if any.
 */
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	/* Wake the writer only on the 0 transition, while still locked. */
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
a9d9baa1e
|
78 |
|
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	/* Let our own nested get_online_cpus() calls pass through. */
	cpu_hotplug.active_writer = current;

	/*
	 * Sleep (lock dropped) until no readers remain; exit the loop
	 * still holding cpu_hotplug.lock, released by cpu_hotplug_done().
	 */
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		/* Set state before unlocking so a wakeup is not lost. */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}
#else /* #if CONFIG_HOTPLUG_CPU */
/* Without CPU hotplug there is no writer side to serialize. */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
1da177e4c
|
125 |
/* Need to know about CPUs going up/down? */ |
f7b16c108
|
126 |
int __ref register_cpu_notifier(struct notifier_block *nb) |
1da177e4c
|
127 |
{ |
bd5349cfd
|
128 |
int ret; |
d221938c0
|
129 |
cpu_maps_update_begin(); |
bd5349cfd
|
130 |
ret = raw_notifier_chain_register(&cpu_chain, nb); |
d221938c0
|
131 |
cpu_maps_update_done(); |
bd5349cfd
|
132 |
return ret; |
1da177e4c
|
133 |
} |
65edc68c3
|
134 |
|
e9fb7631e
|
135 136 137 |
static int __cpu_notify(unsigned long val, void *v, int nr_to_call, int *nr_calls) { |
e6bde73b0
|
138 139 140 |
int ret; ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, |
e9fb7631e
|
141 |
nr_calls); |
e6bde73b0
|
142 143 |
return notifier_to_errno(ret); |
e9fb7631e
|
144 145 146 147 148 149 |
} static int cpu_notify(unsigned long val, void *v) { return __cpu_notify(val, v, -1, NULL); } |
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Notifier call that is not allowed to fail: a non-zero reply from the
 * chain at this point is a kernel bug.
 */
static void cpu_notify_nofail(unsigned long val, void *v)
{
	int err = cpu_notify(val, v);

	BUG_ON(err);
}
1da177e4c
|
155 |
EXPORT_SYMBOL(register_cpu_notifier); |
9647155ff
|
156 |
void __ref unregister_cpu_notifier(struct notifier_block *nb) |
1da177e4c
|
157 |
{ |
d221938c0
|
158 |
cpu_maps_update_begin(); |
bd5349cfd
|
159 |
raw_notifier_chain_unregister(&cpu_chain, nb); |
d221938c0
|
160 |
cpu_maps_update_done(); |
1da177e4c
|
161 162 |
} EXPORT_SYMBOL(unregister_cpu_notifier); |
1da177e4c
|
163 164 165 166 167 168 |
static inline void check_for_tasks(int cpu) { struct task_struct *p; write_lock_irq(&tasklist_lock); for_each_process(p) { |
11854247e
|
169 |
if (task_cpu(p) == cpu && p->state == TASK_RUNNING && |
1da177e4c
|
170 171 |
(!cputime_eq(p->utime, cputime_zero) || !cputime_eq(p->stime, cputime_zero))) |
9d3cfc4c1
|
172 173 174 175 176 |
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " "(state = %ld, flags = %x) ", p->comm, task_pid_nr(p), cpu, p->state, p->flags); |
1da177e4c
|
177 178 179 |
} write_unlock_irq(&tasklist_lock); } |
/* Argument bundle passed through __stop_machine() to take_cpu_down(). */
struct take_cpu_down_param {
	unsigned long mod;	/* 0 or CPU_TASKS_FROZEN */
	void *hcpu;		/* CPU number cast to void * */
};
/* Take this CPU down. */
/*
 * Runs on the dying CPU inside __stop_machine(), so no other CPU is
 * executing concurrently.  Returns 0 on success or the negative error
 * from __cpu_disable().
 */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* CPU_DYING notifiers run on the dying CPU, interrupts off. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	return 0;
}
e3920fb42
|
198 |
/* Requires cpu_add_remove_lock to be held */ |
514a20a5d
|
199 |
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) |
1da177e4c
|
200 |
{ |
e7407dcc6
|
201 |
int err, nr_calls = 0; |
e7407dcc6
|
202 |
void *hcpu = (void *)(long)cpu; |
8bb784428
|
203 |
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
db912f963
|
204 205 206 207 |
struct take_cpu_down_param tcd_param = { .mod = mod, .hcpu = hcpu, }; |
1da177e4c
|
208 |
|
e3920fb42
|
209 210 |
if (num_online_cpus() == 1) return -EBUSY; |
1da177e4c
|
211 |
|
e3920fb42
|
212 213 |
if (!cpu_online(cpu)) return -EINVAL; |
1da177e4c
|
214 |
|
d221938c0
|
215 |
cpu_hotplug_begin(); |
e9fb7631e
|
216 |
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); |
e6bde73b0
|
217 |
if (err) { |
a0d8cdb65
|
218 |
nr_calls--; |
e9fb7631e
|
219 |
__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); |
1da177e4c
|
220 221 |
printk("%s: attempt to take down CPU %u failed ", |
af1f16d08
|
222 |
__func__, cpu); |
baaca49f4
|
223 |
goto out_release; |
1da177e4c
|
224 |
} |
e0b582ec5
|
225 |
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); |
043215875
|
226 |
if (err) { |
1da177e4c
|
227 |
/* CPU didn't die: tell everyone. Can't complain. */ |
e9fb7631e
|
228 |
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); |
1da177e4c
|
229 |
|
6a1bdc1b5
|
230 |
goto out_release; |
8fa1d7d3b
|
231 |
} |
043215875
|
232 |
BUG_ON(cpu_online(cpu)); |
1da177e4c
|
233 |
|
48c5ccae8
|
234 235 236 237 |
/* * The migration_call() CPU_DYING callback will have removed all * runnable tasks from the cpu, there's only the idle task left now * that the migration thread is done doing the stop_machine thing. |
51a96c778
|
238 239 |
* * Wait for the stop thread to go away. |
48c5ccae8
|
240 |
*/ |
51a96c778
|
241 242 |
while (!idle_cpu(cpu)) cpu_relax(); |
1da177e4c
|
243 244 245 |
/* This actually kills the CPU. */ __cpu_die(cpu); |
1da177e4c
|
246 |
/* CPU is completely dead: tell everyone. Too late to complain. */ |
e9fb7631e
|
247 |
cpu_notify_nofail(CPU_DEAD | mod, hcpu); |
1da177e4c
|
248 249 |
check_for_tasks(cpu); |
baaca49f4
|
250 |
out_release: |
d221938c0
|
251 |
cpu_hotplug_done(); |
e9fb7631e
|
252 253 |
if (!err) cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); |
e3920fb42
|
254 255 |
return err; } |
514a20a5d
|
256 |
int __ref cpu_down(unsigned int cpu) |
e3920fb42
|
257 |
{ |
9ea09af3b
|
258 |
int err; |
e3920fb42
|
259 |
|
d221938c0
|
260 |
cpu_maps_update_begin(); |
e761b7725
|
261 262 |
if (cpu_hotplug_disabled) { |
e3920fb42
|
263 |
err = -EBUSY; |
e761b7725
|
264 265 |
goto out; } |
e761b7725
|
266 |
err = _cpu_down(cpu, 0); |
e3920fb42
|
267 |
|
e761b7725
|
268 |
out: |
d221938c0
|
269 |
cpu_maps_update_done(); |
1da177e4c
|
270 271 |
return err; } |
b62b8ef90
|
272 |
EXPORT_SYMBOL(cpu_down); |
1da177e4c
|
273 |
#endif /*CONFIG_HOTPLUG_CPU*/ |
e3920fb42
|
274 |
/* Requires cpu_add_remove_lock to be held */ |
8bb784428
|
275 |
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) |
1da177e4c
|
276 |
{ |
baaca49f4
|
277 |
int ret, nr_calls = 0; |
1da177e4c
|
278 |
void *hcpu = (void *)(long)cpu; |
8bb784428
|
279 |
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; |
1da177e4c
|
280 |
|
e3920fb42
|
281 282 |
if (cpu_online(cpu) || !cpu_present(cpu)) return -EINVAL; |
90d45d17f
|
283 |
|
d221938c0
|
284 |
cpu_hotplug_begin(); |
e9fb7631e
|
285 |
ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); |
e6bde73b0
|
286 |
if (ret) { |
a0d8cdb65
|
287 |
nr_calls--; |
1da177e4c
|
288 289 |
printk("%s: attempt to bring up CPU %u failed ", |
af1f16d08
|
290 |
__func__, cpu); |
1da177e4c
|
291 292 293 294 295 296 297 |
goto out_notify; } /* Arch-specific enabling code. */ ret = __cpu_up(cpu); if (ret != 0) goto out_notify; |
6978c7052
|
298 |
BUG_ON(!cpu_online(cpu)); |
1da177e4c
|
299 300 |
/* Now call notifier in preparation. */ |
e9fb7631e
|
301 |
cpu_notify(CPU_ONLINE | mod, hcpu); |
1da177e4c
|
302 303 304 |
out_notify: if (ret != 0) |
e9fb7631e
|
305 |
__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); |
d221938c0
|
306 |
cpu_hotplug_done(); |
e3920fb42
|
307 308 309 |
return ret; } |
b282b6f8a
|
310 |
int __cpuinit cpu_up(unsigned int cpu) |
e3920fb42
|
311 312 |
{ int err = 0; |
cf23422b9
|
313 314 315 316 317 |
#ifdef CONFIG_MEMORY_HOTPLUG int nid; pg_data_t *pgdat; #endif |
e0b582ec5
|
318 |
if (!cpu_possible(cpu)) { |
73e753a50
|
319 320 321 |
printk(KERN_ERR "can't online cpu %d because it is not " "configured as may-hotadd at boot time ", cpu); |
87d5e0236
|
322 |
#if defined(CONFIG_IA64) |
73e753a50
|
323 324 325 326 327 328 |
printk(KERN_ERR "please check additional_cpus= boot " "parameter "); #endif return -EINVAL; } |
e3920fb42
|
329 |
|
cf23422b9
|
330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 |
#ifdef CONFIG_MEMORY_HOTPLUG nid = cpu_to_node(cpu); if (!node_online(nid)) { err = mem_online_node(nid); if (err) return err; } pgdat = NODE_DATA(nid); if (!pgdat) { printk(KERN_ERR "Can't online cpu %d due to NULL pgdat ", cpu); return -ENOMEM; } |
4eaf3f643
|
345 346 |
if (pgdat->node_zonelists->_zonerefs->zone == NULL) { mutex_lock(&zonelists_mutex); |
1f522509c
|
347 |
build_all_zonelists(NULL); |
4eaf3f643
|
348 349 |
mutex_unlock(&zonelists_mutex); } |
cf23422b9
|
350 |
#endif |
d221938c0
|
351 |
cpu_maps_update_begin(); |
e761b7725
|
352 353 |
if (cpu_hotplug_disabled) { |
e3920fb42
|
354 |
err = -EBUSY; |
e761b7725
|
355 356 357 358 |
goto out; } err = _cpu_up(cpu, 0); |
e761b7725
|
359 |
out: |
d221938c0
|
360 |
cpu_maps_update_done(); |
e3920fb42
|
361 362 |
return err; } |
f3de4be9d
|
363 |
#ifdef CONFIG_PM_SLEEP_SMP |
e0b582ec5
|
364 |
static cpumask_var_t frozen_cpus; |
e3920fb42
|
365 |
|
/* Weak hooks an architecture may override to bracket nonboot-CPU shutdown. */
void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}
e3920fb42
|
373 374 |
int disable_nonboot_cpus(void) { |
e9a5f426b
|
375 |
int cpu, first_cpu, error = 0; |
e3920fb42
|
376 |
|
d221938c0
|
377 |
cpu_maps_update_begin(); |
e0b582ec5
|
378 |
first_cpu = cpumask_first(cpu_online_mask); |
9ee349ad6
|
379 380 |
/* * We take down all of the non-boot CPUs in one shot to avoid races |
e3920fb42
|
381 382 |
* with the userspace trying to use the CPU hotplug at the same time */ |
e0b582ec5
|
383 |
cpumask_clear(frozen_cpus); |
3fb82d56a
|
384 |
arch_disable_nonboot_cpus_begin(); |
6ad4c1888
|
385 |
|
e3920fb42
|
386 387 388 389 390 |
printk("Disabling non-boot CPUs ... "); for_each_online_cpu(cpu) { if (cpu == first_cpu) continue; |
8bb784428
|
391 |
error = _cpu_down(cpu, 1); |
feae3203d
|
392 |
if (!error) |
e0b582ec5
|
393 |
cpumask_set_cpu(cpu, frozen_cpus); |
feae3203d
|
394 |
else { |
e3920fb42
|
395 396 397 398 399 400 |
printk(KERN_ERR "Error taking CPU%d down: %d ", cpu, error); break; } } |
86886e55b
|
401 |
|
3fb82d56a
|
402 |
arch_disable_nonboot_cpus_end(); |
e3920fb42
|
403 404 405 406 407 |
if (!error) { BUG_ON(num_online_cpus() > 1); /* Make sure the CPUs won't be enabled by someone else */ cpu_hotplug_disabled = 1; } else { |
e1d9fd2e3
|
408 409 |
printk(KERN_ERR "Non-boot CPUs are not disabled "); |
e3920fb42
|
410 |
} |
d221938c0
|
411 |
cpu_maps_update_done(); |
e3920fb42
|
412 413 |
return error; } |
/* Weak hooks an architecture may override to bracket nonboot-CPU bring-up. */
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
fa7303e22
|
421 |
void __ref enable_nonboot_cpus(void) |
e3920fb42
|
422 423 424 425 |
{ int cpu, error; /* Allow everyone to use the CPU hotplug again */ |
d221938c0
|
426 |
cpu_maps_update_begin(); |
e3920fb42
|
427 |
cpu_hotplug_disabled = 0; |
e0b582ec5
|
428 |
if (cpumask_empty(frozen_cpus)) |
1d64b9cb1
|
429 |
goto out; |
e3920fb42
|
430 431 432 |
printk("Enabling non-boot CPUs ... "); |
d0af9eed5
|
433 434 |
arch_enable_nonboot_cpus_begin(); |
e0b582ec5
|
435 |
for_each_cpu(cpu, frozen_cpus) { |
8bb784428
|
436 |
error = _cpu_up(cpu, 1); |
e3920fb42
|
437 438 439 440 441 |
if (!error) { printk("CPU%d is up ", cpu); continue; } |
1d64b9cb1
|
442 443 |
printk(KERN_WARNING "Error taking CPU%d up: %d ", cpu, error); |
e3920fb42
|
444 |
} |
d0af9eed5
|
445 446 |
arch_enable_nonboot_cpus_end(); |
e0b582ec5
|
447 |
cpumask_clear(frozen_cpus); |
1d64b9cb1
|
448 |
out: |
d221938c0
|
449 |
cpu_maps_update_done(); |
1da177e4c
|
450 |
} |
e0b582ec5
|
451 452 453 454 455 456 457 458 |
static int alloc_frozen_cpus(void) { if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) return -ENOMEM; return 0; } core_initcall(alloc_frozen_cpus); |
f3de4be9d
|
459 |
#endif /* CONFIG_PM_SLEEP_SMP */ |
68f4f1ec0
|
460 |
|
e545a6140
|
461 462 463 464 465 466 467 468 |
/** * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers * @cpu: cpu that just started * * This function calls the cpu_chain notifiers with CPU_STARTING. * It must be called by the arch code on the new cpu, before the new cpu * enables interrupts and before the "boot" cpu returns from __cpu_up(). */ |
841964145
|
469 |
void __cpuinit notify_cpu_starting(unsigned int cpu) |
e545a6140
|
470 471 472 473 |
{ unsigned long val = CPU_STARTING; #ifdef CONFIG_PM_SLEEP_SMP |
e0b582ec5
|
474 |
if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) |
e545a6140
|
475 476 |
val = CPU_STARTING_FROZEN; #endif /* CONFIG_PM_SLEEP_SMP */ |
e9fb7631e
|
477 |
cpu_notify(val, (void *)(long)cpu); |
e545a6140
|
478 |
} |
68f4f1ec0
|
479 |
#endif /* CONFIG_SMP */ |
b8d317d10
|
480 |
|
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

/* Row nr+1 is a mask with only bit nr set; row 0 is the empty backstop. */
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
/* Constant bitmap with every one of the NR_CPUS bits set. */
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
/*
 * Backing storage for the four global CPU masks.  The bitmaps are
 * private; only the const cpumask pointers are exported, so all
 * modification funnels through the set_cpu_*()/init_cpu_*() helpers.
 */
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
3fa415206
|
529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 |
void set_cpu_possible(unsigned int cpu, bool possible) { if (possible) cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); else cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); } void set_cpu_present(unsigned int cpu, bool present) { if (present) cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); else cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); } void set_cpu_online(unsigned int cpu, bool online) { if (online) cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); else cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); } void set_cpu_active(unsigned int cpu, bool active) { if (active) cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); else cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); } void init_cpu_present(const struct cpumask *src) { cpumask_copy(to_cpumask(cpu_present_bits), src); } void init_cpu_possible(const struct cpumask *src) { cpumask_copy(to_cpumask(cpu_possible_bits), src); } void init_cpu_online(const struct cpumask *src) { cpumask_copy(to_cpumask(cpu_online_bits), src); } |