Commit 0bc3cc03fa6e1c20aecb5a33356bcaae410640b9
Committed by Ingo Molnar
1 parent 6524d938b3
Exists in master and in 39 other branches
cpumask: change cpumask_of_cpu_ptr to use new cpumask_of_cpu
* Replace previous instances of the cpumask_of_cpu_ptr* macros with the new (lvalue capable) generic cpumask_of_cpu().

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jack Steiner <steiner@sgi.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
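Illustration (not part of the patch): a minimal, self-contained C sketch of why the conversion works. The kernel's cpumask_t, cpumask_of_cpu() and set_cpus_allowed_ptr() are modeled here by hypothetical stand-ins; only the shape of the old and new call sites is taken from the diffs below.

#include <stdio.h>

#define NR_CPUS 64

/* Hypothetical stand-in for the kernel's cpumask_t. */
typedef struct {
	unsigned long bits[(NR_CPUS + 8 * sizeof(unsigned long) - 1) /
			   (8 * sizeof(unsigned long))];
} cpumask_t;

static cpumask_t cpu_masks[NR_CPUS];

/* Modeled after the new accessor: it expands to an lvalue, so callers
 * can take its address directly as &cpumask_of_cpu(cpu). */
#define cpumask_of_cpu(cpu) (cpu_masks[(cpu)])

/* Stand-in for set_cpus_allowed_ptr(); it only reports the mask used. */
static int set_cpus_allowed_ptr_model(const cpumask_t *mask)
{
	printf("binding to mask at %p\n", (const void *)mask);
	return 0;
}

int main(void)
{
	int cpu = 3;

	/*
	 * Old pattern removed by this commit (one per call site):
	 *   cpumask_of_cpu_ptr_declare(new_mask);
	 *   cpumask_of_cpu_ptr_next(new_mask, cpu);
	 *   set_cpus_allowed_ptr(current, new_mask);
	 *
	 * New pattern: no local pointer bookkeeping is needed because
	 * cpumask_of_cpu(cpu) is itself an lvalue.
	 */
	set_cpus_allowed_ptr_model(&cpumask_of_cpu(cpu));
	return 0;
}

Because the accessor yields an lvalue, each converted call site below collapses to a single set_cpus_allowed_ptr(current, &cpumask_of_cpu(...)) line, which is the bulk of the 83 deleted lines.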
Showing 17 changed files with 37 additions and 83 deletions
- arch/x86/kernel/acpi/cstate.c
- arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
- arch/x86/kernel/cpu/cpufreq/powernow-k8.c
- arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
- arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
- arch/x86/kernel/cpu/intel_cacheinfo.c
- arch/x86/kernel/ldt.c
- arch/x86/kernel/microcode.c
- arch/x86/kernel/reboot.c
- drivers/acpi/processor_throttling.c
- drivers/firmware/dcdbas.c
- drivers/misc/sgi-xp/xpc_main.c
- kernel/stop_machine.c
- kernel/time/tick-common.c
- kernel/trace/trace_sysprof.c
- lib/smp_processor_id.c
- net/sunrpc/svc.c
arch/x86/kernel/acpi/cstate.c
... | ... | @@ -73,7 +73,6 @@ |
73 | 73 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
74 | 74 | |
75 | 75 | cpumask_t saved_mask; |
76 | - cpumask_of_cpu_ptr(new_mask, cpu); | |
77 | 76 | int retval; |
78 | 77 | unsigned int eax, ebx, ecx, edx; |
79 | 78 | unsigned int edx_part; |
... | ... | @@ -92,7 +91,7 @@ |
92 | 91 | |
93 | 92 | /* Make sure we are running on right CPU */ |
94 | 93 | saved_mask = current->cpus_allowed; |
95 | - retval = set_cpus_allowed_ptr(current, new_mask); | |
94 | + retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
96 | 95 | if (retval) |
97 | 96 | return -1; |
98 | 97 |
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
... | ... | @@ -200,12 +200,10 @@ |
200 | 200 | static void drv_write(struct drv_cmd *cmd) |
201 | 201 | { |
202 | 202 | cpumask_t saved_mask = current->cpus_allowed; |
203 | - cpumask_of_cpu_ptr_declare(cpu_mask); | |
204 | 203 | unsigned int i; |
205 | 204 | |
206 | 205 | for_each_cpu_mask_nr(i, cmd->mask) { |
207 | - cpumask_of_cpu_ptr_next(cpu_mask, i); | |
208 | - set_cpus_allowed_ptr(current, cpu_mask); | |
206 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); | |
209 | 207 | do_drv_write(cmd); |
210 | 208 | } |
211 | 209 | |
212 | 210 | |
... | ... | @@ -269,12 +267,11 @@ |
269 | 267 | } aperf_cur, mperf_cur; |
270 | 268 | |
271 | 269 | cpumask_t saved_mask; |
272 | - cpumask_of_cpu_ptr(cpu_mask, cpu); | |
273 | 270 | unsigned int perf_percent; |
274 | 271 | unsigned int retval; |
275 | 272 | |
276 | 273 | saved_mask = current->cpus_allowed; |
277 | - set_cpus_allowed_ptr(current, cpu_mask); | |
274 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
278 | 275 | if (get_cpu() != cpu) { |
279 | 276 | /* We were not able to run on requested processor */ |
280 | 277 | put_cpu(); |
... | ... | @@ -340,7 +337,6 @@ |
340 | 337 | |
341 | 338 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
342 | 339 | { |
343 | - cpumask_of_cpu_ptr(cpu_mask, cpu); | |
344 | 340 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); |
345 | 341 | unsigned int freq; |
346 | 342 | unsigned int cached_freq; |
... | ... | @@ -353,7 +349,7 @@ |
353 | 349 | } |
354 | 350 | |
355 | 351 | cached_freq = data->freq_table[data->acpi_data->state].frequency; |
356 | - freq = extract_freq(get_cur_val(cpu_mask), data); | |
352 | + freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); | |
357 | 353 | if (freq != cached_freq) { |
358 | 354 | /* |
359 | 355 | * The dreaded BIOS frequency change behind our back. |
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
... | ... | @@ -479,12 +479,11 @@ |
479 | 479 | static int check_supported_cpu(unsigned int cpu) |
480 | 480 | { |
481 | 481 | cpumask_t oldmask; |
482 | - cpumask_of_cpu_ptr(cpu_mask, cpu); | |
483 | 482 | u32 eax, ebx, ecx, edx; |
484 | 483 | unsigned int rc = 0; |
485 | 484 | |
486 | 485 | oldmask = current->cpus_allowed; |
487 | - set_cpus_allowed_ptr(current, cpu_mask); | |
486 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
488 | 487 | |
489 | 488 | if (smp_processor_id() != cpu) { |
490 | 489 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); |
... | ... | @@ -1017,7 +1016,6 @@ |
1017 | 1016 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) |
1018 | 1017 | { |
1019 | 1018 | cpumask_t oldmask; |
1020 | - cpumask_of_cpu_ptr(cpu_mask, pol->cpu); | |
1021 | 1019 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
1022 | 1020 | u32 checkfid; |
1023 | 1021 | u32 checkvid; |
... | ... | @@ -1032,7 +1030,7 @@ |
1032 | 1030 | |
1033 | 1031 | /* only run on specific CPU from here on */ |
1034 | 1032 | oldmask = current->cpus_allowed; |
1035 | - set_cpus_allowed_ptr(current, cpu_mask); | |
1033 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); | |
1036 | 1034 | |
1037 | 1035 | if (smp_processor_id() != pol->cpu) { |
1038 | 1036 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
... | ... | @@ -1107,7 +1105,6 @@ |
1107 | 1105 | { |
1108 | 1106 | struct powernow_k8_data *data; |
1109 | 1107 | cpumask_t oldmask; |
1110 | - cpumask_of_cpu_ptr_declare(newmask); | |
1111 | 1108 | int rc; |
1112 | 1109 | |
1113 | 1110 | if (!cpu_online(pol->cpu)) |
... | ... | @@ -1159,8 +1156,7 @@ |
1159 | 1156 | |
1160 | 1157 | /* only run on specific CPU from here on */ |
1161 | 1158 | oldmask = current->cpus_allowed; |
1162 | - cpumask_of_cpu_ptr_next(newmask, pol->cpu); | |
1163 | - set_cpus_allowed_ptr(current, newmask); | |
1159 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); | |
1164 | 1160 | |
1165 | 1161 | if (smp_processor_id() != pol->cpu) { |
1166 | 1162 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
... | ... | @@ -1182,7 +1178,7 @@ |
1182 | 1178 | set_cpus_allowed_ptr(current, &oldmask); |
1183 | 1179 | |
1184 | 1180 | if (cpu_family == CPU_HW_PSTATE) |
1185 | - pol->cpus = *newmask; | |
1181 | + pol->cpus = cpumask_of_cpu(pol->cpu); | |
1186 | 1182 | else |
1187 | 1183 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); |
1188 | 1184 | data->available_cores = &(pol->cpus); |
... | ... | @@ -1248,7 +1244,6 @@ |
1248 | 1244 | { |
1249 | 1245 | struct powernow_k8_data *data; |
1250 | 1246 | cpumask_t oldmask = current->cpus_allowed; |
1251 | - cpumask_of_cpu_ptr(newmask, cpu); | |
1252 | 1247 | unsigned int khz = 0; |
1253 | 1248 | unsigned int first; |
1254 | 1249 | |
... | ... | @@ -1258,7 +1253,7 @@ |
1258 | 1253 | if (!data) |
1259 | 1254 | return -EINVAL; |
1260 | 1255 | |
1261 | - set_cpus_allowed_ptr(current, newmask); | |
1256 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
1262 | 1257 | if (smp_processor_id() != cpu) { |
1263 | 1258 | printk(KERN_ERR PFX |
1264 | 1259 | "limiting to CPU %d failed in powernowk8_get\n", cpu); |
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
... | ... | @@ -324,10 +324,9 @@ |
324 | 324 | unsigned l, h; |
325 | 325 | unsigned clock_freq; |
326 | 326 | cpumask_t saved_mask; |
327 | - cpumask_of_cpu_ptr(new_mask, cpu); | |
328 | 327 | |
329 | 328 | saved_mask = current->cpus_allowed; |
330 | - set_cpus_allowed_ptr(current, new_mask); | |
329 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
331 | 330 | if (smp_processor_id() != cpu) |
332 | 331 | return 0; |
333 | 332 | |
334 | 333 | |
335 | 334 | |
... | ... | @@ -585,15 +584,12 @@ |
585 | 584 | * Best effort undo.. |
586 | 585 | */ |
587 | 586 | |
588 | - if (!cpus_empty(*covered_cpus)) { | |
589 | - cpumask_of_cpu_ptr_declare(new_mask); | |
590 | - | |
587 | + if (!cpus_empty(*covered_cpus)) | |
591 | 588 | for_each_cpu_mask_nr(j, *covered_cpus) { |
592 | - cpumask_of_cpu_ptr_next(new_mask, j); | |
593 | - set_cpus_allowed_ptr(current, new_mask); | |
589 | + set_cpus_allowed_ptr(current, | |
590 | + &cpumask_of_cpu(j)); | |
594 | 591 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
595 | 592 | } |
596 | - } | |
597 | 593 | |
598 | 594 | tmp = freqs.new; |
599 | 595 | freqs.new = freqs.old; |
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
arch/x86/kernel/cpu/intel_cacheinfo.c
... | ... | @@ -516,7 +516,6 @@ |
516 | 516 | unsigned long j; |
517 | 517 | int retval; |
518 | 518 | cpumask_t oldmask; |
519 | - cpumask_of_cpu_ptr(newmask, cpu); | |
520 | 519 | |
521 | 520 | if (num_cache_leaves == 0) |
522 | 521 | return -ENOENT; |
... | ... | @@ -527,7 +526,7 @@ |
527 | 526 | return -ENOMEM; |
528 | 527 | |
529 | 528 | oldmask = current->cpus_allowed; |
530 | - retval = set_cpus_allowed_ptr(current, newmask); | |
529 | + retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
531 | 530 | if (retval) |
532 | 531 | goto out; |
533 | 532 |
arch/x86/kernel/ldt.c
... | ... | @@ -62,12 +62,10 @@ |
62 | 62 | |
63 | 63 | if (reload) { |
64 | 64 | #ifdef CONFIG_SMP |
65 | - cpumask_of_cpu_ptr_declare(mask); | |
66 | - | |
67 | 65 | preempt_disable(); |
68 | 66 | load_LDT(pc); |
69 | - cpumask_of_cpu_ptr_next(mask, smp_processor_id()); | |
70 | - if (!cpus_equal(current->mm->cpu_vm_mask, *mask)) | |
67 | + if (!cpus_equal(current->mm->cpu_vm_mask, | |
68 | + cpumask_of_cpu(smp_processor_id()))) | |
71 | 69 | smp_call_function(flush_ldt, current->mm, 1); |
72 | 70 | preempt_enable(); |
73 | 71 | #else |
arch/x86/kernel/microcode.c
... | ... | @@ -388,7 +388,6 @@ |
388 | 388 | void *new_mc = NULL; |
389 | 389 | int cpu; |
390 | 390 | cpumask_t old; |
391 | - cpumask_of_cpu_ptr_declare(newmask); | |
392 | 391 | |
393 | 392 | old = current->cpus_allowed; |
394 | 393 | |
... | ... | @@ -405,8 +404,7 @@ |
405 | 404 | |
406 | 405 | if (!uci->valid) |
407 | 406 | continue; |
408 | - cpumask_of_cpu_ptr_next(newmask, cpu); | |
409 | - set_cpus_allowed_ptr(current, newmask); | |
407 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
410 | 408 | error = get_maching_microcode(new_mc, cpu); |
411 | 409 | if (error < 0) |
412 | 410 | goto out; |
... | ... | @@ -576,7 +574,6 @@ |
576 | 574 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
577 | 575 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
578 | 576 | cpumask_t old; |
579 | - cpumask_of_cpu_ptr(newmask, cpu); | |
580 | 577 | unsigned int val[2]; |
581 | 578 | int err = 0; |
582 | 579 | |
... | ... | @@ -585,7 +582,7 @@ |
585 | 582 | return 0; |
586 | 583 | |
587 | 584 | old = current->cpus_allowed; |
588 | - set_cpus_allowed_ptr(current, newmask); | |
585 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
589 | 586 | |
590 | 587 | /* Check if the microcode we have in memory matches the CPU */ |
591 | 588 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || |
592 | 589 | |
... | ... | @@ -623,12 +620,11 @@ |
623 | 620 | static void microcode_init_cpu(int cpu, int resume) |
624 | 621 | { |
625 | 622 | cpumask_t old; |
626 | - cpumask_of_cpu_ptr(newmask, cpu); | |
627 | 623 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
628 | 624 | |
629 | 625 | old = current->cpus_allowed; |
630 | 626 | |
631 | - set_cpus_allowed_ptr(current, newmask); | |
627 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
632 | 628 | mutex_lock(&microcode_mutex); |
633 | 629 | collect_cpu_info(cpu); |
634 | 630 | if (uci->valid && system_state == SYSTEM_RUNNING && !resume) |
635 | 631 | |
636 | 632 | |
... | ... | @@ -661,13 +657,10 @@ |
661 | 657 | if (end == buf) |
662 | 658 | return -EINVAL; |
663 | 659 | if (val == 1) { |
664 | - cpumask_t old; | |
665 | - cpumask_of_cpu_ptr(newmask, cpu); | |
660 | + cpumask_t old = current->cpus_allowed; | |
666 | 661 | |
667 | - old = current->cpus_allowed; | |
668 | - | |
669 | 662 | get_online_cpus(); |
670 | - set_cpus_allowed_ptr(current, newmask); | |
663 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
671 | 664 | |
672 | 665 | mutex_lock(&microcode_mutex); |
673 | 666 | if (uci->valid) |
arch/x86/kernel/reboot.c
... | ... | @@ -414,25 +414,20 @@ |
414 | 414 | |
415 | 415 | /* The boot cpu is always logical cpu 0 */ |
416 | 416 | int reboot_cpu_id = 0; |
417 | - cpumask_of_cpu_ptr(newmask, reboot_cpu_id); | |
418 | 417 | |
419 | 418 | #ifdef CONFIG_X86_32 |
420 | 419 | /* See if there has been given a command line override */ |
421 | 420 | if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) && |
422 | - cpu_online(reboot_cpu)) { | |
421 | + cpu_online(reboot_cpu)) | |
423 | 422 | reboot_cpu_id = reboot_cpu; |
424 | - cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); | |
425 | - } | |
426 | 423 | #endif |
427 | 424 | |
428 | 425 | /* Make certain the cpu I'm about to reboot on is online */ |
429 | - if (!cpu_online(reboot_cpu_id)) { | |
426 | + if (!cpu_online(reboot_cpu_id)) | |
430 | 427 | reboot_cpu_id = smp_processor_id(); |
431 | - cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id); | |
432 | - } | |
433 | 428 | |
434 | 429 | /* Make certain I only run on the appropriate processor */ |
435 | - set_cpus_allowed_ptr(current, newmask); | |
430 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id)); | |
436 | 431 | |
437 | 432 | /* O.K Now that I'm on the appropriate processor, |
438 | 433 | * stop all of the others. |
drivers/acpi/processor_throttling.c
... | ... | @@ -827,7 +827,6 @@ |
827 | 827 | static int acpi_processor_get_throttling(struct acpi_processor *pr) |
828 | 828 | { |
829 | 829 | cpumask_t saved_mask; |
830 | - cpumask_of_cpu_ptr_declare(new_mask); | |
831 | 830 | int ret; |
832 | 831 | |
833 | 832 | if (!pr) |
... | ... | @@ -839,8 +838,7 @@ |
839 | 838 | * Migrate task to the cpu pointed by pr. |
840 | 839 | */ |
841 | 840 | saved_mask = current->cpus_allowed; |
842 | - cpumask_of_cpu_ptr_next(new_mask, pr->id); | |
843 | - set_cpus_allowed_ptr(current, new_mask); | |
841 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); | |
844 | 842 | ret = pr->throttling.acpi_processor_get_throttling(pr); |
845 | 843 | /* restore the previous state */ |
846 | 844 | set_cpus_allowed_ptr(current, &saved_mask); |
... | ... | @@ -989,7 +987,6 @@ |
989 | 987 | int acpi_processor_set_throttling(struct acpi_processor *pr, int state) |
990 | 988 | { |
991 | 989 | cpumask_t saved_mask; |
992 | - cpumask_of_cpu_ptr_declare(new_mask); | |
993 | 990 | int ret = 0; |
994 | 991 | unsigned int i; |
995 | 992 | struct acpi_processor *match_pr; |
... | ... | @@ -1028,8 +1025,7 @@ |
1028 | 1025 | * it can be called only for the cpu pointed by pr. |
1029 | 1026 | */ |
1030 | 1027 | if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { |
1031 | - cpumask_of_cpu_ptr_next(new_mask, pr->id); | |
1032 | - set_cpus_allowed_ptr(current, new_mask); | |
1028 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id)); | |
1033 | 1029 | ret = p_throttling->acpi_processor_set_throttling(pr, |
1034 | 1030 | t_state.target_state); |
1035 | 1031 | } else { |
... | ... | @@ -1060,8 +1056,7 @@ |
1060 | 1056 | continue; |
1061 | 1057 | } |
1062 | 1058 | t_state.cpu = i; |
1063 | - cpumask_of_cpu_ptr_next(new_mask, i); | |
1064 | - set_cpus_allowed_ptr(current, new_mask); | |
1059 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); | |
1065 | 1060 | ret = match_pr->throttling. |
1066 | 1061 | acpi_processor_set_throttling( |
1067 | 1062 | match_pr, t_state.target_state); |
drivers/firmware/dcdbas.c
... | ... | @@ -245,7 +245,6 @@ |
245 | 245 | static int smi_request(struct smi_cmd *smi_cmd) |
246 | 246 | { |
247 | 247 | cpumask_t old_mask; |
248 | - cpumask_of_cpu_ptr(new_mask, 0); | |
249 | 248 | int ret = 0; |
250 | 249 | |
251 | 250 | if (smi_cmd->magic != SMI_CMD_MAGIC) { |
... | ... | @@ -256,7 +255,7 @@ |
256 | 255 | |
257 | 256 | /* SMI requires CPU 0 */ |
258 | 257 | old_mask = current->cpus_allowed; |
259 | - set_cpus_allowed_ptr(current, new_mask); | |
258 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); | |
260 | 259 | if (smp_processor_id() != 0) { |
261 | 260 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", |
262 | 261 | __func__); |
drivers/misc/sgi-xp/xpc_main.c
... | ... | @@ -229,11 +229,10 @@ |
229 | 229 | int last_IRQ_count = 0; |
230 | 230 | int new_IRQ_count; |
231 | 231 | int force_IRQ = 0; |
232 | - cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU); | |
233 | 232 | |
234 | 233 | /* this thread was marked active by xpc_hb_init() */ |
235 | 234 | |
236 | - set_cpus_allowed_ptr(current, cpumask); | |
235 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU)); | |
237 | 236 | |
238 | 237 | /* set our heartbeating to other partitions into motion */ |
239 | 238 | xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ); |
kernel/stop_machine.c
... | ... | @@ -33,9 +33,8 @@ |
33 | 33 | { |
34 | 34 | int irqs_disabled = 0; |
35 | 35 | int prepared = 0; |
36 | - cpumask_of_cpu_ptr(cpumask, (int)(long)cpu); | |
37 | 36 | |
38 | - set_cpus_allowed_ptr(current, cpumask); | |
37 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu)); | |
39 | 38 | |
40 | 39 | /* Ack: we are alive */ |
41 | 40 | smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */ |
kernel/time/tick-common.c
... | ... | @@ -196,12 +196,10 @@ |
196 | 196 | struct tick_device *td; |
197 | 197 | int cpu, ret = NOTIFY_OK; |
198 | 198 | unsigned long flags; |
199 | - cpumask_of_cpu_ptr_declare(cpumask); | |
200 | 199 | |
201 | 200 | spin_lock_irqsave(&tick_device_lock, flags); |
202 | 201 | |
203 | 202 | cpu = smp_processor_id(); |
204 | - cpumask_of_cpu_ptr_next(cpumask, cpu); | |
205 | 203 | if (!cpu_isset(cpu, newdev->cpumask)) |
206 | 204 | goto out_bc; |
207 | 205 | |
... | ... | @@ -209,7 +207,7 @@ |
209 | 207 | curdev = td->evtdev; |
210 | 208 | |
211 | 209 | /* cpu local device ? */ |
212 | - if (!cpus_equal(newdev->cpumask, *cpumask)) { | |
210 | + if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) { | |
213 | 211 | |
214 | 212 | /* |
215 | 213 | * If the cpu affinity of the device interrupt can not |
... | ... | @@ -222,7 +220,7 @@ |
222 | 220 | * If we have a cpu local device already, do not replace it |
223 | 221 | * by a non cpu local device |
224 | 222 | */ |
225 | - if (curdev && cpus_equal(curdev->cpumask, *cpumask)) | |
223 | + if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu))) | |
226 | 224 | goto out_bc; |
227 | 225 | } |
228 | 226 | |
... | ... | @@ -254,7 +252,7 @@ |
254 | 252 | curdev = NULL; |
255 | 253 | } |
256 | 254 | clockevents_exchange_device(curdev, newdev); |
257 | - tick_setup_device(td, newdev, cpu, cpumask); | |
255 | + tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu)); | |
258 | 256 | if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) |
259 | 257 | tick_oneshot_notify(); |
260 | 258 |
kernel/trace/trace_sysprof.c
... | ... | @@ -213,9 +213,7 @@ |
213 | 213 | int cpu; |
214 | 214 | |
215 | 215 | for_each_online_cpu(cpu) { |
216 | - cpumask_of_cpu_ptr(new_mask, cpu); | |
217 | - | |
218 | - set_cpus_allowed_ptr(current, new_mask); | |
216 | + set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); | |
219 | 217 | start_stack_timer(cpu); |
220 | 218 | } |
221 | 219 | set_cpus_allowed_ptr(current, &saved_mask); |
lib/smp_processor_id.c
... | ... | @@ -11,7 +11,6 @@ |
11 | 11 | { |
12 | 12 | unsigned long preempt_count = preempt_count(); |
13 | 13 | int this_cpu = raw_smp_processor_id(); |
14 | - cpumask_of_cpu_ptr_declare(this_mask); | |
15 | 14 | |
16 | 15 | if (likely(preempt_count)) |
17 | 16 | goto out; |
... | ... | @@ -23,9 +22,7 @@ |
23 | 22 | * Kernel threads bound to a single CPU can safely use |
24 | 23 | * smp_processor_id(): |
25 | 24 | */ |
26 | - cpumask_of_cpu_ptr_next(this_mask, this_cpu); | |
27 | - | |
28 | - if (cpus_equal(current->cpus_allowed, *this_mask)) | |
25 | + if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu))) | |
29 | 26 | goto out; |
30 | 27 | |
31 | 28 | /* |
net/sunrpc/svc.c