Commit 65c011845316d3c1381f478ca0d8265c43b3b039

Authored by Mike Travis
Committed by Ingo Molnar
1 parent bb2c018b09

cpumask: Replace cpumask_of_cpu with cpumask_of_cpu_ptr

* This patch replaces the dangerous lvalue version of cpumask_of_cpu
    with new cpumask_of_cpu_ptr macros.  These are patterned after the
    node_to_cpumask_ptr macros.
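
    The lvalue version was dangerous because, without a
    cpumask_of_cpu_map[], cpumask_of_cpu() built the mask inside a GCC
    statement expression and returned it through *(&m); taking its
    address therefore handed out a pointer to a variable whose scope
    ends with that expression.  A minimal sketch of the hazard
    (illustrative, not code from this patch):

	/* &cpumask_of_cpu(cpu) evaluates to &m, where m is local to
	 * the statement expression -- the pointer outlives its scope. */
	const cpumask_t *mask = &cpumask_of_cpu(cpu);
	set_cpus_allowed_ptr(current, mask);	/* may read a dead stack slot */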

    In general terms, if there is a cpumask_of_cpu_map[], then a pointer
    to the cpumask_of_cpu_map[cpu] entry is used.  The cpumask_of_cpu_map
    is provided when there is a large NR_CPUS count, greatly reducing
    the amount of code generated and stack space used for
    cpumask_of_cpu().  The pointer to the cpumask_t value is needed for
    calling set_cpus_allowed_ptr(), which avoids the stack space
    otherwise needed to pass the cpumask_t by value.

    If there isn't a cpumask_of_cpu_map[], then a temporary variable is
    declared and filled in with the value from cpumask_of_cpu(cpu), along
    with a pointer variable pointing to this temporary.  Afterwards, the
    pointer is used to reference the cpumask value.  The compiler will
    optimize out the extra dereference through the pointer as well as
    the stack space used for the pointer, resulting in identical code.
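
    Concretely, the two configurations expand roughly as follows (a
    paraphrase of the include/linux/cpumask.h hunk below; 'v' is the
    caller-chosen name):

	/* CONFIG_HAVE_CPUMASK_OF_CPU_MAP: point into the shared map */
	const cpumask_t *v = &cpumask_of_cpu_map[cpu];

	/* otherwise: a local temporary plus a pointer to it */
	cpumask_t _v = cpumask_of_cpu(cpu);
	const cpumask_t *v = &_v;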

    A good example showing the two macro families used side by side is
    in net/sunrpc/svc.c:

	case SVC_POOL_PERCPU:
	{
		unsigned int cpu = m->pool_to[pidx];
		cpumask_of_cpu_ptr(cpumask, cpu);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, cpumask);
		return 1;
	}
	case SVC_POOL_PERNODE:
	{
		unsigned int node = m->pool_to[pidx];
		node_to_cpumask_ptr(nodecpumask, node);

		*oldmask = current->cpus_allowed;
		set_cpus_allowed_ptr(current, nodecpumask);
		return 1;
	}
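
    When the target CPU changes inside a loop, the declaration and the
    assignment are split with the _declare/_next pair, as in the
    acpi-cpufreq drv_write() hunk below (shortened here):

	cpumask_of_cpu_ptr_declare(cpu_mask);

	for_each_cpu_mask_nr(i, cmd->mask) {
		cpumask_of_cpu_ptr_next(cpu_mask, i);
		set_cpus_allowed_ptr(current, cpu_mask);
		do_drv_write(cmd);
	}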

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Showing 14 changed files with 91 additions and 35 deletions

arch/x86/kernel/acpi/cstate.c
... ... @@ -73,6 +73,7 @@
73 73 struct cpuinfo_x86 *c = &cpu_data(cpu);
74 74  
75 75 cpumask_t saved_mask;
  76 + cpumask_of_cpu_ptr(new_mask, cpu);
76 77 int retval;
77 78 unsigned int eax, ebx, ecx, edx;
78 79 unsigned int edx_part;
... ... @@ -91,7 +92,7 @@
91 92  
92 93 /* Make sure we are running on right CPU */
93 94 saved_mask = current->cpus_allowed;
94   - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  95 + retval = set_cpus_allowed_ptr(current, new_mask);
95 96 if (retval)
96 97 return -1;
97 98  
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
... ... @@ -200,10 +200,12 @@
200 200 static void drv_write(struct drv_cmd *cmd)
201 201 {
202 202 cpumask_t saved_mask = current->cpus_allowed;
  203 + cpumask_of_cpu_ptr_declare(cpu_mask);
203 204 unsigned int i;
204 205  
205 206 for_each_cpu_mask_nr(i, cmd->mask) {
206   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
  207 + cpumask_of_cpu_ptr_next(cpu_mask, i);
  208 + set_cpus_allowed_ptr(current, cpu_mask);
207 209 do_drv_write(cmd);
208 210 }
209 211  
210 212  
... ... @@ -267,11 +269,12 @@
267 269 } aperf_cur, mperf_cur;
268 270  
269 271 cpumask_t saved_mask;
  272 + cpumask_of_cpu_ptr(cpu_mask, cpu);
270 273 unsigned int perf_percent;
271 274 unsigned int retval;
272 275  
273 276 saved_mask = current->cpus_allowed;
274   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  277 + set_cpus_allowed_ptr(current, cpu_mask);
275 278 if (get_cpu() != cpu) {
276 279 /* We were not able to run on requested processor */
277 280 put_cpu();
... ... @@ -337,6 +340,7 @@
337 340  
338 341 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
339 342 {
  343 + cpumask_of_cpu_ptr(cpu_mask, cpu);
340 344 struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
341 345 unsigned int freq;
342 346 unsigned int cached_freq;
... ... @@ -349,7 +353,7 @@
349 353 }
350 354  
351 355 cached_freq = data->freq_table[data->acpi_data->state].frequency;
352   - freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
  356 + freq = extract_freq(get_cur_val(cpu_mask), data);
353 357 if (freq != cached_freq) {
354 358 /*
355 359 * The dreaded BIOS frequency change behind our back.
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
... ... @@ -479,11 +479,12 @@
479 479 static int check_supported_cpu(unsigned int cpu)
480 480 {
481 481 cpumask_t oldmask;
  482 + cpumask_of_cpu_ptr(cpu_mask, cpu);
482 483 u32 eax, ebx, ecx, edx;
483 484 unsigned int rc = 0;
484 485  
485 486 oldmask = current->cpus_allowed;
486   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  487 + set_cpus_allowed_ptr(current, cpu_mask);
487 488  
488 489 if (smp_processor_id() != cpu) {
489 490 printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
... ... @@ -1016,6 +1017,7 @@
1016 1017 static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation)
1017 1018 {
1018 1019 cpumask_t oldmask;
  1020 + cpumask_of_cpu_ptr(cpu_mask, pol->cpu);
1019 1021 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
1020 1022 u32 checkfid;
1021 1023 u32 checkvid;
... ... @@ -1030,7 +1032,7 @@
1030 1032  
1031 1033 /* only run on specific CPU from here on */
1032 1034 oldmask = current->cpus_allowed;
1033   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
  1035 + set_cpus_allowed_ptr(current, cpu_mask);
1034 1036  
1035 1037 if (smp_processor_id() != pol->cpu) {
1036 1038 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
... ... @@ -1105,6 +1107,7 @@
1105 1107 {
1106 1108 struct powernow_k8_data *data;
1107 1109 cpumask_t oldmask;
  1110 + cpumask_of_cpu_ptr_declare(newmask);
1108 1111 int rc;
1109 1112  
1110 1113 if (!cpu_online(pol->cpu))
... ... @@ -1156,7 +1159,8 @@
1156 1159  
1157 1160 /* only run on specific CPU from here on */
1158 1161 oldmask = current->cpus_allowed;
1159   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
  1162 + cpumask_of_cpu_ptr_next(newmask, pol->cpu);
  1163 + set_cpus_allowed_ptr(current, newmask);
1160 1164  
1161 1165 if (smp_processor_id() != pol->cpu) {
1162 1166 printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
... ... @@ -1178,7 +1182,7 @@
1178 1182 set_cpus_allowed_ptr(current, &oldmask);
1179 1183  
1180 1184 if (cpu_family == CPU_HW_PSTATE)
1181   - pol->cpus = cpumask_of_cpu(pol->cpu);
  1185 + pol->cpus = *newmask;
1182 1186 else
1183 1187 pol->cpus = per_cpu(cpu_core_map, pol->cpu);
1184 1188 data->available_cores = &(pol->cpus);
... ... @@ -1244,6 +1248,7 @@
1244 1248 {
1245 1249 struct powernow_k8_data *data;
1246 1250 cpumask_t oldmask = current->cpus_allowed;
  1251 + cpumask_of_cpu_ptr(newmask, cpu);
1247 1252 unsigned int khz = 0;
1248 1253 unsigned int first;
1249 1254  
... ... @@ -1253,7 +1258,7 @@
1253 1258 if (!data)
1254 1259 return -EINVAL;
1255 1260  
1256   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  1261 + set_cpus_allowed_ptr(current, newmask);
1257 1262 if (smp_processor_id() != cpu) {
1258 1263 printk(KERN_ERR PFX
1259 1264 "limiting to CPU %d failed in powernowk8_get\n", cpu);
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
... ... @@ -313,9 +313,10 @@
313 313 unsigned l, h;
314 314 unsigned clock_freq;
315 315 cpumask_t saved_mask;
  316 + cpumask_of_cpu_ptr(new_mask, cpu);
316 317  
317 318 saved_mask = current->cpus_allowed;
318   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  319 + set_cpus_allowed_ptr(current, new_mask);
319 320 if (smp_processor_id() != cpu)
320 321 return 0;
321 322  
322 323  
... ... @@ -554,9 +555,11 @@
554 555 */
555 556  
556 557 if (!cpus_empty(covered_cpus)) {
  558 + cpumask_of_cpu_ptr_declare(new_mask);
  559 +
557 560 for_each_cpu_mask_nr(j, covered_cpus) {
558   - set_cpus_allowed_ptr(current,
559   - &cpumask_of_cpu(j));
  561 + cpumask_of_cpu_ptr_next(new_mask, j);
  562 + set_cpus_allowed_ptr(current, new_mask);
560 563 wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
561 564 }
562 565 }
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
... ... @@ -244,7 +244,8 @@
244 244  
245 245 static unsigned int speedstep_get(unsigned int cpu)
246 246 {
247   - return _speedstep_get(&cpumask_of_cpu(cpu));
  247 + cpumask_of_cpu_ptr(newmask, cpu);
  248 + return _speedstep_get(newmask);
248 249 }
249 250  
250 251 /**
arch/x86/kernel/cpu/intel_cacheinfo.c
... ... @@ -516,6 +516,7 @@
516 516 unsigned long j;
517 517 int retval;
518 518 cpumask_t oldmask;
  519 + cpumask_of_cpu_ptr(newmask, cpu);
519 520  
520 521 if (num_cache_leaves == 0)
521 522 return -ENOENT;
... ... @@ -526,7 +527,7 @@
526 527 return -ENOMEM;
527 528  
528 529 oldmask = current->cpus_allowed;
529   - retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  530 + retval = set_cpus_allowed_ptr(current, newmask);
530 531 if (retval)
531 532 goto out;
532 533  
arch/x86/kernel/microcode.c
... ... @@ -388,6 +388,7 @@
388 388 void *new_mc = NULL;
389 389 int cpu;
390 390 cpumask_t old;
  391 + cpumask_of_cpu_ptr_declare(newmask);
391 392  
392 393 old = current->cpus_allowed;
393 394  
... ... @@ -404,7 +405,8 @@
404 405  
405 406 if (!uci->valid)
406 407 continue;
407   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  408 + cpumask_of_cpu_ptr_next(newmask, cpu);
  409 + set_cpus_allowed_ptr(current, newmask);
408 410 error = get_maching_microcode(new_mc, cpu);
409 411 if (error < 0)
410 412 goto out;
... ... @@ -574,6 +576,7 @@
574 576 struct cpuinfo_x86 *c = &cpu_data(cpu);
575 577 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
576 578 cpumask_t old;
  579 + cpumask_of_cpu_ptr(newmask, cpu);
577 580 unsigned int val[2];
578 581 int err = 0;
579 582  
... ... @@ -582,7 +585,7 @@
582 585 return 0;
583 586  
584 587 old = current->cpus_allowed;
585   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  588 + set_cpus_allowed_ptr(current, newmask);
586 589  
587 590 /* Check if the microcode we have in memory matches the CPU */
588 591 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
589 592  
... ... @@ -620,11 +623,12 @@
620 623 static void microcode_init_cpu(int cpu, int resume)
621 624 {
622 625 cpumask_t old;
  626 + cpumask_of_cpu_ptr(newmask, cpu);
623 627 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
624 628  
625 629 old = current->cpus_allowed;
626 630  
627   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  631 + set_cpus_allowed_ptr(current, newmask);
628 632 mutex_lock(&microcode_mutex);
629 633 collect_cpu_info(cpu);
630 634 if (uci->valid && system_state == SYSTEM_RUNNING && !resume)
631 635  
... ... @@ -656,11 +660,12 @@
656 660 return -EINVAL;
657 661 if (val == 1) {
658 662 cpumask_t old;
  663 + cpumask_of_cpu_ptr(newmask, cpu);
659 664  
660 665 old = current->cpus_allowed;
661 666  
662 667 get_online_cpus();
663   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  668 + set_cpus_allowed_ptr(current, newmask);
664 669  
665 670 mutex_lock(&microcode_mutex);
666 671 if (uci->valid)
arch/x86/kernel/reboot.c
... ... @@ -403,24 +403,28 @@
403 403 {
404 404 /* Stop the cpus and apics */
405 405 #ifdef CONFIG_SMP
406   - int reboot_cpu_id;
407 406  
408 407 /* The boot cpu is always logical cpu 0 */
409   - reboot_cpu_id = 0;
  408 + int reboot_cpu_id = 0;
  409 + cpumask_of_cpu_ptr(newmask, reboot_cpu_id);
410 410  
411 411 #ifdef CONFIG_X86_32
412 412 /* See if there has been given a command line override */
413 413 if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
414   - cpu_online(reboot_cpu))
  414 + cpu_online(reboot_cpu)) {
415 415 reboot_cpu_id = reboot_cpu;
  416 + cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
  417 + }
416 418 #endif
417 419  
418 420 /* Make certain the cpu I'm about to reboot on is online */
419   - if (!cpu_online(reboot_cpu_id))
  421 + if (!cpu_online(reboot_cpu_id)) {
420 422 reboot_cpu_id = smp_processor_id();
  423 + cpumask_of_cpu_ptr_next(newmask, reboot_cpu_id);
  424 + }
421 425  
422 426 /* Make certain I only run on the appropriate processor */
423   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
  427 + set_cpus_allowed_ptr(current, newmask);
424 428  
425 429 /* O.K Now that I'm on the appropriate processor,
426 430 * stop all of the others.
drivers/acpi/processor_throttling.c
... ... @@ -827,6 +827,7 @@
827 827 static int acpi_processor_get_throttling(struct acpi_processor *pr)
828 828 {
829 829 cpumask_t saved_mask;
  830 + cpumask_of_cpu_ptr_declare(new_mask);
830 831 int ret;
831 832  
832 833 if (!pr)
... ... @@ -838,7 +839,8 @@
838 839 * Migrate task to the cpu pointed by pr.
839 840 */
840 841 saved_mask = current->cpus_allowed;
841   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
  842 + cpumask_of_cpu_ptr_next(new_mask, pr->id);
  843 + set_cpus_allowed_ptr(current, new_mask);
842 844 ret = pr->throttling.acpi_processor_get_throttling(pr);
843 845 /* restore the previous state */
844 846 set_cpus_allowed_ptr(current, &saved_mask);
... ... @@ -987,6 +989,7 @@
987 989 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
988 990 {
989 991 cpumask_t saved_mask;
  992 + cpumask_of_cpu_ptr_declare(new_mask);
990 993 int ret = 0;
991 994 unsigned int i;
992 995 struct acpi_processor *match_pr;
... ... @@ -1025,7 +1028,8 @@
1025 1028 * it can be called only for the cpu pointed by pr.
1026 1029 */
1027 1030 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
1028   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
  1031 + cpumask_of_cpu_ptr_next(new_mask, pr->id);
  1032 + set_cpus_allowed_ptr(current, new_mask);
1029 1033 ret = p_throttling->acpi_processor_set_throttling(pr,
1030 1034 t_state.target_state);
1031 1035 } else {
... ... @@ -1056,7 +1060,8 @@
1056 1060 continue;
1057 1061 }
1058 1062 t_state.cpu = i;
1059   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
  1063 + cpumask_of_cpu_ptr_next(new_mask, i);
  1064 + set_cpus_allowed_ptr(current, new_mask);
1060 1065 ret = match_pr->throttling.
1061 1066 acpi_processor_set_throttling(
1062 1067 match_pr, t_state.target_state);
drivers/firmware/dcdbas.c
... ... @@ -254,6 +254,7 @@
254 254 static int smi_request(struct smi_cmd *smi_cmd)
255 255 {
256 256 cpumask_t old_mask;
  257 + cpumask_of_cpu_ptr(new_mask, 0);
257 258 int ret = 0;
258 259  
259 260 if (smi_cmd->magic != SMI_CMD_MAGIC) {
... ... @@ -264,7 +265,7 @@
264 265  
265 266 /* SMI requires CPU 0 */
266 267 old_mask = current->cpus_allowed;
267   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
  268 + set_cpus_allowed_ptr(current, new_mask);
268 269 if (smp_processor_id() != 0) {
269 270 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
270 271 __func__);
include/linux/cpumask.h
... ... @@ -62,6 +62,15 @@
62 62 * int next_cpu_nr(cpu, mask) Next cpu past 'cpu', or nr_cpu_ids
63 63 *
64 64 * cpumask_t cpumask_of_cpu(cpu) Return cpumask with bit 'cpu' set
  65 + *ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
  66 + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t *v
  67 + * cpumask_of_cpu_ptr_next(v, cpu) Sets v = &cpumask_of_cpu_map[cpu]
  68 + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
  69 + *else
  70 + * cpumask_of_cpu_ptr_declare(v) Declares cpumask_t _v and *v = &_v
  71 + * cpumask_of_cpu_ptr_next(v, cpu) Sets _v = cpumask_of_cpu(cpu)
  72 + * cpumask_of_cpu_ptr(v, cpu) Combines above two operations
  73 + *endif
65 74 * CPU_MASK_ALL Initializer - all bits set
66 75 * CPU_MASK_NONE Initializer - no bits set
67 76 * unsigned long *cpus_addr(mask) Array of unsigned long's in mask
68 77  
... ... @@ -236,11 +245,16 @@
236 245  
237 246 #ifdef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
238 247 extern cpumask_t *cpumask_of_cpu_map;
239   -#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
240   -
  248 +#define cpumask_of_cpu(cpu) (cpumask_of_cpu_map[cpu])
  249 +#define cpumask_of_cpu_ptr(v, cpu) \
  250 + const cpumask_t *v = &cpumask_of_cpu(cpu)
  251 +#define cpumask_of_cpu_ptr_declare(v) \
  252 + const cpumask_t *v
  253 +#define cpumask_of_cpu_ptr_next(v, cpu) \
  254 + v = &cpumask_of_cpu(cpu)
241 255 #else
242 256 #define cpumask_of_cpu(cpu) \
243   -(*({ \
  257 +({ \
244 258 typeof(_unused_cpumask_arg_) m; \
245 259 if (sizeof(m) == sizeof(unsigned long)) { \
246 260 m.bits[0] = 1UL<<(cpu); \
... ... @@ -248,8 +262,16 @@
248 262 cpus_clear(m); \
249 263 cpu_set((cpu), m); \
250 264 } \
251   - &m; \
252   -}))
  265 + m; \
  266 +})
  267 +#define cpumask_of_cpu_ptr(v, cpu) \
  268 + cpumask_t _##v = cpumask_of_cpu(cpu); \
  269 + const cpumask_t *v = &_##v
  270 +#define cpumask_of_cpu_ptr_declare(v) \
  271 + cpumask_t _##v; \
  272 + const cpumask_t *v = &_##v
  273 +#define cpumask_of_cpu_ptr_next(v, cpu) \
  274 + _##v = cpumask_of_cpu(cpu)
253 275 #endif
254 276  
255 277 #define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
kernel/stop_machine.c
... ... @@ -33,8 +33,9 @@
33 33 {
34 34 int irqs_disabled = 0;
35 35 int prepared = 0;
  36 + cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
36 37  
37   - set_cpus_allowed_ptr(current, &cpumask_of_cpu((int)(long)cpu));
  38 + set_cpus_allowed_ptr(current, cpumask);
38 39  
39 40 /* Ack: we are alive */
40 41 smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
kernel/trace/trace_sysprof.c
... ... @@ -213,7 +213,9 @@
213 213 int cpu;
214 214  
215 215 for_each_online_cpu(cpu) {
216   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  216 + cpumask_of_cpu_ptr(new_mask, cpu);
  217 +
  218 + set_cpus_allowed_ptr(current, new_mask);
217 219 start_stack_timer(cpu);
218 220 }
219 221 set_cpus_allowed_ptr(current, &saved_mask);
... ... @@ -314,9 +314,10 @@
314 314 case SVC_POOL_PERCPU:
315 315 {
316 316 unsigned int cpu = m->pool_to[pidx];
  317 + cpumask_of_cpu_ptr(cpumask, cpu);
317 318  
318 319 *oldmask = current->cpus_allowed;
319   - set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
  320 + set_cpus_allowed_ptr(current, cpumask);
320 321 return 1;
321 322 }
322 323 case SVC_POOL_PERNODE: