Commit 937e26c0d1843c92750dac9bca1c972d33e73306

Authored by KOSAKI Motohiro
Committed by Linus Torvalds
1 parent ba7328b2d8

m32r: convert cpumask api

We plan to remove the old cpus_xx() cpumask APIs later.  Also, we plan to
change the mm_cpumask() implementation to allocate only nr_cpu_ids bits,
so dereferencing *mm_cpumask() is a dangerous operation.

This patch converts them.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 51 additions and 52 deletions Side-by-side Diff

arch/m32r/include/asm/smp.h
... ... @@ -81,11 +81,11 @@
81 81  
82 82 static __inline__ unsigned int num_booting_cpus(void)
83 83 {
84   - return cpus_weight(cpu_callout_map);
  84 + return cpumask_weight(&cpu_callout_map);
85 85 }
86 86  
87 87 extern void smp_send_timer(void);
88   -extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
  88 +extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
89 89  
90 90 extern void arch_send_call_function_single_ipi(int cpu);
91 91 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
arch/m32r/kernel/smp.c
... ... @@ -87,7 +87,6 @@
87 87  
88 88 static void send_IPI_allbutself(int, int);
89 89 static void send_IPI_mask(const struct cpumask *, int, int);
90   -unsigned long send_IPI_mask_phys(cpumask_t, int, int);
91 90  
92 91 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
93 92 /* Rescheduling request Routines */
94 93  
... ... @@ -162,10 +161,10 @@
162 161 unsigned long *mask;
163 162  
164 163 preempt_disable();
165   - cpumask = cpu_online_map;
166   - cpu_clear(smp_processor_id(), cpumask);
  164 + cpumask_copy(&cpumask, cpu_online_mask);
  165 + cpumask_clear_cpu(smp_processor_id(), &cpumask);
167 166 spin_lock(&flushcache_lock);
168   - mask=cpus_addr(cpumask);
  167 + mask=cpumask_bits(&cpumask);
169 168 atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
170 169 send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
171 170 _flush_cache_copyback_all();
... ... @@ -263,8 +262,8 @@
263 262 preempt_disable();
264 263 cpu_id = smp_processor_id();
265 264 mmc = &mm->context[cpu_id];
266   - cpu_mask = *mm_cpumask(mm);
267   - cpu_clear(cpu_id, cpu_mask);
  265 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  266 + cpumask_clear_cpu(cpu_id, &cpu_mask);
268 267  
269 268 if (*mmc != NO_CONTEXT) {
270 269 local_irq_save(flags);
... ... @@ -275,7 +274,7 @@
275 274 cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
276 275 local_irq_restore(flags);
277 276 }
278   - if (!cpus_empty(cpu_mask))
  277 + if (!cpumask_empty(&cpu_mask))
279 278 flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
280 279  
281 280 preempt_enable();
... ... @@ -333,8 +332,8 @@
333 332 preempt_disable();
334 333 cpu_id = smp_processor_id();
335 334 mmc = &mm->context[cpu_id];
336   - cpu_mask = *mm_cpumask(mm);
337   - cpu_clear(cpu_id, cpu_mask);
  335 + cpumask_copy(&cpu_mask, mm_cpumask(mm));
  336 + cpumask_clear_cpu(cpu_id, &cpu_mask);
338 337  
339 338 #ifdef DEBUG_SMP
340 339 if (!mm)
... ... @@ -348,7 +347,7 @@
348 347 __flush_tlb_page(va);
349 348 local_irq_restore(flags);
350 349 }
351   - if (!cpus_empty(cpu_mask))
  350 + if (!cpumask_empty(&cpu_mask))
352 351 flush_tlb_others(cpu_mask, mm, vma, va);
353 352  
354 353 preempt_enable();
355 354  
356 355  
... ... @@ -395,14 +394,14 @@
395 394 * - current CPU must not be in mask
396 395 * - mask must exist :)
397 396 */
398   - BUG_ON(cpus_empty(cpumask));
  397 + BUG_ON(cpumask_empty(&cpumask));
399 398  
400   - BUG_ON(cpu_isset(smp_processor_id(), cpumask));
  399 + BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
401 400 BUG_ON(!mm);
402 401  
403 402 /* If a CPU which we ran on has gone down, OK. */
404   - cpus_and(cpumask, cpumask, cpu_online_map);
405   - if (cpus_empty(cpumask))
  403 + cpumask_and(&cpumask, &cpumask, cpu_online_mask);
  404 + if (cpumask_empty(&cpumask))
406 405 return;
407 406  
408 407 /*
... ... @@ -416,7 +415,7 @@
416 415 flush_mm = mm;
417 416 flush_vma = vma;
418 417 flush_va = va;
419   - mask=cpus_addr(cpumask);
  418 + mask=cpumask_bits(&cpumask);
420 419 atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
421 420  
422 421 /*
... ... @@ -425,7 +424,7 @@
425 424 */
426 425 send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
427 426  
428   - while (!cpus_empty(flush_cpumask)) {
  427 + while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
429 428 /* nothing. lockup detection does not belong here */
430 429 mb();
431 430 }
... ... @@ -460,7 +459,7 @@
460 459 int cpu_id = smp_processor_id();
461 460 unsigned long *mmc = &flush_mm->context[cpu_id];
462 461  
463   - if (!cpu_isset(cpu_id, flush_cpumask))
  462 + if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
464 463 return;
465 464  
466 465 if (flush_va == FLUSH_ALL) {
... ... @@ -478,7 +477,7 @@
478 477 __flush_tlb_page(va);
479 478 }
480 479 }
481   - cpu_clear(cpu_id, flush_cpumask);
  480 + cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
482 481 }
483 482  
484 483 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
... ... @@ -530,7 +529,7 @@
530 529 /*
531 530 * Remove this CPU:
532 531 */
533   - cpu_clear(cpu_id, cpu_online_map);
  532 + set_cpu_online(cpu_id, false);
534 533  
535 534 /*
536 535 * PSW IE = 1;
... ... @@ -725,8 +724,8 @@
725 724 {
726 725 cpumask_t cpumask;
727 726  
728   - cpumask = cpu_online_map;
729   - cpu_clear(smp_processor_id(), cpumask);
  727 + cpumask_copy(&cpumask, cpu_online_mask);
  728 + cpumask_clear_cpu(smp_processor_id(), &cpumask);
730 729  
731 730 send_IPI_mask(&cpumask, ipi_num, try);
732 731 }
733 732  
734 733  
... ... @@ -763,13 +762,13 @@
763 762 cpumask_and(&tmp, cpumask, cpu_online_mask);
764 763 BUG_ON(!cpumask_equal(cpumask, &tmp));
765 764  
766   - physid_mask = CPU_MASK_NONE;
  765 + cpumask_clear(&physid_mask);
767 766 for_each_cpu(cpu_id, cpumask) {
768 767 if ((phys_id = cpu_to_physid(cpu_id)) != -1)
769   - cpu_set(phys_id, physid_mask);
  768 + cpumask_set_cpu(phys_id, &physid_mask);
770 769 }
771 770  
772   - send_IPI_mask_phys(physid_mask, ipi_num, try);
  771 + send_IPI_mask_phys(&physid_mask, ipi_num, try);
773 772 }
774 773  
775 774 /*==========================================================================*
776 775  
... ... @@ -792,14 +791,14 @@
792 791 * ---------- --- --------------------------------------------------------
793 792 *
794 793 *==========================================================================*/
795   -unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
  794 +unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
796 795 int try)
797 796 {
798 797 spinlock_t *ipilock;
799 798 volatile unsigned long *ipicr_addr;
800 799 unsigned long ipicr_val;
801 800 unsigned long my_physid_mask;
802   - unsigned long mask = cpus_addr(physid_mask)[0];
  801 + unsigned long mask = cpumask_bits(physid_mask)[0];
803 802  
804 803  
805 804 if (mask & ~physids_coerce(phys_cpu_present_map))
arch/m32r/kernel/smpboot.c
... ... @@ -135,9 +135,9 @@
135 135 {
136 136 bsp_phys_id = hard_smp_processor_id();
137 137 physid_set(bsp_phys_id, phys_cpu_present_map);
138   - cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */
139   - cpu_set(0, cpu_callout_map);
140   - cpu_set(0, cpu_callin_map);
  138 + set_cpu_online(0, true); /* BSP's cpu_id == 0 */
  139 + cpumask_set_cpu(0, &cpu_callout_map);
  140 + cpumask_set_cpu(0, &cpu_callin_map);
141 141  
142 142 /*
143 143 * Initialize the logical to physical CPU number mapping
... ... @@ -178,7 +178,7 @@
178 178 for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
179 179 physid_set(phys_id, phys_cpu_present_map);
180 180 #ifndef CONFIG_HOTPLUG_CPU
181   - init_cpu_present(&cpu_possible_map);
  181 + init_cpu_present(cpu_possible_mask);
182 182 #endif
183 183  
184 184 show_mp_info(nr_cpu);
185 185  
... ... @@ -294,10 +294,10 @@
294 294 send_status = 0;
295 295 boot_status = 0;
296 296  
297   - cpu_set(phys_id, cpu_bootout_map);
  297 + cpumask_set_cpu(phys_id, &cpu_bootout_map);
298 298  
299 299 /* Send Startup IPI */
300   - send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
  300 + send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
301 301  
302 302 Dprintk("Waiting for send to finish...\n");
303 303 timeout = 0;
... ... @@ -306,7 +306,7 @@
306 306 do {
307 307 Dprintk("+");
308 308 udelay(1000);
309   - send_status = !cpu_isset(phys_id, cpu_bootin_map);
  309 + send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
310 310 } while (send_status && (timeout++ < 100));
311 311  
312 312 Dprintk("After Startup.\n");
313 313  
314 314  
... ... @@ -316,19 +316,19 @@
316 316 * allow APs to start initializing.
317 317 */
318 318 Dprintk("Before Callout %d.\n", cpu_id);
319   - cpu_set(cpu_id, cpu_callout_map);
  319 + cpumask_set_cpu(cpu_id, &cpu_callout_map);
320 320 Dprintk("After Callout %d.\n", cpu_id);
321 321  
322 322 /*
323 323 * Wait 5s total for a response
324 324 */
325 325 for (timeout = 0; timeout < 5000; timeout++) {
326   - if (cpu_isset(cpu_id, cpu_callin_map))
  326 + if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
327 327 break; /* It has booted */
328 328 udelay(1000);
329 329 }
330 330  
331   - if (cpu_isset(cpu_id, cpu_callin_map)) {
  331 + if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
332 332 /* number CPUs logically, starting from 1 (BSP is 0) */
333 333 Dprintk("OK.\n");
334 334 } else {
... ... @@ -340,9 +340,9 @@
340 340  
341 341 if (send_status || boot_status) {
342 342 unmap_cpu_to_physid(cpu_id, phys_id);
343   - cpu_clear(cpu_id, cpu_callout_map);
344   - cpu_clear(cpu_id, cpu_callin_map);
345   - cpu_clear(cpu_id, cpu_initialized);
  343 + cpumask_clear_cpu(cpu_id, &cpu_callout_map);
  344 + cpumask_clear_cpu(cpu_id, &cpu_callin_map);
  345 + cpumask_clear_cpu(cpu_id, &cpu_initialized);
346 346 cpucount--;
347 347 }
348 348 }
349 349  
350 350  
... ... @@ -351,17 +351,17 @@
351 351 {
352 352 int timeout;
353 353  
354   - cpu_set(cpu_id, smp_commenced_mask);
  354 + cpumask_set_cpu(cpu_id, &smp_commenced_mask);
355 355  
356 356 /*
357 357 * Wait 5s total for a response
358 358 */
359 359 for (timeout = 0; timeout < 5000; timeout++) {
360   - if (cpu_isset(cpu_id, cpu_online_map))
  360 + if (cpu_online(cpu_id))
361 361 break;
362 362 udelay(1000);
363 363 }
364   - if (!cpu_isset(cpu_id, cpu_online_map))
  364 + if (!cpu_online(cpu_id))
365 365 BUG();
366 366  
367 367 return 0;
368 368  
... ... @@ -373,11 +373,11 @@
373 373 unsigned long bogosum = 0;
374 374  
375 375 for (timeout = 0; timeout < 5000; timeout++) {
376   - if (cpus_equal(cpu_callin_map, cpu_online_map))
  376 + if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
377 377 break;
378 378 udelay(1000);
379 379 }
380   - if (!cpus_equal(cpu_callin_map, cpu_online_map))
  380 + if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
381 381 BUG();
382 382  
383 383 for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
... ... @@ -388,7 +388,7 @@
388 388 */
389 389 Dprintk("Before bogomips.\n");
390 390 if (cpucount) {
391   - for_each_cpu_mask(cpu_id, cpu_online_map)
  391 + for_each_cpu(cpu_id,cpu_online_mask)
392 392 bogosum += cpu_data[cpu_id].loops_per_jiffy;
393 393  
394 394 printk(KERN_INFO "Total of %d processors activated " \
... ... @@ -425,7 +425,7 @@
425 425 cpu_init();
426 426 preempt_disable();
427 427 smp_callin();
428   - while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
  428 + while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
429 429 cpu_relax();
430 430  
431 431 smp_online();
... ... @@ -463,7 +463,7 @@
463 463 int cpu_id = smp_processor_id();
464 464 unsigned long timeout;
465 465  
466   - if (cpu_isset(cpu_id, cpu_callin_map)) {
  466 + if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
467 467 printk("huh, phys CPU#%d, CPU#%d already present??\n",
468 468 phys_id, cpu_id);
469 469 BUG();
... ... @@ -474,7 +474,7 @@
474 474 timeout = jiffies + (2 * HZ);
475 475 while (time_before(jiffies, timeout)) {
476 476 /* Has the boot CPU finished it's STARTUP sequence ? */
477   - if (cpu_isset(cpu_id, cpu_callout_map))
  477 + if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
478 478 break;
479 479 cpu_relax();
480 480 }
... ... @@ -486,7 +486,7 @@
486 486 }
487 487  
488 488 /* Allow the master to continue. */
489   - cpu_set(cpu_id, cpu_callin_map);
  489 + cpumask_set_cpu(cpu_id, &cpu_callin_map);
490 490 }
491 491  
492 492 static void __init smp_online(void)
... ... @@ -503,7 +503,7 @@
503 503 /* Save our processor parameters */
504 504 smp_store_cpu_info(cpu_id);
505 505  
506   - cpu_set(cpu_id, cpu_online_map);
  506 + set_cpu_online(cpu_id, true);
507 507 }
508 508  
509 509 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/