Commit 394e3902c55e667945f6f1c2bdbc59842cce70f7

Authored by Andrew Morton
Committed by Linus Torvalds
1 parent 63872f87a1

[PATCH] more for_each_cpu() conversions

When we stop allocating percpu memory for not-possible CPUs, we must not touch
the percpu data for not-possible CPUs at all.  The correct way of doing this
is to test cpu_possible() or to use for_each_cpu().
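
As a minimal illustrative sketch (not code from this patch; the percpu
variable name is hypothetical), the unsafe open-coded loop and its safe
replacement look like this.  In this kernel, for_each_cpu() walks
cpu_possible_map, so it visits only CPUs whose percpu area was allocated:

	/* Hypothetical percpu counter, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, hypothetical_counter);
	unsigned long total = 0;
	int i;

	/* Unsafe: percpu slots for not-possible CPUs were never allocated. */
	for (i = 0; i < NR_CPUS; i++)
		total += per_cpu(hypothetical_counter, i);

	/* Safe: visit only possible CPUs. */
	for_each_cpu(i)
		total += per_cpu(hypothetical_counter, i);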

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very
few instances of this bug, if any.  But the patch converts lots of open-coded
tests to use the preferred helper macros.
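
The most common conversion in this sweep is the /proc/interrupts pattern
repeated in the arch irq.c hunks below; a representative before/after,
using the same variable names as those hunks:

	/* Before: open-coded scan with an explicit online check. */
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);

	/* After: the helper iterates over online CPUs directly. */
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);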

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 42 changed files with 137 additions and 222 deletions

arch/cris/kernel/irq.c
... ... @@ -52,9 +52,8 @@
52 52  
53 53 if (i == 0) {
54 54 seq_printf(p, " ");
55   - for (j=0; j<NR_CPUS; j++)
56   - if (cpu_online(j))
57   - seq_printf(p, "CPU%d ",j);
  55 + for_each_online_cpu(j)
  56 + seq_printf(p, "CPU%d ",j);
58 57 seq_putc(p, '\n');
59 58 }
60 59  
... ... @@ -67,9 +66,8 @@
67 66 #ifndef CONFIG_SMP
68 67 seq_printf(p, "%10u ", kstat_irqs(i));
69 68 #else
70   - for (j = 0; j < NR_CPUS; j++)
71   - if (cpu_online(j))
72   - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
  69 + for_each_online_cpu(j)
  70 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
73 71 #endif
74 72 seq_printf(p, " %14s", irq_desc[i].handler->typename);
75 73 seq_printf(p, " %s", action->name);
arch/frv/kernel/irq.c
... ... @@ -75,9 +75,8 @@
75 75 switch (i) {
76 76 case 0:
77 77 seq_printf(p, " ");
78   - for (j = 0; j < NR_CPUS; j++)
79   - if (cpu_online(j))
80   - seq_printf(p, "CPU%d ",j);
  78 + for_each_online_cpu(j)
  79 + seq_printf(p, "CPU%d ",j);
81 80  
82 81 seq_putc(p, '\n');
83 82 break;
... ... @@ -100,9 +99,8 @@
100 99 #ifndef CONFIG_SMP
101 100 seq_printf(p, "%10u ", kstat_irqs(i));
102 101 #else
103   - for (j = 0; j < NR_CPUS; j++)
104   - if (cpu_online(j))
105   - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
  102 + for_each_online_cpu(j)
  103 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
106 104 #endif
107 105  
108 106 level = group->sources[ix]->level - frv_irq_levels;
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
... ... @@ -1145,9 +1145,7 @@
1145 1145 {
1146 1146 unsigned int i, supported_cpus = 0;
1147 1147  
1148   - for (i=0; i<NR_CPUS; i++) {
1149   - if (!cpu_online(i))
1150   - continue;
  1148 + for_each_cpu(i) {
1151 1149 if (check_supported_cpu(i))
1152 1150 supported_cpus++;
1153 1151 }
arch/i386/kernel/io_apic.c
... ... @@ -351,8 +351,8 @@
351 351 {
352 352 int i, j;
353 353 Dprintk("Rotating IRQs among CPUs.\n");
354   - for (i = 0; i < NR_CPUS; i++) {
355   - for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
  354 + for_each_online_cpu(i) {
  355 + for (j = 0; j < NR_IRQS; j++) {
356 356 if (!irq_desc[j].action)
357 357 continue;
358 358 /* Is it a significant load ? */
... ... @@ -381,7 +381,7 @@
381 381 unsigned long imbalance = 0;
382 382 cpumask_t allowed_mask, target_cpu_mask, tmp;
383 383  
384   - for (i = 0; i < NR_CPUS; i++) {
  384 + for_each_cpu(i) {
385 385 int package_index;
386 386 CPU_IRQ(i) = 0;
387 387 if (!cpu_online(i))
... ... @@ -422,9 +422,7 @@
422 422 }
423 423 }
424 424 /* Find the least loaded processor package */
425   - for (i = 0; i < NR_CPUS; i++) {
426   - if (!cpu_online(i))
427   - continue;
  425 + for_each_online_cpu(i) {
428 426 if (i != CPU_TO_PACKAGEINDEX(i))
429 427 continue;
430 428 if (min_cpu_irq > CPU_IRQ(i)) {
... ... @@ -441,9 +439,7 @@
441 439 */
442 440 tmp_cpu_irq = 0;
443 441 tmp_loaded = -1;
444   - for (i = 0; i < NR_CPUS; i++) {
445   - if (!cpu_online(i))
446   - continue;
  442 + for_each_online_cpu(i) {
447 443 if (i != CPU_TO_PACKAGEINDEX(i))
448 444 continue;
449 445 if (max_cpu_irq <= CPU_IRQ(i))
... ... @@ -619,9 +615,7 @@
619 615 if (smp_num_siblings > 1 && !cpus_empty(tmp))
620 616 physical_balance = 1;
621 617  
622   - for (i = 0; i < NR_CPUS; i++) {
623   - if (!cpu_online(i))
624   - continue;
  618 + for_each_online_cpu(i) {
625 619 irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
626 620 irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
627 621 if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
628 622  
629 623  
... ... @@ -638,9 +632,11 @@
638 632 else
639 633 printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
640 634 failed:
641   - for (i = 0; i < NR_CPUS; i++) {
  635 + for_each_cpu(i) {
642 636 kfree(irq_cpu_data[i].irq_delta);
  637 + irq_cpu_data[i].irq_delta = NULL;
643 638 kfree(irq_cpu_data[i].last_irq);
  639 + irq_cpu_data[i].last_irq = NULL;
644 640 }
645 641 return 0;
646 642 }
arch/i386/kernel/nmi.c
... ... @@ -143,7 +143,7 @@
143 143 local_irq_enable();
144 144 mdelay((10*1000)/nmi_hz); // wait 10 ticks
145 145  
146   - for (cpu = 0; cpu < NR_CPUS; cpu++) {
  146 + for_each_cpu(cpu) {
147 147 #ifdef CONFIG_SMP
148 148 /* Check cpu_callin_map here because that is set
149 149 after the timer is started. */
... ... @@ -510,7 +510,7 @@
510 510 * Just reset the alert counters, (other CPUs might be
511 511 * spinning on locks we hold):
512 512 */
513   - for (i = 0; i < NR_CPUS; i++)
  513 + for_each_cpu(i)
514 514 alert_counter[i] = 0;
515 515  
516 516 /*
arch/i386/oprofile/nmi_int.c
... ... @@ -122,7 +122,7 @@
122 122 static void free_msrs(void)
123 123 {
124 124 int i;
125   - for (i = 0; i < NR_CPUS; ++i) {
  125 + for_each_cpu(i) {
126 126 kfree(cpu_msrs[i].counters);
127 127 cpu_msrs[i].counters = NULL;
128 128 kfree(cpu_msrs[i].controls);
... ... @@ -138,10 +138,7 @@
138 138 size_t counters_size = sizeof(struct op_msr) * model->num_counters;
139 139  
140 140 int i;
141   - for (i = 0; i < NR_CPUS; ++i) {
142   - if (!cpu_online(i))
143   - continue;
144   -
  141 + for_each_online_cpu(i) {
145 142 cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
146 143 if (!cpu_msrs[i].counters) {
147 144 success = 0;
arch/m32r/kernel/irq.c
... ... @@ -37,9 +37,8 @@
37 37  
38 38 if (i == 0) {
39 39 seq_printf(p, " ");
40   - for (j=0; j<NR_CPUS; j++)
41   - if (cpu_online(j))
42   - seq_printf(p, "CPU%d ",j);
  40 + for_each_online_cpu(j)
  41 + seq_printf(p, "CPU%d ",j);
43 42 seq_putc(p, '\n');
44 43 }
45 44  
... ... @@ -52,9 +51,8 @@
52 51 #ifndef CONFIG_SMP
53 52 seq_printf(p, "%10u ", kstat_irqs(i));
54 53 #else
55   - for (j = 0; j < NR_CPUS; j++)
56   - if (cpu_online(j))
57   - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
  54 + for_each_online_cpu(j)
  55 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
58 56 #endif
59 57 seq_printf(p, " %14s", irq_desc[i].handler->typename);
60 58 seq_printf(p, " %s", action->name);
arch/mips/kernel/irq.c
... ... @@ -68,9 +68,8 @@
68 68  
69 69 if (i == 0) {
70 70 seq_printf(p, " ");
71   - for (j=0; j<NR_CPUS; j++)
72   - if (cpu_online(j))
73   - seq_printf(p, "CPU%d ",j);
  71 + for_each_online_cpu(j)
  72 + seq_printf(p, "CPU%d ",j);
74 73 seq_putc(p, '\n');
75 74 }
76 75  
... ... @@ -83,9 +82,8 @@
83 82 #ifndef CONFIG_SMP
84 83 seq_printf(p, "%10u ", kstat_irqs(i));
85 84 #else
86   - for (j = 0; j < NR_CPUS; j++)
87   - if (cpu_online(j))
88   - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
  85 + for_each_online_cpu(j)
  86 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
89 87 #endif
90 88 seq_printf(p, " %14s", irq_desc[i].handler->typename);
91 89 seq_printf(p, " %s", action->name);
arch/mips/kernel/smp.c
... ... @@ -167,8 +167,8 @@
167 167 mb();
168 168  
169 169 /* Send a message to all other CPUs and wait for them to respond */
170   - for (i = 0; i < NR_CPUS; i++)
171   - if (cpu_online(i) && i != cpu)
  170 + for_each_online_cpu(i)
  171 + if (i != cpu)
172 172 core_send_ipi(i, SMP_CALL_FUNCTION);
173 173  
174 174 /* Wait for response */
arch/mips/sgi-ip27/ip27-irq.c
... ... @@ -88,11 +88,8 @@
88 88 {
89 89 int cpu, i;
90 90  
91   - for (cpu = 0; cpu <= NR_CPUS; cpu++) {
  91 + for_each_online_cpu(cpu) {
92 92 struct slice_data *si = cpu_data[cpu].data;
93   -
94   - if (!cpu_online(cpu))
95   - continue;
96 93  
97 94 for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
98 95 if (si->level_to_irq[i] == irq) {
arch/parisc/kernel/smp.c
... ... @@ -298,8 +298,8 @@
298 298 {
299 299 int i;
300 300  
301   - for (i = 0; i < NR_CPUS; i++) {
302   - if (cpu_online(i) && i != smp_processor_id())
  301 + for_each_online_cpu(i) {
  302 + if (i != smp_processor_id())
303 303 send_IPI_single(i, op);
304 304 }
305 305 }
306 306  
... ... @@ -643,14 +643,13 @@
643 643 if ( argc == 1 ){
644 644  
645 645 #ifdef DUMP_MORE_STATE
646   - for(i=0; i<NR_CPUS; i++) {
  646 + for_each_online_cpu(i) {
647 647 int cpus_per_line = 4;
648   - if(cpu_online(i)) {
649   - if (j++ % cpus_per_line)
650   - printk(" %3d",i);
651   - else
652   - printk("\n %3d",i);
653   - }
  648 +
  649 + if (j++ % cpus_per_line)
  650 + printk(" %3d",i);
  651 + else
  652 + printk("\n %3d",i);
654 653 }
655 654 printk("\n");
656 655 #else
... ... @@ -659,9 +658,7 @@
659 658 } else if((argc==2) && !(strcmp(argv[1],"-l"))) {
660 659 printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
661 660 #ifdef DUMP_MORE_STATE
662   - for(i=0;i<NR_CPUS;i++) {
663   - if (!cpu_online(i))
664   - continue;
  661 + for_each_online_cpu(i) {
665 662 if (cpu_data[i].cpuid != NO_PROC_ID) {
666 663 switch(cpu_data[i].state) {
667 664 case STATE_RENDEZVOUS:
... ... @@ -695,9 +692,7 @@
695 692 } else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
696 693 #ifdef DUMP_MORE_STATE
697 694 printk("\nCPUSTATE CPUID\n");
698   - for (i=0;i<NR_CPUS;i++) {
699   - if (!cpu_online(i))
700   - continue;
  695 + for_each_online_cpu(i) {
701 696 if (cpu_data[i].cpuid != NO_PROC_ID) {
702 697 switch(cpu_data[i].state) {
703 698 case STATE_RENDEZVOUS:
arch/powerpc/kernel/irq.c
... ... @@ -135,9 +135,8 @@
135 135 #ifdef CONFIG_TAU_INT
136 136 if (tau_initialized){
137 137 seq_puts(p, "TAU: ");
138   - for (j = 0; j < NR_CPUS; j++)
139   - if (cpu_online(j))
140   - seq_printf(p, "%10u ", tau_interrupts(j));
  138 + for_each_online_cpu(j)
  139 + seq_printf(p, "%10u ", tau_interrupts(j));
141 140 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
142 141 }
143 142 #endif
arch/powerpc/kernel/setup-common.c
... ... @@ -162,9 +162,8 @@
162 162 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
163 163 unsigned long bogosum = 0;
164 164 int i;
165   - for (i = 0; i < NR_CPUS; ++i)
166   - if (cpu_online(i))
167   - bogosum += loops_per_jiffy;
  165 + for_each_online_cpu(i)
  166 + bogosum += loops_per_jiffy;
168 167 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
169 168 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
170 169 #endif /* CONFIG_SMP && CONFIG_PPC32 */
arch/powerpc/kernel/setup_32.c
... ... @@ -272,9 +272,8 @@
272 272 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
273 273  
274 274 /* register CPU devices */
275   - for (i = 0; i < NR_CPUS; i++)
276   - if (cpu_possible(i))
277   - register_cpu(&cpu_devices[i], i, NULL);
  275 + for_each_cpu(i)
  276 + register_cpu(&cpu_devices[i], i, NULL);
278 277  
279 278 /* call platform init */
280 279 if (ppc_md.init != NULL) {
arch/powerpc/platforms/powermac/smp.c
... ... @@ -191,9 +191,7 @@
191 191 if (num_online_cpus() < 2)
192 192 return;
193 193  
194   - for (i = 0; i < NR_CPUS; i++) {
195   - if (!cpu_online(i))
196   - continue;
  194 + for_each_online_cpu(i) {
197 195 if (target == MSG_ALL
198 196 || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
199 197 || target == i) {
arch/ppc/kernel/setup.c
... ... @@ -168,9 +168,8 @@
168 168 /* Show summary information */
169 169 #ifdef CONFIG_SMP
170 170 unsigned long bogosum = 0;
171   - for (i = 0; i < NR_CPUS; ++i)
172   - if (cpu_online(i))
173   - bogosum += cpu_data[i].loops_per_jiffy;
  171 + for_each_online_cpu(i)
  172 + bogosum += cpu_data[i].loops_per_jiffy;
174 173 seq_printf(m, "total bogomips\t: %lu.%02lu\n",
175 174 bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
176 175 #endif /* CONFIG_SMP */
... ... @@ -712,9 +711,8 @@
712 711 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
713 712  
714 713 /* register CPU devices */
715   - for (i = 0; i < NR_CPUS; i++)
716   - if (cpu_possible(i))
717   - register_cpu(&cpu_devices[i], i, NULL);
  714 + for_each_cpu(i)
  715 + register_cpu(&cpu_devices[i], i, NULL);
718 716  
719 717 /* call platform init */
720 718 if (ppc_md.init != NULL) {
arch/s390/kernel/smp.c
... ... @@ -799,9 +799,7 @@
799 799 */
800 800 print_cpu_info(&S390_lowcore.cpu_data);
801 801  
802   - for(i = 0; i < NR_CPUS; i++) {
803   - if (!cpu_possible(i))
804   - continue;
  802 + for_each_cpu(i) {
805 803 lowcore_ptr[i] = (struct _lowcore *)
806 804 __get_free_pages(GFP_KERNEL|GFP_DMA,
807 805 sizeof(void*) == 8 ? 1 : 0);
arch/sh/kernel/irq.c
... ... @@ -35,9 +35,8 @@
35 35  
36 36 if (i == 0) {
37 37 seq_puts(p, " ");
38   - for (j=0; j<NR_CPUS; j++)
39   - if (cpu_online(j))
40   - seq_printf(p, "CPU%d ",j);
  38 + for_each_online_cpu(j)
  39 + seq_printf(p, "CPU%d ",j);
41 40 seq_putc(p, '\n');
42 41 }
43 42  
arch/sh/kernel/setup.c
... ... @@ -404,9 +404,8 @@
404 404 {
405 405 int cpu_id;
406 406  
407   - for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
408   - if (cpu_possible(cpu_id))
409   - register_cpu(&cpu[cpu_id], cpu_id, NULL);
  407 + for_each_cpu(cpu_id)
  408 + register_cpu(&cpu[cpu_id], cpu_id, NULL);
410 409  
411 410 return 0;
412 411 }
arch/sh64/kernel/irq.c
... ... @@ -53,9 +53,8 @@
53 53  
54 54 if (i == 0) {
55 55 seq_puts(p, " ");
56   - for (j=0; j<NR_CPUS; j++)
57   - if (cpu_online(j))
58   - seq_printf(p, "CPU%d ",j);
  56 + for_each_online_cpu(j)
  57 + seq_printf(p, "CPU%d ",j);
59 58 seq_putc(p, '\n');
60 59 }
61 60  
arch/sparc/kernel/irq.c
... ... @@ -184,9 +184,8 @@
184 184 #ifndef CONFIG_SMP
185 185 seq_printf(p, "%10u ", kstat_irqs(i));
186 186 #else
187   - for (j = 0; j < NR_CPUS; j++) {
188   - if (cpu_online(j))
189   - seq_printf(p, "%10u ",
  187 + for_each_online_cpu(j) {
  188 + seq_printf(p, "%10u ",
190 189 kstat_cpu(cpu_logical_map(j)).irqs[i]);
191 190 }
192 191 #endif
arch/sparc/kernel/smp.c
... ... @@ -243,9 +243,8 @@
243 243 return -EINVAL;
244 244  
245 245 spin_lock_irqsave(&prof_setup_lock, flags);
246   - for(i = 0; i < NR_CPUS; i++) {
247   - if (cpu_possible(i))
248   - load_profile_irq(i, lvl14_resolution / multiplier);
  246 + for_each_cpu(i) {
  247 + load_profile_irq(i, lvl14_resolution / multiplier);
249 248 prof_multiplier(i) = multiplier;
250 249 }
251 250 spin_unlock_irqrestore(&prof_setup_lock, flags);
... ... @@ -273,13 +272,12 @@
273 272 {
274 273 int i;
275 274  
276   - for (i = 0; i < NR_CPUS; i++) {
277   - if (cpu_online(i))
278   - seq_printf(m,
279   - "Cpu%dBogo\t: %lu.%02lu\n",
280   - i,
281   - cpu_data(i).udelay_val/(500000/HZ),
282   - (cpu_data(i).udelay_val/(5000/HZ))%100);
  275 + for_each_online_cpu(i) {
  276 + seq_printf(m,
  277 + "Cpu%dBogo\t: %lu.%02lu\n",
  278 + i,
  279 + cpu_data(i).udelay_val/(500000/HZ),
  280 + (cpu_data(i).udelay_val/(5000/HZ))%100);
283 281 }
284 282 }
285 283  
... ... @@ -288,9 +286,7 @@
288 286 int i;
289 287  
290 288 seq_printf(m, "State:\n");
291   - for (i = 0; i < NR_CPUS; i++) {
292   - if (cpu_online(i))
293   - seq_printf(m, "CPU%d\t\t: online\n", i);
294   - }
  289 + for_each_online_cpu(i)
  290 + seq_printf(m, "CPU%d\t\t: online\n", i);
295 291 }
arch/sparc/kernel/sun4d_irq.c
... ... @@ -103,11 +103,9 @@
103 103 #ifndef CONFIG_SMP
104 104 seq_printf(p, "%10u ", kstat_irqs(i));
105 105 #else
106   - for (x = 0; x < NR_CPUS; x++) {
107   - if (cpu_online(x))
108   - seq_printf(p, "%10u ",
109   - kstat_cpu(cpu_logical_map(x)).irqs[i]);
110   - }
  106 + for_each_online_cpu(x)
  107 + seq_printf(p, "%10u ",
  108 + kstat_cpu(cpu_logical_map(x)).irqs[i]);
111 109 #endif
112 110 seq_printf(p, "%c %s",
113 111 (action->flags & SA_INTERRUPT) ? '+' : ' ',
arch/sparc/kernel/sun4d_smp.c
... ... @@ -249,11 +249,9 @@
249 249 } else {
250 250 unsigned long bogosum = 0;
251 251  
252   - for(i = 0; i < NR_CPUS; i++) {
253   - if (cpu_isset(i, cpu_present_map)) {
254   - bogosum += cpu_data(i).udelay_val;
255   - smp_highest_cpu = i;
256   - }
  252 + for_each_present_cpu(i) {
  253 + bogosum += cpu_data(i).udelay_val;
  254 + smp_highest_cpu = i;
257 255 }
258 256 SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
259 257 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
arch/sparc/kernel/sun4m_smp.c
... ... @@ -218,10 +218,8 @@
218 218 cpu_present_map = cpumask_of_cpu(smp_processor_id());
219 219 } else {
220 220 unsigned long bogosum = 0;
221   - for(i = 0; i < NR_CPUS; i++) {
222   - if (cpu_isset(i, cpu_present_map))
223   - bogosum += cpu_data(i).udelay_val;
224   - }
  221 + for_each_present_cpu(i)
  222 + bogosum += cpu_data(i).udelay_val;
225 223 printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
226 224 cpucount + 1,
227 225 bogosum/(500000/HZ),
arch/sparc64/kernel/irq.c
... ... @@ -117,9 +117,7 @@
117 117 #ifndef CONFIG_SMP
118 118 seq_printf(p, "%10u ", kstat_irqs(i));
119 119 #else
120   - for (j = 0; j < NR_CPUS; j++) {
121   - if (!cpu_online(j))
122   - continue;
  120 + for_each_online_cpu(j) {
123 121 seq_printf(p, "%10u ",
124 122 kstat_cpu(j).irqs[i]);
125 123 }
arch/sparc64/kernel/smp.c
... ... @@ -57,25 +57,21 @@
57 57 int i;
58 58  
59 59 seq_printf(m, "State:\n");
60   - for (i = 0; i < NR_CPUS; i++) {
61   - if (cpu_online(i))
62   - seq_printf(m,
63   - "CPU%d:\t\tonline\n", i);
64   - }
  60 + for_each_online_cpu(i)
  61 + seq_printf(m, "CPU%d:\t\tonline\n", i);
65 62 }
66 63  
67 64 void smp_bogo(struct seq_file *m)
68 65 {
69 66 int i;
70 67  
71   - for (i = 0; i < NR_CPUS; i++)
72   - if (cpu_online(i))
73   - seq_printf(m,
74   - "Cpu%dBogo\t: %lu.%02lu\n"
75   - "Cpu%dClkTck\t: %016lx\n",
76   - i, cpu_data(i).udelay_val / (500000/HZ),
77   - (cpu_data(i).udelay_val / (5000/HZ)) % 100,
78   - i, cpu_data(i).clock_tick);
  68 + for_each_online_cpu(i)
  69 + seq_printf(m,
  70 + "Cpu%dBogo\t: %lu.%02lu\n"
  71 + "Cpu%dClkTck\t: %016lx\n",
  72 + i, cpu_data(i).udelay_val / (500000/HZ),
  73 + (cpu_data(i).udelay_val / (5000/HZ)) % 100,
  74 + i, cpu_data(i).clock_tick);
79 75 }
80 76  
81 77 void __init smp_store_cpu_info(int id)
... ... @@ -1282,7 +1278,7 @@
1282 1278 return -EINVAL;
1283 1279  
1284 1280 spin_lock_irqsave(&prof_setup_lock, flags);
1285   - for (i = 0; i < NR_CPUS; i++)
  1281 + for_each_cpu(i)
1286 1282 prof_multiplier(i) = multiplier;
1287 1283 current_tick_offset = (timer_tick_offset / multiplier);
1288 1284 spin_unlock_irqrestore(&prof_setup_lock, flags);
... ... @@ -1384,10 +1380,8 @@
1384 1380 unsigned long bogosum = 0;
1385 1381 int i;
1386 1382  
1387   - for (i = 0; i < NR_CPUS; i++) {
1388   - if (cpu_online(i))
1389   - bogosum += cpu_data(i).udelay_val;
1390   - }
  1383 + for_each_online_cpu(i)
  1384 + bogosum += cpu_data(i).udelay_val;
1391 1385 printk("Total of %ld processors activated "
1392 1386 "(%lu.%02lu BogoMIPS).\n",
1393 1387 (long) num_online_cpus(),
arch/x86_64/kernel/irq.c
... ... @@ -38,9 +38,8 @@
38 38  
39 39 if (i == 0) {
40 40 seq_printf(p, " ");
41   - for (j=0; j<NR_CPUS; j++)
42   - if (cpu_online(j))
43   - seq_printf(p, "CPU%d ",j);
  41 + for_each_online_cpu(j)
  42 + seq_printf(p, "CPU%d ",j);
44 43 seq_putc(p, '\n');
45 44 }
46 45  
... ... @@ -53,10 +52,8 @@
53 52 #ifndef CONFIG_SMP
54 53 seq_printf(p, "%10u ", kstat_irqs(i));
55 54 #else
56   - for (j=0; j<NR_CPUS; j++)
57   - if (cpu_online(j))
58   - seq_printf(p, "%10u ",
59   - kstat_cpu(j).irqs[i]);
  55 + for_each_online_cpu(j)
  56 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
60 57 #endif
61 58 seq_printf(p, " %14s", irq_desc[i].handler->typename);
62 59  
63 60  
... ... @@ -68,15 +65,13 @@
68 65 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
69 66 } else if (i == NR_IRQS) {
70 67 seq_printf(p, "NMI: ");
71   - for (j = 0; j < NR_CPUS; j++)
72   - if (cpu_online(j))
73   - seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
  68 + for_each_online_cpu(j)
  69 + seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
74 70 seq_putc(p, '\n');
75 71 #ifdef CONFIG_X86_LOCAL_APIC
76 72 seq_printf(p, "LOC: ");
77   - for (j = 0; j < NR_CPUS; j++)
78   - if (cpu_online(j))
79   - seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
  73 + for_each_online_cpu(j)
  74 + seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
80 75 seq_putc(p, '\n');
81 76 #endif
82 77 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
arch/x86_64/kernel/nmi.c
... ... @@ -162,9 +162,7 @@
162 162 local_irq_enable();
163 163 mdelay((10*1000)/nmi_hz); // wait 10 ticks
164 164  
165   - for (cpu = 0; cpu < NR_CPUS; cpu++) {
166   - if (!cpu_online(cpu))
167   - continue;
  165 + for_each_online_cpu(cpu) {
168 166 if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
169 167 endflag = 1;
170 168 printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
arch/xtensa/kernel/irq.c
... ... @@ -83,9 +83,8 @@
83 83  
84 84 if (i == 0) {
85 85 seq_printf(p, " ");
86   - for (j=0; j<NR_CPUS; j++)
87   - if (cpu_online(j))
88   - seq_printf(p, "CPU%d ",j);
  86 + for_each_online_cpu(j)
  87 + seq_printf(p, "CPU%d ",j);
89 88 seq_putc(p, '\n');
90 89 }
91 90  
... ... @@ -98,9 +97,8 @@
98 97 #ifndef CONFIG_SMP
99 98 seq_printf(p, "%10u ", kstat_irqs(i));
100 99 #else
101   - for (j = 0; j < NR_CPUS; j++)
102   - if (cpu_online(j))
103   - seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
  100 + for_each_online_cpu(j)
  101 + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
104 102 #endif
105 103 seq_printf(p, " %14s", irq_desc[i].handler->typename);
106 104 seq_printf(p, " %s", action->name);
... ... @@ -113,9 +111,8 @@
113 111 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
114 112 } else if (i == NR_IRQS) {
115 113 seq_printf(p, "NMI: ");
116   - for (j = 0; j < NR_CPUS; j++)
117   - if (cpu_online(j))
118   - seq_printf(p, "%10u ", nmi_count(j));
  114 + for_each_online_cpu(j)
  115 + seq_printf(p, "%10u ", nmi_count(j));
119 116 seq_putc(p, '\n');
120 117 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
121 118 }
drivers/net/loopback.c
... ... @@ -172,11 +172,9 @@
172 172  
173 173 memset(stats, 0, sizeof(struct net_device_stats));
174 174  
175   - for (i=0; i < NR_CPUS; i++) {
  175 + for_each_cpu(i) {
176 176 struct net_device_stats *lb_stats;
177 177  
178   - if (!cpu_possible(i))
179   - continue;
180 178 lb_stats = &per_cpu(loopback_stats, i);
181 179 stats->rx_bytes += lb_stats->rx_bytes;
182 180 stats->tx_bytes += lb_stats->tx_bytes;
drivers/oprofile/cpu_buffer.c
... ... @@ -38,9 +38,8 @@
38 38 {
39 39 int i;
40 40  
41   - for_each_online_cpu(i) {
  41 + for_each_online_cpu(i)
42 42 vfree(cpu_buffer[i].buffer);
43   - }
44 43 }
45 44  
46 45 int alloc_cpu_buffers(void)
fs/xfs/linux-2.6/xfs_stats.c
... ... @@ -62,18 +62,15 @@
62 62 while (j < xstats[i].endpoint) {
63 63 val = 0;
64 64 /* sum over all cpus */
65   - for (c = 0; c < NR_CPUS; c++) {
66   - if (!cpu_possible(c)) continue;
  65 + for_each_cpu(c)
67 66 val += *(((__u32*)&per_cpu(xfsstats, c) + j));
68   - }
69 67 len += sprintf(buffer + len, " %u", val);
70 68 j++;
71 69 }
72 70 buffer[len++] = '\n';
73 71 }
74 72 /* extra precision counters */
75   - for (i = 0; i < NR_CPUS; i++) {
76   - if (!cpu_possible(i)) continue;
  73 + for_each_cpu(i) {
77 74 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
78 75 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
79 76 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
fs/xfs/linux-2.6/xfs_sysctl.c
... ... @@ -38,8 +38,7 @@
38 38  
39 39 if (!ret && write && *valp) {
40 40 printk("XFS Clearing xfsstats\n");
41   - for (c = 0; c < NR_CPUS; c++) {
42   - if (!cpu_possible(c)) continue;
  41 + for_each_cpu(c) {
43 42 preempt_disable();
44 43 /* save vn_active, it's a universal truth! */
45 44 vn_active = per_cpu(xfsstats, c).vn_active;
include/asm-alpha/mmu_context.h
... ... @@ -231,9 +231,8 @@
231 231 {
232 232 int i;
233 233  
234   - for (i = 0; i < NR_CPUS; i++)
235   - if (cpu_online(i))
236   - mm->context[i] = 0;
  234 + for_each_online_cpu(i)
  235 + mm->context[i] = 0;
237 236 if (tsk != current)
238 237 task_thread_info(tsk)->pcb.ptbr
239 238 = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
include/asm-alpha/topology.h
... ... @@ -27,8 +27,8 @@
27 27 cpumask_t node_cpu_mask = CPU_MASK_NONE;
28 28 int cpu;
29 29  
30   - for(cpu = 0; cpu < NR_CPUS; cpu++) {
31   - if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
  30 + for_each_online_cpu(cpu) {
  31 + if (cpu_to_node(cpu) == node)
32 32 cpu_set(cpu, node_cpu_mask);
33 33 }
34 34  
include/asm-generic/percpu.h
... ... @@ -19,10 +19,9 @@
19 19 #define percpu_modcopy(pcpudst, src, size) \
20 20 do { \
21 21 unsigned int __i; \
22   - for (__i = 0; __i < NR_CPUS; __i++) \
23   - if (cpu_possible(__i)) \
24   - memcpy((pcpudst)+__per_cpu_offset[__i], \
25   - (src), (size)); \
  22 + for_each_cpu(__i) \
  23 + memcpy((pcpudst)+__per_cpu_offset[__i], \
  24 + (src), (size)); \
26 25 } while (0)
27 26 #else /* ! SMP */
28 27  
include/asm-powerpc/percpu.h
... ... @@ -27,10 +27,9 @@
27 27 #define percpu_modcopy(pcpudst, src, size) \
28 28 do { \
29 29 unsigned int __i; \
30   - for (__i = 0; __i < NR_CPUS; __i++) \
31   - if (cpu_possible(__i)) \
32   - memcpy((pcpudst)+__per_cpu_offset(__i), \
33   - (src), (size)); \
  30 + for_each_cpu(__i) \
  31 + memcpy((pcpudst)+__per_cpu_offset(__i), \
  32 + (src), (size)); \
34 33 } while (0)
35 34  
36 35 extern void setup_per_cpu_areas(void);
include/asm-s390/percpu.h
... ... @@ -46,10 +46,9 @@
46 46 #define percpu_modcopy(pcpudst, src, size) \
47 47 do { \
48 48 unsigned int __i; \
49   - for (__i = 0; __i < NR_CPUS; __i++) \
50   - if (cpu_possible(__i)) \
51   - memcpy((pcpudst)+__per_cpu_offset[__i], \
52   - (src), (size)); \
  49 + for_each_cpu(__i) \
  50 + memcpy((pcpudst)+__per_cpu_offset[__i], \
  51 + (src), (size)); \
53 52 } while (0)
54 53  
55 54 #else /* ! SMP */
include/asm-sparc64/percpu.h
... ... @@ -26,10 +26,9 @@
26 26 #define percpu_modcopy(pcpudst, src, size) \
27 27 do { \
28 28 unsigned int __i; \
29   - for (__i = 0; __i < NR_CPUS; __i++) \
30   - if (cpu_possible(__i)) \
31   - memcpy((pcpudst)+__per_cpu_offset(__i), \
32   - (src), (size)); \
  29 + for_each_cpu(__i) \
  30 + memcpy((pcpudst)+__per_cpu_offset(__i), \
  31 + (src), (size)); \
33 32 } while (0)
34 33 #else /* ! SMP */
35 34  
include/asm-x86_64/percpu.h
... ... @@ -26,10 +26,9 @@
26 26 #define percpu_modcopy(pcpudst, src, size) \
27 27 do { \
28 28 unsigned int __i; \
29   - for (__i = 0; __i < NR_CPUS; __i++) \
30   - if (cpu_possible(__i)) \
31   - memcpy((pcpudst)+__per_cpu_offset(__i), \
32   - (src), (size)); \
  29 + for_each_cpu(__i) \
  30 + memcpy((pcpudst)+__per_cpu_offset(__i), \
  31 + (src), (size)); \
33 32 } while (0)
34 33  
35 34 extern void setup_per_cpu_areas(void);
include/linux/genhd.h
... ... @@ -149,22 +149,16 @@
149 149 ({ \
150 150 typeof(gendiskp->dkstats->field) res = 0; \
151 151 int i; \
152   - for (i=0; i < NR_CPUS; i++) { \
153   - if (!cpu_possible(i)) \
154   - continue; \
  152 + for_each_cpu(i) \
155 153 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
156   - } \
157 154 res; \
158 155 })
159 156  
160 157 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
161 158 int i;
162   - for (i=0; i < NR_CPUS; i++) {
163   - if (cpu_possible(i)) {
164   - memset(per_cpu_ptr(gendiskp->dkstats, i), value,
165   - sizeof (struct disk_stats));
166   - }
167   - }
  159 + for_each_cpu(i)
  160 + memset(per_cpu_ptr(gendiskp->dkstats, i), value,
  161 + sizeof (struct disk_stats));
168 162 }
169 163  
170 164 #else