Commit 0a945022778f100115d0cb6234eb28fc1b15ccaf

Authored by KAMEZAWA Hiroyuki
Committed by Linus Torvalds
1 parent 631d6747e1

[PATCH] for_each_possible_cpu: fixes for generic part

Replace for_each_cpu() with for_each_possible_cpu() in generic code.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 11 changed files with 17 additions and 17 deletions
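
For context (not part of the commit): for_each_possible_cpu() iterates over every CPU that could ever be brought online, while for_each_online_cpu() covers only CPUs that are currently up. Per-CPU storage is set aside for all possible CPUs, so initialization and accumulation loops like the ones changed below have to walk the possible set. A minimal illustrative sketch follows; the per-CPU variable example_counter and the two helpers are hypothetical names invented for this example, not code from the commit.

/*
 * Illustrative sketch only: a hypothetical per-CPU counter that must be
 * initialized for every possible CPU, not just the ones currently online,
 * so a CPU hot-plugged later finds its copy already set up.
 */
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, example_counter);

static void __init example_counter_init(void)
{
	int cpu;

	/* Cover CPUs that may come online later (CPU hotplug). */
	for_each_possible_cpu(cpu)
		per_cpu(example_counter, cpu) = 0;
}

static unsigned long example_counter_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	/*
	 * Summing only online CPUs would miss values left behind by CPUs
	 * that have gone offline; iterating the possible set is safe here.
	 */
	for_each_possible_cpu(cpu)
		sum += per_cpu(example_counter, cpu);

	return sum;
}

The hunks below make exactly this substitution at each call site, with no behavioral change, since the old single-argument for_each_cpu() already walked the possible set; the new name simply states that intent.
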

... ... @@ -3514,7 +3514,7 @@
3514 3514 iocontext_cachep = kmem_cache_create("blkdev_ioc",
3515 3515 sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
3516 3516  
3517   - for_each_cpu(i)
  3517 + for_each_possible_cpu(i)
3518 3518 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
3519 3519  
3520 3520 open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
... ... @@ -373,7 +373,7 @@
373 373 void __init files_defer_init(void)
374 374 {
375 375 int i;
376   - for_each_cpu(i)
  376 + for_each_possible_cpu(i)
377 377 fdtable_defer_list_init(i);
378 378 }
... ... @@ -534,7 +534,7 @@
534 534 if (wall_to_monotonic.tv_nsec)
535 535 --jif;
536 536  
537   - for_each_cpu(i) {
  537 + for_each_possible_cpu(i) {
538 538 int j;
539 539  
540 540 user = cputime64_add(user, kstat_cpu(i).cpustat.user);
include/asm-generic/percpu.h
... ... @@ -19,7 +19,7 @@
19 19 #define percpu_modcopy(pcpudst, src, size) \
20 20 do { \
21 21 unsigned int __i; \
22   - for_each_cpu(__i) \
  22 + for_each_possible_cpu(__i) \
23 23 memcpy((pcpudst)+__per_cpu_offset[__i], \
24 24 (src), (size)); \
25 25 } while (0)
include/linux/genhd.h
... ... @@ -152,14 +152,14 @@
152 152 ({ \
153 153 typeof(gendiskp->dkstats->field) res = 0; \
154 154 int i; \
155   - for_each_cpu(i) \
  155 + for_each_possible_cpu(i) \
156 156 res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
157 157 res; \
158 158 })
159 159  
160 160 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
161 161 int i;
162   - for_each_cpu(i)
  162 + for_each_possible_cpu(i)
163 163 memset(per_cpu_ptr(gendiskp->dkstats, i), value,
164 164 sizeof (struct disk_stats));
165 165 }
include/linux/kernel_stat.h
... ... @@ -46,7 +46,7 @@
46 46 {
47 47 int cpu, sum = 0;
48 48  
49   - for_each_cpu(cpu)
  49 + for_each_possible_cpu(cpu)
50 50 sum += kstat_cpu(cpu).irqs[irq];
51 51  
52 52 return sum;
... ... @@ -341,7 +341,7 @@
341 341 #endif
342 342 ptr = alloc_bootmem(size * nr_possible_cpus);
343 343  
344   - for_each_cpu(i) {
  344 + for_each_possible_cpu(i) {
345 345 __per_cpu_offset[i] = ptr - __per_cpu_start;
346 346 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
347 347 ptr += size;
... ... @@ -301,7 +301,7 @@
301 301 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
302 302 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
303 303  
304   - for_each_cpu(cpu) {
  304 + for_each_possible_cpu(cpu) {
305 305 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
306 306 pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
307 307 batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
... ... @@ -535,7 +535,7 @@
535 535 atomic_set(&n_rcu_torture_error, 0);
536 536 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
537 537 atomic_set(&rcu_torture_wcount[i], 0);
538   - for_each_cpu(cpu) {
  538 + for_each_possible_cpu(cpu) {
539 539 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
540 540 per_cpu(rcu_torture_count, cpu)[i] = 0;
541 541 per_cpu(rcu_torture_batch, cpu)[i] = 0;
... ... @@ -1625,7 +1625,7 @@
1625 1625 {
1626 1626 unsigned long i, sum = 0;
1627 1627  
1628   - for_each_cpu(i)
  1628 + for_each_possible_cpu(i)
1629 1629 sum += cpu_rq(i)->nr_uninterruptible;
1630 1630  
1631 1631 /*
... ... @@ -1642,7 +1642,7 @@
1642 1642 {
1643 1643 unsigned long long i, sum = 0;
1644 1644  
1645   - for_each_cpu(i)
  1645 + for_each_possible_cpu(i)
1646 1646 sum += cpu_rq(i)->nr_switches;
1647 1647  
1648 1648 return sum;
... ... @@ -1652,7 +1652,7 @@
1652 1652 {
1653 1653 unsigned long i, sum = 0;
1654 1654  
1655   - for_each_cpu(i)
  1655 + for_each_possible_cpu(i)
1656 1656 sum += atomic_read(&cpu_rq(i)->nr_iowait);
1657 1657  
1658 1658 return sum;
... ... @@ -6080,7 +6080,7 @@
6080 6080 runqueue_t *rq;
6081 6081 int i, j, k;
6082 6082  
6083   - for_each_cpu(i) {
  6083 + for_each_possible_cpu(i) {
6084 6084 prio_array_t *array;
6085 6085  
6086 6086 rq = cpu_rq(i);
... ... @@ -3311,7 +3311,7 @@
3311 3311 * and we have no way of figuring out how to fix the array
3312 3312 * that we have allocated then....
3313 3313 */
3314   - for_each_cpu(i) {
  3314 + for_each_possible_cpu(i) {
3315 3315 int node = cpu_to_node(i);
3316 3316  
3317 3317 if (node_online(node))
... ... @@ -3398,7 +3398,7 @@
3398 3398 /*
3399 3399 * We allocate for all cpus so we cannot use for online cpu here.
3400 3400 */
3401   - for_each_cpu(i)
  3401 + for_each_possible_cpu(i)
3402 3402 kfree(p->ptrs[i]);
3403 3403 kfree(p);
3404 3404 }
... ... @@ -512,7 +512,7 @@
512 512  
513 513 spin_lock(&fbc->lock);
514 514 ret = fbc->count;
515   - for_each_cpu(cpu) {
  515 + for_each_possible_cpu(cpu) {
516 516 long *pcount = per_cpu_ptr(fbc->counters, cpu);
517 517 ret += *pcount;
518 518 }