Commit 174596a0b9f21e8844d70566a6bb29bf48a87750

Authored by Rusty Russell
1 parent 41c7bb9588

cpumask: convert mm/

Impact: Use new API

Convert kernel mm functions to use struct cpumask.

We skip include/linux/percpu.h and mm/allocpercpu.c, which are in flux.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>

Showing 5 changed files with 28 additions and 16 deletions (side-by-side diff)
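The conversion pattern used throughout these files: on-stack cpumask_t variables become cpumask_var_t (explicitly allocated and freed), file-scope masks become raw bitmaps wrapped with to_cpumask(), and the old cpu_*()/cpus_*() helpers become their struct-cpumask-pointer cpumask_*() equivalents. A minimal sketch of the cpumask_var_t shape, using a hypothetical helper rather than code from the patch:

    #include <linux/cpumask.h>
    #include <linux/cpuset.h>
    #include <linux/gfp.h>
    #include <linux/sched.h>

    /* Hypothetical helper, shown only to illustrate the allocate/use/free shape. */
    static void pin_to_cpuset_cpus(struct task_struct *tsk)
    {
            cpumask_var_t allowed;

            if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
                    return;                         /* degrade gracefully, as pdflush does below */

            cpuset_cpus_allowed(tsk, allowed);      /* fill the mask from the task's cpuset */
            set_cpus_allowed_ptr(tsk, allowed);     /* cpumask_var_t passes as a pointer */

            free_cpumask_var(allowed);
    }

When CONFIG_CPUMASK_OFFSTACK is not set, cpumask_var_t is a one-element array on the stack and alloc_cpumask_var() cannot fail, so the extra error path costs nothing on small configurations.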

mm/pdflush.c
... ... @@ -172,9 +172,18 @@
172 172 static int pdflush(void *dummy)
173 173 {
174 174 struct pdflush_work my_work;
175   - cpumask_t cpus_allowed;
  175 + cpumask_var_t cpus_allowed;
176 176  
177 177 /*
  178 + * Since the caller doesn't even check kthread_run() worked, let's not
  179 + * freak out too much if this fails.
  180 + */
  181 + if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
  182 + printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
  183 + return 0;
  184 + }
  185 +
  186 + /*
178 187 * pdflush can spend a lot of time doing encryption via dm-crypt. We
179 188 * don't want to do that at keventd's priority.
180 189 */
... ... @@ -187,8 +196,9 @@
187 196 * This is needed as pdflush's are dynamically created and destroyed.
188 197 * The boottime pdflush's are easily placed w/o these 2 lines.
189 198 */
190   - cpuset_cpus_allowed(current, &cpus_allowed);
191   - set_cpus_allowed_ptr(current, &cpus_allowed);
  199 + cpuset_cpus_allowed(current, cpus_allowed);
  200 + set_cpus_allowed_ptr(current, cpus_allowed);
  201 + free_cpumask_var(cpus_allowed);
192 202  
193 203 return __pdflush(&my_work);
194 204 }
mm/slab.c
... ... @@ -2157,7 +2157,7 @@
2157 2157  
2158 2158 /*
2159 2159 * We use cache_chain_mutex to ensure a consistent view of
2160   - * cpu_online_map as well. Please see cpuup_callback
  2160 + * cpu_online_mask as well. Please see cpuup_callback
2161 2161 */
2162 2162 get_online_cpus();
2163 2163 mutex_lock(&cache_chain_mutex);
mm/slub.c
... ... @@ -1970,7 +1970,7 @@
1970 1970 kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1971 1971  
1972 1972 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1973   -static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
  1973 +static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
1974 1974  
1975 1975 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1976 1976 int cpu, gfp_t flags)
1977 1977  
... ... @@ -2045,13 +2045,13 @@
2045 2045 {
2046 2046 int i;
2047 2047  
2048   - if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
  2048 + if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
2049 2049 return;
2050 2050  
2051 2051 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2052 2052 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2053 2053  
2054   - cpu_set(cpu, kmem_cach_cpu_free_init_once);
  2054 + cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
2055 2055 }
2056 2056  
2057 2057 static void __init init_alloc_cpu(void)
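A file-scope mask such as kmem_cach_cpu_free_init_once needs no allocation at all: the storage is a plain bitmap sized by CONFIG_NR_CPUS, and every access wraps it with to_cpumask(). A minimal sketch of that shape, with hypothetical names:

    #include <linux/cpumask.h>

    /* Hypothetical file-scope mask, modelled on kmem_cach_cpu_free_init_once. */
    static DECLARE_BITMAP(initialized_cpus, CONFIG_NR_CPUS);

    static void mark_cpu_initialized(int cpu)
    {
            if (cpumask_test_cpu(cpu, to_cpumask(initialized_cpus)))
                    return;                 /* one-time work already done for this cpu */
            /* ... per-cpu setup would go here ... */
            cpumask_set_cpu(cpu, to_cpumask(initialized_cpus));
    }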
... ... @@ -3451,7 +3451,7 @@
3451 3451 long max_time;
3452 3452 long min_pid;
3453 3453 long max_pid;
3454   - cpumask_t cpus;
  3454 + DECLARE_BITMAP(cpus, NR_CPUS);
3455 3455 nodemask_t nodes;
3456 3456 };
3457 3457  
... ... @@ -3526,7 +3526,8 @@
3526 3526 if (track->pid > l->max_pid)
3527 3527 l->max_pid = track->pid;
3528 3528  
3529   - cpu_set(track->cpu, l->cpus);
  3529 + cpumask_set_cpu(track->cpu,
  3530 + to_cpumask(l->cpus));
3530 3531 }
3531 3532 node_set(page_to_nid(virt_to_page(track)), l->nodes);
3532 3533 return 1;
... ... @@ -3556,8 +3557,8 @@
3556 3557 l->max_time = age;
3557 3558 l->min_pid = track->pid;
3558 3559 l->max_pid = track->pid;
3559   - cpus_clear(l->cpus);
3560   - cpu_set(track->cpu, l->cpus);
  3560 + cpumask_clear(to_cpumask(l->cpus));
  3561 + cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
3561 3562 nodes_clear(l->nodes);
3562 3563 node_set(page_to_nid(virt_to_page(track)), l->nodes);
3563 3564 return 1;
3564 3565  
... ... @@ -3638,11 +3639,12 @@
3638 3639 len += sprintf(buf + len, " pid=%ld",
3639 3640 l->min_pid);
3640 3641  
3641   - if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
  3642 + if (num_online_cpus() > 1 &&
  3643 + !cpumask_empty(to_cpumask(l->cpus)) &&
3642 3644 len < PAGE_SIZE - 60) {
3643 3645 len += sprintf(buf + len, " cpus=");
3644 3646 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
3645   - &l->cpus);
  3647 + to_cpumask(l->cpus));
3646 3648 }
3647 3649  
3648 3650 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
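Masks embedded in structures follow the same scheme: struct location now carries a DECLARE_BITMAP(cpus, NR_CPUS), and call sites wrap it with to_cpumask() because helpers such as cpulist_scnprintf() take a const struct cpumask *. A minimal sketch, with a hypothetical structure standing in for struct location:

    #include <linux/cpumask.h>

    struct tracker {                                /* hypothetical, modelled on struct location */
            DECLARE_BITMAP(cpus, NR_CPUS);
    };

    static int show_tracked_cpus(char *buf, int len, struct tracker *t)
    {
            /* cpulist_scnprintf() wants a struct cpumask pointer, not a raw bitmap */
            return cpulist_scnprintf(buf, len, to_cpumask(t->cpus));
    }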
mm/vmscan.c
... ... @@ -1902,7 +1902,7 @@
1902 1902 };
1903 1903 node_to_cpumask_ptr(cpumask, pgdat->node_id);
1904 1904  
1905   - if (!cpus_empty(*cpumask))
  1905 + if (!cpumask_empty(cpumask))
1906 1906 set_cpus_allowed_ptr(tsk, cpumask);
1907 1907 current->reclaim_state = &reclaim_state;
1908 1908  
mm/vmstat.c
... ... @@ -20,7 +20,7 @@
20 20 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
21 21 EXPORT_PER_CPU_SYMBOL(vm_event_states);
22 22  
23   -static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
  23 +static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
24 24 {
25 25 int cpu;
26 26 int i;
... ... @@ -43,7 +43,7 @@
43 43 void all_vm_events(unsigned long *ret)
44 44 {
45 45 get_online_cpus();
46   - sum_vm_events(ret, &cpu_online_map);
  46 + sum_vm_events(ret, cpu_online_mask);
47 47 put_online_cpus();
48 48 }
49 49 EXPORT_SYMBOL_GPL(all_vm_events);
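The vmstat hunks show the pointer-taking side of the API: helpers now accept const struct cpumask *, and the global cpu_online_map value is replaced by the cpu_online_mask pointer, so callers no longer take an address. A minimal sketch with a hypothetical counting helper:

    #include <linux/cpumask.h>

    /* Hypothetical: count the CPUs named by an arbitrary mask. */
    static unsigned int count_cpus_in(const struct cpumask *mask)
    {
            int cpu;
            unsigned int n = 0;

            for_each_cpu(cpu, mask)
                    n++;
            return n;
    }

    static unsigned int count_online_cpus(void)
    {
            /* cpu_online_mask is already a const struct cpumask *; pass it directly */
            return count_cpus_in(cpu_online_mask);
    }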