Commit 98a79d6a50181ca1ecf7400eda01d5dc1bc0dbf0

Authored by Rusty Russell
1 parent 6c34bc2976

cpumask: centralize cpu_online_map and cpu_possible_map

Impact: cleanup

Each SMP arch defines these themselves.  Move them to a central
location.

Twists:
1) Some archs (m32r, parisc, s390) set possible_map to all 1s, so we add a
   CONFIG_INIT_ALL_POSSIBLE for this rather than break them.

2) mips and sparc32 used '#define cpu_possible_map phys_cpu_present_map'.
   Those archs simply have phys_cpu_present_map replaced everywhere.

3) Alpha defined cpu_possible_map to cpu_present_map; this is tricky
   so I just manipulate them both in sync.

4) IA64, cris and m32r have gratuitous 'extern cpumask_t cpu_possible_map'
   declarations.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Reviewed-by: Grant Grundler <grundler@parisc-linux.org>
Tested-by: Tony Luck <tony.luck@intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Mike Travis <travis@sgi.com>
Cc: ink@jurassic.park.msu.ru
Cc: rmk@arm.linux.org.uk
Cc: starvik@axis.com
Cc: tony.luck@intel.com
Cc: takata@linux-m32r.org
Cc: ralf@linux-mips.org
Cc: grundler@parisc-linux.org
Cc: paulus@samba.org
Cc: schwidefsky@de.ibm.com
Cc: lethal@linux-sh.org
Cc: wli@holomorphy.com
Cc: davem@davemloft.net
Cc: jdike@addtoit.com
Cc: mingo@redhat.com

Showing 35 changed files with 42 additions and 132 deletions
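
For orientation before the per-file hunks: a condensed sketch of the centralized definitions this commit leaves in kernel/cpu.c, matching the final hunk at the end of the diff. The surrounding context is abridged and the #include lines are added here only to make the sketch self-contained; they are not part of the hunk.

    /* kernel/cpu.c -- single, arch-independent home for the global CPU masks */
    #include <linux/cpumask.h>
    #include <linux/module.h>

    cpumask_t cpu_present_map __read_mostly;
    EXPORT_SYMBOL(cpu_present_map);

    /* Represents all cpus that are currently online. */
    cpumask_t cpu_online_map __read_mostly;
    EXPORT_SYMBOL(cpu_online_map);

    /* Archs that used to start with an all-ones possible map now select
     * INIT_ALL_POSSIBLE (m32r, parisc, s390); everyone else starts empty
     * and fills the map in while probing, as in the alpha/mips/sparc
     * hunks below. */
    #ifdef CONFIG_INIT_ALL_POSSIBLE
    cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
    #else
    cpumask_t cpu_possible_map __read_mostly;
    #endif
    EXPORT_SYMBOL(cpu_possible_map);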

arch/alpha/include/asm/smp.h
... ... @@ -45,7 +45,6 @@
45 45 #define raw_smp_processor_id() (current_thread_info()->cpu)
46 46  
47 47 extern int smp_num_cpus;
48   -#define cpu_possible_map cpu_present_map
49 48  
50 49 extern void arch_send_call_function_single_ipi(int cpu);
51 50 extern void arch_send_call_function_ipi(cpumask_t mask);
arch/alpha/kernel/process.c
... ... @@ -94,6 +94,7 @@
94 94 flags |= 0x00040000UL; /* "remain halted" */
95 95 *pflags = flags;
96 96 cpu_clear(cpuid, cpu_present_map);
  97 + cpu_clear(cpuid, cpu_possible_map);
97 98 halt();
98 99 }
99 100 #endif
... ... @@ -120,6 +121,7 @@
120 121 #ifdef CONFIG_SMP
121 122 /* Wait for the secondaries to halt. */
122 123 cpu_clear(boot_cpuid, cpu_present_map);
  124 + cpu_clear(boot_cpuid, cpu_possible_map);
123 125 while (cpus_weight(cpu_present_map))
124 126 barrier();
125 127 #endif
arch/alpha/kernel/smp.c
... ... @@ -70,11 +70,6 @@
70 70 /* Set to a secondary's cpuid when it comes online. */
71 71 static int smp_secondary_alive __devinitdata = 0;
72 72  
73   -/* Which cpus ids came online. */
74   -cpumask_t cpu_online_map;
75   -
76   -EXPORT_SYMBOL(cpu_online_map);
77   -
78 73 int smp_num_probed; /* Internal processor count */
79 74 int smp_num_cpus = 1; /* Number that came online. */
80 75 EXPORT_SYMBOL(smp_num_cpus);
... ... @@ -440,6 +435,7 @@
440 435 ((char *)cpubase + i*hwrpb->processor_size);
441 436 if ((cpu->flags & 0x1cc) == 0x1cc) {
442 437 smp_num_probed++;
  438 + cpu_set(i, cpu_possible_map);
443 439 cpu_set(i, cpu_present_map);
444 440 cpu->pal_revision = boot_cpu_palrev;
445 441 }
... ... @@ -473,6 +469,7 @@
473 469  
474 470 /* Nothing to do on a UP box, or when told not to. */
475 471 if (smp_num_probed == 1 || max_cpus == 0) {
  472 + cpu_possible_map = cpumask_of_cpu(boot_cpuid);
476 473 cpu_present_map = cpumask_of_cpu(boot_cpuid);
477 474 printk(KERN_INFO "SMP mode deactivated.\n");
478 475 return;
arch/arm/kernel/smp.c
... ... @@ -34,16 +34,6 @@
34 34 #include <asm/ptrace.h>
35 35  
36 36 /*
37   - * bitmask of present and online CPUs.
38   - * The present bitmask indicates that the CPU is physically present.
39   - * The online bitmask indicates that the CPU is up and running.
40   - */
41   -cpumask_t cpu_possible_map;
42   -EXPORT_SYMBOL(cpu_possible_map);
43   -cpumask_t cpu_online_map;
44   -EXPORT_SYMBOL(cpu_online_map);
45   -
46   -/*
47 37 * as from 2.5, kernels no longer have an init_tasks structure
48 38 * so we need some other way of telling a new secondary core
49 39 * where to place its SVC stack
arch/cris/arch-v32/kernel/smp.c
... ... @@ -29,11 +29,7 @@
29 29 spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED};
30 30  
31 31 /* CPU masks */
32   -cpumask_t cpu_online_map = CPU_MASK_NONE;
33   -EXPORT_SYMBOL(cpu_online_map);
34 32 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
35   -cpumask_t cpu_possible_map;
36   -EXPORT_SYMBOL(cpu_possible_map);
37 33 EXPORT_SYMBOL(phys_cpu_present_map);
38 34  
39 35 /* Variables used during SMP boot */
arch/cris/include/asm/smp.h
... ... @@ -4,7 +4,6 @@
4 4 #include <linux/cpumask.h>
5 5  
6 6 extern cpumask_t phys_cpu_present_map;
7   -extern cpumask_t cpu_possible_map;
8 7  
9 8 #define raw_smp_processor_id() (current_thread_info()->cpu)
10 9  
arch/ia64/include/asm/smp.h
... ... @@ -57,7 +57,6 @@
57 57  
58 58 extern char no_int_routing __devinitdata;
59 59  
60   -extern cpumask_t cpu_online_map;
61 60 extern cpumask_t cpu_core_map[NR_CPUS];
62 61 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
63 62 extern int smp_num_siblings;
arch/ia64/kernel/smpboot.c
... ... @@ -131,12 +131,6 @@
131 131 */
132 132 DEFINE_PER_CPU(int, cpu_state);
133 133  
134   -/* Bitmasks of currently online, and possible CPUs */
135   -cpumask_t cpu_online_map;
136   -EXPORT_SYMBOL(cpu_online_map);
137   -cpumask_t cpu_possible_map = CPU_MASK_NONE;
138   -EXPORT_SYMBOL(cpu_possible_map);
139   -
140 134 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
141 135 EXPORT_SYMBOL(cpu_core_map);
142 136 DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
arch/m32r/Kconfig
... ... @@ -10,6 +10,7 @@
10 10 default y
11 11 select HAVE_IDE
12 12 select HAVE_OPROFILE
  13 + select INIT_ALL_POSSIBLE
13 14  
14 15 config SBUS
15 16 bool
arch/m32r/kernel/smpboot.c
... ... @@ -73,17 +73,11 @@
73 73 /* Bitmask of physically existing CPUs */
74 74 physid_mask_t phys_cpu_present_map;
75 75  
76   -/* Bitmask of currently online CPUs */
77   -cpumask_t cpu_online_map;
78   -EXPORT_SYMBOL(cpu_online_map);
79   -
80 76 cpumask_t cpu_bootout_map;
81 77 cpumask_t cpu_bootin_map;
82 78 static cpumask_t cpu_callin_map;
83 79 cpumask_t cpu_callout_map;
84 80 EXPORT_SYMBOL(cpu_callout_map);
85   -cpumask_t cpu_possible_map = CPU_MASK_ALL;
86   -EXPORT_SYMBOL(cpu_possible_map);
87 81  
88 82 /* Per CPU bogomips and other parameters */
89 83 struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;
arch/mips/include/asm/smp.h
... ... @@ -38,9 +38,6 @@
38 38 #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
39 39 #define SMP_CALL_FUNCTION 0x2
40 40  
41   -extern cpumask_t phys_cpu_present_map;
42   -#define cpu_possible_map phys_cpu_present_map
43   -
44 41 extern void asmlinkage smp_bootstrap(void);
45 42  
46 43 /*
arch/mips/kernel/smp-cmp.c
... ... @@ -226,7 +226,7 @@
226 226  
227 227 for (i = 1; i < NR_CPUS; i++) {
228 228 if (amon_cpu_avail(i)) {
229   - cpu_set(i, phys_cpu_present_map);
  229 + cpu_set(i, cpu_possible_map);
230 230 __cpu_number_map[i] = ++ncpu;
231 231 __cpu_logical_map[ncpu] = i;
232 232 }
arch/mips/kernel/smp-mt.c
... ... @@ -70,7 +70,7 @@
70 70 write_vpe_c0_vpeconf0(tmp);
71 71  
72 72 /* Record this as available CPU */
73   - cpu_set(tc, phys_cpu_present_map);
  73 + cpu_set(tc, cpu_possible_map);
74 74 __cpu_number_map[tc] = ++ncpu;
75 75 __cpu_logical_map[ncpu] = tc;
76 76 }
arch/mips/kernel/smp.c
... ... @@ -44,15 +44,10 @@
44 44 #include <asm/mipsmtregs.h>
45 45 #endif /* CONFIG_MIPS_MT_SMTC */
46 46  
47   -cpumask_t phys_cpu_present_map; /* Bitmask of available CPUs */
48 47 volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
49   -cpumask_t cpu_online_map; /* Bitmask of currently online CPUs */
50 48 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
51 49 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
52 50  
53   -EXPORT_SYMBOL(phys_cpu_present_map);
54   -EXPORT_SYMBOL(cpu_online_map);
55   -
56 51 extern void cpu_idle(void);
57 52  
58 53 /* Number of TCs (or siblings in Intel speak) per CPU core */
... ... @@ -195,7 +190,7 @@
195 190 /* preload SMP state for boot cpu */
196 191 void __devinit smp_prepare_boot_cpu(void)
197 192 {
198   - cpu_set(0, phys_cpu_present_map);
  193 + cpu_set(0, cpu_possible_map);
199 194 cpu_set(0, cpu_online_map);
200 195 cpu_set(0, cpu_callin_map);
201 196 }
arch/mips/kernel/smtc.c
... ... @@ -290,7 +290,7 @@
290 290 * possibly leave some TCs/VPEs as "slave" processors.
291 291 *
292 292 * Use c0_MVPConf0 to find out how many TCs are available, setting up
293   - * phys_cpu_present_map and the logical/physical mappings.
  293 + * cpu_possible_map and the logical/physical mappings.
294 294 */
295 295  
296 296 int __init smtc_build_cpu_map(int start_cpu_slot)
... ... @@ -304,7 +304,7 @@
304 304 */
305 305 ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
306 306 for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
307   - cpu_set(i, phys_cpu_present_map);
  307 + cpu_set(i, cpu_possible_map);
308 308 __cpu_number_map[i] = i;
309 309 __cpu_logical_map[i] = i;
310 310 }
... ... @@ -521,7 +521,7 @@
521 521 * Pull any physically present but unused TCs out of circulation.
522 522 */
523 523 while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
524   - cpu_clear(tc, phys_cpu_present_map);
  524 + cpu_clear(tc, cpu_possible_map);
525 525 cpu_clear(tc, cpu_present_map);
526 526 tc++;
527 527 }
arch/mips/pmc-sierra/yosemite/smp.c
... ... @@ -141,7 +141,7 @@
141 141 }
142 142  
143 143 /*
144   - * Detect available CPUs, populate phys_cpu_present_map before smp_init
  144 + * Detect available CPUs, populate cpu_possible_map before smp_init
145 145 *
146 146 * We don't want to start the secondary CPU yet nor do we have a nice probing
147 147 * feature in PMON so we just assume presence of the secondary core.
148 148  
... ... @@ -150,10 +150,10 @@
150 150 {
151 151 int i;
152 152  
153   - cpus_clear(phys_cpu_present_map);
  153 + cpus_clear(cpu_possible_map);
154 154  
155 155 for (i = 0; i < 2; i++) {
156   - cpu_set(i, phys_cpu_present_map);
  156 + cpu_set(i, cpu_possible_map);
157 157 __cpu_number_map[i] = i;
158 158 __cpu_logical_map[i] = i;
159 159 }
arch/mips/sgi-ip27/ip27-smp.c
... ... @@ -76,7 +76,7 @@
76 76 /* Only let it join in if it's marked enabled */
77 77 if ((acpu->cpu_info.flags & KLINFO_ENABLE) &&
78 78 (tot_cpus_found != NR_CPUS)) {
79   - cpu_set(cpuid, phys_cpu_present_map);
  79 + cpu_set(cpuid, cpu_possible_map);
80 80 alloc_cpupda(cpuid, tot_cpus_found);
81 81 cpus_found++;
82 82 tot_cpus_found++;
arch/mips/sibyte/bcm1480/smp.c
... ... @@ -136,7 +136,7 @@
136 136  
137 137 /*
138 138 * Use CFE to find out how many CPUs are available, setting up
139   - * phys_cpu_present_map and the logical/physical mappings.
  139 + * cpu_possible_map and the logical/physical mappings.
140 140 * XXXKW will the boot CPU ever not be physical 0?
141 141 *
142 142 * Common setup before any secondaries are started
143 143  
... ... @@ -145,14 +145,14 @@
145 145 {
146 146 int i, num;
147 147  
148   - cpus_clear(phys_cpu_present_map);
149   - cpu_set(0, phys_cpu_present_map);
  148 + cpus_clear(cpu_possible_map);
  149 + cpu_set(0, cpu_possible_map);
150 150 __cpu_number_map[0] = 0;
151 151 __cpu_logical_map[0] = 0;
152 152  
153 153 for (i = 1, num = 0; i < NR_CPUS; i++) {
154 154 if (cfe_cpu_stop(i) == 0) {
155   - cpu_set(i, phys_cpu_present_map);
  155 + cpu_set(i, cpu_possible_map);
156 156 __cpu_number_map[i] = ++num;
157 157 __cpu_logical_map[num] = i;
158 158 }
arch/mips/sibyte/sb1250/smp.c
... ... @@ -124,7 +124,7 @@
124 124  
125 125 /*
126 126 * Use CFE to find out how many CPUs are available, setting up
127   - * phys_cpu_present_map and the logical/physical mappings.
  127 + * cpu_possible_map and the logical/physical mappings.
128 128 * XXXKW will the boot CPU ever not be physical 0?
129 129 *
130 130 * Common setup before any secondaries are started
131 131  
... ... @@ -133,14 +133,14 @@
133 133 {
134 134 int i, num;
135 135  
136   - cpus_clear(phys_cpu_present_map);
137   - cpu_set(0, phys_cpu_present_map);
  136 + cpus_clear(cpu_possible_map);
  137 + cpu_set(0, cpu_possible_map);
138 138 __cpu_number_map[0] = 0;
139 139 __cpu_logical_map[0] = 0;
140 140  
141 141 for (i = 1, num = 0; i < NR_CPUS; i++) {
142 142 if (cfe_cpu_stop(i) == 0) {
143   - cpu_set(i, phys_cpu_present_map);
  143 + cpu_set(i, cpu_possible_map);
144 144 __cpu_number_map[i] = ++num;
145 145 __cpu_logical_map[num] = i;
146 146 }
arch/parisc/Kconfig
... ... @@ -11,6 +11,7 @@
11 11 select HAVE_OPROFILE
12 12 select RTC_CLASS
13 13 select RTC_DRV_PARISC
  14 + select INIT_ALL_POSSIBLE
14 15 help
15 16 The PA-RISC microprocessor is designed by Hewlett-Packard and used
16 17 in many of their workstations & servers (HP9000 700 and 800 series,
arch/parisc/kernel/smp.c
... ... @@ -67,21 +67,6 @@
67 67  
68 68 static int parisc_max_cpus __read_mostly = 1;
69 69  
70   -/* online cpus are ones that we've managed to bring up completely
71   - * possible cpus are all valid cpu
72   - * present cpus are all detected cpu
73   - *
74   - * On startup we bring up the "possible" cpus. Since we discover
75   - * CPUs later, we add them as hotplug, so the possible cpu mask is
76   - * empty in the beginning.
77   - */
78   -
79   -cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; /* Bitmap of online CPUs */
80   -cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL; /* Bitmap of Present CPUs */
81   -
82   -EXPORT_SYMBOL(cpu_online_map);
83   -EXPORT_SYMBOL(cpu_possible_map);
84   -
85 70 DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED;
86 71  
87 72 enum ipi_message_type {
arch/powerpc/kernel/smp.c
... ... @@ -60,13 +60,9 @@
60 60 int smp_hw_index[NR_CPUS];
61 61 struct thread_info *secondary_ti;
62 62  
63   -cpumask_t cpu_possible_map = CPU_MASK_NONE;
64   -cpumask_t cpu_online_map = CPU_MASK_NONE;
65 63 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
66 64 DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
67 65  
68   -EXPORT_SYMBOL(cpu_online_map);
69   -EXPORT_SYMBOL(cpu_possible_map);
70 66 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
71 67 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
72 68  
arch/s390/Kconfig
... ... @@ -75,6 +75,7 @@
75 75 select HAVE_KRETPROBES
76 76 select HAVE_KVM if 64BIT
77 77 select HAVE_ARCH_TRACEHOOK
  78 + select INIT_ALL_POSSIBLE
78 79  
79 80 source "init/Kconfig"
80 81  
arch/s390/kernel/smp.c
... ... @@ -52,12 +52,6 @@
52 52 struct _lowcore *lowcore_ptr[NR_CPUS];
53 53 EXPORT_SYMBOL(lowcore_ptr);
54 54  
55   -cpumask_t cpu_online_map = CPU_MASK_NONE;
56   -EXPORT_SYMBOL(cpu_online_map);
57   -
58   -cpumask_t cpu_possible_map = CPU_MASK_ALL;
59   -EXPORT_SYMBOL(cpu_possible_map);
60   -
61 55 static struct task_struct *current_set[NR_CPUS];
62 56  
63 57 static u8 smp_cpu_type;
arch/sh/kernel/smp.c
... ... @@ -31,12 +31,6 @@
31 31 int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
32 32 int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
33 33  
34   -cpumask_t cpu_possible_map;
35   -EXPORT_SYMBOL(cpu_possible_map);
36   -
37   -cpumask_t cpu_online_map;
38   -EXPORT_SYMBOL(cpu_online_map);
39   -
40 34 static inline void __init smp_store_cpu_info(unsigned int cpu)
41 35 {
42 36 struct sh_cpuinfo *c = cpu_data + cpu;
arch/sparc/include/asm/smp_32.h
... ... @@ -29,8 +29,6 @@
29 29 */
30 30  
31 31 extern unsigned char boot_cpu_id;
32   -extern cpumask_t phys_cpu_present_map;
33   -#define cpu_possible_map phys_cpu_present_map
34 32  
35 33 typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
36 34 unsigned long, unsigned long);
arch/sparc/kernel/smp.c
... ... @@ -39,8 +39,6 @@
39 39 unsigned char boot_cpu_id = 0;
40 40 unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */
41 41  
42   -cpumask_t cpu_online_map = CPU_MASK_NONE;
43   -cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
44 42 cpumask_t smp_commenced_mask = CPU_MASK_NONE;
45 43  
46 44 /* The only guaranteed locking primitive available on all Sparc
... ... @@ -334,7 +332,7 @@
334 332 instance = 0;
335 333 while (!cpu_find_by_instance(instance, NULL, &mid)) {
336 334 if (mid < NR_CPUS) {
337   - cpu_set(mid, phys_cpu_present_map);
  335 + cpu_set(mid, cpu_possible_map);
338 336 cpu_set(mid, cpu_present_map);
339 337 }
340 338 instance++;
... ... @@ -354,7 +352,7 @@
354 352  
355 353 current_thread_info()->cpu = cpuid;
356 354 cpu_set(cpuid, cpu_online_map);
357   - cpu_set(cpuid, phys_cpu_present_map);
  355 + cpu_set(cpuid, cpu_possible_map);
358 356 }
359 357  
360 358 int __cpuinit __cpu_up(unsigned int cpu)
arch/sparc/kernel/sparc_ksyms.c
... ... @@ -113,10 +113,6 @@
113 113 #ifdef CONFIG_SMP
114 114 /* IRQ implementation. */
115 115 EXPORT_SYMBOL(synchronize_irq);
116   -
117   -/* CPU online map and active count. */
118   -EXPORT_SYMBOL(cpu_online_map);
119   -EXPORT_SYMBOL(phys_cpu_present_map);
120 116 #endif
121 117  
122 118 EXPORT_SYMBOL(__udelay);
arch/sparc64/kernel/smp.c
... ... @@ -49,14 +49,10 @@
49 49  
50 50 int sparc64_multi_core __read_mostly;
51 51  
52   -cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
53   -cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
54 52 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
55 53 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
56 54 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
57 55  
58   -EXPORT_SYMBOL(cpu_possible_map);
59   -EXPORT_SYMBOL(cpu_online_map);
60 56 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
61 57 EXPORT_SYMBOL(cpu_core_map);
62 58  
arch/um/kernel/smp.c
... ... @@ -25,13 +25,6 @@
25 25 #include "irq_user.h"
26 26 #include "os.h"
27 27  
28   -/* CPU online map, set by smp_boot_cpus */
29   -cpumask_t cpu_online_map = CPU_MASK_NONE;
30   -cpumask_t cpu_possible_map = CPU_MASK_NONE;
31   -
32   -EXPORT_SYMBOL(cpu_online_map);
33   -EXPORT_SYMBOL(cpu_possible_map);
34   -
35 28 /* Per CPU bogomips and other parameters
36 29 * The only piece used here is the ipi pipe, which is set before SMP is
37 30 * started and never changed.
arch/x86/kernel/smpboot.c
... ... @@ -101,14 +101,8 @@
101 101 /* Last level cache ID of each logical CPU */
102 102 DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
103 103  
104   -/* bitmap of online cpus */
105   -cpumask_t cpu_online_map __read_mostly;
106   -EXPORT_SYMBOL(cpu_online_map);
107   -
108 104 cpumask_t cpu_callin_map;
109 105 cpumask_t cpu_callout_map;
110   -cpumask_t cpu_possible_map;
111   -EXPORT_SYMBOL(cpu_possible_map);
112 106  
113 107 /* representing HT siblings of each logical CPU */
114 108 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
arch/x86/mach-voyager/voyager_smp.c
... ... @@ -63,11 +63,6 @@
63 63 /* Used for the invalidate map that's also checked in the spinlock */
64 64 static volatile unsigned long smp_invalidate_needed;
65 65  
66   -/* Bitmask of currently online CPUs - used by setup.c for
67   - /proc/cpuinfo, visible externally but still physical */
68   -cpumask_t cpu_online_map = CPU_MASK_NONE;
69   -EXPORT_SYMBOL(cpu_online_map);
70   -
71 66 /* Bitmask of CPUs present in the system - exported by i386_syms.c, used
72 67 * by scheduler but indexed physically */
73 68 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
... ... @@ -218,8 +213,6 @@
218 213 /* This is for the new dynamic CPU boot code */
219 214 cpumask_t cpu_callin_map = CPU_MASK_NONE;
220 215 cpumask_t cpu_callout_map = CPU_MASK_NONE;
221   -cpumask_t cpu_possible_map = CPU_MASK_NONE;
222   -EXPORT_SYMBOL(cpu_possible_map);
223 216  
224 217 /* The per processor IRQ masks (these are usually kept in sync) */
225 218 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
include/asm-m32r/smp.h
... ... @@ -63,8 +63,6 @@
63 63 #define raw_smp_processor_id() (current_thread_info()->cpu)
64 64  
65 65 extern cpumask_t cpu_callout_map;
66   -extern cpumask_t cpu_possible_map;
67   -extern cpumask_t cpu_present_map;
68 66  
69 67 static __inline__ int hard_smp_processor_id(void)
70 68 {
init/Kconfig
... ... @@ -916,6 +916,15 @@
916 916  
917 917 endif # MODULES
918 918  
  919 +config INIT_ALL_POSSIBLE
  920 + bool
  921 + help
  922 + Back when each arch used to define their own cpu_online_map and
  923 + cpu_possible_map, some of them chose to initialize cpu_possible_map
  924 + with all 1s, and others with all 0s. When they were centralised,
  925 + it was better to provide this option than to break all the archs
  926 + and have several arch maintainers pursuing me down dark alleys.
  927 +
919 928 config STOP_MACHINE
920 929 bool
921 930 default y
kernel/cpu.c
... ... @@ -24,19 +24,20 @@
24 24 cpumask_t cpu_present_map __read_mostly;
25 25 EXPORT_SYMBOL(cpu_present_map);
26 26  
27   -#ifndef CONFIG_SMP
28   -
29 27 /*
30 28 * Represents all cpu's that are currently online.
31 29 */
32   -cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
  30 +cpumask_t cpu_online_map __read_mostly;
33 31 EXPORT_SYMBOL(cpu_online_map);
34 32  
  33 +#ifdef CONFIG_INIT_ALL_POSSIBLE
35 34 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
  35 +#else
  36 +cpumask_t cpu_possible_map __read_mostly;
  37 +#endif
36 38 EXPORT_SYMBOL(cpu_possible_map);
37 39  
38   -#else /* CONFIG_SMP */
39   -
  40 +#ifdef CONFIG_SMP
40 41 /* Serializes the updates to cpu_online_map, cpu_present_map */
41 42 static DEFINE_MUTEX(cpu_add_remove_lock);
42 43