Commit b9d10be7a8e88fdcb12540387c219cdde87b0795

Authored by Toshi Kani
Committed by Rafael J. Wysocki
1 parent 22e7551eb6

ACPI / processor: Acquire writer lock to update CPU maps

CPU system maps are protected with reader/writer locks.  The reader
lock, get_online_cpus(), ensures that the maps are not updated while
it is held.  The writer lock, cpu_hotplug_begin(), is used to
update the CPU maps together with cpu_maps_update_begin().
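
For reference, the writer-side pattern looks roughly like this (a
minimal sketch based on the APIs named above, not code taken from this
patch):

        /* Writer side: the CPU maps may be modified. */
        cpu_maps_update_begin();
        cpu_hotplug_begin();
        /* ... update cpu_present_mask / cpu_possible_mask ... */
        cpu_hotplug_done();
        cpu_maps_update_done();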

However, the ACPI processor handler updates the cpu maps without
holding the writer lock.

acpi_map_lsapic() is called from acpi_processor_hotadd_init() to
update cpu_possible_mask and cpu_present_mask.  acpi_unmap_lsapic()
is called from acpi_processor_remove() to update cpu_possible_mask.
Currently, they are either unprotected or protected with the reader
lock, which is not correct.

For example, the get_online_cpus() below is supposed to ensure that
cpu_possible_mask is not changed while the code is iterating with
for_each_possible_cpu().

        get_online_cpus();
        for_each_possible_cpu(cpu) {
                :
        }
        put_online_cpus();

However, this lock provides no protection against CPU hotplug, because
the ACPI processor handler does not take the writer lock when it
updates cpu_possible_mask.  The reader lock does not serialize the
readers against one another.

This patch protects them with the writer lock, cpu_hotplug_begin(),
along with cpu_maps_update_begin(), which must be held before calling
cpu_hotplug_begin().  It also protects arch_register_cpu() /
arch_unregister_cpu(), which create / delete the sysfs cpu device
interface.  For this purpose, it makes cpu_hotplug_begin() and
cpu_hotplug_done() global and declares them in cpu.h.
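
Condensed from the diff below (error handling elided), the hot-add
path, for example, ends up bracketing the map updates like this:

        cpu_maps_update_begin();
        cpu_hotplug_begin();

        acpi_map_lsapic(pr->handle, &pr->id);   /* updates the CPU maps */
        arch_register_cpu(pr->id);              /* creates the sysfs cpu device */

        cpu_hotplug_done();
        cpu_maps_update_done();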

Signed-off-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Showing 3 changed files with 23 additions and 11 deletions

drivers/acpi/acpi_processor.c
1 /* 1 /*
2 * acpi_processor.c - ACPI processor enumeration support 2 * acpi_processor.c - ACPI processor enumeration support
3 * 3 *
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 6 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 7 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 * Copyright (C) 2013, Intel Corporation 8 * Copyright (C) 2013, Intel Corporation
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com> 9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License version 2 as published 12 * under the terms of the GNU General Public License version 2 as published
13 * by the Free Software Foundation. 13 * by the Free Software Foundation.
14 */ 14 */
15 15
16 #include <linux/acpi.h> 16 #include <linux/acpi.h>
17 #include <linux/device.h> 17 #include <linux/device.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/module.h> 19 #include <linux/module.h>
20 #include <linux/pci.h> 20 #include <linux/pci.h>
21 21
22 #include <acpi/processor.h> 22 #include <acpi/processor.h>
23 23
24 #include <asm/cpu.h> 24 #include <asm/cpu.h>
25 25
26 #include "internal.h" 26 #include "internal.h"
27 27
28 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 28 #define _COMPONENT ACPI_PROCESSOR_COMPONENT
29 29
30 ACPI_MODULE_NAME("processor"); 30 ACPI_MODULE_NAME("processor");
31 31
32 DEFINE_PER_CPU(struct acpi_processor *, processors); 32 DEFINE_PER_CPU(struct acpi_processor *, processors);
33 EXPORT_PER_CPU_SYMBOL(processors); 33 EXPORT_PER_CPU_SYMBOL(processors);
34 34
35 /* -------------------------------------------------------------------------- 35 /* --------------------------------------------------------------------------
36 Errata Handling 36 Errata Handling
37 -------------------------------------------------------------------------- */ 37 -------------------------------------------------------------------------- */
38 38
39 struct acpi_processor_errata errata __read_mostly; 39 struct acpi_processor_errata errata __read_mostly;
40 EXPORT_SYMBOL_GPL(errata); 40 EXPORT_SYMBOL_GPL(errata);
41 41
42 static int acpi_processor_errata_piix4(struct pci_dev *dev) 42 static int acpi_processor_errata_piix4(struct pci_dev *dev)
43 { 43 {
44 u8 value1 = 0; 44 u8 value1 = 0;
45 u8 value2 = 0; 45 u8 value2 = 0;
46 46
47 47
48 if (!dev) 48 if (!dev)
49 return -EINVAL; 49 return -EINVAL;
50 50
51 /* 51 /*
52 * Note that 'dev' references the PIIX4 ACPI Controller. 52 * Note that 'dev' references the PIIX4 ACPI Controller.
53 */ 53 */
54 54
55 switch (dev->revision) { 55 switch (dev->revision) {
56 case 0: 56 case 0:
57 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); 57 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n"));
58 break; 58 break;
59 case 1: 59 case 1:
60 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); 60 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n"));
61 break; 61 break;
62 case 2: 62 case 2:
63 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); 63 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n"));
64 break; 64 break;
65 case 3: 65 case 3:
66 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); 66 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n"));
67 break; 67 break;
68 default: 68 default:
69 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); 69 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n"));
70 break; 70 break;
71 } 71 }
72 72
73 switch (dev->revision) { 73 switch (dev->revision) {
74 74
75 case 0: /* PIIX4 A-step */ 75 case 0: /* PIIX4 A-step */
76 case 1: /* PIIX4 B-step */ 76 case 1: /* PIIX4 B-step */
77 /* 77 /*
78 * See specification changes #13 ("Manual Throttle Duty Cycle") 78 * See specification changes #13 ("Manual Throttle Duty Cycle")
79 * and #14 ("Enabling and Disabling Manual Throttle"), plus 79 * and #14 ("Enabling and Disabling Manual Throttle"), plus
80 * erratum #5 ("STPCLK# Deassertion Time") from the January 80 * erratum #5 ("STPCLK# Deassertion Time") from the January
81 * 2002 PIIX4 specification update. Applies to only older 81 * 2002 PIIX4 specification update. Applies to only older
82 * PIIX4 models. 82 * PIIX4 models.
83 */ 83 */
84 errata.piix4.throttle = 1; 84 errata.piix4.throttle = 1;
85 85
86 case 2: /* PIIX4E */ 86 case 2: /* PIIX4E */
87 case 3: /* PIIX4M */ 87 case 3: /* PIIX4M */
88 /* 88 /*
89 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 89 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
90 * Livelock") from the January 2002 PIIX4 specification update. 90 * Livelock") from the January 2002 PIIX4 specification update.
91 * Applies to all PIIX4 models. 91 * Applies to all PIIX4 models.
92 */ 92 */
93 93
94 /* 94 /*
95 * BM-IDE 95 * BM-IDE
96 * ------ 96 * ------
97 * Find the PIIX4 IDE Controller and get the Bus Master IDE 97 * Find the PIIX4 IDE Controller and get the Bus Master IDE
98 * Status register address. We'll use this later to read 98 * Status register address. We'll use this later to read
99 * each IDE controller's DMA status to make sure we catch all 99 * each IDE controller's DMA status to make sure we catch all
100 * DMA activity. 100 * DMA activity.
101 */ 101 */
102 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 102 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
103 PCI_DEVICE_ID_INTEL_82371AB, 103 PCI_DEVICE_ID_INTEL_82371AB,
104 PCI_ANY_ID, PCI_ANY_ID, NULL); 104 PCI_ANY_ID, PCI_ANY_ID, NULL);
105 if (dev) { 105 if (dev) {
106 errata.piix4.bmisx = pci_resource_start(dev, 4); 106 errata.piix4.bmisx = pci_resource_start(dev, 4);
107 pci_dev_put(dev); 107 pci_dev_put(dev);
108 } 108 }
109 109
110 /* 110 /*
111 * Type-F DMA 111 * Type-F DMA
112 * ---------- 112 * ----------
113 * Find the PIIX4 ISA Controller and read the Motherboard 113 * Find the PIIX4 ISA Controller and read the Motherboard
114 * DMA controller's status to see if Type-F (Fast) DMA mode 114 * DMA controller's status to see if Type-F (Fast) DMA mode
115 * is enabled (bit 7) on either channel. Note that we'll 115 * is enabled (bit 7) on either channel. Note that we'll
116 * disable C3 support if this is enabled, as some legacy 116 * disable C3 support if this is enabled, as some legacy
117 * devices won't operate well if fast DMA is disabled. 117 * devices won't operate well if fast DMA is disabled.
118 */ 118 */
119 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 119 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
120 PCI_DEVICE_ID_INTEL_82371AB_0, 120 PCI_DEVICE_ID_INTEL_82371AB_0,
121 PCI_ANY_ID, PCI_ANY_ID, NULL); 121 PCI_ANY_ID, PCI_ANY_ID, NULL);
122 if (dev) { 122 if (dev) {
123 pci_read_config_byte(dev, 0x76, &value1); 123 pci_read_config_byte(dev, 0x76, &value1);
124 pci_read_config_byte(dev, 0x77, &value2); 124 pci_read_config_byte(dev, 0x77, &value2);
125 if ((value1 & 0x80) || (value2 & 0x80)) 125 if ((value1 & 0x80) || (value2 & 0x80))
126 errata.piix4.fdma = 1; 126 errata.piix4.fdma = 1;
127 pci_dev_put(dev); 127 pci_dev_put(dev);
128 } 128 }
129 129
130 break; 130 break;
131 } 131 }
132 132
133 if (errata.piix4.bmisx) 133 if (errata.piix4.bmisx)
134 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 134 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
135 "Bus master activity detection (BM-IDE) erratum enabled\n")); 135 "Bus master activity detection (BM-IDE) erratum enabled\n"));
136 if (errata.piix4.fdma) 136 if (errata.piix4.fdma)
137 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 137 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
138 "Type-F DMA livelock erratum (C3 disabled)\n")); 138 "Type-F DMA livelock erratum (C3 disabled)\n"));
139 139
140 return 0; 140 return 0;
141 } 141 }
142 142
143 static int acpi_processor_errata(struct acpi_processor *pr) 143 static int acpi_processor_errata(struct acpi_processor *pr)
144 { 144 {
145 int result = 0; 145 int result = 0;
146 struct pci_dev *dev = NULL; 146 struct pci_dev *dev = NULL;
147 147
148 148
149 if (!pr) 149 if (!pr)
150 return -EINVAL; 150 return -EINVAL;
151 151
152 /* 152 /*
153 * PIIX4 153 * PIIX4
154 */ 154 */
155 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 155 dev = pci_get_subsys(PCI_VENDOR_ID_INTEL,
156 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 156 PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID,
157 PCI_ANY_ID, NULL); 157 PCI_ANY_ID, NULL);
158 if (dev) { 158 if (dev) {
159 result = acpi_processor_errata_piix4(dev); 159 result = acpi_processor_errata_piix4(dev);
160 pci_dev_put(dev); 160 pci_dev_put(dev);
161 } 161 }
162 162
163 return result; 163 return result;
164 } 164 }
165 165
166 /* -------------------------------------------------------------------------- 166 /* --------------------------------------------------------------------------
167 Initialization 167 Initialization
168 -------------------------------------------------------------------------- */ 168 -------------------------------------------------------------------------- */
169 169
170 #ifdef CONFIG_ACPI_HOTPLUG_CPU 170 #ifdef CONFIG_ACPI_HOTPLUG_CPU
171 static int acpi_processor_hotadd_init(struct acpi_processor *pr) 171 static int acpi_processor_hotadd_init(struct acpi_processor *pr)
172 { 172 {
173 unsigned long long sta; 173 unsigned long long sta;
174 acpi_status status; 174 acpi_status status;
175 int ret; 175 int ret;
176 176
177 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); 177 status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
178 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) 178 if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
179 return -ENODEV; 179 return -ENODEV;
180 180
181 cpu_maps_update_begin();
182 cpu_hotplug_begin();
183
181 ret = acpi_map_lsapic(pr->handle, &pr->id); 184 ret = acpi_map_lsapic(pr->handle, &pr->id);
182 if (ret) 185 if (ret)
183 return ret; 186 goto out;
184 187
185 ret = arch_register_cpu(pr->id); 188 ret = arch_register_cpu(pr->id);
186 if (ret) { 189 if (ret) {
187 acpi_unmap_lsapic(pr->id); 190 acpi_unmap_lsapic(pr->id);
188 return ret; 191 goto out;
189 } 192 }
190 193
191 /* 194 /*
192 * CPU got hot-added, but cpu_data is not initialized yet. Set a flag 195 * CPU got hot-added, but cpu_data is not initialized yet. Set a flag
193 * to delay cpu_idle/throttling initialization and do it when the CPU 196 * to delay cpu_idle/throttling initialization and do it when the CPU
194 * gets online for the first time. 197 * gets online for the first time.
195 */ 198 */
196 pr_info("CPU%d has been hot-added\n", pr->id); 199 pr_info("CPU%d has been hot-added\n", pr->id);
197 pr->flags.need_hotplug_init = 1; 200 pr->flags.need_hotplug_init = 1;
198 return 0; 201
202 out:
203 cpu_hotplug_done();
204 cpu_maps_update_done();
205 return ret;
199 } 206 }
200 #else 207 #else
201 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr) 208 static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
202 { 209 {
203 return -ENODEV; 210 return -ENODEV;
204 } 211 }
205 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 212 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
206 213
207 static int acpi_processor_get_info(struct acpi_device *device) 214 static int acpi_processor_get_info(struct acpi_device *device)
208 { 215 {
209 union acpi_object object = { 0 }; 216 union acpi_object object = { 0 };
210 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 217 struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
211 struct acpi_processor *pr = acpi_driver_data(device); 218 struct acpi_processor *pr = acpi_driver_data(device);
212 int cpu_index, device_declaration = 0; 219 int cpu_index, device_declaration = 0;
213 acpi_status status = AE_OK; 220 acpi_status status = AE_OK;
214 static int cpu0_initialized; 221 static int cpu0_initialized;
215 222
216 if (num_online_cpus() > 1) 223 if (num_online_cpus() > 1)
217 errata.smp = TRUE; 224 errata.smp = TRUE;
218 225
219 acpi_processor_errata(pr); 226 acpi_processor_errata(pr);
220 227
221 /* 228 /*
222 * Check to see if we have bus mastering arbitration control. This 229 * Check to see if we have bus mastering arbitration control. This
223 * is required for proper C3 usage (to maintain cache coherency). 230 * is required for proper C3 usage (to maintain cache coherency).
224 */ 231 */
225 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { 232 if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) {
226 pr->flags.bm_control = 1; 233 pr->flags.bm_control = 1;
227 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 234 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
228 "Bus mastering arbitration control present\n")); 235 "Bus mastering arbitration control present\n"));
229 } else 236 } else
230 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 237 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
231 "No bus mastering arbitration control\n")); 238 "No bus mastering arbitration control\n"));
232 239
233 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { 240 if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) {
234 /* Declared with "Processor" statement; match ProcessorID */ 241 /* Declared with "Processor" statement; match ProcessorID */
235 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 242 status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer);
236 if (ACPI_FAILURE(status)) { 243 if (ACPI_FAILURE(status)) {
237 dev_err(&device->dev, 244 dev_err(&device->dev,
238 "Failed to evaluate processor object (0x%x)\n", 245 "Failed to evaluate processor object (0x%x)\n",
239 status); 246 status);
240 return -ENODEV; 247 return -ENODEV;
241 } 248 }
242 249
243 /* 250 /*
244 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 251 * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
245 * >>> 'acpi_get_processor_id(acpi_id, &id)' in 252 * >>> 'acpi_get_processor_id(acpi_id, &id)' in
246 * arch/xxx/acpi.c 253 * arch/xxx/acpi.c
247 */ 254 */
248 pr->acpi_id = object.processor.proc_id; 255 pr->acpi_id = object.processor.proc_id;
249 } else { 256 } else {
250 /* 257 /*
251 * Declared with "Device" statement; match _UID. 258 * Declared with "Device" statement; match _UID.
252 * Note that we don't handle string _UIDs yet. 259 * Note that we don't handle string _UIDs yet.
253 */ 260 */
254 unsigned long long value; 261 unsigned long long value;
255 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 262 status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
256 NULL, &value); 263 NULL, &value);
257 if (ACPI_FAILURE(status)) { 264 if (ACPI_FAILURE(status)) {
258 dev_err(&device->dev, 265 dev_err(&device->dev,
259 "Failed to evaluate processor _UID (0x%x)\n", 266 "Failed to evaluate processor _UID (0x%x)\n",
260 status); 267 status);
261 return -ENODEV; 268 return -ENODEV;
262 } 269 }
263 device_declaration = 1; 270 device_declaration = 1;
264 pr->acpi_id = value; 271 pr->acpi_id = value;
265 } 272 }
266 cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id); 273 cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
267 274
268 /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 275 /* Handle UP system running SMP kernel, with no LAPIC in MADT */
269 if (!cpu0_initialized && (cpu_index == -1) && 276 if (!cpu0_initialized && (cpu_index == -1) &&
270 (num_online_cpus() == 1)) { 277 (num_online_cpus() == 1)) {
271 cpu_index = 0; 278 cpu_index = 0;
272 } 279 }
273 280
274 cpu0_initialized = 1; 281 cpu0_initialized = 1;
275 282
276 pr->id = cpu_index; 283 pr->id = cpu_index;
277 284
278 /* 285 /*
279 * Extra Processor objects may be enumerated on MP systems with 286 * Extra Processor objects may be enumerated on MP systems with
280 * less than the max # of CPUs. They should be ignored _iff 287 * less than the max # of CPUs. They should be ignored _iff
281 * they are physically not present. 288 * they are physically not present.
282 */ 289 */
283 if (pr->id == -1) { 290 if (pr->id == -1) {
284 int ret = acpi_processor_hotadd_init(pr); 291 int ret = acpi_processor_hotadd_init(pr);
285 if (ret) 292 if (ret)
286 return ret; 293 return ret;
287 } 294 }
288 /* 295 /*
289 * On some boxes several processors use the same processor bus id. 296 * On some boxes several processors use the same processor bus id.
290 * But they are located in different scope. For example: 297 * But they are located in different scope. For example:
291 * \_SB.SCK0.CPU0 298 * \_SB.SCK0.CPU0
292 * \_SB.SCK1.CPU0 299 * \_SB.SCK1.CPU0
293 * Rename the processor device bus id. And the new bus id will be 300 * Rename the processor device bus id. And the new bus id will be
294 * generated as the following format: 301 * generated as the following format:
295 * CPU+CPU ID. 302 * CPU+CPU ID.
296 */ 303 */
297 sprintf(acpi_device_bid(device), "CPU%X", pr->id); 304 sprintf(acpi_device_bid(device), "CPU%X", pr->id);
298 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, 305 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id,
299 pr->acpi_id)); 306 pr->acpi_id));
300 307
301 if (!object.processor.pblk_address) 308 if (!object.processor.pblk_address)
302 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); 309 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n"));
303 else if (object.processor.pblk_length != 6) 310 else if (object.processor.pblk_length != 6)
304 dev_err(&device->dev, "Invalid PBLK length [%d]\n", 311 dev_err(&device->dev, "Invalid PBLK length [%d]\n",
305 object.processor.pblk_length); 312 object.processor.pblk_length);
306 else { 313 else {
307 pr->throttling.address = object.processor.pblk_address; 314 pr->throttling.address = object.processor.pblk_address;
308 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset; 315 pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset;
309 pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 316 pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
310 317
311 pr->pblk = object.processor.pblk_address; 318 pr->pblk = object.processor.pblk_address;
312 319
313 /* 320 /*
314 * We don't care about error returns - we just try to mark 321 * We don't care about error returns - we just try to mark
315 * these reserved so that nobody else is confused into thinking 322 * these reserved so that nobody else is confused into thinking
316 * that this region might be unused.. 323 * that this region might be unused..
317 * 324 *
318 * (In particular, allocating the IO range for Cardbus) 325 * (In particular, allocating the IO range for Cardbus)
319 */ 326 */
320 request_region(pr->throttling.address, 6, "ACPI CPU throttle"); 327 request_region(pr->throttling.address, 6, "ACPI CPU throttle");
321 } 328 }
322 329
323 /* 330 /*
324 * If ACPI describes a slot number for this CPU, we can use it to 331 * If ACPI describes a slot number for this CPU, we can use it to
325 * ensure we get the right value in the "physical id" field 332 * ensure we get the right value in the "physical id" field
326 * of /proc/cpuinfo 333 * of /proc/cpuinfo
327 */ 334 */
328 status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer); 335 status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
329 if (ACPI_SUCCESS(status)) 336 if (ACPI_SUCCESS(status))
330 arch_fix_phys_package_id(pr->id, object.integer.value); 337 arch_fix_phys_package_id(pr->id, object.integer.value);
331 338
332 return 0; 339 return 0;
333 } 340 }
334 341
335 /* 342 /*
336 * Do not put anything in here which needs the core to be online. 343 * Do not put anything in here which needs the core to be online.
337 * For example MSR access or setting up things which check for cpuinfo_x86 344 * For example MSR access or setting up things which check for cpuinfo_x86
338 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 345 * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
339 * Such things have to be put in and set up by the processor driver's .probe(). 346 * Such things have to be put in and set up by the processor driver's .probe().
340 */ 347 */
341 static DEFINE_PER_CPU(void *, processor_device_array); 348 static DEFINE_PER_CPU(void *, processor_device_array);
342 349
343 static int acpi_processor_add(struct acpi_device *device, 350 static int acpi_processor_add(struct acpi_device *device,
344 const struct acpi_device_id *id) 351 const struct acpi_device_id *id)
345 { 352 {
346 struct acpi_processor *pr; 353 struct acpi_processor *pr;
347 struct device *dev; 354 struct device *dev;
348 int result = 0; 355 int result = 0;
349 356
350 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 357 pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
351 if (!pr) 358 if (!pr)
352 return -ENOMEM; 359 return -ENOMEM;
353 360
354 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 361 if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) {
355 result = -ENOMEM; 362 result = -ENOMEM;
356 goto err_free_pr; 363 goto err_free_pr;
357 } 364 }
358 365
359 pr->handle = device->handle; 366 pr->handle = device->handle;
360 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 367 strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME);
361 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 368 strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS);
362 device->driver_data = pr; 369 device->driver_data = pr;
363 370
364 result = acpi_processor_get_info(device); 371 result = acpi_processor_get_info(device);
365 if (result) /* Processor is not physically present or unavailable */ 372 if (result) /* Processor is not physically present or unavailable */
366 return 0; 373 return 0;
367 374
368 #ifdef CONFIG_SMP 375 #ifdef CONFIG_SMP
369 if (pr->id >= setup_max_cpus && pr->id != 0) 376 if (pr->id >= setup_max_cpus && pr->id != 0)
370 return 0; 377 return 0;
371 #endif 378 #endif
372 379
373 BUG_ON(pr->id >= nr_cpu_ids); 380 BUG_ON(pr->id >= nr_cpu_ids);
374 381
375 /* 382 /*
376 * Buggy BIOS check. 383 * Buggy BIOS check.
377 * ACPI id of processors can be reported wrongly by the BIOS. 384 * ACPI id of processors can be reported wrongly by the BIOS.
378 * Don't trust it blindly 385 * Don't trust it blindly
379 */ 386 */
380 if (per_cpu(processor_device_array, pr->id) != NULL && 387 if (per_cpu(processor_device_array, pr->id) != NULL &&
381 per_cpu(processor_device_array, pr->id) != device) { 388 per_cpu(processor_device_array, pr->id) != device) {
382 dev_warn(&device->dev, 389 dev_warn(&device->dev,
383 "BIOS reported wrong ACPI id %d for the processor\n", 390 "BIOS reported wrong ACPI id %d for the processor\n",
384 pr->id); 391 pr->id);
385 /* Give up, but do not abort the namespace scan. */ 392 /* Give up, but do not abort the namespace scan. */
386 goto err; 393 goto err;
387 } 394 }
388 /* 395 /*
389 * processor_device_array is not cleared on errors to allow buggy BIOS 396 * processor_device_array is not cleared on errors to allow buggy BIOS
390 * checks. 397 * checks.
391 */ 398 */
392 per_cpu(processor_device_array, pr->id) = device; 399 per_cpu(processor_device_array, pr->id) = device;
393 per_cpu(processors, pr->id) = pr; 400 per_cpu(processors, pr->id) = pr;
394 401
395 dev = get_cpu_device(pr->id); 402 dev = get_cpu_device(pr->id);
396 if (!dev) { 403 if (!dev) {
397 result = -ENODEV; 404 result = -ENODEV;
398 goto err; 405 goto err;
399 } 406 }
400 407
401 result = acpi_bind_one(dev, pr->handle); 408 result = acpi_bind_one(dev, pr->handle);
402 if (result) 409 if (result)
403 goto err; 410 goto err;
404 411
405 pr->dev = dev; 412 pr->dev = dev;
406 dev->offline = pr->flags.need_hotplug_init; 413 dev->offline = pr->flags.need_hotplug_init;
407 414
408 /* Trigger the processor driver's .probe() if present. */ 415 /* Trigger the processor driver's .probe() if present. */
409 if (device_attach(dev) >= 0) 416 if (device_attach(dev) >= 0)
410 return 1; 417 return 1;
411 418
412 dev_err(dev, "Processor driver could not be attached\n"); 419 dev_err(dev, "Processor driver could not be attached\n");
413 acpi_unbind_one(dev); 420 acpi_unbind_one(dev);
414 421
415 err: 422 err:
416 free_cpumask_var(pr->throttling.shared_cpu_map); 423 free_cpumask_var(pr->throttling.shared_cpu_map);
417 device->driver_data = NULL; 424 device->driver_data = NULL;
418 per_cpu(processors, pr->id) = NULL; 425 per_cpu(processors, pr->id) = NULL;
419 err_free_pr: 426 err_free_pr:
420 kfree(pr); 427 kfree(pr);
421 return result; 428 return result;
422 } 429 }
423 430
424 #ifdef CONFIG_ACPI_HOTPLUG_CPU 431 #ifdef CONFIG_ACPI_HOTPLUG_CPU
425 /* -------------------------------------------------------------------------- 432 /* --------------------------------------------------------------------------
426 Removal 433 Removal
427 -------------------------------------------------------------------------- */ 434 -------------------------------------------------------------------------- */
428 435
429 static void acpi_processor_remove(struct acpi_device *device) 436 static void acpi_processor_remove(struct acpi_device *device)
430 { 437 {
431 struct acpi_processor *pr; 438 struct acpi_processor *pr;
432 439
433 if (!device || !acpi_driver_data(device)) 440 if (!device || !acpi_driver_data(device))
434 return; 441 return;
435 442
436 pr = acpi_driver_data(device); 443 pr = acpi_driver_data(device);
437 if (pr->id >= nr_cpu_ids) 444 if (pr->id >= nr_cpu_ids)
438 goto out; 445 goto out;
439 446
440 /* 447 /*
441 * The only reason why we ever get here is CPU hot-removal. The CPU is 448 * The only reason why we ever get here is CPU hot-removal. The CPU is
442 * already offline and the ACPI device removal locking prevents it from 449 * already offline and the ACPI device removal locking prevents it from
443 * being put back online at this point. 450 * being put back online at this point.
444 * 451 *
445 * Unbind the driver from the processor device and detach it from the 452 * Unbind the driver from the processor device and detach it from the
446 * ACPI companion object. 453 * ACPI companion object.
447 */ 454 */
448 device_release_driver(pr->dev); 455 device_release_driver(pr->dev);
449 acpi_unbind_one(pr->dev); 456 acpi_unbind_one(pr->dev);
450 457
451 /* Clean up. */ 458 /* Clean up. */
452 per_cpu(processor_device_array, pr->id) = NULL; 459 per_cpu(processor_device_array, pr->id) = NULL;
453 per_cpu(processors, pr->id) = NULL; 460 per_cpu(processors, pr->id) = NULL;
454 461
462 cpu_maps_update_begin();
463 cpu_hotplug_begin();
464
455 /* Remove the CPU. */ 465 /* Remove the CPU. */
456 get_online_cpus();
457 arch_unregister_cpu(pr->id); 466 arch_unregister_cpu(pr->id);
458 acpi_unmap_lsapic(pr->id); 467 acpi_unmap_lsapic(pr->id);
459 put_online_cpus(); 468
469 cpu_hotplug_done();
470 cpu_maps_update_done();
460 471
461 try_offline_node(cpu_to_node(pr->id)); 472 try_offline_node(cpu_to_node(pr->id));
462 473
463 out: 474 out:
464 free_cpumask_var(pr->throttling.shared_cpu_map); 475 free_cpumask_var(pr->throttling.shared_cpu_map);
465 kfree(pr); 476 kfree(pr);
466 } 477 }
467 #endif /* CONFIG_ACPI_HOTPLUG_CPU */ 478 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
468 479
469 /* 480 /*
470 * The following ACPI IDs are known to be suitable for representing as 481 * The following ACPI IDs are known to be suitable for representing as
471 * processor devices. 482 * processor devices.
472 */ 483 */
473 static const struct acpi_device_id processor_device_ids[] = { 484 static const struct acpi_device_id processor_device_ids[] = {
474 485
475 { ACPI_PROCESSOR_OBJECT_HID, }, 486 { ACPI_PROCESSOR_OBJECT_HID, },
476 { ACPI_PROCESSOR_DEVICE_HID, }, 487 { ACPI_PROCESSOR_DEVICE_HID, },
477 488
478 { } 489 { }
479 }; 490 };
480 491
481 static struct acpi_scan_handler __refdata processor_handler = { 492 static struct acpi_scan_handler __refdata processor_handler = {
482 .ids = processor_device_ids, 493 .ids = processor_device_ids,
483 .attach = acpi_processor_add, 494 .attach = acpi_processor_add,
484 #ifdef CONFIG_ACPI_HOTPLUG_CPU 495 #ifdef CONFIG_ACPI_HOTPLUG_CPU
485 .detach = acpi_processor_remove, 496 .detach = acpi_processor_remove,
486 #endif 497 #endif
487 .hotplug = { 498 .hotplug = {
488 .enabled = true, 499 .enabled = true,
489 }, 500 },
490 }; 501 };
491 502
492 void __init acpi_processor_init(void) 503 void __init acpi_processor_init(void)
493 { 504 {
494 acpi_scan_add_handler_with_hotplug(&processor_handler, "processor"); 505 acpi_scan_add_handler_with_hotplug(&processor_handler, "processor");
495 } 506 }

include/linux/cpu.h
1 /* 1 /*
2 * include/linux/cpu.h - generic cpu definition 2 * include/linux/cpu.h - generic cpu definition
3 * 3 *
4 * This is mainly for topological representation. We define the 4 * This is mainly for topological representation. We define the
5 * basic 'struct cpu' here, which can be embedded in per-arch 5 * basic 'struct cpu' here, which can be embedded in per-arch
6 * definitions of processors. 6 * definitions of processors.
7 * 7 *
8 * Basic handling of the devices is done in drivers/base/cpu.c 8 * Basic handling of the devices is done in drivers/base/cpu.c
9 * 9 *
10 * CPUs are exported via sysfs in the devices/system/cpu 10 * CPUs are exported via sysfs in the devices/system/cpu
11 * directory. 11 * directory.
12 */ 12 */
13 #ifndef _LINUX_CPU_H_ 13 #ifndef _LINUX_CPU_H_
14 #define _LINUX_CPU_H_ 14 #define _LINUX_CPU_H_
15 15
16 #include <linux/node.h> 16 #include <linux/node.h>
17 #include <linux/compiler.h> 17 #include <linux/compiler.h>
18 #include <linux/cpumask.h> 18 #include <linux/cpumask.h>
19 19
20 struct device; 20 struct device;
21 21
22 struct cpu { 22 struct cpu {
23 int node_id; /* The node which contains the CPU */ 23 int node_id; /* The node which contains the CPU */
24 int hotpluggable; /* creates sysfs control file if hotpluggable */ 24 int hotpluggable; /* creates sysfs control file if hotpluggable */
25 struct device dev; 25 struct device dev;
26 }; 26 };
27 27
28 extern int register_cpu(struct cpu *cpu, int num); 28 extern int register_cpu(struct cpu *cpu, int num);
29 extern struct device *get_cpu_device(unsigned cpu); 29 extern struct device *get_cpu_device(unsigned cpu);
30 extern bool cpu_is_hotpluggable(unsigned cpu); 30 extern bool cpu_is_hotpluggable(unsigned cpu);
31 31
32 extern int cpu_add_dev_attr(struct device_attribute *attr); 32 extern int cpu_add_dev_attr(struct device_attribute *attr);
33 extern void cpu_remove_dev_attr(struct device_attribute *attr); 33 extern void cpu_remove_dev_attr(struct device_attribute *attr);
34 34
35 extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 35 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
36 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 36 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
37 37
38 #ifdef CONFIG_HOTPLUG_CPU 38 #ifdef CONFIG_HOTPLUG_CPU
39 extern void unregister_cpu(struct cpu *cpu); 39 extern void unregister_cpu(struct cpu *cpu);
40 extern ssize_t arch_cpu_probe(const char *, size_t); 40 extern ssize_t arch_cpu_probe(const char *, size_t);
41 extern ssize_t arch_cpu_release(const char *, size_t); 41 extern ssize_t arch_cpu_release(const char *, size_t);
42 #endif 42 #endif
43 struct notifier_block; 43 struct notifier_block;
44 44
45 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE 45 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
46 extern int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env); 46 extern int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env);
47 extern ssize_t arch_print_cpu_modalias(struct device *dev, 47 extern ssize_t arch_print_cpu_modalias(struct device *dev,
48 struct device_attribute *attr, 48 struct device_attribute *attr,
49 char *bufptr); 49 char *bufptr);
50 #endif 50 #endif
51 51
52 /* 52 /*
53 * CPU notifier priorities. 53 * CPU notifier priorities.
54 */ 54 */
55 enum { 55 enum {
56 /* 56 /*
57 * SCHED_ACTIVE marks a cpu which is coming up active during 57 * SCHED_ACTIVE marks a cpu which is coming up active during
58 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first 58 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
59 * notifier. CPUSET_ACTIVE adjusts cpuset according to 59 * notifier. CPUSET_ACTIVE adjusts cpuset according to
60 * cpu_active mask right after SCHED_ACTIVE. During 60 * cpu_active mask right after SCHED_ACTIVE. During
61 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are 61 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
62 * ordered in the similar way. 62 * ordered in the similar way.
63 * 63 *
64 * This ordering guarantees consistent cpu_active mask and 64 * This ordering guarantees consistent cpu_active mask and
65 * migration behavior to all cpu notifiers. 65 * migration behavior to all cpu notifiers.
66 */ 66 */
67 CPU_PRI_SCHED_ACTIVE = INT_MAX, 67 CPU_PRI_SCHED_ACTIVE = INT_MAX,
68 CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, 68 CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
69 CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, 69 CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
70 CPU_PRI_CPUSET_INACTIVE = INT_MIN, 70 CPU_PRI_CPUSET_INACTIVE = INT_MIN,
71 71
72 /* migration should happen before other stuff but after perf */ 72 /* migration should happen before other stuff but after perf */
73 CPU_PRI_PERF = 20, 73 CPU_PRI_PERF = 20,
74 CPU_PRI_MIGRATION = 10, 74 CPU_PRI_MIGRATION = 10,
75 /* bring up workqueues before normal notifiers and down after */ 75 /* bring up workqueues before normal notifiers and down after */
76 CPU_PRI_WORKQUEUE_UP = 5, 76 CPU_PRI_WORKQUEUE_UP = 5,
77 CPU_PRI_WORKQUEUE_DOWN = -5, 77 CPU_PRI_WORKQUEUE_DOWN = -5,
78 }; 78 };
79 79
80 #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ 80 #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
81 #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ 81 #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
82 #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ 82 #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
83 #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ 83 #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
84 #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ 84 #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
85 #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 85 #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
86 #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, 86 #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
87 * not handling interrupts, soon dead. 87 * not handling interrupts, soon dead.
88 * Called on the dying cpu, interrupts 88 * Called on the dying cpu, interrupts
89 * are already disabled. Must not 89 * are already disabled. Must not
90 * sleep, must not fail */ 90 * sleep, must not fail */
91 #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug 91 #define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
92 * lock is dropped */ 92 * lock is dropped */
93 #define CPU_STARTING 0x000A /* CPU (unsigned)v soon running. 93 #define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
94 * Called on the new cpu, just before 94 * Called on the new cpu, just before
95 * enabling interrupts. Must not sleep, 95 * enabling interrupts. Must not sleep,
96 * must not fail */ 96 * must not fail */
97 97
98 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend 98 /* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
99 * operation in progress 99 * operation in progress
100 */ 100 */
101 #define CPU_TASKS_FROZEN 0x0010 101 #define CPU_TASKS_FROZEN 0x0010
102 102
103 #define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) 103 #define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
104 #define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) 104 #define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
105 #define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) 105 #define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
106 #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) 106 #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
107 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 107 #define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
108 #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 108 #define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
109 #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) 109 #define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
110 #define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN) 110 #define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
111 111
112 112
113 #ifdef CONFIG_SMP 113 #ifdef CONFIG_SMP
114 /* Need to know about CPUs going up/down? */ 114 /* Need to know about CPUs going up/down? */
115 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) 115 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
116 #define cpu_notifier(fn, pri) { \ 116 #define cpu_notifier(fn, pri) { \
117 static struct notifier_block fn##_nb = \ 117 static struct notifier_block fn##_nb = \
118 { .notifier_call = fn, .priority = pri }; \ 118 { .notifier_call = fn, .priority = pri }; \
119 register_cpu_notifier(&fn##_nb); \ 119 register_cpu_notifier(&fn##_nb); \
120 } 120 }
121 #else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ 121 #else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
122 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0) 122 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
123 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ 123 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
124 #ifdef CONFIG_HOTPLUG_CPU 124 #ifdef CONFIG_HOTPLUG_CPU
125 extern int register_cpu_notifier(struct notifier_block *nb); 125 extern int register_cpu_notifier(struct notifier_block *nb);
126 extern void unregister_cpu_notifier(struct notifier_block *nb); 126 extern void unregister_cpu_notifier(struct notifier_block *nb);
127 #else 127 #else
128 128
129 #ifndef MODULE 129 #ifndef MODULE
130 extern int register_cpu_notifier(struct notifier_block *nb); 130 extern int register_cpu_notifier(struct notifier_block *nb);
131 #else 131 #else
132 static inline int register_cpu_notifier(struct notifier_block *nb) 132 static inline int register_cpu_notifier(struct notifier_block *nb)
133 { 133 {
134 return 0; 134 return 0;
135 } 135 }
136 #endif 136 #endif
137 137
138 static inline void unregister_cpu_notifier(struct notifier_block *nb) 138 static inline void unregister_cpu_notifier(struct notifier_block *nb)
139 { 139 {
140 } 140 }
141 #endif 141 #endif
142 142
143 int cpu_up(unsigned int cpu); 143 int cpu_up(unsigned int cpu);
144 void notify_cpu_starting(unsigned int cpu); 144 void notify_cpu_starting(unsigned int cpu);
145 extern void cpu_maps_update_begin(void); 145 extern void cpu_maps_update_begin(void);
146 extern void cpu_maps_update_done(void); 146 extern void cpu_maps_update_done(void);
147 147
148 #else /* CONFIG_SMP */ 148 #else /* CONFIG_SMP */
149 149
150 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0) 150 #define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
151 151
152 static inline int register_cpu_notifier(struct notifier_block *nb) 152 static inline int register_cpu_notifier(struct notifier_block *nb)
153 { 153 {
154 return 0; 154 return 0;
155 } 155 }
156 156
157 static inline void unregister_cpu_notifier(struct notifier_block *nb) 157 static inline void unregister_cpu_notifier(struct notifier_block *nb)
158 { 158 {
159 } 159 }
160 160
161 static inline void cpu_maps_update_begin(void) 161 static inline void cpu_maps_update_begin(void)
162 { 162 {
163 } 163 }
164 164
165 static inline void cpu_maps_update_done(void) 165 static inline void cpu_maps_update_done(void)
166 { 166 {
167 } 167 }
168 168
169 #endif /* CONFIG_SMP */ 169 #endif /* CONFIG_SMP */
170 extern struct bus_type cpu_subsys; 170 extern struct bus_type cpu_subsys;
171 171
172 #ifdef CONFIG_HOTPLUG_CPU 172 #ifdef CONFIG_HOTPLUG_CPU
173 /* Stop CPUs going up and down. */ 173 /* Stop CPUs going up and down. */
174 174
175 extern void cpu_hotplug_begin(void);
176 extern void cpu_hotplug_done(void);
175 extern void get_online_cpus(void); 177 extern void get_online_cpus(void);
176 extern void put_online_cpus(void); 178 extern void put_online_cpus(void);
177 extern void cpu_hotplug_disable(void); 179 extern void cpu_hotplug_disable(void);
178 extern void cpu_hotplug_enable(void); 180 extern void cpu_hotplug_enable(void);
179 #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) 181 #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
180 #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 182 #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
181 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 183 #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
182 void clear_tasks_mm_cpumask(int cpu); 184 void clear_tasks_mm_cpumask(int cpu);
183 int cpu_down(unsigned int cpu); 185 int cpu_down(unsigned int cpu);
184 186
185 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 187 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
186 extern void cpu_hotplug_driver_lock(void); 188 extern void cpu_hotplug_driver_lock(void);
187 extern void cpu_hotplug_driver_unlock(void); 189 extern void cpu_hotplug_driver_unlock(void);
188 #else 190 #else
189 static inline void cpu_hotplug_driver_lock(void) 191 static inline void cpu_hotplug_driver_lock(void)
190 { 192 {
191 } 193 }
192 194
193 static inline void cpu_hotplug_driver_unlock(void) 195 static inline void cpu_hotplug_driver_unlock(void)
194 { 196 {
195 } 197 }
196 #endif 198 #endif
197 199
198 #else /* CONFIG_HOTPLUG_CPU */ 200 #else /* CONFIG_HOTPLUG_CPU */
199 201
202 static inline void cpu_hotplug_begin(void) {}
203 static inline void cpu_hotplug_done(void) {}
200 #define get_online_cpus() do { } while (0) 204 #define get_online_cpus() do { } while (0)
201 #define put_online_cpus() do { } while (0) 205 #define put_online_cpus() do { } while (0)
202 #define cpu_hotplug_disable() do { } while (0) 206 #define cpu_hotplug_disable() do { } while (0)
203 #define cpu_hotplug_enable() do { } while (0) 207 #define cpu_hotplug_enable() do { } while (0)
204 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 208 #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
205 /* These aren't inline functions due to a GCC bug. */ 209 /* These aren't inline functions due to a GCC bug. */
206 #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) 210 #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
207 #define unregister_hotcpu_notifier(nb) ({ (void)(nb); }) 211 #define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
208 #endif /* CONFIG_HOTPLUG_CPU */ 212 #endif /* CONFIG_HOTPLUG_CPU */
209 213
210 #ifdef CONFIG_PM_SLEEP_SMP 214 #ifdef CONFIG_PM_SLEEP_SMP
211 extern int disable_nonboot_cpus(void); 215 extern int disable_nonboot_cpus(void);
212 extern void enable_nonboot_cpus(void); 216 extern void enable_nonboot_cpus(void);
213 #else /* !CONFIG_PM_SLEEP_SMP */ 217 #else /* !CONFIG_PM_SLEEP_SMP */
214 static inline int disable_nonboot_cpus(void) { return 0; } 218 static inline int disable_nonboot_cpus(void) { return 0; }
215 static inline void enable_nonboot_cpus(void) {} 219 static inline void enable_nonboot_cpus(void) {}
216 #endif /* !CONFIG_PM_SLEEP_SMP */ 220 #endif /* !CONFIG_PM_SLEEP_SMP */
217 221
218 enum cpuhp_state { 222 enum cpuhp_state {
219 CPUHP_OFFLINE, 223 CPUHP_OFFLINE,
220 CPUHP_ONLINE, 224 CPUHP_ONLINE,
221 }; 225 };
222 226
223 void cpu_startup_entry(enum cpuhp_state state); 227 void cpu_startup_entry(enum cpuhp_state state);
224 void cpu_idle(void); 228 void cpu_idle(void);
225 229
226 void cpu_idle_poll_ctrl(bool enable); 230 void cpu_idle_poll_ctrl(bool enable);
227 231
228 void arch_cpu_idle(void); 232 void arch_cpu_idle(void);
229 void arch_cpu_idle_prepare(void); 233 void arch_cpu_idle_prepare(void);
230 void arch_cpu_idle_enter(void); 234 void arch_cpu_idle_enter(void);
231 void arch_cpu_idle_exit(void); 235 void arch_cpu_idle_exit(void);
232 void arch_cpu_idle_dead(void); 236 void arch_cpu_idle_dead(void);
233 237
234 #endif /* _LINUX_CPU_H_ */ 238 #endif /* _LINUX_CPU_H_ */
235 239

kernel/cpu.c
1 /* CPU control. 1 /* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell 2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
3 * 3 *
4 * This code is licenced under the GPL. 4 * This code is licenced under the GPL.
5 */ 5 */
6 #include <linux/proc_fs.h> 6 #include <linux/proc_fs.h>
7 #include <linux/smp.h> 7 #include <linux/smp.h>
8 #include <linux/init.h> 8 #include <linux/init.h>
9 #include <linux/notifier.h> 9 #include <linux/notifier.h>
10 #include <linux/sched.h> 10 #include <linux/sched.h>
11 #include <linux/unistd.h> 11 #include <linux/unistd.h>
12 #include <linux/cpu.h> 12 #include <linux/cpu.h>
13 #include <linux/oom.h> 13 #include <linux/oom.h>
14 #include <linux/rcupdate.h> 14 #include <linux/rcupdate.h>
15 #include <linux/export.h> 15 #include <linux/export.h>
16 #include <linux/bug.h> 16 #include <linux/bug.h>
17 #include <linux/kthread.h> 17 #include <linux/kthread.h>
18 #include <linux/stop_machine.h> 18 #include <linux/stop_machine.h>
19 #include <linux/mutex.h> 19 #include <linux/mutex.h>
20 #include <linux/gfp.h> 20 #include <linux/gfp.h>
21 #include <linux/suspend.h> 21 #include <linux/suspend.h>
22 22
23 #include "smpboot.h" 23 #include "smpboot.h"
24 24
25 #ifdef CONFIG_SMP 25 #ifdef CONFIG_SMP
26 /* Serializes the updates to cpu_online_mask, cpu_present_mask */ 26 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
27 static DEFINE_MUTEX(cpu_add_remove_lock); 27 static DEFINE_MUTEX(cpu_add_remove_lock);
28 28
29 /* 29 /*
30 * The following two API's must be used when attempting 30 * The following two API's must be used when attempting
31 * to serialize the updates to cpu_online_mask, cpu_present_mask. 31 * to serialize the updates to cpu_online_mask, cpu_present_mask.
32 */ 32 */
33 void cpu_maps_update_begin(void) 33 void cpu_maps_update_begin(void)
34 { 34 {
35 mutex_lock(&cpu_add_remove_lock); 35 mutex_lock(&cpu_add_remove_lock);
36 } 36 }
37 37
38 void cpu_maps_update_done(void) 38 void cpu_maps_update_done(void)
39 { 39 {
40 mutex_unlock(&cpu_add_remove_lock); 40 mutex_unlock(&cpu_add_remove_lock);
41 } 41 }
42 42
43 static RAW_NOTIFIER_HEAD(cpu_chain); 43 static RAW_NOTIFIER_HEAD(cpu_chain);
44 44
45 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing. 45 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
46 * Should always be manipulated under cpu_add_remove_lock 46 * Should always be manipulated under cpu_add_remove_lock
47 */ 47 */
48 static int cpu_hotplug_disabled; 48 static int cpu_hotplug_disabled;
49 49
50 #ifdef CONFIG_HOTPLUG_CPU 50 #ifdef CONFIG_HOTPLUG_CPU
51 51
52 static struct { 52 static struct {
53 struct task_struct *active_writer; 53 struct task_struct *active_writer;
54 struct mutex lock; /* Synchronizes accesses to refcount, */ 54 struct mutex lock; /* Synchronizes accesses to refcount, */
55 /* 55 /*
56 * Also blocks the new readers during 56 * Also blocks the new readers during
57 * an ongoing cpu hotplug operation. 57 * an ongoing cpu hotplug operation.
58 */ 58 */
59 int refcount; 59 int refcount;
60 } cpu_hotplug = { 60 } cpu_hotplug = {
61 .active_writer = NULL, 61 .active_writer = NULL,
62 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), 62 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
63 .refcount = 0, 63 .refcount = 0,
64 }; 64 };
65 65
66 void get_online_cpus(void) 66 void get_online_cpus(void)
67 { 67 {
68 might_sleep(); 68 might_sleep();
69 if (cpu_hotplug.active_writer == current) 69 if (cpu_hotplug.active_writer == current)
70 return; 70 return;
71 mutex_lock(&cpu_hotplug.lock); 71 mutex_lock(&cpu_hotplug.lock);
72 cpu_hotplug.refcount++; 72 cpu_hotplug.refcount++;
73 mutex_unlock(&cpu_hotplug.lock); 73 mutex_unlock(&cpu_hotplug.lock);
74 74
75 } 75 }
76 EXPORT_SYMBOL_GPL(get_online_cpus); 76 EXPORT_SYMBOL_GPL(get_online_cpus);
77 77
78 void put_online_cpus(void) 78 void put_online_cpus(void)
79 { 79 {
80 if (cpu_hotplug.active_writer == current) 80 if (cpu_hotplug.active_writer == current)
81 return; 81 return;
82 mutex_lock(&cpu_hotplug.lock); 82 mutex_lock(&cpu_hotplug.lock);
83 83
84 if (WARN_ON(!cpu_hotplug.refcount)) 84 if (WARN_ON(!cpu_hotplug.refcount))
85 cpu_hotplug.refcount++; /* try to fix things up */ 85 cpu_hotplug.refcount++; /* try to fix things up */
86 86
87 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) 87 if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
88 wake_up_process(cpu_hotplug.active_writer); 88 wake_up_process(cpu_hotplug.active_writer);
89 mutex_unlock(&cpu_hotplug.lock); 89 mutex_unlock(&cpu_hotplug.lock);
90 90
91 } 91 }
92 EXPORT_SYMBOL_GPL(put_online_cpus); 92 EXPORT_SYMBOL_GPL(put_online_cpus);
93 93
94 /* 94 /*
95 * This ensures that the hotplug operation can begin only when the 95 * This ensures that the hotplug operation can begin only when the
96 * refcount goes to zero. 96 * refcount goes to zero.
97 * 97 *
98 * Note that during a cpu-hotplug operation, the new readers, if any, 98 * Note that during a cpu-hotplug operation, the new readers, if any,
99 * will be blocked by the cpu_hotplug.lock 99 * will be blocked by the cpu_hotplug.lock
100 * 100 *
101 * Since cpu_hotplug_begin() is always called after invoking 101 * Since cpu_hotplug_begin() is always called after invoking
102 * cpu_maps_update_begin(), we can be sure that only one writer is active. 102 * cpu_maps_update_begin(), we can be sure that only one writer is active.
103 * 103 *
104 * Note that theoretically, there is a possibility of a livelock: 104 * Note that theoretically, there is a possibility of a livelock:
105 * - Refcount goes to zero, last reader wakes up the sleeping 105 * - Refcount goes to zero, last reader wakes up the sleeping
106 * writer. 106 * writer.
107 * - Last reader unlocks the cpu_hotplug.lock. 107 * - Last reader unlocks the cpu_hotplug.lock.
108 * - A new reader arrives at this moment, bumps up the refcount. 108 * - A new reader arrives at this moment, bumps up the refcount.
109 * - The writer acquires the cpu_hotplug.lock finds the refcount 109 * - The writer acquires the cpu_hotplug.lock finds the refcount
110 * non zero and goes to sleep again. 110 * non zero and goes to sleep again.
111 * 111 *
112 * However, this is very difficult to achieve in practice since 112 * However, this is very difficult to achieve in practice since
113 * get_online_cpus() not an api which is called all that often. 113 * get_online_cpus() not an api which is called all that often.
114 * 114 *
115 */ 115 */
116 static void cpu_hotplug_begin(void) 116 void cpu_hotplug_begin(void)
117 { 117 {
118 cpu_hotplug.active_writer = current; 118 cpu_hotplug.active_writer = current;
119 119
120 for (;;) { 120 for (;;) {
121 mutex_lock(&cpu_hotplug.lock); 121 mutex_lock(&cpu_hotplug.lock);
122 if (likely(!cpu_hotplug.refcount)) 122 if (likely(!cpu_hotplug.refcount))
123 break; 123 break;
124 __set_current_state(TASK_UNINTERRUPTIBLE); 124 __set_current_state(TASK_UNINTERRUPTIBLE);
125 mutex_unlock(&cpu_hotplug.lock); 125 mutex_unlock(&cpu_hotplug.lock);
126 schedule(); 126 schedule();
127 } 127 }
128 } 128 }
129 129
130 static void cpu_hotplug_done(void) 130 void cpu_hotplug_done(void)
131 { 131 {
132 cpu_hotplug.active_writer = NULL; 132 cpu_hotplug.active_writer = NULL;
133 mutex_unlock(&cpu_hotplug.lock); 133 mutex_unlock(&cpu_hotplug.lock);
134 } 134 }
135 135
136 /* 136 /*
137 * Wait for currently running CPU hotplug operations to complete (if any) and 137 * Wait for currently running CPU hotplug operations to complete (if any) and
138 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects 138 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
139 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the 139 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
140 * hotplug path before performing hotplug operations. So acquiring that lock 140 * hotplug path before performing hotplug operations. So acquiring that lock
141 * guarantees mutual exclusion from any currently running hotplug operations. 141 * guarantees mutual exclusion from any currently running hotplug operations.
142 */ 142 */
143 void cpu_hotplug_disable(void) 143 void cpu_hotplug_disable(void)
144 { 144 {
145 cpu_maps_update_begin(); 145 cpu_maps_update_begin();
146 cpu_hotplug_disabled = 1; 146 cpu_hotplug_disabled = 1;
147 cpu_maps_update_done(); 147 cpu_maps_update_done();
148 } 148 }
149 149
150 void cpu_hotplug_enable(void) 150 void cpu_hotplug_enable(void)
151 { 151 {
152 cpu_maps_update_begin(); 152 cpu_maps_update_begin();
153 cpu_hotplug_disabled = 0; 153 cpu_hotplug_disabled = 0;
154 cpu_maps_update_done(); 154 cpu_maps_update_done();
155 } 155 }
156 156
157 #else /* #if CONFIG_HOTPLUG_CPU */ 157 #endif /* CONFIG_HOTPLUG_CPU */
158 static void cpu_hotplug_begin(void) {}
159 static void cpu_hotplug_done(void) {}
160 #endif /* #else #if CONFIG_HOTPLUG_CPU */
161 158
162 /* Need to know about CPUs going up/down? */ 159 /* Need to know about CPUs going up/down? */
163 int __ref register_cpu_notifier(struct notifier_block *nb) 160 int __ref register_cpu_notifier(struct notifier_block *nb)
164 { 161 {
165 int ret; 162 int ret;
166 cpu_maps_update_begin(); 163 cpu_maps_update_begin();
167 ret = raw_notifier_chain_register(&cpu_chain, nb); 164 ret = raw_notifier_chain_register(&cpu_chain, nb);
168 cpu_maps_update_done(); 165 cpu_maps_update_done();
169 return ret; 166 return ret;
170 } 167 }
171 168
172 static int __cpu_notify(unsigned long val, void *v, int nr_to_call, 169 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
173 int *nr_calls) 170 int *nr_calls)
174 { 171 {
175 int ret; 172 int ret;
176 173
177 ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call, 174 ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
178 nr_calls); 175 nr_calls);
179 176
180 return notifier_to_errno(ret); 177 return notifier_to_errno(ret);
181 } 178 }
182 179
183 static int cpu_notify(unsigned long val, void *v) 180 static int cpu_notify(unsigned long val, void *v)
184 { 181 {
185 return __cpu_notify(val, v, -1, NULL); 182 return __cpu_notify(val, v, -1, NULL);
186 } 183 }
187 184
188 #ifdef CONFIG_HOTPLUG_CPU 185 #ifdef CONFIG_HOTPLUG_CPU
189 186
190 static void cpu_notify_nofail(unsigned long val, void *v) 187 static void cpu_notify_nofail(unsigned long val, void *v)
191 { 188 {
192 BUG_ON(cpu_notify(val, v)); 189 BUG_ON(cpu_notify(val, v));
193 } 190 }
194 EXPORT_SYMBOL(register_cpu_notifier); 191 EXPORT_SYMBOL(register_cpu_notifier);
195 192
196 void __ref unregister_cpu_notifier(struct notifier_block *nb) 193 void __ref unregister_cpu_notifier(struct notifier_block *nb)
197 { 194 {
198 cpu_maps_update_begin(); 195 cpu_maps_update_begin();
199 raw_notifier_chain_unregister(&cpu_chain, nb); 196 raw_notifier_chain_unregister(&cpu_chain, nb);
200 cpu_maps_update_done(); 197 cpu_maps_update_done();
201 } 198 }
202 EXPORT_SYMBOL(unregister_cpu_notifier); 199 EXPORT_SYMBOL(unregister_cpu_notifier);
203 200
204 /** 201 /**
205 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU 202 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
206 * @cpu: a CPU id 203 * @cpu: a CPU id
207 * 204 *
208 * This function walks all processes, finds a valid mm struct for each one and 205 * This function walks all processes, finds a valid mm struct for each one and
209 * then clears a corresponding bit in mm's cpumask. While this all sounds 206 * then clears a corresponding bit in mm's cpumask. While this all sounds
210 * trivial, there are various non-obvious corner cases, which this function 207 * trivial, there are various non-obvious corner cases, which this function
211 * tries to solve in a safe manner. 208 * tries to solve in a safe manner.
212 * 209 *
213 * Also note that the function uses a somewhat relaxed locking scheme, so it may 210 * Also note that the function uses a somewhat relaxed locking scheme, so it may
214 * be called only for an already offlined CPU. 211 * be called only for an already offlined CPU.
215 */ 212 */
216 void clear_tasks_mm_cpumask(int cpu) 213 void clear_tasks_mm_cpumask(int cpu)
217 { 214 {
218 struct task_struct *p; 215 struct task_struct *p;
219 216
220 /* 217 /*
221 * This function is called after the cpu is taken down and marked 218 * This function is called after the cpu is taken down and marked
222 * offline, so its not like new tasks will ever get this cpu set in 219 * offline, so its not like new tasks will ever get this cpu set in
223 * their mm mask. -- Peter Zijlstra 220 * their mm mask. -- Peter Zijlstra
224 * Thus, we may use rcu_read_lock() here, instead of grabbing 221 * Thus, we may use rcu_read_lock() here, instead of grabbing
225 * full-fledged tasklist_lock. 222 * full-fledged tasklist_lock.
226 */ 223 */
227 WARN_ON(cpu_online(cpu)); 224 WARN_ON(cpu_online(cpu));
228 rcu_read_lock(); 225 rcu_read_lock();
229 for_each_process(p) { 226 for_each_process(p) {
230 struct task_struct *t; 227 struct task_struct *t;
231 228
232 /* 229 /*
233 * Main thread might exit, but other threads may still have 230 * Main thread might exit, but other threads may still have
234 * a valid mm. Find one. 231 * a valid mm. Find one.
235 */ 232 */
236 t = find_lock_task_mm(p); 233 t = find_lock_task_mm(p);
237 if (!t) 234 if (!t)
238 continue; 235 continue;
239 cpumask_clear_cpu(cpu, mm_cpumask(t->mm)); 236 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
240 task_unlock(t); 237 task_unlock(t);
241 } 238 }
242 rcu_read_unlock(); 239 rcu_read_unlock();
243 } 240 }
244 241
245 static inline void check_for_tasks(int cpu) 242 static inline void check_for_tasks(int cpu)
246 { 243 {
247 struct task_struct *p; 244 struct task_struct *p;
248 cputime_t utime, stime; 245 cputime_t utime, stime;
249 246
250 write_lock_irq(&tasklist_lock); 247 write_lock_irq(&tasklist_lock);
251 for_each_process(p) { 248 for_each_process(p) {
252 task_cputime(p, &utime, &stime); 249 task_cputime(p, &utime, &stime);
253 if (task_cpu(p) == cpu && p->state == TASK_RUNNING && 250 if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
254 (utime || stime)) 251 (utime || stime))
255 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d " 252 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
256 "(state = %ld, flags = %x)\n", 253 "(state = %ld, flags = %x)\n",
257 p->comm, task_pid_nr(p), cpu, 254 p->comm, task_pid_nr(p), cpu,
258 p->state, p->flags); 255 p->state, p->flags);
259 } 256 }
260 write_unlock_irq(&tasklist_lock); 257 write_unlock_irq(&tasklist_lock);
261 } 258 }
262 259
263 struct take_cpu_down_param { 260 struct take_cpu_down_param {
264 unsigned long mod; 261 unsigned long mod;
265 void *hcpu; 262 void *hcpu;
266 }; 263 };
267 264
268 /* Take this CPU down. */ 265 /* Take this CPU down. */
269 static int __ref take_cpu_down(void *_param) 266 static int __ref take_cpu_down(void *_param)
270 { 267 {
271 struct take_cpu_down_param *param = _param; 268 struct take_cpu_down_param *param = _param;
272 int err; 269 int err;
273 270
274 /* Ensure this CPU doesn't handle any more interrupts. */ 271 /* Ensure this CPU doesn't handle any more interrupts. */
275 err = __cpu_disable(); 272 err = __cpu_disable();
276 if (err < 0) 273 if (err < 0)
277 return err; 274 return err;
278 275
279 cpu_notify(CPU_DYING | param->mod, param->hcpu); 276 cpu_notify(CPU_DYING | param->mod, param->hcpu);
280 /* Park the stopper thread */ 277 /* Park the stopper thread */
281 kthread_park(current); 278 kthread_park(current);
282 return 0; 279 return 0;
283 } 280 }
284 281
285 /* Requires cpu_add_remove_lock to be held */ 282 /* Requires cpu_add_remove_lock to be held */
286 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 283 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
287 { 284 {
288 int err, nr_calls = 0; 285 int err, nr_calls = 0;
289 void *hcpu = (void *)(long)cpu; 286 void *hcpu = (void *)(long)cpu;
290 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 287 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
291 struct take_cpu_down_param tcd_param = { 288 struct take_cpu_down_param tcd_param = {
292 .mod = mod, 289 .mod = mod,
293 .hcpu = hcpu, 290 .hcpu = hcpu,
294 }; 291 };
295 292
296 if (num_online_cpus() == 1) 293 if (num_online_cpus() == 1)
297 return -EBUSY; 294 return -EBUSY;
298 295
299 if (!cpu_online(cpu)) 296 if (!cpu_online(cpu))
300 return -EINVAL; 297 return -EINVAL;
301 298
302 cpu_hotplug_begin(); 299 cpu_hotplug_begin();
303 300
304 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); 301 err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
305 if (err) { 302 if (err) {
306 nr_calls--; 303 nr_calls--;
307 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); 304 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
308 printk("%s: attempt to take down CPU %u failed\n", 305 printk("%s: attempt to take down CPU %u failed\n",
309 __func__, cpu); 306 __func__, cpu);
310 goto out_release; 307 goto out_release;
311 } 308 }
312 smpboot_park_threads(cpu); 309 smpboot_park_threads(cpu);
313 310
314 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 311 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
315 if (err) { 312 if (err) {
316 /* CPU didn't die: tell everyone. Can't complain. */ 313 /* CPU didn't die: tell everyone. Can't complain. */
317 smpboot_unpark_threads(cpu); 314 smpboot_unpark_threads(cpu);
318 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); 315 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
319 goto out_release; 316 goto out_release;
320 } 317 }
321 BUG_ON(cpu_online(cpu)); 318 BUG_ON(cpu_online(cpu));
322 319
323 /* 320 /*
324 * The migration_call() CPU_DYING callback will have removed all 321 * The migration_call() CPU_DYING callback will have removed all
325 * runnable tasks from the cpu, there's only the idle task left now 322 * runnable tasks from the cpu, there's only the idle task left now
326 * that the migration thread is done doing the stop_machine thing. 323 * that the migration thread is done doing the stop_machine thing.
327 * 324 *
328 * Wait for the stop thread to go away. 325 * Wait for the stop thread to go away.
329 */ 326 */
330 while (!idle_cpu(cpu)) 327 while (!idle_cpu(cpu))
331 cpu_relax(); 328 cpu_relax();
332 329
333 /* This actually kills the CPU. */ 330 /* This actually kills the CPU. */
334 __cpu_die(cpu); 331 __cpu_die(cpu);
335 332
336 /* CPU is completely dead: tell everyone. Too late to complain. */ 333 /* CPU is completely dead: tell everyone. Too late to complain. */
337 cpu_notify_nofail(CPU_DEAD | mod, hcpu); 334 cpu_notify_nofail(CPU_DEAD | mod, hcpu);
338 335
339 check_for_tasks(cpu); 336 check_for_tasks(cpu);
340 337
341 out_release: 338 out_release:
342 cpu_hotplug_done(); 339 cpu_hotplug_done();
343 if (!err) 340 if (!err)
344 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); 341 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
345 return err; 342 return err;
346 } 343 }
347 344
348 int __ref cpu_down(unsigned int cpu) 345 int __ref cpu_down(unsigned int cpu)
349 { 346 {
350 int err; 347 int err;
351 348
352 cpu_maps_update_begin(); 349 cpu_maps_update_begin();
353 350
354 if (cpu_hotplug_disabled) { 351 if (cpu_hotplug_disabled) {
355 err = -EBUSY; 352 err = -EBUSY;
356 goto out; 353 goto out;
357 } 354 }
358 355
359 err = _cpu_down(cpu, 0); 356 err = _cpu_down(cpu, 0);
360 357
361 out: 358 out:
362 cpu_maps_update_done(); 359 cpu_maps_update_done();
363 return err; 360 return err;
364 } 361 }
365 EXPORT_SYMBOL(cpu_down); 362 EXPORT_SYMBOL(cpu_down);
366 #endif /*CONFIG_HOTPLUG_CPU*/ 363 #endif /*CONFIG_HOTPLUG_CPU*/
367 364
368 /* Requires cpu_add_remove_lock to be held */ 365 /* Requires cpu_add_remove_lock to be held */
369 static int _cpu_up(unsigned int cpu, int tasks_frozen) 366 static int _cpu_up(unsigned int cpu, int tasks_frozen)
370 { 367 {
371 int ret, nr_calls = 0; 368 int ret, nr_calls = 0;
372 void *hcpu = (void *)(long)cpu; 369 void *hcpu = (void *)(long)cpu;
373 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 370 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
374 struct task_struct *idle; 371 struct task_struct *idle;
375 372
376 cpu_hotplug_begin(); 373 cpu_hotplug_begin();
377 374
378 if (cpu_online(cpu) || !cpu_present(cpu)) { 375 if (cpu_online(cpu) || !cpu_present(cpu)) {
379 ret = -EINVAL; 376 ret = -EINVAL;
380 goto out; 377 goto out;
381 } 378 }
382 379
383 idle = idle_thread_get(cpu); 380 idle = idle_thread_get(cpu);
384 if (IS_ERR(idle)) { 381 if (IS_ERR(idle)) {
385 ret = PTR_ERR(idle); 382 ret = PTR_ERR(idle);
386 goto out; 383 goto out;
387 } 384 }
388 385
389 ret = smpboot_create_threads(cpu); 386 ret = smpboot_create_threads(cpu);
390 if (ret) 387 if (ret)
391 goto out; 388 goto out;
392 389
393 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); 390 ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
394 if (ret) { 391 if (ret) {
395 nr_calls--; 392 nr_calls--;
396 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n", 393 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
397 __func__, cpu); 394 __func__, cpu);
398 goto out_notify; 395 goto out_notify;
399 } 396 }
400 397
401 /* Arch-specific enabling code. */ 398 /* Arch-specific enabling code. */
402 ret = __cpu_up(cpu, idle); 399 ret = __cpu_up(cpu, idle);
403 if (ret != 0) 400 if (ret != 0)
404 goto out_notify; 401 goto out_notify;
405 BUG_ON(!cpu_online(cpu)); 402 BUG_ON(!cpu_online(cpu));
406 403
407 /* Wake the per cpu threads */ 404 /* Wake the per cpu threads */
408 smpboot_unpark_threads(cpu); 405 smpboot_unpark_threads(cpu);
409 406
410 /* Now call notifier in preparation. */ 407 /* Now call notifier in preparation. */
411 cpu_notify(CPU_ONLINE | mod, hcpu); 408 cpu_notify(CPU_ONLINE | mod, hcpu);
412 409
413 out_notify: 410 out_notify:
414 if (ret != 0) 411 if (ret != 0)
415 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); 412 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
416 out: 413 out:
417 cpu_hotplug_done(); 414 cpu_hotplug_done();
418 415
419 return ret; 416 return ret;
420 } 417 }
421 418
422 int cpu_up(unsigned int cpu) 419 int cpu_up(unsigned int cpu)
423 { 420 {
424 int err = 0; 421 int err = 0;
425 422
426 #ifdef CONFIG_MEMORY_HOTPLUG 423 #ifdef CONFIG_MEMORY_HOTPLUG
427 int nid; 424 int nid;
428 pg_data_t *pgdat; 425 pg_data_t *pgdat;
429 #endif 426 #endif
430 427
431 if (!cpu_possible(cpu)) { 428 if (!cpu_possible(cpu)) {
432 printk(KERN_ERR "can't online cpu %d because it is not " 429 printk(KERN_ERR "can't online cpu %d because it is not "
433 "configured as may-hotadd at boot time\n", cpu); 430 "configured as may-hotadd at boot time\n", cpu);
434 #if defined(CONFIG_IA64) 431 #if defined(CONFIG_IA64)
435 printk(KERN_ERR "please check additional_cpus= boot " 432 printk(KERN_ERR "please check additional_cpus= boot "
436 "parameter\n"); 433 "parameter\n");
437 #endif 434 #endif
438 return -EINVAL; 435 return -EINVAL;
439 } 436 }
440 437
441 #ifdef CONFIG_MEMORY_HOTPLUG 438 #ifdef CONFIG_MEMORY_HOTPLUG
442 nid = cpu_to_node(cpu); 439 nid = cpu_to_node(cpu);
443 if (!node_online(nid)) { 440 if (!node_online(nid)) {
444 err = mem_online_node(nid); 441 err = mem_online_node(nid);
445 if (err) 442 if (err)
446 return err; 443 return err;
447 } 444 }
448 445
449 pgdat = NODE_DATA(nid); 446 pgdat = NODE_DATA(nid);
450 if (!pgdat) { 447 if (!pgdat) {
451 printk(KERN_ERR 448 printk(KERN_ERR
452 "Can't online cpu %d due to NULL pgdat\n", cpu); 449 "Can't online cpu %d due to NULL pgdat\n", cpu);
453 return -ENOMEM; 450 return -ENOMEM;
454 } 451 }
455 452
456 if (pgdat->node_zonelists->_zonerefs->zone == NULL) { 453 if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
457 mutex_lock(&zonelists_mutex); 454 mutex_lock(&zonelists_mutex);
458 build_all_zonelists(NULL, NULL); 455 build_all_zonelists(NULL, NULL);
459 mutex_unlock(&zonelists_mutex); 456 mutex_unlock(&zonelists_mutex);
460 } 457 }
461 #endif 458 #endif
462 459
463 cpu_maps_update_begin(); 460 cpu_maps_update_begin();
464 461
465 if (cpu_hotplug_disabled) { 462 if (cpu_hotplug_disabled) {
466 err = -EBUSY; 463 err = -EBUSY;
467 goto out; 464 goto out;
468 } 465 }
469 466
470 err = _cpu_up(cpu, 0); 467 err = _cpu_up(cpu, 0);
471 468
472 out: 469 out:
473 cpu_maps_update_done(); 470 cpu_maps_update_done();
474 return err; 471 return err;
475 } 472 }
476 EXPORT_SYMBOL_GPL(cpu_up); 473 EXPORT_SYMBOL_GPL(cpu_up);
477 474
478 #ifdef CONFIG_PM_SLEEP_SMP 475 #ifdef CONFIG_PM_SLEEP_SMP
479 static cpumask_var_t frozen_cpus; 476 static cpumask_var_t frozen_cpus;
480 477
481 int disable_nonboot_cpus(void) 478 int disable_nonboot_cpus(void)
482 { 479 {
483 int cpu, first_cpu, error = 0; 480 int cpu, first_cpu, error = 0;
484 481
485 cpu_maps_update_begin(); 482 cpu_maps_update_begin();
486 first_cpu = cpumask_first(cpu_online_mask); 483 first_cpu = cpumask_first(cpu_online_mask);
487 /* 484 /*
488 * We take down all of the non-boot CPUs in one shot to avoid races 485 * We take down all of the non-boot CPUs in one shot to avoid races
489 * with the userspace trying to use the CPU hotplug at the same time 486 * with the userspace trying to use the CPU hotplug at the same time
490 */ 487 */
491 cpumask_clear(frozen_cpus); 488 cpumask_clear(frozen_cpus);
492 489
493 printk("Disabling non-boot CPUs ...\n"); 490 printk("Disabling non-boot CPUs ...\n");
494 for_each_online_cpu(cpu) { 491 for_each_online_cpu(cpu) {
495 if (cpu == first_cpu) 492 if (cpu == first_cpu)
496 continue; 493 continue;
497 error = _cpu_down(cpu, 1); 494 error = _cpu_down(cpu, 1);
498 if (!error) 495 if (!error)
499 cpumask_set_cpu(cpu, frozen_cpus); 496 cpumask_set_cpu(cpu, frozen_cpus);
500 else { 497 else {
501 printk(KERN_ERR "Error taking CPU%d down: %d\n", 498 printk(KERN_ERR "Error taking CPU%d down: %d\n",
502 cpu, error); 499 cpu, error);
503 break; 500 break;
504 } 501 }
505 } 502 }
506 503
507 if (!error) { 504 if (!error) {
508 BUG_ON(num_online_cpus() > 1); 505 BUG_ON(num_online_cpus() > 1);
509 /* Make sure the CPUs won't be enabled by someone else */ 506 /* Make sure the CPUs won't be enabled by someone else */
510 cpu_hotplug_disabled = 1; 507 cpu_hotplug_disabled = 1;
511 } else { 508 } else {
512 printk(KERN_ERR "Non-boot CPUs are not disabled\n"); 509 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
513 } 510 }
514 cpu_maps_update_done(); 511 cpu_maps_update_done();
515 return error; 512 return error;
516 } 513 }
517 514
518 void __weak arch_enable_nonboot_cpus_begin(void) 515 void __weak arch_enable_nonboot_cpus_begin(void)
519 { 516 {
520 } 517 }
521 518
522 void __weak arch_enable_nonboot_cpus_end(void) 519 void __weak arch_enable_nonboot_cpus_end(void)
523 { 520 {
524 } 521 }
525 522
526 void __ref enable_nonboot_cpus(void) 523 void __ref enable_nonboot_cpus(void)
527 { 524 {
528 int cpu, error; 525 int cpu, error;
529 526
530 /* Allow everyone to use the CPU hotplug again */ 527 /* Allow everyone to use the CPU hotplug again */
531 cpu_maps_update_begin(); 528 cpu_maps_update_begin();
532 cpu_hotplug_disabled = 0; 529 cpu_hotplug_disabled = 0;
533 if (cpumask_empty(frozen_cpus)) 530 if (cpumask_empty(frozen_cpus))
534 goto out; 531 goto out;
535 532
536 printk(KERN_INFO "Enabling non-boot CPUs ...\n"); 533 printk(KERN_INFO "Enabling non-boot CPUs ...\n");
537 534
538 arch_enable_nonboot_cpus_begin(); 535 arch_enable_nonboot_cpus_begin();
539 536
540 for_each_cpu(cpu, frozen_cpus) { 537 for_each_cpu(cpu, frozen_cpus) {
541 error = _cpu_up(cpu, 1); 538 error = _cpu_up(cpu, 1);
542 if (!error) { 539 if (!error) {
543 printk(KERN_INFO "CPU%d is up\n", cpu); 540 printk(KERN_INFO "CPU%d is up\n", cpu);
544 continue; 541 continue;
545 } 542 }
546 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 543 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
547 } 544 }
548 545
549 arch_enable_nonboot_cpus_end(); 546 arch_enable_nonboot_cpus_end();
550 547
551 cpumask_clear(frozen_cpus); 548 cpumask_clear(frozen_cpus);
552 out: 549 out:
553 cpu_maps_update_done(); 550 cpu_maps_update_done();
554 } 551 }
555 552
556 static int __init alloc_frozen_cpus(void) 553 static int __init alloc_frozen_cpus(void)
557 { 554 {
558 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) 555 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
559 return -ENOMEM; 556 return -ENOMEM;
560 return 0; 557 return 0;
561 } 558 }
562 core_initcall(alloc_frozen_cpus); 559 core_initcall(alloc_frozen_cpus);
563 560
564 /* 561 /*
565 * When callbacks for CPU hotplug notifications are being executed, we must 562 * When callbacks for CPU hotplug notifications are being executed, we must
566 * ensure that the state of the system with respect to the tasks being frozen 563 * ensure that the state of the system with respect to the tasks being frozen
567 * or not, as reported by the notification, remains unchanged *throughout the 564 * or not, as reported by the notification, remains unchanged *throughout the
568 * duration* of the execution of the callbacks. 565 * duration* of the execution of the callbacks.
569 * Hence we need to prevent the freezer from racing with regular CPU hotplug. 566 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
570 * 567 *
571 * This synchronization is implemented by mutually excluding regular CPU 568 * This synchronization is implemented by mutually excluding regular CPU
572 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ 569 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
573 * Hibernate notifications. 570 * Hibernate notifications.
574 */ 571 */
575 static int 572 static int
576 cpu_hotplug_pm_callback(struct notifier_block *nb, 573 cpu_hotplug_pm_callback(struct notifier_block *nb,
577 unsigned long action, void *ptr) 574 unsigned long action, void *ptr)
578 { 575 {
579 switch (action) { 576 switch (action) {
580 577
581 case PM_SUSPEND_PREPARE: 578 case PM_SUSPEND_PREPARE:
582 case PM_HIBERNATION_PREPARE: 579 case PM_HIBERNATION_PREPARE:
583 cpu_hotplug_disable(); 580 cpu_hotplug_disable();
584 break; 581 break;
585 582
586 case PM_POST_SUSPEND: 583 case PM_POST_SUSPEND:
587 case PM_POST_HIBERNATION: 584 case PM_POST_HIBERNATION:
588 cpu_hotplug_enable(); 585 cpu_hotplug_enable();
589 break; 586 break;
590 587
591 default: 588 default:
592 return NOTIFY_DONE; 589 return NOTIFY_DONE;
593 } 590 }
594 591
595 return NOTIFY_OK; 592 return NOTIFY_OK;
596 } 593 }
597 594
598 595
599 static int __init cpu_hotplug_pm_sync_init(void) 596 static int __init cpu_hotplug_pm_sync_init(void)
600 { 597 {
601 /* 598 /*
602 * cpu_hotplug_pm_callback has higher priority than x86 599 * cpu_hotplug_pm_callback has higher priority than x86
603 * bsp_pm_callback which depends on cpu_hotplug_pm_callback 600 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
604 * to disable cpu hotplug to avoid cpu hotplug race. 601 * to disable cpu hotplug to avoid cpu hotplug race.
605 */ 602 */
606 pm_notifier(cpu_hotplug_pm_callback, 0); 603 pm_notifier(cpu_hotplug_pm_callback, 0);
607 return 0; 604 return 0;
608 } 605 }
609 core_initcall(cpu_hotplug_pm_sync_init); 606 core_initcall(cpu_hotplug_pm_sync_init);
610 607
611 #endif /* CONFIG_PM_SLEEP_SMP */ 608 #endif /* CONFIG_PM_SLEEP_SMP */
612 609
613 /** 610 /**
614 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers 611 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
615 * @cpu: cpu that just started 612 * @cpu: cpu that just started
616 * 613 *
617 * This function calls the cpu_chain notifiers with CPU_STARTING. 614 * This function calls the cpu_chain notifiers with CPU_STARTING.
618 * It must be called by the arch code on the new cpu, before the new cpu 615 * It must be called by the arch code on the new cpu, before the new cpu
619 * enables interrupts and before the "boot" cpu returns from __cpu_up(). 616 * enables interrupts and before the "boot" cpu returns from __cpu_up().
620 */ 617 */
621 void notify_cpu_starting(unsigned int cpu) 618 void notify_cpu_starting(unsigned int cpu)
622 { 619 {
623 unsigned long val = CPU_STARTING; 620 unsigned long val = CPU_STARTING;
624 621
625 #ifdef CONFIG_PM_SLEEP_SMP 622 #ifdef CONFIG_PM_SLEEP_SMP
626 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus)) 623 if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
627 val = CPU_STARTING_FROZEN; 624 val = CPU_STARTING_FROZEN;
628 #endif /* CONFIG_PM_SLEEP_SMP */ 625 #endif /* CONFIG_PM_SLEEP_SMP */
629 cpu_notify(val, (void *)(long)cpu); 626 cpu_notify(val, (void *)(long)cpu);
630 } 627 }
631 628
632 #endif /* CONFIG_SMP */ 629 #endif /* CONFIG_SMP */
633 630
634 /* 631 /*
635 * cpu_bit_bitmap[] is a special, "compressed" data structure that 632 * cpu_bit_bitmap[] is a special, "compressed" data structure that
636 * represents all NR_CPUS bits binary values of 1<<nr. 633 * represents all NR_CPUS bits binary values of 1<<nr.
637 * 634 *
638 * It is used by cpumask_of() to get a constant address to a CPU 635 * It is used by cpumask_of() to get a constant address to a CPU
639 * mask value that has a single bit set only. 636 * mask value that has a single bit set only.
640 */ 637 */
641 638
642 /* cpu_bit_bitmap[0] is empty - so we can back into it */ 639 /* cpu_bit_bitmap[0] is empty - so we can back into it */
643 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x)) 640 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
644 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1) 641 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
645 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2) 642 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
646 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4) 643 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
647 644
648 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = { 645 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
649 646
650 MASK_DECLARE_8(0), MASK_DECLARE_8(8), 647 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
651 MASK_DECLARE_8(16), MASK_DECLARE_8(24), 648 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
652 #if BITS_PER_LONG > 32 649 #if BITS_PER_LONG > 32
653 MASK_DECLARE_8(32), MASK_DECLARE_8(40), 650 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
654 MASK_DECLARE_8(48), MASK_DECLARE_8(56), 651 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
655 #endif 652 #endif
656 }; 653 };
657 EXPORT_SYMBOL_GPL(cpu_bit_bitmap); 654 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
658 655
659 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL; 656 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
660 EXPORT_SYMBOL(cpu_all_bits); 657 EXPORT_SYMBOL(cpu_all_bits);
661 658
662 #ifdef CONFIG_INIT_ALL_POSSIBLE 659 #ifdef CONFIG_INIT_ALL_POSSIBLE
663 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly 660 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
664 = CPU_BITS_ALL; 661 = CPU_BITS_ALL;
665 #else 662 #else
666 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly; 663 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
667 #endif 664 #endif
668 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits); 665 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
669 EXPORT_SYMBOL(cpu_possible_mask); 666 EXPORT_SYMBOL(cpu_possible_mask);
670 667
671 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly; 668 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
672 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits); 669 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
673 EXPORT_SYMBOL(cpu_online_mask); 670 EXPORT_SYMBOL(cpu_online_mask);
674 671
675 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly; 672 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
676 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits); 673 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
677 EXPORT_SYMBOL(cpu_present_mask); 674 EXPORT_SYMBOL(cpu_present_mask);
678 675
679 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly; 676 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
680 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits); 677 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
681 EXPORT_SYMBOL(cpu_active_mask); 678 EXPORT_SYMBOL(cpu_active_mask);
682 679
683 void set_cpu_possible(unsigned int cpu, bool possible) 680 void set_cpu_possible(unsigned int cpu, bool possible)
684 { 681 {
685 if (possible) 682 if (possible)
686 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits)); 683 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
687 else 684 else
688 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits)); 685 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
689 } 686 }
690 687
691 void set_cpu_present(unsigned int cpu, bool present) 688 void set_cpu_present(unsigned int cpu, bool present)
692 { 689 {
693 if (present) 690 if (present)
694 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits)); 691 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
695 else 692 else
696 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits)); 693 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
697 } 694 }
698 695
699 void set_cpu_online(unsigned int cpu, bool online) 696 void set_cpu_online(unsigned int cpu, bool online)
700 { 697 {
701 if (online) 698 if (online)
702 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits)); 699 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
703 else 700 else
704 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits)); 701 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
705 } 702 }
706 703
707 void set_cpu_active(unsigned int cpu, bool active) 704 void set_cpu_active(unsigned int cpu, bool active)
708 { 705 {
709 if (active) 706 if (active)
710 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits)); 707 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
711 else 708 else
712 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits)); 709 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
713 } 710 }
714 711
715 void init_cpu_present(const struct cpumask *src) 712 void init_cpu_present(const struct cpumask *src)
716 { 713 {
717 cpumask_copy(to_cpumask(cpu_present_bits), src); 714 cpumask_copy(to_cpumask(cpu_present_bits), src);
718 } 715 }
719 716
720 void init_cpu_possible(const struct cpumask *src) 717 void init_cpu_possible(const struct cpumask *src)
721 { 718 {
722 cpumask_copy(to_cpumask(cpu_possible_bits), src); 719 cpumask_copy(to_cpumask(cpu_possible_bits), src);
723 } 720 }
724 721
725 void init_cpu_online(const struct cpumask *src) 722 void init_cpu_online(const struct cpumask *src)
726 { 723 {
727 cpumask_copy(to_cpumask(cpu_online_bits), src); 724 cpumask_copy(to_cpumask(cpu_online_bits), src);
728 } 725 }
729 726