Commit 14e94194d10ce2b4207ce7bcdcd5e92a1977fe9f

Authored by Linus Torvalds

Merge tag 'pm+acpi-3.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael Wysocki:

 - ACPI-based memory hotplug stopped working after a recent change,
   because it's not possible to associate sufficiently many "physical"
   devices with one ACPI device object due to an artificial limit.  Fix
   from Rafael J Wysocki removes that limit and makes memory hotplug
   work again.

 - A change made in 3.9 uncovered a bug in the ACPI processor driver
   preventing NUMA nodes from being put offline due to an ordering
   issue.  Fix from Yasuaki Ishimatsu changes the ordering to make
   things work again.

 - One of the recent ACPI video commits (that hasn't been reverted so
   far) uncovered a bug in the code handling quirky BIOSes that caused
   some Asus machines to boot with the backlight completely off, which
   made them quite difficult to use afterward.  Fix from Felipe
   Contreras improves the quirk to cover this particular case correctly.

 - A cpufreq user space interface change made in 3.10 inadvertently
   renamed the ignore_nice_load sysfs attribute to ignore_nice which
   resulted in some confusion.  Fix from Viresh Kumar changes the name
   back to ignore_nice_load.

 - An initialization ordering change made in 3.9 broke cpufreq on
   loongson2 boards.  Fix from Aaro Koskinen restores the correct
   initialization ordering there.

 - Fix breakage resulting from a mistake made in 3.9 that caused the
   detection of some graphics adapters (which were detected correctly
   before) to fail.  The affected systems' ACPI tables contain two
   objects representing the same PCIe port, both of which appear as
   "enabled", and we are expected to guess which one to use.  We used
   to choose the right one by pure luck, but when we tried to address
   another similar corner case, the luck went away.  This time the
   guessing is a bit more educated, which is reported to work on those
   systems (a simplified sketch of the new selection policy follows
   this list).

 - The /proc/acpi/wakeup interface code is missing some locking which
   may lead to breakage if that file is written or read during hotplug
   of wakeup devices.  That should be rare but still possible, so it's
   better to start using the appropriate locking there.
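
The simplified sketch of the _ADR selection policy referenced in the
graphics-adapter item above (illustrative only; the helper name is made up,
and the logic is distilled from the do_find_child() and
acpi_extra_checks_passed() hunks in drivers/acpi/glue.c further down, with
the ACPICA namespace walk and error handling omitted):

/*
 * Keep the first object whose _ADR matches; if a second match turns up,
 * prefer whichever one passes the extra checks: _STA reports the device
 * as enabled and, for bridges, at least one child already has a
 * struct acpi_device attached.
 */
static acpi_handle pick_between_matches(acpi_handle first, acpi_handle second,
					bool is_bridge)
{
	if (acpi_extra_checks_passed(first, is_bridge))
		return first;	/* the earlier match is good enough */
	if (acpi_extra_checks_passed(second, is_bridge))
		return second;	/* the new match looks better */
	return first;		/* neither passes; keep the earlier one and keep walking */
}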

* tag 'pm+acpi-3.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI: Try harder to resolve _ADR collisions for bridges
  cpufreq: rename ignore_nice as ignore_nice_load
  cpufreq: loongson2: fix regression related to clock management
  ACPI / processor: move try_offline_node() after acpi_unmap_lsapic()
  ACPI: Drop physical_node_id_bitmap from struct acpi_device
  ACPI / PM: Walk physical_node_list under physical_node_lock
  ACPI / video: improve quirk check in acpi_video_bqc_quirk()

Showing 11 changed files

drivers/acpi/acpi_processor.c
... ... @@ -451,13 +451,14 @@
451 451 /* Clean up. */
452 452 per_cpu(processor_device_array, pr->id) = NULL;
453 453 per_cpu(processors, pr->id) = NULL;
454   - try_offline_node(cpu_to_node(pr->id));
455 454  
456 455 /* Remove the CPU. */
457 456 get_online_cpus();
458 457 arch_unregister_cpu(pr->id);
459 458 acpi_unmap_lsapic(pr->id);
460 459 put_online_cpus();
  460 +
  461 + try_offline_node(cpu_to_node(pr->id));
461 462  
462 463 out:
463 464 free_cpumask_var(pr->throttling.shared_cpu_map);
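
Why the reordering above matters, as a rough sketch: try_offline_node()
refuses to take a node offline while any present CPU still maps to it, and
acpi_unmap_lsapic() is what removes the unplugged CPU from the present map,
so the node check can only succeed once the unmap has happened.  The helper
below is a hedged illustration of that dependency, not the actual
memory-hotplug code:

/*
 * Illustration only: a node cannot go offline while a present CPU still
 * belongs to it, and acpi_unmap_lsapic() is what clears that CPU's
 * presence.  Hence try_offline_node() has to run after the unmap.
 */
static bool node_still_has_present_cpu(int nid)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (cpu_to_node(cpu) == nid)
			return true;	/* try_offline_node() would bail out here */

	return false;
}
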
drivers/acpi/glue.c
... ... @@ -31,6 +31,7 @@
31 31 static DECLARE_RWSEM(bus_type_sem);
32 32  
33 33 #define PHYSICAL_NODE_STRING "physical_node"
  34 +#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
34 35  
35 36 int register_acpi_bus_type(struct acpi_bus_type *type)
36 37 {
... ... @@ -78,41 +79,108 @@
78 79 return ret;
79 80 }
80 81  
81   -static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
82   - void *addr_p, void **ret_p)
  82 +static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
  83 + void *not_used, void **ret_p)
83 84 {
84   - unsigned long long addr, sta;
  85 + struct acpi_device *adev = NULL;
  86 +
  87 + acpi_bus_get_device(handle, &adev);
  88 + if (adev) {
  89 + *ret_p = handle;
  90 + return AE_CTRL_TERMINATE;
  91 + }
  92 + return AE_OK;
  93 +}
  94 +
  95 +static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
  96 +{
  97 + unsigned long long sta;
85 98 acpi_status status;
86 99  
  100 + status = acpi_bus_get_status_handle(handle, &sta);
  101 + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
  102 + return false;
  103 +
  104 + if (is_bridge) {
  105 + void *test = NULL;
  106 +
  107 + /* Check if this object has at least one child device. */
  108 + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
  109 + acpi_dev_present, NULL, NULL, &test);
  110 + return !!test;
  111 + }
  112 + return true;
  113 +}
  114 +
  115 +struct find_child_context {
  116 + u64 addr;
  117 + bool is_bridge;
  118 + acpi_handle ret;
  119 + bool ret_checked;
  120 +};
  121 +
  122 +static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
  123 + void *data, void **not_used)
  124 +{
  125 + struct find_child_context *context = data;
  126 + unsigned long long addr;
  127 + acpi_status status;
  128 +
87 129 status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
88   - if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
89   - *ret_p = handle;
90   - status = acpi_bus_get_status_handle(handle, &sta);
91   - if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
  130 + if (ACPI_FAILURE(status) || addr != context->addr)
  131 + return AE_OK;
  132 +
  133 + if (!context->ret) {
  134 + /* This is the first matching object. Save its handle. */
  135 + context->ret = handle;
  136 + return AE_OK;
  137 + }
  138 + /*
  139 + * There is more than one matching object with the same _ADR value.
  140 + * That really is unexpected, so we are kind of beyond the scope of the
  141 + * spec here. We have to choose which one to return, though.
  142 + *
  143 + * First, check if the previously found object is good enough and return
  144 + * its handle if so. Second, check the same for the object that we've
  145 + * just found.
  146 + */
  147 + if (!context->ret_checked) {
  148 + if (acpi_extra_checks_passed(context->ret, context->is_bridge))
92 149 return AE_CTRL_TERMINATE;
  150 + else
  151 + context->ret_checked = true;
93 152 }
  153 + if (acpi_extra_checks_passed(handle, context->is_bridge)) {
  154 + context->ret = handle;
  155 + return AE_CTRL_TERMINATE;
  156 + }
94 157 return AE_OK;
95 158 }
96 159  
97   -acpi_handle acpi_get_child(acpi_handle parent, u64 address)
  160 +acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
98 161 {
99   - void *ret = NULL;
  162 + if (parent) {
  163 + struct find_child_context context = {
  164 + .addr = addr,
  165 + .is_bridge = is_bridge,
  166 + };
100 167  
101   - if (!parent)
102   - return NULL;
103   -
104   - acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
105   - do_acpi_find_child, &address, &ret);
106   - return (acpi_handle)ret;
  168 + acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
  169 + NULL, &context, NULL);
  170 + return context.ret;
  171 + }
  172 + return NULL;
107 173 }
108   -EXPORT_SYMBOL(acpi_get_child);
  174 +EXPORT_SYMBOL_GPL(acpi_find_child);
109 175  
110 176 int acpi_bind_one(struct device *dev, acpi_handle handle)
111 177 {
112 178 struct acpi_device *acpi_dev;
113 179 acpi_status status;
114 180 struct acpi_device_physical_node *physical_node, *pn;
115   - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
  181 + char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
  182 + struct list_head *physnode_list;
  183 + unsigned int node_id;
116 184 int retval = -EINVAL;
117 185  
118 186 if (ACPI_HANDLE(dev)) {
... ... @@ -139,25 +207,27 @@
139 207  
140 208 mutex_lock(&acpi_dev->physical_node_lock);
141 209  
142   - /* Sanity check. */
143   - list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
  210 + /*
  211 + * Keep the list sorted by node_id so that the IDs of removed nodes can
  212 + * be recycled easily.
  213 + */
  214 + physnode_list = &acpi_dev->physical_node_list;
  215 + node_id = 0;
  216 + list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
  217 + /* Sanity check. */
144 218 if (pn->dev == dev) {
145 219 dev_warn(dev, "Already associated with ACPI node\n");
146 220 goto err_free;
147 221 }
148   -
149   - /* allocate physical node id according to physical_node_id_bitmap */
150   - physical_node->node_id =
151   - find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
152   - ACPI_MAX_PHYSICAL_NODE);
153   - if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
154   - retval = -ENOSPC;
155   - goto err_free;
  222 + if (pn->node_id == node_id) {
  223 + physnode_list = &pn->node;
  224 + node_id++;
  225 + }
156 226 }
157 227  
158   - set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
  228 + physical_node->node_id = node_id;
159 229 physical_node->dev = dev;
160   - list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
  230 + list_add(&physical_node->node, physnode_list);
161 231 acpi_dev->physical_node_count++;
162 232  
163 233 mutex_unlock(&acpi_dev->physical_node_lock);
... ... @@ -208,7 +278,7 @@
208 278  
209 279 mutex_lock(&acpi_dev->physical_node_lock);
210 280 list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
211   - char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
  281 + char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
212 282  
213 283 entry = list_entry(node, struct acpi_device_physical_node,
214 284 node);
... ... @@ -216,7 +286,6 @@
216 286 continue;
217 287  
218 288 list_del(node);
219   - clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
220 289  
221 290 acpi_dev->physical_node_count--;
222 291  
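
The node_id allocation introduced in the acpi_bind_one() hunk above can be
summarized in isolation (a sketch only, with the locking and the "already
associated" sanity check stripped out): the list is kept sorted by node_id
and the first gap is handed out, so IDs released by acpi_unbind_one() get
recycled and no fixed-size bitmap is needed any more.

/*
 * Simplified model of the new allocation: walk the sorted list and stop
 * at the first hole.  The caller inserts the new entry right after the
 * last node that matched, which keeps the list sorted.
 */
static unsigned int sketch_alloc_node_id(struct acpi_device *adev,
					 struct list_head **insert_after)
{
	struct acpi_device_physical_node *pn;
	unsigned int node_id = 0;

	*insert_after = &adev->physical_node_list;
	list_for_each_entry(pn, &adev->physical_node_list, node) {
		if (pn->node_id != node_id)
			break;		/* hole found: reuse this ID */
		*insert_after = &pn->node;
		node_id++;
	}
	return node_id;
}
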
drivers/acpi/proc.c
... ... @@ -311,6 +311,8 @@
311 311 dev->pnp.bus_id,
312 312 (u32) dev->wakeup.sleep_state);
313 313  
  314 + mutex_lock(&dev->physical_node_lock);
  315 +
314 316 if (!dev->physical_node_count) {
315 317 seq_printf(seq, "%c%-8s\n",
316 318 dev->wakeup.flags.run_wake ? '*' : ' ',
... ... @@ -338,6 +340,8 @@
338 340 put_device(ldev);
339 341 }
340 342 }
  343 +
  344 + mutex_unlock(&dev->physical_node_lock);
341 345 }
342 346 mutex_unlock(&acpi_device_lock);
343 347 return 0;
344 348  
... ... @@ -347,12 +351,16 @@
347 351 {
348 352 struct acpi_device_physical_node *entry;
349 353  
  354 + mutex_lock(&adev->physical_node_lock);
  355 +
350 356 list_for_each_entry(entry,
351 357 &adev->physical_node_list, node)
352 358 if (entry->dev && device_can_wakeup(entry->dev)) {
353 359 bool enable = !device_may_wakeup(entry->dev);
354 360 device_set_wakeup_enable(entry->dev, enable);
355 361 }
  362 +
  363 + mutex_unlock(&adev->physical_node_lock);
356 364 }
357 365  
358 366 static ssize_t
drivers/acpi/video.c
... ... @@ -689,7 +689,7 @@
689 689 * Some systems always report current brightness level as maximum
690 690 * through _BQC, we need to test another value for them.
691 691 */
692   - test_level = current_level == max_level ? br->levels[2] : max_level;
  692 + test_level = current_level == max_level ? br->levels[3] : max_level;
693 693  
694 694 result = acpi_video_device_lcd_set_level(device, test_level);
695 695 if (result)
drivers/cpufreq/cpufreq_conservative.c
... ... @@ -221,8 +221,8 @@
221 221 return count;
222 222 }
223 223  
224   -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
225   - size_t count)
  224 +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
  225 + const char *buf, size_t count)
226 226 {
227 227 struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
228 228 unsigned int input, j;
229 229  
... ... @@ -235,10 +235,10 @@
235 235 if (input > 1)
236 236 input = 1;
237 237  
238   - if (input == cs_tuners->ignore_nice) /* nothing to do */
  238 + if (input == cs_tuners->ignore_nice_load) /* nothing to do */
239 239 return count;
240 240  
241   - cs_tuners->ignore_nice = input;
  241 + cs_tuners->ignore_nice_load = input;
242 242  
243 243 /* we need to re-evaluate prev_cpu_idle */
244 244 for_each_online_cpu(j) {
... ... @@ -246,7 +246,7 @@
246 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j);
247 247 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
248 248 &dbs_info->cdbs.prev_cpu_wall, 0);
249   - if (cs_tuners->ignore_nice)
  249 + if (cs_tuners->ignore_nice_load)
250 250 dbs_info->cdbs.prev_cpu_nice =
251 251 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
252 252 }
... ... @@ -279,7 +279,7 @@
279 279 show_store_one(cs, sampling_down_factor);
280 280 show_store_one(cs, up_threshold);
281 281 show_store_one(cs, down_threshold);
282   -show_store_one(cs, ignore_nice);
  282 +show_store_one(cs, ignore_nice_load);
283 283 show_store_one(cs, freq_step);
284 284 declare_show_sampling_rate_min(cs);
285 285  
... ... @@ -287,7 +287,7 @@
287 287 gov_sys_pol_attr_rw(sampling_down_factor);
288 288 gov_sys_pol_attr_rw(up_threshold);
289 289 gov_sys_pol_attr_rw(down_threshold);
290   -gov_sys_pol_attr_rw(ignore_nice);
  290 +gov_sys_pol_attr_rw(ignore_nice_load);
291 291 gov_sys_pol_attr_rw(freq_step);
292 292 gov_sys_pol_attr_ro(sampling_rate_min);
293 293  
... ... @@ -297,7 +297,7 @@
297 297 &sampling_down_factor_gov_sys.attr,
298 298 &up_threshold_gov_sys.attr,
299 299 &down_threshold_gov_sys.attr,
300   - &ignore_nice_gov_sys.attr,
  300 + &ignore_nice_load_gov_sys.attr,
301 301 &freq_step_gov_sys.attr,
302 302 NULL
303 303 };
... ... @@ -313,7 +313,7 @@
313 313 &sampling_down_factor_gov_pol.attr,
314 314 &up_threshold_gov_pol.attr,
315 315 &down_threshold_gov_pol.attr,
316   - &ignore_nice_gov_pol.attr,
  316 + &ignore_nice_load_gov_pol.attr,
317 317 &freq_step_gov_pol.attr,
318 318 NULL
319 319 };
... ... @@ -338,7 +338,7 @@
338 338 tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
339 339 tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
340 340 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
341   - tuners->ignore_nice = 0;
  341 + tuners->ignore_nice_load = 0;
342 342 tuners->freq_step = DEF_FREQUENCY_STEP;
343 343  
344 344 dbs_data->tuners = tuners;
drivers/cpufreq/cpufreq_governor.c
... ... @@ -47,9 +47,9 @@
47 47 unsigned int j;
48 48  
49 49 if (dbs_data->cdata->governor == GOV_ONDEMAND)
50   - ignore_nice = od_tuners->ignore_nice;
  50 + ignore_nice = od_tuners->ignore_nice_load;
51 51 else
52   - ignore_nice = cs_tuners->ignore_nice;
  52 + ignore_nice = cs_tuners->ignore_nice_load;
53 53  
54 54 policy = cdbs->cur_policy;
55 55  
56 56  
... ... @@ -298,12 +298,12 @@
298 298 cs_tuners = dbs_data->tuners;
299 299 cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
300 300 sampling_rate = cs_tuners->sampling_rate;
301   - ignore_nice = cs_tuners->ignore_nice;
  301 + ignore_nice = cs_tuners->ignore_nice_load;
302 302 } else {
303 303 od_tuners = dbs_data->tuners;
304 304 od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
305 305 sampling_rate = od_tuners->sampling_rate;
306   - ignore_nice = od_tuners->ignore_nice;
  306 + ignore_nice = od_tuners->ignore_nice_load;
307 307 od_ops = dbs_data->cdata->gov_ops;
308 308 io_busy = od_tuners->io_is_busy;
309 309 }
drivers/cpufreq/cpufreq_governor.h
... ... @@ -165,7 +165,7 @@
165 165  
166 166 /* Per policy Governers sysfs tunables */
167 167 struct od_dbs_tuners {
168   - unsigned int ignore_nice;
  168 + unsigned int ignore_nice_load;
169 169 unsigned int sampling_rate;
170 170 unsigned int sampling_down_factor;
171 171 unsigned int up_threshold;
... ... @@ -175,7 +175,7 @@
175 175 };
176 176  
177 177 struct cs_dbs_tuners {
178   - unsigned int ignore_nice;
  178 + unsigned int ignore_nice_load;
179 179 unsigned int sampling_rate;
180 180 unsigned int sampling_down_factor;
181 181 unsigned int up_threshold;
drivers/cpufreq/cpufreq_ondemand.c
... ... @@ -403,8 +403,8 @@
403 403 return count;
404 404 }
405 405  
406   -static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
407   - size_t count)
  406 +static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
  407 + const char *buf, size_t count)
408 408 {
409 409 struct od_dbs_tuners *od_tuners = dbs_data->tuners;
410 410 unsigned int input;
411 411  
... ... @@ -419,10 +419,10 @@
419 419 if (input > 1)
420 420 input = 1;
421 421  
422   - if (input == od_tuners->ignore_nice) { /* nothing to do */
  422 + if (input == od_tuners->ignore_nice_load) { /* nothing to do */
423 423 return count;
424 424 }
425   - od_tuners->ignore_nice = input;
  425 + od_tuners->ignore_nice_load = input;
426 426  
427 427 /* we need to re-evaluate prev_cpu_idle */
428 428 for_each_online_cpu(j) {
... ... @@ -430,7 +430,7 @@
430 430 dbs_info = &per_cpu(od_cpu_dbs_info, j);
431 431 dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
432 432 &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
433   - if (od_tuners->ignore_nice)
  433 + if (od_tuners->ignore_nice_load)
434 434 dbs_info->cdbs.prev_cpu_nice =
435 435 kcpustat_cpu(j).cpustat[CPUTIME_NICE];
436 436  
... ... @@ -461,7 +461,7 @@
461 461 show_store_one(od, io_is_busy);
462 462 show_store_one(od, up_threshold);
463 463 show_store_one(od, sampling_down_factor);
464   -show_store_one(od, ignore_nice);
  464 +show_store_one(od, ignore_nice_load);
465 465 show_store_one(od, powersave_bias);
466 466 declare_show_sampling_rate_min(od);
467 467  
... ... @@ -469,7 +469,7 @@
469 469 gov_sys_pol_attr_rw(io_is_busy);
470 470 gov_sys_pol_attr_rw(up_threshold);
471 471 gov_sys_pol_attr_rw(sampling_down_factor);
472   -gov_sys_pol_attr_rw(ignore_nice);
  472 +gov_sys_pol_attr_rw(ignore_nice_load);
473 473 gov_sys_pol_attr_rw(powersave_bias);
474 474 gov_sys_pol_attr_ro(sampling_rate_min);
475 475  
... ... @@ -478,7 +478,7 @@
478 478 &sampling_rate_gov_sys.attr,
479 479 &up_threshold_gov_sys.attr,
480 480 &sampling_down_factor_gov_sys.attr,
481   - &ignore_nice_gov_sys.attr,
  481 + &ignore_nice_load_gov_sys.attr,
482 482 &powersave_bias_gov_sys.attr,
483 483 &io_is_busy_gov_sys.attr,
484 484 NULL
... ... @@ -494,7 +494,7 @@
494 494 &sampling_rate_gov_pol.attr,
495 495 &up_threshold_gov_pol.attr,
496 496 &sampling_down_factor_gov_pol.attr,
497   - &ignore_nice_gov_pol.attr,
  497 + &ignore_nice_load_gov_pol.attr,
498 498 &powersave_bias_gov_pol.attr,
499 499 &io_is_busy_gov_pol.attr,
500 500 NULL
... ... @@ -544,7 +544,7 @@
544 544 }
545 545  
546 546 tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
547   - tuners->ignore_nice = 0;
  547 + tuners->ignore_nice_load = 0;
548 548 tuners->powersave_bias = default_powersave_bias;
549 549 tuners->io_is_busy = should_io_be_busy();
550 550  
drivers/cpufreq/loongson2_cpufreq.c
... ... @@ -118,17 +118,18 @@
118 118 clk_put(cpuclk);
119 119 return -EINVAL;
120 120 }
121   - ret = clk_set_rate(cpuclk, rate);
122   - if (ret) {
123   - clk_put(cpuclk);
124   - return ret;
125   - }
126 121  
127 122 /* clock table init */
128 123 for (i = 2;
129 124 (loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
130 125 i++)
131 126 loongson2_clockmod_table[i].frequency = (rate * i) / 8;
  127 +
  128 + ret = clk_set_rate(cpuclk, rate);
  129 + if (ret) {
  130 + clk_put(cpuclk);
  131 + return ret;
  132 + }
132 133  
133 134 policy->cur = loongson2_cpufreq_get(policy->cpu);
134 135  
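
The reordering above appears to matter because the platform's clk_set_rate()
implementation validates the requested rate against
loongson2_clockmod_table[], so the table must be populated before the call
(an assumption stated for illustration; the loongson2 clk code itself is not
part of this diff).  A minimal sketch of that dependency, with a made-up
helper name:

/*
 * Hedged sketch, not the actual loongson2 clk code: if set_rate looks
 * the requested frequency up in loongson2_clockmod_table[], calling it
 * before the table has been filled in can only fail, which is the
 * regression the reordering above addresses.
 */
static int sketch_set_rate(unsigned int rate_khz)
{
	int i;

	for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (loongson2_clockmod_table[i].frequency == rate_khz)
			return 0;	/* known rate, proceed */

	return -ENOTSUPP;		/* table not initialized yet: reject */
}
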
drivers/pci/pci-acpi.c
... ... @@ -317,13 +317,20 @@
317 317 /* ACPI bus type */
318 318 static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
319 319 {
320   - struct pci_dev * pci_dev;
321   - u64 addr;
  320 + struct pci_dev *pci_dev = to_pci_dev(dev);
  321 + bool is_bridge;
  322 + u64 addr;
322 323  
323   - pci_dev = to_pci_dev(dev);
  324 + /*
  325 + * pci_is_bridge() is not suitable here, because pci_dev->subordinate
  326 + * is set only after acpi_pci_find_device() has been called for the
  327 + * given device.
  328 + */
  329 + is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
  330 + || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
324 331 /* Please ref to ACPI spec for the syntax of _ADR */
325 332 addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
326   - *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
  333 + *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
327 334 if (!*handle)
328 335 return -ENODEV;
329 336 return 0;
include/acpi/acpi_bus.h
... ... @@ -274,15 +274,12 @@
274 274 };
275 275  
276 276 struct acpi_device_physical_node {
277   - u8 node_id;
  277 + unsigned int node_id;
278 278 struct list_head node;
279 279 struct device *dev;
280 280 bool put_online:1;
281 281 };
282 282  
283   -/* set maximum of physical nodes to 32 for expansibility */
284   -#define ACPI_MAX_PHYSICAL_NODE 32
285   -
286 283 /* Device */
287 284 struct acpi_device {
288 285 int device_type;
289 286  
... ... @@ -302,10 +299,9 @@
302 299 struct acpi_driver *driver;
303 300 void *driver_data;
304 301 struct device dev;
305   - u8 physical_node_count;
  302 + unsigned int physical_node_count;
306 303 struct list_head physical_node_list;
307 304 struct mutex physical_node_lock;
308   - DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
309 305 struct list_head power_dependent;
310 306 void (*remove)(struct acpi_device *);
311 307 };
... ... @@ -445,7 +441,11 @@
445 441 };
446 442  
447 443 /* helper */
448   -acpi_handle acpi_get_child(acpi_handle, u64);
  444 +acpi_handle acpi_find_child(acpi_handle, u64, bool);
  445 +static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
  446 +{
  447 + return acpi_find_child(handle, addr, false);
  448 +}
449 449 int acpi_is_root_bridge(acpi_handle);
450 450 struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
451 451 #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
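
A minimal usage sketch of the two entry points added above (the caller name
is hypothetical; the real bridge-aware caller is the acpi_pci_find_device()
hunk in drivers/pci/pci-acpi.c earlier in this diff): existing non-bridge
callers keep the old calling convention through the acpi_get_child() inline,
while bridge-aware callers pass the extra flag so the _ADR collision
heuristics can prefer an enabled object that actually has children.

/*
 * Hypothetical caller, for illustration only.
 */
static acpi_handle lookup_child(acpi_handle parent, u64 adr, bool is_bridge)
{
	if (is_bridge)
		return acpi_find_child(parent, adr, true);

	return acpi_get_child(parent, adr);	/* equivalent to is_bridge == false */
}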