Commit e9f29c9a56ca06d0effa557823a737cbe7ec09f7

Authored by Linus Torvalds

Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (27 commits)
  x86: allocate space within a region top-down
  x86: update iomem_resource end based on CPU physical address capabilities
  x86/PCI: allocate space from the end of a region, not the beginning
  PCI: allocate bus resources from the top down
  resources: support allocating space within a region from the top down
  resources: handle overflow when aligning start of available area
  resources: ensure callback doesn't allocate outside available space
  resources: factor out resource_clip() to simplify find_resource()
  resources: add a default alignf to simplify find_resource()
  x86/PCI: MMCONFIG: fix region end calculation
  PCI: Add support for polling PME state on suspended legacy PCI devices
  PCI: Export some PCI PM functionality
  PCI: fix message typo
  PCI: log vendor/device ID always
  PCI: update Intel chipset names and defines
  PCI: use new ccflags variable in Makefile
  PCI: add PCI_MSIX_TABLE/PBA defines
  PCI: add PCI vendor id for STmicroelectronics
  x86/PCI: irq and pci_ids patch for Intel Patsburg DeviceIDs
  PCI: OLPC: Only enable PCI configuration type override on XO-1
  ...

Showing 27 changed files

Documentation/kernel-parameters.txt
... ... @@ -2175,6 +2175,11 @@
2175 2175 reset_devices [KNL] Force drivers to reset the underlying device
2176 2176 during initialization.
2177 2177  
  2178 + resource_alloc_from_bottom
  2179 + Allocate new resources from the beginning of available
  2180 + space, not the end. If you need to use this, please
  2181 + report a bug.
  2182 +
2178 2183 resume= [SWSUSP]
2179 2184 Specify the partition device for software suspend
2180 2185  
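(Usage note, not part of the patch: the option is a bare flag handled via early_param(), so trying it only requires appending the word "resource_alloc_from_bottom" to the kernel command line; it takes no value.)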
arch/x86/kernel/setup.c
... ... @@ -769,6 +769,8 @@
769 769  
770 770 x86_init.oem.arch_setup();
771 771  
  772 + resource_alloc_from_bottom = 0;
  773 + iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
772 774 setup_memory_map();
773 775 parse_setup_data();
774 776 /* update the e820_saved too */
arch/x86/pci/i386.c
... ... @@ -65,16 +65,21 @@
65 65 resource_size_t size, resource_size_t align)
66 66 {
67 67 struct pci_dev *dev = data;
68   - resource_size_t start = res->start;
  68 + resource_size_t start = round_down(res->end - size + 1, align);
69 69  
70 70 if (res->flags & IORESOURCE_IO) {
71   - if (skip_isa_ioresource_align(dev))
72   - return start;
73   - if (start & 0x300)
74   - start = (start + 0x3ff) & ~0x3ff;
  71 +
  72 + /*
  73 + * If we're avoiding ISA aliases, the largest contiguous I/O
  74 + * port space is 256 bytes. Clearing bits 8 and 9 preserves
  75 + * all 256-byte and smaller alignments, so the result will
  76 + * still be correctly aligned.
  77 + */
  78 + if (!skip_isa_ioresource_align(dev))
  79 + start &= ~0x300;
75 80 } else if (res->flags & IORESOURCE_MEM) {
76 81 if (start < BIOS_END)
77   - start = BIOS_END;
  82 + start = res->end; /* fail; no space */
78 83 }
79 84 return start;
80 85 }
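To make the top-down placement above concrete, here is a minimal userspace sketch (not part of the patch; round_down() is re-implemented locally and the alignment is assumed to be a power of two). It places a 0x100-byte I/O window as high as possible in a region ending at 0xdfff and then strips the ISA-alias bits, as the hunk above does:

#include <stdio.h>
#include <stdint.h>

/* Local stand-in for the kernel's round_down(); align must be a power of two. */
static uint64_t round_down(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);
}

int main(void)
{
	uint64_t res_end = 0xdfff, size = 0x100, align = 0x100;
	uint64_t start = round_down(res_end - size + 1, align);	/* 0xdf00 */

	start &= ~0x300ULL;	/* clear ISA-alias bits 8 and 9 -> 0xdc00, still 0x100-aligned */
	printf("start = %#llx\n", (unsigned long long)start);
	return 0;
}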
arch/x86/pci/irq.c
... ... @@ -584,27 +584,28 @@
584 584 case PCI_DEVICE_ID_INTEL_ICH9_3:
585 585 case PCI_DEVICE_ID_INTEL_ICH9_4:
586 586 case PCI_DEVICE_ID_INTEL_ICH9_5:
587   - case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
  587 + case PCI_DEVICE_ID_INTEL_EP80579_0:
588 588 case PCI_DEVICE_ID_INTEL_ICH10_0:
589 589 case PCI_DEVICE_ID_INTEL_ICH10_1:
590 590 case PCI_DEVICE_ID_INTEL_ICH10_2:
591 591 case PCI_DEVICE_ID_INTEL_ICH10_3:
  592 + case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
592 593 r->name = "PIIX/ICH";
593 594 r->get = pirq_piix_get;
594 595 r->set = pirq_piix_set;
595 596 return 1;
596 597 }
597 598  
598   - if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
599   - (device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
  599 + if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
  600 + (device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
600 601 r->name = "PIIX/ICH";
601 602 r->get = pirq_piix_get;
602 603 r->set = pirq_piix_set;
603 604 return 1;
604 605 }
605 606  
606   - if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
607   - (device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
  607 + if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
  608 + (device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
608 609 r->name = "PIIX/ICH";
609 610 r->get = pirq_piix_get;
610 611 r->set = pirq_piix_set;
arch/x86/pci/mmconfig-shared.c
... ... @@ -65,7 +65,6 @@
65 65 int end, u64 addr)
66 66 {
67 67 struct pci_mmcfg_region *new;
68   - int num_buses;
69 68 struct resource *res;
70 69  
71 70 if (addr == 0)
72 71  
... ... @@ -82,10 +81,9 @@
82 81  
83 82 list_add_sorted(new);
84 83  
85   - num_buses = end - start + 1;
86 84 res = &new->res;
87 85 res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
88   - res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
  86 + res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
89 87 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
90 88 snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
91 89 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
drivers/i2c/busses/Kconfig
... ... @@ -95,9 +95,9 @@
95 95 ESB2
96 96 ICH8
97 97 ICH9
98   - Tolapai
  98 + EP80579 (Tolapai)
99 99 ICH10
100   - 3400/5 Series (PCH)
  100 + 5/3400 Series (PCH)
101 101 Cougar Point (PCH)
102 102  
103 103 This driver can also be built as a module. If so, the module
drivers/i2c/busses/i2c-i801.c
... ... @@ -38,10 +38,10 @@
38 38 82801G (ICH7) 0x27da 32 hard yes yes yes
39 39 82801H (ICH8) 0x283e 32 hard yes yes yes
40 40 82801I (ICH9) 0x2930 32 hard yes yes yes
41   - Tolapai 0x5032 32 hard yes yes yes
  41 + EP80579 (Tolapai) 0x5032 32 hard yes yes yes
42 42 ICH10 0x3a30 32 hard yes yes yes
43 43 ICH10 0x3a60 32 hard yes yes yes
44   - 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
  44 + 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
45 45 Cougar Point (PCH) 0x1c22 32 hard yes yes yes
46 46  
47 47 Features supported by this driver:
48 48  
... ... @@ -587,11 +587,11 @@
587 587 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
588 588 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
589 589 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
590   - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
  590 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
591 591 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
592 592 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
593   - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
594   - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
  593 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
  594 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
595 595 { 0, }
596 596 };
597 597  
drivers/pci/Makefile
... ... @@ -65,7 +65,5 @@
65 65  
66 66 obj-$(CONFIG_PCI_STUB) += pci-stub.o
67 67  
68   -ifeq ($(CONFIG_PCI_DEBUG),y)
69   -EXTRA_CFLAGS += -DDEBUG
70   -endif
  68 +ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
drivers/pci/bus.c
... ... @@ -64,6 +64,49 @@
64 64 }
65 65 }
66 66  
  67 +/*
  68 + * Find the highest-address bus resource below the cursor "res". If the
  69 + * cursor is NULL, return the highest resource.
  70 + */
  71 +static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
  72 + unsigned int type,
  73 + struct resource *res)
  74 +{
  75 + struct resource *r, *prev = NULL;
  76 + int i;
  77 +
  78 + pci_bus_for_each_resource(bus, r, i) {
  79 + if (!r)
  80 + continue;
  81 +
  82 + if ((r->flags & IORESOURCE_TYPE_BITS) != type)
  83 + continue;
  84 +
  85 + /* If this resource is at or past the cursor, skip it */
  86 + if (res) {
  87 + if (r == res)
  88 + continue;
  89 + if (r->end > res->end)
  90 + continue;
  91 + if (r->end == res->end && r->start > res->start)
  92 + continue;
  93 + }
  94 +
  95 + if (!prev)
  96 + prev = r;
  97 +
  98 + /*
  99 + * A small resource is higher than a large one that ends at
  100 + * the same address.
  101 + */
  102 + if (r->end > prev->end ||
  103 + (r->end == prev->end && r->start > prev->start))
  104 + prev = r;
  105 + }
  106 +
  107 + return prev;
  108 +}
  109 +
67 110 /**
68 111 * pci_bus_alloc_resource - allocate a resource from a parent bus
69 112 * @bus: PCI bus
70 113  
... ... @@ -89,9 +132,10 @@
89 132 resource_size_t),
90 133 void *alignf_data)
91 134 {
92   - int i, ret = -ENOMEM;
  135 + int ret = -ENOMEM;
93 136 struct resource *r;
94 137 resource_size_t max = -1;
  138 + unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
95 139  
96 140 type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
97 141  
... ... @@ -99,10 +143,9 @@
99 143 if (!(res->flags & IORESOURCE_MEM_64))
100 144 max = PCIBIOS_MAX_MEM_32;
101 145  
102   - pci_bus_for_each_resource(bus, r, i) {
103   - if (!r)
104   - continue;
105   -
  146 + /* Look for space at highest addresses first */
  147 + r = pci_bus_find_resource_prev(bus, type, NULL);
  148 + for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
106 149 /* type_mask must match */
107 150 if ((res->flags ^ r->flags) & type_mask)
108 151 continue;
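For intuition on the reverse iteration above (an illustrative note, not from the commit): given bus resources [0x1000-0x1fff], [0x1800-0x1fff], and [0x2000-0x2fff], the loop visits [0x2000-0x2fff] first, then [0x1800-0x1fff], then [0x1000-0x1fff]; the tie at end 0x1fff is broken in favor of the smaller resource with the higher start, per the comparison in pci_bus_find_resource_prev().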
drivers/pci/hotplug/ibmphp_hpc.c
... ... @@ -133,8 +133,8 @@
133 133 debug ("%s - Entry\n", __func__);
134 134  
135 135 mutex_init(&sem_hpcaccess);
136   - init_MUTEX (&semOperations);
137   - init_MUTEX_LOCKED (&sem_exit);
  136 + sema_init(&semOperations, 1);
  137 + sema_init(&sem_exit, 0);
138 138 to_debug = 0;
139 139  
140 140 debug ("%s - Exit\n", __func__);
drivers/pci/msi.h
... ... @@ -22,8 +22,8 @@
22 22 #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
23 23 #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
24 24  
25   -#define msix_table_offset_reg(base) (base + 0x04)
26   -#define msix_pba_offset_reg(base) (base + 0x08)
  25 +#define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE)
  26 +#define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA)
27 27 #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
28 28 #define multi_msix_capable(control) msix_table_size((control))
29 29  
drivers/pci/pci.c
... ... @@ -38,6 +38,19 @@
38 38  
39 39 unsigned int pci_pm_d3_delay;
40 40  
  41 +static void pci_pme_list_scan(struct work_struct *work);
  42 +
  43 +static LIST_HEAD(pci_pme_list);
  44 +static DEFINE_MUTEX(pci_pme_list_mutex);
  45 +static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  46 +
  47 +struct pci_pme_device {
  48 + struct list_head list;
  49 + struct pci_dev *dev;
  50 +};
  51 +
  52 +#define PME_TIMEOUT 1000 /* How long between PME checks */
  53 +
41 54 static void pci_dev_d3_sleep(struct pci_dev *dev)
42 55 {
43 56 unsigned int delay = dev->d3_delay;
44 57  
... ... @@ -1331,7 +1344,33 @@
1331 1344 return !!(dev->pme_support & (1 << state));
1332 1345 }
1333 1346  
  1347 +static void pci_pme_list_scan(struct work_struct *work)
  1348 +{
  1349 + struct pci_pme_device *pme_dev;
  1350 +
  1351 + mutex_lock(&pci_pme_list_mutex);
  1352 + if (!list_empty(&pci_pme_list)) {
  1353 + list_for_each_entry(pme_dev, &pci_pme_list, list)
  1354 + pci_pme_wakeup(pme_dev->dev, NULL);
  1355 + schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
  1356 + }
  1357 + mutex_unlock(&pci_pme_list_mutex);
  1358 +}
  1359 +
1334 1360 /**
  1361 + * pci_external_pme - is a device an external PCI PME source?
  1362 + * @dev: PCI device to check
  1363 + *
  1364 + */
  1365 +
  1366 +static bool pci_external_pme(struct pci_dev *dev)
  1367 +{
  1368 + if (pci_is_pcie(dev) || dev->bus->number == 0)
  1369 + return false;
  1370 + return true;
  1371 +}
  1372 +
  1373 +/**
1335 1374 * pci_pme_active - enable or disable PCI device's PME# function
1336 1375 * @dev: PCI device to handle.
1337 1376 * @enable: 'true' to enable PME# generation; 'false' to disable it.
... ... @@ -1354,6 +1393,44 @@
1354 1393  
1355 1394 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1356 1395  
  1396 + /* PCI (as opposed to PCIe) PME requires that the device have
  1397 + its PME# line hooked up correctly. Not all hardware vendors
  1398 + do this, so the PME never gets delivered and the device
  1399 + remains asleep. The easiest way around this is to
  1400 + periodically walk the list of suspended devices and check
  1401 + whether any have their PME flag set. The assumption is that
  1402 + we'll wake up often enough anyway that this won't be a huge
  1403 + hit, and the power savings from the devices will still be a
  1404 + win. */
  1405 +
  1406 + if (pci_external_pme(dev)) {
  1407 + struct pci_pme_device *pme_dev;
  1408 + if (enable) {
  1409 + pme_dev = kmalloc(sizeof(struct pci_pme_device),
  1410 + GFP_KERNEL);
  1411 + if (!pme_dev)
  1412 + goto out;
  1413 + pme_dev->dev = dev;
  1414 + mutex_lock(&pci_pme_list_mutex);
  1415 + list_add(&pme_dev->list, &pci_pme_list);
  1416 + if (list_is_singular(&pci_pme_list))
  1417 + schedule_delayed_work(&pci_pme_work,
  1418 + msecs_to_jiffies(PME_TIMEOUT));
  1419 + mutex_unlock(&pci_pme_list_mutex);
  1420 + } else {
  1421 + mutex_lock(&pci_pme_list_mutex);
  1422 + list_for_each_entry(pme_dev, &pci_pme_list, list) {
  1423 + if (pme_dev->dev == dev) {
  1424 + list_del(&pme_dev->list);
  1425 + kfree(pme_dev);
  1426 + break;
  1427 + }
  1428 + }
  1429 + mutex_unlock(&pci_pme_list_mutex);
  1430 + }
  1431 + }
  1432 +
  1433 +out:
1357 1434 dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1358 1435 enable ? "enabled" : "disabled");
1359 1436 }
... ... @@ -2689,7 +2766,7 @@
2689 2766  
2690 2767 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
2691 2768 if (!ret)
2692   - ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
  2769 + ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
2693 2770  
2694 2771 return ret;
2695 2772 }
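As a worked example of the (re-indented) formula above: PCI_EXP_DEVCTL_READRQ occupies bits 14:12 of the Device Control register, so a field value of 0 gives 128 << 0 = 128 bytes and a value of 2 gives 128 << 2 = 512 bytes.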
drivers/pci/pci.h
... ... @@ -63,11 +63,8 @@
63 63 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
64 64 extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
65 65 extern void pci_disable_enabled_device(struct pci_dev *dev);
66   -extern bool pci_check_pme_status(struct pci_dev *dev);
67 66 extern int pci_finish_runtime_suspend(struct pci_dev *dev);
68   -extern void pci_wakeup_event(struct pci_dev *dev);
69 67 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
70   -extern void pci_pme_wakeup_bus(struct pci_bus *bus);
71 68 extern void pci_pm_init(struct pci_dev *dev);
72 69 extern void platform_pci_wakeup_init(struct pci_dev *dev);
73 70 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
drivers/pci/pcie/aer/aerdrv.c
... ... @@ -416,7 +416,7 @@
416 416 */
417 417 static int __init aer_service_init(void)
418 418 {
419   - if (!pci_aer_available())
  419 + if (!pci_aer_available() || aer_acpi_firmware_first())
420 420 return -ENXIO;
421 421 return pcie_port_service_register(&aerdriver);
422 422 }
drivers/pci/pcie/aer/aerdrv.h
... ... @@ -132,6 +132,7 @@
132 132  
133 133 #ifdef CONFIG_ACPI_APEI
134 134 extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
  135 +extern bool aer_acpi_firmware_first(void);
135 136 #else
136 137 static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
137 138 {
... ... @@ -139,6 +140,8 @@
139 140 return pci_dev->__aer_firmware_first;
140 141 return 0;
141 142 }
  143 +
  144 +static inline bool aer_acpi_firmware_first(void) { return false; }
142 145 #endif
143 146  
144 147 static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
drivers/pci/pcie/aer/aerdrv_acpi.c
... ... @@ -93,5 +93,39 @@
93 93 aer_set_firmware_first(dev);
94 94 return dev->__aer_firmware_first;
95 95 }
  96 +
  97 +static bool aer_firmware_first;
  98 +
  99 +static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
  100 +{
  101 + struct acpi_hest_aer_common *p;
  102 +
  103 + if (aer_firmware_first)
  104 + return 0;
  105 +
  106 + switch (hest_hdr->type) {
  107 + case ACPI_HEST_TYPE_AER_ROOT_PORT:
  108 + case ACPI_HEST_TYPE_AER_ENDPOINT:
  109 + case ACPI_HEST_TYPE_AER_BRIDGE:
  110 + p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
  111 + aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
  112 + default:
  113 + return 0;
  114 + }
  115 +}
  116 +
  117 +/**
  118 + * aer_acpi_firmware_first - Check if APEI should control AER.
  119 + */
  120 +bool aer_acpi_firmware_first(void)
  121 +{
  122 + static bool parsed = false;
  123 +
  124 + if (!parsed) {
  125 + apei_hest_parse(aer_hest_parse_aff, NULL);
  126 + parsed = true;
  127 + }
  128 + return aer_firmware_first;
  129 +}
96 130 #endif
drivers/pci/pcie/aer/aerdrv_core.c
... ... @@ -754,7 +754,7 @@
754 754 {
755 755 struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
756 756 struct pcie_device *p_device = rpc->rpd;
757   - struct aer_err_source e_src;
  757 + struct aer_err_source uninitialized_var(e_src);
758 758  
759 759 mutex_lock(&rpc->rpc_mutex);
760 760 while (get_e_source(rpc, &e_src))
drivers/pci/pcie/portdrv_acpi.c
... ... @@ -49,7 +49,7 @@
49 49 | OSC_PCI_EXPRESS_PME_CONTROL;
50 50  
51 51 if (pci_aer_available()) {
52   - if (pcie_aer_get_firmware_first(port))
  52 + if (aer_acpi_firmware_first())
53 53 dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
54 54 else
55 55 flags |= OSC_PCI_EXPRESS_AER_CONTROL;
drivers/pci/probe.c
... ... @@ -961,8 +961,8 @@
961 961 dev->class = class;
962 962 class >>= 8;
963 963  
964   - dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
965   - dev->vendor, dev->device, class, dev->hdr_type);
  964 + dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n",
  965 + dev->vendor, dev->device, dev->hdr_type, class);
966 966  
967 967 /* need to have dev->class ready */
968 968 dev->cfg_size = pci_cfg_space_size(dev);
drivers/pci/proc.c
... ... @@ -303,6 +303,7 @@
303 303 .read = proc_bus_pci_read,
304 304 .write = proc_bus_pci_write,
305 305 .unlocked_ioctl = proc_bus_pci_ioctl,
  306 + .compat_ioctl = proc_bus_pci_ioctl,
306 307 #ifdef HAVE_PCI_MMAP
307 308 .open = proc_bus_pci_open,
308 309 .release = proc_bus_pci_release,
drivers/pci/quirks.c
... ... @@ -2297,6 +2297,37 @@
2297 2297 PCI_DEVICE_ID_NVIDIA_NVENET_15,
2298 2298 nvenet_msi_disable);
2299 2299  
  2300 +/*
  2301 + * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
  2302 + * config register. This register controls the routing of legacy interrupts
  2303 + * from devices that route through the MCP55. If this register is misprogrammed,
  2304 + * interrupts are only sent to the BSP, unlike conventional systems where the
  2305 + * irq is broadcast to all online CPUs. Not having this register set
  2306 + * properly prevents kdump from booting up properly, so let's make sure that
  2307 + * we have it set correctly.
  2308 + * Note this is an undocumented register.
  2309 + */
  2310 +static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
  2311 +{
  2312 + u32 cfg;
  2313 +
  2314 + pci_read_config_dword(dev, 0x74, &cfg);
  2315 +
  2316 + if (cfg & ((1 << 2) | (1 << 15))) {
  2317 + printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
  2318 + cfg &= ~((1 << 2) | (1 << 15));
  2319 + pci_write_config_dword(dev, 0x74, cfg);
  2320 + }
  2321 +}
  2322 +
  2323 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
  2324 + PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
  2325 + nvbridge_check_legacy_irq_routing);
  2326 +
  2327 +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
  2328 + PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
  2329 + nvbridge_check_legacy_irq_routing);
  2330 +
2300 2331 static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
2301 2332 {
2302 2333 int pos, ttl = 48;
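Put differently (an illustrative restatement, not from the commit): the quirk clears the mask (1 << 2) | (1 << 15) = 0x8004 in the undocumented dword at config offset 0x74 whenever either bit is found set, restoring broadcast delivery of legacy interrupts.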
drivers/pci/setup-res.c
... ... @@ -85,7 +85,7 @@
85 85 }
86 86 }
87 87 res->flags &= ~IORESOURCE_UNSET;
88   - dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n",
  88 + dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
89 89 resno, res, (unsigned long long)region.start,
90 90 (unsigned long long)region.end);
91 91 }
include/linux/ioport.h
... ... @@ -112,6 +112,7 @@
112 112 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
113 113 extern struct resource ioport_resource;
114 114 extern struct resource iomem_resource;
  115 +extern int resource_alloc_from_bottom;
115 116  
116 117 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
117 118 extern int request_resource(struct resource *root, struct resource *new);
include/linux/pci.h
... ... @@ -541,7 +541,7 @@
541 541 struct module;
542 542 struct pci_driver {
543 543 struct list_head node;
544   - char *name;
  544 + const char *name;
545 545 const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
546 546 int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
547 547 void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
... ... @@ -819,6 +819,9 @@
819 819 int pci_prepare_to_sleep(struct pci_dev *dev);
820 820 int pci_back_from_sleep(struct pci_dev *dev);
821 821 bool pci_dev_run_wake(struct pci_dev *dev);
  822 +bool pci_check_pme_status(struct pci_dev *dev);
  823 +void pci_wakeup_event(struct pci_dev *dev);
  824 +void pci_pme_wakeup_bus(struct pci_bus *bus);
822 825  
823 826 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
824 827 bool enable)
include/linux/pci_ids.h
... ... @@ -767,6 +767,8 @@
767 767 #define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000
768 768 #define PCI_DEVICE_ID_ELSA_QS3000 0x3000
769 769  
  770 +#define PCI_VENDOR_ID_STMICRO 0x104A
  771 +
770 772 #define PCI_VENDOR_ID_BUSLOGIC 0x104B
771 773 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
772 774 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
... ... @@ -1251,6 +1253,8 @@
1251 1253 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
1252 1254 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
1253 1255 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
  1256 +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360
  1257 +#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364
1254 1258 #define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
1255 1259 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
1256 1260 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB
... ... @@ -2458,9 +2462,10 @@
2458 2462 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
2459 2463 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
2460 2464 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
2461   -#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
2462   -#define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN 0x1c41
2463   -#define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX 0x1c5f
  2465 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
  2466 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
  2467 +#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
  2468 +#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC 0x1d40
2464 2469 #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
2465 2470 #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
2466 2471 #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
... ... @@ -2669,9 +2674,9 @@
2669 2674 #define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
2670 2675 #define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
2671 2676 #define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
2672   -#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
2673   -#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f
2674   -#define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30
  2677 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
  2678 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
  2679 +#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
2675 2680 #define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
2676 2681 #define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
2677 2682 #define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
... ... @@ -2680,8 +2685,8 @@
2680 2685 #define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
2681 2686 #define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
2682 2687 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
2683   -#define PCI_DEVICE_ID_INTEL_TOLAPAI_0 0x5031
2684   -#define PCI_DEVICE_ID_INTEL_TOLAPAI_1 0x5032
  2688 +#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
  2689 +#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
2685 2690 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
2686 2691 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
2687 2692 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
include/linux/pci_regs.h
... ... @@ -300,12 +300,14 @@
300 300 #define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
301 301 #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
302 302  
303   -/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
  303 +/* MSI-X registers */
304 304 #define PCI_MSIX_FLAGS 2
305 305 #define PCI_MSIX_FLAGS_QSIZE 0x7FF
306 306 #define PCI_MSIX_FLAGS_ENABLE (1 << 15)
307 307 #define PCI_MSIX_FLAGS_MASKALL (1 << 14)
308   -#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
  308 +#define PCI_MSIX_TABLE 4
  309 +#define PCI_MSIX_PBA 8
  310 +#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
309 311  
310 312 /* CompactPCI Hotswap Register */
311 313  
kernel/resource.c
... ... @@ -40,6 +40,23 @@
40 40  
41 41 static DEFINE_RWLOCK(resource_lock);
42 42  
  43 +/*
  44 + * By default, we allocate free space bottom-up. The architecture can request
  45 + * top-down by clearing this flag. The user can override the architecture's
  46 + * choice with the "resource_alloc_from_bottom" kernel boot option, but that
  47 + * should only be a debugging tool.
  48 + */
  49 +int resource_alloc_from_bottom = 1;
  50 +
  51 +static __init int setup_alloc_from_bottom(char *s)
  52 +{
  53 + printk(KERN_INFO
  54 + "resource: allocating from bottom-up; please report a bug\n");
  55 + resource_alloc_from_bottom = 1;
  56 + return 0;
  57 +}
  58 +early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
  59 +
43 60 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
44 61 {
45 62 struct resource *p = v;
46 63  
47 64  
48 65  
... ... @@ -357,9 +374,98 @@
357 374 return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
358 375 }
359 376  
  377 +static resource_size_t simple_align_resource(void *data,
  378 + const struct resource *avail,
  379 + resource_size_t size,
  380 + resource_size_t align)
  381 +{
  382 + return avail->start;
  383 +}
  384 +
  385 +static void resource_clip(struct resource *res, resource_size_t min,
  386 + resource_size_t max)
  387 +{
  388 + if (res->start < min)
  389 + res->start = min;
  390 + if (res->end > max)
  391 + res->end = max;
  392 +}
  393 +
  394 +static bool resource_contains(struct resource *res1, struct resource *res2)
  395 +{
  396 + return res1->start <= res2->start && res1->end >= res2->end;
  397 +}
  398 +
360 399 /*
  400 + * Find the resource before "child" in the sibling list of "root" children.
  401 + */
  402 +static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
  403 +{
  404 + struct resource *this;
  405 +
  406 + for (this = root->child; this; this = this->sibling)
  407 + if (this->sibling == child)
  408 + return this;
  409 +
  410 + return NULL;
  411 +}
  412 +
  413 +/*
361 414 * Find empty slot in the resource tree given range and alignment.
  415 + * This version allocates from the end of the root resource first.
362 416 */
  417 +static int find_resource_from_top(struct resource *root, struct resource *new,
  418 + resource_size_t size, resource_size_t min,
  419 + resource_size_t max, resource_size_t align,
  420 + resource_size_t (*alignf)(void *,
  421 + const struct resource *,
  422 + resource_size_t,
  423 + resource_size_t),
  424 + void *alignf_data)
  425 +{
  426 + struct resource *this;
  427 + struct resource tmp, avail, alloc;
  428 +
  429 + tmp.start = root->end;
  430 + tmp.end = root->end;
  431 +
  432 + this = find_sibling_prev(root, NULL);
  433 + for (;;) {
  434 + if (this) {
  435 + if (this->end < root->end)
  436 + tmp.start = this->end + 1;
  437 + } else
  438 + tmp.start = root->start;
  439 +
  440 + resource_clip(&tmp, min, max);
  441 +
  442 + /* Check for overflow after ALIGN() */
  443 + avail = *new;
  444 + avail.start = ALIGN(tmp.start, align);
  445 + avail.end = tmp.end;
  446 + if (avail.start >= tmp.start) {
  447 + alloc.start = alignf(alignf_data, &avail, size, align);
  448 + alloc.end = alloc.start + size - 1;
  449 + if (resource_contains(&avail, &alloc)) {
  450 + new->start = alloc.start;
  451 + new->end = alloc.end;
  452 + return 0;
  453 + }
  454 + }
  455 +
  456 + if (!this || this->start == root->start)
  457 + break;
  458 +
  459 + tmp.end = this->start - 1;
  460 + this = find_sibling_prev(root, this);
  461 + }
  462 + return -EBUSY;
  463 +}
  464 +
  465 +/*
  466 + * Find empty slot in the resource tree given range and alignment.
  467 + * This version allocates from the beginning of the root resource first.
  468 + */
363 469 static int find_resource(struct resource *root, struct resource *new,
364 470 resource_size_t size, resource_size_t min,
365 471 resource_size_t max, resource_size_t align,
366 472  
367 473  
368 474  
369 475  
370 476  
... ... @@ -370,36 +476,43 @@
370 476 void *alignf_data)
371 477 {
372 478 struct resource *this = root->child;
373   - struct resource tmp = *new;
  479 + struct resource tmp = *new, avail, alloc;
374 480  
375 481 tmp.start = root->start;
376 482 /*
377   - * Skip past an allocated resource that starts at 0, since the assignment
378   - * of this->start - 1 to tmp->end below would cause an underflow.
  483 + * Skip past an allocated resource that starts at 0, since the
  484 + * assignment of this->start - 1 to tmp->end below would cause an
  485 + * underflow.
379 486 */
380 487 if (this && this->start == 0) {
381 488 tmp.start = this->end + 1;
382 489 this = this->sibling;
383 490 }
384   - for(;;) {
  491 + for (;;) {
385 492 if (this)
386 493 tmp.end = this->start - 1;
387 494 else
388 495 tmp.end = root->end;
389   - if (tmp.start < min)
390   - tmp.start = min;
391   - if (tmp.end > max)
392   - tmp.end = max;
393   - tmp.start = ALIGN(tmp.start, align);
394   - if (alignf)
395   - tmp.start = alignf(alignf_data, &tmp, size, align);
396   - if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
397   - new->start = tmp.start;
398   - new->end = tmp.start + size - 1;
399   - return 0;
  496 +
  497 + resource_clip(&tmp, min, max);
  498 +
  499 + /* Check for overflow after ALIGN() */
  500 + avail = *new;
  501 + avail.start = ALIGN(tmp.start, align);
  502 + avail.end = tmp.end;
  503 + if (avail.start >= tmp.start) {
  504 + alloc.start = alignf(alignf_data, &avail, size, align);
  505 + alloc.end = alloc.start + size - 1;
  506 + if (resource_contains(&avail, &alloc)) {
  507 + new->start = alloc.start;
  508 + new->end = alloc.end;
  509 + return 0;
  510 + }
400 511 }
  512 +
401 513 if (!this)
402 514 break;
  515 +
403 516 tmp.start = this->end + 1;
404 517 this = this->sibling;
405 518 }
406 519  
... ... @@ -428,8 +541,14 @@
428 541 {
429 542 int err;
430 543  
  544 + if (!alignf)
  545 + alignf = simple_align_resource;
  546 +
431 547 write_lock(&resource_lock);
432   - err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
  548 + if (resource_alloc_from_bottom)
  549 + err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
  550 + else
  551 + err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
433 552 if (err >= 0 && __request_resource(root, new))
434 553 err = -EBUSY;
435 554 write_unlock(&resource_lock);
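Finally, a minimal caller sketch for the updated allocate_resource() path (illustrative only; the resource name and sizes are invented, and error handling is omitted). With x86 now clearing resource_alloc_from_bottom and a NULL alignf passed by the caller, a request like this goes through simple_align_resource() and find_resource_from_top(), i.e. it is satisfied from the top of the available range:

#include <linux/ioport.h>

static struct resource example_res = {
	.name  = "example-mmio",	/* hypothetical name, for illustration only */
	.flags = IORESOURCE_MEM,
};

static int example_claim_mmio(void)
{
	/* 4 KB of MMIO below 4 GB, 4 KB aligned; NULL alignf -> simple_align_resource() */
	return allocate_resource(&iomem_resource, &example_res, 0x1000,
				 0, 0xffffffff, 0x1000, NULL, NULL);
}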