Commit d33a73c81241e3d9ab8da2d0558429bdd5b4ef9a
Committed by: Jeff Garzik
1 parent: 0832b25a75
Exists in: master and 7 other branches

[PATCH] forcedeth: Add support for MSI/MSIX

This forcedeth patch adds support for MSI/MSI-X interrupts.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>

Showing 1 changed file with 436 additions and 31 deletions
drivers/net/forcedeth.c
@@ -104,6 +104,7 @@
  * 0.49: 10 Dec 2005: Fix tso for large buffers.
  * 0.50: 20 Jan 2006: Add 8021pq tagging support.
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
+ * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -115,7 +116,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION "0.51"
+#define FORCEDETH_VERSION "0.52"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -156,6 +157,8 @@
 #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
 #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
 #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */
+#define DEV_HAS_MSI 0x0040 /* device supports MSI */
+#define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -169,14 +172,17 @@
 #define NVREG_IRQ_TX_OK 0x0010
 #define NVREG_IRQ_TIMER 0x0020
 #define NVREG_IRQ_LINK 0x0040
-#define NVREG_IRQ_TX_ERROR 0x0080
-#define NVREG_IRQ_TX1 0x0100
+#define NVREG_IRQ_RX_FORCED 0x0080
+#define NVREG_IRQ_TX_FORCED 0x0100
 #define NVREG_IRQMASK_THROUGHPUT 0x00df
 #define NVREG_IRQMASK_CPU 0x0040
+#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
+#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
+#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK)
 
 #define NVREG_IRQ_UNKNOWN (~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
-	NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
-	NVREG_IRQ_TX1))
+	NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
+	NVREG_IRQ_TX_FORCED))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL 3
@@ -188,6 +194,10 @@
 	NvRegPollingInterval = 0x00c,
 #define NVREG_POLL_DEFAULT_THROUGHPUT 970
 #define NVREG_POLL_DEFAULT_CPU 13
+	NvRegMSIMap0 = 0x020,
+	NvRegMSIMap1 = 0x024,
+	NvRegMSIIrqMask = 0x030,
+#define NVREG_MSI_VECTOR_0_ENABLED 0x01
 	NvRegMisc1 = 0x080,
#define NVREG_MISC1_HD 0x02
 #define NVREG_MISC1_FORCE 0x3b0f3c
@@ -312,6 +322,9 @@
 #define NVREG_POWERSTATE_D3 0x0003
 	NvRegVlanControl = 0x300,
 #define NVREG_VLANCONTROL_ENABLE 0x2000
+	NvRegMSIXMap0 = 0x3e0,
+	NvRegMSIXMap1 = 0x3e4,
+	NvRegMSIXIrqStatus = 0x3f0,
 };
 
 /* Big endian: should work, but is untested */
@@ -489,7 +502,19 @@
 #define LPA_1000FULL 0x0800
 #define LPA_1000HALF 0x0400
 
+/* MSI/MSI-X defines */
+#define NV_MSI_X_MAX_VECTORS 8
+#define NV_MSI_X_VECTORS_MASK 0x000f
+#define NV_MSI_CAPABLE 0x0010
+#define NV_MSI_X_CAPABLE 0x0020
+#define NV_MSI_ENABLED 0x0040
+#define NV_MSI_X_ENABLED 0x0080
 
+#define NV_MSI_X_VECTOR_ALL 0x0
+#define NV_MSI_X_VECTOR_RX 0x0
+#define NV_MSI_X_VECTOR_TX 0x1
+#define NV_MSI_X_VECTOR_OTHER 0x2
+
 /*
  * SMP locking:
  * All hardware access under dev->priv->lock, except the performance
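Editor's note (not part of the patch): np->msi_flags packs the whole MSI/MSI-X state into one word. The low nibble (NV_MSI_X_VECTORS_MASK) holds the number of MSI-X vectors the driver will request, and the upper bits record capability (NV_MSI_CAPABLE, NV_MSI_X_CAPABLE) and runtime state (NV_MSI_ENABLED, NV_MSI_X_ENABLED). A minimal sketch of the decoding, using only the defines above:

    /* Illustrative sketch, not committed code: decoding msi_flags. */
    static int nv_requested_vectors(u32 msi_flags)
    {
        return msi_flags & NV_MSI_X_VECTORS_MASK; /* 3 in throughput mode, 1 in CPU mode */
    }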
@@ -540,6 +565,7 @@
 	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
+	u32 nic_poll_irq;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -558,6 +584,10 @@
 
 	/* vlan fields */
 	struct vlan_group *vlangrp;
+
+	/* msi/msi-x fields */
+	u32 msi_flags;
+	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
 };
 
 /*
@@ -585,6 +615,16 @@
  */
 static int poll_interval = -1;
 
+/*
+ * Disable MSI interrupts
+ */
+static int disable_msi = 0;
+
+/*
+ * Disable MSIX interrupts
+ */
+static int disable_msix = 0;
+
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -948,14 +988,27 @@
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 	if (nv_alloc_rx(dev)) {
 		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 		spin_unlock(&np->lock);
 	}
-	enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+	}
 }
 
 static void nv_init_rx(struct net_device *dev)
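Editor's note: the guard wrapped around disable_irq()/enable_irq() above recurs throughout the patch. Since !A || (A && B) is equivalent to !A || B, it simply asks "does one interrupt line serve every source?", which is true when MSI-X is off or when MSI-X is on with a single vector; only then is dev->irq the line to toggle, otherwise just the rx vector is quiesced. A hypothetical helper expressing the same test (not in the patch):

    /* Hypothetical reading aid: true when all sources share one interrupt line. */
    static int nv_single_irq_line(u32 msi_flags)
    {
        return !(msi_flags & NV_MSI_X_ENABLED) ||
               (msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1;
    }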
@@ -1010,7 +1063,7 @@
 	}
 
 	if (np->tx_skbuff[skbnr]) {
-		dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+		dev_kfree_skb_any(np->tx_skbuff[skbnr]);
 		np->tx_skbuff[skbnr] = NULL;
 		return 1;
 	} else {
@@ -1261,10 +1314,15 @@
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 status;
 
-	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
-		readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
+	if (np->msi_flags & NV_MSI_X_ENABLED)
+		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+	else
+		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
+
 	{
 		int i;
@@ -1579,7 +1637,15 @@
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1612,7 +1678,15 @@
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		enable_irq(dev->irq);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -1918,8 +1992,13 @@
 	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
 
 	for (i=0; ; i++) {
-		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+		} else {
+			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+		}
 		pci_push(base);
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
@@ -1959,11 +2038,16 @@
 		if (i > max_interrupt_work) {
 			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
-			writel(0, base + NvRegIrqMask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
+				writel(0, base + NvRegIrqMask);
+			else
+				writel(np->irqmask, base + NvRegIrqMask);
 			pci_push(base);
 
-			if (!np->in_shutdown)
+			if (!np->in_shutdown) {
+				np->nic_poll_irq = np->irqmask;
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
 			spin_unlock(&np->lock);
 			break;
@@ -1975,22 +2059,212 @@
 	return IRQ_RETVAL(i);
 }
 
+static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
+		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		spin_lock(&np->lock);
+		nv_tx_done(dev);
+		spin_unlock(&np->lock);
+
+		if (events & (NVREG_IRQ_TX_ERR)) {
+			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
+				dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
+		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		nv_rx_process(dev);
+		if (nv_alloc_rx(dev)) {
+			spin_lock(&np->lock);
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			spin_unlock(&np->lock);
+		}
+
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
+static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 events;
+	int i;
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
+
+	for (i=0; ; i++) {
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
+		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		pci_push(base);
+		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+		if (!(events & np->irqmask))
+			break;
+
+		if (events & NVREG_IRQ_LINK) {
+			spin_lock(&np->lock);
+			nv_link_irq(dev);
+			spin_unlock(&np->lock);
+		}
+		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
+			spin_lock(&np->lock);
+			nv_linkchange(dev);
+			spin_unlock(&np->lock);
+			np->link_timeout = jiffies + LINK_TIMEOUT;
+		}
+		if (events & (NVREG_IRQ_UNKNOWN)) {
+			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
+				dev->name, events);
+		}
+		if (i > max_interrupt_work) {
+			spin_lock(&np->lock);
+			/* disable interrupts on the nic */
+			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
+			pci_push(base);
+
+			if (!np->in_shutdown) {
+				np->nic_poll_irq |= NVREG_IRQ_OTHER;
+				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+			}
+			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+			spin_unlock(&np->lock);
+			break;
+		}
+
+	}
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
+
+	return IRQ_RETVAL(i);
+}
+
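Editor's note: the three new handlers partition the interrupt sources along the grouped masks defined earlier; each one reads NvRegMSIXIrqStatus, keeps only its own group, and acks only those bits, so the vectors never consume each other's events. Taking the bit values from the full header (NVREG_IRQ_RX_ERROR 0x0001, NVREG_IRQ_RX 0x0002, NVREG_IRQ_RX_NOBUF 0x0004 and NVREG_IRQ_TX_ERR 0x0008 fall outside the hunks shown, but are implied by NVREG_IRQMASK_THROUGHPUT 0x00df), the groups work out to be disjoint:

    /* Illustrative arithmetic, not committed code:
     * NVREG_IRQ_TX_ALL = 0x0008|0x0010|0x0100 = 0x0118        -> nv_nic_irq_tx
     * NVREG_IRQ_RX_ALL = 0x0001|0x0002|0x0004|0x0080 = 0x0087 -> nv_nic_irq_rx
     * NVREG_IRQ_OTHER  = 0x0020|0x0040 = 0x0060               -> nv_nic_irq_other
     * The pairwise AND of the three masks is 0: no event is claimed twice.
     */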
 static void nv_do_nic_poll(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 mask = 0;
 
-	disable_irq(dev->irq);
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 	/*
+	 * First disable irq(s) and then
 	 * reenable interrupts on the nic, we have to do this before calling
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
-	writel(np->irqmask, base + NvRegIrqMask);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+		mask = np->irqmask;
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			mask |= NVREG_IRQ_RX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			mask |= NVREG_IRQ_TX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			mask |= NVREG_IRQ_OTHER;
+		}
+	}
+	np->nic_poll_irq = 0;
+
+	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
-	nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-	enable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		enable_irq(dev->irq);
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			nv_nic_irq_other((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
+	}
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
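Editor's note: nic_poll_irq is the bookkeeping for the "too many iterations" escape hatch. A handler that exceeds max_interrupt_work masks its sources on the nic, ORs its group into np->nic_poll_irq and arms the nic_poll timer; nv_do_nic_poll() then disables only the affected vector(s), unmasks the recorded sources, and calls the matching handler(s) by hand. In rough outline (an illustrative summary, not committed code):

    /* handler:        mask group -> nic_poll_irq |= group -> mod_timer(&np->nic_poll, ...)
     * nv_do_nic_poll: disable_irq(vector) -> writel(mask, base + NvRegIrqMask)
     *                 -> call the handler directly -> enable_irq(vector)
     */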
@@ -2297,11 +2571,38 @@
 	/* nothing to do */
 };
 
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int ret, oom, i;
+	int ret = 1;
+	int oom, i;
 
 	dprintk(KERN_DEBUG "nv_open: begin\n");
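Editor's note: each of the 16 possible interrupt status bits gets a 4-bit field in the map registers selecting which MSI-X vector it fires; NvRegMSIXMap0 covers status bits 0-7 and NvRegMSIXMap1 bits 8-15, which is why the vector is shifted by (i << 2), i.e. i * 4. A worked example under the bit values noted earlier, mapping NVREG_IRQ_OTHER (TIMER = bit 5, LINK = bit 6) to NV_MSI_X_VECTOR_OTHER (2):

    /* Illustrative arithmetic, not committed code:
     * msixmap = (2 << (5 * 4)) | (2 << (6 * 4))
     *         = 0x00200000 | 0x02000000 = 0x02200000   -> OR'ed into NvRegMSIXMap0;
     * NVREG_IRQ_OTHER has no bits in 8-15, so NvRegMSIXMap1 is left untouched.
     */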
@@ -2392,10 +2693,78 @@
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
-	if (ret)
-		goto out_drain;
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
 
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_drain;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_drain;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_drain;
+	}
+
 	/* ask for interrupts */
 	writel(np->irqmask, base + NvRegIrqMask);
 
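Editor's note: interrupt setup now tries three strategies in order, falling back on failure: MSI-X via pci_enable_msix() (three vectors with dedicated handlers in throughput mode, one shared vector otherwise), then plain MSI via pci_enable_msi(), then the legacy INTx pin. Initializing ret to 1 makes the later branches run when an earlier capability is absent, since only a successful enable call sets ret to 0. A condensed outline of the control flow, ignoring the error-unwind paths (try_msix/try_msi/try_intx are hypothetical names, not driver functions):

    ret = 1;
    if (np->msi_flags & NV_MSI_X_CAPABLE)
        ret = try_msix(dev);          /* per-vector or single-vector MSI-X */
    if (ret != 0 && (np->msi_flags & NV_MSI_CAPABLE))
        ret = try_msi(dev);           /* one shared handler on MSI vector 0 */
    if (ret != 0)
        try_intx(dev);                /* classic shared interrupt pin */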
@@ -2441,6 +2810,7 @@
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2458,13 +2828,31 @@
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	writel(0, base + NvRegIrqMask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	free_irq(dev->irq, dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
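Editor's note: teardown mirrors setup. In MSI-X mode the driver quiesces by writing np->irqmask (rather than 0) to NvRegIrqMask, consistent with the max_interrupt_work paths above, then frees every requested vector before calling pci_disable_msix(); with plain MSI it additionally clears NvRegMSIIrqMask before free_irq() and pci_disable_msi(). Both paths now free np->pci_dev->irq or the per-vector irqs instead of dev->irq.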
@@ -2588,6 +2976,14 @@
 		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
 	}
 
+	np->msi_flags = 0;
+	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+		np->msi_flags |= NV_MSI_CAPABLE;
+	}
+	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+		np->msi_flags |= NV_MSI_X_CAPABLE;
+	}
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
@@ -2670,10 +3066,15 @@
 	} else {
 		np->tx_flags = NV_TX2_VALID;
 	}
-	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
+	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
 		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
-	else
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0003;
+	} else {
 		np->irqmask = NVREG_IRQMASK_CPU;
+		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
+			np->msi_flags |= 0x0001;
+	}
 
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
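Editor's note: the low nibble set here is exactly what nv_open() later hands to pci_enable_msix(): 0x0003 requests three vectors (rx, tx, other) in throughput mode and 0x0001 a single shared vector in CPU (timer-driven) mode, matching NV_MSI_X_VECTORS_MASK. As consumed in the setup code shown above:

    /* Quoted from the nv_open() hunk earlier in this diff: */
    pci_enable_msix(np->pci_dev, np->msi_x_entry,
                    (np->msi_flags & NV_MSI_X_VECTORS_MASK)); /* 3 or 1 */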
@@ -2829,11 +3230,11 @@
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
 	},
 	{0,},
 };
@@ -2863,6 +3264,10 @@
 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
 module_param(poll_interval, int, 0);
 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
+module_param(disable_msix, int, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
 
 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
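Editor's note: both new parameters default to 0, i.e. MSI and MSI-X stay enabled on hardware that advertises them. To force a fallback on a system where MSI-X misbehaves, load the module with disable_msix=1 (for example `modprobe forcedeth disable_msix=1`); setting disable_msi=1 as well leaves only the legacy INTx path.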