Commit ef1ea0b424d09452b27f5cb1a0c108b645cb25e0
Committed by David S. Miller
1 parent: 5cea73b0f7
Exists in master and in 7 other branches
pasemi_mac: add support for setting MTU
Currently keeping it at 1500 bytes or below since jumbo frames need special checksum offload on TX.

Signed-off-by: Olof Johansson <olof@lixom.net>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 2 changed files with 172 additions and 72 deletions
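The hunks shown below introduce the PE_MIN_MTU/PE_MAX_MTU limits and replace the fixed BUF_SIZE with a per-device mac->bufsz; the .change_mtu handler itself is not visible in the portion of the diff reproduced here. As a rough orientation only, a handler of the general shape this patch implies might look like the sketch below. The function name is hypothetical, and the bufsz arithmetic is inferred from the old BUF_SIZE comment ("1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines", i.e. 1646), not taken from the patch; the real handler also has to quiesce the interface and reallocate the RX buffers before the new size takes effect.

        /* Hypothetical sketch, not part of the patch text: MTU range check
         * plus the buffer-size arithmetic implied by the old BUF_SIZE value.
         */
        static int example_change_mtu(struct net_device *dev, int new_mtu)
        {
                struct pasemi_mac *mac = netdev_priv(dev);

                /* TX checksum offload can't handle jumbo frames yet, so the
                 * upper bound stays at the standard 1500-byte MTU.
                 */
                if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
                        return -EINVAL;

                dev->mtu = new_mtu;

                /* MTU + ETH_HLEN + VLAN_HLEN + two 64-byte cachelines: the
                 * same arithmetic that produced the old fixed BUF_SIZE of 1646.
                 */
                mac->bufsz = new_mtu + ETH_HLEN + VLAN_HLEN + 128;

                return 0;
        }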
drivers/net/pasemi_mac.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2006-2007 PA Semi, Inc | 2 | * Copyright (C) 2006-2007 PA Semi, Inc |
3 | * | 3 | * |
4 | * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs | 4 | * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/netdevice.h> | 26 | #include <linux/netdevice.h> |
27 | #include <linux/etherdevice.h> | 27 | #include <linux/etherdevice.h> |
28 | #include <asm/dma-mapping.h> | 28 | #include <asm/dma-mapping.h> |
29 | #include <linux/in.h> | 29 | #include <linux/in.h> |
30 | #include <linux/skbuff.h> | 30 | #include <linux/skbuff.h> |
31 | 31 | ||
32 | #include <linux/ip.h> | 32 | #include <linux/ip.h> |
33 | #include <linux/tcp.h> | 33 | #include <linux/tcp.h> |
34 | #include <net/checksum.h> | 34 | #include <net/checksum.h> |
35 | #include <linux/inet_lro.h> | 35 | #include <linux/inet_lro.h> |
36 | 36 | ||
37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/firmware.h> | 38 | #include <asm/firmware.h> |
39 | #include <asm/pasemi_dma.h> | 39 | #include <asm/pasemi_dma.h> |
40 | 40 | ||
41 | #include "pasemi_mac.h" | 41 | #include "pasemi_mac.h" |
42 | 42 | ||
43 | /* We have our own align, since ppc64 in general has it at 0 because | 43 | /* We have our own align, since ppc64 in general has it at 0 because |
44 | * of design flaws in some of the server bridge chips. However, for | 44 | * of design flaws in some of the server bridge chips. However, for |
45 | * PWRficient doing the unaligned copies is more expensive than doing | 45 | * PWRficient doing the unaligned copies is more expensive than doing |
46 | * unaligned DMA, so make sure the data is aligned instead. | 46 | * unaligned DMA, so make sure the data is aligned instead. |
47 | */ | 47 | */ |
48 | #define LOCAL_SKB_ALIGN 2 | 48 | #define LOCAL_SKB_ALIGN 2 |
49 | 49 | ||
50 | /* TODO list | 50 | /* TODO list |
51 | * | 51 | * |
52 | * - Multicast support | 52 | * - Multicast support |
53 | * - Large MTU support | 53 | * - Large MTU support |
54 | * - SW LRO | 54 | * - SW LRO |
55 | * - Multiqueue RX/TX | 55 | * - Multiqueue RX/TX |
56 | */ | 56 | */ |
57 | 57 | ||
58 | 58 | ||
59 | /* Must be a power of two */ | 59 | /* Must be a power of two */ |
60 | #define RX_RING_SIZE 2048 | 60 | #define RX_RING_SIZE 2048 |
61 | #define TX_RING_SIZE 4096 | 61 | #define TX_RING_SIZE 4096 |
62 | 62 | ||
63 | #define LRO_MAX_AGGR 64 | 63 | #define LRO_MAX_AGGR 64 |
64 | 64 | ||
65 | #define PE_MIN_MTU 64 | ||
66 | #define PE_MAX_MTU 1500 | ||
67 | #define PE_DEF_MTU ETH_DATA_LEN | ||
68 | |||
65 | #define DEFAULT_MSG_ENABLE \ | 69 | #define DEFAULT_MSG_ENABLE \ |
66 | (NETIF_MSG_DRV | \ | 70 | (NETIF_MSG_DRV | \ |
67 | NETIF_MSG_PROBE | \ | 71 | NETIF_MSG_PROBE | \ |
68 | NETIF_MSG_LINK | \ | 72 | NETIF_MSG_LINK | \ |
69 | NETIF_MSG_TIMER | \ | 73 | NETIF_MSG_TIMER | \ |
70 | NETIF_MSG_IFDOWN | \ | 74 | NETIF_MSG_IFDOWN | \ |
71 | NETIF_MSG_IFUP | \ | 75 | NETIF_MSG_IFUP | \ |
72 | NETIF_MSG_RX_ERR | \ | 76 | NETIF_MSG_RX_ERR | \ |
73 | NETIF_MSG_TX_ERR) | 77 | NETIF_MSG_TX_ERR) |
74 | 78 | ||
75 | #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) | 79 | #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) |
76 | #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) | 80 | #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) |
77 | #define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) | 81 | #define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) |
78 | #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) | 82 | #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) |
79 | #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) | 83 | #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) |
80 | 84 | ||
81 | #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ | 85 | #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ |
82 | & ((ring)->size - 1)) | 86 | & ((ring)->size - 1)) |
83 | #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) | 87 | #define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) |
84 | 88 | ||
85 | #define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ | ||
86 | |||
87 | MODULE_LICENSE("GPL"); | 89 | MODULE_LICENSE("GPL"); |
88 | MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); | 90 | MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>"); |
89 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); | 91 | MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); |
90 | 92 | ||
91 | static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ | 93 | static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ |
92 | module_param(debug, int, 0); | 94 | module_param(debug, int, 0); |
93 | MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); | 95 | MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); |
94 | 96 | ||
95 | static int translation_enabled(void) | 97 | static int translation_enabled(void) |
96 | { | 98 | { |
97 | #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) | 99 | #if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) |
98 | return 1; | 100 | return 1; |
99 | #else | 101 | #else |
100 | return firmware_has_feature(FW_FEATURE_LPAR); | 102 | return firmware_has_feature(FW_FEATURE_LPAR); |
101 | #endif | 103 | #endif |
102 | } | 104 | } |
103 | 105 | ||
104 | static void write_iob_reg(unsigned int reg, unsigned int val) | 106 | static void write_iob_reg(unsigned int reg, unsigned int val) |
105 | { | 107 | { |
106 | pasemi_write_iob_reg(reg, val); | 108 | pasemi_write_iob_reg(reg, val); |
107 | } | 109 | } |
108 | 110 | ||
109 | static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) | 111 | static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg) |
110 | { | 112 | { |
111 | return pasemi_read_mac_reg(mac->dma_if, reg); | 113 | return pasemi_read_mac_reg(mac->dma_if, reg); |
112 | } | 114 | } |
113 | 115 | ||
114 | static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, | 116 | static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg, |
115 | unsigned int val) | 117 | unsigned int val) |
116 | { | 118 | { |
117 | pasemi_write_mac_reg(mac->dma_if, reg, val); | 119 | pasemi_write_mac_reg(mac->dma_if, reg, val); |
118 | } | 120 | } |
119 | 121 | ||
120 | static unsigned int read_dma_reg(unsigned int reg) | 122 | static unsigned int read_dma_reg(unsigned int reg) |
121 | { | 123 | { |
122 | return pasemi_read_dma_reg(reg); | 124 | return pasemi_read_dma_reg(reg); |
123 | } | 125 | } |
124 | 126 | ||
125 | static void write_dma_reg(unsigned int reg, unsigned int val) | 127 | static void write_dma_reg(unsigned int reg, unsigned int val) |
126 | { | 128 | { |
127 | pasemi_write_dma_reg(reg, val); | 129 | pasemi_write_dma_reg(reg, val); |
128 | } | 130 | } |
129 | 131 | ||
130 | static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) | 132 | static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac) |
131 | { | 133 | { |
132 | return mac->rx; | 134 | return mac->rx; |
133 | } | 135 | } |
134 | 136 | ||
135 | static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) | 137 | static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac) |
136 | { | 138 | { |
137 | return mac->tx; | 139 | return mac->tx; |
138 | } | 140 | } |
139 | 141 | ||
140 | static inline void prefetch_skb(const struct sk_buff *skb) | 142 | static inline void prefetch_skb(const struct sk_buff *skb) |
141 | { | 143 | { |
142 | const void *d = skb; | 144 | const void *d = skb; |
143 | 145 | ||
144 | prefetch(d); | 146 | prefetch(d); |
145 | prefetch(d+64); | 147 | prefetch(d+64); |
146 | prefetch(d+128); | 148 | prefetch(d+128); |
147 | prefetch(d+192); | 149 | prefetch(d+192); |
148 | } | 150 | } |
149 | 151 | ||
150 | static int mac_to_intf(struct pasemi_mac *mac) | 152 | static int mac_to_intf(struct pasemi_mac *mac) |
151 | { | 153 | { |
152 | struct pci_dev *pdev = mac->pdev; | 154 | struct pci_dev *pdev = mac->pdev; |
153 | u32 tmp; | 155 | u32 tmp; |
154 | int nintf, off, i, j; | 156 | int nintf, off, i, j; |
155 | int devfn = pdev->devfn; | 157 | int devfn = pdev->devfn; |
156 | 158 | ||
157 | tmp = read_dma_reg(PAS_DMA_CAP_IFI); | 159 | tmp = read_dma_reg(PAS_DMA_CAP_IFI); |
158 | nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; | 160 | nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S; |
159 | off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; | 161 | off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S; |
160 | 162 | ||
161 | /* IOFF contains the offset to the registers containing the | 163 | /* IOFF contains the offset to the registers containing the |
162 | * DMA interface-to-MAC-pci-id mappings, and NIN contains number | 164 | * DMA interface-to-MAC-pci-id mappings, and NIN contains number |
163 | * of total interfaces. Each register contains 4 devfns. | 165 | * of total interfaces. Each register contains 4 devfns. |
164 | * Just do a linear search until we find the devfn of the MAC | 166 | * Just do a linear search until we find the devfn of the MAC |
165 | * we're trying to look up. | 167 | * we're trying to look up. |
166 | */ | 168 | */ |
167 | 169 | ||
168 | for (i = 0; i < (nintf+3)/4; i++) { | 170 | for (i = 0; i < (nintf+3)/4; i++) { |
169 | tmp = read_dma_reg(off+4*i); | 171 | tmp = read_dma_reg(off+4*i); |
170 | for (j = 0; j < 4; j++) { | 172 | for (j = 0; j < 4; j++) { |
171 | if (((tmp >> (8*j)) & 0xff) == devfn) | 173 | if (((tmp >> (8*j)) & 0xff) == devfn) |
172 | return i*4 + j; | 174 | return i*4 + j; |
173 | } | 175 | } |
174 | } | 176 | } |
175 | return -1; | 177 | return -1; |
176 | } | 178 | } |
177 | 179 | ||
180 | static void pasemi_mac_intf_disable(struct pasemi_mac *mac) | ||
181 | { | ||
182 | unsigned int flags; | ||
183 | |||
184 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
185 | flags &= ~PAS_MAC_CFG_PCFG_PE; | ||
186 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
187 | } | ||
188 | |||
189 | static void pasemi_mac_intf_enable(struct pasemi_mac *mac) | ||
190 | { | ||
191 | unsigned int flags; | ||
192 | |||
193 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
194 | flags |= PAS_MAC_CFG_PCFG_PE; | ||
195 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
196 | } | ||
197 | |||
178 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) | 198 | static int pasemi_get_mac_addr(struct pasemi_mac *mac) |
179 | { | 199 | { |
180 | struct pci_dev *pdev = mac->pdev; | 200 | struct pci_dev *pdev = mac->pdev; |
181 | struct device_node *dn = pci_device_to_OF_node(pdev); | 201 | struct device_node *dn = pci_device_to_OF_node(pdev); |
182 | int len; | 202 | int len; |
183 | const u8 *maddr; | 203 | const u8 *maddr; |
184 | u8 addr[6]; | 204 | u8 addr[6]; |
185 | 205 | ||
186 | if (!dn) { | 206 | if (!dn) { |
187 | dev_dbg(&pdev->dev, | 207 | dev_dbg(&pdev->dev, |
188 | "No device node for mac, not configuring\n"); | 208 | "No device node for mac, not configuring\n"); |
189 | return -ENOENT; | 209 | return -ENOENT; |
190 | } | 210 | } |
191 | 211 | ||
192 | maddr = of_get_property(dn, "local-mac-address", &len); | 212 | maddr = of_get_property(dn, "local-mac-address", &len); |
193 | 213 | ||
194 | if (maddr && len == 6) { | 214 | if (maddr && len == 6) { |
195 | memcpy(mac->mac_addr, maddr, 6); | 215 | memcpy(mac->mac_addr, maddr, 6); |
196 | return 0; | 216 | return 0; |
197 | } | 217 | } |
198 | 218 | ||
199 | /* Some old versions of firmware mistakenly uses mac-address | 219 | /* Some old versions of firmware mistakenly uses mac-address |
200 | * (and as a string) instead of a byte array in local-mac-address. | 220 | * (and as a string) instead of a byte array in local-mac-address. |
201 | */ | 221 | */ |
202 | 222 | ||
203 | if (maddr == NULL) | 223 | if (maddr == NULL) |
204 | maddr = of_get_property(dn, "mac-address", NULL); | 224 | maddr = of_get_property(dn, "mac-address", NULL); |
205 | 225 | ||
206 | if (maddr == NULL) { | 226 | if (maddr == NULL) { |
207 | dev_warn(&pdev->dev, | 227 | dev_warn(&pdev->dev, |
208 | "no mac address in device tree, not configuring\n"); | 228 | "no mac address in device tree, not configuring\n"); |
209 | return -ENOENT; | 229 | return -ENOENT; |
210 | } | 230 | } |
211 | 231 | ||
212 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], | 232 | if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0], |
213 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { | 233 | &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) { |
214 | dev_warn(&pdev->dev, | 234 | dev_warn(&pdev->dev, |
215 | "can't parse mac address, not configuring\n"); | 235 | "can't parse mac address, not configuring\n"); |
216 | return -EINVAL; | 236 | return -EINVAL; |
217 | } | 237 | } |
218 | 238 | ||
219 | memcpy(mac->mac_addr, addr, 6); | 239 | memcpy(mac->mac_addr, addr, 6); |
220 | 240 | ||
221 | return 0; | 241 | return 0; |
222 | } | 242 | } |
223 | 243 | ||
224 | static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) | 244 | static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) |
225 | { | 245 | { |
226 | struct pasemi_mac *mac = netdev_priv(dev); | 246 | struct pasemi_mac *mac = netdev_priv(dev); |
227 | struct sockaddr *addr = p; | 247 | struct sockaddr *addr = p; |
228 | unsigned int adr0, adr1; | 248 | unsigned int adr0, adr1; |
229 | 249 | ||
230 | if (!is_valid_ether_addr(addr->sa_data)) | 250 | if (!is_valid_ether_addr(addr->sa_data)) |
231 | return -EINVAL; | 251 | return -EINVAL; |
232 | 252 | ||
233 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 253 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
234 | 254 | ||
235 | adr0 = dev->dev_addr[2] << 24 | | 255 | adr0 = dev->dev_addr[2] << 24 | |
236 | dev->dev_addr[3] << 16 | | 256 | dev->dev_addr[3] << 16 | |
237 | dev->dev_addr[4] << 8 | | 257 | dev->dev_addr[4] << 8 | |
238 | dev->dev_addr[5]; | 258 | dev->dev_addr[5]; |
239 | adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); | 259 | adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1); |
240 | adr1 &= ~0xffff; | 260 | adr1 &= ~0xffff; |
241 | adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; | 261 | adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1]; |
242 | 262 | ||
243 | pasemi_mac_intf_disable(mac); | 263 | pasemi_mac_intf_disable(mac); |
244 | write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); | 264 | write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0); |
245 | write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); | 265 | write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1); |
246 | pasemi_mac_intf_enable(mac); | 266 | pasemi_mac_intf_enable(mac); |
247 | 267 | ||
248 | return 0; | 268 | return 0; |
249 | } | 269 | } |
250 | 270 | ||
251 | static int get_skb_hdr(struct sk_buff *skb, void **iphdr, | 271 | static int get_skb_hdr(struct sk_buff *skb, void **iphdr, |
252 | void **tcph, u64 *hdr_flags, void *data) | 272 | void **tcph, u64 *hdr_flags, void *data) |
253 | { | 273 | { |
254 | u64 macrx = (u64) data; | 274 | u64 macrx = (u64) data; |
255 | unsigned int ip_len; | 275 | unsigned int ip_len; |
256 | struct iphdr *iph; | 276 | struct iphdr *iph; |
257 | 277 | ||
258 | /* IPv4 header checksum failed */ | 278 | /* IPv4 header checksum failed */ |
259 | if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) | 279 | if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) |
260 | return -1; | 280 | return -1; |
261 | 281 | ||
262 | /* non tcp packet */ | 282 | /* non tcp packet */ |
263 | skb_reset_network_header(skb); | 283 | skb_reset_network_header(skb); |
264 | iph = ip_hdr(skb); | 284 | iph = ip_hdr(skb); |
265 | if (iph->protocol != IPPROTO_TCP) | 285 | if (iph->protocol != IPPROTO_TCP) |
266 | return -1; | 286 | return -1; |
267 | 287 | ||
268 | ip_len = ip_hdrlen(skb); | 288 | ip_len = ip_hdrlen(skb); |
269 | skb_set_transport_header(skb, ip_len); | 289 | skb_set_transport_header(skb, ip_len); |
270 | *tcph = tcp_hdr(skb); | 290 | *tcph = tcp_hdr(skb); |
271 | 291 | ||
272 | /* check if ip header and tcp header are complete */ | 292 | /* check if ip header and tcp header are complete */ |
273 | if (iph->tot_len < ip_len + tcp_hdrlen(skb)) | 293 | if (iph->tot_len < ip_len + tcp_hdrlen(skb)) |
274 | return -1; | 294 | return -1; |
275 | 295 | ||
276 | *hdr_flags = LRO_IPV4 | LRO_TCP; | 296 | *hdr_flags = LRO_IPV4 | LRO_TCP; |
277 | *iphdr = iph; | 297 | *iphdr = iph; |
278 | 298 | ||
279 | return 0; | 299 | return 0; |
280 | } | 300 | } |
281 | 301 | ||
282 | static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, | 302 | static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, |
283 | const int nfrags, | 303 | const int nfrags, |
284 | struct sk_buff *skb, | 304 | struct sk_buff *skb, |
285 | const dma_addr_t *dmas) | 305 | const dma_addr_t *dmas) |
286 | { | 306 | { |
287 | int f; | 307 | int f; |
288 | struct pci_dev *pdev = mac->dma_pdev; | 308 | struct pci_dev *pdev = mac->dma_pdev; |
289 | 309 | ||
290 | pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); | 310 | pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); |
291 | 311 | ||
292 | for (f = 0; f < nfrags; f++) { | 312 | for (f = 0; f < nfrags; f++) { |
293 | skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; | 313 | skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; |
294 | 314 | ||
295 | pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE); | 315 | pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE); |
296 | } | 316 | } |
297 | dev_kfree_skb_irq(skb); | 317 | dev_kfree_skb_irq(skb); |
298 | 318 | ||
299 | /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, | 319 | /* Freed descriptor slot + main SKB ptr + nfrags additional ptrs, |
300 | * aligned up to a power of 2 | 320 | * aligned up to a power of 2 |
301 | */ | 321 | */ |
302 | return (nfrags + 3) & ~1; | 322 | return (nfrags + 3) & ~1; |
303 | } | 323 | } |
304 | 324 | ||
305 | static int pasemi_mac_setup_rx_resources(const struct net_device *dev) | 325 | static int pasemi_mac_setup_rx_resources(const struct net_device *dev) |
306 | { | 326 | { |
307 | struct pasemi_mac_rxring *ring; | 327 | struct pasemi_mac_rxring *ring; |
308 | struct pasemi_mac *mac = netdev_priv(dev); | 328 | struct pasemi_mac *mac = netdev_priv(dev); |
309 | int chno; | 329 | int chno; |
310 | unsigned int cfg; | 330 | unsigned int cfg; |
311 | 331 | ||
312 | ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), | 332 | ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring), |
313 | offsetof(struct pasemi_mac_rxring, chan)); | 333 | offsetof(struct pasemi_mac_rxring, chan)); |
314 | 334 | ||
315 | if (!ring) { | 335 | if (!ring) { |
316 | dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); | 336 | dev_err(&mac->pdev->dev, "Can't allocate RX channel\n"); |
317 | goto out_chan; | 337 | goto out_chan; |
318 | } | 338 | } |
319 | chno = ring->chan.chno; | 339 | chno = ring->chan.chno; |
320 | 340 | ||
321 | spin_lock_init(&ring->lock); | 341 | spin_lock_init(&ring->lock); |
322 | 342 | ||
323 | ring->size = RX_RING_SIZE; | 343 | ring->size = RX_RING_SIZE; |
324 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * | 344 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * |
325 | RX_RING_SIZE, GFP_KERNEL); | 345 | RX_RING_SIZE, GFP_KERNEL); |
326 | 346 | ||
327 | if (!ring->ring_info) | 347 | if (!ring->ring_info) |
328 | goto out_ring_info; | 348 | goto out_ring_info; |
329 | 349 | ||
330 | /* Allocate descriptors */ | 350 | /* Allocate descriptors */ |
331 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) | 351 | if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE)) |
332 | goto out_ring_desc; | 352 | goto out_ring_desc; |
333 | 353 | ||
334 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, | 354 | ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev, |
335 | RX_RING_SIZE * sizeof(u64), | 355 | RX_RING_SIZE * sizeof(u64), |
336 | &ring->buf_dma, GFP_KERNEL); | 356 | &ring->buf_dma, GFP_KERNEL); |
337 | if (!ring->buffers) | 357 | if (!ring->buffers) |
338 | goto out_ring_desc; | 358 | goto out_ring_desc; |
339 | 359 | ||
340 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); | 360 | memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64)); |
341 | 361 | ||
342 | write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), | 362 | write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno), |
343 | PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); | 363 | PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma)); |
344 | 364 | ||
345 | write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), | 365 | write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno), |
346 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | | 366 | PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) | |
347 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); | 367 | PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3)); |
348 | 368 | ||
349 | cfg = PAS_DMA_RXCHAN_CFG_HBU(2); | 369 | cfg = PAS_DMA_RXCHAN_CFG_HBU(2); |
350 | 370 | ||
351 | if (translation_enabled()) | 371 | if (translation_enabled()) |
352 | cfg |= PAS_DMA_RXCHAN_CFG_CTR; | 372 | cfg |= PAS_DMA_RXCHAN_CFG_CTR; |
353 | 373 | ||
354 | write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); | 374 | write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg); |
355 | 375 | ||
356 | write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), | 376 | write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if), |
357 | PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); | 377 | PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma)); |
358 | 378 | ||
359 | write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), | 379 | write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if), |
360 | PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | | 380 | PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) | |
361 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); | 381 | PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3)); |
362 | 382 | ||
363 | cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 | | 383 | cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 | |
364 | PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | | 384 | PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP | |
365 | PAS_DMA_RXINT_CFG_HEN; | 385 | PAS_DMA_RXINT_CFG_HEN; |
366 | 386 | ||
367 | if (translation_enabled()) | 387 | if (translation_enabled()) |
368 | cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; | 388 | cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR; |
369 | 389 | ||
370 | write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); | 390 | write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg); |
371 | 391 | ||
372 | ring->next_to_fill = 0; | 392 | ring->next_to_fill = 0; |
373 | ring->next_to_clean = 0; | 393 | ring->next_to_clean = 0; |
374 | ring->mac = mac; | 394 | ring->mac = mac; |
375 | mac->rx = ring; | 395 | mac->rx = ring; |
376 | 396 | ||
377 | return 0; | 397 | return 0; |
378 | 398 | ||
379 | out_ring_desc: | 399 | out_ring_desc: |
380 | kfree(ring->ring_info); | 400 | kfree(ring->ring_info); |
381 | out_ring_info: | 401 | out_ring_info: |
382 | pasemi_dma_free_chan(&ring->chan); | 402 | pasemi_dma_free_chan(&ring->chan); |
383 | out_chan: | 403 | out_chan: |
384 | return -ENOMEM; | 404 | return -ENOMEM; |
385 | } | 405 | } |
386 | 406 | ||
387 | static struct pasemi_mac_txring * | 407 | static struct pasemi_mac_txring * |
388 | pasemi_mac_setup_tx_resources(const struct net_device *dev) | 408 | pasemi_mac_setup_tx_resources(const struct net_device *dev) |
389 | { | 409 | { |
390 | struct pasemi_mac *mac = netdev_priv(dev); | 410 | struct pasemi_mac *mac = netdev_priv(dev); |
391 | u32 val; | 411 | u32 val; |
392 | struct pasemi_mac_txring *ring; | 412 | struct pasemi_mac_txring *ring; |
393 | unsigned int cfg; | 413 | unsigned int cfg; |
394 | int chno; | 414 | int chno; |
395 | 415 | ||
396 | ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), | 416 | ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring), |
397 | offsetof(struct pasemi_mac_txring, chan)); | 417 | offsetof(struct pasemi_mac_txring, chan)); |
398 | 418 | ||
399 | if (!ring) { | 419 | if (!ring) { |
400 | dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); | 420 | dev_err(&mac->pdev->dev, "Can't allocate TX channel\n"); |
401 | goto out_chan; | 421 | goto out_chan; |
402 | } | 422 | } |
403 | 423 | ||
404 | chno = ring->chan.chno; | 424 | chno = ring->chan.chno; |
405 | 425 | ||
406 | spin_lock_init(&ring->lock); | 426 | spin_lock_init(&ring->lock); |
407 | 427 | ||
408 | ring->size = TX_RING_SIZE; | 428 | ring->size = TX_RING_SIZE; |
409 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * | 429 | ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) * |
410 | TX_RING_SIZE, GFP_KERNEL); | 430 | TX_RING_SIZE, GFP_KERNEL); |
411 | if (!ring->ring_info) | 431 | if (!ring->ring_info) |
412 | goto out_ring_info; | 432 | goto out_ring_info; |
413 | 433 | ||
414 | /* Allocate descriptors */ | 434 | /* Allocate descriptors */ |
415 | if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) | 435 | if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE)) |
416 | goto out_ring_desc; | 436 | goto out_ring_desc; |
417 | 437 | ||
418 | write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), | 438 | write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), |
419 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); | 439 | PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); |
420 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); | 440 | val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); |
421 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); | 441 | val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3); |
422 | 442 | ||
423 | write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); | 443 | write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); |
424 | 444 | ||
425 | cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | | 445 | cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | |
426 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | | 446 | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | |
427 | PAS_DMA_TXCHAN_CFG_UP | | 447 | PAS_DMA_TXCHAN_CFG_UP | |
428 | PAS_DMA_TXCHAN_CFG_WT(2); | 448 | PAS_DMA_TXCHAN_CFG_WT(2); |
429 | 449 | ||
430 | if (translation_enabled()) | 450 | if (translation_enabled()) |
431 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; | 451 | cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; |
432 | 452 | ||
433 | write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); | 453 | write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); |
434 | 454 | ||
435 | ring->next_to_fill = 0; | 455 | ring->next_to_fill = 0; |
436 | ring->next_to_clean = 0; | 456 | ring->next_to_clean = 0; |
437 | ring->mac = mac; | 457 | ring->mac = mac; |
438 | 458 | ||
439 | return ring; | 459 | return ring; |
440 | 460 | ||
441 | out_ring_desc: | 461 | out_ring_desc: |
442 | kfree(ring->ring_info); | 462 | kfree(ring->ring_info); |
443 | out_ring_info: | 463 | out_ring_info: |
444 | pasemi_dma_free_chan(&ring->chan); | 464 | pasemi_dma_free_chan(&ring->chan); |
445 | out_chan: | 465 | out_chan: |
446 | return NULL; | 466 | return NULL; |
447 | } | 467 | } |
448 | 468 | ||
449 | static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) | 469 | static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac) |
450 | { | 470 | { |
451 | struct pasemi_mac_txring *txring = tx_ring(mac); | 471 | struct pasemi_mac_txring *txring = tx_ring(mac); |
452 | unsigned int i, j; | 472 | unsigned int i, j; |
453 | struct pasemi_mac_buffer *info; | 473 | struct pasemi_mac_buffer *info; |
454 | dma_addr_t dmas[MAX_SKB_FRAGS+1]; | 474 | dma_addr_t dmas[MAX_SKB_FRAGS+1]; |
455 | int freed, nfrags; | 475 | int freed, nfrags; |
456 | int start, limit; | 476 | int start, limit; |
457 | 477 | ||
458 | start = txring->next_to_clean; | 478 | start = txring->next_to_clean; |
459 | limit = txring->next_to_fill; | 479 | limit = txring->next_to_fill; |
460 | 480 | ||
461 | /* Compensate for when fill has wrapped and clean has not */ | 481 | /* Compensate for when fill has wrapped and clean has not */ |
462 | if (start > limit) | 482 | if (start > limit) |
463 | limit += TX_RING_SIZE; | 483 | limit += TX_RING_SIZE; |
464 | 484 | ||
465 | for (i = start; i < limit; i += freed) { | 485 | for (i = start; i < limit; i += freed) { |
466 | info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; | 486 | info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)]; |
467 | if (info->dma && info->skb) { | 487 | if (info->dma && info->skb) { |
468 | nfrags = skb_shinfo(info->skb)->nr_frags; | 488 | nfrags = skb_shinfo(info->skb)->nr_frags; |
469 | for (j = 0; j <= nfrags; j++) | 489 | for (j = 0; j <= nfrags; j++) |
470 | dmas[j] = txring->ring_info[(i+1+j) & | 490 | dmas[j] = txring->ring_info[(i+1+j) & |
471 | (TX_RING_SIZE-1)].dma; | 491 | (TX_RING_SIZE-1)].dma; |
472 | freed = pasemi_mac_unmap_tx_skb(mac, nfrags, | 492 | freed = pasemi_mac_unmap_tx_skb(mac, nfrags, |
473 | info->skb, dmas); | 493 | info->skb, dmas); |
474 | } else | 494 | } else |
475 | freed = 2; | 495 | freed = 2; |
476 | } | 496 | } |
477 | 497 | ||
478 | kfree(txring->ring_info); | 498 | kfree(txring->ring_info); |
479 | pasemi_dma_free_chan(&txring->chan); | 499 | pasemi_dma_free_chan(&txring->chan); |
480 | 500 | ||
481 | } | 501 | } |
482 | 502 | ||
483 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | 503 | static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) |
484 | { | 504 | { |
485 | struct pasemi_mac_rxring *rx = rx_ring(mac); | 505 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
486 | unsigned int i; | 506 | unsigned int i; |
487 | struct pasemi_mac_buffer *info; | 507 | struct pasemi_mac_buffer *info; |
488 | 508 | ||
489 | for (i = 0; i < RX_RING_SIZE; i++) { | 509 | for (i = 0; i < RX_RING_SIZE; i++) { |
490 | info = &RX_DESC_INFO(rx, i); | 510 | info = &RX_DESC_INFO(rx, i); |
491 | if (info->skb && info->dma) { | 511 | if (info->skb && info->dma) { |
492 | pci_unmap_single(mac->dma_pdev, | 512 | pci_unmap_single(mac->dma_pdev, |
493 | info->dma, | 513 | info->dma, |
494 | info->skb->len, | 514 | info->skb->len, |
495 | PCI_DMA_FROMDEVICE); | 515 | PCI_DMA_FROMDEVICE); |
496 | dev_kfree_skb_any(info->skb); | 516 | dev_kfree_skb_any(info->skb); |
497 | } | 517 | } |
498 | info->dma = 0; | 518 | info->dma = 0; |
499 | info->skb = NULL; | 519 | info->skb = NULL; |
500 | } | 520 | } |
501 | 521 | ||
502 | for (i = 0; i < RX_RING_SIZE; i++) | 522 | for (i = 0; i < RX_RING_SIZE; i++) |
503 | RX_DESC(rx, i) = 0; | 523 | RX_BUFF(rx, i) = 0; |
524 | } | ||
504 | 525 | ||
526 | static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac) | ||
527 | { | ||
528 | pasemi_mac_free_rx_buffers(mac); | ||
529 | |||
505 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), | 530 | dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64), |
506 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); | 531 | rx_ring(mac)->buffers, rx_ring(mac)->buf_dma); |
507 | 532 | ||
508 | kfree(rx_ring(mac)->ring_info); | 533 | kfree(rx_ring(mac)->ring_info); |
509 | pasemi_dma_free_chan(&rx_ring(mac)->chan); | 534 | pasemi_dma_free_chan(&rx_ring(mac)->chan); |
510 | mac->rx = NULL; | 535 | mac->rx = NULL; |
511 | } | 536 | } |
512 | 537 | ||
513 | static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, | 538 | static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, |
514 | const int limit) | 539 | const int limit) |
515 | { | 540 | { |
516 | const struct pasemi_mac *mac = netdev_priv(dev); | 541 | const struct pasemi_mac *mac = netdev_priv(dev); |
517 | struct pasemi_mac_rxring *rx = rx_ring(mac); | 542 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
518 | int fill, count; | 543 | int fill, count; |
519 | 544 | ||
520 | if (limit <= 0) | 545 | if (limit <= 0) |
521 | return; | 546 | return; |
522 | 547 | ||
523 | fill = rx_ring(mac)->next_to_fill; | 548 | fill = rx_ring(mac)->next_to_fill; |
524 | for (count = 0; count < limit; count++) { | 549 | for (count = 0; count < limit; count++) { |
525 | struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); | 550 | struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill); |
526 | u64 *buff = &RX_BUFF(rx, fill); | 551 | u64 *buff = &RX_BUFF(rx, fill); |
527 | struct sk_buff *skb; | 552 | struct sk_buff *skb; |
528 | dma_addr_t dma; | 553 | dma_addr_t dma; |
529 | 554 | ||
530 | /* Entry in use? */ | 555 | /* Entry in use? */ |
531 | WARN_ON(*buff); | 556 | WARN_ON(*buff); |
532 | 557 | ||
533 | skb = dev_alloc_skb(BUF_SIZE); | 558 | skb = dev_alloc_skb(mac->bufsz); |
534 | skb_reserve(skb, LOCAL_SKB_ALIGN); | 559 | skb_reserve(skb, LOCAL_SKB_ALIGN); |
535 | 560 | ||
536 | if (unlikely(!skb)) | 561 | if (unlikely(!skb)) |
537 | break; | 562 | break; |
538 | 563 | ||
539 | dma = pci_map_single(mac->dma_pdev, skb->data, | 564 | dma = pci_map_single(mac->dma_pdev, skb->data, |
540 | BUF_SIZE - LOCAL_SKB_ALIGN, | 565 | mac->bufsz - LOCAL_SKB_ALIGN, |
541 | PCI_DMA_FROMDEVICE); | 566 | PCI_DMA_FROMDEVICE); |
542 | 567 | ||
543 | if (unlikely(dma_mapping_error(dma))) { | 568 | if (unlikely(dma_mapping_error(dma))) { |
544 | dev_kfree_skb_irq(info->skb); | 569 | dev_kfree_skb_irq(info->skb); |
545 | break; | 570 | break; |
546 | } | 571 | } |
547 | 572 | ||
548 | info->skb = skb; | 573 | info->skb = skb; |
549 | info->dma = dma; | 574 | info->dma = dma; |
550 | *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma); | 575 | *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma); |
551 | fill++; | 576 | fill++; |
552 | } | 577 | } |
553 | 578 | ||
554 | wmb(); | 579 | wmb(); |
555 | 580 | ||
556 | write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); | 581 | write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count); |
557 | 582 | ||
558 | rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & | 583 | rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) & |
559 | (RX_RING_SIZE - 1); | 584 | (RX_RING_SIZE - 1); |
560 | } | 585 | } |
561 | 586 | ||
562 | static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) | 587 | static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac) |
563 | { | 588 | { |
564 | struct pasemi_mac_rxring *rx = rx_ring(mac); | 589 | struct pasemi_mac_rxring *rx = rx_ring(mac); |
565 | unsigned int reg, pcnt; | 590 | unsigned int reg, pcnt; |
566 | /* Re-enable packet count interrupts: finally | 591 | /* Re-enable packet count interrupts: finally |
567 | * ack the packet count interrupt we got in rx_intr. | 592 | * ack the packet count interrupt we got in rx_intr. |
568 | */ | 593 | */ |
569 | 594 | ||
570 | pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; | 595 | pcnt = *rx->chan.status & PAS_STATUS_PCNT_M; |
571 | 596 | ||
572 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; | 597 | reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC; |
573 | 598 | ||
574 | if (*rx->chan.status & PAS_STATUS_TIMER) | 599 | if (*rx->chan.status & PAS_STATUS_TIMER) |
575 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; | 600 | reg |= PAS_IOB_DMA_RXCH_RESET_TINTC; |
576 | 601 | ||
577 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); | 602 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg); |
578 | } | 603 | } |
579 | 604 | ||
580 | static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) | 605 | static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac) |
581 | { | 606 | { |
582 | unsigned int reg, pcnt; | 607 | unsigned int reg, pcnt; |
583 | 608 | ||
584 | /* Re-enable packet count interrupts */ | 609 | /* Re-enable packet count interrupts */ |
585 | pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; | 610 | pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M; |
586 | 611 | ||
587 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; | 612 | reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC; |
588 | 613 | ||
589 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); | 614 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg); |
590 | } | 615 | } |
591 | 616 | ||
592 | 617 | ||
593 | static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, | 618 | static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac, |
594 | const u64 macrx) | 619 | const u64 macrx) |
595 | { | 620 | { |
596 | unsigned int rcmdsta, ccmdsta; | 621 | unsigned int rcmdsta, ccmdsta; |
597 | struct pasemi_dmachan *chan = &rx_ring(mac)->chan; | 622 | struct pasemi_dmachan *chan = &rx_ring(mac)->chan; |
598 | 623 | ||
599 | if (!netif_msg_rx_err(mac)) | 624 | if (!netif_msg_rx_err(mac)) |
600 | return; | 625 | return; |
601 | 626 | ||
602 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | 627 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
603 | ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); | 628 | ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno)); |
604 | 629 | ||
605 | printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", | 630 | printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n", |
606 | macrx, *chan->status); | 631 | macrx, *chan->status); |
607 | 632 | ||
608 | printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", | 633 | printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n", |
609 | rcmdsta, ccmdsta); | 634 | rcmdsta, ccmdsta); |
610 | } | 635 | } |
611 | 636 | ||
612 | static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, | 637 | static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac, |
613 | const u64 mactx) | 638 | const u64 mactx) |
614 | { | 639 | { |
615 | unsigned int cmdsta; | 640 | unsigned int cmdsta; |
616 | struct pasemi_dmachan *chan = &tx_ring(mac)->chan; | 641 | struct pasemi_dmachan *chan = &tx_ring(mac)->chan; |
617 | 642 | ||
618 | if (!netif_msg_tx_err(mac)) | 643 | if (!netif_msg_tx_err(mac)) |
619 | return; | 644 | return; |
620 | 645 | ||
621 | cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); | 646 | cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno)); |
622 | 647 | ||
623 | printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ | 648 | printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "\ |
624 | "tx status 0x%016lx\n", mactx, *chan->status); | 649 | "tx status 0x%016lx\n", mactx, *chan->status); |
625 | 650 | ||
626 | printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); | 651 | printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta); |
627 | } | 652 | } |
628 | 653 | ||
629 | static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, | 654 | static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, |
630 | const int limit) | 655 | const int limit) |
631 | { | 656 | { |
632 | const struct pasemi_dmachan *chan = &rx->chan; | 657 | const struct pasemi_dmachan *chan = &rx->chan; |
633 | struct pasemi_mac *mac = rx->mac; | 658 | struct pasemi_mac *mac = rx->mac; |
634 | struct pci_dev *pdev = mac->dma_pdev; | 659 | struct pci_dev *pdev = mac->dma_pdev; |
635 | unsigned int n; | 660 | unsigned int n; |
636 | int count, buf_index, tot_bytes, packets; | 661 | int count, buf_index, tot_bytes, packets; |
637 | struct pasemi_mac_buffer *info; | 662 | struct pasemi_mac_buffer *info; |
638 | struct sk_buff *skb; | 663 | struct sk_buff *skb; |
639 | unsigned int len; | 664 | unsigned int len; |
640 | u64 macrx, eval; | 665 | u64 macrx, eval; |
641 | dma_addr_t dma; | 666 | dma_addr_t dma; |
642 | 667 | ||
643 | tot_bytes = 0; | 668 | tot_bytes = 0; |
644 | packets = 0; | 669 | packets = 0; |
645 | 670 | ||
646 | spin_lock(&rx->lock); | 671 | spin_lock(&rx->lock); |
647 | 672 | ||
648 | n = rx->next_to_clean; | 673 | n = rx->next_to_clean; |
649 | 674 | ||
650 | prefetch(&RX_DESC(rx, n)); | 675 | prefetch(&RX_DESC(rx, n)); |
651 | 676 | ||
652 | for (count = 0; count < limit; count++) { | 677 | for (count = 0; count < limit; count++) { |
653 | macrx = RX_DESC(rx, n); | 678 | macrx = RX_DESC(rx, n); |
654 | prefetch(&RX_DESC(rx, n+4)); | 679 | prefetch(&RX_DESC(rx, n+4)); |
655 | 680 | ||
656 | if ((macrx & XCT_MACRX_E) || | 681 | if ((macrx & XCT_MACRX_E) || |
657 | (*chan->status & PAS_STATUS_ERROR)) | 682 | (*chan->status & PAS_STATUS_ERROR)) |
658 | pasemi_mac_rx_error(mac, macrx); | 683 | pasemi_mac_rx_error(mac, macrx); |
659 | 684 | ||
660 | if (!(macrx & XCT_MACRX_O)) | 685 | if (!(macrx & XCT_MACRX_O)) |
661 | break; | 686 | break; |
662 | 687 | ||
663 | info = NULL; | 688 | info = NULL; |
664 | 689 | ||
665 | BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); | 690 | BUG_ON(!(macrx & XCT_MACRX_RR_8BRES)); |
666 | 691 | ||
667 | eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> | 692 | eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >> |
668 | XCT_RXRES_8B_EVAL_S; | 693 | XCT_RXRES_8B_EVAL_S; |
669 | buf_index = eval-1; | 694 | buf_index = eval-1; |
670 | 695 | ||
671 | dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); | 696 | dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M); |
672 | info = &RX_DESC_INFO(rx, buf_index); | 697 | info = &RX_DESC_INFO(rx, buf_index); |
673 | 698 | ||
674 | skb = info->skb; | 699 | skb = info->skb; |
675 | 700 | ||
676 | prefetch_skb(skb); | 701 | prefetch_skb(skb); |
677 | 702 | ||
678 | len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; | 703 | len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; |
679 | 704 | ||
680 | pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN, | 705 | pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, |
681 | PCI_DMA_FROMDEVICE); | 706 | PCI_DMA_FROMDEVICE); |
682 | 707 | ||
683 | if (macrx & XCT_MACRX_CRC) { | 708 | if (macrx & XCT_MACRX_CRC) { |
684 | /* CRC error flagged */ | 709 | /* CRC error flagged */ |
685 | mac->netdev->stats.rx_errors++; | 710 | mac->netdev->stats.rx_errors++; |
686 | mac->netdev->stats.rx_crc_errors++; | 711 | mac->netdev->stats.rx_crc_errors++; |
687 | /* No need to free skb, it'll be reused */ | 712 | /* No need to free skb, it'll be reused */ |
688 | goto next; | 713 | goto next; |
689 | } | 714 | } |
690 | 715 | ||
691 | info->skb = NULL; | 716 | info->skb = NULL; |
692 | info->dma = 0; | 717 | info->dma = 0; |
693 | 718 | ||
694 | if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { | 719 | if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) { |
695 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 720 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
696 | skb->csum = (macrx & XCT_MACRX_CSUM_M) >> | 721 | skb->csum = (macrx & XCT_MACRX_CSUM_M) >> |
697 | XCT_MACRX_CSUM_S; | 722 | XCT_MACRX_CSUM_S; |
698 | } else | 723 | } else |
699 | skb->ip_summed = CHECKSUM_NONE; | 724 | skb->ip_summed = CHECKSUM_NONE; |
700 | 725 | ||
701 | packets++; | 726 | packets++; |
702 | tot_bytes += len; | 727 | tot_bytes += len; |
703 | 728 | ||
704 | /* Don't include CRC */ | 729 | /* Don't include CRC */ |
705 | skb_put(skb, len-4); | 730 | skb_put(skb, len-4); |
706 | 731 | ||
707 | skb->protocol = eth_type_trans(skb, mac->netdev); | 732 | skb->protocol = eth_type_trans(skb, mac->netdev); |
708 | lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); | 733 | lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); |
709 | 734 | ||
710 | next: | 735 | next: |
711 | RX_DESC(rx, n) = 0; | 736 | RX_DESC(rx, n) = 0; |
712 | RX_DESC(rx, n+1) = 0; | 737 | RX_DESC(rx, n+1) = 0; |
713 | 738 | ||
714 | /* Need to zero it out since hardware doesn't, since the | 739 | /* Need to zero it out since hardware doesn't, since the |
715 | * replenish loop uses it to tell when it's done. | 740 | * replenish loop uses it to tell when it's done. |
716 | */ | 741 | */ |
717 | RX_BUFF(rx, buf_index) = 0; | 742 | RX_BUFF(rx, buf_index) = 0; |
718 | 743 | ||
719 | n += 4; | 744 | n += 4; |
720 | } | 745 | } |
721 | 746 | ||
722 | if (n > RX_RING_SIZE) { | 747 | if (n > RX_RING_SIZE) { |
723 | /* Errata 5971 workaround: L2 target of headers */ | 748 | /* Errata 5971 workaround: L2 target of headers */ |
724 | write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); | 749 | write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0); |
725 | n &= (RX_RING_SIZE-1); | 750 | n &= (RX_RING_SIZE-1); |
726 | } | 751 | } |
727 | 752 | ||
728 | rx_ring(mac)->next_to_clean = n; | 753 | rx_ring(mac)->next_to_clean = n; |
729 | 754 | ||
730 | lro_flush_all(&mac->lro_mgr); | 755 | lro_flush_all(&mac->lro_mgr); |
731 | 756 | ||
732 | /* Increase is in number of 16-byte entries, and since each descriptor | 757 | /* Increase is in number of 16-byte entries, and since each descriptor |
733 | * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with | 758 | * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with |
734 | * count*2. | 759 | * count*2. |
735 | */ | 760 | */ |
736 | write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); | 761 | write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1); |
737 | 762 | ||
738 | pasemi_mac_replenish_rx_ring(mac->netdev, count); | 763 | pasemi_mac_replenish_rx_ring(mac->netdev, count); |
739 | 764 | ||
740 | mac->netdev->stats.rx_bytes += tot_bytes; | 765 | mac->netdev->stats.rx_bytes += tot_bytes; |
741 | mac->netdev->stats.rx_packets += packets; | 766 | mac->netdev->stats.rx_packets += packets; |
742 | 767 | ||
743 | spin_unlock(&rx_ring(mac)->lock); | 768 | spin_unlock(&rx_ring(mac)->lock); |
744 | 769 | ||
745 | return count; | 770 | return count; |
746 | } | 771 | } |
747 | 772 | ||
748 | /* Can't make this too large or we blow the kernel stack limits */ | 773 | /* Can't make this too large or we blow the kernel stack limits */ |
749 | #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) | 774 | #define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS) |
750 | 775 | ||
751 | static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) | 776 | static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring) |
752 | { | 777 | { |
753 | struct pasemi_dmachan *chan = &txring->chan; | 778 | struct pasemi_dmachan *chan = &txring->chan; |
754 | struct pasemi_mac *mac = txring->mac; | 779 | struct pasemi_mac *mac = txring->mac; |
755 | int i, j; | 780 | int i, j; |
756 | unsigned int start, descr_count, buf_count, batch_limit; | 781 | unsigned int start, descr_count, buf_count, batch_limit; |
757 | unsigned int ring_limit; | 782 | unsigned int ring_limit; |
758 | unsigned int total_count; | 783 | unsigned int total_count; |
759 | unsigned long flags; | 784 | unsigned long flags; |
760 | struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; | 785 | struct sk_buff *skbs[TX_CLEAN_BATCHSIZE]; |
761 | dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; | 786 | dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1]; |
762 | int nf[TX_CLEAN_BATCHSIZE]; | 787 | int nf[TX_CLEAN_BATCHSIZE]; |
763 | int nr_frags; | 788 | int nr_frags; |
764 | 789 | ||
765 | total_count = 0; | 790 | total_count = 0; |
766 | batch_limit = TX_CLEAN_BATCHSIZE; | 791 | batch_limit = TX_CLEAN_BATCHSIZE; |
767 | restart: | 792 | restart: |
768 | spin_lock_irqsave(&txring->lock, flags); | 793 | spin_lock_irqsave(&txring->lock, flags); |
769 | 794 | ||
770 | start = txring->next_to_clean; | 795 | start = txring->next_to_clean; |
771 | ring_limit = txring->next_to_fill; | 796 | ring_limit = txring->next_to_fill; |
772 | 797 | ||
773 | prefetch(&TX_DESC_INFO(txring, start+1).skb); | 798 | prefetch(&TX_DESC_INFO(txring, start+1).skb); |
774 | 799 | ||
775 | /* Compensate for when fill has wrapped but clean has not */ | 800 | /* Compensate for when fill has wrapped but clean has not */ |
776 | if (start > ring_limit) | 801 | if (start > ring_limit) |
777 | ring_limit += TX_RING_SIZE; | 802 | ring_limit += TX_RING_SIZE; |
778 | 803 | ||
779 | buf_count = 0; | 804 | buf_count = 0; |
780 | descr_count = 0; | 805 | descr_count = 0; |
781 | 806 | ||
782 | for (i = start; | 807 | for (i = start; |
783 | descr_count < batch_limit && i < ring_limit; | 808 | descr_count < batch_limit && i < ring_limit; |
784 | i += buf_count) { | 809 | i += buf_count) { |
785 | u64 mactx = TX_DESC(txring, i); | 810 | u64 mactx = TX_DESC(txring, i); |
786 | struct sk_buff *skb; | 811 | struct sk_buff *skb; |
787 | 812 | ||
788 | skb = TX_DESC_INFO(txring, i+1).skb; | 813 | skb = TX_DESC_INFO(txring, i+1).skb; |
789 | nr_frags = TX_DESC_INFO(txring, i).dma; | 814 | nr_frags = TX_DESC_INFO(txring, i).dma; |
790 | 815 | ||
791 | if ((mactx & XCT_MACTX_E) || | 816 | if ((mactx & XCT_MACTX_E) || |
792 | (*chan->status & PAS_STATUS_ERROR)) | 817 | (*chan->status & PAS_STATUS_ERROR)) |
793 | pasemi_mac_tx_error(mac, mactx); | 818 | pasemi_mac_tx_error(mac, mactx); |
794 | 819 | ||
795 | if (unlikely(mactx & XCT_MACTX_O)) | 820 | if (unlikely(mactx & XCT_MACTX_O)) |
796 | /* Not yet transmitted */ | 821 | /* Not yet transmitted */ |
797 | break; | 822 | break; |
798 | 823 | ||
799 | buf_count = 2 + nr_frags; | 824 | buf_count = 2 + nr_frags; |
800 | /* Since we always fill with an even number of entries, make | 825 | /* Since we always fill with an even number of entries, make |
801 | * sure we skip any unused one at the end as well. | 826 | * sure we skip any unused one at the end as well. |
802 | */ | 827 | */ |
803 | if (buf_count & 1) | 828 | if (buf_count & 1) |
804 | buf_count++; | 829 | buf_count++; |
805 | 830 | ||
806 | for (j = 0; j <= nr_frags; j++) | 831 | for (j = 0; j <= nr_frags; j++) |
807 | dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma; | 832 | dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma; |
808 | 833 | ||
809 | skbs[descr_count] = skb; | 834 | skbs[descr_count] = skb; |
810 | nf[descr_count] = nr_frags; | 835 | nf[descr_count] = nr_frags; |
811 | 836 | ||
812 | TX_DESC(txring, i) = 0; | 837 | TX_DESC(txring, i) = 0; |
813 | TX_DESC(txring, i+1) = 0; | 838 | TX_DESC(txring, i+1) = 0; |
814 | 839 | ||
815 | descr_count++; | 840 | descr_count++; |
816 | } | 841 | } |
817 | txring->next_to_clean = i & (TX_RING_SIZE-1); | 842 | txring->next_to_clean = i & (TX_RING_SIZE-1); |
818 | 843 | ||
819 | spin_unlock_irqrestore(&txring->lock, flags); | 844 | spin_unlock_irqrestore(&txring->lock, flags); |
820 | netif_wake_queue(mac->netdev); | 845 | netif_wake_queue(mac->netdev); |
821 | 846 | ||
822 | for (i = 0; i < descr_count; i++) | 847 | for (i = 0; i < descr_count; i++) |
823 | pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]); | 848 | pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]); |
824 | 849 | ||
825 | total_count += descr_count; | 850 | total_count += descr_count; |
826 | 851 | ||
827 | /* If the batch was full, try to clean more */ | 852 | /* If the batch was full, try to clean more */ |
828 | if (descr_count == batch_limit) | 853 | if (descr_count == batch_limit) |
829 | goto restart; | 854 | goto restart; |
830 | 855 | ||
831 | return total_count; | 856 | return total_count; |
832 | } | 857 | } |
833 | 858 | ||
834 | 859 | ||
835 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) | 860 | static irqreturn_t pasemi_mac_rx_intr(int irq, void *data) |
836 | { | 861 | { |
837 | const struct pasemi_mac_rxring *rxring = data; | 862 | const struct pasemi_mac_rxring *rxring = data; |
838 | struct pasemi_mac *mac = rxring->mac; | 863 | struct pasemi_mac *mac = rxring->mac; |
839 | struct net_device *dev = mac->netdev; | 864 | struct net_device *dev = mac->netdev; |
840 | const struct pasemi_dmachan *chan = &rxring->chan; | 865 | const struct pasemi_dmachan *chan = &rxring->chan; |
841 | unsigned int reg; | 866 | unsigned int reg; |
842 | 867 | ||
843 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) | 868 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
844 | return IRQ_NONE; | 869 | return IRQ_NONE; |
845 | 870 | ||
846 | /* Don't reset packet count so it won't fire again but clear | 871 | /* Don't reset packet count so it won't fire again but clear |
847 | * all others. | 872 | * all others. |
848 | */ | 873 | */ |
849 | 874 | ||
850 | reg = 0; | 875 | reg = 0; |
851 | if (*chan->status & PAS_STATUS_SOFT) | 876 | if (*chan->status & PAS_STATUS_SOFT) |
852 | reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; | 877 | reg |= PAS_IOB_DMA_RXCH_RESET_SINTC; |
853 | if (*chan->status & PAS_STATUS_ERROR) | 878 | if (*chan->status & PAS_STATUS_ERROR) |
854 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; | 879 | reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; |
855 | 880 | ||
856 | netif_rx_schedule(dev, &mac->napi); | 881 | netif_rx_schedule(dev, &mac->napi); |
857 | 882 | ||
858 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); | 883 | write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); |
859 | 884 | ||
860 | return IRQ_HANDLED; | 885 | return IRQ_HANDLED; |
861 | } | 886 | } |
862 | 887 | ||
863 | #define TX_CLEAN_INTERVAL HZ | 888 | #define TX_CLEAN_INTERVAL HZ |
864 | 889 | ||
865 | static void pasemi_mac_tx_timer(unsigned long data) | 890 | static void pasemi_mac_tx_timer(unsigned long data) |
866 | { | 891 | { |
867 | struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data; | 892 | struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data; |
868 | struct pasemi_mac *mac = txring->mac; | 893 | struct pasemi_mac *mac = txring->mac; |
869 | 894 | ||
870 | pasemi_mac_clean_tx(txring); | 895 | pasemi_mac_clean_tx(txring); |
871 | 896 | ||
872 | mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL); | 897 | mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL); |
873 | 898 | ||
874 | pasemi_mac_restart_tx_intr(mac); | 899 | pasemi_mac_restart_tx_intr(mac); |
875 | } | 900 | } |
876 | 901 | ||
877 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) | 902 | static irqreturn_t pasemi_mac_tx_intr(int irq, void *data) |
878 | { | 903 | { |
879 | struct pasemi_mac_txring *txring = data; | 904 | struct pasemi_mac_txring *txring = data; |
880 | const struct pasemi_dmachan *chan = &txring->chan; | 905 | const struct pasemi_dmachan *chan = &txring->chan; |
881 | struct pasemi_mac *mac = txring->mac; | 906 | struct pasemi_mac *mac = txring->mac; |
882 | unsigned int reg; | 907 | unsigned int reg; |
883 | 908 | ||
884 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) | 909 | if (!(*chan->status & PAS_STATUS_CAUSE_M)) |
885 | return IRQ_NONE; | 910 | return IRQ_NONE; |
886 | 911 | ||
887 | reg = 0; | 912 | reg = 0; |
888 | 913 | ||
889 | if (*chan->status & PAS_STATUS_SOFT) | 914 | if (*chan->status & PAS_STATUS_SOFT) |
890 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; | 915 | reg |= PAS_IOB_DMA_TXCH_RESET_SINTC; |
891 | if (*chan->status & PAS_STATUS_ERROR) | 916 | if (*chan->status & PAS_STATUS_ERROR) |
892 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; | 917 | reg |= PAS_IOB_DMA_TXCH_RESET_DINTC; |
893 | 918 | ||
894 | mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); | 919 | mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); |
895 | 920 | ||
896 | netif_rx_schedule(mac->netdev, &mac->napi); | 921 | netif_rx_schedule(mac->netdev, &mac->napi); |
897 | 922 | ||
898 | if (reg) | 923 | if (reg) |
899 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); | 924 | write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); |
900 | 925 | ||
901 | return IRQ_HANDLED; | 926 | return IRQ_HANDLED; |
902 | } | 927 | } |
903 | 928 | ||
904 | static void pasemi_mac_intf_disable(struct pasemi_mac *mac) | ||
905 | { | ||
906 | unsigned int flags; | ||
907 | |||
908 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
909 | flags &= ~PAS_MAC_CFG_PCFG_PE; | ||
910 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
911 | } | ||
912 | |||
913 | static void pasemi_mac_intf_enable(struct pasemi_mac *mac) | ||
914 | { | ||
915 | unsigned int flags; | ||
916 | |||
917 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | ||
918 | flags |= PAS_MAC_CFG_PCFG_PE; | ||
919 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | ||
920 | } | ||
921 | |||
922 | static void pasemi_adjust_link(struct net_device *dev) | 929 | static void pasemi_adjust_link(struct net_device *dev) |
923 | { | 930 | { |
924 | struct pasemi_mac *mac = netdev_priv(dev); | 931 | struct pasemi_mac *mac = netdev_priv(dev); |
925 | int msg; | 932 | int msg; |
926 | unsigned int flags; | 933 | unsigned int flags; |
927 | unsigned int new_flags; | 934 | unsigned int new_flags; |
928 | 935 | ||
929 | if (!mac->phydev->link) { | 936 | if (!mac->phydev->link) { |
930 | /* If no link, MAC speed settings don't matter. Just report | 937 | /* If no link, MAC speed settings don't matter. Just report |
931 | * link down and return. | 938 | * link down and return. |
932 | */ | 939 | */ |
933 | if (mac->link && netif_msg_link(mac)) | 940 | if (mac->link && netif_msg_link(mac)) |
934 | printk(KERN_INFO "%s: Link is down.\n", dev->name); | 941 | printk(KERN_INFO "%s: Link is down.\n", dev->name); |
935 | 942 | ||
936 | netif_carrier_off(dev); | 943 | netif_carrier_off(dev); |
937 | pasemi_mac_intf_disable(mac); | 944 | pasemi_mac_intf_disable(mac); |
938 | mac->link = 0; | 945 | mac->link = 0; |
939 | 946 | ||
940 | return; | 947 | return; |
941 | } else { | 948 | } else { |
942 | pasemi_mac_intf_enable(mac); | 949 | pasemi_mac_intf_enable(mac); |
943 | netif_carrier_on(dev); | 950 | netif_carrier_on(dev); |
944 | } | 951 | } |
945 | 952 | ||
946 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | 953 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
947 | new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | | 954 | new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | |
948 | PAS_MAC_CFG_PCFG_TSR_M); | 955 | PAS_MAC_CFG_PCFG_TSR_M); |
949 | 956 | ||
950 | if (!mac->phydev->duplex) | 957 | if (!mac->phydev->duplex) |
951 | new_flags |= PAS_MAC_CFG_PCFG_HD; | 958 | new_flags |= PAS_MAC_CFG_PCFG_HD; |
952 | 959 | ||
953 | switch (mac->phydev->speed) { | 960 | switch (mac->phydev->speed) { |
954 | case 1000: | 961 | case 1000: |
955 | new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | | 962 | new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | |
956 | PAS_MAC_CFG_PCFG_TSR_1G; | 963 | PAS_MAC_CFG_PCFG_TSR_1G; |
957 | break; | 964 | break; |
958 | case 100: | 965 | case 100: |
959 | new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | | 966 | new_flags |= PAS_MAC_CFG_PCFG_SPD_100M | |
960 | PAS_MAC_CFG_PCFG_TSR_100M; | 967 | PAS_MAC_CFG_PCFG_TSR_100M; |
961 | break; | 968 | break; |
962 | case 10: | 969 | case 10: |
963 | new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | | 970 | new_flags |= PAS_MAC_CFG_PCFG_SPD_10M | |
964 | PAS_MAC_CFG_PCFG_TSR_10M; | 971 | PAS_MAC_CFG_PCFG_TSR_10M; |
965 | break; | 972 | break; |
966 | default: | 973 | default: |
967 | printk("Unsupported speed %d\n", mac->phydev->speed); | 974 | printk("Unsupported speed %d\n", mac->phydev->speed); |
968 | } | 975 | } |
969 | 976 | ||
970 | /* Print on link or speed/duplex change */ | 977 | /* Print on link or speed/duplex change */ |
971 | msg = mac->link != mac->phydev->link || flags != new_flags; | 978 | msg = mac->link != mac->phydev->link || flags != new_flags; |
972 | 979 | ||
973 | mac->duplex = mac->phydev->duplex; | 980 | mac->duplex = mac->phydev->duplex; |
974 | mac->speed = mac->phydev->speed; | 981 | mac->speed = mac->phydev->speed; |
975 | mac->link = mac->phydev->link; | 982 | mac->link = mac->phydev->link; |
976 | 983 | ||
977 | if (new_flags != flags) | 984 | if (new_flags != flags) |
978 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); | 985 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); |
979 | 986 | ||
980 | if (msg && netif_msg_link(mac)) | 987 | if (msg && netif_msg_link(mac)) |
981 | printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", | 988 | printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n", |
982 | dev->name, mac->speed, mac->duplex ? "full" : "half"); | 989 | dev->name, mac->speed, mac->duplex ? "full" : "half"); |
983 | } | 990 | } |
984 | 991 | ||
985 | static int pasemi_mac_phy_init(struct net_device *dev) | 992 | static int pasemi_mac_phy_init(struct net_device *dev) |
986 | { | 993 | { |
987 | struct pasemi_mac *mac = netdev_priv(dev); | 994 | struct pasemi_mac *mac = netdev_priv(dev); |
988 | struct device_node *dn, *phy_dn; | 995 | struct device_node *dn, *phy_dn; |
989 | struct phy_device *phydev; | 996 | struct phy_device *phydev; |
990 | unsigned int phy_id; | 997 | unsigned int phy_id; |
991 | const phandle *ph; | 998 | const phandle *ph; |
992 | const unsigned int *prop; | 999 | const unsigned int *prop; |
993 | struct resource r; | 1000 | struct resource r; |
994 | int ret; | 1001 | int ret; |
995 | 1002 | ||
996 | dn = pci_device_to_OF_node(mac->pdev); | 1003 | dn = pci_device_to_OF_node(mac->pdev); |
997 | ph = of_get_property(dn, "phy-handle", NULL); | 1004 | ph = of_get_property(dn, "phy-handle", NULL); |
998 | if (!ph) | 1005 | if (!ph) |
999 | return -ENODEV; | 1006 | return -ENODEV; |
1000 | phy_dn = of_find_node_by_phandle(*ph); | 1007 | phy_dn = of_find_node_by_phandle(*ph); |
1001 | 1008 | ||
1002 | prop = of_get_property(phy_dn, "reg", NULL); | 1009 | prop = of_get_property(phy_dn, "reg", NULL); |
1003 | ret = of_address_to_resource(phy_dn->parent, 0, &r); | 1010 | ret = of_address_to_resource(phy_dn->parent, 0, &r); |
1004 | if (ret) | 1011 | if (ret) |
1005 | goto err; | 1012 | goto err; |
1006 | 1013 | ||
1007 | phy_id = *prop; | 1014 | phy_id = *prop; |
1008 | snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id); | 1015 | snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id); |
1009 | 1016 | ||
1010 | of_node_put(phy_dn); | 1017 | of_node_put(phy_dn); |
1011 | 1018 | ||
1012 | mac->link = 0; | 1019 | mac->link = 0; |
1013 | mac->speed = 0; | 1020 | mac->speed = 0; |
1014 | mac->duplex = -1; | 1021 | mac->duplex = -1; |
1015 | 1022 | ||
1016 | phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII); | 1023 | phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII); |
1017 | 1024 | ||
1018 | if (IS_ERR(phydev)) { | 1025 | if (IS_ERR(phydev)) { |
1019 | printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); | 1026 | printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); |
1020 | return PTR_ERR(phydev); | 1027 | return PTR_ERR(phydev); |
1021 | } | 1028 | } |
1022 | 1029 | ||
1023 | mac->phydev = phydev; | 1030 | mac->phydev = phydev; |
1024 | 1031 | ||
1025 | return 0; | 1032 | return 0; |
1026 | 1033 | ||
1027 | err: | 1034 | err: |
1028 | of_node_put(phy_dn); | 1035 | of_node_put(phy_dn); |
1029 | return -ENODEV; | 1036 | return -ENODEV; |
1030 | } | 1037 | } |
1031 | 1038 | ||
1032 | 1039 | ||
1033 | static int pasemi_mac_open(struct net_device *dev) | 1040 | static int pasemi_mac_open(struct net_device *dev) |
1034 | { | 1041 | { |
1035 | struct pasemi_mac *mac = netdev_priv(dev); | 1042 | struct pasemi_mac *mac = netdev_priv(dev); |
1036 | unsigned int flags; | 1043 | unsigned int flags; |
1037 | int ret; | 1044 | int ret; |
1038 | 1045 | ||
1039 | /* enable rx section */ | 1046 | /* enable rx section */ |
1040 | write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); | 1047 | write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); |
1041 | 1048 | ||
1042 | /* enable tx section */ | 1049 | /* enable tx section */ |
1043 | write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); | 1050 | write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); |
1044 | 1051 | ||
1045 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | | 1052 | flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | |
1046 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | | 1053 | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | |
1047 | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); | 1054 | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); |
1048 | 1055 | ||
1049 | write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); | 1056 | write_mac_reg(mac, PAS_MAC_CFG_TXP, flags); |
1050 | 1057 | ||
1051 | ret = pasemi_mac_setup_rx_resources(dev); | 1058 | ret = pasemi_mac_setup_rx_resources(dev); |
1052 | if (ret) | 1059 | if (ret) |
1053 | goto out_rx_resources; | 1060 | goto out_rx_resources; |
1054 | 1061 | ||
1055 | mac->tx = pasemi_mac_setup_tx_resources(dev); | 1062 | mac->tx = pasemi_mac_setup_tx_resources(dev); |
1056 | 1063 | ||
1057 | if (!mac->tx) | 1064 | if (!mac->tx) |
1058 | goto out_tx_ring; | 1065 | goto out_tx_ring; |
1059 | 1066 | ||
1060 | /* 0x3ff with 33MHz clock is about 31us */ | 1067 | /* 0x3ff with 33MHz clock is about 31us */ |
1061 | write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, | 1068 | write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, |
1062 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); | 1069 | PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); |
1063 | 1070 | ||
1064 | write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), | 1071 | write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno), |
1065 | PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); | 1072 | PAS_IOB_DMA_RXCH_CFG_CNTTH(256)); |
1066 | 1073 | ||
1067 | write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), | 1074 | write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), |
1068 | PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); | 1075 | PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); |
1069 | 1076 | ||
1070 | write_mac_reg(mac, PAS_MAC_IPC_CHNL, | 1077 | write_mac_reg(mac, PAS_MAC_IPC_CHNL, |
1071 | PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | | 1078 | PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) | |
1072 | PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); | 1079 | PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno)); |
1073 | 1080 | ||
1074 | /* enable rx if */ | 1081 | /* enable rx if */ |
1075 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | 1082 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), |
1076 | PAS_DMA_RXINT_RCMDSTA_EN | | 1083 | PAS_DMA_RXINT_RCMDSTA_EN | |
1077 | PAS_DMA_RXINT_RCMDSTA_DROPS_M | | 1084 | PAS_DMA_RXINT_RCMDSTA_DROPS_M | |
1078 | PAS_DMA_RXINT_RCMDSTA_BP | | 1085 | PAS_DMA_RXINT_RCMDSTA_BP | |
1079 | PAS_DMA_RXINT_RCMDSTA_OO | | 1086 | PAS_DMA_RXINT_RCMDSTA_OO | |
1080 | PAS_DMA_RXINT_RCMDSTA_BT); | 1087 | PAS_DMA_RXINT_RCMDSTA_BT); |
1081 | 1088 | ||
1082 | /* enable rx channel */ | 1089 | /* enable rx channel */ |
1083 | pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | | 1090 | pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU | |
1084 | PAS_DMA_RXCHAN_CCMDSTA_OD | | 1091 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
1085 | PAS_DMA_RXCHAN_CCMDSTA_FD | | 1092 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
1086 | PAS_DMA_RXCHAN_CCMDSTA_DT); | 1093 | PAS_DMA_RXCHAN_CCMDSTA_DT); |
1087 | 1094 | ||
1088 | /* enable tx channel */ | 1095 | /* enable tx channel */ |
1089 | pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | | 1096 | pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | |
1090 | PAS_DMA_TXCHAN_TCMDSTA_DB | | 1097 | PAS_DMA_TXCHAN_TCMDSTA_DB | |
1091 | PAS_DMA_TXCHAN_TCMDSTA_DE | | 1098 | PAS_DMA_TXCHAN_TCMDSTA_DE | |
1092 | PAS_DMA_TXCHAN_TCMDSTA_DA); | 1099 | PAS_DMA_TXCHAN_TCMDSTA_DA); |
1093 | 1100 | ||
1094 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); | 1101 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE); |
1095 | 1102 | ||
1096 | write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), | 1103 | write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno), |
1097 | RX_RING_SIZE>>1); | 1104 | RX_RING_SIZE>>1); |
1098 | 1105 | ||
1099 | /* Clear out any residual packet count state from firmware */ | 1106 | /* Clear out any residual packet count state from firmware */ |
1100 | pasemi_mac_restart_rx_intr(mac); | 1107 | pasemi_mac_restart_rx_intr(mac); |
1101 | pasemi_mac_restart_tx_intr(mac); | 1108 | pasemi_mac_restart_tx_intr(mac); |
1102 | 1109 | ||
1103 | flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; | 1110 | flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE; |
1104 | 1111 | ||
1105 | if (mac->type == MAC_TYPE_GMAC) | 1112 | if (mac->type == MAC_TYPE_GMAC) |
1106 | flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; | 1113 | flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; |
1107 | else | 1114 | else |
1108 | flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; | 1115 | flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G; |
1109 | 1116 | ||
1110 | /* Enable interface in MAC */ | 1117 | /* Enable interface in MAC */ |
1111 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | 1118 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
1112 | 1119 | ||
1113 | ret = pasemi_mac_phy_init(dev); | 1120 | ret = pasemi_mac_phy_init(dev); |
1114 | if (ret) { | 1121 | if (ret) { |
1115 | /* Since we won't get link notification, just enable RX */ | 1122 | /* Since we won't get link notification, just enable RX */ |
1116 | pasemi_mac_intf_enable(mac); | 1123 | pasemi_mac_intf_enable(mac); |
1117 | if (mac->type == MAC_TYPE_GMAC) { | 1124 | if (mac->type == MAC_TYPE_GMAC) { |
1118 | /* Warn for missing PHY on SGMII (1Gig) ports */ | 1125 | /* Warn for missing PHY on SGMII (1Gig) ports */ |
1119 | dev_warn(&mac->pdev->dev, | 1126 | dev_warn(&mac->pdev->dev, |
1120 | "PHY init failed: %d.\n", ret); | 1127 | "PHY init failed: %d.\n", ret); |
1121 | dev_warn(&mac->pdev->dev, | 1128 | dev_warn(&mac->pdev->dev, |
1122 | "Defaulting to 1Gbit full duplex\n"); | 1129 | "Defaulting to 1Gbit full duplex\n"); |
1123 | } | 1130 | } |
1124 | } | 1131 | } |
1125 | 1132 | ||
1126 | netif_start_queue(dev); | 1133 | netif_start_queue(dev); |
1127 | napi_enable(&mac->napi); | 1134 | napi_enable(&mac->napi); |
1128 | 1135 | ||
1129 | snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", | 1136 | snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", |
1130 | dev->name); | 1137 | dev->name); |
1131 | 1138 | ||
1132 | ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED, | 1139 | ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED, |
1133 | mac->tx_irq_name, mac->tx); | 1140 | mac->tx_irq_name, mac->tx); |
1134 | if (ret) { | 1141 | if (ret) { |
1135 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | 1142 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
1136 | mac->tx->chan.irq, ret); | 1143 | mac->tx->chan.irq, ret); |
1137 | goto out_tx_int; | 1144 | goto out_tx_int; |
1138 | } | 1145 | } |
1139 | 1146 | ||
1140 | snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", | 1147 | snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", |
1141 | dev->name); | 1148 | dev->name); |
1142 | 1149 | ||
1143 | ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED, | 1150 | ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED, |
1144 | mac->rx_irq_name, mac->rx); | 1151 | mac->rx_irq_name, mac->rx); |
1145 | if (ret) { | 1152 | if (ret) { |
1146 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", | 1153 | dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", |
1147 | mac->rx->chan.irq, ret); | 1154 | mac->rx->chan.irq, ret); |
1148 | goto out_rx_int; | 1155 | goto out_rx_int; |
1149 | } | 1156 | } |
1150 | 1157 | ||
1151 | if (mac->phydev) | 1158 | if (mac->phydev) |
1152 | phy_start(mac->phydev); | 1159 | phy_start(mac->phydev); |
1153 | 1160 | ||
1154 | init_timer(&mac->tx->clean_timer); | 1161 | init_timer(&mac->tx->clean_timer); |
1155 | mac->tx->clean_timer.function = pasemi_mac_tx_timer; | 1162 | mac->tx->clean_timer.function = pasemi_mac_tx_timer; |
1156 | mac->tx->clean_timer.data = (unsigned long)mac->tx; | 1163 | mac->tx->clean_timer.data = (unsigned long)mac->tx; |
1157 | mac->tx->clean_timer.expires = jiffies+HZ; | 1164 | mac->tx->clean_timer.expires = jiffies+HZ; |
1158 | add_timer(&mac->tx->clean_timer); | 1165 | add_timer(&mac->tx->clean_timer); |
1159 | 1166 | ||
1160 | return 0; | 1167 | return 0; |
1161 | 1168 | ||
1162 | out_rx_int: | 1169 | out_rx_int: |
1163 | free_irq(mac->tx->chan.irq, mac->tx); | 1170 | free_irq(mac->tx->chan.irq, mac->tx); |
1164 | out_tx_int: | 1171 | out_tx_int: |
1165 | napi_disable(&mac->napi); | 1172 | napi_disable(&mac->napi); |
1166 | netif_stop_queue(dev); | 1173 | netif_stop_queue(dev); |
1167 | out_tx_ring: | 1174 | out_tx_ring: |
1168 | if (mac->tx) | 1175 | if (mac->tx) |
1169 | pasemi_mac_free_tx_resources(mac); | 1176 | pasemi_mac_free_tx_resources(mac); |
1170 | pasemi_mac_free_rx_resources(mac); | 1177 | pasemi_mac_free_rx_resources(mac); |
1171 | out_rx_resources: | 1178 | out_rx_resources: |
1172 | 1179 | ||
1173 | return ret; | 1180 | return ret; |
1174 | } | 1181 | } |
1175 | 1182 | ||
1176 | #define MAX_RETRIES 5000 | 1183 | #define MAX_RETRIES 5000 |
1177 | 1184 | ||
1185 | static void pasemi_mac_pause_txchan(struct pasemi_mac *mac) | ||
1186 | { | ||
1187 | unsigned int sta, retries; | ||
1188 | int txch = tx_ring(mac)->chan.chno; | ||
1189 | |||
1190 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), | ||
1191 | PAS_DMA_TXCHAN_TCMDSTA_ST); | ||
1192 | |||
1193 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1194 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); | ||
1195 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) | ||
1196 | break; | ||
1197 | cond_resched(); | ||
1198 | } | ||
1199 | |||
1200 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) | ||
1201 | dev_err(&mac->dma_pdev->dev, | ||
1202 | "Failed to stop tx channel, tcmdsta %08x\n", sta); | ||
1203 | |||
1204 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); | ||
1205 | } | ||
1206 | |||
1207 | static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac) | ||
1208 | { | ||
1209 | unsigned int sta, retries; | ||
1210 | int rxch = rx_ring(mac)->chan.chno; | ||
1211 | |||
1212 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), | ||
1213 | PAS_DMA_RXCHAN_CCMDSTA_ST); | ||
1214 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1215 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); | ||
1216 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) | ||
1217 | break; | ||
1218 | cond_resched(); | ||
1219 | } | ||
1220 | |||
1221 | if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) | ||
1222 | dev_err(&mac->dma_pdev->dev, | ||
1223 | "Failed to stop rx channel, ccmdsta 08%x\n", sta); | ||
1224 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); | ||
1225 | } | ||
1226 | |||
1227 | static void pasemi_mac_pause_rxint(struct pasemi_mac *mac) | ||
1228 | { | ||
1229 | unsigned int sta, retries; | ||
1230 | |||
1231 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
1232 | PAS_DMA_RXINT_RCMDSTA_ST); | ||
1233 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1234 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1235 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) | ||
1236 | break; | ||
1237 | cond_resched(); | ||
1238 | } | ||
1239 | |||
1240 | if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) | ||
1241 | dev_err(&mac->dma_pdev->dev, | ||
1242 | "Failed to stop rx interface, rcmdsta %08x\n", sta); | ||
1243 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | ||
1244 | } | ||
1245 | |||
1178 | static int pasemi_mac_close(struct net_device *dev) | 1246 | static int pasemi_mac_close(struct net_device *dev) |
1179 | { | 1247 | { |
1180 | struct pasemi_mac *mac = netdev_priv(dev); | 1248 | struct pasemi_mac *mac = netdev_priv(dev); |
1181 | unsigned int sta; | 1249 | unsigned int sta; |
1182 | int retries; | ||
1183 | int rxch, txch; | 1250 | int rxch, txch; |
1184 | 1251 | ||
1185 | rxch = rx_ring(mac)->chan.chno; | 1252 | rxch = rx_ring(mac)->chan.chno; |
1186 | txch = tx_ring(mac)->chan.chno; | 1253 | txch = tx_ring(mac)->chan.chno; |
1187 | 1254 | ||
1188 | if (mac->phydev) { | 1255 | if (mac->phydev) { |
1189 | phy_stop(mac->phydev); | 1256 | phy_stop(mac->phydev); |
1190 | phy_disconnect(mac->phydev); | 1257 | phy_disconnect(mac->phydev); |
1191 | } | 1258 | } |
1192 | 1259 | ||
1193 | del_timer_sync(&mac->tx->clean_timer); | 1260 | del_timer_sync(&mac->tx->clean_timer); |
1194 | 1261 | ||
1195 | netif_stop_queue(dev); | 1262 | netif_stop_queue(dev); |
1196 | napi_disable(&mac->napi); | 1263 | napi_disable(&mac->napi); |
1197 | 1264 | ||
1198 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | 1265 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); |
1199 | if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | | 1266 | if (sta & (PAS_DMA_RXINT_RCMDSTA_BP | |
1200 | PAS_DMA_RXINT_RCMDSTA_OO | | 1267 | PAS_DMA_RXINT_RCMDSTA_OO | |
1201 | PAS_DMA_RXINT_RCMDSTA_BT)) | 1268 | PAS_DMA_RXINT_RCMDSTA_BT)) |
1202 | printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); | 1269 | printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta); |
1203 | 1270 | ||
1204 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); | 1271 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); |
1205 | if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | | 1272 | if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU | |
1206 | PAS_DMA_RXCHAN_CCMDSTA_OD | | 1273 | PAS_DMA_RXCHAN_CCMDSTA_OD | |
1207 | PAS_DMA_RXCHAN_CCMDSTA_FD | | 1274 | PAS_DMA_RXCHAN_CCMDSTA_FD | |
1208 | PAS_DMA_RXCHAN_CCMDSTA_DT)) | 1275 | PAS_DMA_RXCHAN_CCMDSTA_DT)) |
1209 | printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); | 1276 | printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta); |
1210 | 1277 | ||
1211 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); | 1278 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch)); |
1212 | if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | | 1279 | if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB | |
1213 | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) | 1280 | PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA)) |
1214 | printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); | 1281 | printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta); |
1215 | 1282 | ||
1216 | /* Clean out any pending buffers */ | 1283 | /* Clean out any pending buffers */ |
1217 | pasemi_mac_clean_tx(tx_ring(mac)); | 1284 | pasemi_mac_clean_tx(tx_ring(mac)); |
1218 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); | 1285 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); |
1219 | 1286 | ||
1220 | /* Disable interface */ | 1287 | pasemi_mac_pause_txchan(mac); |
1221 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), | 1288 | pasemi_mac_pause_rxint(mac); |
1222 | PAS_DMA_TXCHAN_TCMDSTA_ST); | 1289 | pasemi_mac_pause_rxchan(mac); |
1223 | write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
1224 | PAS_DMA_RXINT_RCMDSTA_ST); | ||
1225 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), | ||
1226 | PAS_DMA_RXCHAN_CCMDSTA_ST); | ||
1227 | 1290 | ||
1228 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1229 | sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch)); | ||
1230 | if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) | ||
1231 | break; | ||
1232 | cond_resched(); | ||
1233 | } | ||
1234 | |||
1235 | if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT) | ||
1236 | dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n"); | ||
1237 | |||
1238 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1239 | sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch)); | ||
1240 | if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) | ||
1241 | break; | ||
1242 | cond_resched(); | ||
1243 | } | ||
1244 | |||
1245 | if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT) | ||
1246 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n"); | ||
1247 | |||
1248 | for (retries = 0; retries < MAX_RETRIES; retries++) { | ||
1249 | sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1250 | if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT)) | ||
1251 | break; | ||
1252 | cond_resched(); | ||
1253 | } | ||
1254 | |||
1255 | if (sta & PAS_DMA_RXINT_RCMDSTA_ACT) | ||
1256 | dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n"); | ||
1257 | |||
1258 | /* Then, disable the channel. This must be done separately from | ||
1259 | * stopping, since you can't disable when active. | ||
1260 | */ | ||
1261 | |||
1262 | write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0); | ||
1263 | write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0); | ||
1264 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0); | ||
1265 | |||
1266 | free_irq(mac->tx->chan.irq, mac->tx); | 1291 | free_irq(mac->tx->chan.irq, mac->tx); |
1267 | free_irq(mac->rx->chan.irq, mac->rx); | 1292 | free_irq(mac->rx->chan.irq, mac->rx); |
1268 | 1293 | ||
1269 | /* Free resources */ | 1294 | /* Free resources */ |
1270 | pasemi_mac_free_rx_resources(mac); | 1295 | pasemi_mac_free_rx_resources(mac); |
1271 | pasemi_mac_free_tx_resources(mac); | 1296 | pasemi_mac_free_tx_resources(mac); |
1272 | 1297 | ||
1273 | return 0; | 1298 | return 0; |
1274 | } | 1299 | } |
1275 | 1300 | ||
1276 | static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) | 1301 | static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) |
1277 | { | 1302 | { |
1278 | struct pasemi_mac *mac = netdev_priv(dev); | 1303 | struct pasemi_mac *mac = netdev_priv(dev); |
1279 | struct pasemi_mac_txring *txring; | 1304 | struct pasemi_mac_txring *txring; |
1280 | u64 dflags, mactx; | 1305 | u64 dflags, mactx; |
1281 | dma_addr_t map[MAX_SKB_FRAGS+1]; | 1306 | dma_addr_t map[MAX_SKB_FRAGS+1]; |
1282 | unsigned int map_size[MAX_SKB_FRAGS+1]; | 1307 | unsigned int map_size[MAX_SKB_FRAGS+1]; |
1283 | unsigned long flags; | 1308 | unsigned long flags; |
1284 | int i, nfrags; | 1309 | int i, nfrags; |
1285 | int fill; | 1310 | int fill; |
1286 | 1311 | ||
1287 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; | 1312 | dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; |
1288 | 1313 | ||
1289 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1314 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1290 | const unsigned char *nh = skb_network_header(skb); | 1315 | const unsigned char *nh = skb_network_header(skb); |
1291 | 1316 | ||
1292 | switch (ip_hdr(skb)->protocol) { | 1317 | switch (ip_hdr(skb)->protocol) { |
1293 | case IPPROTO_TCP: | 1318 | case IPPROTO_TCP: |
1294 | dflags |= XCT_MACTX_CSUM_TCP; | 1319 | dflags |= XCT_MACTX_CSUM_TCP; |
1295 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); | 1320 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
1296 | dflags |= XCT_MACTX_IPO(nh - skb->data); | 1321 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
1297 | break; | 1322 | break; |
1298 | case IPPROTO_UDP: | 1323 | case IPPROTO_UDP: |
1299 | dflags |= XCT_MACTX_CSUM_UDP; | 1324 | dflags |= XCT_MACTX_CSUM_UDP; |
1300 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); | 1325 | dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); |
1301 | dflags |= XCT_MACTX_IPO(nh - skb->data); | 1326 | dflags |= XCT_MACTX_IPO(nh - skb->data); |
1302 | break; | 1327 | break; |
1303 | } | 1328 | } |
1304 | } | 1329 | } |
1305 | 1330 | ||
1306 | nfrags = skb_shinfo(skb)->nr_frags; | 1331 | nfrags = skb_shinfo(skb)->nr_frags; |
1307 | 1332 | ||
1308 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), | 1333 | map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), |
1309 | PCI_DMA_TODEVICE); | 1334 | PCI_DMA_TODEVICE); |
1310 | map_size[0] = skb_headlen(skb); | 1335 | map_size[0] = skb_headlen(skb); |
1311 | if (dma_mapping_error(map[0])) | 1336 | if (dma_mapping_error(map[0])) |
1312 | goto out_err_nolock; | 1337 | goto out_err_nolock; |
1313 | 1338 | ||
1314 | for (i = 0; i < nfrags; i++) { | 1339 | for (i = 0; i < nfrags; i++) { |
1315 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1340 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1316 | 1341 | ||
1317 | map[i+1] = pci_map_page(mac->dma_pdev, frag->page, | 1342 | map[i+1] = pci_map_page(mac->dma_pdev, frag->page, |
1318 | frag->page_offset, frag->size, | 1343 | frag->page_offset, frag->size, |
1319 | PCI_DMA_TODEVICE); | 1344 | PCI_DMA_TODEVICE); |
1320 | map_size[i+1] = frag->size; | 1345 | map_size[i+1] = frag->size; |
1321 | if (dma_mapping_error(map[i+1])) { | 1346 | if (dma_mapping_error(map[i+1])) { |
1322 | nfrags = i; | 1347 | nfrags = i; |
1323 | goto out_err_nolock; | 1348 | goto out_err_nolock; |
1324 | } | 1349 | } |
1325 | } | 1350 | } |
1326 | 1351 | ||
1327 | mactx = dflags | XCT_MACTX_LLEN(skb->len); | 1352 | mactx = dflags | XCT_MACTX_LLEN(skb->len); |
1328 | 1353 | ||
1329 | txring = tx_ring(mac); | 1354 | txring = tx_ring(mac); |
1330 | 1355 | ||
1331 | spin_lock_irqsave(&txring->lock, flags); | 1356 | spin_lock_irqsave(&txring->lock, flags); |
1332 | 1357 | ||
1333 | fill = txring->next_to_fill; | 1358 | fill = txring->next_to_fill; |
1334 | 1359 | ||
1335 | /* Avoid stepping on the same cache line that the DMA controller | 1360 | /* Avoid stepping on the same cache line that the DMA controller |
1336 | * is currently about to send, so leave at least 8 words available. | 1361 | * is currently about to send, so leave at least 8 words available. |
1337 | * Total free space needed is mactx + fragments + 8 | 1362 | * Total free space needed is mactx + fragments + 8 |
1338 | */ | 1363 | */ |
1339 | if (RING_AVAIL(txring) < nfrags + 10) { | 1364 | if (RING_AVAIL(txring) < nfrags + 10) { |
1340 | /* no room -- stop the queue and wait for tx intr */ | 1365 | /* no room -- stop the queue and wait for tx intr */ |
1341 | netif_stop_queue(dev); | 1366 | netif_stop_queue(dev); |
1342 | goto out_err; | 1367 | goto out_err; |
1343 | } | 1368 | } |
1344 | 1369 | ||
1345 | TX_DESC(txring, fill) = mactx; | 1370 | TX_DESC(txring, fill) = mactx; |
1346 | TX_DESC_INFO(txring, fill).dma = nfrags; | 1371 | TX_DESC_INFO(txring, fill).dma = nfrags; |
1347 | fill++; | 1372 | fill++; |
1348 | TX_DESC_INFO(txring, fill).skb = skb; | 1373 | TX_DESC_INFO(txring, fill).skb = skb; |
1349 | for (i = 0; i <= nfrags; i++) { | 1374 | for (i = 0; i <= nfrags; i++) { |
1350 | TX_DESC(txring, fill+i) = | 1375 | TX_DESC(txring, fill+i) = |
1351 | XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); | 1376 | XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); |
1352 | TX_DESC_INFO(txring, fill+i).dma = map[i]; | 1377 | TX_DESC_INFO(txring, fill+i).dma = map[i]; |
1353 | } | 1378 | } |
1354 | 1379 | ||
1355 | /* We have to add an even number of 8-byte entries to the ring | 1380 | /* We have to add an even number of 8-byte entries to the ring |
1356 | * even if the last one is unused. That means always an odd number | 1381 | * even if the last one is unused. That means always an odd number |
1357 | * of pointers + one mactx descriptor. | 1382 | * of pointers + one mactx descriptor. |
1358 | */ | 1383 | */ |
1359 | if (nfrags & 1) | 1384 | if (nfrags & 1) |
1360 | nfrags++; | 1385 | nfrags++; |
1361 | 1386 | ||
1362 | txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1); | 1387 | txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1); |
1363 | 1388 | ||
1364 | dev->stats.tx_packets++; | 1389 | dev->stats.tx_packets++; |
1365 | dev->stats.tx_bytes += skb->len; | 1390 | dev->stats.tx_bytes += skb->len; |
1366 | 1391 | ||
1367 | spin_unlock_irqrestore(&txring->lock, flags); | 1392 | spin_unlock_irqrestore(&txring->lock, flags); |
1368 | 1393 | ||
1369 | write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); | 1394 | write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1); |
1370 | 1395 | ||
1371 | return NETDEV_TX_OK; | 1396 | return NETDEV_TX_OK; |
1372 | 1397 | ||
1373 | out_err: | 1398 | out_err: |
1374 | spin_unlock_irqrestore(&txring->lock, flags); | 1399 | spin_unlock_irqrestore(&txring->lock, flags); |
1375 | out_err_nolock: | 1400 | out_err_nolock: |
1376 | while (nfrags--) | 1401 | while (nfrags--) |
1377 | pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], | 1402 | pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], |
1378 | PCI_DMA_TODEVICE); | 1403 | PCI_DMA_TODEVICE); |
1379 | 1404 | ||
1380 | return NETDEV_TX_BUSY; | 1405 | return NETDEV_TX_BUSY; |
1381 | } | 1406 | } |
1382 | 1407 | ||
1383 | static void pasemi_mac_set_rx_mode(struct net_device *dev) | 1408 | static void pasemi_mac_set_rx_mode(struct net_device *dev) |
1384 | { | 1409 | { |
1385 | const struct pasemi_mac *mac = netdev_priv(dev); | 1410 | const struct pasemi_mac *mac = netdev_priv(dev); |
1386 | unsigned int flags; | 1411 | unsigned int flags; |
1387 | 1412 | ||
1388 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); | 1413 | flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG); |
1389 | 1414 | ||
1390 | /* Set promiscuous */ | 1415 | /* Set promiscuous */ |
1391 | if (dev->flags & IFF_PROMISC) | 1416 | if (dev->flags & IFF_PROMISC) |
1392 | flags |= PAS_MAC_CFG_PCFG_PR; | 1417 | flags |= PAS_MAC_CFG_PCFG_PR; |
1393 | else | 1418 | else |
1394 | flags &= ~PAS_MAC_CFG_PCFG_PR; | 1419 | flags &= ~PAS_MAC_CFG_PCFG_PR; |
1395 | 1420 | ||
1396 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); | 1421 | write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags); |
1397 | } | 1422 | } |
1398 | 1423 | ||
1399 | 1424 | ||
1400 | static int pasemi_mac_poll(struct napi_struct *napi, int budget) | 1425 | static int pasemi_mac_poll(struct napi_struct *napi, int budget) |
1401 | { | 1426 | { |
1402 | struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); | 1427 | struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi); |
1403 | struct net_device *dev = mac->netdev; | 1428 | struct net_device *dev = mac->netdev; |
1404 | int pkts; | 1429 | int pkts; |
1405 | 1430 | ||
1406 | pasemi_mac_clean_tx(tx_ring(mac)); | 1431 | pasemi_mac_clean_tx(tx_ring(mac)); |
1407 | pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); | 1432 | pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); |
1408 | if (pkts < budget) { | 1433 | if (pkts < budget) { |
1409 | /* all done, no more packets present */ | 1434 | /* all done, no more packets present */ |
1410 | netif_rx_complete(dev, napi); | 1435 | netif_rx_complete(dev, napi); |
1411 | 1436 | ||
1412 | pasemi_mac_restart_rx_intr(mac); | 1437 | pasemi_mac_restart_rx_intr(mac); |
1413 | pasemi_mac_restart_tx_intr(mac); | 1438 | pasemi_mac_restart_tx_intr(mac); |
1414 | } | 1439 | } |
1415 | return pkts; | 1440 | return pkts; |
1416 | } | 1441 | } |
1417 | 1442 | ||
1443 | static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) | ||
1444 | { | ||
1445 | struct pasemi_mac *mac = netdev_priv(dev); | ||
1446 | unsigned int reg; | ||
1447 | unsigned int rcmdsta; | ||
1448 | int running; | ||
1449 | |||
1450 | if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) | ||
1451 | return -EINVAL; | ||
1452 | |||
1453 | running = netif_running(dev); | ||
1454 | |||
1455 | if (running) { | ||
1456 | /* Need to stop the interface, clean out all already | ||
1457 | * received buffers, free all unused buffers on the RX | ||
1458 | * interface ring, then finally re-fill the rx ring with | ||
1459 | * the new-size buffers and restart. | ||
1460 | */ | ||
1461 | |||
1462 | napi_disable(&mac->napi); | ||
1463 | netif_tx_disable(dev); | ||
1464 | pasemi_mac_intf_disable(mac); | ||
1465 | |||
1466 | rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)); | ||
1467 | pasemi_mac_pause_rxint(mac); | ||
1468 | pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); | ||
1469 | pasemi_mac_free_rx_buffers(mac); | ||
1470 | } | ||
1471 | |||
1472 | /* Change maxf, i.e. what size frames are accepted. | ||
1473 | * Need room for ethernet header and CRC word | ||
1474 | */ | ||
1475 | reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG); | ||
1476 | reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M; | ||
1477 | reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); | ||
1478 | write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); | ||
1479 | |||
1480 | dev->mtu = new_mtu; | ||
1481 | /* MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */ | ||
1482 | mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; | ||
1483 | |||
1484 | if (running) { | ||
1485 | write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), | ||
1486 | rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); | ||
1487 | |||
1488 | rx_ring(mac)->next_to_fill = 0; | ||
1489 | pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1); | ||
1490 | |||
1491 | napi_enable(&mac->napi); | ||
1492 | netif_start_queue(dev); | ||
1493 | pasemi_mac_intf_enable(mac); | ||
1494 | } | ||
1495 | |||
1496 | return 0; | ||
1497 | } | ||
1498 | |||
1418 | static int __devinit | 1499 | static int __devinit |
1419 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1500 | pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1420 | { | 1501 | { |
1421 | struct net_device *dev; | 1502 | struct net_device *dev; |
1422 | struct pasemi_mac *mac; | 1503 | struct pasemi_mac *mac; |
1423 | int err; | 1504 | int err; |
1424 | DECLARE_MAC_BUF(mac_buf); | 1505 | DECLARE_MAC_BUF(mac_buf); |
1425 | 1506 | ||
1426 | err = pci_enable_device(pdev); | 1507 | err = pci_enable_device(pdev); |
1427 | if (err) | 1508 | if (err) |
1428 | return err; | 1509 | return err; |
1429 | 1510 | ||
1430 | dev = alloc_etherdev(sizeof(struct pasemi_mac)); | 1511 | dev = alloc_etherdev(sizeof(struct pasemi_mac)); |
1431 | if (dev == NULL) { | 1512 | if (dev == NULL) { |
1432 | dev_err(&pdev->dev, | 1513 | dev_err(&pdev->dev, |
1433 | "pasemi_mac: Could not allocate ethernet device.\n"); | 1514 | "pasemi_mac: Could not allocate ethernet device.\n"); |
1434 | err = -ENOMEM; | 1515 | err = -ENOMEM; |
1435 | goto out_disable_device; | 1516 | goto out_disable_device; |
1436 | } | 1517 | } |
1437 | 1518 | ||
1438 | pci_set_drvdata(pdev, dev); | 1519 | pci_set_drvdata(pdev, dev); |
1439 | SET_NETDEV_DEV(dev, &pdev->dev); | 1520 | SET_NETDEV_DEV(dev, &pdev->dev); |
1440 | 1521 | ||
1441 | mac = netdev_priv(dev); | 1522 | mac = netdev_priv(dev); |
1442 | 1523 | ||
1443 | mac->pdev = pdev; | 1524 | mac->pdev = pdev; |
1444 | mac->netdev = dev; | 1525 | mac->netdev = dev; |
1445 | 1526 | ||
1446 | netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); | 1527 | netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); |
1447 | 1528 | ||
1448 | dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | | 1529 | dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | |
1449 | NETIF_F_HIGHDMA; | 1530 | NETIF_F_HIGHDMA; |
1450 | 1531 | ||
1451 | mac->lro_mgr.max_aggr = LRO_MAX_AGGR; | 1532 | mac->lro_mgr.max_aggr = LRO_MAX_AGGR; |
1452 | mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; | 1533 | mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; |
1453 | mac->lro_mgr.lro_arr = mac->lro_desc; | 1534 | mac->lro_mgr.lro_arr = mac->lro_desc; |
1454 | mac->lro_mgr.get_skb_header = get_skb_hdr; | 1535 | mac->lro_mgr.get_skb_header = get_skb_hdr; |
1455 | mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; | 1536 | mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; |
1456 | mac->lro_mgr.dev = mac->netdev; | 1537 | mac->lro_mgr.dev = mac->netdev; |
1457 | mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; | 1538 | mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; |
1458 | mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | 1539 | mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; |
1459 | 1540 | ||
1460 | 1541 | ||
1461 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); | 1542 | mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); |
1462 | if (!mac->dma_pdev) { | 1543 | if (!mac->dma_pdev) { |
1463 | dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); | 1544 | dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); |
1464 | err = -ENODEV; | 1545 | err = -ENODEV; |
1465 | goto out; | 1546 | goto out; |
1466 | } | 1547 | } |
1467 | 1548 | ||
1468 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); | 1549 | mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); |
1469 | if (!mac->iob_pdev) { | 1550 | if (!mac->iob_pdev) { |
1470 | dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); | 1551 | dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n"); |
1471 | err = -ENODEV; | 1552 | err = -ENODEV; |
1472 | goto out; | 1553 | goto out; |
1473 | } | 1554 | } |
1474 | 1555 | ||
1475 | /* get mac addr from device tree */ | 1556 | /* get mac addr from device tree */ |
1476 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { | 1557 | if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) { |
1477 | err = -ENODEV; | 1558 | err = -ENODEV; |
1478 | goto out; | 1559 | goto out; |
1479 | } | 1560 | } |
1480 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); | 1561 | memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr)); |
1481 | 1562 | ||
1482 | mac->dma_if = mac_to_intf(mac); | 1563 | mac->dma_if = mac_to_intf(mac); |
1483 | if (mac->dma_if < 0) { | 1564 | if (mac->dma_if < 0) { |
1484 | dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); | 1565 | dev_err(&mac->pdev->dev, "Can't map DMA interface\n"); |
1485 | err = -ENODEV; | 1566 | err = -ENODEV; |
1486 | goto out; | 1567 | goto out; |
1487 | } | 1568 | } |
1488 | 1569 | ||
1489 | switch (pdev->device) { | 1570 | switch (pdev->device) { |
1490 | case 0xa005: | 1571 | case 0xa005: |
1491 | mac->type = MAC_TYPE_GMAC; | 1572 | mac->type = MAC_TYPE_GMAC; |
1492 | break; | 1573 | break; |
1493 | case 0xa006: | 1574 | case 0xa006: |
1494 | mac->type = MAC_TYPE_XAUI; | 1575 | mac->type = MAC_TYPE_XAUI; |
1495 | break; | 1576 | break; |
1496 | default: | 1577 | default: |
1497 | err = -ENODEV; | 1578 | err = -ENODEV; |
1498 | goto out; | 1579 | goto out; |
1499 | } | 1580 | } |
1500 | 1581 | ||
1501 | dev->open = pasemi_mac_open; | 1582 | dev->open = pasemi_mac_open; |
1502 | dev->stop = pasemi_mac_close; | 1583 | dev->stop = pasemi_mac_close; |
1503 | dev->hard_start_xmit = pasemi_mac_start_tx; | 1584 | dev->hard_start_xmit = pasemi_mac_start_tx; |
1504 | dev->set_multicast_list = pasemi_mac_set_rx_mode; | 1585 | dev->set_multicast_list = pasemi_mac_set_rx_mode; |
1505 | dev->set_mac_address = pasemi_mac_set_mac_addr; | 1586 | dev->set_mac_address = pasemi_mac_set_mac_addr; |
1587 | dev->mtu = PE_DEF_MTU; | ||
1588 | /* 1500 MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */ | ||
1589 | mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; | ||
1590 | |||
1591 | dev->change_mtu = pasemi_mac_change_mtu; | ||
1506 | 1592 | ||
1507 | if (err) | 1593 | if (err) |
1508 | goto out; | 1594 | goto out; |
1509 | 1595 | ||
1510 | mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); | 1596 | mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); |
1511 | 1597 | ||
1512 | /* Enable most messages by default */ | 1598 | /* Enable most messages by default */ |
1513 | mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | 1599 | mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
1514 | 1600 | ||
1515 | err = register_netdev(dev); | 1601 | err = register_netdev(dev); |
1516 | 1602 | ||
1517 | if (err) { | 1603 | if (err) { |
1518 | dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", | 1604 | dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n", |
1519 | err); | 1605 | err); |
1520 | goto out; | 1606 | goto out; |
1521 | } else if (netif_msg_probe(mac)) | 1607 | } else if (netif_msg_probe(mac)) |
1522 | printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n", | 1608 | printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n", |
1523 | dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", | 1609 | dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", |
1524 | mac->dma_if, print_mac(mac_buf, dev->dev_addr)); | 1610 | mac->dma_if, print_mac(mac_buf, dev->dev_addr)); |
1525 | 1611 | ||
1526 | return err; | 1612 | return err; |
1527 | 1613 | ||
1528 | out: | 1614 | out: |
1529 | if (mac->iob_pdev) | 1615 | if (mac->iob_pdev) |
1530 | pci_dev_put(mac->iob_pdev); | 1616 | pci_dev_put(mac->iob_pdev); |
1531 | if (mac->dma_pdev) | 1617 | if (mac->dma_pdev) |
1532 | pci_dev_put(mac->dma_pdev); | 1618 | pci_dev_put(mac->dma_pdev); |
1533 | 1619 | ||
1534 | free_netdev(dev); | 1620 | free_netdev(dev); |
drivers/net/pasemi_mac.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2006 PA Semi, Inc | 2 | * Copyright (C) 2006 PA Semi, Inc |
3 | * | 3 | * |
4 | * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and | 4 | * Driver for the PA6T-1682M onchip 1G/10G Ethernet MACs, soft state and |
5 | * hardware register layouts. | 5 | * hardware register layouts. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #ifndef PASEMI_MAC_H | 21 | #ifndef PASEMI_MAC_H |
22 | #define PASEMI_MAC_H | 22 | #define PASEMI_MAC_H |
23 | 23 | ||
24 | #include <linux/ethtool.h> | 24 | #include <linux/ethtool.h> |
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/phy.h> | 27 | #include <linux/phy.h> |
28 | 28 | ||
29 | #define MAX_LRO_DESCRIPTORS 8 | 29 | #define MAX_LRO_DESCRIPTORS 8 |
30 | 30 | ||
31 | struct pasemi_mac_txring { | 31 | struct pasemi_mac_txring { |
32 | struct pasemi_dmachan chan; /* Must be first */ | 32 | struct pasemi_dmachan chan; /* Must be first */ |
33 | spinlock_t lock; | 33 | spinlock_t lock; |
34 | unsigned int size; | 34 | unsigned int size; |
35 | unsigned int next_to_fill; | 35 | unsigned int next_to_fill; |
36 | unsigned int next_to_clean; | 36 | unsigned int next_to_clean; |
37 | struct pasemi_mac_buffer *ring_info; | 37 | struct pasemi_mac_buffer *ring_info; |
38 | struct pasemi_mac *mac; /* Needed in intr handler */ | 38 | struct pasemi_mac *mac; /* Needed in intr handler */ |
39 | struct timer_list clean_timer; | 39 | struct timer_list clean_timer; |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct pasemi_mac_rxring { | 42 | struct pasemi_mac_rxring { |
43 | struct pasemi_dmachan chan; /* Must be first */ | 43 | struct pasemi_dmachan chan; /* Must be first */ |
44 | spinlock_t lock; | 44 | spinlock_t lock; |
45 | u64 *buffers; /* RX interface buffer ring */ | 45 | u64 *buffers; /* RX interface buffer ring */ |
46 | dma_addr_t buf_dma; | 46 | dma_addr_t buf_dma; |
47 | unsigned int size; | 47 | unsigned int size; |
48 | unsigned int next_to_fill; | 48 | unsigned int next_to_fill; |
49 | unsigned int next_to_clean; | 49 | unsigned int next_to_clean; |
50 | struct pasemi_mac_buffer *ring_info; | 50 | struct pasemi_mac_buffer *ring_info; |
51 | struct pasemi_mac *mac; /* Needed in intr handler */ | 51 | struct pasemi_mac *mac; /* Needed in intr handler */ |
52 | }; | 52 | }; |
53 | 53 | ||
54 | struct pasemi_mac { | 54 | struct pasemi_mac { |
55 | struct net_device *netdev; | 55 | struct net_device *netdev; |
56 | struct pci_dev *pdev; | 56 | struct pci_dev *pdev; |
57 | struct pci_dev *dma_pdev; | 57 | struct pci_dev *dma_pdev; |
58 | struct pci_dev *iob_pdev; | 58 | struct pci_dev *iob_pdev; |
59 | struct phy_device *phydev; | 59 | struct phy_device *phydev; |
60 | struct napi_struct napi; | 60 | struct napi_struct napi; |
61 | 61 | ||
62 | int bufsz; /* RX ring buffer size */ | ||
62 | u8 type; | 63 | u8 type; |
63 | #define MAC_TYPE_GMAC 1 | 64 | #define MAC_TYPE_GMAC 1 |
64 | #define MAC_TYPE_XAUI 2 | 65 | #define MAC_TYPE_XAUI 2 |
65 | u32 dma_if; | 66 | u32 dma_if; |
66 | 67 | ||
67 | u8 mac_addr[6]; | 68 | u8 mac_addr[6]; |
68 | 69 | ||
69 | struct net_lro_mgr lro_mgr; | 70 | struct net_lro_mgr lro_mgr; |
70 | struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; | 71 | struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; |
71 | struct timer_list rxtimer; | 72 | struct timer_list rxtimer; |
72 | unsigned int lro_max_aggr; | 73 | unsigned int lro_max_aggr; |
73 | 74 | ||
74 | struct pasemi_mac_txring *tx; | 75 | struct pasemi_mac_txring *tx; |
75 | struct pasemi_mac_rxring *rx; | 76 | struct pasemi_mac_rxring *rx; |
76 | char tx_irq_name[10]; /* "eth%d tx" */ | 77 | char tx_irq_name[10]; /* "eth%d tx" */ |
77 | char rx_irq_name[10]; /* "eth%d rx" */ | 78 | char rx_irq_name[10]; /* "eth%d rx" */ |
78 | int link; | 79 | int link; |
79 | int speed; | 80 | int speed; |
80 | int duplex; | 81 | int duplex; |
81 | 82 | ||
82 | unsigned int msg_enable; | 83 | unsigned int msg_enable; |
83 | char phy_id[BUS_ID_SIZE]; | 84 | char phy_id[BUS_ID_SIZE]; |
84 | }; | 85 | }; |
85 | 86 | ||
86 | /* Software status descriptor (ring_info) */ | 87 | /* Software status descriptor (ring_info) */ |
87 | struct pasemi_mac_buffer { | 88 | struct pasemi_mac_buffer { |
88 | struct sk_buff *skb; | 89 | struct sk_buff *skb; |
89 | dma_addr_t dma; | 90 | dma_addr_t dma; |
90 | }; | 91 | }; |
91 | 92 | ||
92 | 93 | ||
93 | /* PCI register offsets and formats */ | 94 | /* PCI register offsets and formats */ |
94 | 95 | ||
95 | 96 | ||
96 | /* MAC CFG register offsets */ | 97 | /* MAC CFG register offsets */ |
97 | enum { | 98 | enum { |
98 | PAS_MAC_CFG_PCFG = 0x80, | 99 | PAS_MAC_CFG_PCFG = 0x80, |
100 | PAS_MAC_CFG_MACCFG = 0x84, | ||
99 | PAS_MAC_CFG_ADR0 = 0x8c, | 101 | PAS_MAC_CFG_ADR0 = 0x8c, |
100 | PAS_MAC_CFG_ADR1 = 0x90, | 102 | PAS_MAC_CFG_ADR1 = 0x90, |
101 | PAS_MAC_CFG_TXP = 0x98, | 103 | PAS_MAC_CFG_TXP = 0x98, |
102 | PAS_MAC_IPC_CHNL = 0x208, | 104 | PAS_MAC_IPC_CHNL = 0x208, |
103 | }; | 105 | }; |
104 | 106 | ||
105 | /* MAC CFG register fields */ | 107 | /* MAC CFG register fields */ |
106 | #define PAS_MAC_CFG_PCFG_PE 0x80000000 | 108 | #define PAS_MAC_CFG_PCFG_PE 0x80000000 |
107 | #define PAS_MAC_CFG_PCFG_CE 0x40000000 | 109 | #define PAS_MAC_CFG_PCFG_CE 0x40000000 |
108 | #define PAS_MAC_CFG_PCFG_BU 0x20000000 | 110 | #define PAS_MAC_CFG_PCFG_BU 0x20000000 |
109 | #define PAS_MAC_CFG_PCFG_TT 0x10000000 | 111 | #define PAS_MAC_CFG_PCFG_TT 0x10000000 |
110 | #define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000 | 112 | #define PAS_MAC_CFG_PCFG_TSR_M 0x0c000000 |
111 | #define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000 | 113 | #define PAS_MAC_CFG_PCFG_TSR_10M 0x00000000 |
112 | #define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000 | 114 | #define PAS_MAC_CFG_PCFG_TSR_100M 0x04000000 |
113 | #define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000 | 115 | #define PAS_MAC_CFG_PCFG_TSR_1G 0x08000000 |
114 | #define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000 | 116 | #define PAS_MAC_CFG_PCFG_TSR_10G 0x0c000000 |
115 | #define PAS_MAC_CFG_PCFG_T24 0x02000000 | 117 | #define PAS_MAC_CFG_PCFG_T24 0x02000000 |
116 | #define PAS_MAC_CFG_PCFG_PR 0x01000000 | 118 | #define PAS_MAC_CFG_PCFG_PR 0x01000000 |
117 | #define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000 | 119 | #define PAS_MAC_CFG_PCFG_CRO_M 0x00ff0000 |
118 | #define PAS_MAC_CFG_PCFG_CRO_S 16 | 120 | #define PAS_MAC_CFG_PCFG_CRO_S 16 |
119 | #define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00 | 121 | #define PAS_MAC_CFG_PCFG_IPO_M 0x0000ff00 |
120 | #define PAS_MAC_CFG_PCFG_IPO_S 8 | 122 | #define PAS_MAC_CFG_PCFG_IPO_S 8 |
121 | #define PAS_MAC_CFG_PCFG_S1 0x00000080 | 123 | #define PAS_MAC_CFG_PCFG_S1 0x00000080 |
122 | #define PAS_MAC_CFG_PCFG_IO_M 0x00000060 | 124 | #define PAS_MAC_CFG_PCFG_IO_M 0x00000060 |
123 | #define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000 | 125 | #define PAS_MAC_CFG_PCFG_IO_MAC 0x00000000 |
124 | #define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020 | 126 | #define PAS_MAC_CFG_PCFG_IO_OFF 0x00000020 |
125 | #define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040 | 127 | #define PAS_MAC_CFG_PCFG_IO_IND_ETH 0x00000040 |
126 | #define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060 | 128 | #define PAS_MAC_CFG_PCFG_IO_IND_IP 0x00000060 |
127 | #define PAS_MAC_CFG_PCFG_LP 0x00000010 | 129 | #define PAS_MAC_CFG_PCFG_LP 0x00000010 |
128 | #define PAS_MAC_CFG_PCFG_TS 0x00000008 | 130 | #define PAS_MAC_CFG_PCFG_TS 0x00000008 |
129 | #define PAS_MAC_CFG_PCFG_HD 0x00000004 | 131 | #define PAS_MAC_CFG_PCFG_HD 0x00000004 |
130 | #define PAS_MAC_CFG_PCFG_SPD_M 0x00000003 | 132 | #define PAS_MAC_CFG_PCFG_SPD_M 0x00000003 |
131 | #define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000 | 133 | #define PAS_MAC_CFG_PCFG_SPD_10M 0x00000000 |
132 | #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 | 134 | #define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001 |
133 | #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 | 135 | #define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002 |
134 | #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 | 136 | #define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003 |
137 | |||
138 | #define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000 | ||
139 | #define PAS_MAC_CFG_MACCFG_TXT_S 28 | ||
140 | #define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000 | ||
141 | #define PAS_MAC_CFG_MACCFG_PRES_S 24 | ||
142 | #define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00 | ||
143 | #define PAS_MAC_CFG_MACCFG_MAXF_S 8 | ||
144 | #define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \ | ||
145 | PAS_MAC_CFG_MACCFG_MAXF_M) | ||
146 | #define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff | ||
147 | #define PAS_MAC_CFG_MACCFG_MINF_S 0 | ||
148 | |||
135 | #define PAS_MAC_CFG_TXP_FCF 0x01000000 | 149 | #define PAS_MAC_CFG_TXP_FCF 0x01000000 |
136 | #define PAS_MAC_CFG_TXP_FCE 0x00800000 | 150 | #define PAS_MAC_CFG_TXP_FCE 0x00800000 |
137 | #define PAS_MAC_CFG_TXP_FC 0x00400000 | 151 | #define PAS_MAC_CFG_TXP_FC 0x00400000 |
138 | #define PAS_MAC_CFG_TXP_FPC_M 0x00300000 | 152 | #define PAS_MAC_CFG_TXP_FPC_M 0x00300000 |
139 | #define PAS_MAC_CFG_TXP_FPC_S 20 | 153 | #define PAS_MAC_CFG_TXP_FPC_S 20 |
140 | #define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \ | 154 | #define PAS_MAC_CFG_TXP_FPC(x) (((x) << PAS_MAC_CFG_TXP_FPC_S) & \ |
141 | PAS_MAC_CFG_TXP_FPC_M) | 155 | PAS_MAC_CFG_TXP_FPC_M) |
142 | #define PAS_MAC_CFG_TXP_RT 0x00080000 | 156 | #define PAS_MAC_CFG_TXP_RT 0x00080000 |
143 | #define PAS_MAC_CFG_TXP_BL 0x00040000 | 157 | #define PAS_MAC_CFG_TXP_BL 0x00040000 |
144 | #define PAS_MAC_CFG_TXP_SL_M 0x00030000 | 158 | #define PAS_MAC_CFG_TXP_SL_M 0x00030000 |
145 | #define PAS_MAC_CFG_TXP_SL_S 16 | 159 | #define PAS_MAC_CFG_TXP_SL_S 16 |
146 | #define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \ | 160 | #define PAS_MAC_CFG_TXP_SL(x) (((x) << PAS_MAC_CFG_TXP_SL_S) & \ |
147 | PAS_MAC_CFG_TXP_SL_M) | 161 | PAS_MAC_CFG_TXP_SL_M) |
148 | #define PAS_MAC_CFG_TXP_COB_M 0x0000f000 | 162 | #define PAS_MAC_CFG_TXP_COB_M 0x0000f000 |
149 | #define PAS_MAC_CFG_TXP_COB_S 12 | 163 | #define PAS_MAC_CFG_TXP_COB_S 12 |
150 | #define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \ | 164 | #define PAS_MAC_CFG_TXP_COB(x) (((x) << PAS_MAC_CFG_TXP_COB_S) & \ |
151 | PAS_MAC_CFG_TXP_COB_M) | 165 | PAS_MAC_CFG_TXP_COB_M) |
152 | #define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00 | 166 | #define PAS_MAC_CFG_TXP_TIFT_M 0x00000f00 |
153 | #define PAS_MAC_CFG_TXP_TIFT_S 8 | 167 | #define PAS_MAC_CFG_TXP_TIFT_S 8 |
154 | #define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \ | 168 | #define PAS_MAC_CFG_TXP_TIFT(x) (((x) << PAS_MAC_CFG_TXP_TIFT_S) & \ |
155 | PAS_MAC_CFG_TXP_TIFT_M) | 169 | PAS_MAC_CFG_TXP_TIFT_M) |
156 | #define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff | 170 | #define PAS_MAC_CFG_TXP_TIFG_M 0x000000ff |
157 | #define PAS_MAC_CFG_TXP_TIFG_S 0 | 171 | #define PAS_MAC_CFG_TXP_TIFG_S 0 |
158 | #define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \ | 172 | #define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \ |
159 | PAS_MAC_CFG_TXP_TIFG_M) | 173 | PAS_MAC_CFG_TXP_TIFG_M) |
160 | 174 | ||
161 | #define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000 | 175 | #define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000 |
162 | #define PAS_MAC_IPC_CHNL_DCHNO_S 16 | 176 | #define PAS_MAC_IPC_CHNL_DCHNO_S 16 |
163 | #define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \ | 177 | #define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \ |
164 | PAS_MAC_IPC_CHNL_DCHNO_M) | 178 | PAS_MAC_IPC_CHNL_DCHNO_M) |
165 | #define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f | 179 | #define PAS_MAC_IPC_CHNL_BCH_M 0x0000003f |
166 | #define PAS_MAC_IPC_CHNL_BCH_S 0 | 180 | #define PAS_MAC_IPC_CHNL_BCH_S 0 |
167 | #define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \ | 181 | #define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \ |
168 | PAS_MAC_IPC_CHNL_BCH_M) | 182 | PAS_MAC_IPC_CHNL_BCH_M) |
169 | 183 | ||
170 | #endif /* PASEMI_MAC_H */ | 184 | #endif /* PASEMI_MAC_H */ |
171 | 185 |
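
For reference, the frame-size arithmetic introduced by pasemi_mac_change_mtu() above can be exercised on its own: the MAXF field of PAS_MAC_CFG_MACCFG is programmed with the largest accepted frame (MTU plus Ethernet header plus CRC word), while the RX buffer size adds FCS, skb alignment and two 64-byte cachelines of slack. The sketch below is a minimal user-space illustration, not driver code; the ETH_HLEN/ETH_FCS_LEN values and the 2-byte LOCAL_SKB_ALIGN are assumptions made for illustration, and only the MAXF macros are taken from the pasemi_mac.h hunk above.

#include <stdio.h>

#define ETH_HLEN        14      /* Ethernet header length (assumed, as in <linux/if_ether.h>) */
#define ETH_FCS_LEN     4       /* CRC word */
#define LOCAL_SKB_ALIGN 2       /* assumed skb alignment used by the driver */

/* MAXF field macros as added to pasemi_mac.h in this commit */
#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
#define PAS_MAC_CFG_MACCFG_MAXF_S 8
#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
                                    PAS_MAC_CFG_MACCFG_MAXF_M)

int main(void)
{
	unsigned int new_mtu = 1500;	/* 1500-byte example, the limit named in the commit message */
	unsigned int reg = 0;		/* stand-in for the MACCFG read-back */
	unsigned int bufsz;

	/* Same update as pasemi_mac_change_mtu(): clear MAXF, set MTU + header + CRC */
	reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
	reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);

	/* RX buffer size: MTU + header + FCS + alignment + 2 64B cachelines */
	bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;

	printf("MACCFG MAXF field 0x%08x (frame limit %u bytes)\n",
	       reg, new_mtu + ETH_HLEN + 4);
	printf("RX buffer size %u bytes\n", bufsz);
	return 0;
}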