Commit 39eddb4c3970e9aadbc87b8a7cab7b4fefff077f
Committed by
David S. Miller
1 parent
24e94de41e
Exists in
master
and in
7 other branches
macb: avoid lockup when TGO during underrun
In rare cases when an underrun occurs, all macb buffers were consumed and the netif_queue was stopped indefinitely. This happens when the TGO (transfer ongoing) bit in the TSR is set (as well as UND). It seems like cleaning up after the underrun leaves the driver and the macb hardware in an inconsistent state. The result of this is that in the following calls to macb_tx no TX buffers are released -> the netif_queue was stopped, and never woken up again. The solution is to disable the transmitter, if TGO is set, before cleaning up after the underrun, and re-enable the transmitter when the cleaning up is done. Signed-off-by: Richard Röjfors <richard.rojfors@endian.se> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 8 additions and 0 deletions Inline Diff
drivers/net/macb.c
1 | /* | 1 | /* |
2 | * Atmel MACB Ethernet Controller driver | 2 | * Atmel MACB Ethernet Controller driver |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2006 Atmel Corporation | 4 | * Copyright (C) 2004-2006 Atmel Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/moduleparam.h> | 13 | #include <linux/moduleparam.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/netdevice.h> | 18 | #include <linux/netdevice.h> |
19 | #include <linux/etherdevice.h> | 19 | #include <linux/etherdevice.h> |
20 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/phy.h> | 22 | #include <linux/phy.h> |
23 | 23 | ||
24 | #include <mach/board.h> | 24 | #include <mach/board.h> |
25 | #include <mach/cpu.h> | 25 | #include <mach/cpu.h> |
26 | 26 | ||
27 | #include "macb.h" | 27 | #include "macb.h" |
28 | 28 | ||
/* Size of each receive DMA buffer and number of RX ring entries */
#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512
#define RX_RING_BYTES		(sizeof(struct dma_desc) * RX_RING_SIZE)

/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
#define RX_OFFSET		2

#define TX_RING_SIZE		128
#define DEF_TX_RING_PENDING	(TX_RING_SIZE - 1)
#define TX_RING_BYTES		(sizeof(struct dma_desc) * TX_RING_SIZE)

/* Number of descriptors deliberately kept unused in the TX ring */
#define TX_RING_GAP(bp)						\
	(TX_RING_SIZE - (bp)->tx_pending)
/* Free TX descriptors, accounting for head/tail wrap-around */
#define TX_BUFFS_AVAIL(bp)					\
	(((bp)->tx_tail <= (bp)->tx_head) ?			\
	 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :	\
	 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
#define NEXT_TX(n)		(((n) + 1) & (TX_RING_SIZE - 1))

#define NEXT_RX(n)		(((n) + 1) & (RX_RING_SIZE - 1))

/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)

/* RX interrupt sources serviced by the NAPI poll loop */
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
				 | MACB_BIT(ISR_ROVR))
55 | 55 | ||
56 | static void __macb_set_hwaddr(struct macb *bp) | 56 | static void __macb_set_hwaddr(struct macb *bp) |
57 | { | 57 | { |
58 | u32 bottom; | 58 | u32 bottom; |
59 | u16 top; | 59 | u16 top; |
60 | 60 | ||
61 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); | 61 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); |
62 | macb_writel(bp, SA1B, bottom); | 62 | macb_writel(bp, SA1B, bottom); |
63 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); | 63 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); |
64 | macb_writel(bp, SA1T, top); | 64 | macb_writel(bp, SA1T, top); |
65 | } | 65 | } |
66 | 66 | ||
67 | static void __init macb_get_hwaddr(struct macb *bp) | 67 | static void __init macb_get_hwaddr(struct macb *bp) |
68 | { | 68 | { |
69 | u32 bottom; | 69 | u32 bottom; |
70 | u16 top; | 70 | u16 top; |
71 | u8 addr[6]; | 71 | u8 addr[6]; |
72 | 72 | ||
73 | bottom = macb_readl(bp, SA1B); | 73 | bottom = macb_readl(bp, SA1B); |
74 | top = macb_readl(bp, SA1T); | 74 | top = macb_readl(bp, SA1T); |
75 | 75 | ||
76 | addr[0] = bottom & 0xff; | 76 | addr[0] = bottom & 0xff; |
77 | addr[1] = (bottom >> 8) & 0xff; | 77 | addr[1] = (bottom >> 8) & 0xff; |
78 | addr[2] = (bottom >> 16) & 0xff; | 78 | addr[2] = (bottom >> 16) & 0xff; |
79 | addr[3] = (bottom >> 24) & 0xff; | 79 | addr[3] = (bottom >> 24) & 0xff; |
80 | addr[4] = top & 0xff; | 80 | addr[4] = top & 0xff; |
81 | addr[5] = (top >> 8) & 0xff; | 81 | addr[5] = (top >> 8) & 0xff; |
82 | 82 | ||
83 | if (is_valid_ether_addr(addr)) { | 83 | if (is_valid_ether_addr(addr)) { |
84 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); | 84 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); |
85 | } else { | 85 | } else { |
86 | dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); | 86 | dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); |
87 | random_ether_addr(bp->dev->dev_addr); | 87 | random_ether_addr(bp->dev->dev_addr); |
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
91 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 91 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
92 | { | 92 | { |
93 | struct macb *bp = bus->priv; | 93 | struct macb *bp = bus->priv; |
94 | int value; | 94 | int value; |
95 | 95 | ||
96 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | 96 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
97 | | MACB_BF(RW, MACB_MAN_READ) | 97 | | MACB_BF(RW, MACB_MAN_READ) |
98 | | MACB_BF(PHYA, mii_id) | 98 | | MACB_BF(PHYA, mii_id) |
99 | | MACB_BF(REGA, regnum) | 99 | | MACB_BF(REGA, regnum) |
100 | | MACB_BF(CODE, MACB_MAN_CODE))); | 100 | | MACB_BF(CODE, MACB_MAN_CODE))); |
101 | 101 | ||
102 | /* wait for end of transfer */ | 102 | /* wait for end of transfer */ |
103 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | 103 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
104 | cpu_relax(); | 104 | cpu_relax(); |
105 | 105 | ||
106 | value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); | 106 | value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); |
107 | 107 | ||
108 | return value; | 108 | return value; |
109 | } | 109 | } |
110 | 110 | ||
111 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | 111 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
112 | u16 value) | 112 | u16 value) |
113 | { | 113 | { |
114 | struct macb *bp = bus->priv; | 114 | struct macb *bp = bus->priv; |
115 | 115 | ||
116 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | 116 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
117 | | MACB_BF(RW, MACB_MAN_WRITE) | 117 | | MACB_BF(RW, MACB_MAN_WRITE) |
118 | | MACB_BF(PHYA, mii_id) | 118 | | MACB_BF(PHYA, mii_id) |
119 | | MACB_BF(REGA, regnum) | 119 | | MACB_BF(REGA, regnum) |
120 | | MACB_BF(CODE, MACB_MAN_CODE) | 120 | | MACB_BF(CODE, MACB_MAN_CODE) |
121 | | MACB_BF(DATA, value))); | 121 | | MACB_BF(DATA, value))); |
122 | 122 | ||
123 | /* wait for end of transfer */ | 123 | /* wait for end of transfer */ |
124 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) | 124 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
125 | cpu_relax(); | 125 | cpu_relax(); |
126 | 126 | ||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
/* No hardware reset is needed for this MDIO bus; report success. */
static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
134 | 134 | ||
135 | static void macb_handle_link_change(struct net_device *dev) | 135 | static void macb_handle_link_change(struct net_device *dev) |
136 | { | 136 | { |
137 | struct macb *bp = netdev_priv(dev); | 137 | struct macb *bp = netdev_priv(dev); |
138 | struct phy_device *phydev = bp->phy_dev; | 138 | struct phy_device *phydev = bp->phy_dev; |
139 | unsigned long flags; | 139 | unsigned long flags; |
140 | 140 | ||
141 | int status_change = 0; | 141 | int status_change = 0; |
142 | 142 | ||
143 | spin_lock_irqsave(&bp->lock, flags); | 143 | spin_lock_irqsave(&bp->lock, flags); |
144 | 144 | ||
145 | if (phydev->link) { | 145 | if (phydev->link) { |
146 | if ((bp->speed != phydev->speed) || | 146 | if ((bp->speed != phydev->speed) || |
147 | (bp->duplex != phydev->duplex)) { | 147 | (bp->duplex != phydev->duplex)) { |
148 | u32 reg; | 148 | u32 reg; |
149 | 149 | ||
150 | reg = macb_readl(bp, NCFGR); | 150 | reg = macb_readl(bp, NCFGR); |
151 | reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); | 151 | reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); |
152 | 152 | ||
153 | if (phydev->duplex) | 153 | if (phydev->duplex) |
154 | reg |= MACB_BIT(FD); | 154 | reg |= MACB_BIT(FD); |
155 | if (phydev->speed == SPEED_100) | 155 | if (phydev->speed == SPEED_100) |
156 | reg |= MACB_BIT(SPD); | 156 | reg |= MACB_BIT(SPD); |
157 | 157 | ||
158 | macb_writel(bp, NCFGR, reg); | 158 | macb_writel(bp, NCFGR, reg); |
159 | 159 | ||
160 | bp->speed = phydev->speed; | 160 | bp->speed = phydev->speed; |
161 | bp->duplex = phydev->duplex; | 161 | bp->duplex = phydev->duplex; |
162 | status_change = 1; | 162 | status_change = 1; |
163 | } | 163 | } |
164 | } | 164 | } |
165 | 165 | ||
166 | if (phydev->link != bp->link) { | 166 | if (phydev->link != bp->link) { |
167 | if (!phydev->link) { | 167 | if (!phydev->link) { |
168 | bp->speed = 0; | 168 | bp->speed = 0; |
169 | bp->duplex = -1; | 169 | bp->duplex = -1; |
170 | } | 170 | } |
171 | bp->link = phydev->link; | 171 | bp->link = phydev->link; |
172 | 172 | ||
173 | status_change = 1; | 173 | status_change = 1; |
174 | } | 174 | } |
175 | 175 | ||
176 | spin_unlock_irqrestore(&bp->lock, flags); | 176 | spin_unlock_irqrestore(&bp->lock, flags); |
177 | 177 | ||
178 | if (status_change) { | 178 | if (status_change) { |
179 | if (phydev->link) | 179 | if (phydev->link) |
180 | printk(KERN_INFO "%s: link up (%d/%s)\n", | 180 | printk(KERN_INFO "%s: link up (%d/%s)\n", |
181 | dev->name, phydev->speed, | 181 | dev->name, phydev->speed, |
182 | DUPLEX_FULL == phydev->duplex ? "Full":"Half"); | 182 | DUPLEX_FULL == phydev->duplex ? "Full":"Half"); |
183 | else | 183 | else |
184 | printk(KERN_INFO "%s: link down\n", dev->name); | 184 | printk(KERN_INFO "%s: link down\n", dev->name); |
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | /* based on au1000_eth. c*/ | 188 | /* based on au1000_eth. c*/ |
189 | static int macb_mii_probe(struct net_device *dev) | 189 | static int macb_mii_probe(struct net_device *dev) |
190 | { | 190 | { |
191 | struct macb *bp = netdev_priv(dev); | 191 | struct macb *bp = netdev_priv(dev); |
192 | struct phy_device *phydev = NULL; | 192 | struct phy_device *phydev = NULL; |
193 | struct eth_platform_data *pdata; | 193 | struct eth_platform_data *pdata; |
194 | int phy_addr; | 194 | int phy_addr; |
195 | 195 | ||
196 | /* find the first phy */ | 196 | /* find the first phy */ |
197 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { | 197 | for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { |
198 | if (bp->mii_bus->phy_map[phy_addr]) { | 198 | if (bp->mii_bus->phy_map[phy_addr]) { |
199 | phydev = bp->mii_bus->phy_map[phy_addr]; | 199 | phydev = bp->mii_bus->phy_map[phy_addr]; |
200 | break; | 200 | break; |
201 | } | 201 | } |
202 | } | 202 | } |
203 | 203 | ||
204 | if (!phydev) { | 204 | if (!phydev) { |
205 | printk (KERN_ERR "%s: no PHY found\n", dev->name); | 205 | printk (KERN_ERR "%s: no PHY found\n", dev->name); |
206 | return -1; | 206 | return -1; |
207 | } | 207 | } |
208 | 208 | ||
209 | pdata = bp->pdev->dev.platform_data; | 209 | pdata = bp->pdev->dev.platform_data; |
210 | /* TODO : add pin_irq */ | 210 | /* TODO : add pin_irq */ |
211 | 211 | ||
212 | /* attach the mac to the phy */ | 212 | /* attach the mac to the phy */ |
213 | if (pdata && pdata->is_rmii) { | 213 | if (pdata && pdata->is_rmii) { |
214 | phydev = phy_connect(dev, phydev->dev.bus_id, | 214 | phydev = phy_connect(dev, phydev->dev.bus_id, |
215 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); | 215 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); |
216 | } else { | 216 | } else { |
217 | phydev = phy_connect(dev, phydev->dev.bus_id, | 217 | phydev = phy_connect(dev, phydev->dev.bus_id, |
218 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); | 218 | &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); |
219 | } | 219 | } |
220 | 220 | ||
221 | if (IS_ERR(phydev)) { | 221 | if (IS_ERR(phydev)) { |
222 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); | 222 | printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); |
223 | return PTR_ERR(phydev); | 223 | return PTR_ERR(phydev); |
224 | } | 224 | } |
225 | 225 | ||
226 | /* mask with MAC supported features */ | 226 | /* mask with MAC supported features */ |
227 | phydev->supported &= PHY_BASIC_FEATURES; | 227 | phydev->supported &= PHY_BASIC_FEATURES; |
228 | 228 | ||
229 | phydev->advertising = phydev->supported; | 229 | phydev->advertising = phydev->supported; |
230 | 230 | ||
231 | bp->link = 0; | 231 | bp->link = 0; |
232 | bp->speed = 0; | 232 | bp->speed = 0; |
233 | bp->duplex = -1; | 233 | bp->duplex = -1; |
234 | bp->phy_dev = phydev; | 234 | bp->phy_dev = phydev; |
235 | 235 | ||
236 | return 0; | 236 | return 0; |
237 | } | 237 | } |
238 | 238 | ||
239 | static int macb_mii_init(struct macb *bp) | 239 | static int macb_mii_init(struct macb *bp) |
240 | { | 240 | { |
241 | struct eth_platform_data *pdata; | 241 | struct eth_platform_data *pdata; |
242 | int err = -ENXIO, i; | 242 | int err = -ENXIO, i; |
243 | 243 | ||
244 | /* Enable managment port */ | 244 | /* Enable managment port */ |
245 | macb_writel(bp, NCR, MACB_BIT(MPE)); | 245 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
246 | 246 | ||
247 | bp->mii_bus = mdiobus_alloc(); | 247 | bp->mii_bus = mdiobus_alloc(); |
248 | if (bp->mii_bus == NULL) { | 248 | if (bp->mii_bus == NULL) { |
249 | err = -ENOMEM; | 249 | err = -ENOMEM; |
250 | goto err_out; | 250 | goto err_out; |
251 | } | 251 | } |
252 | 252 | ||
253 | bp->mii_bus->name = "MACB_mii_bus"; | 253 | bp->mii_bus->name = "MACB_mii_bus"; |
254 | bp->mii_bus->read = &macb_mdio_read; | 254 | bp->mii_bus->read = &macb_mdio_read; |
255 | bp->mii_bus->write = &macb_mdio_write; | 255 | bp->mii_bus->write = &macb_mdio_write; |
256 | bp->mii_bus->reset = &macb_mdio_reset; | 256 | bp->mii_bus->reset = &macb_mdio_reset; |
257 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id); | 257 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id); |
258 | bp->mii_bus->priv = bp; | 258 | bp->mii_bus->priv = bp; |
259 | bp->mii_bus->parent = &bp->dev->dev; | 259 | bp->mii_bus->parent = &bp->dev->dev; |
260 | pdata = bp->pdev->dev.platform_data; | 260 | pdata = bp->pdev->dev.platform_data; |
261 | 261 | ||
262 | if (pdata) | 262 | if (pdata) |
263 | bp->mii_bus->phy_mask = pdata->phy_mask; | 263 | bp->mii_bus->phy_mask = pdata->phy_mask; |
264 | 264 | ||
265 | bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); | 265 | bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
266 | if (!bp->mii_bus->irq) { | 266 | if (!bp->mii_bus->irq) { |
267 | err = -ENOMEM; | 267 | err = -ENOMEM; |
268 | goto err_out_free_mdiobus; | 268 | goto err_out_free_mdiobus; |
269 | } | 269 | } |
270 | 270 | ||
271 | for (i = 0; i < PHY_MAX_ADDR; i++) | 271 | for (i = 0; i < PHY_MAX_ADDR; i++) |
272 | bp->mii_bus->irq[i] = PHY_POLL; | 272 | bp->mii_bus->irq[i] = PHY_POLL; |
273 | 273 | ||
274 | platform_set_drvdata(bp->dev, bp->mii_bus); | 274 | platform_set_drvdata(bp->dev, bp->mii_bus); |
275 | 275 | ||
276 | if (mdiobus_register(bp->mii_bus)) | 276 | if (mdiobus_register(bp->mii_bus)) |
277 | goto err_out_free_mdio_irq; | 277 | goto err_out_free_mdio_irq; |
278 | 278 | ||
279 | if (macb_mii_probe(bp->dev) != 0) { | 279 | if (macb_mii_probe(bp->dev) != 0) { |
280 | goto err_out_unregister_bus; | 280 | goto err_out_unregister_bus; |
281 | } | 281 | } |
282 | 282 | ||
283 | return 0; | 283 | return 0; |
284 | 284 | ||
285 | err_out_unregister_bus: | 285 | err_out_unregister_bus: |
286 | mdiobus_unregister(bp->mii_bus); | 286 | mdiobus_unregister(bp->mii_bus); |
287 | err_out_free_mdio_irq: | 287 | err_out_free_mdio_irq: |
288 | kfree(bp->mii_bus->irq); | 288 | kfree(bp->mii_bus->irq); |
289 | err_out_free_mdiobus: | 289 | err_out_free_mdiobus: |
290 | mdiobus_free(bp->mii_bus); | 290 | mdiobus_free(bp->mii_bus); |
291 | err_out: | 291 | err_out: |
292 | return err; | 292 | return err; |
293 | } | 293 | } |
294 | 294 | ||
295 | static void macb_update_stats(struct macb *bp) | 295 | static void macb_update_stats(struct macb *bp) |
296 | { | 296 | { |
297 | u32 __iomem *reg = bp->regs + MACB_PFR; | 297 | u32 __iomem *reg = bp->regs + MACB_PFR; |
298 | u32 *p = &bp->hw_stats.rx_pause_frames; | 298 | u32 *p = &bp->hw_stats.rx_pause_frames; |
299 | u32 *end = &bp->hw_stats.tx_pause_frames + 1; | 299 | u32 *end = &bp->hw_stats.tx_pause_frames + 1; |
300 | 300 | ||
301 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); | 301 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); |
302 | 302 | ||
303 | for(; p < end; p++, reg++) | 303 | for(; p < end; p++, reg++) |
304 | *p += __raw_readl(reg); | 304 | *p += __raw_readl(reg); |
305 | } | 305 | } |
306 | 306 | ||
307 | static void macb_tx(struct macb *bp) | 307 | static void macb_tx(struct macb *bp) |
308 | { | 308 | { |
309 | unsigned int tail; | 309 | unsigned int tail; |
310 | unsigned int head; | 310 | unsigned int head; |
311 | u32 status; | 311 | u32 status; |
312 | 312 | ||
313 | status = macb_readl(bp, TSR); | 313 | status = macb_readl(bp, TSR); |
314 | macb_writel(bp, TSR, status); | 314 | macb_writel(bp, TSR, status); |
315 | 315 | ||
316 | dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", | 316 | dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n", |
317 | (unsigned long)status); | 317 | (unsigned long)status); |
318 | 318 | ||
319 | if (status & MACB_BIT(UND)) { | 319 | if (status & MACB_BIT(UND)) { |
320 | int i; | 320 | int i; |
321 | printk(KERN_ERR "%s: TX underrun, resetting buffers\n", | 321 | printk(KERN_ERR "%s: TX underrun, resetting buffers\n", |
322 | bp->dev->name); | 322 | bp->dev->name); |
323 | 323 | ||
324 | /* Transfer ongoing, disable transmitter, to avoid confusion */ | ||
325 | if (status & MACB_BIT(TGO)) | ||
326 | macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); | ||
327 | |||
324 | head = bp->tx_head; | 328 | head = bp->tx_head; |
325 | 329 | ||
326 | /*Mark all the buffer as used to avoid sending a lost buffer*/ | 330 | /*Mark all the buffer as used to avoid sending a lost buffer*/ |
327 | for (i = 0; i < TX_RING_SIZE; i++) | 331 | for (i = 0; i < TX_RING_SIZE; i++) |
328 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 332 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
329 | 333 | ||
330 | /* free transmit buffer in upper layer*/ | 334 | /* free transmit buffer in upper layer*/ |
331 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | 335 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { |
332 | struct ring_info *rp = &bp->tx_skb[tail]; | 336 | struct ring_info *rp = &bp->tx_skb[tail]; |
333 | struct sk_buff *skb = rp->skb; | 337 | struct sk_buff *skb = rp->skb; |
334 | 338 | ||
335 | BUG_ON(skb == NULL); | 339 | BUG_ON(skb == NULL); |
336 | 340 | ||
337 | rmb(); | 341 | rmb(); |
338 | 342 | ||
339 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | 343 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, |
340 | DMA_TO_DEVICE); | 344 | DMA_TO_DEVICE); |
341 | rp->skb = NULL; | 345 | rp->skb = NULL; |
342 | dev_kfree_skb_irq(skb); | 346 | dev_kfree_skb_irq(skb); |
343 | } | 347 | } |
344 | 348 | ||
345 | bp->tx_head = bp->tx_tail = 0; | 349 | bp->tx_head = bp->tx_tail = 0; |
350 | |||
351 | /* Enable the transmitter again */ | ||
352 | if (status & MACB_BIT(TGO)) | ||
353 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); | ||
346 | } | 354 | } |
347 | 355 | ||
348 | if (!(status & MACB_BIT(COMP))) | 356 | if (!(status & MACB_BIT(COMP))) |
349 | /* | 357 | /* |
350 | * This may happen when a buffer becomes complete | 358 | * This may happen when a buffer becomes complete |
351 | * between reading the ISR and scanning the | 359 | * between reading the ISR and scanning the |
352 | * descriptors. Nothing to worry about. | 360 | * descriptors. Nothing to worry about. |
353 | */ | 361 | */ |
354 | return; | 362 | return; |
355 | 363 | ||
356 | head = bp->tx_head; | 364 | head = bp->tx_head; |
357 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { | 365 | for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { |
358 | struct ring_info *rp = &bp->tx_skb[tail]; | 366 | struct ring_info *rp = &bp->tx_skb[tail]; |
359 | struct sk_buff *skb = rp->skb; | 367 | struct sk_buff *skb = rp->skb; |
360 | u32 bufstat; | 368 | u32 bufstat; |
361 | 369 | ||
362 | BUG_ON(skb == NULL); | 370 | BUG_ON(skb == NULL); |
363 | 371 | ||
364 | rmb(); | 372 | rmb(); |
365 | bufstat = bp->tx_ring[tail].ctrl; | 373 | bufstat = bp->tx_ring[tail].ctrl; |
366 | 374 | ||
367 | if (!(bufstat & MACB_BIT(TX_USED))) | 375 | if (!(bufstat & MACB_BIT(TX_USED))) |
368 | break; | 376 | break; |
369 | 377 | ||
370 | dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", | 378 | dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n", |
371 | tail, skb->data); | 379 | tail, skb->data); |
372 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, | 380 | dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, |
373 | DMA_TO_DEVICE); | 381 | DMA_TO_DEVICE); |
374 | bp->stats.tx_packets++; | 382 | bp->stats.tx_packets++; |
375 | bp->stats.tx_bytes += skb->len; | 383 | bp->stats.tx_bytes += skb->len; |
376 | rp->skb = NULL; | 384 | rp->skb = NULL; |
377 | dev_kfree_skb_irq(skb); | 385 | dev_kfree_skb_irq(skb); |
378 | } | 386 | } |
379 | 387 | ||
380 | bp->tx_tail = tail; | 388 | bp->tx_tail = tail; |
381 | if (netif_queue_stopped(bp->dev) && | 389 | if (netif_queue_stopped(bp->dev) && |
382 | TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) | 390 | TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) |
383 | netif_wake_queue(bp->dev); | 391 | netif_wake_queue(bp->dev); |
384 | } | 392 | } |
385 | 393 | ||
386 | static int macb_rx_frame(struct macb *bp, unsigned int first_frag, | 394 | static int macb_rx_frame(struct macb *bp, unsigned int first_frag, |
387 | unsigned int last_frag) | 395 | unsigned int last_frag) |
388 | { | 396 | { |
389 | unsigned int len; | 397 | unsigned int len; |
390 | unsigned int frag; | 398 | unsigned int frag; |
391 | unsigned int offset = 0; | 399 | unsigned int offset = 0; |
392 | struct sk_buff *skb; | 400 | struct sk_buff *skb; |
393 | 401 | ||
394 | len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); | 402 | len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); |
395 | 403 | ||
396 | dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", | 404 | dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n", |
397 | first_frag, last_frag, len); | 405 | first_frag, last_frag, len); |
398 | 406 | ||
399 | skb = dev_alloc_skb(len + RX_OFFSET); | 407 | skb = dev_alloc_skb(len + RX_OFFSET); |
400 | if (!skb) { | 408 | if (!skb) { |
401 | bp->stats.rx_dropped++; | 409 | bp->stats.rx_dropped++; |
402 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { | 410 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { |
403 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 411 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); |
404 | if (frag == last_frag) | 412 | if (frag == last_frag) |
405 | break; | 413 | break; |
406 | } | 414 | } |
407 | wmb(); | 415 | wmb(); |
408 | return 1; | 416 | return 1; |
409 | } | 417 | } |
410 | 418 | ||
411 | skb_reserve(skb, RX_OFFSET); | 419 | skb_reserve(skb, RX_OFFSET); |
412 | skb->ip_summed = CHECKSUM_NONE; | 420 | skb->ip_summed = CHECKSUM_NONE; |
413 | skb_put(skb, len); | 421 | skb_put(skb, len); |
414 | 422 | ||
415 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { | 423 | for (frag = first_frag; ; frag = NEXT_RX(frag)) { |
416 | unsigned int frag_len = RX_BUFFER_SIZE; | 424 | unsigned int frag_len = RX_BUFFER_SIZE; |
417 | 425 | ||
418 | if (offset + frag_len > len) { | 426 | if (offset + frag_len > len) { |
419 | BUG_ON(frag != last_frag); | 427 | BUG_ON(frag != last_frag); |
420 | frag_len = len - offset; | 428 | frag_len = len - offset; |
421 | } | 429 | } |
422 | skb_copy_to_linear_data_offset(skb, offset, | 430 | skb_copy_to_linear_data_offset(skb, offset, |
423 | (bp->rx_buffers + | 431 | (bp->rx_buffers + |
424 | (RX_BUFFER_SIZE * frag)), | 432 | (RX_BUFFER_SIZE * frag)), |
425 | frag_len); | 433 | frag_len); |
426 | offset += RX_BUFFER_SIZE; | 434 | offset += RX_BUFFER_SIZE; |
427 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 435 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); |
428 | wmb(); | 436 | wmb(); |
429 | 437 | ||
430 | if (frag == last_frag) | 438 | if (frag == last_frag) |
431 | break; | 439 | break; |
432 | } | 440 | } |
433 | 441 | ||
434 | skb->protocol = eth_type_trans(skb, bp->dev); | 442 | skb->protocol = eth_type_trans(skb, bp->dev); |
435 | 443 | ||
436 | bp->stats.rx_packets++; | 444 | bp->stats.rx_packets++; |
437 | bp->stats.rx_bytes += len; | 445 | bp->stats.rx_bytes += len; |
438 | dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", | 446 | dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", |
439 | skb->len, skb->csum); | 447 | skb->len, skb->csum); |
440 | netif_receive_skb(skb); | 448 | netif_receive_skb(skb); |
441 | 449 | ||
442 | return 0; | 450 | return 0; |
443 | } | 451 | } |
444 | 452 | ||
445 | /* Mark DMA descriptors from begin up to and not including end as unused */ | 453 | /* Mark DMA descriptors from begin up to and not including end as unused */ |
446 | static void discard_partial_frame(struct macb *bp, unsigned int begin, | 454 | static void discard_partial_frame(struct macb *bp, unsigned int begin, |
447 | unsigned int end) | 455 | unsigned int end) |
448 | { | 456 | { |
449 | unsigned int frag; | 457 | unsigned int frag; |
450 | 458 | ||
451 | for (frag = begin; frag != end; frag = NEXT_RX(frag)) | 459 | for (frag = begin; frag != end; frag = NEXT_RX(frag)) |
452 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); | 460 | bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); |
453 | wmb(); | 461 | wmb(); |
454 | 462 | ||
455 | /* | 463 | /* |
456 | * When this happens, the hardware stats registers for | 464 | * When this happens, the hardware stats registers for |
457 | * whatever caused this is updated, so we don't have to record | 465 | * whatever caused this is updated, so we don't have to record |
458 | * anything. | 466 | * anything. |
459 | */ | 467 | */ |
460 | } | 468 | } |
461 | 469 | ||
462 | static int macb_rx(struct macb *bp, int budget) | 470 | static int macb_rx(struct macb *bp, int budget) |
463 | { | 471 | { |
464 | int received = 0; | 472 | int received = 0; |
465 | unsigned int tail = bp->rx_tail; | 473 | unsigned int tail = bp->rx_tail; |
466 | int first_frag = -1; | 474 | int first_frag = -1; |
467 | 475 | ||
468 | for (; budget > 0; tail = NEXT_RX(tail)) { | 476 | for (; budget > 0; tail = NEXT_RX(tail)) { |
469 | u32 addr, ctrl; | 477 | u32 addr, ctrl; |
470 | 478 | ||
471 | rmb(); | 479 | rmb(); |
472 | addr = bp->rx_ring[tail].addr; | 480 | addr = bp->rx_ring[tail].addr; |
473 | ctrl = bp->rx_ring[tail].ctrl; | 481 | ctrl = bp->rx_ring[tail].ctrl; |
474 | 482 | ||
475 | if (!(addr & MACB_BIT(RX_USED))) | 483 | if (!(addr & MACB_BIT(RX_USED))) |
476 | break; | 484 | break; |
477 | 485 | ||
478 | if (ctrl & MACB_BIT(RX_SOF)) { | 486 | if (ctrl & MACB_BIT(RX_SOF)) { |
479 | if (first_frag != -1) | 487 | if (first_frag != -1) |
480 | discard_partial_frame(bp, first_frag, tail); | 488 | discard_partial_frame(bp, first_frag, tail); |
481 | first_frag = tail; | 489 | first_frag = tail; |
482 | } | 490 | } |
483 | 491 | ||
484 | if (ctrl & MACB_BIT(RX_EOF)) { | 492 | if (ctrl & MACB_BIT(RX_EOF)) { |
485 | int dropped; | 493 | int dropped; |
486 | BUG_ON(first_frag == -1); | 494 | BUG_ON(first_frag == -1); |
487 | 495 | ||
488 | dropped = macb_rx_frame(bp, first_frag, tail); | 496 | dropped = macb_rx_frame(bp, first_frag, tail); |
489 | first_frag = -1; | 497 | first_frag = -1; |
490 | if (!dropped) { | 498 | if (!dropped) { |
491 | received++; | 499 | received++; |
492 | budget--; | 500 | budget--; |
493 | } | 501 | } |
494 | } | 502 | } |
495 | } | 503 | } |
496 | 504 | ||
497 | if (first_frag != -1) | 505 | if (first_frag != -1) |
498 | bp->rx_tail = first_frag; | 506 | bp->rx_tail = first_frag; |
499 | else | 507 | else |
500 | bp->rx_tail = tail; | 508 | bp->rx_tail = tail; |
501 | 509 | ||
502 | return received; | 510 | return received; |
503 | } | 511 | } |
504 | 512 | ||
/*
 * NAPI poll handler: acknowledge the receive status, hand completed
 * frames to the stack via macb_rx(), and re-enable the RX interrupts
 * once (or if) there is nothing left to do.
 */
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	struct net_device *dev = bp->dev;
	int work_done;
	u32 status;

	/* Read RSR and write the value back to clear the reported bits. */
	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;
	if (!status) {
		/*
		 * This may happen if an interrupt was pending before
		 * this function was called last time, and no packets
		 * have been received since.
		 */
		netif_rx_complete(napi);
		goto out;
	}

	dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
		(unsigned long)status, budget);

	if (!(status & MACB_BIT(REC))) {
		/* Status set but no frame-received bit: nothing to reap. */
		dev_warn(&bp->pdev->dev,
			 "No RX buffers complete, status = %02lx\n",
			 (unsigned long)status);
		netif_rx_complete(napi);
		goto out;
	}

	/* Process up to 'budget' received frames. */
	work_done = macb_rx(bp, budget);
	if (work_done < budget)
		netif_rx_complete(napi);

	/*
	 * We've done what we can to clean the buffers. Make sure we
	 * get notified when new packets arrive.
	 */
out:
	macb_writel(bp, IER, MACB_RX_INT_FLAGS);

	/* TODO: Handle errors */

	return work_done;
}
552 | 560 | ||
/*
 * Top-half interrupt handler.  Loops on ISR until all pending causes
 * have been serviced: schedules NAPI for RX work, reaps TX descriptors
 * on completion or transmit underrun, and logs DMA bus errors (HRESP).
 */
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	/* Nothing pending: not our interrupt (shared line). */
	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			/* Mask every interrupt source and bail out. */
			macb_writel(bp, IDR, ~0UL);
			break;
		}

		if (status & MACB_RX_INT_FLAGS) {
			if (netif_rx_schedule_prep(&bp->napi)) {
				/*
				 * There's no point taking any more interrupts
				 * until we have processed the buffers
				 */
				macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
				dev_dbg(&bp->pdev->dev,
					"scheduling RX softirq\n");
				__netif_rx_schedule(&bp->napi);
			}
		}

		/* Reap TX descriptors on completion or transmit underrun. */
		if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
			macb_tx(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the printk
			 * to a lower-priority context as well (work queue?)
			 */
			printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
			       dev->name);
		}

		/* Re-read ISR; NOTE(review): appears to be clear-on-read
		 * (see macb_reset_hw's trailing ISR read) — confirm. */
		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
610 | 618 | ||
/*
 * hard_start_xmit handler: map the skb's linear data for DMA, fill in
 * the next free TX descriptor and tell the controller to (re)start
 * transmission.  Stops the netif queue when the ring becomes full.
 */
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	u32 ctrl;

#ifdef DEBUG
	int i;
	dev_dbg(&bp->pdev->dev,
		"start_xmit: len %u head %p data %p tail %p end %p\n",
		skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));
	dev_dbg(&bp->pdev->dev,
		"data:");
	for (i = 0; i < 16; i++)
		printk(" %02x", (unsigned int)skb->data[i]);
	printk("\n");
#endif

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(bp) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		dev_err(&bp->pdev->dev,
			"BUG! Tx Ring full when queue awake!\n");
		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
			bp->tx_head, bp->tx_tail);
		/* Non-zero return asks the stack to requeue the skb. */
		return 1;
	}

	entry = bp->tx_head;
	dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);
	/* Record skb + mapping so the completion path can unmap and free. */
	bp->tx_skb[entry].skb = skb;
	bp->tx_skb[entry].mapping = mapping;
	dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
		skb->data, (unsigned long)mapping);

	/* Single-buffer frame: frame length + LAST; WRAP on the final slot. */
	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	bp->tx_ring[entry].addr = mapping;
	bp->tx_ring[entry].ctrl = ctrl;
	/* Descriptor must be visible to the DMA engine before TSTART below. */
	wmb();

	entry = NEXT_TX(entry);
	bp->tx_head = entry;

	/* Kick (or re-kick) the transmitter. */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	/* Ring now full? Stop the queue until macb_tx() frees entries. */
	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
677 | 685 | ||
678 | static void macb_free_consistent(struct macb *bp) | 686 | static void macb_free_consistent(struct macb *bp) |
679 | { | 687 | { |
680 | if (bp->tx_skb) { | 688 | if (bp->tx_skb) { |
681 | kfree(bp->tx_skb); | 689 | kfree(bp->tx_skb); |
682 | bp->tx_skb = NULL; | 690 | bp->tx_skb = NULL; |
683 | } | 691 | } |
684 | if (bp->rx_ring) { | 692 | if (bp->rx_ring) { |
685 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, | 693 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, |
686 | bp->rx_ring, bp->rx_ring_dma); | 694 | bp->rx_ring, bp->rx_ring_dma); |
687 | bp->rx_ring = NULL; | 695 | bp->rx_ring = NULL; |
688 | } | 696 | } |
689 | if (bp->tx_ring) { | 697 | if (bp->tx_ring) { |
690 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, | 698 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, |
691 | bp->tx_ring, bp->tx_ring_dma); | 699 | bp->tx_ring, bp->tx_ring_dma); |
692 | bp->tx_ring = NULL; | 700 | bp->tx_ring = NULL; |
693 | } | 701 | } |
694 | if (bp->rx_buffers) { | 702 | if (bp->rx_buffers) { |
695 | dma_free_coherent(&bp->pdev->dev, | 703 | dma_free_coherent(&bp->pdev->dev, |
696 | RX_RING_SIZE * RX_BUFFER_SIZE, | 704 | RX_RING_SIZE * RX_BUFFER_SIZE, |
697 | bp->rx_buffers, bp->rx_buffers_dma); | 705 | bp->rx_buffers, bp->rx_buffers_dma); |
698 | bp->rx_buffers = NULL; | 706 | bp->rx_buffers = NULL; |
699 | } | 707 | } |
700 | } | 708 | } |
701 | 709 | ||
702 | static int macb_alloc_consistent(struct macb *bp) | 710 | static int macb_alloc_consistent(struct macb *bp) |
703 | { | 711 | { |
704 | int size; | 712 | int size; |
705 | 713 | ||
706 | size = TX_RING_SIZE * sizeof(struct ring_info); | 714 | size = TX_RING_SIZE * sizeof(struct ring_info); |
707 | bp->tx_skb = kmalloc(size, GFP_KERNEL); | 715 | bp->tx_skb = kmalloc(size, GFP_KERNEL); |
708 | if (!bp->tx_skb) | 716 | if (!bp->tx_skb) |
709 | goto out_err; | 717 | goto out_err; |
710 | 718 | ||
711 | size = RX_RING_BYTES; | 719 | size = RX_RING_BYTES; |
712 | bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 720 | bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
713 | &bp->rx_ring_dma, GFP_KERNEL); | 721 | &bp->rx_ring_dma, GFP_KERNEL); |
714 | if (!bp->rx_ring) | 722 | if (!bp->rx_ring) |
715 | goto out_err; | 723 | goto out_err; |
716 | dev_dbg(&bp->pdev->dev, | 724 | dev_dbg(&bp->pdev->dev, |
717 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", | 725 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", |
718 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); | 726 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); |
719 | 727 | ||
720 | size = TX_RING_BYTES; | 728 | size = TX_RING_BYTES; |
721 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, | 729 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
722 | &bp->tx_ring_dma, GFP_KERNEL); | 730 | &bp->tx_ring_dma, GFP_KERNEL); |
723 | if (!bp->tx_ring) | 731 | if (!bp->tx_ring) |
724 | goto out_err; | 732 | goto out_err; |
725 | dev_dbg(&bp->pdev->dev, | 733 | dev_dbg(&bp->pdev->dev, |
726 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", | 734 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", |
727 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); | 735 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); |
728 | 736 | ||
729 | size = RX_RING_SIZE * RX_BUFFER_SIZE; | 737 | size = RX_RING_SIZE * RX_BUFFER_SIZE; |
730 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, | 738 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
731 | &bp->rx_buffers_dma, GFP_KERNEL); | 739 | &bp->rx_buffers_dma, GFP_KERNEL); |
732 | if (!bp->rx_buffers) | 740 | if (!bp->rx_buffers) |
733 | goto out_err; | 741 | goto out_err; |
734 | dev_dbg(&bp->pdev->dev, | 742 | dev_dbg(&bp->pdev->dev, |
735 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", | 743 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", |
736 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); | 744 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); |
737 | 745 | ||
738 | return 0; | 746 | return 0; |
739 | 747 | ||
740 | out_err: | 748 | out_err: |
741 | macb_free_consistent(bp); | 749 | macb_free_consistent(bp); |
742 | return -ENOMEM; | 750 | return -ENOMEM; |
743 | } | 751 | } |
744 | 752 | ||
745 | static void macb_init_rings(struct macb *bp) | 753 | static void macb_init_rings(struct macb *bp) |
746 | { | 754 | { |
747 | int i; | 755 | int i; |
748 | dma_addr_t addr; | 756 | dma_addr_t addr; |
749 | 757 | ||
750 | addr = bp->rx_buffers_dma; | 758 | addr = bp->rx_buffers_dma; |
751 | for (i = 0; i < RX_RING_SIZE; i++) { | 759 | for (i = 0; i < RX_RING_SIZE; i++) { |
752 | bp->rx_ring[i].addr = addr; | 760 | bp->rx_ring[i].addr = addr; |
753 | bp->rx_ring[i].ctrl = 0; | 761 | bp->rx_ring[i].ctrl = 0; |
754 | addr += RX_BUFFER_SIZE; | 762 | addr += RX_BUFFER_SIZE; |
755 | } | 763 | } |
756 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); | 764 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); |
757 | 765 | ||
758 | for (i = 0; i < TX_RING_SIZE; i++) { | 766 | for (i = 0; i < TX_RING_SIZE; i++) { |
759 | bp->tx_ring[i].addr = 0; | 767 | bp->tx_ring[i].addr = 0; |
760 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); | 768 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
761 | } | 769 | } |
762 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | 770 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
763 | 771 | ||
764 | bp->rx_tail = bp->tx_head = bp->tx_tail = 0; | 772 | bp->rx_tail = bp->tx_head = bp->tx_tail = 0; |
765 | } | 773 | } |
766 | 774 | ||
/*
 * Put the MAC into a quiescent, known state: transmitter and receiver
 * disabled, statistics counters cleared, all status flags cleared and
 * every interrupt source masked.
 */
static void macb_reset_hw(struct macb *bp)
{
	/* Make sure we have the write buffer for ourselves */
	wmb();

	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, ~0UL);
	macb_writel(bp, RSR, ~0UL);

	/* Disable all interrupts */
	macb_writel(bp, IDR, ~0UL);
	/* Drain ISR — presumably clear-on-read; confirm against datasheet. */
	macb_readl(bp, ISR);
}
789 | 797 | ||
/*
 * Full hardware (re)initialization: reset the MAC, program the station
 * address and network configuration, point the controller at the DMA
 * descriptor rings, enable RX/TX and unmask the interrupt sources the
 * driver handles.
 */
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	__macb_set_hwaddr(bp);

	/* Keep only the CLK field of NCFGR (presumably the MDC clock
	 * divider — confirm); rebuild the remaining configuration. */
	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	macb_writel(bp, NCFGR, config);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

	/* Enable interrupts */
	macb_writel(bp, IER, (MACB_BIT(RCOMP)
			      | MACB_BIT(RXUBR)
			      | MACB_BIT(ISR_TUND)
			      | MACB_BIT(ISR_RLE)
			      | MACB_BIT(TXERR)
			      | MACB_BIT(TCOMP)
			      | MACB_BIT(ISR_ROVR)
			      | MACB_BIT(HRESP)));

}
824 | 832 | ||
825 | /* | 833 | /* |
826 | * The hash address register is 64 bits long and takes up two | 834 | * The hash address register is 64 bits long and takes up two |
827 | * locations in the memory map. The least significant bits are stored | 835 | * locations in the memory map. The least significant bits are stored |
828 | * in EMAC_HSL and the most significant bits in EMAC_HSH. | 836 | * in EMAC_HSL and the most significant bits in EMAC_HSH. |
829 | * | 837 | * |
830 | * The unicast hash enable and the multicast hash enable bits in the | 838 | * The unicast hash enable and the multicast hash enable bits in the |
831 | * network configuration register enable the reception of hash matched | 839 | * network configuration register enable the reception of hash matched |
832 | * frames. The destination address is reduced to a 6 bit index into | 840 | * frames. The destination address is reduced to a 6 bit index into |
833 | * the 64 bit hash register using the following hash function. The | 841 | * the 64 bit hash register using the following hash function. The |
834 | * hash function is an exclusive or of every sixth bit of the | 842 | * hash function is an exclusive or of every sixth bit of the |
835 | * destination address. | 843 | * destination address. |
836 | * | 844 | * |
837 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] | 845 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] |
838 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] | 846 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] |
839 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] | 847 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] |
840 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] | 848 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] |
841 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] | 849 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] |
842 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] | 850 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] |
843 | * | 851 | * |
844 | * da[0] represents the least significant bit of the first byte | 852 | * da[0] represents the least significant bit of the first byte |
845 | * received, that is, the multicast/unicast indicator, and da[47] | 853 | * received, that is, the multicast/unicast indicator, and da[47] |
846 | * represents the most significant bit of the last byte received. If | 854 | * represents the most significant bit of the last byte received. If |
847 | * the hash index, hi[n], points to a bit that is set in the hash | 855 | * the hash index, hi[n], points to a bit that is set in the hash |
848 | * register then the frame will be matched according to whether the | 856 | * register then the frame will be matched according to whether the |
849 | * frame is multicast or unicast. A multicast match will be signalled | 857 | * frame is multicast or unicast. A multicast match will be signalled |
850 | * if the multicast hash enable bit is set, da[0] is 1 and the hash | 858 | * if the multicast hash enable bit is set, da[0] is 1 and the hash |
851 | * index points to a bit set in the hash register. A unicast match | 859 | * index points to a bit set in the hash register. A unicast match |
852 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 | 860 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 |
853 | * and the hash index points to a bit set in the hash register. To | 861 | * and the hash index points to a bit set in the hash register. To |
854 | * receive all multicast frames, the hash register should be set with | 862 | * receive all multicast frames, the hash register should be set with |
855 | * all ones and the multicast hash enable bit should be set in the | 863 | * all ones and the multicast hash enable bit should be set in the |
856 | * network configuration register. | 864 | * network configuration register. |
857 | */ | 865 | */ |
858 | 866 | ||
859 | static inline int hash_bit_value(int bitnr, __u8 *addr) | 867 | static inline int hash_bit_value(int bitnr, __u8 *addr) |
860 | { | 868 | { |
861 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) | 869 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) |
862 | return 1; | 870 | return 1; |
863 | return 0; | 871 | return 0; |
864 | } | 872 | } |
865 | 873 | ||
866 | /* | 874 | /* |
867 | * Return the hash index value for the specified address. | 875 | * Return the hash index value for the specified address. |
868 | */ | 876 | */ |
869 | static int hash_get_index(__u8 *addr) | 877 | static int hash_get_index(__u8 *addr) |
870 | { | 878 | { |
871 | int i, j, bitval; | 879 | int i, j, bitval; |
872 | int hash_index = 0; | 880 | int hash_index = 0; |
873 | 881 | ||
874 | for (j = 0; j < 6; j++) { | 882 | for (j = 0; j < 6; j++) { |
875 | for (i = 0, bitval = 0; i < 8; i++) | 883 | for (i = 0, bitval = 0; i < 8; i++) |
876 | bitval ^= hash_bit_value(i*6 + j, addr); | 884 | bitval ^= hash_bit_value(i*6 + j, addr); |
877 | 885 | ||
878 | hash_index |= (bitval << j); | 886 | hash_index |= (bitval << j); |
879 | } | 887 | } |
880 | 888 | ||
881 | return hash_index; | 889 | return hash_index; |
882 | } | 890 | } |
883 | 891 | ||
884 | /* | 892 | /* |
885 | * Add multicast addresses to the internal multicast-hash table. | 893 | * Add multicast addresses to the internal multicast-hash table. |
886 | */ | 894 | */ |
887 | static void macb_sethashtable(struct net_device *dev) | 895 | static void macb_sethashtable(struct net_device *dev) |
888 | { | 896 | { |
889 | struct dev_mc_list *curr; | 897 | struct dev_mc_list *curr; |
890 | unsigned long mc_filter[2]; | 898 | unsigned long mc_filter[2]; |
891 | unsigned int i, bitnr; | 899 | unsigned int i, bitnr; |
892 | struct macb *bp = netdev_priv(dev); | 900 | struct macb *bp = netdev_priv(dev); |
893 | 901 | ||
894 | mc_filter[0] = mc_filter[1] = 0; | 902 | mc_filter[0] = mc_filter[1] = 0; |
895 | 903 | ||
896 | curr = dev->mc_list; | 904 | curr = dev->mc_list; |
897 | for (i = 0; i < dev->mc_count; i++, curr = curr->next) { | 905 | for (i = 0; i < dev->mc_count; i++, curr = curr->next) { |
898 | if (!curr) break; /* unexpected end of list */ | 906 | if (!curr) break; /* unexpected end of list */ |
899 | 907 | ||
900 | bitnr = hash_get_index(curr->dmi_addr); | 908 | bitnr = hash_get_index(curr->dmi_addr); |
901 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); | 909 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
902 | } | 910 | } |
903 | 911 | ||
904 | macb_writel(bp, HRB, mc_filter[0]); | 912 | macb_writel(bp, HRB, mc_filter[0]); |
905 | macb_writel(bp, HRT, mc_filter[1]); | 913 | macb_writel(bp, HRT, mc_filter[1]); |
906 | } | 914 | } |
907 | 915 | ||
908 | /* | 916 | /* |
909 | * Enable/Disable promiscuous and multicast modes. | 917 | * Enable/Disable promiscuous and multicast modes. |
910 | */ | 918 | */ |
911 | static void macb_set_rx_mode(struct net_device *dev) | 919 | static void macb_set_rx_mode(struct net_device *dev) |
912 | { | 920 | { |
913 | unsigned long cfg; | 921 | unsigned long cfg; |
914 | struct macb *bp = netdev_priv(dev); | 922 | struct macb *bp = netdev_priv(dev); |
915 | 923 | ||
916 | cfg = macb_readl(bp, NCFGR); | 924 | cfg = macb_readl(bp, NCFGR); |
917 | 925 | ||
918 | if (dev->flags & IFF_PROMISC) | 926 | if (dev->flags & IFF_PROMISC) |
919 | /* Enable promiscuous mode */ | 927 | /* Enable promiscuous mode */ |
920 | cfg |= MACB_BIT(CAF); | 928 | cfg |= MACB_BIT(CAF); |
921 | else if (dev->flags & (~IFF_PROMISC)) | 929 | else if (dev->flags & (~IFF_PROMISC)) |
922 | /* Disable promiscuous mode */ | 930 | /* Disable promiscuous mode */ |
923 | cfg &= ~MACB_BIT(CAF); | 931 | cfg &= ~MACB_BIT(CAF); |
924 | 932 | ||
925 | if (dev->flags & IFF_ALLMULTI) { | 933 | if (dev->flags & IFF_ALLMULTI) { |
926 | /* Enable all multicast mode */ | 934 | /* Enable all multicast mode */ |
927 | macb_writel(bp, HRB, -1); | 935 | macb_writel(bp, HRB, -1); |
928 | macb_writel(bp, HRT, -1); | 936 | macb_writel(bp, HRT, -1); |
929 | cfg |= MACB_BIT(NCFGR_MTI); | 937 | cfg |= MACB_BIT(NCFGR_MTI); |
930 | } else if (dev->mc_count > 0) { | 938 | } else if (dev->mc_count > 0) { |
931 | /* Enable specific multicasts */ | 939 | /* Enable specific multicasts */ |
932 | macb_sethashtable(dev); | 940 | macb_sethashtable(dev); |
933 | cfg |= MACB_BIT(NCFGR_MTI); | 941 | cfg |= MACB_BIT(NCFGR_MTI); |
934 | } else if (dev->flags & (~IFF_ALLMULTI)) { | 942 | } else if (dev->flags & (~IFF_ALLMULTI)) { |
935 | /* Disable all multicast mode */ | 943 | /* Disable all multicast mode */ |
936 | macb_writel(bp, HRB, 0); | 944 | macb_writel(bp, HRB, 0); |
937 | macb_writel(bp, HRT, 0); | 945 | macb_writel(bp, HRT, 0); |
938 | cfg &= ~MACB_BIT(NCFGR_MTI); | 946 | cfg &= ~MACB_BIT(NCFGR_MTI); |
939 | } | 947 | } |
940 | 948 | ||
941 | macb_writel(bp, NCFGR, cfg); | 949 | macb_writel(bp, NCFGR, cfg); |
942 | } | 950 | } |
943 | 951 | ||
/*
 * net_device open handler: allocate the DMA rings, program the
 * hardware, start the PHY and the transmit queue.
 *
 * Returns 0 on success, -EAGAIN if the PHY is not attached yet,
 * -EADDRNOTAVAIL for an invalid MAC address, or the allocation error.
 */
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int err;

	dev_dbg(&bp->pdev->dev, "open\n");

	/* if the phy is not yet register, retry later*/
	if (!bp->phy_dev)
		return -EAGAIN;

	/* Refuse to come up without a usable MAC address. */
	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	err = macb_alloc_consistent(bp);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to allocate DMA memory (error %d)\n",
		       dev->name, err);
		return err;
	}

	/* NAPI must be enabled before interrupts can schedule it. */
	napi_enable(&bp->napi);

	macb_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
978 | 986 | ||
/*
 * net_device stop handler: stop the queue, NAPI and PHY, reset the
 * MAC under the lock (so the IRQ handler can't race), then release
 * the DMA memory.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
999 | 1007 | ||
/*
 * Refresh the hardware statistics counters and translate them into
 * the generic net_device_stats fields.  Returns a pointer to the
 * driver's cached stats structure.
 */
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
1042 | 1050 | ||
1043 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 1051 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1044 | { | 1052 | { |
1045 | struct macb *bp = netdev_priv(dev); | 1053 | struct macb *bp = netdev_priv(dev); |
1046 | struct phy_device *phydev = bp->phy_dev; | 1054 | struct phy_device *phydev = bp->phy_dev; |
1047 | 1055 | ||
1048 | if (!phydev) | 1056 | if (!phydev) |
1049 | return -ENODEV; | 1057 | return -ENODEV; |
1050 | 1058 | ||
1051 | return phy_ethtool_gset(phydev, cmd); | 1059 | return phy_ethtool_gset(phydev, cmd); |
1052 | } | 1060 | } |
1053 | 1061 | ||
1054 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 1062 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1055 | { | 1063 | { |
1056 | struct macb *bp = netdev_priv(dev); | 1064 | struct macb *bp = netdev_priv(dev); |
1057 | struct phy_device *phydev = bp->phy_dev; | 1065 | struct phy_device *phydev = bp->phy_dev; |
1058 | 1066 | ||
1059 | if (!phydev) | 1067 | if (!phydev) |
1060 | return -ENODEV; | 1068 | return -ENODEV; |
1061 | 1069 | ||
1062 | return phy_ethtool_sset(phydev, cmd); | 1070 | return phy_ethtool_sset(phydev, cmd); |
1063 | } | 1071 | } |
1064 | 1072 | ||
1065 | static void macb_get_drvinfo(struct net_device *dev, | 1073 | static void macb_get_drvinfo(struct net_device *dev, |
1066 | struct ethtool_drvinfo *info) | 1074 | struct ethtool_drvinfo *info) |
1067 | { | 1075 | { |
1068 | struct macb *bp = netdev_priv(dev); | 1076 | struct macb *bp = netdev_priv(dev); |
1069 | 1077 | ||
1070 | strcpy(info->driver, bp->pdev->dev.driver->name); | 1078 | strcpy(info->driver, bp->pdev->dev.driver->name); |
1071 | strcpy(info->version, "$Revision: 1.14 $"); | 1079 | strcpy(info->version, "$Revision: 1.14 $"); |
1072 | strcpy(info->bus_info, bp->pdev->dev.bus_id); | 1080 | strcpy(info->bus_info, bp->pdev->dev.bus_id); |
1073 | } | 1081 | } |
1074 | 1082 | ||
1075 | static struct ethtool_ops macb_ethtool_ops = { | 1083 | static struct ethtool_ops macb_ethtool_ops = { |
1076 | .get_settings = macb_get_settings, | 1084 | .get_settings = macb_get_settings, |
1077 | .set_settings = macb_set_settings, | 1085 | .set_settings = macb_set_settings, |
1078 | .get_drvinfo = macb_get_drvinfo, | 1086 | .get_drvinfo = macb_get_drvinfo, |
1079 | .get_link = ethtool_op_get_link, | 1087 | .get_link = ethtool_op_get_link, |
1080 | }; | 1088 | }; |
1081 | 1089 | ||
1082 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 1090 | static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
1083 | { | 1091 | { |
1084 | struct macb *bp = netdev_priv(dev); | 1092 | struct macb *bp = netdev_priv(dev); |
1085 | struct phy_device *phydev = bp->phy_dev; | 1093 | struct phy_device *phydev = bp->phy_dev; |
1086 | 1094 | ||
1087 | if (!netif_running(dev)) | 1095 | if (!netif_running(dev)) |
1088 | return -EINVAL; | 1096 | return -EINVAL; |
1089 | 1097 | ||
1090 | if (!phydev) | 1098 | if (!phydev) |
1091 | return -ENODEV; | 1099 | return -ENODEV; |
1092 | 1100 | ||
1093 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); | 1101 | return phy_mii_ioctl(phydev, if_mii(rq), cmd); |
1094 | } | 1102 | } |
1095 | 1103 | ||
/*
 * Probe one MACB platform device: map registers, grab clocks, hook the
 * interrupt, register the netdev and bind the PHY via the MII bus.
 *
 * Resources are acquired strictly in order and released in reverse
 * order through the goto-cleanup labels at the bottom; any change to
 * the acquisition order must be mirrored in the labels.
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released.
 */
static int __init macb_probe(struct platform_device *pdev)
{
	struct eth_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	struct phy_device *phydev;
	unsigned long pclk_hz;
	u32 config;
	int err = -ENXIO;

	/* The register window is mandatory; bail out early without it. */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
		goto err_out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);

	/*
	 * Clock setup differs per SoC: AT91 exposes a single "macb_clk",
	 * while AVR32-style parts need both a peripheral clock (pclk)
	 * and a bus clock (hclk).
	 */
#if defined(CONFIG_ARCH_AT91)
	bp->pclk = clk_get(&pdev->dev, "macb_clk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get macb_clk\n");
		goto err_out_free_dev;
	}
	clk_enable(bp->pclk);
#else
	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get pclk\n");
		goto err_out_free_dev;
	}
	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}

	clk_enable(bp->pclk);
	clk_enable(bp->hclk);
#endif

	bp->regs = ioremap(regs->start, regs->end - regs->start + 1);
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		printk(KERN_ERR
		       "%s: Unable to request IRQ %d (error %d)\n",
		       dev->name, dev->irq, err);
		goto err_out_iounmap;
	}

	/* Wire up the netdev callbacks and NAPI polling. */
	dev->open = macb_open;
	dev->stop = macb_close;
	dev->hard_start_xmit = macb_start_xmit;
	dev->get_stats = macb_get_stats;
	dev->set_multicast_list = macb_set_rx_mode;
	dev->do_ioctl = macb_ioctl;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

	/* Set MII management clock divider */
	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);
	macb_writel(bp, NCFGR, config);

	macb_get_hwaddr(bp);
	pdata = pdev->dev.platform_data;

	/*
	 * Select the PHY interface mode (RMII vs MII) from platform
	 * data.  Note the #ifdefs sit *inside* the if/else arms: AT91
	 * additionally needs the CLKEN bit in either mode.
	 */
	if (pdata && pdata->is_rmii)
#if defined(CONFIG_ARCH_AT91)
		macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
#else
		macb_writel(bp, USRIO, 0);
#endif
	else
#if defined(CONFIG_ARCH_AT91)
		macb_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(bp, USRIO, MACB_BIT(MII));
#endif

	bp->tx_pending = DEF_TX_RING_PENDING;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	if (macb_mii_init(bp) != 0) {
		/* NOTE(review): 'err' still holds 0 from register_netdev
		 * here, so the error code returned on this path comes from
		 * earlier state — confirm macb_mii_init's contract. */
		goto err_out_unregister_netdev;
	}

	platform_set_drvdata(pdev, dev);

	printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
	       dev->name, dev->base_addr, dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	printk(KERN_INFO "%s: attached PHY driver [%s] "
	       "(mii_bus:phy_addr=%s, irq=%d)\n",
	       dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);

	return 0;

	/* Unwind in reverse order of acquisition. */
err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
#ifndef CONFIG_ARCH_AT91
	clk_disable(bp->hclk);
	clk_put(bp->hclk);
#endif
	clk_disable(bp->pclk);
#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
#endif
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
1256 | 1264 | ||
1257 | static int __exit macb_remove(struct platform_device *pdev) | 1265 | static int __exit macb_remove(struct platform_device *pdev) |
1258 | { | 1266 | { |
1259 | struct net_device *dev; | 1267 | struct net_device *dev; |
1260 | struct macb *bp; | 1268 | struct macb *bp; |
1261 | 1269 | ||
1262 | dev = platform_get_drvdata(pdev); | 1270 | dev = platform_get_drvdata(pdev); |
1263 | 1271 | ||
1264 | if (dev) { | 1272 | if (dev) { |
1265 | bp = netdev_priv(dev); | 1273 | bp = netdev_priv(dev); |
1266 | if (bp->phy_dev) | 1274 | if (bp->phy_dev) |
1267 | phy_disconnect(bp->phy_dev); | 1275 | phy_disconnect(bp->phy_dev); |
1268 | mdiobus_unregister(bp->mii_bus); | 1276 | mdiobus_unregister(bp->mii_bus); |
1269 | kfree(bp->mii_bus->irq); | 1277 | kfree(bp->mii_bus->irq); |
1270 | mdiobus_free(bp->mii_bus); | 1278 | mdiobus_free(bp->mii_bus); |
1271 | unregister_netdev(dev); | 1279 | unregister_netdev(dev); |
1272 | free_irq(dev->irq, dev); | 1280 | free_irq(dev->irq, dev); |
1273 | iounmap(bp->regs); | 1281 | iounmap(bp->regs); |
1274 | #ifndef CONFIG_ARCH_AT91 | 1282 | #ifndef CONFIG_ARCH_AT91 |
1275 | clk_disable(bp->hclk); | 1283 | clk_disable(bp->hclk); |
1276 | clk_put(bp->hclk); | 1284 | clk_put(bp->hclk); |
1277 | #endif | 1285 | #endif |
1278 | clk_disable(bp->pclk); | 1286 | clk_disable(bp->pclk); |
1279 | clk_put(bp->pclk); | 1287 | clk_put(bp->pclk); |
1280 | free_netdev(dev); | 1288 | free_netdev(dev); |
1281 | platform_set_drvdata(pdev, NULL); | 1289 | platform_set_drvdata(pdev, NULL); |
1282 | } | 1290 | } |
1283 | 1291 | ||
1284 | return 0; | 1292 | return 0; |
1285 | } | 1293 | } |
1286 | 1294 | ||
1287 | #ifdef CONFIG_PM | 1295 | #ifdef CONFIG_PM |
1288 | static int macb_suspend(struct platform_device *pdev, pm_message_t state) | 1296 | static int macb_suspend(struct platform_device *pdev, pm_message_t state) |
1289 | { | 1297 | { |
1290 | struct net_device *netdev = platform_get_drvdata(pdev); | 1298 | struct net_device *netdev = platform_get_drvdata(pdev); |
1291 | struct macb *bp = netdev_priv(netdev); | 1299 | struct macb *bp = netdev_priv(netdev); |
1292 | 1300 | ||
1293 | netif_device_detach(netdev); | 1301 | netif_device_detach(netdev); |
1294 | 1302 | ||
1295 | #ifndef CONFIG_ARCH_AT91 | 1303 | #ifndef CONFIG_ARCH_AT91 |
1296 | clk_disable(bp->hclk); | 1304 | clk_disable(bp->hclk); |
1297 | #endif | 1305 | #endif |
1298 | clk_disable(bp->pclk); | 1306 | clk_disable(bp->pclk); |
1299 | 1307 | ||
1300 | return 0; | 1308 | return 0; |
1301 | } | 1309 | } |
1302 | 1310 | ||
1303 | static int macb_resume(struct platform_device *pdev) | 1311 | static int macb_resume(struct platform_device *pdev) |
1304 | { | 1312 | { |
1305 | struct net_device *netdev = platform_get_drvdata(pdev); | 1313 | struct net_device *netdev = platform_get_drvdata(pdev); |
1306 | struct macb *bp = netdev_priv(netdev); | 1314 | struct macb *bp = netdev_priv(netdev); |
1307 | 1315 | ||
1308 | clk_enable(bp->pclk); | 1316 | clk_enable(bp->pclk); |
1309 | #ifndef CONFIG_ARCH_AT91 | 1317 | #ifndef CONFIG_ARCH_AT91 |
1310 | clk_enable(bp->hclk); | 1318 | clk_enable(bp->hclk); |
1311 | #endif | 1319 | #endif |
1312 | 1320 | ||
1313 | netif_device_attach(netdev); | 1321 | netif_device_attach(netdev); |
1314 | 1322 | ||
1315 | return 0; | 1323 | return 0; |
1316 | } | 1324 | } |
1317 | #else | 1325 | #else |
1318 | #define macb_suspend NULL | 1326 | #define macb_suspend NULL |
1319 | #define macb_resume NULL | 1327 | #define macb_resume NULL |
1320 | #endif | 1328 | #endif |
1321 | 1329 | ||
/*
 * Platform driver glue.  There is deliberately no .probe member: the
 * driver is registered with platform_driver_probe() below, which takes
 * macb_probe() directly so the __init probe code can be discarded
 * after boot.  .remove is wrapped in __exit_p() for the same reason.
 */
static struct platform_driver macb_driver = {
	.remove		= __exit_p(macb_remove),
	.suspend	= macb_suspend,
	.resume		= macb_resume,
	.driver		= {
		.name		= "macb",
		.owner	= THIS_MODULE,
	},
};
1331 | 1339 | ||
/*
 * Module entry point: register the platform driver and probe any
 * already-present macb devices in one step.
 */
static int __init macb_init(void)
{
	return platform_driver_probe(&macb_driver, macb_probe);
}
1336 | 1344 | ||
/* Module exit point: unregister the driver, removing bound devices. */
static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}
1341 | 1349 | ||
module_init(macb_init);
module_exit(macb_exit);

/* Module metadata; the alias lets udev autoload on the "macb" platform id. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>");
MODULE_ALIAS("platform:macb");
1349 | 1357 |