Commit 8d2b169fd78ab7d7903d5686021596d8adb9e930

Authored by Fugang Duan
Committed by Jason Liu
1 parent 76f9b385fe
Exists in imx_3.0.35_4.1.0

ENGR00291667-01 net:fec: reinit multicast address when fec restart

PTP multicast packet receive stops working after the Ethernet link is lost
for a short time and then reconnected, because the FEC driver calls restart()
on a cable hotplug, which resets all multicast addresses.
(cherry picked from commit adfa64f0c2bf35f8b902ae5700f97e7e11ae1794)

Signed-off-by: Fugang Duan  <B38611@freescale.com>
(cherry picked from commit 57a3f0b6888dfa2a59c7f1b738badbec342b2d10)

Showing 1 changed file with 2 additions and 3 deletions
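
The changed hunk itself (the 2 additions and 3 deletions in fec_restart()) falls outside this excerpt, which is truncated before that function. As a hedged sketch only, inferred from the commit message: fec_restart() previously zeroed the group hash registers, dropping every joined multicast group (including the PTP ones) across a link bounce, and the fix re-programs the filter instead, for example by reusing the driver's existing set_multicast_list() handler. The register and function names below exist in this driver family, but the exact hunk may differ:

        /* Sketch of the assumed fix inside fec_restart(), not the
         * verbatim hunk. Before: a restart wiped the multicast hash,
         * so PTP multicast receive stayed dead after a cable hotplug.
         */
        -       /* Reset all multicast. */
        -       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
        -       writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
        /* After: rebuild the hash from the device's current multicast
         * list instead of clearing it.
         */
        +       /* Reinit, rather than clear, the multicast hash filter */
        +       set_multicast_list(dev);

With the filter rebuilt on every restart, groups joined before the link dropped (for example the PTP addresses 224.0.1.129 and 224.0.0.107) keep receiving once the link comes back.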

/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Support for FEC IEEE 1588.
 * Copyright (C) 2010-2014 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <mach/clock.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/fec.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARM
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"
#include "fec_1588.h"

#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define FEC_ALIGNMENT           0xf
#else
#define FEC_ALIGNMENT           0x3
#endif

#define DRIVER_NAME             "fec"

/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC      (1 << 0)
/* Controller needs driver to swap frame */
#define FEC_QUIRK_SWAP_FRAME    (1 << 1)
/* ENET IP errata ticket TKT168103 */
#define FEC_QUIRK_BUG_TKT168103 (1 << 2)

static struct platform_device_id fec_devtype[] = {
        {
                .name = "enet",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_BUG_TKT168103,
        },
        {
                .name = "fec",
                .driver_data = 0,
        },
        {
                .name = "imx28-fec",
                .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                               FEC_QUIRK_BUG_TKT168103,
        },
        { }
};

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero, assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC    0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC    0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC    0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC    (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC    0xffc0406b
#else
#define FEC_FLASHMAC    0
#endif
#endif /* CONFIG_M5272 */

/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter. We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES       192
#define FEC_ENET_RX_FRSIZE      2048
#define FEC_ENET_RX_FRPPG       (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE            (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE      2048
#define FEC_ENET_TX_FRPPG       (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE            128     /* Must be power of two */
#define TX_RING_MOD_MASK        127     /*   for this to work */

#define BUFDES_SIZE ((RX_RING_SIZE + TX_RING_SIZE) * sizeof(struct bufdesc))

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE         1518
#define PKT_MINBUF_SIZE         64
#define PKT_MAXBLR_SIZE         1520

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE            (1 << 5)
#define FEC_ENET_RSEM_V         0x84
#define FEC_ENET_RSFL_V         16
#define FEC_ENET_RAEM_V         0x8
#define FEC_ENET_RAFL_V         0x8
#define FEC_ENET_OPD_V          0xFFF0

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
#define OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE  0
#endif

/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors. The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller. The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions. The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
        /* Hardware registers of the FEC device */
        void __iomem *hwp;

        struct net_device *netdev;

        struct clk *clk;
        struct clk *mdc_clk;

        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        ushort skb_cur;
        ushort skb_dirty;

        /* CPM dual port RAM relative addresses */
        dma_addr_t bd_dma;
        /* Address of Rx and Tx buffers */
        struct bufdesc *rx_bd_base;
        struct bufdesc *tx_bd_base;
        /* The next free ring entry */
        struct bufdesc *cur_rx, *cur_tx;
        /* The ring entries to be free()ed */
        struct bufdesc *dirty_tx;

        uint tx_full;
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;

        struct platform_device *pdev;

        int opened;

        /* Phylib and MDIO interface */
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        int mii_timeout;
        uint phy_speed;
        phy_interface_t phy_interface;
        int index;
        int link;
        int full_duplex;
        struct completion mdio_done;
        struct delayed_work fixup_trigger_tx;

        struct fec_ptp_private *ptp_priv;
        uint ptimer_present;

        struct napi_struct napi;
        int napi_weight;
        bool use_napi;
};

#define FEC_NAPI_WEIGHT 64
#ifdef CONFIG_FEC_NAPI
#define FEC_NAPI_ENABLE TRUE
#else
#define FEC_NAPI_ENABLE FALSE
#endif

static irqreturn_t fec_enet_interrupt(int irq, void *dev_id);
static void fec_enet_tx(struct net_device *dev);
static int fec_rx_poll(struct napi_struct *napi, int budget);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST             (1 << 30)
#define FEC_MMFR_OP_READ        (2 << 28)
#define FEC_MMFR_OP_WRITE       (1 << 28)
#define FEC_MMFR_PA(v)          ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)          ((v & 0x1f) << 18)
#define FEC_MMFR_TA             (2 << 16)
#define FEC_MMFR_DATA(v)        (v & 0xffff)

#define FEC_MII_TIMEOUT         30 /* ms */

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

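/* Byte-swap a buffer one 32-bit word at a time, rounding the length up
 * to a whole number of words; used for the FEC_QUIRK_SWAP_FRAME quirk.
 */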
static void *swap_buffer(void *bufaddr, int len)
{
        int i;
        unsigned int *buf = bufaddr;

        for (i = 0; i < (len + 3) / 4; i++, buf++)
                *buf = cpu_to_be32(*buf);

        return bufaddr;
}

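/* Return the Tx buffer descriptor that precedes cur_tx in the ring,
 * i.e. the entry most recently queued for transmission.
 */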
static inline
void *fec_enet_get_pre_txbd(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *bdp = fep->cur_tx;

        if (bdp == fep->tx_bd_base)
                return bdp + TX_RING_SIZE;
        else
                return bdp - 1;
}

/* The MTIP ENET IP has an IC issue recorded in PDM ticket TKT168103:
 * the TDAR bit, after being set by software, is not acted upon by the
 * ENET module because the ENET state machine clears TDAR coincident
 * with, or momentarily after, the software setting the bit.
 * This workaround forces the ENET module to check the transmit buffer
 * descriptor again and take action if the "ready" flag is set.
 * Otherwise the ENET returns to idle mode.
 */
static void fixup_trigger_tx_func(struct work_struct *work)
{
        struct fec_enet_private *fep =
                container_of(work, struct fec_enet_private,
                                fixup_trigger_tx.work);

        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp, *bdp_pre;
        void *bufaddr;
        unsigned short status;
        unsigned long estatus;
        unsigned long flags;

        spin_lock_irqsave(&fep->hw_lock, flags);
        if (!fep->link) {
                /* Link is down or autonegotiation is in progress. */
                netif_stop_queue(ndev);
                spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
        status = bdp->cbd_sc;

        if (status & BD_ENET_TX_READY) {
                /* Ooops. All transmit buffers are full. Bail out.
                 * This should not happen, since ndev->tbusy should be set.
                 */
                printk("%s: tx queue full!\n", ndev->name);
                netif_stop_queue(ndev);
                spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* Clear all of the status flags */
        status &= ~BD_ENET_TX_STATS;

        /* Set buffer length and buffer pointer */
        bufaddr = skb->data;
        bdp->cbd_datlen = skb->len;

        /*
         * On some FEC implementations data must be aligned on
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                unsigned int index;
                index = bdp - fep->tx_bd_base;
                bufaddr = PTR_ALIGN(fep->tx_bounce[index], FEC_ALIGNMENT + 1);
                memcpy(bufaddr, (void *)skb->data, skb->len);
        }

        if (fep->ptimer_present) {
                if (fec_ptp_do_txstamp(skb)) {
                        estatus = BD_ENET_TX_TS;
                        status |= BD_ENET_TX_PTP;
                } else
                        estatus = 0;
#ifdef CONFIG_ENHANCED_BD
                bdp->cbd_esc = (estatus | BD_ENET_TX_INT);
                bdp->cbd_bdu = 0;
#endif
        }
        /*
         * Some designs made an incorrect assumption about the endian mode of
         * the system they run on. As a result, the driver has to swap every
         * frame going to and coming from the controller.
         */
        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                swap_buffer(bufaddr, skb->len);

        /* Save skb pointer */
        fep->tx_skbuff[fep->skb_cur] = skb;

        ndev->stats.tx_bytes += skb->len;
        fep->skb_cur = (fep->skb_cur + 1) & TX_RING_MOD_MASK;

        /* Push the data cache so the CPM does not get stale memory
         * data.
         */
        bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
                        FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

        /* Send it on its way. Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
                        | BD_ENET_TX_LAST | BD_ENET_TX_TC);
        bdp->cbd_sc = status;

        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);

        bdp_pre = fec_enet_get_pre_txbd(ndev);
        if ((id_entry->driver_data & FEC_QUIRK_BUG_TKT168103) &&
            !(bdp_pre->cbd_sc & BD_ENET_TX_READY))
                schedule_delayed_work(&fep->fixup_trigger_tx,
                                        msecs_to_jiffies(1));

        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
        else
                bdp++;

        if (bdp == fep->dirty_tx) {
                fep->tx_full = 1;
                netif_stop_queue(ndev);
        }

        fep->cur_tx = bdp;

        spin_unlock_irqrestore(&fep->hw_lock, flags);

        return NETDEV_TX_OK;
}

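/* Tx watchdog: called by the networking core when a transmit hangs;
 * stop and restart the controller, then wake the queue if possible.
 */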
static void
fec_timeout(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        ndev->stats.tx_errors++;

        netif_device_detach(ndev);
        fec_stop(ndev);

        fec_restart(ndev, fep->full_duplex);
        netif_device_attach(ndev);
        ndev->trans_start = jiffies; /* prevent tx timeout */
        if (fep->link && !fep->tx_full)
                netif_wake_queue(ndev);
}

430 fec_rx_int_is_enabled(struct net_device *ndev, bool enabled) 430 fec_rx_int_is_enabled(struct net_device *ndev, bool enabled)
431 { 431 {
432 struct fec_enet_private *fep = netdev_priv(ndev); 432 struct fec_enet_private *fep = netdev_priv(ndev);
433 uint int_events; 433 uint int_events;
434 434
435 int_events = readl(fep->hwp + FEC_IMASK); 435 int_events = readl(fep->hwp + FEC_IMASK);
436 if (enabled) 436 if (enabled)
437 int_events |= FEC_ENET_RXF; 437 int_events |= FEC_ENET_RXF;
438 else 438 else
439 int_events &= ~FEC_ENET_RXF; 439 int_events &= ~FEC_ENET_RXF;
440 writel(int_events, fep->hwp + FEC_IMASK); 440 writel(int_events, fep->hwp + FEC_IMASK);
441 } 441 }
442 442
443 #ifdef CONFIG_NET_POLL_CONTROLLER 443 #ifdef CONFIG_NET_POLL_CONTROLLER
444 static void fec_enet_netpoll(struct net_device *ndev) 444 static void fec_enet_netpoll(struct net_device *ndev)
445 { 445 {
446 disable_irq(ndev->irq); 446 disable_irq(ndev->irq);
447 fec_enet_interrupt(ndev->irq, ndev); 447 fec_enet_interrupt(ndev->irq, ndev);
448 enable_irq(ndev->irq); 448 enable_irq(ndev->irq);
449 } 449 }
450 #endif 450 #endif
451 451
static void
fec_enet_tx(struct net_device *ndev)
{
        struct fec_enet_private *fep;
        struct fec_ptp_private *fpp;
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;

        fep = netdev_priv(ndev);
        fpp = fep->ptp_priv;
        spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;

        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
                if (bdp == fep->cur_tx && fep->tx_full == 0)
                        break;

                if (bdp->cbd_bufaddr)
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;

                skb = fep->tx_skbuff[fep->skb_dirty];
                if (!skb)
                        break;
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                BD_ENET_TX_RL | BD_ENET_TX_UN |
                                BD_ENET_TX_CSL)) {
                        ndev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
                                ndev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
                                ndev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
                                ndev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
                                ndev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
                                ndev->stats.tx_carrier_errors++;
                } else {
                        ndev->stats.tx_packets++;
                }

                if (status & BD_ENET_TX_READY)
                        printk("HEY! Enet xmit interrupt and TX_READY.\n");

                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
                        ndev->stats.collisions++;

#if defined(CONFIG_ENHANCED_BD)
                if (fep->ptimer_present) {
                        if (bdp->cbd_esc & BD_ENET_TX_TS)
                                fec_ptp_store_txstamp(fpp, skb, bdp);
                }
#elif defined(CONFIG_IN_BAND)
                if (fep->ptimer_present) {
                        if (status & BD_ENET_TX_PTP)
                                fec_ptp_store_txstamp(fpp, skb, bdp);
                }
#endif

                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
                fep->tx_skbuff[fep->skb_dirty] = NULL;
                fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

                /* Update pointer to next buffer descriptor to be transmitted */
                if (status & BD_ENET_TX_WRAP)
                        bdp = fep->tx_bd_base;
                else
                        bdp++;

                /* Since we have freed up a buffer, the ring is no longer full
                 */
                if (fep->tx_full) {
                        fep->tx_full = 0;
                        if (netif_queue_stopped(ndev))
                                netif_wake_queue(ndev);
                }
        }
        fep->dirty_tx = bdp;
        spin_unlock(&fep->hw_lock);
}

/* NAPI polling receive packets */
static int fec_rx_poll(struct napi_struct *napi, int budget)
{
        struct fec_enet_private *fep =
                container_of(napi, struct fec_enet_private, napi);
        struct net_device *ndev = napi->dev;
        struct fec_ptp_private *fpp = fep->ptp_priv;
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        int pkt_received = 0;
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;
        ushort pkt_len;
        __u8 *data;

        if (fep->use_napi)
                WARN_ON(!budget);

#ifdef CONFIG_M532x
        flush_cache_all();
#endif

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
                if (pkt_received >= budget)
                        break;
                pkt_received++;

                /* Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((status & BD_ENET_RX_LAST) == 0)
                        dev_err(&ndev->dev, "FEC ENET: rcv is not +last\n");

                if (!fep->opened)
                        goto rx_processing_done;

                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                                BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        ndev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
                                ndev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO) /* Frame alignment */
                                ndev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR) /* CRC Error */
                                ndev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV) /* FIFO overrun */
                                ndev->stats.rx_fifo_errors++;
                }

                /* Report late collisions as a frame error.
                 * On this error, the BD is closed, but we don't know what we
                 * have in the buffer. So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
                        ndev->stats.rx_errors++;
                        ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
                ndev->stats.rx_bytes += pkt_len;
                data = (__u8 *)__va(bdp->cbd_bufaddr);

                if (bdp->cbd_bufaddr)
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);

                if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, pkt_len);

                /* This does 16 byte alignment, exactly what we need.
                 * The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

                if (unlikely(!skb)) {
                        dev_err(&ndev->dev,
                                "%s: Memory squeeze, dropping packet.\n",
                                ndev->name);
                        ndev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4); /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
                        /* 1588 message TS handle */
                        if (fep->ptimer_present)
                                fec_ptp_store_rxstamp(fpp, skb, bdp);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_receive_skb(skb);
                }

                bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
                bdp->cbd_sc = status;
#ifdef CONFIG_ENHANCED_BD
                bdp->cbd_esc = BD_ENET_RX_INT;
                bdp->cbd_prot = 0;
                bdp->cbd_bdu = 0;
#endif

                /* Update BD pointer to next entry */
                if (status & BD_ENET_RX_WRAP)
                        bdp = fep->rx_bd_base;
                else
                        bdp++;
                /* Doing this here will keep the FEC running while we process
                 * incoming frames. On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                writel(0, fep->hwp + FEC_R_DES_ACTIVE);
        }
        fep->cur_rx = bdp;

        if (pkt_received < budget) {
                napi_complete(napi);
                fec_rx_int_is_enabled(ndev, true);
        }

        return pkt_received;
}

/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_ptp_private *fpp = fep->ptp_priv;
        const struct platform_device_id *id_entry =
                                platform_get_device_id(fep->pdev);
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;
        ushort pkt_len;
        __u8 *data;

#ifdef CONFIG_M532x
        flush_cache_all();
#endif

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

                /* Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((status & BD_ENET_RX_LAST) == 0)
                        printk("FEC ENET: rcv is not +last\n");

                if (!fep->opened)
                        goto rx_processing_done;

                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                                BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        ndev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
                                ndev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO) /* Frame alignment */
                                ndev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR) /* CRC Error */
                                ndev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV) /* FIFO overrun */
                                ndev->stats.rx_fifo_errors++;
                }

                /* Report late collisions as a frame error.
                 * On this error, the BD is closed, but we don't know what we
                 * have in the buffer. So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
                        ndev->stats.rx_errors++;
                        ndev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
                ndev->stats.rx_bytes += pkt_len;
                data = (__u8 *)__va(bdp->cbd_bufaddr);

                if (bdp->cbd_bufaddr)
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

                if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
                        swap_buffer(data, pkt_len);

                /* This does 16 byte alignment, exactly what we need.
                 * The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

                if (unlikely(!skb)) {
                        printk("%s: Memory squeeze, dropping packet.\n",
                                        ndev->name);
                        ndev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4); /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
                        /* 1588 message TS handle */
                        if (fep->ptimer_present)
                                fec_ptp_store_rxstamp(fpp, skb, bdp);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                }

                bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
                                FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
                bdp->cbd_sc = status;
#ifdef CONFIG_ENHANCED_BD
                bdp->cbd_esc = BD_ENET_RX_INT;
                bdp->cbd_prot = 0;
                bdp->cbd_bdu = 0;
#endif

                /* Update BD pointer to next entry */
                if (status & BD_ENET_RX_WRAP)
                        bdp = fep->rx_bd_base;
                else
                        bdp++;
                /* Doing this here will keep the FEC running while we process
                 * incoming frames. On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                writel(0, fep->hwp + FEC_R_DES_ACTIVE);
        }
        fep->cur_rx = bdp;
}

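/* Top-level FEC interrupt handler: dispatches 1588 timer, receive,
 * transmit-complete and MDIO completion events.
 */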
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_ptp_private *fpp = fep->ptp_priv;
        uint int_events;
        ulong flags;
        irqreturn_t ret = IRQ_NONE;

        do {
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events & (~FEC_ENET_TS_TIMER),
                                fep->hwp + FEC_IEVENT);

                if (fep->ptimer_present && fpp) {
                        if (int_events & FEC_ENET_TS_TIMER) {
                                ret = IRQ_HANDLED;
                                fpp->prtc++;
                                fpp->prtc_acc_flag = true;

                                writel(FEC_ENET_TS_TIMER, fep->hwp + FEC_IEVENT);
                        } else {
                                fpp->prtc_acc_flag = false;
                        }
                }

                if (int_events & FEC_ENET_RXF) {
                        ret = IRQ_HANDLED;
                        spin_lock_irqsave(&fep->hw_lock, flags);

                        if (fep->use_napi) {
                                /* Disable the RX interrupt */
                                if (napi_schedule_prep(&fep->napi)) {
                                        fec_rx_int_is_enabled(ndev, false);
                                        __napi_schedule(&fep->napi);
                                }
                        } else
                                fec_enet_rx(ndev);

                        spin_unlock_irqrestore(&fep->hw_lock, flags);
                }

                /* Transmit OK, or non-fatal error. Update the buffer
                 * descriptors. FEC handles all errors, we just discover
                 * them as part of the transmit process.
                 */
                if (int_events & FEC_ENET_TXF) {
                        ret = IRQ_HANDLED;
                        fec_enet_tx(ndev);
                }

                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        complete(&fep->mdio_done);
                }
        } while (int_events);

        return ret;
}


/* ------------------------------------------------------------------------- */
static void __inline__ fec_get_mac(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        unsigned char *iap, tmpaddr[ETH_ALEN];

        /*
         * try to get mac address in following order:
         *
         * 1) module parameter via kernel command line in form
         *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
         */
        iap = macaddr;

        /*
         * 2) from flash or fuse (via platform data)
         */
        if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
                if (FEC_FLASHMAC)
                        iap = (unsigned char *)FEC_FLASHMAC;
#else
                if (pdata)
                        memcpy(iap, pdata->mac, ETH_ALEN);
#endif
        }

        /*
         * 3) FEC mac registers set by bootloader
         */
        if (!is_valid_ether_addr(iap)) {
                *((unsigned long *) &tmpaddr[0]) =
                        be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
                *((unsigned short *) &tmpaddr[4]) =
                        be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
                iap = &tmpaddr[0];
        }

        memcpy(ndev->dev_addr, iap, ETH_ALEN);

        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct phy_device *phy_dev = fep->phy_dev;
        struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
        unsigned long flags;

        int status_change = 0;

        spin_lock_irqsave(&fep->hw_lock, flags);

        /* Prevent a state halted on mii error */
        if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
                phy_dev->state = PHY_RESUMING;
                goto spin_unlock;
        }

        /* Duplex link change */
        if (phy_dev->link) {
                if (fep->full_duplex != phy_dev->duplex) {
                        fec_restart(ndev, phy_dev->duplex);
                        status_change = 1;
                }
        }

        /* Link on or off change */
        if (phy_dev->link != fep->link) {
                fep->link = phy_dev->link;
                if (phy_dev->link) {
                        fec_restart(ndev, phy_dev->duplex);
                        if (!fep->tx_full)
                                netif_wake_queue(ndev);
                } else
                        fec_stop(ndev);
                status_change = 1;
        }

spin_unlock:
        spin_unlock_irqrestore(&fep->hw_lock, flags);

        if (status_change) {
                if (!phy_dev->link && phy_dev && pdata && pdata->power_hibernate)
                        pdata->power_hibernate(phy_dev);
                phy_print_status(phy_dev);
        }
}

969 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 969 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
970 { 970 {
971 struct fec_enet_private *fep = bus->priv; 971 struct fec_enet_private *fep = bus->priv;
972 unsigned long time_left; 972 unsigned long time_left;
973 973
974 fep->mii_timeout = 0; 974 fep->mii_timeout = 0;
975 init_completion(&fep->mdio_done); 975 init_completion(&fep->mdio_done);
976 976
977 /* start a read op */ 977 /* start a read op */
978 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 978 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
979 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 979 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
980 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 980 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
981 981
982 /* wait for end of transfer */ 982 /* wait for end of transfer */
983 time_left = wait_for_completion_timeout(&fep->mdio_done, 983 time_left = wait_for_completion_timeout(&fep->mdio_done,
984 msecs_to_jiffies(FEC_MII_TIMEOUT)); 984 msecs_to_jiffies(FEC_MII_TIMEOUT));
985 if (time_left == 0) { 985 if (time_left == 0) {
986 fep->mii_timeout = 1; 986 fep->mii_timeout = 1;
987 printk(KERN_ERR "FEC: MDIO read timeout, mii_id=%d\n", mii_id); 987 printk(KERN_ERR "FEC: MDIO read timeout, mii_id=%d\n", mii_id);
988 return -ETIMEDOUT; 988 return -ETIMEDOUT;
989 } 989 }
990 990
991 /* return value */ 991 /* return value */
992 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 992 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
993 } 993 }
994 994
995 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 995 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
996 u16 value) 996 u16 value)
997 { 997 {
998 struct fec_enet_private *fep = bus->priv; 998 struct fec_enet_private *fep = bus->priv;
999 unsigned long time_left; 999 unsigned long time_left;
1000 1000
1001 fep->mii_timeout = 0; 1001 fep->mii_timeout = 0;
1002 init_completion(&fep->mdio_done); 1002 init_completion(&fep->mdio_done);
1003 1003
1004 /* start a write op */ 1004 /* start a write op */
1005 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | 1005 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
1006 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1006 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
1007 FEC_MMFR_TA | FEC_MMFR_DATA(value), 1007 FEC_MMFR_TA | FEC_MMFR_DATA(value),
1008 fep->hwp + FEC_MII_DATA); 1008 fep->hwp + FEC_MII_DATA);
1009 1009
1010 /* wait for end of transfer */ 1010 /* wait for end of transfer */
1011 time_left = wait_for_completion_timeout(&fep->mdio_done, 1011 time_left = wait_for_completion_timeout(&fep->mdio_done,
1012 msecs_to_jiffies(FEC_MII_TIMEOUT)); 1012 msecs_to_jiffies(FEC_MII_TIMEOUT));
1013 if (time_left == 0) { 1013 if (time_left == 0) {
1014 fep->mii_timeout = 1; 1014 fep->mii_timeout = 1;
1015 printk(KERN_ERR "FEC: MDIO write timeout, mii_id=%d\n", mii_id); 1015 printk(KERN_ERR "FEC: MDIO write timeout, mii_id=%d\n", mii_id);
1016 return -ETIMEDOUT; 1016 return -ETIMEDOUT;
1017 } 1017 }
1018 1018
1019 return 0; 1019 return 0;
1020 } 1020 }
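
/*
 * The MMFR writes above build an IEEE 802.3 clause-22 management frame:
 * ST (start, 01) | OP (10 = read, 01 = write) | PA (5-bit PHY address) |
 * RA (5-bit register address) | TA (turnaround, 10) | 16-bit data.
 * As a sketch, assuming the FEC_MMFR_* field definitions in fec.h match
 * mainline (ST = 1 << 30, OP_READ = 2 << 28, PA at bit 23, RA at bit 18,
 * TA = 2 << 16), a read of register 1 (BMSR) on the PHY at address 0
 * packs to:
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(0) |
 *	FEC_MMFR_RA(1) | FEC_MMFR_TA == 0x60060000
 */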

static int fec_enet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->pdev->id;

	fep->phy_dev = NULL;

	/* check for attached phy */
	for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
		if ((fep->mii_bus->phy_mask & (1 << phy_id)))
			continue;
		if (fep->mii_bus->phy_map[phy_id] == NULL)
			continue;
		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
			continue;
		if (dev_id--)
			continue;
		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
		break;
	}

	if (phy_id >= PHY_MAX_ADDR) {
		printk(KERN_INFO "%s: no PHY, assuming direct connection "
			"to switch\n", ndev->name);
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
		phy_id = 0;
	}

	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
			fep->phy_interface);

	if (IS_ERR(phy_dev)) {
		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (cpu_is_mx6q() || cpu_is_mx6dl())
		phy_dev->supported &= PHY_GBIT_FEATURES;
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	/* enable phy pause frame for any platform */
	phy_dev->supported |= ADVERTISED_Pause;

	phy_dev->advertising = phy_dev->supported;

	fep->phy_dev = phy_dev;
	fep->link = 0;
	fep->full_duplex = 0;

	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
		"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
		fep->phy_dev->irq);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int err = -ENXIO, i;

	/*
	 * The dual fec interfaces are not equivalent with enet-mac.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 cannot work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
		/* fec1 uses fec0 mii_bus */
		fep->mii_bus = fec0_mii_bus;
		return 0;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII (MDC) speed to 2.5 MHz; phy_speed holds the divider
	 * value written to FEC_MII_SPEED below.
	 */
	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->mdc_clk),
			(FEC_ENET_MII_CLK << 2)) << 1;
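	/*
	 * Sketch of the arithmetic, assuming FEC_ENET_MII_CLK is 2500000
	 * (2.5 MHz) and a 50 MHz fec_mdc_clk:
	 *   DIV_ROUND_UP(50000000, 2500000 << 2) << 1 = 5 << 1 = 10
	 * The final << 1 keeps bit 0 clear because the MII_SPEED field of
	 * the register starts at bit 1.
	 */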
	/* set hold time to 2 internal clock cycles */
	if (cpu_is_mx6q() || cpu_is_mx6dl())
		fep->phy_speed |= FEC_ENET_HOLD_TIME;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	fep->mii_bus = mdiobus_alloc();
	if (fep->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	fep->mii_bus->name = "fec_enet_mii_bus";
	fep->mii_bus->read = fec_enet_mdio_read;
	fep->mii_bus->write = fec_enet_mdio_write;
	fep->mii_bus->reset = fec_enet_mdio_reset;
	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
	fep->mii_bus->priv = fep;
	fep->mii_bus->parent = &pdev->dev;

	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!fep->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		fep->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(fep->mii_bus))
		goto err_out_free_mdio_irq;

	/* save fec0 mii_bus */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
		fec0_mii_bus = fep->mii_bus;

	return 0;

err_out_free_mdio_irq:
	kfree(fep->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(fep->mii_bus);
err_out:
	return err;
}

static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
	if (fep->phy_dev)
		phy_disconnect(fep->phy_dev);
	mdiobus_unregister(fep->mii_bus);
	kfree(fep->mii_bus->irq);
	mdiobus_free(fep->mii_bus);
}

static int fec_enet_get_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int fec_enet_set_settings(struct net_device *ndev,
				 struct ethtool_cmd *cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phydev = fep->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static void fec_enet_get_drvinfo(struct net_device *ndev,
				 struct ethtool_drvinfo *info)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	strcpy(info->driver, fep->pdev->dev.driver->name);
	strcpy(info->version, "Revision: 1.0");
	strcpy(info->bus_info, dev_name(&ndev->dev));
}

static struct ethtool_ops fec_enet_ethtool_ops = {
	.get_settings	= fec_enet_get_settings,
	.set_settings	= fec_enet_set_settings,
	.get_drvinfo	= fec_enet_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_ptp_private *priv = fep->ptp_priv;
	struct phy_device *phydev = fep->phy_dev;
	int retVal = 0;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

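	/*
	 * Dispatch note: cmds in [PTP_ENBL_TXTS_IOCTL, PTP_FLUSH_TIMESTAMP]
	 * are assumed to form one contiguous range of PTP ioctls; anything
	 * else is passed through to the PHY layer.
	 */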
	if ((cmd >= PTP_ENBL_TXTS_IOCTL) &&
			(cmd <= PTP_FLUSH_TIMESTAMP)) {
		if (fep->ptimer_present)
			retVal = fec_ptp_ioctl(priv, rq, cmd);
		else
			retVal = -ENODEV;
	} else
		retVal = phy_mii_ioctl(phydev, rq, cmd);

	return retVal;
}

static void fec_enet_free_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)
			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp++;
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
		kfree(fep->tx_bounce[i]);
}

static int fec_enet_alloc_buffers(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct bufdesc *bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(ndev);
			return -ENOMEM;
		}
		fep->rx_skbuff[i] = skb;

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		bdp->cbd_sc = BD_ENET_RX_EMPTY;
#ifdef CONFIG_ENHANCED_BD
		bdp->cbd_esc = BD_ENET_RX_INT;
#endif
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
		if (!fep->tx_bounce[i]) {
			fec_enet_free_buffers(ndev);
			return -ENOMEM;
		}

		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
#ifdef CONFIG_ENHANCED_BD
		bdp->cbd_esc = BD_ENET_TX_INT;
#endif
		bdp++;
	}

	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;
}

static int
fec_enet_open(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	int ret;

	if (fep->use_napi)
		napi_enable(&fep->napi);

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */
	clk_enable(fep->clk);
	ret = fec_enet_alloc_buffers(ndev);
	if (ret)
		return ret;

	/* Probe and connect to the PHY when opening the interface */
	ret = fec_enet_mii_probe(ndev);
	if (ret) {
		fec_enet_free_buffers(ndev);
		return ret;
	}

	phy_start(fep->phy_dev);
	netif_start_queue(ndev);
	fep->opened = 1;

	ret = -EINVAL;
	if (pdata && pdata->init && pdata->init(fep->phy_dev))
		return ret;

	return 0;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fep->opened = 0;
	if (fep->use_napi)
		napi_disable(&fep->napi);

	fec_stop(ndev);

	if (fep->phy_dev) {
		phy_stop(fep->phy_dev);
		phy_disconnect(fep->phy_dev);
	}

	fec_enet_free_buffers(ndev);

	/* Gate the clock off to save power */
	clk_disable(fep->clk);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6	/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear the filter and add the addresses in the hash registers
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(ha, ndev) {
		/* Only support group multicast for now */
		if (!(ha->addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address */
		crc = 0xffffffff;

		for (i = 0; i < ndev->addr_len; i++) {
			data = ha->addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only the upper 6 bits (HASH_BITS) are used,
		 * which select a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
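
/*
 * Minimal standalone sketch of the hash computation above (added for
 * illustration; not part of the original driver): given a multicast MAC,
 * it returns the 6-bit index that set_multicast_list() would set in the
 * 64-bit GRP_HASH_TABLE_{HIGH,LOW} pair.
 */
static inline unsigned char fec_mc_hash_example(const unsigned char *addr,
						unsigned int len)
{
	unsigned int i, bit, data, crc = 0xffffffff;

	/* bitwise little-endian CRC-32 over all address bytes */
	for (i = 0; i < len; i++) {
		data = addr[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
	}

	/* the upper HASH_BITS of the CRC select the filter bit */
	return (crc >> (32 - HASH_BITS)) & 0x3f;
}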

/* Program a new MAC address into the hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
		fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
		fep->hwp + FEC_ADDR_HIGH);
	return 0;
}
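
/*
 * Register layout example for the writes above, using the sample address
 * 00:04:9f:01:30:e0 from the comment in fec_get_mac(): FEC_ADDR_LOW gets
 * bytes 0-3 with byte 0 in the top bits (0x00049f01), and FEC_ADDR_HIGH
 * gets bytes 4-5 in its upper half (0x30e00000). fec_get_mac() reverses
 * this packing when it reads a bootloader-programmed address back.
 */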

static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_do_ioctl		= fec_enet_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= fec_enet_netpoll,
#endif
};

/* Init TX buffer descriptors
 */
static void fec_enet_txbd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	int i;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;
}

/*
 * XXX:  We need to clean up on failure exits here.
 *
 */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_noncacheable(NULL, BUFDES_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: failed to allocate descriptor memory\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);

	fep->netdev = ndev;

	/* Get the Ethernet address */
	fec_get_mac(ndev);

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	fep->use_napi = FEC_NAPI_ENABLE;
	fep->napi_weight = FEC_NAPI_WEIGHT;
	if (fep->use_napi) {
		fec_rx_int_is_enabled(ndev, false);
		netif_napi_add(ndev, &fep->napi, fec_rx_poll, fep->napi_weight);
	}

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* Init transmit descriptors */
	fec_enet_txbd_init(ndev);

	fec_restart(ndev, 0);

	return 0;
}

/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	int i, ret;
	u32 val, temp_mac[2], reg = 0;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* If U-Boot didn't set a MAC address, take it from the kernel
	 * command line; if the command line didn't set one either, read
	 * it from OCOTP; failing that, a random address is allocated.
	 */
	memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
	writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
	writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

-	/* Reset all multicast. */
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+	/* Setup multicast filter. */
+	set_multicast_list(dev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base. */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);
	/* Reinit transmit descriptors */
	fec_enet_txbd_init(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
		val = readl(fep->hwp + FEC_R_CNTRL);

		/* MII or RMII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			val |= (1 << 8);
		else
			val &= ~(1 << 8);

		/* 10M or 100M */
		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
			val &= ~(1 << 9);
		else
			val |= (1 << 9);

		/* Enable pause frames.
		 * The ENET pause frame feature has two issues tracked as
		 * ticket TKT116501; they are fixed on Rigel TO1.1 and
		 * Arik TO1.2.
		 */
		if ((cpu_is_mx6q() &&
			(mx6q_revision() >= IMX_CHIP_REVISION_1_2)) ||
			(cpu_is_mx6dl() &&
			(mx6dl_revision() >= IMX_CHIP_REVISION_1_1)))
			val |= FEC_ENET_FCE;

		writel(val, fep->hwp + FEC_R_CNTRL);
	}

	if (fep->ptimer_present) {
		/* Set Timer count */
		ret = fec_ptp_start(fep->ptp_priv);
		if (ret) {
			fep->ptimer_present = 0;
			reg = 0x0;
		} else
#if defined(CONFIG_SOC_IMX28) || defined(CONFIG_ARCH_MX6)
			reg = 0x00000010;
#else
			reg = 0x0;
#endif
	} else
		reg = 0x0;

	if (cpu_is_mx25() || cpu_is_mx53() || cpu_is_mx6sl()) {
		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 * RMII, 50 MHz, no loopback, no echo
			 */
			writel(1, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
			udelay(10);
			if (!(readl(fep->hwp + FEC_MIIGSK_ENR) & 4)) {
				udelay(100);
				if (!(readl(fep->hwp + FEC_MIIGSK_ENR) & 4))
					dev_err(&fep->pdev->dev,
						"switch to RMII failed!\n");
			}
		}
	}

	/* ENET enable */
	val = reg | (0x1 << 1);

	/* if the PHY works in 1G mode, set the ENET RGMII speed to 1G */
	if (fep->phy_dev && (fep->phy_dev->supported &
		(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) &&
		fep->phy_interface == PHY_INTERFACE_MODE_RGMII &&
		fep->phy_dev->speed == SPEED_1000)
		val |= (0x1 << 5);

	/* RX FIFO threshold setting for the ENET pause frame feature.
	 * Only set the parameters after ticket TKT116501 is fixed;
	 * the issue is fixed on Rigel TO1.1 and Arik TO1.2.
	 */
	if ((cpu_is_mx6q() &&
		(mx6q_revision() >= IMX_CHIP_REVISION_1_2)) ||
		(cpu_is_mx6dl() &&
		(mx6dl_revision() >= IMX_CHIP_REVISION_1_1))) {
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	}

	if (cpu_is_mx6q() || cpu_is_mx6dl()) {
		/* enable endian swap */
		val |= (0x1 << 8);
		/* enable ENET store and forward mode */
		writel(0x1 << 8, fep->hwp + FEC_X_WMRK);
	}
	writel(val, fep->hwp + FEC_ECNTRL);

	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	if (cpu_is_mx6q() || cpu_is_mx6dl() || cpu_is_mx2() || cpu_is_mx3())
		val = (FEC_1588_IMASK | FEC_DEFAULT_IMASK);
	else
		val = FEC_DEFAULT_IMASK;
	writel(val, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop: graceful transmit stop did not complete!\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	if (cpu_is_mx6q() || cpu_is_mx6dl())
		/* FIXME: we have to enable enet to keep the MII interrupt working. */
		writel(2, fep->hwp + FEC_ECNTRL);

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	if (fep->ptimer_present)
		fec_ptp_stop(fep->ptp_priv);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	if (netif_running(dev))
		netif_stop_queue(dev);
	netif_carrier_off(dev);	/* prevent tx timeout */
	fep->link = 0;
}

static int __devinit
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	struct net_device *ndev;
	int i, irq, ret = 0;
	struct resource *r;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r)
		return -EBUSY;

	/* Init network device */
	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
	if (!ndev) {
		ret = -ENOMEM;
		goto failed_alloc_etherdev;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	fep->hwp = ioremap(r->start, resource_size(r));
	fep->pdev = pdev;

	if (!fep->hwp) {
		ret = -ENOMEM;
		goto failed_ioremap;
	}

	platform_set_drvdata(pdev, ndev);

	pdata = pdev->dev.platform_data;
	if (pdata)
		fep->phy_interface = pdata->phy;

	if (pdata && pdata->gpio_irq > 0) {
		gpio_request(pdata->gpio_irq, "gpio_enet_irq");
		gpio_direction_input(pdata->gpio_irq);

		irq = gpio_to_irq(pdata->gpio_irq);
		ret = request_irq(irq, fec_enet_interrupt,
				IRQF_TRIGGER_RISING,
				pdev->name, ndev);
		if (ret)
			goto failed_irq;
	} else {
		/* This device has up to three irqs on some platforms */
		for (i = 0; i < 3; i++) {
			irq = platform_get_irq(pdev, i);
			if (i && irq < 0)
				break;
			ret = request_irq(irq, fec_enet_interrupt,
					IRQF_DISABLED, pdev->name, ndev);
			if (ret) {
				while (--i >= 0) {
					irq = platform_get_irq(pdev, i);
					free_irq(irq, ndev);
				}
				goto failed_irq;
			}
		}
	}

	fep->clk = clk_get(&pdev->dev, "fec_clk");
	if (IS_ERR(fep->clk)) {
		ret = PTR_ERR(fep->clk);
		goto failed_clk;
	}
	fep->mdc_clk = clk_get(&pdev->dev, "fec_mdc_clk");
	if (IS_ERR(fep->mdc_clk)) {
		ret = PTR_ERR(fep->mdc_clk);
		goto failed_clk;
	}
	clk_enable(fep->clk);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	if (fec_ptp_malloc_priv(&(fep->ptp_priv))) {
		if (fep->ptp_priv) {
			fep->ptp_priv->hwp = fep->hwp;
			ret = fec_ptp_init(fep->ptp_priv, pdev->id);
			if (ret)
				printk(KERN_WARNING "IEEE1588: ptp-timer is unavailable\n");
			else
				fep->ptimer_present = 1;
		} else
			printk(KERN_ERR "IEEE1588: failed to allocate memory\n");
	}

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	clk_disable(fep->clk);

	INIT_DELAYED_WORK(&fep->fixup_trigger_tx, fixup_trigger_tx_func);

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	return 0;

failed_register:
	fec_enet_mii_remove(fep);
	if (fep->ptimer_present)
		fec_ptp_cleanup(fep->ptp_priv);
	kfree(fep->ptp_priv);
failed_mii_init:
failed_init:
	clk_disable(fep->clk);
	clk_put(fep->clk);
	clk_put(fep->mdc_clk);
failed_clk:
	if (pdata && pdata->gpio_irq > 0)
		free_irq(irq, ndev);
	else {
		for (i = 0; i < 3; i++) {
			irq = platform_get_irq(pdev, i);
			if (irq > 0)
				free_irq(irq, ndev);
		}
	}
failed_irq:
	iounmap(fep->hwp);
failed_ioremap:
	free_netdev(ndev);
failed_alloc_etherdev:
	release_mem_region(r->start, resource_size(r));

	return ret;
}
1955 1954
static int __devexit
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct resource *r;

	cancel_delayed_work_sync(&fep->fixup_trigger_tx);
	/* Unregister first: close() must run while clocks and MMIO are alive */
	unregister_netdev(ndev);
	fec_stop(ndev);
	fec_enet_mii_remove(fep);
	clk_disable(fep->clk);
	clk_put(fep->clk);
	clk_put(fep->mdc_clk);
	iounmap((void __iomem *)ndev->base_addr);
	if (fep->ptimer_present)
		fec_ptp_cleanup(fep->ptp_priv);
	kfree(fep->ptp_priv);
	free_netdev(ndev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	BUG_ON(!r);
	release_mem_region(r->start, resource_size(r));

	platform_set_drvdata(pdev, NULL);

	return 0;
}

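/*
 * Power management: suspend detaches the interface and gates the clock;
 * resume re-enables the clock and calls fec_restart(), which reprograms
 * the MAC from scratch, since register contents are not assumed to
 * survive while the block is unclocked.
 */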
#ifdef CONFIG_PM
static int
fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		fec_stop(ndev);
		clk_disable(fep->clk);
	}

	return 0;
}

static int
fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (netif_running(ndev)) {
		clk_enable(fep->clk);
		fec_restart(ndev, fep->full_duplex);
		netif_device_attach(ndev);
	}

	return 0;
}

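/*
 * One pair of handlers covers all transitions: the hardware is fully
 * reinitialised on every wake-up via fec_restart(), so the hibernation
 * callbacks (freeze/thaw, poweroff/restore) can reuse suspend/resume.
 */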
static const struct dev_pm_ops fec_pm_ops = {
	.suspend = fec_suspend,
	.resume = fec_resume,
	.freeze = fec_suspend,
	.thaw = fec_resume,
	.poweroff = fec_suspend,
	.restore = fec_resume,
};
#endif

static struct platform_driver fec_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &fec_pm_ops,
#endif
	},
	.id_table = fec_devtype,
	.probe = fec_probe,
	.remove = __devexit_p(fec_drv_remove),
};

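/*
 * "fec_mac=" boot parameter: parses a colon-separated MAC address from
 * the kernel command line into the module-level macaddr[] array, one
 * hex byte per field. A malformed field (non-hex, or a value above
 * 0xff) stops the parse at that point; bytes already parsed are kept.
 */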
static int fec_mac_addr_setup(char *mac_addr)
{
	char *ptr, *p = mac_addr;
	unsigned long tmp;
	int i = 0, ret = 0;

	while (p && (*p) && i < 6) {
		ptr = strchr(p, ':');
		if (ptr)
			*ptr++ = '\0';

		if (strlen(p)) {
			ret = strict_strtoul(p, 16, &tmp);
			if (ret < 0 || tmp > 0xff)
				break;
			macaddr[i++] = tmp;
		}
		p = ptr;
	}

	return 0;
}

__setup("fec_mac=", fec_mac_addr_setup);

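/*
 * Example (hypothetical values) on the kernel command line:
 *
 *     fec_mac=00:04:9f:01:30:e0
 *
 * would populate macaddr[] with { 0x00, 0x04, 0x9f, 0x01, 0x30, 0xe0 }.
 */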
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}

static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}

module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");