Commit 78abcb13dd573f80d76d12007b36200a86f1e494
Committed by David S. Miller
1 parent abf90cca97
Exists in master and in 7 other branches
net: fix section mismatch in fec.c
fec_enet_init is called by both fec_probe and fec_resume, so it
shouldn't be marked as __init.

Signed-off-by: Steven King <sfking@fdwdc.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
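(The changed hunk itself lies beyond the point where the listing below is truncated. Going by the commit message, the one deletion/addition pair removes the __init annotation from fec_enet_init(), presumably of this shape; the exact signature is assumed, since the hunk is not shown in this excerpt:

-static int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)

Code marked __init is placed in the .init.text section and freed once boot completes, so a function still reachable from fec_resume() must not carry the annotation; dropping it silences the modpost section mismatch warning and avoids calling into freed memory on resume.)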
Showing 1 changed file with 1 addition and 1 deletion
drivers/net/fec.c
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers.  I allocate memory
 * pages and then divide them into 2K frame buffers.  This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#ifndef CONFIG_ARCH_MXC
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#endif

#include "fec.h"

#ifdef CONFIG_ARCH_MXC
#include <mach/hardware.h>
#define FEC_ALIGNMENT   0xf
#else
#define FEC_ALIGNMENT   0x3
#endif
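/* FEC_ALIGNMENT is used as an address mask: the i.MX (ARCH_MXC) FEC
 * wants Tx buffers 16-byte aligned, the ColdFire variants only 4-byte.
 * Data failing this test is copied through tx_bounce[] in
 * fec_enet_start_xmit() below.
 */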

/*
 * Define the fixed address of the FEC hardware.
 */
#if defined(CONFIG_M5272)
#define HAVE_mii_link_interrupt

static unsigned char fec_mac_default[] = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get MAC from.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC    0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC    0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC    0xf0020000
#elif defined (CONFIG_M5272C3)
#define FEC_FLASHMAC    (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC    0xffc0406b
#else
#define FEC_FLASHMAC    0
#endif
#endif /* CONFIG_M5272 */

/* Forward declarations of some structures to support different PHYs */

typedef struct {
        uint mii_data;
        void (*funct)(uint mii_reg, struct net_device *dev);
} phy_cmd_t;

typedef struct {
        uint id;
        char *name;

        const phy_cmd_t *config;
        const phy_cmd_t *startup;
        const phy_cmd_t *ack_int;
        const phy_cmd_t *shutdown;
} phy_info_t;

/* The number of Tx and Rx buffers.  These are allocated from the page
 * pool.  The code may assume these are a power of two, so it is best
 * to keep them that size.
 * We don't need to allocate pages for the transmitter.  We just use
 * the skbuffer directly.
 */
#define FEC_ENET_RX_PAGES       8
#define FEC_ENET_RX_FRSIZE      2048
#define FEC_ENET_RX_FRPPG       (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE            (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE      2048
#define FEC_ENET_TX_FRPPG       (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE            16      /* Must be power of two */
#define TX_RING_MOD_MASK        15      /*   for this to work */
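/* Keeping the ring sizes a power of two lets "(i + 1) & TX_RING_MOD_MASK"
 * wrap a ring index with a single AND instead of a modulo; see the
 * skb_cur and skb_dirty updates in fec_enet_start_xmit() and
 * fec_enet_tx() below.
 */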

#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif

/* Interrupt events/masks. */
#define FEC_ENET_HBERR  ((uint)0x80000000)      /* Heartbeat error */
#define FEC_ENET_BABR   ((uint)0x40000000)      /* Babbling receiver */
#define FEC_ENET_BABT   ((uint)0x20000000)      /* Babbling transmitter */
#define FEC_ENET_GRA    ((uint)0x10000000)      /* Graceful stop complete */
#define FEC_ENET_TXF    ((uint)0x08000000)      /* Full frame transmitted */
#define FEC_ENET_TXB    ((uint)0x04000000)      /* A buffer was transmitted */
#define FEC_ENET_RXF    ((uint)0x02000000)      /* Full frame received */
#define FEC_ENET_RXB    ((uint)0x01000000)      /* A buffer was received */
#define FEC_ENET_MII    ((uint)0x00800000)      /* MII interrupt */
#define FEC_ENET_EBERR  ((uint)0x00400000)      /* SDMA bus error */

/* The FEC stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE         1518
#define PKT_MINBUF_SIZE         64
#define PKT_MAXBLR_SIZE         1520


/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
#define OPT_FRAME_SIZE  (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE  0
#endif

/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
 * tx_bd_base always point to the base of the buffer descriptors.  The
 * cur_rx and cur_tx point to the currently available buffer.
 * The dirty_tx tracks the current buffer that is being sent by the
 * controller.  The cur_tx and dirty_tx are equal under both completely
 * empty and completely full conditions.  The empty/ready indicator in
 * the buffer descriptor determines the actual condition.
 */
struct fec_enet_private {
        /* Hardware registers of the FEC device */
        void __iomem *hwp;

        struct net_device *netdev;

        struct clk *clk;

        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
        unsigned char *tx_bounce[TX_RING_SIZE];
        struct sk_buff* tx_skbuff[TX_RING_SIZE];
        struct sk_buff* rx_skbuff[RX_RING_SIZE];
        ushort skb_cur;
        ushort skb_dirty;

        /* CPM dual port RAM relative addresses */
        dma_addr_t bd_dma;
        /* Address of Rx and Tx buffers */
        struct bufdesc *rx_bd_base;
        struct bufdesc *tx_bd_base;
        /* The next free ring entry */
        struct bufdesc *cur_rx, *cur_tx;
        /* The ring entries to be free()ed */
        struct bufdesc *dirty_tx;

        uint tx_full;
        /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
        spinlock_t hw_lock;
        /* hold while accessing the mii_list_t() elements */
        spinlock_t mii_lock;

        uint phy_id;
        uint phy_id_done;
        uint phy_status;
        uint phy_speed;
        phy_info_t const *phy;
        struct work_struct phy_task;

        uint sequence_done;
        uint mii_phy_task_queued;

        uint phy_addr;

        int index;
        int opened;
        int link;
        int old_link;
        int full_duplex;
};

static void fec_enet_mii(struct net_device *dev);
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
static void fec_enet_tx(struct net_device *dev);
static void fec_enet_rx(struct net_device *dev);
static int fec_enet_close(struct net_device *dev);
static void fec_restart(struct net_device *dev, int duplex);
static void fec_stop(struct net_device *dev);


/* MII processing.  We keep this as simple as possible.  Requests are
 * placed on the list (if there is room).  When the request is finished
 * by the MII, an optional function may be called.
 */
typedef struct mii_list {
        uint mii_regval;
        void (*mii_func)(uint val, struct net_device *dev);
        struct mii_list *mii_next;
} mii_list_t;

#define NMII    20
static mii_list_t mii_cmds[NMII];
static mii_list_t *mii_free;
static mii_list_t *mii_head;
static mii_list_t *mii_tail;

static int mii_queue(struct net_device *dev, int request,
                void (*func)(uint, struct net_device *));

/* Make MII read/write commands for the FEC */
#define mk_mii_read(REG)        (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL)  (0x50020000 | ((REG & 0x1f) << 18) | \
                                (VAL & 0xffff))
#define mk_mii_end      0
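/* These opcodes spell out a standard MII management frame in the FEC
 * MII data register: start bits 01, opcode 10 (read) or 01 (write), a
 * 5-bit PHY address at bit 23 (OR'ed in later by mii_queue_unlocked()),
 * a 5-bit register address at bit 18, turnaround 10, then 16 data bits.
 */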

/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)

/* Register definitions for the PHY */

#define MII_REG_CR      0  /* Control Register */
#define MII_REG_SR      1  /* Status Register */
#define MII_REG_PHYIR1  2  /* PHY Identification Register 1 */
#define MII_REG_PHYIR2  3  /* PHY Identification Register 2 */
#define MII_REG_ANAR    4  /* A-N Advertisement Register */
#define MII_REG_ANLPAR  5  /* A-N Link Partner Ability Register */
#define MII_REG_ANER    6  /* A-N Expansion Register */
#define MII_REG_ANNPTR  7  /* A-N Next Page Transmit Register */
#define MII_REG_ANLPRNPR 8  /* A-N Link Partner Received Next Page Reg. */

/* values for phy_status */

#define PHY_CONF_ANE    0x0001  /* 1 auto-negotiation enabled */
#define PHY_CONF_LOOP   0x0002  /* 1 loopback mode enabled */
#define PHY_CONF_SPMASK 0x00f0  /* mask for speed */
#define PHY_CONF_10HDX  0x0010  /* 10 Mbit half duplex supported */
#define PHY_CONF_10FDX  0x0020  /* 10 Mbit full duplex supported */
#define PHY_CONF_100HDX 0x0040  /* 100 Mbit half duplex supported */
#define PHY_CONF_100FDX 0x0080  /* 100 Mbit full duplex supported */

#define PHY_STAT_LINK   0x0100  /* 1 up - 0 down */
#define PHY_STAT_FAULT  0x0200  /* 1 remote fault */
#define PHY_STAT_ANC    0x0400  /* 1 auto-negotiation complete */
#define PHY_STAT_SPMASK 0xf000  /* mask for speed */
#define PHY_STAT_10HDX  0x1000  /* 10 Mbit half duplex selected */
#define PHY_STAT_10FDX  0x2000  /* 10 Mbit full duplex selected */
#define PHY_STAT_100HDX 0x4000  /* 100 Mbit half duplex selected */
#define PHY_STAT_100FDX 0x8000  /* 100 Mbit full duplex selected */


static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct bufdesc *bdp;
        void *bufaddr;
        unsigned short status;
        unsigned long flags;

        if (!fep->link) {
                /* Link is down or autonegotiation is in progress. */
                return NETDEV_TX_BUSY;
        }

        spin_lock_irqsave(&fep->hw_lock, flags);
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;

        status = bdp->cbd_sc;

        if (status & BD_ENET_TX_READY) {
                /* Ooops.  All transmit buffers are full.  Bail out.
                 * This should not happen, since dev->tbusy should be set.
                 */
                printk("%s: tx queue full!.\n", dev->name);
                spin_unlock_irqrestore(&fep->hw_lock, flags);
                return NETDEV_TX_BUSY;
        }

        /* Clear all of the status flags */
        status &= ~BD_ENET_TX_STATS;

        /* Set buffer length and buffer pointer */
        bufaddr = skb->data;
        bdp->cbd_datlen = skb->len;

        /*
         * On some FEC implementations data must be aligned on
         * 4-byte boundaries. Use bounce buffers to copy data
         * and get it aligned. Ugh.
         */
        if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
                unsigned int index;
                index = bdp - fep->tx_bd_base;
                memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
                bufaddr = fep->tx_bounce[index];
        }

        /* Save skb pointer */
        fep->tx_skbuff[fep->skb_cur] = skb;

        dev->stats.tx_bytes += skb->len;
        fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

        /* Push the data cache so the CPM does not get stale memory
         * data.
         */
        bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
                        FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
                        | BD_ENET_TX_LAST | BD_ENET_TX_TC);
        bdp->cbd_sc = status;

        dev->trans_start = jiffies;

        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);

        /* If this was the last BD in the ring, start at the beginning again. */
        if (status & BD_ENET_TX_WRAP)
                bdp = fep->tx_bd_base;
        else
                bdp++;

        if (bdp == fep->dirty_tx) {
                fep->tx_full = 1;
                netif_stop_queue(dev);
        }

        fep->cur_tx = bdp;

        spin_unlock_irqrestore(&fep->hw_lock, flags);

        return NETDEV_TX_OK;
}

static void
fec_timeout(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);

        dev->stats.tx_errors++;

        fec_restart(dev, fep->full_duplex);
        netif_wake_queue(dev);
}

static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
        struct net_device *dev = dev_id;
        struct fec_enet_private *fep = netdev_priv(dev);
        uint int_events;
        irqreturn_t ret = IRQ_NONE;

        do {
                int_events = readl(fep->hwp + FEC_IEVENT);
                writel(int_events, fep->hwp + FEC_IEVENT);

                if (int_events & FEC_ENET_RXF) {
                        ret = IRQ_HANDLED;
                        fec_enet_rx(dev);
                }

                /* Transmit OK, or non-fatal error. Update the buffer
                 * descriptors. FEC handles all errors, we just discover
                 * them as part of the transmit process.
                 */
                if (int_events & FEC_ENET_TXF) {
                        ret = IRQ_HANDLED;
                        fec_enet_tx(dev);
                }

                if (int_events & FEC_ENET_MII) {
                        ret = IRQ_HANDLED;
                        fec_enet_mii(dev);
                }

        } while (int_events);

        return ret;
}


static void
fec_enet_tx(struct net_device *dev)
{
        struct fec_enet_private *fep;
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;

        fep = netdev_priv(dev);
        spin_lock(&fep->hw_lock);
        bdp = fep->dirty_tx;

        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
                if (bdp == fep->cur_tx && fep->tx_full == 0)
                        break;

                dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
                bdp->cbd_bufaddr = 0;

                skb = fep->tx_skbuff[fep->skb_dirty];
                /* Check for errors. */
                if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
                                BD_ENET_TX_RL | BD_ENET_TX_UN |
                                BD_ENET_TX_CSL)) {
                        dev->stats.tx_errors++;
                        if (status & BD_ENET_TX_HB)  /* No heartbeat */
                                dev->stats.tx_heartbeat_errors++;
                        if (status & BD_ENET_TX_LC)  /* Late collision */
                                dev->stats.tx_window_errors++;
                        if (status & BD_ENET_TX_RL)  /* Retrans limit */
                                dev->stats.tx_aborted_errors++;
                        if (status & BD_ENET_TX_UN)  /* Underrun */
                                dev->stats.tx_fifo_errors++;
                        if (status & BD_ENET_TX_CSL) /* Carrier lost */
                                dev->stats.tx_carrier_errors++;
                } else {
                        dev->stats.tx_packets++;
                }

                if (status & BD_ENET_TX_READY)
                        printk("HEY! Enet xmit interrupt and TX_READY.\n");

                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
                if (status & BD_ENET_TX_DEF)
                        dev->stats.collisions++;

                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
                fep->tx_skbuff[fep->skb_dirty] = NULL;
                fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

                /* Update pointer to next buffer descriptor to be transmitted */
                if (status & BD_ENET_TX_WRAP)
                        bdp = fep->tx_bd_base;
                else
                        bdp++;

                /* Since we have freed up a buffer, the ring is no longer full
                 */
                if (fep->tx_full) {
                        fep->tx_full = 0;
                        if (netif_queue_stopped(dev))
                                netif_wake_queue(dev);
                }
        }
        fep->dirty_tx = bdp;
        spin_unlock(&fep->hw_lock);
}


/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static void
fec_enet_rx(struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        struct bufdesc *bdp;
        unsigned short status;
        struct sk_buff *skb;
        ushort pkt_len;
        __u8 *data;

#ifdef CONFIG_M532x
        flush_cache_all();
#endif

        spin_lock(&fep->hw_lock);

        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
        bdp = fep->cur_rx;

        while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

                /* Since we have allocated space to hold a complete frame,
                 * the last indicator should be set.
                 */
                if ((status & BD_ENET_RX_LAST) == 0)
                        printk("FEC ENET: rcv is not +last\n");

                if (!fep->opened)
                        goto rx_processing_done;

                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                                BD_ENET_RX_CR | BD_ENET_RX_OV)) {
                        dev->stats.rx_errors++;
                        if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
                                /* Frame too long or too short. */
                                dev->stats.rx_length_errors++;
                        }
                        if (status & BD_ENET_RX_NO)     /* Frame alignment */
                                dev->stats.rx_frame_errors++;
                        if (status & BD_ENET_RX_CR)     /* CRC Error */
                                dev->stats.rx_crc_errors++;
                        if (status & BD_ENET_RX_OV)     /* FIFO overrun */
                                dev->stats.rx_fifo_errors++;
                }

                /* Report late collisions as a frame error.
                 * On this error, the BD is closed, but we don't know what we
                 * have in the buffer.  So, just drop this frame on the floor.
                 */
                if (status & BD_ENET_RX_CL) {
                        dev->stats.rx_errors++;
                        dev->stats.rx_frame_errors++;
                        goto rx_processing_done;
                }

                /* Process the incoming frame. */
                dev->stats.rx_packets++;
                pkt_len = bdp->cbd_datlen;
                dev->stats.rx_bytes += pkt_len;
                data = (__u8*)__va(bdp->cbd_bufaddr);

                dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
                                DMA_FROM_DEVICE);

                /* This does 16 byte alignment, exactly what we need.
                 * The packet length includes FCS, but we don't want to
                 * include that when passing upstream as it messes up
                 * bridging applications.
                 */
                skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);

                if (unlikely(!skb)) {
                        printk("%s: Memory squeeze, dropping packet.\n",
                                        dev->name);
                        dev->stats.rx_dropped++;
                } else {
                        skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len - 4);      /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                }

                bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
                                DMA_FROM_DEVICE);
rx_processing_done:
                /* Clear the status flags for this buffer */
                status &= ~BD_ENET_RX_STATS;

                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
                bdp->cbd_sc = status;

                /* Update BD pointer to next entry */
                if (status & BD_ENET_RX_WRAP)
                        bdp = fep->rx_bd_base;
                else
                        bdp++;
                /* Doing this here will keep the FEC running while we process
                 * incoming frames.  On a heavily loaded network, we should be
                 * able to keep up at the expense of system resources.
                 */
                writel(0, fep->hwp + FEC_R_DES_ACTIVE);
        }
        fep->cur_rx = bdp;

        spin_unlock(&fep->hw_lock);
}

/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
        struct fec_enet_private *fep;
        mii_list_t *mip;

        fep = netdev_priv(dev);
        spin_lock(&fep->mii_lock);

        if ((mip = mii_head) == NULL) {
                printk("MII and no head!\n");
                goto unlock;
        }

        if (mip->mii_func != NULL)
                (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);

        mii_head = mip->mii_next;
        mip->mii_next = mii_free;
        mii_free = mip;

        if ((mip = mii_head) != NULL)
                writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);

unlock:
        spin_unlock(&fep->mii_lock);
}

static int
mii_queue_unlocked(struct net_device *dev, int regval,
                void (*func)(uint, struct net_device *))
{
        struct fec_enet_private *fep;
        mii_list_t *mip;
        int retval;

        /* Add PHY address to register command */
        fep = netdev_priv(dev);

        regval |= fep->phy_addr << 23;
        retval = 0;

        if ((mip = mii_free) != NULL) {
                mii_free = mip->mii_next;
                mip->mii_regval = regval;
                mip->mii_func = func;
                mip->mii_next = NULL;
                if (mii_head) {
                        mii_tail->mii_next = mip;
                        mii_tail = mip;
                } else {
                        mii_head = mii_tail = mip;
                        writel(regval, fep->hwp + FEC_MII_DATA);
                }
        } else {
                retval = 1;
        }

        return retval;
}

static int
mii_queue(struct net_device *dev, int regval,
                void (*func)(uint, struct net_device *))
{
        struct fec_enet_private *fep;
        unsigned long flags;
        int retval;
        fep = netdev_priv(dev);
        spin_lock_irqsave(&fep->mii_lock, flags);
        retval = mii_queue_unlocked(dev, regval, func);
        spin_unlock_irqrestore(&fep->mii_lock, flags);
        return retval;
}

static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
{
        if(!c)
                return;

        for (; c->mii_data != mk_mii_end; c++)
                mii_queue(dev, c->mii_data, c->funct);
}

static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        volatile uint *s = &(fep->phy_status);
        uint status;

        status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

        if (mii_reg & 0x0004)
                status |= PHY_STAT_LINK;
        if (mii_reg & 0x0010)
                status |= PHY_STAT_FAULT;
        if (mii_reg & 0x0020)
                status |= PHY_STAT_ANC;
        *s = status;
}

static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        volatile uint *s = &(fep->phy_status);
        uint status;

        status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

        if (mii_reg & 0x1000)
                status |= PHY_CONF_ANE;
        if (mii_reg & 0x4000)
                status |= PHY_CONF_LOOP;
        *s = status;
}

static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
        struct fec_enet_private *fep = netdev_priv(dev);
        volatile uint *s = &(fep->phy_status);
        uint status;

        status = *s & ~(PHY_CONF_SPMASK);

        if (mii_reg & 0x0020)
                status |= PHY_CONF_10HDX;
        if (mii_reg & 0x0040)
                status |= PHY_CONF_10FDX;
        if (mii_reg & 0x0080)
                status |= PHY_CONF_100HDX;
        if (mii_reg & 0x00100)
                status |= PHY_CONF_100FDX;
        *s = status;
}
745 | 745 | ||
746 | /* ------------------------------------------------------------------------- */ | 746 | /* ------------------------------------------------------------------------- */ |
747 | /* The Level one LXT970 is used by many boards */ | 747 | /* The Level one LXT970 is used by many boards */ |
748 | 748 | ||
749 | #define MII_LXT970_MIRROR 16 /* Mirror register */ | 749 | #define MII_LXT970_MIRROR 16 /* Mirror register */ |
750 | #define MII_LXT970_IER 17 /* Interrupt Enable Register */ | 750 | #define MII_LXT970_IER 17 /* Interrupt Enable Register */ |
751 | #define MII_LXT970_ISR 18 /* Interrupt Status Register */ | 751 | #define MII_LXT970_ISR 18 /* Interrupt Status Register */ |
752 | #define MII_LXT970_CONFIG 19 /* Configuration Register */ | 752 | #define MII_LXT970_CONFIG 19 /* Configuration Register */ |
753 | #define MII_LXT970_CSR 20 /* Chip Status Register */ | 753 | #define MII_LXT970_CSR 20 /* Chip Status Register */ |
754 | 754 | ||
755 | static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) | 755 | static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) |
756 | { | 756 | { |
757 | struct fec_enet_private *fep = netdev_priv(dev); | 757 | struct fec_enet_private *fep = netdev_priv(dev); |
758 | volatile uint *s = &(fep->phy_status); | 758 | volatile uint *s = &(fep->phy_status); |
759 | uint status; | 759 | uint status; |
760 | 760 | ||
761 | status = *s & ~(PHY_STAT_SPMASK); | 761 | status = *s & ~(PHY_STAT_SPMASK); |
762 | if (mii_reg & 0x0800) { | 762 | if (mii_reg & 0x0800) { |
763 | if (mii_reg & 0x1000) | 763 | if (mii_reg & 0x1000) |
764 | status |= PHY_STAT_100FDX; | 764 | status |= PHY_STAT_100FDX; |
765 | else | 765 | else |
766 | status |= PHY_STAT_100HDX; | 766 | status |= PHY_STAT_100HDX; |
767 | } else { | 767 | } else { |
768 | if (mii_reg & 0x1000) | 768 | if (mii_reg & 0x1000) |
769 | status |= PHY_STAT_10FDX; | 769 | status |= PHY_STAT_10FDX; |
770 | else | 770 | else |
771 | status |= PHY_STAT_10HDX; | 771 | status |= PHY_STAT_10HDX; |
772 | } | 772 | } |
773 | *s = status; | 773 | *s = status; |
774 | } | 774 | } |
775 | 775 | ||
776 | static phy_cmd_t const phy_cmd_lxt970_config[] = { | 776 | static phy_cmd_t const phy_cmd_lxt970_config[] = { |
777 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 777 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
778 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 778 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
779 | { mk_mii_end, } | 779 | { mk_mii_end, } |
780 | }; | 780 | }; |
781 | static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */ | 781 | static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */ |
782 | { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, | 782 | { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, |
783 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 783 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
784 | { mk_mii_end, } | 784 | { mk_mii_end, } |
785 | }; | 785 | }; |
786 | static phy_cmd_t const phy_cmd_lxt970_ack_int[] = { | 786 | static phy_cmd_t const phy_cmd_lxt970_ack_int[] = { |
787 | /* read SR and ISR to acknowledge */ | 787 | /* read SR and ISR to acknowledge */ |
788 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 788 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
789 | { mk_mii_read(MII_LXT970_ISR), NULL }, | 789 | { mk_mii_read(MII_LXT970_ISR), NULL }, |
790 | 790 | ||
791 | /* find out the current status */ | 791 | /* find out the current status */ |
792 | { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, | 792 | { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, |
793 | { mk_mii_end, } | 793 | { mk_mii_end, } |
794 | }; | 794 | }; |
795 | static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */ | 795 | static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */ |
796 | { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, | 796 | { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, |
797 | { mk_mii_end, } | 797 | { mk_mii_end, } |
798 | }; | 798 | }; |
799 | static phy_info_t const phy_info_lxt970 = { | 799 | static phy_info_t const phy_info_lxt970 = { |
800 | .id = 0x07810000, | 800 | .id = 0x07810000, |
801 | .name = "LXT970", | 801 | .name = "LXT970", |
802 | .config = phy_cmd_lxt970_config, | 802 | .config = phy_cmd_lxt970_config, |
803 | .startup = phy_cmd_lxt970_startup, | 803 | .startup = phy_cmd_lxt970_startup, |
804 | .ack_int = phy_cmd_lxt970_ack_int, | 804 | .ack_int = phy_cmd_lxt970_ack_int, |
805 | .shutdown = phy_cmd_lxt970_shutdown | 805 | .shutdown = phy_cmd_lxt970_shutdown |
806 | }; | 806 | }; |
807 | 807 | ||
808 | /* ------------------------------------------------------------------------- */ | 808 | /* ------------------------------------------------------------------------- */ |
809 | /* The Level one LXT971 is used on some of my custom boards */ | 809 | /* The Level one LXT971 is used on some of my custom boards */ |
810 | 810 | ||
811 | /* register definitions for the 971 */ | 811 | /* register definitions for the 971 */ |
812 | 812 | ||
813 | #define MII_LXT971_PCR 16 /* Port Control Register */ | 813 | #define MII_LXT971_PCR 16 /* Port Control Register */ |
814 | #define MII_LXT971_SR2 17 /* Status Register 2 */ | 814 | #define MII_LXT971_SR2 17 /* Status Register 2 */ |
815 | #define MII_LXT971_IER 18 /* Interrupt Enable Register */ | 815 | #define MII_LXT971_IER 18 /* Interrupt Enable Register */ |
816 | #define MII_LXT971_ISR 19 /* Interrupt Status Register */ | 816 | #define MII_LXT971_ISR 19 /* Interrupt Status Register */ |
817 | #define MII_LXT971_LCR 20 /* LED Control Register */ | 817 | #define MII_LXT971_LCR 20 /* LED Control Register */ |
818 | #define MII_LXT971_TCR 30 /* Transmit Control Register */ | 818 | #define MII_LXT971_TCR 30 /* Transmit Control Register */ |
819 | 819 | ||
820 | /* | 820 | /* |
821 | * I had some nice ideas of running the MDIO faster... | 821 | * I had some nice ideas of running the MDIO faster... |
822 | * The 971 should support 8MHz and I tried it, but things acted really | 822 | * The 971 should support 8MHz and I tried it, but things acted really |
823 | * weird, so 2.5 MHz ought to be enough for anyone... | 823 | * weird, so 2.5 MHz ought to be enough for anyone... |
824 | */ | 824 | */ |
825 | 825 | ||
826 | static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev) | 826 | static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev) |
827 | { | 827 | { |
828 | struct fec_enet_private *fep = netdev_priv(dev); | 828 | struct fec_enet_private *fep = netdev_priv(dev); |
829 | volatile uint *s = &(fep->phy_status); | 829 | volatile uint *s = &(fep->phy_status); |
830 | uint status; | 830 | uint status; |
831 | 831 | ||
832 | status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); | 832 | status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); |
833 | 833 | ||
834 | if (mii_reg & 0x0400) { | 834 | if (mii_reg & 0x0400) { |
835 | fep->link = 1; | 835 | fep->link = 1; |
836 | status |= PHY_STAT_LINK; | 836 | status |= PHY_STAT_LINK; |
837 | } else { | 837 | } else { |
838 | fep->link = 0; | 838 | fep->link = 0; |
839 | } | 839 | } |
840 | if (mii_reg & 0x0080) | 840 | if (mii_reg & 0x0080) |
841 | status |= PHY_STAT_ANC; | 841 | status |= PHY_STAT_ANC; |
842 | if (mii_reg & 0x4000) { | 842 | if (mii_reg & 0x4000) { |
843 | if (mii_reg & 0x0200) | 843 | if (mii_reg & 0x0200) |
844 | status |= PHY_STAT_100FDX; | 844 | status |= PHY_STAT_100FDX; |
845 | else | 845 | else |
846 | status |= PHY_STAT_100HDX; | 846 | status |= PHY_STAT_100HDX; |
847 | } else { | 847 | } else { |
848 | if (mii_reg & 0x0200) | 848 | if (mii_reg & 0x0200) |
849 | status |= PHY_STAT_10FDX; | 849 | status |= PHY_STAT_10FDX; |
850 | else | 850 | else |
851 | status |= PHY_STAT_10HDX; | 851 | status |= PHY_STAT_10HDX; |
852 | } | 852 | } |
853 | if (mii_reg & 0x0008) | 853 | if (mii_reg & 0x0008) |
854 | status |= PHY_STAT_FAULT; | 854 | status |= PHY_STAT_FAULT; |
855 | 855 | ||
856 | *s = status; | 856 | *s = status; |
857 | } | 857 | } |
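For reference, the SR2 masks used above decode as: 0x4000 speed (100 Mbit when set), 0x0400 link up, 0x0200 full duplex, 0x0080 autoneg complete, 0x0008 remote fault. A minimal userspace sketch of the same decode, handy when staring at raw register dumps (the sample value is made up, not captured from hardware):

#include <stdio.h>

/* Decode an LXT971 SR2 value with the masks mii_parse_lxt971_sr2 uses. */
static void decode_lxt971_sr2(unsigned int reg)
{
	printf("link %s, %s Mbit, %s duplex%s%s\n",
	       (reg & 0x0400) ? "up" : "down",
	       (reg & 0x4000) ? "100" : "10",
	       (reg & 0x0200) ? "full" : "half",
	       (reg & 0x0080) ? ", autoneg complete" : "",
	       (reg & 0x0008) ? ", remote fault" : "");
}

int main(void)
{
	decode_lxt971_sr2(0x4680);	/* made-up: link up, 100FDX, autoneg done */
	return 0;
}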
858 | 858 | ||
859 | static phy_cmd_t const phy_cmd_lxt971_config[] = { | 859 | static phy_cmd_t const phy_cmd_lxt971_config[] = { |
860 | /* limit to 10MBit because my prototype board | 860 | /* limit to 10MBit because my prototype board |
861 | * doesn't work with 100. */ | 861 | * doesn't work with 100. */ |
862 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 862 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
863 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 863 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
864 | { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, | 864 | { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, |
865 | { mk_mii_end, } | 865 | { mk_mii_end, } |
866 | }; | 866 | }; |
867 | static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */ | 867 | static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */ |
868 | { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, | 868 | { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, |
869 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 869 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
870 | { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */ | 870 | { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */ |
871 | /* The 971 somehow reports the link as down on the | 871 | /* The 971 somehow reports the link as down on the |
872 | * first read after power-up. | 872 | * first read after power-up. |
873 | * Read here to get a valid value in ack_int. */ | 873 | * Read here to get a valid value in ack_int. */ |
874 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 874 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
875 | { mk_mii_end, } | 875 | { mk_mii_end, } |
876 | }; | 876 | }; |
877 | static phy_cmd_t const phy_cmd_lxt971_ack_int[] = { | 877 | static phy_cmd_t const phy_cmd_lxt971_ack_int[] = { |
878 | /* acknowledge the int before reading status! */ | 878 | /* acknowledge the int before reading status! */ |
879 | { mk_mii_read(MII_LXT971_ISR), NULL }, | 879 | { mk_mii_read(MII_LXT971_ISR), NULL }, |
880 | /* find out the current status */ | 880 | /* find out the current status */ |
881 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 881 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
882 | { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, | 882 | { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, |
883 | { mk_mii_end, } | 883 | { mk_mii_end, } |
884 | }; | 884 | }; |
885 | static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */ | 885 | static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */ |
886 | { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, | 886 | { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, |
887 | { mk_mii_end, } | 887 | { mk_mii_end, } |
888 | }; | 888 | }; |
889 | static phy_info_t const phy_info_lxt971 = { | 889 | static phy_info_t const phy_info_lxt971 = { |
890 | .id = 0x0001378e, | 890 | .id = 0x0001378e, |
891 | .name = "LXT971", | 891 | .name = "LXT971", |
892 | .config = phy_cmd_lxt971_config, | 892 | .config = phy_cmd_lxt971_config, |
893 | .startup = phy_cmd_lxt971_startup, | 893 | .startup = phy_cmd_lxt971_startup, |
894 | .ack_int = phy_cmd_lxt971_ack_int, | 894 | .ack_int = phy_cmd_lxt971_ack_int, |
895 | .shutdown = phy_cmd_lxt971_shutdown | 895 | .shutdown = phy_cmd_lxt971_shutdown |
896 | }; | 896 | }; |
897 | 897 | ||
898 | /* ------------------------------------------------------------------------- */ | 898 | /* ------------------------------------------------------------------------- */ |
899 | /* The Quality Semiconductor QS6612 is used on the RPX CLLF */ | 899 | /* The Quality Semiconductor QS6612 is used on the RPX CLLF */ |
900 | 900 | ||
901 | /* register definitions */ | 901 | /* register definitions */ |
902 | 902 | ||
903 | #define MII_QS6612_MCR 17 /* Mode Control Register */ | 903 | #define MII_QS6612_MCR 17 /* Mode Control Register */ |
904 | #define MII_QS6612_FTR 27 /* Factory Test Register */ | 904 | #define MII_QS6612_FTR 27 /* Factory Test Register */ |
905 | #define MII_QS6612_MCO 28 /* Misc. Control Register */ | 905 | #define MII_QS6612_MCO 28 /* Misc. Control Register */ |
906 | #define MII_QS6612_ISR 29 /* Interrupt Source Register */ | 906 | #define MII_QS6612_ISR 29 /* Interrupt Source Register */ |
907 | #define MII_QS6612_IMR 30 /* Interrupt Mask Register */ | 907 | #define MII_QS6612_IMR 30 /* Interrupt Mask Register */ |
908 | #define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */ | 908 | #define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */ |
909 | 909 | ||
910 | static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) | 910 | static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) |
911 | { | 911 | { |
912 | struct fec_enet_private *fep = netdev_priv(dev); | 912 | struct fec_enet_private *fep = netdev_priv(dev); |
913 | volatile uint *s = &(fep->phy_status); | 913 | volatile uint *s = &(fep->phy_status); |
914 | uint status; | 914 | uint status; |
915 | 915 | ||
916 | status = *s & ~(PHY_STAT_SPMASK); | 916 | status = *s & ~(PHY_STAT_SPMASK); |
917 | 917 | ||
918 | switch((mii_reg >> 2) & 7) { | 918 | switch((mii_reg >> 2) & 7) { |
919 | case 1: status |= PHY_STAT_10HDX; break; | 919 | case 1: status |= PHY_STAT_10HDX; break; |
920 | case 2: status |= PHY_STAT_100HDX; break; | 920 | case 2: status |= PHY_STAT_100HDX; break; |
921 | case 5: status |= PHY_STAT_10FDX; break; | 921 | case 5: status |= PHY_STAT_10FDX; break; |
922 | case 6: status |= PHY_STAT_100FDX; break; | 922 | case 6: status |= PHY_STAT_100FDX; break; |
923 | } | 923 | } |
924 | 924 | ||
925 | *s = status; | 925 | *s = status; |
926 | } | 926 | } |
927 | 927 | ||
928 | static phy_cmd_t const phy_cmd_qs6612_config[] = { | 928 | static phy_cmd_t const phy_cmd_qs6612_config[] = { |
929 | /* The PHY powers up isolated on the RPX, | 929 | /* The PHY powers up isolated on the RPX, |
930 | * so send a command to allow operation. | 930 | * so send a command to allow operation. |
931 | */ | 931 | */ |
932 | { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, | 932 | { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, |
933 | 933 | ||
934 | /* parse cr and anar to get some info */ | 934 | /* parse cr and anar to get some info */ |
935 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 935 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
936 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 936 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
937 | { mk_mii_end, } | 937 | { mk_mii_end, } |
938 | }; | 938 | }; |
939 | static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */ | 939 | static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */ |
940 | { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, | 940 | { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, |
941 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 941 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
942 | { mk_mii_end, } | 942 | { mk_mii_end, } |
943 | }; | 943 | }; |
944 | static phy_cmd_t const phy_cmd_qs6612_ack_int[] = { | 944 | static phy_cmd_t const phy_cmd_qs6612_ack_int[] = { |
945 | /* we need to read ISR, SR and ANER to acknowledge */ | 945 | /* we need to read ISR, SR and ANER to acknowledge */ |
946 | { mk_mii_read(MII_QS6612_ISR), NULL }, | 946 | { mk_mii_read(MII_QS6612_ISR), NULL }, |
947 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 947 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
948 | { mk_mii_read(MII_REG_ANER), NULL }, | 948 | { mk_mii_read(MII_REG_ANER), NULL }, |
949 | 949 | ||
950 | /* read pcr to get info */ | 950 | /* read pcr to get info */ |
951 | { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, | 951 | { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, |
952 | { mk_mii_end, } | 952 | { mk_mii_end, } |
953 | }; | 953 | }; |
954 | static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */ | 954 | static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */ |
955 | { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, | 955 | { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, |
956 | { mk_mii_end, } | 956 | { mk_mii_end, } |
957 | }; | 957 | }; |
958 | static phy_info_t const phy_info_qs6612 = { | 958 | static phy_info_t const phy_info_qs6612 = { |
959 | .id = 0x00181440, | 959 | .id = 0x00181440, |
960 | .name = "QS6612", | 960 | .name = "QS6612", |
961 | .config = phy_cmd_qs6612_config, | 961 | .config = phy_cmd_qs6612_config, |
962 | .startup = phy_cmd_qs6612_startup, | 962 | .startup = phy_cmd_qs6612_startup, |
963 | .ack_int = phy_cmd_qs6612_ack_int, | 963 | .ack_int = phy_cmd_qs6612_ack_int, |
964 | .shutdown = phy_cmd_qs6612_shutdown | 964 | .shutdown = phy_cmd_qs6612_shutdown |
965 | }; | 965 | }; |
966 | 966 | ||
967 | /* ------------------------------------------------------------------------- */ | 967 | /* ------------------------------------------------------------------------- */ |
968 | /* AMD AM79C874 phy */ | 968 | /* AMD AM79C874 phy */ |
969 | 969 | ||
970 | /* register definitions for the 874 */ | 970 | /* register definitions for the 874 */ |
971 | 971 | ||
972 | #define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ | 972 | #define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ |
973 | #define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ | 973 | #define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ |
974 | #define MII_AM79C874_DR 18 /* Diagnostic Register */ | 974 | #define MII_AM79C874_DR 18 /* Diagnostic Register */ |
975 | #define MII_AM79C874_PMLR 19 /* Power and Loopback Register */ | 975 | #define MII_AM79C874_PMLR 19 /* Power and Loopback Register */ |
976 | #define MII_AM79C874_MCR 21 /* Mode Control Register */ | 976 | #define MII_AM79C874_MCR 21 /* Mode Control Register */ |
977 | #define MII_AM79C874_DC 23 /* Disconnect Counter */ | 977 | #define MII_AM79C874_DC 23 /* Disconnect Counter */ |
978 | #define MII_AM79C874_REC 24 /* Receive Error Counter */ | 978 | #define MII_AM79C874_REC 24 /* Receive Error Counter */ |
979 | 979 | ||
980 | static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) | 980 | static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) |
981 | { | 981 | { |
982 | struct fec_enet_private *fep = netdev_priv(dev); | 982 | struct fec_enet_private *fep = netdev_priv(dev); |
983 | volatile uint *s = &(fep->phy_status); | 983 | volatile uint *s = &(fep->phy_status); |
984 | uint status; | 984 | uint status; |
985 | 985 | ||
986 | status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC); | 986 | status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC); |
987 | 987 | ||
988 | if (mii_reg & 0x0080) | 988 | if (mii_reg & 0x0080) |
989 | status |= PHY_STAT_ANC; | 989 | status |= PHY_STAT_ANC; |
990 | if (mii_reg & 0x0400) | 990 | if (mii_reg & 0x0400) |
991 | status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX); | 991 | status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX); |
992 | else | 992 | else |
993 | status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX); | 993 | status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX); |
994 | 994 | ||
995 | *s = status; | 995 | *s = status; |
996 | } | 996 | } |
997 | 997 | ||
998 | static phy_cmd_t const phy_cmd_am79c874_config[] = { | 998 | static phy_cmd_t const phy_cmd_am79c874_config[] = { |
999 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 999 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
1000 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 1000 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
1001 | { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, | 1001 | { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, |
1002 | { mk_mii_end, } | 1002 | { mk_mii_end, } |
1003 | }; | 1003 | }; |
1004 | static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */ | 1004 | static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */ |
1005 | { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL }, | 1005 | { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL }, |
1006 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 1006 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
1007 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 1007 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
1008 | { mk_mii_end, } | 1008 | { mk_mii_end, } |
1009 | }; | 1009 | }; |
1010 | static phy_cmd_t const phy_cmd_am79c874_ack_int[] = { | 1010 | static phy_cmd_t const phy_cmd_am79c874_ack_int[] = { |
1011 | /* find out the current status */ | 1011 | /* find out the current status */ |
1012 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 1012 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
1013 | { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, | 1013 | { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr }, |
1014 | /* we only need to read ISR to acknowledge */ | 1014 | /* we only need to read ISR to acknowledge */ |
1015 | { mk_mii_read(MII_AM79C874_ICSR), NULL }, | 1015 | { mk_mii_read(MII_AM79C874_ICSR), NULL }, |
1016 | { mk_mii_end, } | 1016 | { mk_mii_end, } |
1017 | }; | 1017 | }; |
1018 | static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */ | 1018 | static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */ |
1019 | { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL }, | 1019 | { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL }, |
1020 | { mk_mii_end, } | 1020 | { mk_mii_end, } |
1021 | }; | 1021 | }; |
1022 | static phy_info_t const phy_info_am79c874 = { | 1022 | static phy_info_t const phy_info_am79c874 = { |
1023 | .id = 0x00022561, | 1023 | .id = 0x00022561, |
1024 | .name = "AM79C874", | 1024 | .name = "AM79C874", |
1025 | .config = phy_cmd_am79c874_config, | 1025 | .config = phy_cmd_am79c874_config, |
1026 | .startup = phy_cmd_am79c874_startup, | 1026 | .startup = phy_cmd_am79c874_startup, |
1027 | .ack_int = phy_cmd_am79c874_ack_int, | 1027 | .ack_int = phy_cmd_am79c874_ack_int, |
1028 | .shutdown = phy_cmd_am79c874_shutdown | 1028 | .shutdown = phy_cmd_am79c874_shutdown |
1029 | }; | 1029 | }; |
1030 | 1030 | ||
1031 | 1031 | ||
1032 | /* ------------------------------------------------------------------------- */ | 1032 | /* ------------------------------------------------------------------------- */ |
1033 | /* Kendin KS8721BL phy */ | 1033 | /* Kendin KS8721BL phy */ |
1034 | 1034 | ||
1035 | /* register definitions for the 8721 */ | 1035 | /* register definitions for the 8721 */ |
1036 | 1036 | ||
1037 | #define MII_KS8721BL_RXERCR 21 | 1037 | #define MII_KS8721BL_RXERCR 21 |
1038 | #define MII_KS8721BL_ICSR 27 | 1038 | #define MII_KS8721BL_ICSR 27 |
1039 | #define MII_KS8721BL_PHYCR 31 | 1039 | #define MII_KS8721BL_PHYCR 31 |
1040 | 1040 | ||
1041 | static phy_cmd_t const phy_cmd_ks8721bl_config[] = { | 1041 | static phy_cmd_t const phy_cmd_ks8721bl_config[] = { |
1042 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 1042 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
1043 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 1043 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
1044 | { mk_mii_end, } | 1044 | { mk_mii_end, } |
1045 | }; | 1045 | }; |
1046 | static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */ | 1046 | static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */ |
1047 | { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL }, | 1047 | { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL }, |
1048 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 1048 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
1049 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 1049 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
1050 | { mk_mii_end, } | 1050 | { mk_mii_end, } |
1051 | }; | 1051 | }; |
1052 | static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = { | 1052 | static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = { |
1053 | /* find out the current status */ | 1053 | /* find out the current status */ |
1054 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 1054 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
1055 | /* we only need to read ISR to acknowledge */ | 1055 | /* we only need to read ISR to acknowledge */ |
1056 | { mk_mii_read(MII_KS8721BL_ICSR), NULL }, | 1056 | { mk_mii_read(MII_KS8721BL_ICSR), NULL }, |
1057 | { mk_mii_end, } | 1057 | { mk_mii_end, } |
1058 | }; | 1058 | }; |
1059 | static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */ | 1059 | static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */ |
1060 | { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL }, | 1060 | { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL }, |
1061 | { mk_mii_end, } | 1061 | { mk_mii_end, } |
1062 | }; | 1062 | }; |
1063 | static phy_info_t const phy_info_ks8721bl = { | 1063 | static phy_info_t const phy_info_ks8721bl = { |
1064 | .id = 0x00022161, | 1064 | .id = 0x00022161, |
1065 | .name = "KS8721BL", | 1065 | .name = "KS8721BL", |
1066 | .config = phy_cmd_ks8721bl_config, | 1066 | .config = phy_cmd_ks8721bl_config, |
1067 | .startup = phy_cmd_ks8721bl_startup, | 1067 | .startup = phy_cmd_ks8721bl_startup, |
1068 | .ack_int = phy_cmd_ks8721bl_ack_int, | 1068 | .ack_int = phy_cmd_ks8721bl_ack_int, |
1069 | .shutdown = phy_cmd_ks8721bl_shutdown | 1069 | .shutdown = phy_cmd_ks8721bl_shutdown |
1070 | }; | 1070 | }; |
1071 | 1071 | ||
1072 | /* ------------------------------------------------------------------------- */ | 1072 | /* ------------------------------------------------------------------------- */ |
1073 | /* register definitions for the DP83848 */ | 1073 | /* register definitions for the DP83848 */ |
1074 | 1074 | ||
1075 | #define MII_DP8384X_PHYSTST 16 /* PHY Status Register */ | 1075 | #define MII_DP8384X_PHYSTST 16 /* PHY Status Register */ |
1076 | 1076 | ||
1077 | static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev) | 1077 | static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev) |
1078 | { | 1078 | { |
1079 | struct fec_enet_private *fep = netdev_priv(dev); | 1079 | struct fec_enet_private *fep = netdev_priv(dev); |
1080 | volatile uint *s = &(fep->phy_status); | 1080 | volatile uint *s = &(fep->phy_status); |
1081 | 1081 | ||
1082 | *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); | 1082 | *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); |
1083 | 1083 | ||
1084 | /* Link up */ | 1084 | /* Link up */ |
1085 | if (mii_reg & 0x0001) { | 1085 | if (mii_reg & 0x0001) { |
1086 | fep->link = 1; | 1086 | fep->link = 1; |
1087 | *s |= PHY_STAT_LINK; | 1087 | *s |= PHY_STAT_LINK; |
1088 | } else | 1088 | } else |
1089 | fep->link = 0; | 1089 | fep->link = 0; |
1090 | /* Status of link */ | 1090 | /* Status of link */ |
1091 | if (mii_reg & 0x0010) /* Autonegotiation complete */ | 1091 | if (mii_reg & 0x0010) /* Autonegotiation complete */ |
1092 | *s |= PHY_STAT_ANC; | 1092 | *s |= PHY_STAT_ANC; |
1093 | if (mii_reg & 0x0002) { /* 10 Mbps? */ | 1093 | if (mii_reg & 0x0002) { /* 10 Mbps? */ |
1094 | if (mii_reg & 0x0004) /* Full Duplex? */ | 1094 | if (mii_reg & 0x0004) /* Full Duplex? */ |
1095 | *s |= PHY_STAT_10FDX; | 1095 | *s |= PHY_STAT_10FDX; |
1096 | else | 1096 | else |
1097 | *s |= PHY_STAT_10HDX; | 1097 | *s |= PHY_STAT_10HDX; |
1098 | } else { /* 100 Mbps? */ | 1098 | } else { /* 100 Mbps? */ |
1099 | if (mii_reg & 0x0004) /* Full Duplex? */ | 1099 | if (mii_reg & 0x0004) /* Full Duplex? */ |
1100 | *s |= PHY_STAT_100FDX; | 1100 | *s |= PHY_STAT_100FDX; |
1101 | else | 1101 | else |
1102 | *s |= PHY_STAT_100HDX; | 1102 | *s |= PHY_STAT_100HDX; |
1103 | } | 1103 | } |
1104 | if (mii_reg & 0x0008) | 1104 | if (mii_reg & 0x0008) |
1105 | *s |= PHY_STAT_FAULT; | 1105 | *s |= PHY_STAT_FAULT; |
1106 | } | 1106 | } |
1107 | 1107 | ||
1108 | static phy_info_t const phy_info_dp83848 = { | 1108 | static phy_info_t const phy_info_dp83848 = { |
1109 | .id = 0x020005c9, | 1109 | .id = 0x020005c9, |
1110 | .name = "DP83848", | 1110 | .name = "DP83848", |
1111 | 1111 | ||
1112 | .config = (const phy_cmd_t []) { | 1112 | .config = (const phy_cmd_t []) { |
1113 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, | 1113 | { mk_mii_read(MII_REG_CR), mii_parse_cr }, |
1114 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, | 1114 | { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, |
1115 | { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 }, | 1115 | { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 }, |
1116 | { mk_mii_end, } | 1116 | { mk_mii_end, } |
1117 | }, | 1117 | }, |
1118 | .startup = (const phy_cmd_t []) { /* enable interrupts */ | 1118 | .startup = (const phy_cmd_t []) { /* enable interrupts */ |
1119 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ | 1119 | { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */ |
1120 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, | 1120 | { mk_mii_read(MII_REG_SR), mii_parse_sr }, |
1121 | { mk_mii_end, } | 1121 | { mk_mii_end, } |
1122 | }, | 1122 | }, |
1123 | .ack_int = (const phy_cmd_t []) { /* never happens, no interrupt */ | 1123 | .ack_int = (const phy_cmd_t []) { /* never happens, no interrupt */ |
1124 | { mk_mii_end, } | 1124 | { mk_mii_end, } |
1125 | }, | 1125 | }, |
1126 | .shutdown = (const phy_cmd_t []) { | 1126 | .shutdown = (const phy_cmd_t []) { |
1127 | { mk_mii_end, } | 1127 | { mk_mii_end, } |
1128 | }, | 1128 | }, |
1129 | }; | 1129 | }; |
1130 | 1130 | ||
1131 | /* ------------------------------------------------------------------------- */ | 1131 | /* ------------------------------------------------------------------------- */ |
1132 | 1132 | ||
1133 | static phy_info_t const * const phy_info[] = { | 1133 | static phy_info_t const * const phy_info[] = { |
1134 | &phy_info_lxt970, | 1134 | &phy_info_lxt970, |
1135 | &phy_info_lxt971, | 1135 | &phy_info_lxt971, |
1136 | &phy_info_qs6612, | 1136 | &phy_info_qs6612, |
1137 | &phy_info_am79c874, | 1137 | &phy_info_am79c874, |
1138 | &phy_info_ks8721bl, | 1138 | &phy_info_ks8721bl, |
1139 | &phy_info_dp83848, | 1139 | &phy_info_dp83848, |
1140 | NULL | 1140 | NULL |
1141 | }; | 1141 | }; |
1142 | 1142 | ||
1143 | /* ------------------------------------------------------------------------- */ | 1143 | /* ------------------------------------------------------------------------- */ |
1144 | #ifdef HAVE_mii_link_interrupt | 1144 | #ifdef HAVE_mii_link_interrupt |
1145 | static irqreturn_t | 1145 | static irqreturn_t |
1146 | mii_link_interrupt(int irq, void * dev_id); | 1146 | mii_link_interrupt(int irq, void * dev_id); |
1147 | 1147 | ||
1148 | /* | 1148 | /* |
1149 | * This is specific to the MII interrupt setup of the M5272EVB. | 1149 | * This is specific to the MII interrupt setup of the M5272EVB. |
1150 | */ | 1150 | */ |
1151 | static void __inline__ fec_request_mii_intr(struct net_device *dev) | 1151 | static void __inline__ fec_request_mii_intr(struct net_device *dev) |
1152 | { | 1152 | { |
1153 | if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0) | 1153 | if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0) |
1154 | printk("FEC: Could not allocate fec(MII) IRQ(66)!\n"); | 1154 | printk("FEC: Could not allocate fec(MII) IRQ(66)!\n"); |
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | static void __inline__ fec_disable_phy_intr(struct net_device *dev) | 1157 | static void __inline__ fec_disable_phy_intr(struct net_device *dev) |
1158 | { | 1158 | { |
1159 | free_irq(66, dev); | 1159 | free_irq(66, dev); |
1160 | } | 1160 | } |
1161 | #endif | 1161 | #endif |
1162 | 1162 | ||
1163 | #ifdef CONFIG_M5272 | 1163 | #ifdef CONFIG_M5272 |
1164 | static void __inline__ fec_get_mac(struct net_device *dev) | 1164 | static void __inline__ fec_get_mac(struct net_device *dev) |
1165 | { | 1165 | { |
1166 | struct fec_enet_private *fep = netdev_priv(dev); | 1166 | struct fec_enet_private *fep = netdev_priv(dev); |
1167 | unsigned char *iap, tmpaddr[ETH_ALEN]; | 1167 | unsigned char *iap, tmpaddr[ETH_ALEN]; |
1168 | 1168 | ||
1169 | if (FEC_FLASHMAC) { | 1169 | if (FEC_FLASHMAC) { |
1170 | /* | 1170 | /* |
1171 | * Get MAC address from FLASH. | 1171 | * Get MAC address from FLASH. |
1172 | * If it is all 1's or 0's, use the default. | 1172 | * If it is all 1's or 0's, use the default. |
1173 | */ | 1173 | */ |
1174 | iap = (unsigned char *)FEC_FLASHMAC; | 1174 | iap = (unsigned char *)FEC_FLASHMAC; |
1175 | if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && | 1175 | if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && |
1176 | (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) | 1176 | (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) |
1177 | iap = fec_mac_default; | 1177 | iap = fec_mac_default; |
1178 | if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && | 1178 | if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && |
1179 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) | 1179 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) |
1180 | iap = fec_mac_default; | 1180 | iap = fec_mac_default; |
1181 | } else { | 1181 | } else { |
1182 | *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); | 1182 | *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); |
1183 | *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); | 1183 | *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); |
1184 | iap = &tmpaddr[0]; | 1184 | iap = &tmpaddr[0]; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | memcpy(dev->dev_addr, iap, ETH_ALEN); | 1187 | memcpy(dev->dev_addr, iap, ETH_ALEN); |
1188 | 1188 | ||
1189 | /* Adjust MAC if using default MAC address */ | 1189 | /* Adjust MAC if using default MAC address */ |
1190 | if (iap == fec_mac_default) | 1190 | if (iap == fec_mac_default) |
1191 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; | 1191 | dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; |
1192 | } | 1192 | } |
1193 | #endif | 1193 | #endif |
1194 | 1194 | ||
1195 | /* ------------------------------------------------------------------------- */ | 1195 | /* ------------------------------------------------------------------------- */ |
1196 | 1196 | ||
1197 | static void mii_display_status(struct net_device *dev) | 1197 | static void mii_display_status(struct net_device *dev) |
1198 | { | 1198 | { |
1199 | struct fec_enet_private *fep = netdev_priv(dev); | 1199 | struct fec_enet_private *fep = netdev_priv(dev); |
1200 | volatile uint *s = &(fep->phy_status); | 1200 | volatile uint *s = &(fep->phy_status); |
1201 | 1201 | ||
1202 | if (!fep->link && !fep->old_link) { | 1202 | if (!fep->link && !fep->old_link) { |
1203 | /* Link is still down - don't print anything */ | 1203 | /* Link is still down - don't print anything */ |
1204 | return; | 1204 | return; |
1205 | } | 1205 | } |
1206 | 1206 | ||
1207 | printk("%s: status: ", dev->name); | 1207 | printk("%s: status: ", dev->name); |
1208 | 1208 | ||
1209 | if (!fep->link) { | 1209 | if (!fep->link) { |
1210 | printk("link down"); | 1210 | printk("link down"); |
1211 | } else { | 1211 | } else { |
1212 | printk("link up"); | 1212 | printk("link up"); |
1213 | 1213 | ||
1214 | switch(*s & PHY_STAT_SPMASK) { | 1214 | switch(*s & PHY_STAT_SPMASK) { |
1215 | case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; | 1215 | case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; |
1216 | case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; | 1216 | case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; |
1217 | case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; | 1217 | case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; |
1218 | case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; | 1218 | case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; |
1219 | default: | 1219 | default: |
1220 | printk(", Unknown speed/duplex"); | 1220 | printk(", Unknown speed/duplex"); |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | if (*s & PHY_STAT_ANC) | 1223 | if (*s & PHY_STAT_ANC) |
1224 | printk(", auto-negotiation complete"); | 1224 | printk(", auto-negotiation complete"); |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | if (*s & PHY_STAT_FAULT) | 1227 | if (*s & PHY_STAT_FAULT) |
1228 | printk(", remote fault"); | 1228 | printk(", remote fault"); |
1229 | 1229 | ||
1230 | printk(".\n"); | 1230 | printk(".\n"); |
1231 | } | 1231 | } |
1232 | 1232 | ||
1233 | static void mii_display_config(struct work_struct *work) | 1233 | static void mii_display_config(struct work_struct *work) |
1234 | { | 1234 | { |
1235 | struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); | 1235 | struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); |
1236 | struct net_device *dev = fep->netdev; | 1236 | struct net_device *dev = fep->netdev; |
1237 | uint status = fep->phy_status; | 1237 | uint status = fep->phy_status; |
1238 | 1238 | ||
1239 | /* | 1239 | /* |
1240 | ** When we get here, phy_task is already removed from | 1240 | ** When we get here, phy_task is already removed from |
1241 | ** the workqueue. It is thus safe to reuse it. | 1241 | ** the workqueue. It is thus safe to reuse it. |
1242 | */ | 1242 | */ |
1243 | fep->mii_phy_task_queued = 0; | 1243 | fep->mii_phy_task_queued = 0; |
1244 | printk("%s: config: auto-negotiation ", dev->name); | 1244 | printk("%s: config: auto-negotiation ", dev->name); |
1245 | 1245 | ||
1246 | if (status & PHY_CONF_ANE) | 1246 | if (status & PHY_CONF_ANE) |
1247 | printk("on"); | 1247 | printk("on"); |
1248 | else | 1248 | else |
1249 | printk("off"); | 1249 | printk("off"); |
1250 | 1250 | ||
1251 | if (status & PHY_CONF_100FDX) | 1251 | if (status & PHY_CONF_100FDX) |
1252 | printk(", 100FDX"); | 1252 | printk(", 100FDX"); |
1253 | if (status & PHY_CONF_100HDX) | 1253 | if (status & PHY_CONF_100HDX) |
1254 | printk(", 100HDX"); | 1254 | printk(", 100HDX"); |
1255 | if (status & PHY_CONF_10FDX) | 1255 | if (status & PHY_CONF_10FDX) |
1256 | printk(", 10FDX"); | 1256 | printk(", 10FDX"); |
1257 | if (status & PHY_CONF_10HDX) | 1257 | if (status & PHY_CONF_10HDX) |
1258 | printk(", 10HDX"); | 1258 | printk(", 10HDX"); |
1259 | if (!(status & PHY_CONF_SPMASK)) | 1259 | if (!(status & PHY_CONF_SPMASK)) |
1260 | printk(", No speed/duplex selected?"); | 1260 | printk(", No speed/duplex selected?"); |
1261 | 1261 | ||
1262 | if (status & PHY_CONF_LOOP) | 1262 | if (status & PHY_CONF_LOOP) |
1263 | printk(", loopback enabled"); | 1263 | printk(", loopback enabled"); |
1264 | 1264 | ||
1265 | printk(".\n"); | 1265 | printk(".\n"); |
1266 | 1266 | ||
1267 | fep->sequence_done = 1; | 1267 | fep->sequence_done = 1; |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | static void mii_relink(struct work_struct *work) | 1270 | static void mii_relink(struct work_struct *work) |
1271 | { | 1271 | { |
1272 | struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); | 1272 | struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); |
1273 | struct net_device *dev = fep->netdev; | 1273 | struct net_device *dev = fep->netdev; |
1274 | int duplex; | 1274 | int duplex; |
1275 | 1275 | ||
1276 | /* | 1276 | /* |
1277 | ** When we get here, phy_task is already removed from | 1277 | ** When we get here, phy_task is already removed from |
1278 | ** the workqueue. It is thus safe to reuse it. | 1278 | ** the workqueue. It is thus safe to reuse it. |
1279 | */ | 1279 | */ |
1280 | fep->mii_phy_task_queued = 0; | 1280 | fep->mii_phy_task_queued = 0; |
1281 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; | 1281 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; |
1282 | mii_display_status(dev); | 1282 | mii_display_status(dev); |
1283 | fep->old_link = fep->link; | 1283 | fep->old_link = fep->link; |
1284 | 1284 | ||
1285 | if (fep->link) { | 1285 | if (fep->link) { |
1286 | duplex = 0; | 1286 | duplex = 0; |
1287 | if (fep->phy_status | 1287 | if (fep->phy_status |
1288 | & (PHY_STAT_100FDX | PHY_STAT_10FDX)) | 1288 | & (PHY_STAT_100FDX | PHY_STAT_10FDX)) |
1289 | duplex = 1; | 1289 | duplex = 1; |
1290 | fec_restart(dev, duplex); | 1290 | fec_restart(dev, duplex); |
1291 | } else | 1291 | } else |
1292 | fec_stop(dev); | 1292 | fec_stop(dev); |
1293 | } | 1293 | } |
1294 | 1294 | ||
1295 | /* mii_queue_relink is called in interrupt context from mii_link_interrupt */ | 1295 | /* mii_queue_relink is called in interrupt context from mii_link_interrupt */ |
1296 | static void mii_queue_relink(uint mii_reg, struct net_device *dev) | 1296 | static void mii_queue_relink(uint mii_reg, struct net_device *dev) |
1297 | { | 1297 | { |
1298 | struct fec_enet_private *fep = netdev_priv(dev); | 1298 | struct fec_enet_private *fep = netdev_priv(dev); |
1299 | 1299 | ||
1300 | /* | 1300 | /* |
1301 | * We cannot queue phy_task twice in the workqueue. It | 1301 | * We cannot queue phy_task twice in the workqueue. It |
1302 | * would cause an endless loop in the workqueue. | 1302 | * would cause an endless loop in the workqueue. |
1303 | * Fortunately, if the last mii_relink entry has not yet been | 1303 | * Fortunately, if the last mii_relink entry has not yet been |
1304 | * executed, it will do the job for the current interrupt, | 1304 | * executed, it will do the job for the current interrupt, |
1305 | * which is just what we want. | 1305 | * which is just what we want. |
1306 | */ | 1306 | */ |
1307 | if (fep->mii_phy_task_queued) | 1307 | if (fep->mii_phy_task_queued) |
1308 | return; | 1308 | return; |
1309 | 1309 | ||
1310 | fep->mii_phy_task_queued = 1; | 1310 | fep->mii_phy_task_queued = 1; |
1311 | INIT_WORK(&fep->phy_task, mii_relink); | 1311 | INIT_WORK(&fep->phy_task, mii_relink); |
1312 | schedule_work(&fep->phy_task); | 1312 | schedule_work(&fep->phy_task); |
1313 | } | 1313 | } |
1314 | 1314 | ||
1315 | /* mii_queue_config is called in interrupt context from fec_enet_mii */ | 1315 | /* mii_queue_config is called in interrupt context from fec_enet_mii */ |
1316 | static void mii_queue_config(uint mii_reg, struct net_device *dev) | 1316 | static void mii_queue_config(uint mii_reg, struct net_device *dev) |
1317 | { | 1317 | { |
1318 | struct fec_enet_private *fep = netdev_priv(dev); | 1318 | struct fec_enet_private *fep = netdev_priv(dev); |
1319 | 1319 | ||
1320 | if (fep->mii_phy_task_queued) | 1320 | if (fep->mii_phy_task_queued) |
1321 | return; | 1321 | return; |
1322 | 1322 | ||
1323 | fep->mii_phy_task_queued = 1; | 1323 | fep->mii_phy_task_queued = 1; |
1324 | INIT_WORK(&fep->phy_task, mii_display_config); | 1324 | INIT_WORK(&fep->phy_task, mii_display_config); |
1325 | schedule_work(&fep->phy_task); | 1325 | schedule_work(&fep->phy_task); |
1326 | } | 1326 | } |
1327 | 1327 | ||
1328 | phy_cmd_t const phy_cmd_relink[] = { | 1328 | phy_cmd_t const phy_cmd_relink[] = { |
1329 | { mk_mii_read(MII_REG_CR), mii_queue_relink }, | 1329 | { mk_mii_read(MII_REG_CR), mii_queue_relink }, |
1330 | { mk_mii_end, } | 1330 | { mk_mii_end, } |
1331 | }; | 1331 | }; |
1332 | phy_cmd_t const phy_cmd_config[] = { | 1332 | phy_cmd_t const phy_cmd_config[] = { |
1333 | { mk_mii_read(MII_REG_CR), mii_queue_config }, | 1333 | { mk_mii_read(MII_REG_CR), mii_queue_config }, |
1334 | { mk_mii_end, } | 1334 | { mk_mii_end, } |
1335 | }; | 1335 | }; |
1336 | 1336 | ||
1337 | /* Read remainder of PHY ID. */ | 1337 | /* Read remainder of PHY ID. */ |
1338 | static void | 1338 | static void |
1339 | mii_discover_phy3(uint mii_reg, struct net_device *dev) | 1339 | mii_discover_phy3(uint mii_reg, struct net_device *dev) |
1340 | { | 1340 | { |
1341 | struct fec_enet_private *fep; | 1341 | struct fec_enet_private *fep; |
1342 | int i; | 1342 | int i; |
1343 | 1343 | ||
1344 | fep = netdev_priv(dev); | 1344 | fep = netdev_priv(dev); |
1345 | fep->phy_id |= (mii_reg & 0xffff); | 1345 | fep->phy_id |= (mii_reg & 0xffff); |
1346 | printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id); | 1346 | printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id); |
1347 | 1347 | ||
1348 | for(i = 0; phy_info[i]; i++) { | 1348 | for(i = 0; phy_info[i]; i++) { |
1349 | if(phy_info[i]->id == (fep->phy_id >> 4)) | 1349 | if(phy_info[i]->id == (fep->phy_id >> 4)) |
1350 | break; | 1350 | break; |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | if (phy_info[i]) | 1353 | if (phy_info[i]) |
1354 | printk(" -- %s\n", phy_info[i]->name); | 1354 | printk(" -- %s\n", phy_info[i]->name); |
1355 | else | 1355 | else |
1356 | printk(" -- unknown PHY!\n"); | 1356 | printk(" -- unknown PHY!\n"); |
1357 | 1357 | ||
1358 | fep->phy = phy_info[i]; | 1358 | fep->phy = phy_info[i]; |
1359 | fep->phy_id_done = 1; | 1359 | fep->phy_id_done = 1; |
1360 | } | 1360 | } |
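To make the ID matching concrete: mii_discover_phy below puts the PHYIR1 read in the upper 16 bits, mii_discover_phy3 ORs in PHYIR2, and the low 4 revision bits are shifted away before comparing against the table. A small sketch, assuming a KS8721BL that answers the two ID reads with 0x0022 and 0x1619 (the revision nibble 9 here is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int phyir1 = 0x0022, phyir2 = 0x1619;	/* assumed reads */
	unsigned int phy_id = (phyir1 << 16) | phyir2;	/* 0x00221619 */

	/* Drop the revision nibble, as mii_discover_phy3 does; the result
	 * 0x00022161 matches phy_info_ks8721bl.id in the table above. */
	printf("lookup id: 0x%08x\n", phy_id >> 4);
	return 0;
}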
1361 | 1361 | ||
1362 | /* Scan all of the MII PHY addresses looking for someone to respond | 1362 | /* Scan all of the MII PHY addresses looking for someone to respond |
1363 | * with a valid ID. This usually happens quickly. | 1363 | * with a valid ID. This usually happens quickly. |
1364 | */ | 1364 | */ |
1365 | static void | 1365 | static void |
1366 | mii_discover_phy(uint mii_reg, struct net_device *dev) | 1366 | mii_discover_phy(uint mii_reg, struct net_device *dev) |
1367 | { | 1367 | { |
1368 | struct fec_enet_private *fep; | 1368 | struct fec_enet_private *fep; |
1369 | uint phytype; | 1369 | uint phytype; |
1370 | 1370 | ||
1371 | fep = netdev_priv(dev); | 1371 | fep = netdev_priv(dev); |
1372 | 1372 | ||
1373 | if (fep->phy_addr < 32) { | 1373 | if (fep->phy_addr < 32) { |
1374 | if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { | 1374 | if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { |
1375 | 1375 | ||
1376 | /* Got first part of ID, now get remainder */ | 1376 | /* Got first part of ID, now get remainder */ |
1377 | fep->phy_id = phytype << 16; | 1377 | fep->phy_id = phytype << 16; |
1378 | mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2), | 1378 | mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2), |
1379 | mii_discover_phy3); | 1379 | mii_discover_phy3); |
1380 | } else { | 1380 | } else { |
1381 | fep->phy_addr++; | 1381 | fep->phy_addr++; |
1382 | mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1), | 1382 | mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1), |
1383 | mii_discover_phy); | 1383 | mii_discover_phy); |
1384 | } | 1384 | } |
1385 | } else { | 1385 | } else { |
1386 | printk("FEC: No PHY device found.\n"); | 1386 | printk("FEC: No PHY device found.\n"); |
1387 | /* Disable external MII interface */ | 1387 | /* Disable external MII interface */ |
1388 | writel(0, fep->hwp + FEC_MII_SPEED); | 1388 | writel(0, fep->hwp + FEC_MII_SPEED); |
1389 | fep->phy_speed = 0; | 1389 | fep->phy_speed = 0; |
1390 | #ifdef HAVE_mii_link_interrupt | 1390 | #ifdef HAVE_mii_link_interrupt |
1391 | fec_disable_phy_intr(dev); | 1391 | fec_disable_phy_intr(dev); |
1392 | #endif | 1392 | #endif |
1393 | } | 1393 | } |
1394 | } | 1394 | } |
1395 | 1395 | ||
1396 | /* This interrupt occurs when the PHY detects a link change */ | 1396 | /* This interrupt occurs when the PHY detects a link change */ |
1397 | #ifdef HAVE_mii_link_interrupt | 1397 | #ifdef HAVE_mii_link_interrupt |
1398 | static irqreturn_t | 1398 | static irqreturn_t |
1399 | mii_link_interrupt(int irq, void * dev_id) | 1399 | mii_link_interrupt(int irq, void * dev_id) |
1400 | { | 1400 | { |
1401 | struct net_device *dev = dev_id; | 1401 | struct net_device *dev = dev_id; |
1402 | struct fec_enet_private *fep = netdev_priv(dev); | 1402 | struct fec_enet_private *fep = netdev_priv(dev); |
1403 | 1403 | ||
1404 | mii_do_cmd(dev, fep->phy->ack_int); | 1404 | mii_do_cmd(dev, fep->phy->ack_int); |
1405 | mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ | 1405 | mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ |
1406 | 1406 | ||
1407 | return IRQ_HANDLED; | 1407 | return IRQ_HANDLED; |
1408 | } | 1408 | } |
1409 | #endif | 1409 | #endif |
1410 | 1410 | ||
1411 | static void fec_enet_free_buffers(struct net_device *dev) | 1411 | static void fec_enet_free_buffers(struct net_device *dev) |
1412 | { | 1412 | { |
1413 | struct fec_enet_private *fep = netdev_priv(dev); | 1413 | struct fec_enet_private *fep = netdev_priv(dev); |
1414 | int i; | 1414 | int i; |
1415 | struct sk_buff *skb; | 1415 | struct sk_buff *skb; |
1416 | struct bufdesc *bdp; | 1416 | struct bufdesc *bdp; |
1417 | 1417 | ||
1418 | bdp = fep->rx_bd_base; | 1418 | bdp = fep->rx_bd_base; |
1419 | for (i = 0; i < RX_RING_SIZE; i++) { | 1419 | for (i = 0; i < RX_RING_SIZE; i++) { |
1420 | skb = fep->rx_skbuff[i]; | 1420 | skb = fep->rx_skbuff[i]; |
1421 | 1421 | ||
1422 | if (bdp->cbd_bufaddr) | 1422 | if (bdp->cbd_bufaddr) |
1423 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, | 1423 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, |
1424 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1424 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
1425 | if (skb) | 1425 | if (skb) |
1426 | dev_kfree_skb(skb); | 1426 | dev_kfree_skb(skb); |
1427 | bdp++; | 1427 | bdp++; |
1428 | } | 1428 | } |
1429 | 1429 | ||
1430 | bdp = fep->tx_bd_base; | 1430 | bdp = fep->tx_bd_base; |
1431 | for (i = 0; i < TX_RING_SIZE; i++) | 1431 | for (i = 0; i < TX_RING_SIZE; i++) |
1432 | kfree(fep->tx_bounce[i]); | 1432 | kfree(fep->tx_bounce[i]); |
1433 | } | 1433 | } |
1434 | 1434 | ||
1435 | static int fec_enet_alloc_buffers(struct net_device *dev) | 1435 | static int fec_enet_alloc_buffers(struct net_device *dev) |
1436 | { | 1436 | { |
1437 | struct fec_enet_private *fep = netdev_priv(dev); | 1437 | struct fec_enet_private *fep = netdev_priv(dev); |
1438 | int i; | 1438 | int i; |
1439 | struct sk_buff *skb; | 1439 | struct sk_buff *skb; |
1440 | struct bufdesc *bdp; | 1440 | struct bufdesc *bdp; |
1441 | 1441 | ||
1442 | bdp = fep->rx_bd_base; | 1442 | bdp = fep->rx_bd_base; |
1443 | for (i = 0; i < RX_RING_SIZE; i++) { | 1443 | for (i = 0; i < RX_RING_SIZE; i++) { |
1444 | skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); | 1444 | skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); |
1445 | if (!skb) { | 1445 | if (!skb) { |
1446 | fec_enet_free_buffers(dev); | 1446 | fec_enet_free_buffers(dev); |
1447 | return -ENOMEM; | 1447 | return -ENOMEM; |
1448 | } | 1448 | } |
1449 | fep->rx_skbuff[i] = skb; | 1449 | fep->rx_skbuff[i] = skb; |
1450 | 1450 | ||
1451 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, | 1451 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, |
1452 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | 1452 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); |
1453 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 1453 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
1454 | bdp++; | 1454 | bdp++; |
1455 | } | 1455 | } |
1456 | 1456 | ||
1457 | /* Set the last buffer to wrap. */ | 1457 | /* Set the last buffer to wrap. */ |
1458 | bdp--; | 1458 | bdp--; |
1459 | bdp->cbd_sc |= BD_SC_WRAP; | 1459 | bdp->cbd_sc |= BD_SC_WRAP; |
1460 | 1460 | ||
1461 | bdp = fep->tx_bd_base; | 1461 | bdp = fep->tx_bd_base; |
1462 | for (i = 0; i < TX_RING_SIZE; i++) { | 1462 | for (i = 0; i < TX_RING_SIZE; i++) { |
1463 | fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | 1463 | fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); |
1464 | 1464 | ||
1465 | bdp->cbd_sc = 0; | 1465 | bdp->cbd_sc = 0; |
1466 | bdp->cbd_bufaddr = 0; | 1466 | bdp->cbd_bufaddr = 0; |
1467 | bdp++; | 1467 | bdp++; |
1468 | } | 1468 | } |
1469 | 1469 | ||
1470 | /* Set the last buffer to wrap. */ | 1470 | /* Set the last buffer to wrap. */ |
1471 | bdp--; | 1471 | bdp--; |
1472 | bdp->cbd_sc |= BD_SC_WRAP; | 1472 | bdp->cbd_sc |= BD_SC_WRAP; |
1473 | 1473 | ||
1474 | return 0; | 1474 | return 0; |
1475 | } | 1475 | } |
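Both rings above are plain arrays; the BD_SC_WRAP bit set on the last descriptor is what turns them into rings, both for the controller and for the driver's own traversal. A standalone sketch of walking a ring that way (the struct is pared down, and the BD_SC_WRAP value of 0x2000 is an assumption mirroring the kernel's definition):

#include <stdio.h>

#define RING_SIZE	8
#define BD_SC_WRAP	0x2000	/* assumed to match the kernel header */

struct bufdesc {
	unsigned short cbd_sc;		/* control/status word */
};

int main(void)
{
	struct bufdesc ring[RING_SIZE] = { { 0 } };
	struct bufdesc *bdp = ring;
	int i;

	/* Mark the last descriptor, as the alloc code above does. */
	ring[RING_SIZE - 1].cbd_sc |= BD_SC_WRAP;

	/* Walk twice around the ring using only the WRAP bit. */
	for (i = 0; i < 2 * RING_SIZE; i++) {
		printf("descriptor %ld\n", (long)(bdp - ring));
		bdp = (bdp->cbd_sc & BD_SC_WRAP) ? ring : bdp + 1;
	}
	return 0;
}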
1476 | 1476 | ||
1477 | static int | 1477 | static int |
1478 | fec_enet_open(struct net_device *dev) | 1478 | fec_enet_open(struct net_device *dev) |
1479 | { | 1479 | { |
1480 | struct fec_enet_private *fep = netdev_priv(dev); | 1480 | struct fec_enet_private *fep = netdev_priv(dev); |
1481 | int ret; | 1481 | int ret; |
1482 | 1482 | ||
1483 | /* I should reset the ring buffers here, but I don't yet know | 1483 | /* I should reset the ring buffers here, but I don't yet know |
1484 | * a simple way to do that. | 1484 | * a simple way to do that. |
1485 | */ | 1485 | */ |
1486 | 1486 | ||
1487 | ret = fec_enet_alloc_buffers(dev); | 1487 | ret = fec_enet_alloc_buffers(dev); |
1488 | if (ret) | 1488 | if (ret) |
1489 | return ret; | 1489 | return ret; |
1490 | 1490 | ||
1491 | fep->sequence_done = 0; | 1491 | fep->sequence_done = 0; |
1492 | fep->link = 0; | 1492 | fep->link = 0; |
1493 | 1493 | ||
1494 | fec_restart(dev, 1); | 1494 | fec_restart(dev, 1); |
1495 | 1495 | ||
1496 | if (fep->phy) { | 1496 | if (fep->phy) { |
1497 | mii_do_cmd(dev, fep->phy->ack_int); | 1497 | mii_do_cmd(dev, fep->phy->ack_int); |
1498 | mii_do_cmd(dev, fep->phy->config); | 1498 | mii_do_cmd(dev, fep->phy->config); |
1499 | mii_do_cmd(dev, phy_cmd_config); /* display configuration */ | 1499 | mii_do_cmd(dev, phy_cmd_config); /* display configuration */ |
1500 | 1500 | ||
1501 | /* Poll until the PHY tells us its configuration | 1501 | /* Poll until the PHY tells us its configuration |
1502 | * (not link state). | 1502 | * (not link state). |
1503 | * Request is initiated by mii_do_cmd above, but answer | 1503 | * Request is initiated by mii_do_cmd above, but answer |
1504 | * comes by interrupt. | 1504 | * comes by interrupt. |
1505 | * This should take about 25 usec per register at 2.5 MHz, | 1505 | * This should take about 25 usec per register at 2.5 MHz, |
1506 | * and we read approximately 5 registers. | 1506 | * and we read approximately 5 registers. |
1507 | */ | 1507 | */ |
1508 | while(!fep->sequence_done) | 1508 | while(!fep->sequence_done) |
1509 | schedule(); | 1509 | schedule(); |
1510 | 1510 | ||
1511 | mii_do_cmd(dev, fep->phy->startup); | 1511 | mii_do_cmd(dev, fep->phy->startup); |
1512 | } | 1512 | } |
1513 | 1513 | ||
1514 | /* Set the initial link state to true. A lot of hardware | 1514 | /* Set the initial link state to true. A lot of hardware |
1515 | * based on this device does not implement a PHY interrupt, | 1515 | * based on this device does not implement a PHY interrupt, |
1516 | * so we are never notified of link change. | 1516 | * so we are never notified of link change. |
1517 | */ | 1517 | */ |
1518 | fep->link = 1; | 1518 | fep->link = 1; |
1519 | 1519 | ||
1520 | netif_start_queue(dev); | 1520 | netif_start_queue(dev); |
1521 | fep->opened = 1; | 1521 | fep->opened = 1; |
1522 | return 0; | 1522 | return 0; |
1523 | } | 1523 | } |
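The timing estimate in the comment checks out: a clause-22 MII management transaction is 64 MDC cycles (32 bits of preamble plus a 32-bit frame), so at 2.5 MHz each register access takes about 25.6 usec, and the ~5 reads finish in well under a tick. A trivial check:

#include <stdio.h>

int main(void)
{
	/* 64 MDC cycles per management transaction, MDC at 2.5 MHz. */
	printf("~%.1f usec per register, ~%.0f usec for 5\n",
	       64 / 2.5, 5 * 64 / 2.5);
	return 0;
}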
1524 | 1524 | ||
1525 | static int | 1525 | static int |
1526 | fec_enet_close(struct net_device *dev) | 1526 | fec_enet_close(struct net_device *dev) |
1527 | { | 1527 | { |
1528 | struct fec_enet_private *fep = netdev_priv(dev); | 1528 | struct fec_enet_private *fep = netdev_priv(dev); |
1529 | 1529 | ||
1530 | /* Don't know what to do yet. */ | 1530 | /* Don't know what to do yet. */ |
1531 | fep->opened = 0; | 1531 | fep->opened = 0; |
1532 | netif_stop_queue(dev); | 1532 | netif_stop_queue(dev); |
1533 | fec_stop(dev); | 1533 | fec_stop(dev); |
1534 | 1534 | ||
1535 | fec_enet_free_buffers(dev); | 1535 | fec_enet_free_buffers(dev); |
1536 | 1536 | ||
1537 | return 0; | 1537 | return 0; |
1538 | } | 1538 | } |
1539 | 1539 | ||
1540 | /* Set or clear the multicast filter for this adaptor. | 1540 | /* Set or clear the multicast filter for this adaptor. |
1541 | * Skeleton taken from sunlance driver. | 1541 | * Skeleton taken from sunlance driver. |
1542 | * The CPM Ethernet implementation allows Multicast as well as individual | 1542 | * The CPM Ethernet implementation allows Multicast as well as individual |
1543 | * MAC address filtering. Some of the drivers check to make sure it is | 1543 | * MAC address filtering. Some of the drivers check to make sure it is |
1544 | * a group multicast address, and discard those that are not. I guess I | 1544 | * a group multicast address, and discard those that are not. I guess I |
1545 | * will do the same for now, but just remove the test if you want | 1545 | * will do the same for now, but just remove the test if you want |
1546 | * individual filtering as well (do the upper net layers want or support | 1546 | * individual filtering as well (do the upper net layers want or support |
1547 | * this kind of feature?). | 1547 | * this kind of feature?). |
1548 | */ | 1548 | */ |
1549 | 1549 | ||
1550 | #define HASH_BITS 6 /* #bits in hash */ | 1550 | #define HASH_BITS 6 /* #bits in hash */ |
1551 | #define CRC32_POLY 0xEDB88320 | 1551 | #define CRC32_POLY 0xEDB88320 |
1552 | 1552 | ||
1553 | static void set_multicast_list(struct net_device *dev) | 1553 | static void set_multicast_list(struct net_device *dev) |
1554 | { | 1554 | { |
1555 | struct fec_enet_private *fep = netdev_priv(dev); | 1555 | struct fec_enet_private *fep = netdev_priv(dev); |
1556 | struct dev_mc_list *dmi; | 1556 | struct dev_mc_list *dmi; |
1557 | unsigned int i, j, bit, data, crc, tmp; | 1557 | unsigned int i, j, bit, data, crc, tmp; |
1558 | unsigned char hash; | 1558 | unsigned char hash; |
1559 | 1559 | ||
1560 | if (dev->flags & IFF_PROMISC) { | 1560 | if (dev->flags & IFF_PROMISC) { |
1561 | tmp = readl(fep->hwp + FEC_R_CNTRL); | 1561 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
1562 | tmp |= 0x8; | 1562 | tmp |= 0x8; |
1563 | writel(tmp, fep->hwp + FEC_R_CNTRL); | 1563 | writel(tmp, fep->hwp + FEC_R_CNTRL); |
1564 | return; | 1564 | return; |
1565 | } | 1565 | } |
1566 | 1566 | ||
1567 | tmp = readl(fep->hwp + FEC_R_CNTRL); | 1567 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
1568 | tmp &= ~0x8; | 1568 | tmp &= ~0x8; |
1569 | writel(tmp, fep->hwp + FEC_R_CNTRL); | 1569 | writel(tmp, fep->hwp + FEC_R_CNTRL); |
1570 | 1570 | ||
1571 | if (dev->flags & IFF_ALLMULTI) { | 1571 | if (dev->flags & IFF_ALLMULTI) { |
1572 | /* Catch all multicast addresses, so set the | 1572 | /* Catch all multicast addresses, so set the |
1573 | * filter to all 1's | 1573 | * filter to all 1's |
1574 | */ | 1574 | */ |
1575 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | 1575 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1576 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | 1576 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1577 | 1577 | ||
1578 | return; | 1578 | return; |
1579 | } | 1579 | } |
1580 | 1580 | ||
1581 | /* Clear filter and add the addresses in hash register | 1581 | /* Clear filter and add the addresses in hash register |
1582 | */ | 1582 | */ |
1583 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | 1583 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1584 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | 1584 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1585 | 1585 | ||
1586 | dmi = dev->mc_list; | 1586 | dmi = dev->mc_list; |
1587 | 1587 | ||
1588 | for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) { | 1588 | for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) { |
1589 | /* Only support group multicast for now */ | 1589 | /* Only support group multicast for now */ |
1590 | if (!(dmi->dmi_addr[0] & 1)) | 1590 | if (!(dmi->dmi_addr[0] & 1)) |
1591 | continue; | 1591 | continue; |
1592 | 1592 | ||
1593 | /* calculate crc32 value of mac address */ | 1593 | /* calculate crc32 value of mac address */ |
1594 | crc = 0xffffffff; | 1594 | crc = 0xffffffff; |
1595 | 1595 | ||
1596 | for (i = 0; i < dmi->dmi_addrlen; i++) { | 1596 | for (i = 0; i < dmi->dmi_addrlen; i++) { |
1597 | data = dmi->dmi_addr[i]; | 1597 | data = dmi->dmi_addr[i]; |
1598 | for (bit = 0; bit < 8; bit++, data >>= 1) { | 1598 | for (bit = 0; bit < 8; bit++, data >>= 1) { |
1599 | crc = (crc >> 1) ^ | 1599 | crc = (crc >> 1) ^ |
1600 | (((crc ^ data) & 1) ? CRC32_POLY : 0); | 1600 | (((crc ^ data) & 1) ? CRC32_POLY : 0); |
1601 | } | 1601 | } |
1602 | } | 1602 | } |
1603 | 1603 | ||
1604 | /* only upper 6 bits (HASH_BITS) are used | 1604 | /* only upper 6 bits (HASH_BITS) are used |
1605 | * which point to a specific bit in the hash registers | 1605 | * which point to a specific bit in the hash registers |
1606 | */ | 1606 | */ |
1607 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; | 1607 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; |
1608 | 1608 | ||
1609 | if (hash > 31) { | 1609 | if (hash > 31) { |
1610 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | 1610 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1611 | tmp |= 1 << (hash - 32); | 1611 | tmp |= 1 << (hash - 32); |
1612 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | 1612 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1613 | } else { | 1613 | } else { |
1614 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); | 1614 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1615 | tmp |= 1 << hash; | 1615 | tmp |= 1 << hash; |
1616 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | 1616 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1617 | } | 1617 | } |
1618 | } | 1618 | } |
1619 | } | 1619 | } |
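The hash above is a bit-reversed CRC-32 over the six address octets (polynomial 0xEDB88320, initial value 0xffffffff, no final inversion), of which only the top HASH_BITS bits are kept. A standalone sketch of the same computation, useful for predicting which FEC_GRP_HASH_TABLE_HIGH/LOW bit a given multicast address selects (the sample address is arbitrary):

#include <stdio.h>

#define HASH_BITS	6
#define CRC32_POLY	0xEDB88320

int main(void)
{
	/* Arbitrary multicast MAC (group bit set in the first octet). */
	unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int crc = 0xffffffff, data, hash;
	int i, bit;

	/* Same bitwise CRC-32 loop as set_multicast_list above. */
	for (i = 0; i < 6; i++) {
		data = addr[i];
		for (bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? CRC32_POLY : 0);
	}

	hash = (crc >> (32 - HASH_BITS)) & 0x3f;
	printf("hash %u -> %s, bit %u\n", hash,
	       hash > 31 ? "HASH_TABLE_HIGH" : "HASH_TABLE_LOW",
	       hash > 31 ? hash - 32 : hash);
	return 0;
}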
1620 | 1620 | ||
1621 | /* Set a MAC change in hardware. */ | 1621 | /* Set a MAC change in hardware. */ |
1622 | static int | 1622 | static int |
1623 | fec_set_mac_address(struct net_device *dev, void *p) | 1623 | fec_set_mac_address(struct net_device *dev, void *p) |
1624 | { | 1624 | { |
1625 | struct fec_enet_private *fep = netdev_priv(dev); | 1625 | struct fec_enet_private *fep = netdev_priv(dev); |
1626 | struct sockaddr *addr = p; | 1626 | struct sockaddr *addr = p; |
1627 | 1627 | ||
1628 | if (!is_valid_ether_addr(addr->sa_data)) | 1628 | if (!is_valid_ether_addr(addr->sa_data)) |
1629 | return -EADDRNOTAVAIL; | 1629 | return -EADDRNOTAVAIL; |
1630 | 1630 | ||
1631 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 1631 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1632 | 1632 | ||
1633 | writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | | 1633 | writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | |
1634 | (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), | 1634 | (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), |
1635 | fep->hwp + FEC_ADDR_LOW); | 1635 | fep->hwp + FEC_ADDR_LOW); |
1636 | writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), | 1636 | writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), |
1637 | fep->hwp + FEC_ADDR_HIGH); | 1637 | fep->hwp + FEC_ADDR_HIGH); |
1638 | return 0; | 1638 | return 0; |
1639 | } | 1639 | } |
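The two writel() calls pack the address with byte 0 in the most significant octet: bytes 0-3 fill FEC_ADDR_LOW, bytes 4-5 the top half of FEC_ADDR_HIGH. A worked example with the made-up address 00:11:22:33:44:55:

#include <stdio.h>

int main(void)
{
	unsigned char a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* Same packing as fec_set_mac_address above. */
	unsigned int low  = a[3] | (a[2] << 8) | (a[1] << 16) | (a[0] << 24);
	unsigned int high = (a[5] << 16) | (a[4] << 24);

	printf("FEC_ADDR_LOW  = 0x%08x\n", low);	/* 0x00112233 */
	printf("FEC_ADDR_HIGH = 0x%08x\n", high);	/* 0x44550000 */
	return 0;
}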
1640 | 1640 | ||
1641 | static const struct net_device_ops fec_netdev_ops = { | 1641 | static const struct net_device_ops fec_netdev_ops = { |
1642 | .ndo_open = fec_enet_open, | 1642 | .ndo_open = fec_enet_open, |
1643 | .ndo_stop = fec_enet_close, | 1643 | .ndo_stop = fec_enet_close, |
1644 | .ndo_start_xmit = fec_enet_start_xmit, | 1644 | .ndo_start_xmit = fec_enet_start_xmit, |
1645 | .ndo_set_multicast_list = set_multicast_list, | 1645 | .ndo_set_multicast_list = set_multicast_list, |
1646 | .ndo_change_mtu = eth_change_mtu, | 1646 | .ndo_change_mtu = eth_change_mtu, |
1647 | .ndo_validate_addr = eth_validate_addr, | 1647 | .ndo_validate_addr = eth_validate_addr, |
1648 | .ndo_tx_timeout = fec_timeout, | 1648 | .ndo_tx_timeout = fec_timeout, |
1649 | .ndo_set_mac_address = fec_set_mac_address, | 1649 | .ndo_set_mac_address = fec_set_mac_address, |
1650 | }; | 1650 | }; |
1651 | 1651 | ||
1652 | /* | 1652 | /* |
1653 | * XXX: We need to clean up on failure exits here. | 1653 | * XXX: We need to clean up on failure exits here. |
1654 | * | 1654 | * |
1655 | * index is only used in legacy code | 1655 | * index is only used in legacy code |
1656 | */ | 1656 | */ |
1657 | int __init fec_enet_init(struct net_device *dev, int index) | 1657 | static int fec_enet_init(struct net_device *dev, int index) |
1658 | { | 1658 | { |
1659 | struct fec_enet_private *fep = netdev_priv(dev); | 1659 | struct fec_enet_private *fep = netdev_priv(dev); |
1660 | struct bufdesc *cbd_base; | 1660 | struct bufdesc *cbd_base; |
1661 | int i; | 1661 | int i; |
1662 | 1662 | ||
1663 | /* Allocate memory for buffer descriptors. */ | 1663 | /* Allocate memory for buffer descriptors. */ |
1664 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, | 1664 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, |
1665 | GFP_KERNEL); | 1665 | GFP_KERNEL); |
1666 | if (!cbd_base) { | 1666 | if (!cbd_base) { |
1667 | printk("FEC: allocate descriptor memory failed?\n"); | 1667 | printk("FEC: allocate descriptor memory failed?\n"); |
1668 | return -ENOMEM; | 1668 | return -ENOMEM; |
1669 | } | 1669 | } |
1670 | 1670 | ||
1671 | spin_lock_init(&fep->hw_lock); | 1671 | spin_lock_init(&fep->hw_lock); |
1672 | spin_lock_init(&fep->mii_lock); | 1672 | spin_lock_init(&fep->mii_lock); |
1673 | 1673 | ||
1674 | fep->index = index; | 1674 | fep->index = index; |
1675 | fep->hwp = (void __iomem *)dev->base_addr; | 1675 | fep->hwp = (void __iomem *)dev->base_addr; |
1676 | fep->netdev = dev; | 1676 | fep->netdev = dev; |
1677 | 1677 | ||
1678 | /* Set the Ethernet address */ | 1678 | /* Set the Ethernet address */ |
1679 | #ifdef CONFIG_M5272 | 1679 | #ifdef CONFIG_M5272 |
1680 | fec_get_mac(dev); | 1680 | fec_get_mac(dev); |
1681 | #else | 1681 | #else |
1682 | { | 1682 | { |
1683 | unsigned long l; | 1683 | unsigned long l; |
1684 | l = readl(fep->hwp + FEC_ADDR_LOW); | 1684 | l = readl(fep->hwp + FEC_ADDR_LOW); |
1685 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); | 1685 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); |
1686 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); | 1686 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); |
1687 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); | 1687 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); |
1688 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); | 1688 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); |
1689 | l = readl(fep->hwp + FEC_ADDR_HIGH); | 1689 | l = readl(fep->hwp + FEC_ADDR_HIGH); |
1690 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); | 1690 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); |
1691 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); | 1691 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); |
1692 | } | 1692 | } |
1693 | #endif | 1693 | #endif |
1694 | 1694 | ||
1695 | /* Set receive and transmit descriptor base. */ | 1695 | /* Set receive and transmit descriptor base. */ |
1696 | fep->rx_bd_base = cbd_base; | 1696 | fep->rx_bd_base = cbd_base; |
1697 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; | 1697 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; |
1698 | 1698 | ||
1699 | #ifdef HAVE_mii_link_interrupt | 1699 | #ifdef HAVE_mii_link_interrupt |
1700 | fec_request_mii_intr(dev); | 1700 | fec_request_mii_intr(dev); |
1701 | #endif | 1701 | #endif |
1702 | /* The FEC Ethernet specific entries in the device structure */ | 1702 | /* The FEC Ethernet specific entries in the device structure */ |
1703 | dev->watchdog_timeo = TX_TIMEOUT; | 1703 | dev->watchdog_timeo = TX_TIMEOUT; |
1704 | dev->netdev_ops = &fec_netdev_ops; | 1704 | dev->netdev_ops = &fec_netdev_ops; |
1705 | 1705 | ||
1706 | for (i = 0; i < NMII - 1; i++) | 1706 | for (i = 0; i < NMII - 1; i++) |
1707 | mii_cmds[i].mii_next = &mii_cmds[i+1]; | 1707 | mii_cmds[i].mii_next = &mii_cmds[i+1]; |
1708 | mii_free = mii_cmds; | 1708 | mii_free = mii_cmds; |
1709 | 1709 | ||
1710 | /* Set the MII (MDC) clock to no more than 2.5 MHz */ | 1710 | /* Set the MII (MDC) clock to no more than 2.5 MHz */ |
1711 | fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) | 1711 | fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) |
1712 | / 2500000) / 2) & 0x3F) << 1; | 1712 | / 2500000) / 2) & 0x3F) << 1; |
1713 | fec_restart(dev, 0); | 1713 | fec_restart(dev, 0); |
1714 | 1714 | ||
1715 | /* Queue up command to detect the PHY and initialize the | 1715 | /* Queue up command to detect the PHY and initialize the |
1716 | * remainder of the interface. | 1716 | * remainder of the interface. |
1717 | */ | 1717 | */ |
1718 | fep->phy_id_done = 0; | 1718 | fep->phy_id_done = 0; |
1719 | fep->phy_addr = 0; | 1719 | fep->phy_addr = 0; |
1720 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); | 1720 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy); |
1721 | 1721 | ||
1722 | return 0; | 1722 | return 0; |
1723 | } | 1723 | } |
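The phy_speed expression above rounds the divider up so the resulting MII management clock never exceeds 2.5 MHz; the +4999999 bias is what turns the integer division into a ceiling, and the final << 1 places the value in the register's speed field. A worked example, assuming a 66 MHz clk_get_rate() purely for illustration:

#include <stdio.h>

int main(void)
{
    unsigned long rate = 66000000;  /* assumed clock rate, example only */
    unsigned int phy_speed =
        ((((rate / 2 + 4999999) / 2500000) / 2) & 0x3F) << 1;

    /* 66000000/2 = 33000000; (33000000 + 4999999)/2500000 = 15;
     * 15/2 = 7; (7 & 0x3F) << 1 = 14, i.e. 0x0e. */
    printf("phy_speed = %u (0x%02x)\n", phy_speed, phy_speed);
    return 0;
}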
1724 | 1724 | ||
1725 | /* This function is called to start or restart the FEC during a link | 1725 | /* This function is called to start or restart the FEC during a link |
1726 | * change. This only happens when switching between half and full | 1726 | * change. This only happens when switching between half and full |
1727 | * duplex. | 1727 | * duplex. |
1728 | */ | 1728 | */ |
1729 | static void | 1729 | static void |
1730 | fec_restart(struct net_device *dev, int duplex) | 1730 | fec_restart(struct net_device *dev, int duplex) |
1731 | { | 1731 | { |
1732 | struct fec_enet_private *fep = netdev_priv(dev); | 1732 | struct fec_enet_private *fep = netdev_priv(dev); |
1733 | struct bufdesc *bdp; | 1733 | struct bufdesc *bdp; |
1734 | int i; | 1734 | int i; |
1735 | 1735 | ||
1736 | /* Whack a reset. We should wait for this. */ | 1736 | /* Whack a reset. We should wait for this. */ |
1737 | writel(1, fep->hwp + FEC_ECNTRL); | 1737 | writel(1, fep->hwp + FEC_ECNTRL); |
1738 | udelay(10); | 1738 | udelay(10); |
1739 | 1739 | ||
1740 | /* Clear any outstanding interrupt. */ | 1740 | /* Clear any outstanding interrupt. */ |
1741 | writel(0xffc00000, fep->hwp + FEC_IEVENT); | 1741 | writel(0xffc00000, fep->hwp + FEC_IEVENT); |
1742 | 1742 | ||
1743 | /* Reset all multicast. */ | 1743 | /* Reset all multicast. */ |
1744 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | 1744 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1745 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | 1745 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1746 | #ifndef CONFIG_M5272 | 1746 | #ifndef CONFIG_M5272 |
1747 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | 1747 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); |
1748 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | 1748 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); |
1749 | #endif | 1749 | #endif |
1750 | 1750 | ||
1751 | /* Set maximum receive buffer size. */ | 1751 | /* Set maximum receive buffer size. */ |
1752 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 1752 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
1753 | 1753 | ||
1754 | /* Set receive and transmit descriptor base. */ | 1754 | /* Set receive and transmit descriptor base. */ |
1755 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); | 1755 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
1756 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, | 1756 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, |
1757 | fep->hwp + FEC_X_DES_START); | 1757 | fep->hwp + FEC_X_DES_START); |
1758 | 1758 | ||
1759 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | 1759 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; |
1760 | fep->cur_rx = fep->rx_bd_base; | 1760 | fep->cur_rx = fep->rx_bd_base; |
1761 | 1761 | ||
1762 | /* Reset SKB transmit buffers. */ | 1762 | /* Reset SKB transmit buffers. */ |
1763 | fep->skb_cur = fep->skb_dirty = 0; | 1763 | fep->skb_cur = fep->skb_dirty = 0; |
1764 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { | 1764 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { |
1765 | if (fep->tx_skbuff[i]) { | 1765 | if (fep->tx_skbuff[i]) { |
1766 | dev_kfree_skb_any(fep->tx_skbuff[i]); | 1766 | dev_kfree_skb_any(fep->tx_skbuff[i]); |
1767 | fep->tx_skbuff[i] = NULL; | 1767 | fep->tx_skbuff[i] = NULL; |
1768 | } | 1768 | } |
1769 | } | 1769 | } |
1770 | 1770 | ||
1771 | /* Initialize the receive buffer descriptors. */ | 1771 | /* Initialize the receive buffer descriptors. */ |
1772 | bdp = fep->rx_bd_base; | 1772 | bdp = fep->rx_bd_base; |
1773 | for (i = 0; i < RX_RING_SIZE; i++) { | 1773 | for (i = 0; i < RX_RING_SIZE; i++) { |
1774 | 1774 | ||
1775 | /* Initialize the BD for every fragment in the page. */ | 1775 | /* Initialize the BD for every fragment in the page. */ |
1776 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 1776 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
1777 | bdp++; | 1777 | bdp++; |
1778 | } | 1778 | } |
1779 | 1779 | ||
1780 | /* Set the last buffer to wrap */ | 1780 | /* Set the last buffer to wrap */ |
1781 | bdp--; | 1781 | bdp--; |
1782 | bdp->cbd_sc |= BD_SC_WRAP; | 1782 | bdp->cbd_sc |= BD_SC_WRAP; |
1783 | 1783 | ||
1784 | /* ...and the same for transmit */ | 1784 | /* ...and the same for transmit */ |
1785 | bdp = fep->tx_bd_base; | 1785 | bdp = fep->tx_bd_base; |
1786 | for (i = 0; i < TX_RING_SIZE; i++) { | 1786 | for (i = 0; i < TX_RING_SIZE; i++) { |
1787 | 1787 | ||
1788 | /* Initialize the BD for every fragment in the page. */ | 1788 | /* Initialize the BD for every fragment in the page. */ |
1789 | bdp->cbd_sc = 0; | 1789 | bdp->cbd_sc = 0; |
1790 | bdp->cbd_bufaddr = 0; | 1790 | bdp->cbd_bufaddr = 0; |
1791 | bdp++; | 1791 | bdp++; |
1792 | } | 1792 | } |
1793 | 1793 | ||
1794 | /* Set the last buffer to wrap */ | 1794 | /* Set the last buffer to wrap */ |
1795 | bdp--; | 1795 | bdp--; |
1796 | bdp->cbd_sc |= BD_SC_WRAP; | 1796 | bdp->cbd_sc |= BD_SC_WRAP; |
1797 | 1797 | ||
1798 | /* Enable MII mode */ | 1798 | /* Enable MII mode */ |
1799 | if (duplex) { | 1799 | if (duplex) { |
1800 | /* MII enable / FD enable */ | 1800 | /* MII enable / FD enable */ |
1801 | writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL); | 1801 | writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL); |
1802 | writel(0x04, fep->hwp + FEC_X_CNTRL); | 1802 | writel(0x04, fep->hwp + FEC_X_CNTRL); |
1803 | } else { | 1803 | } else { |
1804 | /* MII enable / No Rcv on Xmit */ | 1804 | /* MII enable / No Rcv on Xmit */ |
1805 | writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL); | 1805 | writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL); |
1806 | writel(0x0, fep->hwp + FEC_X_CNTRL); | 1806 | writel(0x0, fep->hwp + FEC_X_CNTRL); |
1807 | } | 1807 | } |
1808 | fep->full_duplex = duplex; | 1808 | fep->full_duplex = duplex; |
1809 | 1809 | ||
1810 | /* Set MII speed */ | 1810 | /* Set MII speed */ |
1811 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 1811 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1812 | 1812 | ||
1813 | /* And last, enable the transmit and receive processing */ | 1813 | /* And last, enable the transmit and receive processing */ |
1814 | writel(2, fep->hwp + FEC_ECNTRL); | 1814 | writel(2, fep->hwp + FEC_ECNTRL); |
1815 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | 1815 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); |
1816 | 1816 | ||
1817 | /* Enable interrupts we wish to service */ | 1817 | /* Enable interrupts we wish to service */ |
1818 | writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, | 1818 | writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, |
1819 | fep->hwp + FEC_IMASK); | 1819 | fep->hwp + FEC_IMASK); |
1820 | } | 1820 | } |
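Both rings above rely on a wrap flag in the last descriptor rather than explicit modulo arithmetic: the producer or consumer walks forward until it sees the flag, then jumps back to the base. A standalone sketch of that traversal pattern, with an assumed BD_SC_WRAP value (the real one lives in the driver's headers):

#include <stdio.h>
#include <stdint.h>

#define BD_SC_WRAP 0x2000  /* assumed flag value, illustration only */
#define RING_SIZE  4

struct bufdesc {
    uint16_t cbd_sc;  /* control/status word, as in the driver */
};

int main(void)
{
    struct bufdesc ring[RING_SIZE] = {{ 0 }};
    struct bufdesc *bdp = ring;
    int i;

    /* Mark the last descriptor so walkers jump back to the base. */
    ring[RING_SIZE - 1].cbd_sc |= BD_SC_WRAP;

    for (i = 0; i < 6; i++) {
        printf("visiting descriptor %d\n", (int)(bdp - ring));
        bdp = (bdp->cbd_sc & BD_SC_WRAP) ? ring : bdp + 1;
    }
    return 0;
}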
1821 | 1821 | ||
1822 | static void | 1822 | static void |
1823 | fec_stop(struct net_device *dev) | 1823 | fec_stop(struct net_device *dev) |
1824 | { | 1824 | { |
1825 | struct fec_enet_private *fep = netdev_priv(dev); | 1825 | struct fec_enet_private *fep = netdev_priv(dev); |
1826 | 1826 | ||
1827 | /* We cannot expect a graceful transmit stop without link. */ | 1827 | /* We cannot expect a graceful transmit stop without link. */ |
1828 | if (fep->link) { | 1828 | if (fep->link) { |
1829 | writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ | 1829 | writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ |
1830 | udelay(10); | 1830 | udelay(10); |
1831 | if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) | 1831 | if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) |
1832 | printk("fec_stop : Graceful transmit stop did not complete !\n"); | 1832 | printk("fec_stop : Graceful transmit stop did not complete !\n"); |
1833 | } | 1833 | } |
1834 | 1834 | ||
1835 | /* Whack a reset. We should wait for this. */ | 1835 | /* Whack a reset. We should wait for this. */ |
1836 | writel(1, fep->hwp + FEC_ECNTRL); | 1836 | writel(1, fep->hwp + FEC_ECNTRL); |
1837 | udelay(10); | 1837 | udelay(10); |
1838 | 1838 | ||
1839 | /* Clear outstanding MII command interrupts. */ | 1839 | /* Clear outstanding MII command interrupts. */ |
1840 | writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); | 1840 | writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); |
1841 | 1841 | ||
1842 | writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); | 1842 | writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); |
1843 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); | 1843 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1844 | } | 1844 | } |
1845 | 1845 | ||
1846 | static int __devinit | 1846 | static int __devinit |
1847 | fec_probe(struct platform_device *pdev) | 1847 | fec_probe(struct platform_device *pdev) |
1848 | { | 1848 | { |
1849 | struct fec_enet_private *fep; | 1849 | struct fec_enet_private *fep; |
1850 | struct net_device *ndev; | 1850 | struct net_device *ndev; |
1851 | int i, irq, ret = 0; | 1851 | int i, irq, ret = 0; |
1852 | struct resource *r; | 1852 | struct resource *r; |
1853 | 1853 | ||
1854 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1854 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1855 | if (!r) | 1855 | if (!r) |
1856 | return -ENXIO; | 1856 | return -ENXIO; |
1857 | 1857 | ||
1858 | r = request_mem_region(r->start, resource_size(r), pdev->name); | 1858 | r = request_mem_region(r->start, resource_size(r), pdev->name); |
1859 | if (!r) | 1859 | if (!r) |
1860 | return -EBUSY; | 1860 | return -EBUSY; |
1861 | 1861 | ||
1862 | /* Init network device */ | 1862 | /* Init network device */ |
1863 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); | 1863 | ndev = alloc_etherdev(sizeof(struct fec_enet_private)); |
1864 | if (!ndev) | 1864 | if (!ndev) |
1865 | return -ENOMEM; | 1865 | return -ENOMEM; |
1866 | 1866 | ||
1867 | SET_NETDEV_DEV(ndev, &pdev->dev); | 1867 | SET_NETDEV_DEV(ndev, &pdev->dev); |
1868 | 1868 | ||
1869 | /* Set up the board info structure */ | 1869 | /* Set up the board info structure */ |
1870 | fep = netdev_priv(ndev); | 1870 | fep = netdev_priv(ndev); |
1871 | memset(fep, 0, sizeof(*fep)); | 1871 | memset(fep, 0, sizeof(*fep)); |
1872 | 1872 | ||
1873 | ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); | 1873 | ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); |
1874 | 1874 | ||
1875 | if (!ndev->base_addr) { | 1875 | if (!ndev->base_addr) { |
1876 | ret = -ENOMEM; | 1876 | ret = -ENOMEM; |
1877 | goto failed_ioremap; | 1877 | goto failed_ioremap; |
1878 | } | 1878 | } |
1879 | 1879 | ||
1880 | platform_set_drvdata(pdev, ndev); | 1880 | platform_set_drvdata(pdev, ndev); |
1881 | 1881 | ||
1882 | /* This device has up to three irqs on some platforms */ | 1882 | /* This device has up to three irqs on some platforms */ |
1883 | for (i = 0; i < 3; i++) { | 1883 | for (i = 0; i < 3; i++) { |
1884 | irq = platform_get_irq(pdev, i); | 1884 | irq = platform_get_irq(pdev, i); |
1885 | if (i && irq < 0) | 1885 | if (i && irq < 0) |
1886 | break; | 1886 | break; |
1887 | ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); | 1887 | ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); |
1888 | if (ret) { | 1888 | if (ret) { |
1889 | while (i > 0) { | 1889 | while (i > 0) { |
1890 | irq = platform_get_irq(pdev, i - 1); | 1890 | irq = platform_get_irq(pdev, i - 1); |
1891 | free_irq(irq, ndev); | 1891 | free_irq(irq, ndev); |
1892 | i--; | 1892 | i--; |
1893 | } | 1893 | } |
1894 | goto failed_irq; | 1894 | goto failed_irq; |
1895 | } | 1895 | } |
1896 | } | 1896 | } |
1897 | 1897 | ||
1898 | fep->clk = clk_get(&pdev->dev, "fec_clk"); | 1898 | fep->clk = clk_get(&pdev->dev, "fec_clk"); |
1899 | if (IS_ERR(fep->clk)) { | 1899 | if (IS_ERR(fep->clk)) { |
1900 | ret = PTR_ERR(fep->clk); | 1900 | ret = PTR_ERR(fep->clk); |
1901 | goto failed_clk; | 1901 | goto failed_clk; |
1902 | } | 1902 | } |
1903 | clk_enable(fep->clk); | 1903 | clk_enable(fep->clk); |
1904 | 1904 | ||
1905 | ret = fec_enet_init(ndev, 0); | 1905 | ret = fec_enet_init(ndev, 0); |
1906 | if (ret) | 1906 | if (ret) |
1907 | goto failed_init; | 1907 | goto failed_init; |
1908 | 1908 | ||
1909 | ret = register_netdev(ndev); | 1909 | ret = register_netdev(ndev); |
1910 | if (ret) | 1910 | if (ret) |
1911 | goto failed_register; | 1911 | goto failed_register; |
1912 | 1912 | ||
1913 | return 0; | 1913 | return 0; |
1914 | 1914 | ||
1915 | failed_register: | 1915 | failed_register: |
1916 | failed_init: | 1916 | failed_init: |
1917 | clk_disable(fep->clk); | 1917 | clk_disable(fep->clk); |
1918 | clk_put(fep->clk); | 1918 | clk_put(fep->clk); |
1919 | failed_clk: | 1919 | failed_clk: |
1920 | for (i = 0; i < 3; i++) { | 1920 | for (i = 0; i < 3; i++) { |
1921 | irq = platform_get_irq(pdev, i); | 1921 | irq = platform_get_irq(pdev, i); |
1922 | if (irq > 0) | 1922 | if (irq > 0) |
1923 | free_irq(irq, ndev); | 1923 | free_irq(irq, ndev); |
1924 | } | 1924 | } |
1925 | failed_irq: | 1925 | failed_irq: |
1926 | iounmap((void __iomem *)ndev->base_addr); | 1926 | iounmap((void __iomem *)ndev->base_addr); |
1927 | failed_ioremap: | 1927 | failed_ioremap: |
1928 | free_netdev(ndev); | 1928 | free_netdev(ndev); |
1929 | 1929 | ||
1930 | return ret; | 1930 | return ret; |
1931 | } | 1931 | } |
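fec_probe() unwinds with the usual kernel goto-label pattern: each failure label releases only the resources acquired before the failing step, in reverse order. A user-space miniature of the same idiom (resource names invented for the sketch):

#include <stdio.h>
#include <stdlib.h>

static int acquire_two(void)
{
    char *first, *second;
    int ret = 0;

    first = malloc(16);
    if (!first)
        return -1;          /* nothing to unwind yet */

    second = malloc(16);
    if (!second) {
        ret = -1;
        goto failed_second; /* release only what already succeeded */
    }

    puts("both resources acquired");
    free(second);
failed_second:
    free(first);
    return ret;
}

int main(void)
{
    return acquire_two() ? 1 : 0;
}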
1932 | 1932 | ||
1933 | static int __devexit | 1933 | static int __devexit |
1934 | fec_drv_remove(struct platform_device *pdev) | 1934 | fec_drv_remove(struct platform_device *pdev) |
1935 | { | 1935 | { |
1936 | struct net_device *ndev = platform_get_drvdata(pdev); | 1936 | struct net_device *ndev = platform_get_drvdata(pdev); |
1937 | struct fec_enet_private *fep = netdev_priv(ndev); | 1937 | struct fec_enet_private *fep = netdev_priv(ndev); |
1938 | 1938 | ||
1939 | platform_set_drvdata(pdev, NULL); | 1939 | platform_set_drvdata(pdev, NULL); |
1940 | 1940 | ||
1941 | unregister_netdev(ndev); | 1941 | unregister_netdev(ndev); |
1942 | fec_stop(ndev); | 1942 | fec_stop(ndev); |
1943 | clk_disable(fep->clk); | 1943 | clk_disable(fep->clk); |
1944 | clk_put(fep->clk); | 1944 | clk_put(fep->clk); |
1945 | iounmap((void __iomem *)ndev->base_addr); | 1945 | iounmap((void __iomem *)ndev->base_addr); |
1946 | free_netdev(ndev); | 1946 | free_netdev(ndev); |
1947 | return 0; | 1947 | return 0; |
1948 | } | 1948 | } |
1949 | 1949 | ||
1950 | static int | 1950 | static int |
1951 | fec_suspend(struct platform_device *dev, pm_message_t state) | 1951 | fec_suspend(struct platform_device *dev, pm_message_t state) |
1952 | { | 1952 | { |
1953 | struct net_device *ndev = platform_get_drvdata(dev); | 1953 | struct net_device *ndev = platform_get_drvdata(dev); |
1954 | struct fec_enet_private *fep; | 1954 | struct fec_enet_private *fep; |
1955 | 1955 | ||
1956 | if (ndev) { | 1956 | if (ndev) { |
1957 | fep = netdev_priv(ndev); | 1957 | fep = netdev_priv(ndev); |
1958 | if (netif_running(ndev)) { | 1958 | if (netif_running(ndev)) { |
1959 | netif_device_detach(ndev); | 1959 | netif_device_detach(ndev); |
1960 | fec_stop(ndev); | 1960 | fec_stop(ndev); |
1961 | } | 1961 | } |
1962 | } | 1962 | } |
1963 | return 0; | 1963 | return 0; |
1964 | } | 1964 | } |
1965 | 1965 | ||
1966 | static int | 1966 | static int |
1967 | fec_resume(struct platform_device *dev) | 1967 | fec_resume(struct platform_device *dev) |
1968 | { | 1968 | { |
1969 | struct net_device *ndev = platform_get_drvdata(dev); | 1969 | struct net_device *ndev = platform_get_drvdata(dev); |
1970 | 1970 | ||
1971 | if (ndev) { | 1971 | if (ndev) { |
1972 | if (netif_running(ndev)) { | 1972 | if (netif_running(ndev)) { |
1973 | fec_enet_init(ndev, 0); | 1973 | fec_enet_init(ndev, 0); |
1974 | netif_device_attach(ndev); | 1974 | netif_device_attach(ndev); |
1975 | } | 1975 | } |
1976 | } | 1976 | } |
1977 | return 0; | 1977 | return 0; |
1978 | } | 1978 | } |
1979 | 1979 | ||
1980 | static struct platform_driver fec_driver = { | 1980 | static struct platform_driver fec_driver = { |
1981 | .driver = { | 1981 | .driver = { |
1982 | .name = "fec", | 1982 | .name = "fec", |
1983 | .owner = THIS_MODULE, | 1983 | .owner = THIS_MODULE, |
1984 | }, | 1984 | }, |
1985 | .probe = fec_probe, | 1985 | .probe = fec_probe, |
1986 | .remove = __devexit_p(fec_drv_remove), | 1986 | .remove = __devexit_p(fec_drv_remove), |
1987 | .suspend = fec_suspend, | 1987 | .suspend = fec_suspend, |
1988 | .resume = fec_resume, | 1988 | .resume = fec_resume, |
1989 | }; | 1989 | }; |
1990 | 1990 | ||
1991 | static int __init | 1991 | static int __init |
1992 | fec_enet_module_init(void) | 1992 | fec_enet_module_init(void) |
1993 | { | 1993 | { |
1994 | printk(KERN_INFO "FEC Ethernet Driver\n"); | 1994 | printk(KERN_INFO "FEC Ethernet Driver\n"); |
1995 | 1995 | ||
1996 | return platform_driver_register(&fec_driver); | 1996 | return platform_driver_register(&fec_driver); |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | static void __exit | 1999 | static void __exit |
2000 | fec_enet_cleanup(void) | 2000 | fec_enet_cleanup(void) |
2001 | { | 2001 | { |
2002 | platform_driver_unregister(&fec_driver); | 2002 | platform_driver_unregister(&fec_driver); |
2003 | } | 2003 | } |
2004 | 2004 | ||
2005 | module_exit(fec_enet_cleanup); | 2005 | module_exit(fec_enet_cleanup); |
2006 | module_init(fec_enet_module_init); | 2006 | module_init(fec_enet_module_init); |
2007 | 2007 | ||
2008 | MODULE_LICENSE("GPL"); | 2008 | MODULE_LICENSE("GPL"); |
2009 | 2009 |