Commit 976de6a8c304dcc43e38efcb8a0bace7866b6242

Authored by Scott Wood
Committed by David S. Miller
1 parent 0d0d9c150c

fs_enet: Be an of_platform device when CONFIG_PPC_CPM_NEW_BINDING is set.

The existing OF glue code was crufty and broken.  Rather than fixing it, it
will be removed; the ethernet driver now talks to the device tree
directly.

The old, non-CONFIG_PPC_CPM_NEW_BINDING code can go away once CPM
platforms are dropped from arch/ppc (which will hopefully be soon) and
once the existing arch/powerpc boards that I wasn't able to test on for
this patchset are converted (which should be even sooner).

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
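
With CONFIG_PPC_CPM_NEW_BINDING set, the driver registers on the
of_platform bus and pulls its resources (register window, interrupt,
MAC address, PHY handle) straight from its device tree node instead of
from board-supplied platform data.  A minimal sketch of that pattern is
shown below; the compatible strings, property names, and probe contents
are illustrative assumptions, not the exact code added by this commit.

/*
 * Sketch only: include list abbreviated, error handling omitted, and
 * the match entries are assumptions about the new binding.
 */
#include <asm/of_platform.h>

static struct of_device_id fs_enet_of_match[] = {
	{ .compatible = "fsl,cpm1-scc-enet", .data = (void *)&fs_scc_ops, },
	{ .compatible = "fsl,cpm2-fcc-enet", .data = (void *)&fs_fcc_ops, },
	{}
};

static int __devinit fs_enet_of_probe(struct of_device *ofdev,
                                      const struct of_device_id *match)
{
	struct device_node *np = ofdev->node;
	const u8 *mac = of_get_property(np, "mac-address", NULL);

	/* Map registers with of_iomap(), fetch the IRQ with
	 * irq_of_parse_and_map(), find the PHY through a phy-handle
	 * property, then allocate and register the net_device using
	 * the MAC ops selected by match->data. */
	return mac ? 0 : -ENODEV;
}

static struct of_platform_driver fs_enet_of_driver = {
	.name        = "fs_enet",
	.match_table = fs_enet_of_match,
	.probe       = fs_enet_of_probe,
};

static int __init fs_enet_of_init(void)
{
	return of_register_platform_driver(&fs_enet_of_driver);
}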

Showing 9 changed files with 714 additions and 178 deletions

drivers/net/fs_enet/Kconfig
1 config FS_ENET 1 config FS_ENET
2 tristate "Freescale Ethernet Driver" 2 tristate "Freescale Ethernet Driver"
3 depends on CPM1 || CPM2 3 depends on CPM1 || CPM2
4 select MII 4 select MII
5 5
6 config FS_ENET_HAS_SCC 6 config FS_ENET_HAS_SCC
7 bool "Chip has an SCC usable for ethernet" 7 bool "Chip has an SCC usable for ethernet"
8 depends on FS_ENET && (CPM1 || CPM2) 8 depends on FS_ENET && (CPM1 || CPM2)
9 default y 9 default y
10 10
11 config FS_ENET_HAS_FCC 11 config FS_ENET_HAS_FCC
12 bool "Chip has an FCC usable for ethernet" 12 bool "Chip has an FCC usable for ethernet"
13 depends on FS_ENET && CPM2 13 depends on FS_ENET && CPM2
14 select MDIO_BITBANG
14 default y 15 default y
15 16
16 config FS_ENET_HAS_FEC 17 config FS_ENET_HAS_FEC
17 bool "Chip has an FEC usable for ethernet" 18 bool "Chip has an FEC usable for ethernet"
18 depends on FS_ENET && CPM1 19 depends on FS_ENET && CPM1
19 default y 20 default y
20 21
21 22
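
The new "select MDIO_BITBANG" on FS_ENET_HAS_FCC pulls in the kernel's
generic bit-banged MDIO library, which the reworked FCC MII code can be
built on.  A rough sketch of how a driver consumes that library follows;
the bb_* helpers and the bb_info layout are hypothetical placeholders.

#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-bitbang.h>

struct bb_info {
	struct mdiobb_ctrl ctrl;	/* handed to the library */
	/* ...whatever describes this board's MDC/MDIO pins... */
};

static void bb_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	/* drive the MDC pin high (level = 1) or low (level = 0) */
}

static void bb_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	/* make the MDIO pin an output (1) or an input (0) */
}

static void bb_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	/* drive the MDIO pin; only called while it is an output */
}

static int bb_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	return 0;	/* sample the MDIO pin */
}

static struct mdiobb_ops bb_ops = {
	.owner		= THIS_MODULE,
	.set_mdc	= bb_set_mdc,
	.set_mdio_dir	= bb_set_mdio_dir,
	.set_mdio_data	= bb_set_mdio_data,
	.get_mdio_data	= bb_get_mdio_data,
};

static struct mii_bus *bb_make_bus(struct bb_info *bb)
{
	bb->ctrl.ops = &bb_ops;
	/* the returned mii_bus does its MDIO reads/writes through bb_ops;
	 * register it with mdiobus_register() as usual */
	return alloc_mdio_bitbang(&bb->ctrl);
}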
drivers/net/fs_enet/fs_enet-main.c
1 /* 1 /*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. 2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 * 3 *
4 * Copyright (c) 2003 Intracom S.A. 4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com> 10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se> 11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 * 12 *
13 * This file is licensed under the terms of the GNU General Public License 13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any 14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied. 15 * kind, whether express or implied.
16 */ 16 */
17 17
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/types.h> 20 #include <linux/types.h>
21 #include <linux/string.h> 21 #include <linux/string.h>
22 #include <linux/ptrace.h> 22 #include <linux/ptrace.h>
23 #include <linux/errno.h> 23 #include <linux/errno.h>
24 #include <linux/ioport.h> 24 #include <linux/ioport.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/interrupt.h> 26 #include <linux/interrupt.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/delay.h> 28 #include <linux/delay.h>
29 #include <linux/netdevice.h> 29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h> 30 #include <linux/etherdevice.h>
31 #include <linux/skbuff.h> 31 #include <linux/skbuff.h>
32 #include <linux/spinlock.h> 32 #include <linux/spinlock.h>
33 #include <linux/mii.h> 33 #include <linux/mii.h>
34 #include <linux/ethtool.h> 34 #include <linux/ethtool.h>
35 #include <linux/bitops.h> 35 #include <linux/bitops.h>
36 #include <linux/fs.h> 36 #include <linux/fs.h>
37 #include <linux/platform_device.h> 37 #include <linux/platform_device.h>
38 #include <linux/phy.h> 38 #include <linux/phy.h>
39 39
40 #include <linux/vmalloc.h> 40 #include <linux/vmalloc.h>
41 #include <asm/pgtable.h> 41 #include <asm/pgtable.h>
42 #include <asm/irq.h> 42 #include <asm/irq.h>
43 #include <asm/uaccess.h> 43 #include <asm/uaccess.h>
44 44
45 #ifdef CONFIG_PPC_CPM_NEW_BINDING
46 #include <asm/of_platform.h>
47 #endif
48
45 #include "fs_enet.h" 49 #include "fs_enet.h"
46 50
47 /*************************************************/ 51 /*************************************************/
48 52
53 #ifndef CONFIG_PPC_CPM_NEW_BINDING
49 static char version[] __devinitdata = 54 static char version[] __devinitdata =
50 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; 55 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
56 #endif
51 57
52 MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>"); 58 MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
53 MODULE_DESCRIPTION("Freescale Ethernet Driver"); 59 MODULE_DESCRIPTION("Freescale Ethernet Driver");
54 MODULE_LICENSE("GPL"); 60 MODULE_LICENSE("GPL");
55 MODULE_VERSION(DRV_MODULE_VERSION); 61 MODULE_VERSION(DRV_MODULE_VERSION);
56 62
57 int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */ 63 int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
58 module_param(fs_enet_debug, int, 0); 64 module_param(fs_enet_debug, int, 0);
59 MODULE_PARM_DESC(fs_enet_debug, 65 MODULE_PARM_DESC(fs_enet_debug,
60 "Freescale bitmapped debugging message enable value"); 66 "Freescale bitmapped debugging message enable value");
61 67
62 #ifdef CONFIG_NET_POLL_CONTROLLER 68 #ifdef CONFIG_NET_POLL_CONTROLLER
63 static void fs_enet_netpoll(struct net_device *dev); 69 static void fs_enet_netpoll(struct net_device *dev);
64 #endif 70 #endif
65 71
66 static void fs_set_multicast_list(struct net_device *dev) 72 static void fs_set_multicast_list(struct net_device *dev)
67 { 73 {
68 struct fs_enet_private *fep = netdev_priv(dev); 74 struct fs_enet_private *fep = netdev_priv(dev);
69 75
70 (*fep->ops->set_multicast_list)(dev); 76 (*fep->ops->set_multicast_list)(dev);
71 } 77 }
72 78
73 static void skb_align(struct sk_buff *skb, int align) 79 static void skb_align(struct sk_buff *skb, int align)
74 { 80 {
75 int off = ((unsigned long)skb->data) & (align - 1); 81 int off = ((unsigned long)skb->data) & (align - 1);
76 82
77 if (off) 83 if (off)
78 skb_reserve(skb, align - off); 84 skb_reserve(skb, align - off);
79 } 85 }
80 86
81 /* NAPI receive function */ 87 /* NAPI receive function */
82 static int fs_enet_rx_napi(struct napi_struct *napi, int budget) 88 static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
83 { 89 {
84 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi); 90 struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
85 struct net_device *dev = to_net_dev(fep->dev); 91 struct net_device *dev = to_net_dev(fep->dev);
86 const struct fs_platform_info *fpi = fep->fpi; 92 const struct fs_platform_info *fpi = fep->fpi;
87 cbd_t *bdp; 93 cbd_t *bdp;
88 struct sk_buff *skb, *skbn, *skbt; 94 struct sk_buff *skb, *skbn, *skbt;
89 int received = 0; 95 int received = 0;
90 u16 pkt_len, sc; 96 u16 pkt_len, sc;
91 int curidx; 97 int curidx;
92 98
93 if (!netif_running(dev)) 99 if (!netif_running(dev))
94 return 0; 100 return 0;
95 101
96 /* 102 /*
97 * First, grab all of the stats for the incoming packet. 103 * First, grab all of the stats for the incoming packet.
98 * These get messed up if we get called due to a busy condition. 104 * These get messed up if we get called due to a busy condition.
99 */ 105 */
100 bdp = fep->cur_rx; 106 bdp = fep->cur_rx;
101 107
102 /* clear RX status bits for napi*/ 108 /* clear RX status bits for napi*/
103 (*fep->ops->napi_clear_rx_event)(dev); 109 (*fep->ops->napi_clear_rx_event)(dev);
104 110
105 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { 111 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
106 curidx = bdp - fep->rx_bd_base; 112 curidx = bdp - fep->rx_bd_base;
107 113
108 /* 114 /*
109 * Since we have allocated space to hold a complete frame, 115 * Since we have allocated space to hold a complete frame,
110 * the last indicator should be set. 116 * the last indicator should be set.
111 */ 117 */
112 if ((sc & BD_ENET_RX_LAST) == 0) 118 if ((sc & BD_ENET_RX_LAST) == 0)
113 printk(KERN_WARNING DRV_MODULE_NAME 119 printk(KERN_WARNING DRV_MODULE_NAME
114 ": %s rcv is not +last\n", 120 ": %s rcv is not +last\n",
115 dev->name); 121 dev->name);
116 122
117 /* 123 /*
118 * Check for errors. 124 * Check for errors.
119 */ 125 */
120 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | 126 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
121 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { 127 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
122 fep->stats.rx_errors++; 128 fep->stats.rx_errors++;
123 /* Frame too long or too short. */ 129 /* Frame too long or too short. */
124 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) 130 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
125 fep->stats.rx_length_errors++; 131 fep->stats.rx_length_errors++;
126 /* Frame alignment */ 132 /* Frame alignment */
127 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 133 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
128 fep->stats.rx_frame_errors++; 134 fep->stats.rx_frame_errors++;
129 /* CRC Error */ 135 /* CRC Error */
130 if (sc & BD_ENET_RX_CR) 136 if (sc & BD_ENET_RX_CR)
131 fep->stats.rx_crc_errors++; 137 fep->stats.rx_crc_errors++;
132 /* FIFO overrun */ 138 /* FIFO overrun */
133 if (sc & BD_ENET_RX_OV) 139 if (sc & BD_ENET_RX_OV)
134 fep->stats.rx_crc_errors++; 140 fep->stats.rx_crc_errors++;
135 141
136 skb = fep->rx_skbuff[curidx]; 142 skb = fep->rx_skbuff[curidx];
137 143
138 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 144 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
139 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 145 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
140 DMA_FROM_DEVICE); 146 DMA_FROM_DEVICE);
141 147
142 skbn = skb; 148 skbn = skb;
143 149
144 } else { 150 } else {
145 skb = fep->rx_skbuff[curidx]; 151 skb = fep->rx_skbuff[curidx];
146 152
147 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 153 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
148 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 154 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
149 DMA_FROM_DEVICE); 155 DMA_FROM_DEVICE);
150 156
151 /* 157 /*
152 * Process the incoming frame. 158 * Process the incoming frame.
153 */ 159 */
154 fep->stats.rx_packets++; 160 fep->stats.rx_packets++;
155 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ 161 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
156 fep->stats.rx_bytes += pkt_len + 4; 162 fep->stats.rx_bytes += pkt_len + 4;
157 163
158 if (pkt_len <= fpi->rx_copybreak) { 164 if (pkt_len <= fpi->rx_copybreak) {
159 /* +2 to make IP header L1 cache aligned */ 165 /* +2 to make IP header L1 cache aligned */
160 skbn = dev_alloc_skb(pkt_len + 2); 166 skbn = dev_alloc_skb(pkt_len + 2);
161 if (skbn != NULL) { 167 if (skbn != NULL) {
162 skb_reserve(skbn, 2); /* align IP header */ 168 skb_reserve(skbn, 2); /* align IP header */
163 skb_copy_from_linear_data(skb, 169 skb_copy_from_linear_data(skb,
164 skbn->data, pkt_len); 170 skbn->data, pkt_len);
165 /* swap */ 171 /* swap */
166 skbt = skb; 172 skbt = skb;
167 skb = skbn; 173 skb = skbn;
168 skbn = skbt; 174 skbn = skbt;
169 } 175 }
170 } else { 176 } else {
171 skbn = dev_alloc_skb(ENET_RX_FRSIZE); 177 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
172 178
173 if (skbn) 179 if (skbn)
174 skb_align(skbn, ENET_RX_ALIGN); 180 skb_align(skbn, ENET_RX_ALIGN);
175 } 181 }
176 182
177 if (skbn != NULL) { 183 if (skbn != NULL) {
178 skb_put(skb, pkt_len); /* Make room */ 184 skb_put(skb, pkt_len); /* Make room */
179 skb->protocol = eth_type_trans(skb, dev); 185 skb->protocol = eth_type_trans(skb, dev);
180 received++; 186 received++;
181 netif_receive_skb(skb); 187 netif_receive_skb(skb);
182 } else { 188 } else {
183 printk(KERN_WARNING DRV_MODULE_NAME 189 printk(KERN_WARNING DRV_MODULE_NAME
184 ": %s Memory squeeze, dropping packet.\n", 190 ": %s Memory squeeze, dropping packet.\n",
185 dev->name); 191 dev->name);
186 fep->stats.rx_dropped++; 192 fep->stats.rx_dropped++;
187 skbn = skb; 193 skbn = skb;
188 } 194 }
189 } 195 }
190 196
191 fep->rx_skbuff[curidx] = skbn; 197 fep->rx_skbuff[curidx] = skbn;
192 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, 198 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
193 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 199 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
194 DMA_FROM_DEVICE)); 200 DMA_FROM_DEVICE));
195 CBDW_DATLEN(bdp, 0); 201 CBDW_DATLEN(bdp, 0);
196 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); 202 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
197 203
198 /* 204 /*
199 * Update BD pointer to next entry. 205 * Update BD pointer to next entry.
200 */ 206 */
201 if ((sc & BD_ENET_RX_WRAP) == 0) 207 if ((sc & BD_ENET_RX_WRAP) == 0)
202 bdp++; 208 bdp++;
203 else 209 else
204 bdp = fep->rx_bd_base; 210 bdp = fep->rx_bd_base;
205 211
206 (*fep->ops->rx_bd_done)(dev); 212 (*fep->ops->rx_bd_done)(dev);
207 213
208 if (received >= budget) 214 if (received >= budget)
209 break; 215 break;
210 } 216 }
211 217
212 fep->cur_rx = bdp; 218 fep->cur_rx = bdp;
213 219
214 if (received >= budget) { 220 if (received >= budget) {
215 /* done */ 221 /* done */
216 netif_rx_complete(dev, napi); 222 netif_rx_complete(dev, napi);
217 (*fep->ops->napi_enable_rx)(dev); 223 (*fep->ops->napi_enable_rx)(dev);
218 } 224 }
219 return received; 225 return received;
220 } 226 }
221 227
222 /* non NAPI receive function */ 228 /* non NAPI receive function */
223 static int fs_enet_rx_non_napi(struct net_device *dev) 229 static int fs_enet_rx_non_napi(struct net_device *dev)
224 { 230 {
225 struct fs_enet_private *fep = netdev_priv(dev); 231 struct fs_enet_private *fep = netdev_priv(dev);
226 const struct fs_platform_info *fpi = fep->fpi; 232 const struct fs_platform_info *fpi = fep->fpi;
227 cbd_t *bdp; 233 cbd_t *bdp;
228 struct sk_buff *skb, *skbn, *skbt; 234 struct sk_buff *skb, *skbn, *skbt;
229 int received = 0; 235 int received = 0;
230 u16 pkt_len, sc; 236 u16 pkt_len, sc;
231 int curidx; 237 int curidx;
232 /* 238 /*
233 * First, grab all of the stats for the incoming packet. 239 * First, grab all of the stats for the incoming packet.
234 * These get messed up if we get called due to a busy condition. 240 * These get messed up if we get called due to a busy condition.
235 */ 241 */
236 bdp = fep->cur_rx; 242 bdp = fep->cur_rx;
237 243
238 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { 244 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
239 245
240 curidx = bdp - fep->rx_bd_base; 246 curidx = bdp - fep->rx_bd_base;
241 247
242 /* 248 /*
243 * Since we have allocated space to hold a complete frame, 249 * Since we have allocated space to hold a complete frame,
244 * the last indicator should be set. 250 * the last indicator should be set.
245 */ 251 */
246 if ((sc & BD_ENET_RX_LAST) == 0) 252 if ((sc & BD_ENET_RX_LAST) == 0)
247 printk(KERN_WARNING DRV_MODULE_NAME 253 printk(KERN_WARNING DRV_MODULE_NAME
248 ": %s rcv is not +last\n", 254 ": %s rcv is not +last\n",
249 dev->name); 255 dev->name);
250 256
251 /* 257 /*
252 * Check for errors. 258 * Check for errors.
253 */ 259 */
254 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | 260 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
255 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { 261 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
256 fep->stats.rx_errors++; 262 fep->stats.rx_errors++;
257 /* Frame too long or too short. */ 263 /* Frame too long or too short. */
258 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) 264 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
259 fep->stats.rx_length_errors++; 265 fep->stats.rx_length_errors++;
260 /* Frame alignment */ 266 /* Frame alignment */
261 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 267 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
262 fep->stats.rx_frame_errors++; 268 fep->stats.rx_frame_errors++;
263 /* CRC Error */ 269 /* CRC Error */
264 if (sc & BD_ENET_RX_CR) 270 if (sc & BD_ENET_RX_CR)
265 fep->stats.rx_crc_errors++; 271 fep->stats.rx_crc_errors++;
266 /* FIFO overrun */ 272 /* FIFO overrun */
267 if (sc & BD_ENET_RX_OV) 273 if (sc & BD_ENET_RX_OV)
268 fep->stats.rx_crc_errors++; 274 fep->stats.rx_crc_errors++;
269 275
270 skb = fep->rx_skbuff[curidx]; 276 skb = fep->rx_skbuff[curidx];
271 277
272 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 278 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
273 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 279 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
274 DMA_FROM_DEVICE); 280 DMA_FROM_DEVICE);
275 281
276 skbn = skb; 282 skbn = skb;
277 283
278 } else { 284 } else {
279 285
280 skb = fep->rx_skbuff[curidx]; 286 skb = fep->rx_skbuff[curidx];
281 287
282 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 288 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
283 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 289 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
284 DMA_FROM_DEVICE); 290 DMA_FROM_DEVICE);
285 291
286 /* 292 /*
287 * Process the incoming frame. 293 * Process the incoming frame.
288 */ 294 */
289 fep->stats.rx_packets++; 295 fep->stats.rx_packets++;
290 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ 296 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
291 fep->stats.rx_bytes += pkt_len + 4; 297 fep->stats.rx_bytes += pkt_len + 4;
292 298
293 if (pkt_len <= fpi->rx_copybreak) { 299 if (pkt_len <= fpi->rx_copybreak) {
294 /* +2 to make IP header L1 cache aligned */ 300 /* +2 to make IP header L1 cache aligned */
295 skbn = dev_alloc_skb(pkt_len + 2); 301 skbn = dev_alloc_skb(pkt_len + 2);
296 if (skbn != NULL) { 302 if (skbn != NULL) {
297 skb_reserve(skbn, 2); /* align IP header */ 303 skb_reserve(skbn, 2); /* align IP header */
298 skb_copy_from_linear_data(skb, 304 skb_copy_from_linear_data(skb,
299 skbn->data, pkt_len); 305 skbn->data, pkt_len);
300 /* swap */ 306 /* swap */
301 skbt = skb; 307 skbt = skb;
302 skb = skbn; 308 skb = skbn;
303 skbn = skbt; 309 skbn = skbt;
304 } 310 }
305 } else { 311 } else {
306 skbn = dev_alloc_skb(ENET_RX_FRSIZE); 312 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
307 313
308 if (skbn) 314 if (skbn)
309 skb_align(skbn, ENET_RX_ALIGN); 315 skb_align(skbn, ENET_RX_ALIGN);
310 } 316 }
311 317
312 if (skbn != NULL) { 318 if (skbn != NULL) {
313 skb_put(skb, pkt_len); /* Make room */ 319 skb_put(skb, pkt_len); /* Make room */
314 skb->protocol = eth_type_trans(skb, dev); 320 skb->protocol = eth_type_trans(skb, dev);
315 received++; 321 received++;
316 netif_rx(skb); 322 netif_rx(skb);
317 } else { 323 } else {
318 printk(KERN_WARNING DRV_MODULE_NAME 324 printk(KERN_WARNING DRV_MODULE_NAME
319 ": %s Memory squeeze, dropping packet.\n", 325 ": %s Memory squeeze, dropping packet.\n",
320 dev->name); 326 dev->name);
321 fep->stats.rx_dropped++; 327 fep->stats.rx_dropped++;
322 skbn = skb; 328 skbn = skb;
323 } 329 }
324 } 330 }
325 331
326 fep->rx_skbuff[curidx] = skbn; 332 fep->rx_skbuff[curidx] = skbn;
327 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data, 333 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
328 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 334 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
329 DMA_FROM_DEVICE)); 335 DMA_FROM_DEVICE));
330 CBDW_DATLEN(bdp, 0); 336 CBDW_DATLEN(bdp, 0);
331 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); 337 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
332 338
333 /* 339 /*
334 * Update BD pointer to next entry. 340 * Update BD pointer to next entry.
335 */ 341 */
336 if ((sc & BD_ENET_RX_WRAP) == 0) 342 if ((sc & BD_ENET_RX_WRAP) == 0)
337 bdp++; 343 bdp++;
338 else 344 else
339 bdp = fep->rx_bd_base; 345 bdp = fep->rx_bd_base;
340 346
341 (*fep->ops->rx_bd_done)(dev); 347 (*fep->ops->rx_bd_done)(dev);
342 } 348 }
343 349
344 fep->cur_rx = bdp; 350 fep->cur_rx = bdp;
345 351
346 return 0; 352 return 0;
347 } 353 }
348 354
349 static void fs_enet_tx(struct net_device *dev) 355 static void fs_enet_tx(struct net_device *dev)
350 { 356 {
351 struct fs_enet_private *fep = netdev_priv(dev); 357 struct fs_enet_private *fep = netdev_priv(dev);
352 cbd_t *bdp; 358 cbd_t *bdp;
353 struct sk_buff *skb; 359 struct sk_buff *skb;
354 int dirtyidx, do_wake, do_restart; 360 int dirtyidx, do_wake, do_restart;
355 u16 sc; 361 u16 sc;
356 362
357 spin_lock(&fep->tx_lock); 363 spin_lock(&fep->tx_lock);
358 bdp = fep->dirty_tx; 364 bdp = fep->dirty_tx;
359 365
360 do_wake = do_restart = 0; 366 do_wake = do_restart = 0;
361 while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { 367 while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
362 dirtyidx = bdp - fep->tx_bd_base; 368 dirtyidx = bdp - fep->tx_bd_base;
363 369
364 if (fep->tx_free == fep->tx_ring) 370 if (fep->tx_free == fep->tx_ring)
365 break; 371 break;
366 372
367 skb = fep->tx_skbuff[dirtyidx]; 373 skb = fep->tx_skbuff[dirtyidx];
368 374
369 /* 375 /*
370 * Check for errors. 376 * Check for errors.
371 */ 377 */
372 if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | 378 if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
373 BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { 379 BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
374 380
375 if (sc & BD_ENET_TX_HB) /* No heartbeat */ 381 if (sc & BD_ENET_TX_HB) /* No heartbeat */
376 fep->stats.tx_heartbeat_errors++; 382 fep->stats.tx_heartbeat_errors++;
377 if (sc & BD_ENET_TX_LC) /* Late collision */ 383 if (sc & BD_ENET_TX_LC) /* Late collision */
378 fep->stats.tx_window_errors++; 384 fep->stats.tx_window_errors++;
379 if (sc & BD_ENET_TX_RL) /* Retrans limit */ 385 if (sc & BD_ENET_TX_RL) /* Retrans limit */
380 fep->stats.tx_aborted_errors++; 386 fep->stats.tx_aborted_errors++;
381 if (sc & BD_ENET_TX_UN) /* Underrun */ 387 if (sc & BD_ENET_TX_UN) /* Underrun */
382 fep->stats.tx_fifo_errors++; 388 fep->stats.tx_fifo_errors++;
383 if (sc & BD_ENET_TX_CSL) /* Carrier lost */ 389 if (sc & BD_ENET_TX_CSL) /* Carrier lost */
384 fep->stats.tx_carrier_errors++; 390 fep->stats.tx_carrier_errors++;
385 391
386 if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { 392 if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
387 fep->stats.tx_errors++; 393 fep->stats.tx_errors++;
388 do_restart = 1; 394 do_restart = 1;
389 } 395 }
390 } else 396 } else
391 fep->stats.tx_packets++; 397 fep->stats.tx_packets++;
392 398
393 if (sc & BD_ENET_TX_READY) 399 if (sc & BD_ENET_TX_READY)
394 printk(KERN_WARNING DRV_MODULE_NAME 400 printk(KERN_WARNING DRV_MODULE_NAME
395 ": %s HEY! Enet xmit interrupt and TX_READY.\n", 401 ": %s HEY! Enet xmit interrupt and TX_READY.\n",
396 dev->name); 402 dev->name);
397 403
398 /* 404 /*
399 * Deferred means some collisions occurred during transmit, 405 * Deferred means some collisions occurred during transmit,
400 * but we eventually sent the packet OK. 406 * but we eventually sent the packet OK.
401 */ 407 */
402 if (sc & BD_ENET_TX_DEF) 408 if (sc & BD_ENET_TX_DEF)
403 fep->stats.collisions++; 409 fep->stats.collisions++;
404 410
405 /* unmap */ 411 /* unmap */
406 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 412 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
407 skb->len, DMA_TO_DEVICE); 413 skb->len, DMA_TO_DEVICE);
408 414
409 /* 415 /*
410 * Free the sk buffer associated with this last transmit. 416 * Free the sk buffer associated with this last transmit.
411 */ 417 */
412 dev_kfree_skb_irq(skb); 418 dev_kfree_skb_irq(skb);
413 fep->tx_skbuff[dirtyidx] = NULL; 419 fep->tx_skbuff[dirtyidx] = NULL;
414 420
415 /* 421 /*
416 * Update pointer to next buffer descriptor to be transmitted. 422 * Update pointer to next buffer descriptor to be transmitted.
417 */ 423 */
418 if ((sc & BD_ENET_TX_WRAP) == 0) 424 if ((sc & BD_ENET_TX_WRAP) == 0)
419 bdp++; 425 bdp++;
420 else 426 else
421 bdp = fep->tx_bd_base; 427 bdp = fep->tx_bd_base;
422 428
423 /* 429 /*
424 * Since we have freed up a buffer, the ring is no longer 430 * Since we have freed up a buffer, the ring is no longer
425 * full. 431 * full.
426 */ 432 */
427 if (!fep->tx_free++) 433 if (!fep->tx_free++)
428 do_wake = 1; 434 do_wake = 1;
429 } 435 }
430 436
431 fep->dirty_tx = bdp; 437 fep->dirty_tx = bdp;
432 438
433 if (do_restart) 439 if (do_restart)
434 (*fep->ops->tx_restart)(dev); 440 (*fep->ops->tx_restart)(dev);
435 441
436 spin_unlock(&fep->tx_lock); 442 spin_unlock(&fep->tx_lock);
437 443
438 if (do_wake) 444 if (do_wake)
439 netif_wake_queue(dev); 445 netif_wake_queue(dev);
440 } 446 }
441 447
442 /* 448 /*
443 * The interrupt handler. 449 * The interrupt handler.
444 * This is called from the MPC core interrupt. 450 * This is called from the MPC core interrupt.
445 */ 451 */
446 static irqreturn_t 452 static irqreturn_t
447 fs_enet_interrupt(int irq, void *dev_id) 453 fs_enet_interrupt(int irq, void *dev_id)
448 { 454 {
449 struct net_device *dev = dev_id; 455 struct net_device *dev = dev_id;
450 struct fs_enet_private *fep; 456 struct fs_enet_private *fep;
451 const struct fs_platform_info *fpi; 457 const struct fs_platform_info *fpi;
452 u32 int_events; 458 u32 int_events;
453 u32 int_clr_events; 459 u32 int_clr_events;
454 int nr, napi_ok; 460 int nr, napi_ok;
455 int handled; 461 int handled;
456 462
457 fep = netdev_priv(dev); 463 fep = netdev_priv(dev);
458 fpi = fep->fpi; 464 fpi = fep->fpi;
459 465
460 nr = 0; 466 nr = 0;
461 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) { 467 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
462 nr++; 468 nr++;
463 469
464 int_clr_events = int_events; 470 int_clr_events = int_events;
465 if (fpi->use_napi) 471 if (fpi->use_napi)
466 int_clr_events &= ~fep->ev_napi_rx; 472 int_clr_events &= ~fep->ev_napi_rx;
467 473
468 (*fep->ops->clear_int_events)(dev, int_clr_events); 474 (*fep->ops->clear_int_events)(dev, int_clr_events);
469 475
470 if (int_events & fep->ev_err) 476 if (int_events & fep->ev_err)
471 (*fep->ops->ev_error)(dev, int_events); 477 (*fep->ops->ev_error)(dev, int_events);
472 478
473 if (int_events & fep->ev_rx) { 479 if (int_events & fep->ev_rx) {
474 if (!fpi->use_napi) 480 if (!fpi->use_napi)
475 fs_enet_rx_non_napi(dev); 481 fs_enet_rx_non_napi(dev);
476 else { 482 else {
477 napi_ok = napi_schedule_prep(&fep->napi); 483 napi_ok = napi_schedule_prep(&fep->napi);
478 484
479 (*fep->ops->napi_disable_rx)(dev); 485 (*fep->ops->napi_disable_rx)(dev);
480 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx); 486 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
481 487
482 /* NOTE: it is possible for FCCs in NAPI mode */ 488 /* NOTE: it is possible for FCCs in NAPI mode */
483 /* to submit a spurious interrupt while in poll */ 489 /* to submit a spurious interrupt while in poll */
484 if (napi_ok) 490 if (napi_ok)
485 __netif_rx_schedule(dev, &fep->napi); 491 __netif_rx_schedule(dev, &fep->napi);
486 } 492 }
487 } 493 }
488 494
489 if (int_events & fep->ev_tx) 495 if (int_events & fep->ev_tx)
490 fs_enet_tx(dev); 496 fs_enet_tx(dev);
491 } 497 }
492 498
493 handled = nr > 0; 499 handled = nr > 0;
494 return IRQ_RETVAL(handled); 500 return IRQ_RETVAL(handled);
495 } 501 }
496 502
497 void fs_init_bds(struct net_device *dev) 503 void fs_init_bds(struct net_device *dev)
498 { 504 {
499 struct fs_enet_private *fep = netdev_priv(dev); 505 struct fs_enet_private *fep = netdev_priv(dev);
500 cbd_t *bdp; 506 cbd_t *bdp;
501 struct sk_buff *skb; 507 struct sk_buff *skb;
502 int i; 508 int i;
503 509
504 fs_cleanup_bds(dev); 510 fs_cleanup_bds(dev);
505 511
506 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; 512 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
507 fep->tx_free = fep->tx_ring; 513 fep->tx_free = fep->tx_ring;
508 fep->cur_rx = fep->rx_bd_base; 514 fep->cur_rx = fep->rx_bd_base;
509 515
510 /* 516 /*
511 * Initialize the receive buffer descriptors. 517 * Initialize the receive buffer descriptors.
512 */ 518 */
513 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 519 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
514 skb = dev_alloc_skb(ENET_RX_FRSIZE); 520 skb = dev_alloc_skb(ENET_RX_FRSIZE);
515 if (skb == NULL) { 521 if (skb == NULL) {
516 printk(KERN_WARNING DRV_MODULE_NAME 522 printk(KERN_WARNING DRV_MODULE_NAME
517 ": %s Memory squeeze, unable to allocate skb\n", 523 ": %s Memory squeeze, unable to allocate skb\n",
518 dev->name); 524 dev->name);
519 break; 525 break;
520 } 526 }
521 skb_align(skb, ENET_RX_ALIGN); 527 skb_align(skb, ENET_RX_ALIGN);
522 fep->rx_skbuff[i] = skb; 528 fep->rx_skbuff[i] = skb;
523 CBDW_BUFADDR(bdp, 529 CBDW_BUFADDR(bdp,
524 dma_map_single(fep->dev, skb->data, 530 dma_map_single(fep->dev, skb->data,
525 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 531 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
526 DMA_FROM_DEVICE)); 532 DMA_FROM_DEVICE));
527 CBDW_DATLEN(bdp, 0); /* zero */ 533 CBDW_DATLEN(bdp, 0); /* zero */
528 CBDW_SC(bdp, BD_ENET_RX_EMPTY | 534 CBDW_SC(bdp, BD_ENET_RX_EMPTY |
529 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); 535 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
530 } 536 }
531 /* 537 /*
532 * if we failed, fillup remainder 538 * if we failed, fillup remainder
533 */ 539 */
534 for (; i < fep->rx_ring; i++, bdp++) { 540 for (; i < fep->rx_ring; i++, bdp++) {
535 fep->rx_skbuff[i] = NULL; 541 fep->rx_skbuff[i] = NULL;
536 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); 542 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
537 } 543 }
538 544
539 /* 545 /*
540 * ...and the same for transmit. 546 * ...and the same for transmit.
541 */ 547 */
542 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { 548 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
543 fep->tx_skbuff[i] = NULL; 549 fep->tx_skbuff[i] = NULL;
544 CBDW_BUFADDR(bdp, 0); 550 CBDW_BUFADDR(bdp, 0);
545 CBDW_DATLEN(bdp, 0); 551 CBDW_DATLEN(bdp, 0);
546 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); 552 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
547 } 553 }
548 } 554 }
549 555
550 void fs_cleanup_bds(struct net_device *dev) 556 void fs_cleanup_bds(struct net_device *dev)
551 { 557 {
552 struct fs_enet_private *fep = netdev_priv(dev); 558 struct fs_enet_private *fep = netdev_priv(dev);
553 struct sk_buff *skb; 559 struct sk_buff *skb;
554 cbd_t *bdp; 560 cbd_t *bdp;
555 int i; 561 int i;
556 562
557 /* 563 /*
558 * Reset SKB transmit buffers. 564 * Reset SKB transmit buffers.
559 */ 565 */
560 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { 566 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
561 if ((skb = fep->tx_skbuff[i]) == NULL) 567 if ((skb = fep->tx_skbuff[i]) == NULL)
562 continue; 568 continue;
563 569
564 /* unmap */ 570 /* unmap */
565 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 571 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
566 skb->len, DMA_TO_DEVICE); 572 skb->len, DMA_TO_DEVICE);
567 573
568 fep->tx_skbuff[i] = NULL; 574 fep->tx_skbuff[i] = NULL;
569 dev_kfree_skb(skb); 575 dev_kfree_skb(skb);
570 } 576 }
571 577
572 /* 578 /*
573 * Reset SKB receive buffers 579 * Reset SKB receive buffers
574 */ 580 */
575 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 581 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
576 if ((skb = fep->rx_skbuff[i]) == NULL) 582 if ((skb = fep->rx_skbuff[i]) == NULL)
577 continue; 583 continue;
578 584
579 /* unmap */ 585 /* unmap */
580 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), 586 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
581 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), 587 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
582 DMA_FROM_DEVICE); 588 DMA_FROM_DEVICE);
583 589
584 fep->rx_skbuff[i] = NULL; 590 fep->rx_skbuff[i] = NULL;
585 591
586 dev_kfree_skb(skb); 592 dev_kfree_skb(skb);
587 } 593 }
588 } 594 }
589 595
590 /**********************************************************************************/ 596 /**********************************************************************************/
591 597
592 static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 598 static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
593 { 599 {
594 struct fs_enet_private *fep = netdev_priv(dev); 600 struct fs_enet_private *fep = netdev_priv(dev);
595 cbd_t *bdp; 601 cbd_t *bdp;
596 int curidx; 602 int curidx;
597 u16 sc; 603 u16 sc;
598 unsigned long flags; 604 unsigned long flags;
599 605
600 spin_lock_irqsave(&fep->tx_lock, flags); 606 spin_lock_irqsave(&fep->tx_lock, flags);
601 607
602 /* 608 /*
603 * Fill in a Tx ring entry 609 * Fill in a Tx ring entry
604 */ 610 */
605 bdp = fep->cur_tx; 611 bdp = fep->cur_tx;
606 612
607 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { 613 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
608 netif_stop_queue(dev); 614 netif_stop_queue(dev);
609 spin_unlock_irqrestore(&fep->tx_lock, flags); 615 spin_unlock_irqrestore(&fep->tx_lock, flags);
610 616
611 /* 617 /*
612 * Ooops. All transmit buffers are full. Bail out. 618 * Ooops. All transmit buffers are full. Bail out.
613 * This should not happen, since the tx queue should be stopped. 619 * This should not happen, since the tx queue should be stopped.
614 */ 620 */
615 printk(KERN_WARNING DRV_MODULE_NAME 621 printk(KERN_WARNING DRV_MODULE_NAME
616 ": %s tx queue full!.\n", dev->name); 622 ": %s tx queue full!.\n", dev->name);
617 return NETDEV_TX_BUSY; 623 return NETDEV_TX_BUSY;
618 } 624 }
619 625
620 curidx = bdp - fep->tx_bd_base; 626 curidx = bdp - fep->tx_bd_base;
621 /* 627 /*
622 * Clear all of the status flags. 628 * Clear all of the status flags.
623 */ 629 */
624 CBDC_SC(bdp, BD_ENET_TX_STATS); 630 CBDC_SC(bdp, BD_ENET_TX_STATS);
625 631
626 /* 632 /*
627 * Save skb pointer. 633 * Save skb pointer.
628 */ 634 */
629 fep->tx_skbuff[curidx] = skb; 635 fep->tx_skbuff[curidx] = skb;
630 636
631 fep->stats.tx_bytes += skb->len; 637 fep->stats.tx_bytes += skb->len;
632 638
633 /* 639 /*
634 * Push the data cache so the CPM does not get stale memory data. 640 * Push the data cache so the CPM does not get stale memory data.
635 */ 641 */
636 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, 642 CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
637 skb->data, skb->len, DMA_TO_DEVICE)); 643 skb->data, skb->len, DMA_TO_DEVICE));
638 CBDW_DATLEN(bdp, skb->len); 644 CBDW_DATLEN(bdp, skb->len);
639 645
640 dev->trans_start = jiffies; 646 dev->trans_start = jiffies;
641 647
642 /* 648 /*
643 * If this was the last BD in the ring, start at the beginning again. 649 * If this was the last BD in the ring, start at the beginning again.
644 */ 650 */
645 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 651 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
646 fep->cur_tx++; 652 fep->cur_tx++;
647 else 653 else
648 fep->cur_tx = fep->tx_bd_base; 654 fep->cur_tx = fep->tx_bd_base;
649 655
650 if (!--fep->tx_free) 656 if (!--fep->tx_free)
651 netif_stop_queue(dev); 657 netif_stop_queue(dev);
652 658
653 /* Trigger transmission start */ 659 /* Trigger transmission start */
654 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | 660 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
655 BD_ENET_TX_LAST | BD_ENET_TX_TC; 661 BD_ENET_TX_LAST | BD_ENET_TX_TC;
656 662
657 /* note that while FEC does not have this bit 663 /* note that while FEC does not have this bit
658 * it marks it as available for software use 664 * it marks it as available for software use
659 * yay for hw reuse :) */ 665 * yay for hw reuse :) */
660 if (skb->len <= 60) 666 if (skb->len <= 60)
661 sc |= BD_ENET_TX_PAD; 667 sc |= BD_ENET_TX_PAD;
662 CBDS_SC(bdp, sc); 668 CBDS_SC(bdp, sc);
663 669
664 (*fep->ops->tx_kickstart)(dev); 670 (*fep->ops->tx_kickstart)(dev);
665 671
666 spin_unlock_irqrestore(&fep->tx_lock, flags); 672 spin_unlock_irqrestore(&fep->tx_lock, flags);
667 673
668 return NETDEV_TX_OK; 674 return NETDEV_TX_OK;
669 } 675 }
670 676
671 static int fs_request_irq(struct net_device *dev, int irq, const char *name, 677 static int fs_request_irq(struct net_device *dev, int irq, const char *name,
672 irq_handler_t irqf) 678 irq_handler_t irqf)
673 { 679 {
674 struct fs_enet_private *fep = netdev_priv(dev); 680 struct fs_enet_private *fep = netdev_priv(dev);
675 681
676 (*fep->ops->pre_request_irq)(dev, irq); 682 (*fep->ops->pre_request_irq)(dev, irq);
677 return request_irq(irq, irqf, IRQF_SHARED, name, dev); 683 return request_irq(irq, irqf, IRQF_SHARED, name, dev);
678 } 684 }
679 685
680 static void fs_free_irq(struct net_device *dev, int irq) 686 static void fs_free_irq(struct net_device *dev, int irq)
681 { 687 {
682 struct fs_enet_private *fep = netdev_priv(dev); 688 struct fs_enet_private *fep = netdev_priv(dev);
683 689
684 free_irq(irq, dev); 690 free_irq(irq, dev);
685 (*fep->ops->post_free_irq)(dev, irq); 691 (*fep->ops->post_free_irq)(dev, irq);
686 } 692 }
687 693
688 static void fs_timeout(struct net_device *dev) 694 static void fs_timeout(struct net_device *dev)
689 { 695 {
690 struct fs_enet_private *fep = netdev_priv(dev); 696 struct fs_enet_private *fep = netdev_priv(dev);
691 unsigned long flags; 697 unsigned long flags;
692 int wake = 0; 698 int wake = 0;
693 699
694 fep->stats.tx_errors++; 700 fep->stats.tx_errors++;
695 701
696 spin_lock_irqsave(&fep->lock, flags); 702 spin_lock_irqsave(&fep->lock, flags);
697 703
698 if (dev->flags & IFF_UP) { 704 if (dev->flags & IFF_UP) {
699 phy_stop(fep->phydev); 705 phy_stop(fep->phydev);
700 (*fep->ops->stop)(dev); 706 (*fep->ops->stop)(dev);
701 (*fep->ops->restart)(dev); 707 (*fep->ops->restart)(dev);
702 phy_start(fep->phydev); 708 phy_start(fep->phydev);
703 } 709 }
704 710
705 phy_start(fep->phydev); 711 phy_start(fep->phydev);
706 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); 712 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
707 spin_unlock_irqrestore(&fep->lock, flags); 713 spin_unlock_irqrestore(&fep->lock, flags);
708 714
709 if (wake) 715 if (wake)
710 netif_wake_queue(dev); 716 netif_wake_queue(dev);
711 } 717 }
712 718
713 /*----------------------------------------------------------------------------- 719 /*-----------------------------------------------------------------------------
714 * generic link-change handler - should be sufficient for most cases 720 * generic link-change handler - should be sufficient for most cases
715 *-----------------------------------------------------------------------------*/ 721 *-----------------------------------------------------------------------------*/
716 static void generic_adjust_link(struct net_device *dev) 722 static void generic_adjust_link(struct net_device *dev)
717 { 723 {
718 struct fs_enet_private *fep = netdev_priv(dev); 724 struct fs_enet_private *fep = netdev_priv(dev);
719 struct phy_device *phydev = fep->phydev; 725 struct phy_device *phydev = fep->phydev;
720 int new_state = 0; 726 int new_state = 0;
721 727
722 if (phydev->link) { 728 if (phydev->link) {
723 /* adjust to duplex mode */ 729 /* adjust to duplex mode */
724 if (phydev->duplex != fep->oldduplex) { 730 if (phydev->duplex != fep->oldduplex) {
725 new_state = 1; 731 new_state = 1;
726 fep->oldduplex = phydev->duplex; 732 fep->oldduplex = phydev->duplex;
727 } 733 }
728 734
729 if (phydev->speed != fep->oldspeed) { 735 if (phydev->speed != fep->oldspeed) {
730 new_state = 1; 736 new_state = 1;
731 fep->oldspeed = phydev->speed; 737 fep->oldspeed = phydev->speed;
732 } 738 }
733 739
734 if (!fep->oldlink) { 740 if (!fep->oldlink) {
735 new_state = 1; 741 new_state = 1;
736 fep->oldlink = 1; 742 fep->oldlink = 1;
737 netif_schedule(dev); 743 netif_schedule(dev);
738 netif_carrier_on(dev); 744 netif_carrier_on(dev);
739 netif_start_queue(dev); 745 netif_start_queue(dev);
740 } 746 }
741 747
742 if (new_state) 748 if (new_state)
743 fep->ops->restart(dev); 749 fep->ops->restart(dev);
744 } else if (fep->oldlink) { 750 } else if (fep->oldlink) {
745 new_state = 1; 751 new_state = 1;
746 fep->oldlink = 0; 752 fep->oldlink = 0;
747 fep->oldspeed = 0; 753 fep->oldspeed = 0;
748 fep->oldduplex = -1; 754 fep->oldduplex = -1;
749 netif_carrier_off(dev); 755 netif_carrier_off(dev);
750 netif_stop_queue(dev); 756 netif_stop_queue(dev);
751 } 757 }
752 758
753 if (new_state && netif_msg_link(fep)) 759 if (new_state && netif_msg_link(fep))
754 phy_print_status(phydev); 760 phy_print_status(phydev);
755 } 761 }
756 762
757 763
758 static void fs_adjust_link(struct net_device *dev) 764 static void fs_adjust_link(struct net_device *dev)
759 { 765 {
760 struct fs_enet_private *fep = netdev_priv(dev); 766 struct fs_enet_private *fep = netdev_priv(dev);
761 unsigned long flags; 767 unsigned long flags;
762 768
763 spin_lock_irqsave(&fep->lock, flags); 769 spin_lock_irqsave(&fep->lock, flags);
764 770
765 if(fep->ops->adjust_link) 771 if(fep->ops->adjust_link)
766 fep->ops->adjust_link(dev); 772 fep->ops->adjust_link(dev);
767 else 773 else
768 generic_adjust_link(dev); 774 generic_adjust_link(dev);
769 775
770 spin_unlock_irqrestore(&fep->lock, flags); 776 spin_unlock_irqrestore(&fep->lock, flags);
771 } 777 }
772 778
773 static int fs_init_phy(struct net_device *dev) 779 static int fs_init_phy(struct net_device *dev)
774 { 780 {
775 struct fs_enet_private *fep = netdev_priv(dev); 781 struct fs_enet_private *fep = netdev_priv(dev);
776 struct phy_device *phydev; 782 struct phy_device *phydev;
777 783
778 fep->oldlink = 0; 784 fep->oldlink = 0;
779 fep->oldspeed = 0; 785 fep->oldspeed = 0;
780 fep->oldduplex = -1; 786 fep->oldduplex = -1;
781 if(fep->fpi->bus_id) 787 if(fep->fpi->bus_id)
782 phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0, 788 phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
783 PHY_INTERFACE_MODE_MII); 789 PHY_INTERFACE_MODE_MII);
784 else { 790 else {
785 printk("No phy bus ID specified in BSP code\n"); 791 printk("No phy bus ID specified in BSP code\n");
786 return -EINVAL; 792 return -EINVAL;
787 } 793 }
788 if (IS_ERR(phydev)) { 794 if (IS_ERR(phydev)) {
789 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 795 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
790 return PTR_ERR(phydev); 796 return PTR_ERR(phydev);
791 } 797 }
792 798
793 fep->phydev = phydev; 799 fep->phydev = phydev;
794 800
795 return 0; 801 return 0;
796 } 802 }
797 803
798 static int fs_enet_open(struct net_device *dev) 804 static int fs_enet_open(struct net_device *dev)
799 { 805 {
800 struct fs_enet_private *fep = netdev_priv(dev); 806 struct fs_enet_private *fep = netdev_priv(dev);
801 int r; 807 int r;
802 int err; 808 int err;
803 809
804 napi_enable(&fep->napi); 810 napi_enable(&fep->napi);
805 811
806 /* Install our interrupt handler. */ 812 /* Install our interrupt handler. */
807 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); 813 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
808 if (r != 0) { 814 if (r != 0) {
809 printk(KERN_ERR DRV_MODULE_NAME 815 printk(KERN_ERR DRV_MODULE_NAME
810 ": %s Could not allocate FS_ENET IRQ!", dev->name); 816 ": %s Could not allocate FS_ENET IRQ!", dev->name);
811 napi_disable(&fep->napi); 817 napi_disable(&fep->napi);
812 return -EINVAL; 818 return -EINVAL;
813 } 819 }
814 820
815 err = fs_init_phy(dev); 821 err = fs_init_phy(dev);
816 if(err) { 822 if(err) {
817 napi_disable(&fep->napi); 823 napi_disable(&fep->napi);
818 return err; 824 return err;
819 } 825 }
820 phy_start(fep->phydev); 826 phy_start(fep->phydev);
821 827
822 return 0; 828 return 0;
823 } 829 }
824 830
825 static int fs_enet_close(struct net_device *dev) 831 static int fs_enet_close(struct net_device *dev)
826 { 832 {
827 struct fs_enet_private *fep = netdev_priv(dev); 833 struct fs_enet_private *fep = netdev_priv(dev);
828 unsigned long flags; 834 unsigned long flags;
829 835
830 netif_stop_queue(dev); 836 netif_stop_queue(dev);
831 netif_carrier_off(dev); 837 netif_carrier_off(dev);
832 napi_disable(&fep->napi); 838 napi_disable(&fep->napi);
833 phy_stop(fep->phydev); 839 phy_stop(fep->phydev);
834 840
835 spin_lock_irqsave(&fep->lock, flags); 841 spin_lock_irqsave(&fep->lock, flags);
836 spin_lock(&fep->tx_lock); 842 spin_lock(&fep->tx_lock);
837 (*fep->ops->stop)(dev); 843 (*fep->ops->stop)(dev);
838 spin_unlock(&fep->tx_lock); 844 spin_unlock(&fep->tx_lock);
839 spin_unlock_irqrestore(&fep->lock, flags); 845 spin_unlock_irqrestore(&fep->lock, flags);
840 846
841 /* release any irqs */ 847 /* release any irqs */
842 phy_disconnect(fep->phydev); 848 phy_disconnect(fep->phydev);
843 fep->phydev = NULL; 849 fep->phydev = NULL;
844 fs_free_irq(dev, fep->interrupt); 850 fs_free_irq(dev, fep->interrupt);
845 851
846 return 0; 852 return 0;
847 } 853 }
848 854
849 static struct net_device_stats *fs_enet_get_stats(struct net_device *dev) 855 static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
850 { 856 {
851 struct fs_enet_private *fep = netdev_priv(dev); 857 struct fs_enet_private *fep = netdev_priv(dev);
852 return &fep->stats; 858 return &fep->stats;
853 } 859 }
854 860
855 /*************************************************************************/ 861 /*************************************************************************/
856 862
857 static void fs_get_drvinfo(struct net_device *dev, 863 static void fs_get_drvinfo(struct net_device *dev,
858 struct ethtool_drvinfo *info) 864 struct ethtool_drvinfo *info)
859 { 865 {
860 strcpy(info->driver, DRV_MODULE_NAME); 866 strcpy(info->driver, DRV_MODULE_NAME);
861 strcpy(info->version, DRV_MODULE_VERSION); 867 strcpy(info->version, DRV_MODULE_VERSION);
862 } 868 }
863 869
864 static int fs_get_regs_len(struct net_device *dev) 870 static int fs_get_regs_len(struct net_device *dev)
865 { 871 {
866 struct fs_enet_private *fep = netdev_priv(dev); 872 struct fs_enet_private *fep = netdev_priv(dev);
867 873
868 return (*fep->ops->get_regs_len)(dev); 874 return (*fep->ops->get_regs_len)(dev);
869 } 875 }
870 876
871 static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, 877 static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
872 void *p) 878 void *p)
873 { 879 {
874 struct fs_enet_private *fep = netdev_priv(dev); 880 struct fs_enet_private *fep = netdev_priv(dev);
875 unsigned long flags; 881 unsigned long flags;
876 int r, len; 882 int r, len;
877 883
878 len = regs->len; 884 len = regs->len;
879 885
880 spin_lock_irqsave(&fep->lock, flags); 886 spin_lock_irqsave(&fep->lock, flags);
881 r = (*fep->ops->get_regs)(dev, p, &len); 887 r = (*fep->ops->get_regs)(dev, p, &len);
882 spin_unlock_irqrestore(&fep->lock, flags); 888 spin_unlock_irqrestore(&fep->lock, flags);
883 889
884 if (r == 0) 890 if (r == 0)
885 regs->version = 0; 891 regs->version = 0;
886 } 892 }
887 893
888 static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 894 static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
889 { 895 {
890 struct fs_enet_private *fep = netdev_priv(dev); 896 struct fs_enet_private *fep = netdev_priv(dev);
891 return phy_ethtool_gset(fep->phydev, cmd); 897 return phy_ethtool_gset(fep->phydev, cmd);
892 } 898 }
893 899
894 static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 900 static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
895 { 901 {
896 struct fs_enet_private *fep = netdev_priv(dev); 902 struct fs_enet_private *fep = netdev_priv(dev);
897 phy_ethtool_sset(fep->phydev, cmd); 903 phy_ethtool_sset(fep->phydev, cmd);
898 return 0; 904 return 0;
899 } 905 }
900 906
901 static int fs_nway_reset(struct net_device *dev) 907 static int fs_nway_reset(struct net_device *dev)
902 { 908 {
903 return 0; 909 return 0;
904 } 910 }
905 911
906 static u32 fs_get_msglevel(struct net_device *dev) 912 static u32 fs_get_msglevel(struct net_device *dev)
907 { 913 {
908 struct fs_enet_private *fep = netdev_priv(dev); 914 struct fs_enet_private *fep = netdev_priv(dev);
909 return fep->msg_enable; 915 return fep->msg_enable;
910 } 916 }
911 917
912 static void fs_set_msglevel(struct net_device *dev, u32 value) 918 static void fs_set_msglevel(struct net_device *dev, u32 value)
913 { 919 {
914 struct fs_enet_private *fep = netdev_priv(dev); 920 struct fs_enet_private *fep = netdev_priv(dev);
915 fep->msg_enable = value; 921 fep->msg_enable = value;
916 } 922 }
917 923
918 static const struct ethtool_ops fs_ethtool_ops = { 924 static const struct ethtool_ops fs_ethtool_ops = {
919 .get_drvinfo = fs_get_drvinfo, 925 .get_drvinfo = fs_get_drvinfo,
920 .get_regs_len = fs_get_regs_len, 926 .get_regs_len = fs_get_regs_len,
921 .get_settings = fs_get_settings, 927 .get_settings = fs_get_settings,
922 .set_settings = fs_set_settings, 928 .set_settings = fs_set_settings,
923 .nway_reset = fs_nway_reset, 929 .nway_reset = fs_nway_reset,
924 .get_link = ethtool_op_get_link, 930 .get_link = ethtool_op_get_link,
925 .get_msglevel = fs_get_msglevel, 931 .get_msglevel = fs_get_msglevel,
926 .set_msglevel = fs_set_msglevel, 932 .set_msglevel = fs_set_msglevel,
927 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ 933 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
928 .set_sg = ethtool_op_set_sg, 934 .set_sg = ethtool_op_set_sg,
929 .get_regs = fs_get_regs, 935 .get_regs = fs_get_regs,
930 }; 936 };
931 937
932 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 938 static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
933 { 939 {
934 struct fs_enet_private *fep = netdev_priv(dev); 940 struct fs_enet_private *fep = netdev_priv(dev);
935 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; 941 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
936 unsigned long flags; 942 unsigned long flags;
937 int rc; 943 int rc;
938 944
939 if (!netif_running(dev)) 945 if (!netif_running(dev))
940 return -EINVAL; 946 return -EINVAL;
941 947
942 spin_lock_irqsave(&fep->lock, flags); 948 spin_lock_irqsave(&fep->lock, flags);
943 rc = phy_mii_ioctl(fep->phydev, mii, cmd); 949 rc = phy_mii_ioctl(fep->phydev, mii, cmd);
944 spin_unlock_irqrestore(&fep->lock, flags); 950 spin_unlock_irqrestore(&fep->lock, flags);
945 return rc; 951 return rc;
946 } 952 }
947 953
948 extern int fs_mii_connect(struct net_device *dev); 954 extern int fs_mii_connect(struct net_device *dev);
949 extern void fs_mii_disconnect(struct net_device *dev); 955 extern void fs_mii_disconnect(struct net_device *dev);
950 956
957 #ifndef CONFIG_PPC_CPM_NEW_BINDING
951 static struct net_device *fs_init_instance(struct device *dev, 958 static struct net_device *fs_init_instance(struct device *dev,
952 struct fs_platform_info *fpi) 959 struct fs_platform_info *fpi)
953 { 960 {
954 struct net_device *ndev = NULL; 961 struct net_device *ndev = NULL;
955 struct fs_enet_private *fep = NULL; 962 struct fs_enet_private *fep = NULL;
956 int privsize, i, r, err = 0, registered = 0; 963 int privsize, i, r, err = 0, registered = 0;
957 964
958 fpi->fs_no = fs_get_id(fpi); 965 fpi->fs_no = fs_get_id(fpi);
959 /* guard */ 966 /* guard */
960 if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX) 967 if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
961 return ERR_PTR(-EINVAL); 968 return ERR_PTR(-EINVAL);
962 969
963 privsize = sizeof(*fep) + (sizeof(struct sk_buff **) * 970 privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
964 (fpi->rx_ring + fpi->tx_ring)); 971 (fpi->rx_ring + fpi->tx_ring));
965 972
966 ndev = alloc_etherdev(privsize); 973 ndev = alloc_etherdev(privsize);
967 if (!ndev) { 974 if (!ndev) {
968 err = -ENOMEM; 975 err = -ENOMEM;
969 goto err; 976 goto err;
970 } 977 }
971 978
972 fep = netdev_priv(ndev); 979 fep = netdev_priv(ndev);
973 980
974 fep->dev = dev; 981 fep->dev = dev;
975 dev_set_drvdata(dev, ndev); 982 dev_set_drvdata(dev, ndev);
976 fep->fpi = fpi; 983 fep->fpi = fpi;
977 if (fpi->init_ioports) 984 if (fpi->init_ioports)
978 fpi->init_ioports((struct fs_platform_info *)fpi); 985 fpi->init_ioports((struct fs_platform_info *)fpi);
979 986
980 #ifdef CONFIG_FS_ENET_HAS_FEC 987 #ifdef CONFIG_FS_ENET_HAS_FEC
981 if (fs_get_fec_index(fpi->fs_no) >= 0) 988 if (fs_get_fec_index(fpi->fs_no) >= 0)
982 fep->ops = &fs_fec_ops; 989 fep->ops = &fs_fec_ops;
983 #endif 990 #endif
984 991
985 #ifdef CONFIG_FS_ENET_HAS_SCC 992 #ifdef CONFIG_FS_ENET_HAS_SCC
986 if (fs_get_scc_index(fpi->fs_no) >=0) 993 if (fs_get_scc_index(fpi->fs_no) >=0)
987 fep->ops = &fs_scc_ops; 994 fep->ops = &fs_scc_ops;
988 #endif 995 #endif
989 996
990 #ifdef CONFIG_FS_ENET_HAS_FCC 997 #ifdef CONFIG_FS_ENET_HAS_FCC
991 if (fs_get_fcc_index(fpi->fs_no) >= 0) 998 if (fs_get_fcc_index(fpi->fs_no) >= 0)
992 fep->ops = &fs_fcc_ops; 999 fep->ops = &fs_fcc_ops;
993 #endif 1000 #endif
994 1001
995 if (fep->ops == NULL) { 1002 if (fep->ops == NULL) {
996 printk(KERN_ERR DRV_MODULE_NAME 1003 printk(KERN_ERR DRV_MODULE_NAME
997 ": %s No matching ops found (%d).\n", 1004 ": %s No matching ops found (%d).\n",
998 ndev->name, fpi->fs_no); 1005 ndev->name, fpi->fs_no);
999 err = -EINVAL; 1006 err = -EINVAL;
1000 goto err; 1007 goto err;
1001 } 1008 }
1002 1009
1003 r = (*fep->ops->setup_data)(ndev); 1010 r = (*fep->ops->setup_data)(ndev);
1004 if (r != 0) { 1011 if (r != 0) {
1005 printk(KERN_ERR DRV_MODULE_NAME 1012 printk(KERN_ERR DRV_MODULE_NAME
1006 ": %s setup_data failed\n", 1013 ": %s setup_data failed\n",
1007 ndev->name); 1014 ndev->name);
1008 err = r; 1015 err = r;
1009 goto err; 1016 goto err;
1010 } 1017 }
1011 1018
1012 /* point rx_skbuff, tx_skbuff */ 1019 /* point rx_skbuff, tx_skbuff */
1013 fep->rx_skbuff = (struct sk_buff **)&fep[1]; 1020 fep->rx_skbuff = (struct sk_buff **)&fep[1];
1014 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; 1021 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1015 1022
1016 /* init locks */ 1023 /* init locks */
1017 spin_lock_init(&fep->lock); 1024 spin_lock_init(&fep->lock);
1018 spin_lock_init(&fep->tx_lock); 1025 spin_lock_init(&fep->tx_lock);
1019 1026
1020 /* 1027 /*
1021 * Set the Ethernet address. 1028 * Set the Ethernet address.
1022 */ 1029 */
1023 for (i = 0; i < 6; i++) 1030 for (i = 0; i < 6; i++)
1024 ndev->dev_addr[i] = fpi->macaddr[i]; 1031 ndev->dev_addr[i] = fpi->macaddr[i];
1025 1032
1026 r = (*fep->ops->allocate_bd)(ndev); 1033 r = (*fep->ops->allocate_bd)(ndev);
1027 1034
1028 if (fep->ring_base == NULL) { 1035 if (fep->ring_base == NULL) {
1029 printk(KERN_ERR DRV_MODULE_NAME 1036 printk(KERN_ERR DRV_MODULE_NAME
1030 ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r); 1037 ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
1031 err = r; 1038 err = r;
1032 goto err; 1039 goto err;
1033 } 1040 }
1034 1041
1035 /* 1042 /*
1036 * Set receive and transmit descriptor base. 1043 * Set receive and transmit descriptor base.
1037 */ 1044 */
1038 fep->rx_bd_base = fep->ring_base; 1045 fep->rx_bd_base = fep->ring_base;
1039 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; 1046 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
1040 1047
1041 /* initialize ring size variables */ 1048 /* initialize ring size variables */
1042 fep->tx_ring = fpi->tx_ring; 1049 fep->tx_ring = fpi->tx_ring;
1043 fep->rx_ring = fpi->rx_ring; 1050 fep->rx_ring = fpi->rx_ring;
1044 1051
1045 /* 1052 /*
1046 * The FEC Ethernet specific entries in the device structure. 1053 * The FEC Ethernet specific entries in the device structure.
1047 */ 1054 */
1048 ndev->open = fs_enet_open; 1055 ndev->open = fs_enet_open;
1049 ndev->hard_start_xmit = fs_enet_start_xmit; 1056 ndev->hard_start_xmit = fs_enet_start_xmit;
1050 ndev->tx_timeout = fs_timeout; 1057 ndev->tx_timeout = fs_timeout;
1051 ndev->watchdog_timeo = 2 * HZ; 1058 ndev->watchdog_timeo = 2 * HZ;
1052 ndev->stop = fs_enet_close; 1059 ndev->stop = fs_enet_close;
1053 ndev->get_stats = fs_enet_get_stats; 1060 ndev->get_stats = fs_enet_get_stats;
1054 ndev->set_multicast_list = fs_set_multicast_list; 1061 ndev->set_multicast_list = fs_set_multicast_list;
1055 1062
1056 #ifdef CONFIG_NET_POLL_CONTROLLER 1063 #ifdef CONFIG_NET_POLL_CONTROLLER
1057 ndev->poll_controller = fs_enet_netpoll; 1064 ndev->poll_controller = fs_enet_netpoll;
1058 #endif 1065 #endif
1059 1066
1060 netif_napi_add(ndev, &fep->napi, 1067 netif_napi_add(ndev, &fep->napi,
1061 fs_enet_rx_napi, fpi->napi_weight); 1068 fs_enet_rx_napi, fpi->napi_weight);
1062 1069
1063 ndev->ethtool_ops = &fs_ethtool_ops; 1070 ndev->ethtool_ops = &fs_ethtool_ops;
1064 ndev->do_ioctl = fs_ioctl; 1071 ndev->do_ioctl = fs_ioctl;
1065 1072
1066 init_timer(&fep->phy_timer_list); 1073 init_timer(&fep->phy_timer_list);
1067 1074
1068 netif_carrier_off(ndev); 1075 netif_carrier_off(ndev);
1069 1076
1070 err = register_netdev(ndev); 1077 err = register_netdev(ndev);
1071 if (err != 0) { 1078 if (err != 0) {
1072 printk(KERN_ERR DRV_MODULE_NAME 1079 printk(KERN_ERR DRV_MODULE_NAME
1073 ": %s register_netdev failed.\n", ndev->name); 1080 ": %s register_netdev failed.\n", ndev->name);
1074 goto err; 1081 goto err;
1075 } 1082 }
1076 registered = 1; 1083 registered = 1;
1077 1084
1078 1085
1079 return ndev; 1086 return ndev;
1080 1087
1081 err: 1088 err:
1082 if (ndev != NULL) { 1089 if (ndev != NULL) {
1083 if (registered) 1090 if (registered)
1084 unregister_netdev(ndev); 1091 unregister_netdev(ndev);
1085 1092
1086 if (fep != NULL) { 1093 if (fep != NULL) {
1087 (*fep->ops->free_bd)(ndev); 1094 (*fep->ops->free_bd)(ndev);
1088 (*fep->ops->cleanup_data)(ndev); 1095 (*fep->ops->cleanup_data)(ndev);
1089 } 1096 }
1090 1097
1091 free_netdev(ndev); 1098 free_netdev(ndev);
1092 } 1099 }
1093 1100
1094 dev_set_drvdata(dev, NULL); 1101 dev_set_drvdata(dev, NULL);
1095 1102
1096 return ERR_PTR(err); 1103 return ERR_PTR(err);
1097 } 1104 }
1098 1105
1099 static int fs_cleanup_instance(struct net_device *ndev) 1106 static int fs_cleanup_instance(struct net_device *ndev)
1100 { 1107 {
1101 struct fs_enet_private *fep; 1108 struct fs_enet_private *fep;
1102 const struct fs_platform_info *fpi; 1109 const struct fs_platform_info *fpi;
1103 struct device *dev; 1110 struct device *dev;
1104 1111
1105 if (ndev == NULL) 1112 if (ndev == NULL)
1106 return -EINVAL; 1113 return -EINVAL;
1107 1114
1108 fep = netdev_priv(ndev); 1115 fep = netdev_priv(ndev);
1109 if (fep == NULL) 1116 if (fep == NULL)
1110 return -EINVAL; 1117 return -EINVAL;
1111 1118
1112 fpi = fep->fpi; 1119 fpi = fep->fpi;
1113 1120
1114 unregister_netdev(ndev); 1121 unregister_netdev(ndev);
1115 1122
1116 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), 1123 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
1117 fep->ring_base, fep->ring_mem_addr); 1124 fep->ring_base, fep->ring_mem_addr);
1118 1125
1119 /* reset it */ 1126 /* reset it */
1120 (*fep->ops->cleanup_data)(ndev); 1127 (*fep->ops->cleanup_data)(ndev);
1121 1128
1122 dev = fep->dev; 1129 dev = fep->dev;
1123 if (dev != NULL) { 1130 if (dev != NULL) {
1124 dev_set_drvdata(dev, NULL); 1131 dev_set_drvdata(dev, NULL);
1125 fep->dev = NULL; 1132 fep->dev = NULL;
1126 } 1133 }
1127 1134
1128 free_netdev(ndev); 1135 free_netdev(ndev);
1129 1136
1130 return 0; 1137 return 0;
1131 } 1138 }
1139 #endif
1132 1140
1133 /**************************************************************************************/ 1141 /**************************************************************************************/
1134 1142
1135 /* handy pointer to the immap */ 1143 /* handy pointer to the immap */
1136 void *fs_enet_immap = NULL; 1144 void *fs_enet_immap = NULL;
1137 1145
1138 static int setup_immap(void) 1146 static int setup_immap(void)
1139 { 1147 {
1140 phys_addr_t paddr = 0;
1141 unsigned long size = 0;
1142
1143 #ifdef CONFIG_CPM1 1148 #ifdef CONFIG_CPM1
1144 paddr = IMAP_ADDR; 1149 fs_enet_immap = ioremap(IMAP_ADDR, 0x4000);
1145 size = 0x10000; /* map 64K */ 1150 WARN_ON(!fs_enet_immap);
1151 #elif defined(CONFIG_CPM2)
1152 fs_enet_immap = cpm2_immr;
1146 #endif 1153 #endif
1147 1154
1148 #ifdef CONFIG_CPM2
1149 paddr = CPM_MAP_ADDR;
1150 size = 0x40000; /* map 256 K */
1151 #endif
1152 fs_enet_immap = ioremap(paddr, size);
1153 if (fs_enet_immap == NULL)
1154 return -EBADF; /* XXX ahem; maybe just BUG_ON? */
1155
1156 return 0; 1155 return 0;
1157 } 1156 }
1158 1157
1159 static void cleanup_immap(void) 1158 static void cleanup_immap(void)
1160 { 1159 {
1161 if (fs_enet_immap != NULL) { 1160 #if defined(CONFIG_CPM1)
1162 iounmap(fs_enet_immap); 1161 iounmap(fs_enet_immap);
1163 fs_enet_immap = NULL; 1162 #endif
1164 }
1165 } 1163 }
1166 1164
1167 /**************************************************************************************/ 1165 /**************************************************************************************/
1168 1166
1167 #ifdef CONFIG_PPC_CPM_NEW_BINDING
1168 static int __devinit find_phy(struct device_node *np,
1169 struct fs_platform_info *fpi)
1170 {
1171 struct device_node *phynode, *mdionode;
1172 struct resource res;
1173 int ret = 0, len;
1174
1175 const u32 *data = of_get_property(np, "phy-handle", &len);
1176 if (!data || len != 4)
1177 return -EINVAL;
1178
1179 phynode = of_find_node_by_phandle(*data);
1180 if (!phynode)
1181 return -EINVAL;
1182
1183 mdionode = of_get_parent(phynode);
1184 if (!mdionode)
1185 goto out_put_phy;
1186
1187 ret = of_address_to_resource(mdionode, 0, &res);
1188 if (ret)
1189 goto out_put_mdio;
1190
1191 data = of_get_property(phynode, "reg", &len);
1192 if (!data || len != 4)
1193 goto out_put_mdio;
1194
1195 snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data);
1196
1197 out_put_mdio:
1198 of_node_put(mdionode);
1199 out_put_phy:
1200 of_node_put(phynode);
1201 return ret;
1202 }
1203
1204 #ifdef CONFIG_FS_ENET_HAS_FEC
1205 #define IS_FEC(match) ((match)->data == &fs_fec_ops)
1206 #else
1207 #define IS_FEC(match) 0
1208 #endif
1209
1210 static int __devinit fs_enet_probe(struct of_device *ofdev,
1211 const struct of_device_id *match)
1212 {
1213 struct net_device *ndev;
1214 struct fs_enet_private *fep;
1215 struct fs_platform_info *fpi;
1216 const u32 *data;
1217 const u8 *mac_addr;
1218 int privsize, len, ret = -ENODEV;
1219
1220 fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
1221 if (!fpi)
1222 return -ENOMEM;
1223
1224 if (!IS_FEC(match)) {
1225 data = of_get_property(ofdev->node, "fsl,cpm-command", &len);
1226 if (!data || len != 4)
1227 goto out_free_fpi;
1228
1229 fpi->cp_command = *data;
1230 }
1231
1232 fpi->rx_ring = 32;
1233 fpi->tx_ring = 32;
1234 fpi->rx_copybreak = 240;
1235 fpi->use_napi = 0;
1236 fpi->napi_weight = 17;
1237
1238 ret = find_phy(ofdev->node, fpi);
1239 if (ret)
1240 goto out_free_fpi;
1241
1242 privsize = sizeof(*fep) +
1243 sizeof(struct sk_buff **) *
1244 (fpi->rx_ring + fpi->tx_ring);
1245
1246 ndev = alloc_etherdev(privsize);
1247 if (!ndev) {
1248 ret = -ENOMEM;
1249 goto out_free_fpi;
1250 }
1251
1252 SET_MODULE_OWNER(ndev);
1253 dev_set_drvdata(&ofdev->dev, ndev);
1254
1255 fep = netdev_priv(ndev);
1256 fep->dev = &ofdev->dev;
1257 fep->fpi = fpi;
1258 fep->ops = match->data;
1259
1260 ret = fep->ops->setup_data(ndev);
1261 if (ret)
1262 goto out_free_dev;
1263
1264 fep->rx_skbuff = (struct sk_buff **)&fep[1];
1265 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
1266
1267 spin_lock_init(&fep->lock);
1268 spin_lock_init(&fep->tx_lock);
1269
1270 mac_addr = of_get_mac_address(ofdev->node);
1271 if (mac_addr)
1272 memcpy(ndev->dev_addr, mac_addr, 6);
1273
1274 ret = fep->ops->allocate_bd(ndev);
1275 if (ret)
1276 goto out_cleanup_data;
1277
1278 fep->rx_bd_base = fep->ring_base;
1279 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
1280
1281 fep->tx_ring = fpi->tx_ring;
1282 fep->rx_ring = fpi->rx_ring;
1283
1284 ndev->open = fs_enet_open;
1285 ndev->hard_start_xmit = fs_enet_start_xmit;
1286 ndev->tx_timeout = fs_timeout;
1287 ndev->watchdog_timeo = 2 * HZ;
1288 ndev->stop = fs_enet_close;
1289 ndev->get_stats = fs_enet_get_stats;
1290 ndev->set_multicast_list = fs_set_multicast_list;
1292 if (fpi->use_napi)
1293 netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
1294 fpi->napi_weight);
1295
1295 ndev->ethtool_ops = &fs_ethtool_ops;
1296 ndev->do_ioctl = fs_ioctl;
1297
1298 init_timer(&fep->phy_timer_list);
1299
1300 netif_carrier_off(ndev);
1301
1302 ret = register_netdev(ndev);
1303 if (ret)
1304 goto out_free_bd;
1305
1306 printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n",
1307 ndev->name,
1308 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
1309 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1310
1311 return 0;
1312
1313 out_free_bd:
1314 fep->ops->free_bd(ndev);
1315 out_cleanup_data:
1316 fep->ops->cleanup_data(ndev);
1317 out_free_dev:
1318 free_netdev(ndev);
1319 dev_set_drvdata(&ofdev->dev, NULL);
1320 out_free_fpi:
1321 kfree(fpi);
1322 return ret;
1323 }
1324
1325 static int fs_enet_remove(struct of_device *ofdev)
1326 {
1327 struct net_device *ndev = dev_get_drvdata(&ofdev->dev);
1328 struct fs_enet_private *fep = netdev_priv(ndev);
1329
1330 unregister_netdev(ndev);
1331
1332 fep->ops->free_bd(ndev);
1333 fep->ops->cleanup_data(ndev);
1334 dev_set_drvdata(fep->dev, NULL);
1335
1336 free_netdev(ndev);
1337 return 0;
1338 }
1339
1340 static struct of_device_id fs_enet_match[] = {
1341 #ifdef CONFIG_FS_ENET_HAS_SCC
1342 {
1343 .compatible = "fsl,cpm1-scc-enet",
1344 .data = (void *)&fs_scc_ops,
1345 },
1346 #endif
1347 #ifdef CONFIG_FS_ENET_HAS_FCC
1348 {
1349 .compatible = "fsl,cpm2-fcc-enet",
1350 .data = (void *)&fs_fcc_ops,
1351 },
1352 #endif
1353 #ifdef CONFIG_FS_ENET_HAS_FEC
1354 {
1355 .compatible = "fsl,pq1-fec-enet",
1356 .data = (void *)&fs_fec_ops,
1357 },
1358 #endif
1359 {}
1360 };
1361
1362 static struct of_platform_driver fs_enet_driver = {
1363 .name = "fs_enet",
1364 .match_table = fs_enet_match,
1365 .probe = fs_enet_probe,
1366 .remove = fs_enet_remove,
1367 };
1368
1369 static int __init fs_init(void)
1370 {
1371 int r = setup_immap();
1372 if (r != 0)
1373 return r;
1374
1375 r = of_register_platform_driver(&fs_enet_driver);
1376 if (r != 0)
1377 goto out;
1378
1379 return 0;
1380
1381 out:
1382 cleanup_immap();
1383 return r;
1384 }
1385
1386 static void __exit fs_cleanup(void)
1387 {
1388 of_unregister_platform_driver(&fs_enet_driver);
1389 cleanup_immap();
1390 }
1391 #else
1169 static int __devinit fs_enet_probe(struct device *dev) 1392 static int __devinit fs_enet_probe(struct device *dev)
1170 { 1393 {
1171 struct net_device *ndev; 1394 struct net_device *ndev;
1172 1395
1173 /* no fixup - no device */ 1396 /* no fixup - no device */
1174 if (dev->platform_data == NULL) { 1397 if (dev->platform_data == NULL) {
1175 printk(KERN_INFO "fs_enet: " 1398 printk(KERN_INFO "fs_enet: "
1176 "probe called with no platform data; " 1399 "probe called with no platform data; "
1177 "remove unused devices\n"); 1400 "remove unused devices\n");
1178 return -ENODEV; 1401 return -ENODEV;
1179 } 1402 }
1180 1403
1181 ndev = fs_init_instance(dev, dev->platform_data); 1404 ndev = fs_init_instance(dev, dev->platform_data);
1182 if (IS_ERR(ndev)) 1405 if (IS_ERR(ndev))
1183 return PTR_ERR(ndev); 1406 return PTR_ERR(ndev);
1184 return 0; 1407 return 0;
1185 } 1408 }
1186 1409
1187 static int fs_enet_remove(struct device *dev) 1410 static int fs_enet_remove(struct device *dev)
1188 { 1411 {
1189 return fs_cleanup_instance(dev_get_drvdata(dev)); 1412 return fs_cleanup_instance(dev_get_drvdata(dev));
1190 } 1413 }
1191 1414
1192 static struct device_driver fs_enet_fec_driver = { 1415 static struct device_driver fs_enet_fec_driver = {
1193 .name = "fsl-cpm-fec", 1416 .name = "fsl-cpm-fec",
1194 .bus = &platform_bus_type, 1417 .bus = &platform_bus_type,
1195 .probe = fs_enet_probe, 1418 .probe = fs_enet_probe,
1196 .remove = fs_enet_remove, 1419 .remove = fs_enet_remove,
1197 #ifdef CONFIG_PM 1420 #ifdef CONFIG_PM
1198 /* .suspend = fs_enet_suspend, TODO */ 1421 /* .suspend = fs_enet_suspend, TODO */
1199 /* .resume = fs_enet_resume, TODO */ 1422 /* .resume = fs_enet_resume, TODO */
1200 #endif 1423 #endif
1201 }; 1424 };
1202 1425
1203 static struct device_driver fs_enet_scc_driver = { 1426 static struct device_driver fs_enet_scc_driver = {
1204 .name = "fsl-cpm-scc", 1427 .name = "fsl-cpm-scc",
1205 .bus = &platform_bus_type, 1428 .bus = &platform_bus_type,
1206 .probe = fs_enet_probe, 1429 .probe = fs_enet_probe,
1207 .remove = fs_enet_remove, 1430 .remove = fs_enet_remove,
1208 #ifdef CONFIG_PM 1431 #ifdef CONFIG_PM
1209 /* .suspend = fs_enet_suspend, TODO */ 1432 /* .suspend = fs_enet_suspend, TODO */
1210 /* .resume = fs_enet_resume, TODO */ 1433 /* .resume = fs_enet_resume, TODO */
1211 #endif 1434 #endif
1212 }; 1435 };
1213 1436
1214 static struct device_driver fs_enet_fcc_driver = { 1437 static struct device_driver fs_enet_fcc_driver = {
1215 .name = "fsl-cpm-fcc", 1438 .name = "fsl-cpm-fcc",
1216 .bus = &platform_bus_type, 1439 .bus = &platform_bus_type,
1217 .probe = fs_enet_probe, 1440 .probe = fs_enet_probe,
1218 .remove = fs_enet_remove, 1441 .remove = fs_enet_remove,
1219 #ifdef CONFIG_PM 1442 #ifdef CONFIG_PM
1220 /* .suspend = fs_enet_suspend, TODO */ 1443 /* .suspend = fs_enet_suspend, TODO */
1221 /* .resume = fs_enet_resume, TODO */ 1444 /* .resume = fs_enet_resume, TODO */
1222 #endif 1445 #endif
1223 }; 1446 };
1224 1447
1225 static int __init fs_init(void) 1448 static int __init fs_init(void)
1226 { 1449 {
1227 int r; 1450 int r;
1228 1451
1229 printk(KERN_INFO 1452 printk(KERN_INFO
1230 "%s", version); 1453 "%s", version);
1231 1454
1232 r = setup_immap(); 1455 r = setup_immap();
1233 if (r != 0) 1456 if (r != 0)
1234 return r; 1457 return r;
1235 1458
1236 #ifdef CONFIG_FS_ENET_HAS_FCC 1459 #ifdef CONFIG_FS_ENET_HAS_FCC
1237 /* let's insert mii stuff */ 1460 /* let's insert mii stuff */
1238 r = fs_enet_mdio_bb_init(); 1461 r = fs_enet_mdio_bb_init();
1239 1462
1240 if (r != 0) { 1463 if (r != 0) {
1241 printk(KERN_ERR DRV_MODULE_NAME 1464 printk(KERN_ERR DRV_MODULE_NAME
1242 "BB PHY init failed.\n"); 1465 "BB PHY init failed.\n");
1243 return r; 1466 return r;
1244 } 1467 }
1245 r = driver_register(&fs_enet_fcc_driver); 1468 r = driver_register(&fs_enet_fcc_driver);
1246 if (r != 0) 1469 if (r != 0)
1247 goto err; 1470 goto err;
1248 #endif 1471 #endif
1249 1472
1250 #ifdef CONFIG_FS_ENET_HAS_FEC 1473 #ifdef CONFIG_FS_ENET_HAS_FEC
1251 r = fs_enet_mdio_fec_init(); 1474 r = fs_enet_mdio_fec_init();
1252 if (r != 0) { 1475 if (r != 0) {
1253 printk(KERN_ERR DRV_MODULE_NAME 1476 printk(KERN_ERR DRV_MODULE_NAME
1254 "FEC PHY init failed.\n"); 1477 "FEC PHY init failed.\n");
1255 return r; 1478 return r;
1256 } 1479 }
1257 1480
1258 r = driver_register(&fs_enet_fec_driver); 1481 r = driver_register(&fs_enet_fec_driver);
1259 if (r != 0) 1482 if (r != 0)
1260 goto err; 1483 goto err;
1261 #endif 1484 #endif
1262 1485
1263 #ifdef CONFIG_FS_ENET_HAS_SCC 1486 #ifdef CONFIG_FS_ENET_HAS_SCC
1264 r = driver_register(&fs_enet_scc_driver); 1487 r = driver_register(&fs_enet_scc_driver);
1265 if (r != 0) 1488 if (r != 0)
1266 goto err; 1489 goto err;
1267 #endif 1490 #endif
1268 1491
1269 return 0; 1492 return 0;
1270 err: 1493 err:
1271 cleanup_immap(); 1494 cleanup_immap();
1272 return r; 1495 return r;
1273 } 1496 }
1274 1497
1275 static void __exit fs_cleanup(void) 1498 static void __exit fs_cleanup(void)
1276 { 1499 {
1277 driver_unregister(&fs_enet_fec_driver); 1500 driver_unregister(&fs_enet_fec_driver);
1278 driver_unregister(&fs_enet_fcc_driver); 1501 driver_unregister(&fs_enet_fcc_driver);
1279 driver_unregister(&fs_enet_scc_driver); 1502 driver_unregister(&fs_enet_scc_driver);
1280 cleanup_immap(); 1503 cleanup_immap();
1281 } 1504 }
1505 #endif
1282 1506
1283 #ifdef CONFIG_NET_POLL_CONTROLLER 1507 #ifdef CONFIG_NET_POLL_CONTROLLER
1284 static void fs_enet_netpoll(struct net_device *dev) 1508 static void fs_enet_netpoll(struct net_device *dev)
drivers/net/fs_enet/fs_enet.h
1 #ifndef FS_ENET_H 1 #ifndef FS_ENET_H
2 #define FS_ENET_H 2 #define FS_ENET_H
3 3
4 #include <linux/mii.h> 4 #include <linux/mii.h>
5 #include <linux/netdevice.h> 5 #include <linux/netdevice.h>
6 #include <linux/types.h> 6 #include <linux/types.h>
7 #include <linux/list.h> 7 #include <linux/list.h>
8 #include <linux/phy.h> 8 #include <linux/phy.h>
9 #include <linux/dma-mapping.h> 9 #include <linux/dma-mapping.h>
10 10
11 #include <linux/fs_enet_pd.h> 11 #include <linux/fs_enet_pd.h>
12 #include <asm/fs_pd.h> 12 #include <asm/fs_pd.h>
13 13
14 #ifdef CONFIG_CPM1 14 #ifdef CONFIG_CPM1
15 #include <asm/commproc.h> 15 #include <asm/commproc.h>
16 16
17 struct fec_info { 17 struct fec_info {
18 fec_t *fecp; 18 fec_t *fecp;
19 u32 mii_speed; 19 u32 mii_speed;
20 }; 20 };
21 #endif 21 #endif
22 22
23 #ifdef CONFIG_CPM2 23 #ifdef CONFIG_CPM2
24 #include <asm/cpm2.h> 24 #include <asm/cpm2.h>
25 #endif 25 #endif
26 26
27 /* This is used to operate with pins.
28 Note that the actual port size may
29 be different; cpm(s) handle it OK */
30 struct bb_info {
31 u8 mdio_dat_msk;
32 u8 mdio_dir_msk;
33 u8 *mdio_dir;
34 u8 *mdio_dat;
35 u8 mdc_msk;
36 u8 *mdc_dat;
37 int delay;
38 };
39
40 /* hw driver ops */ 27 /* hw driver ops */
41 struct fs_ops { 28 struct fs_ops {
42 int (*setup_data)(struct net_device *dev); 29 int (*setup_data)(struct net_device *dev);
43 int (*allocate_bd)(struct net_device *dev); 30 int (*allocate_bd)(struct net_device *dev);
44 void (*free_bd)(struct net_device *dev); 31 void (*free_bd)(struct net_device *dev);
45 void (*cleanup_data)(struct net_device *dev); 32 void (*cleanup_data)(struct net_device *dev);
46 void (*set_multicast_list)(struct net_device *dev); 33 void (*set_multicast_list)(struct net_device *dev);
47 void (*adjust_link)(struct net_device *dev); 34 void (*adjust_link)(struct net_device *dev);
48 void (*restart)(struct net_device *dev); 35 void (*restart)(struct net_device *dev);
49 void (*stop)(struct net_device *dev); 36 void (*stop)(struct net_device *dev);
50 void (*pre_request_irq)(struct net_device *dev, int irq); 37 void (*pre_request_irq)(struct net_device *dev, int irq);
51 void (*post_free_irq)(struct net_device *dev, int irq); 38 void (*post_free_irq)(struct net_device *dev, int irq);
52 void (*napi_clear_rx_event)(struct net_device *dev); 39 void (*napi_clear_rx_event)(struct net_device *dev);
53 void (*napi_enable_rx)(struct net_device *dev); 40 void (*napi_enable_rx)(struct net_device *dev);
54 void (*napi_disable_rx)(struct net_device *dev); 41 void (*napi_disable_rx)(struct net_device *dev);
55 void (*rx_bd_done)(struct net_device *dev); 42 void (*rx_bd_done)(struct net_device *dev);
56 void (*tx_kickstart)(struct net_device *dev); 43 void (*tx_kickstart)(struct net_device *dev);
57 u32 (*get_int_events)(struct net_device *dev); 44 u32 (*get_int_events)(struct net_device *dev);
58 void (*clear_int_events)(struct net_device *dev, u32 int_events); 45 void (*clear_int_events)(struct net_device *dev, u32 int_events);
59 void (*ev_error)(struct net_device *dev, u32 int_events); 46 void (*ev_error)(struct net_device *dev, u32 int_events);
60 int (*get_regs)(struct net_device *dev, void *p, int *sizep); 47 int (*get_regs)(struct net_device *dev, void *p, int *sizep);
61 int (*get_regs_len)(struct net_device *dev); 48 int (*get_regs_len)(struct net_device *dev);
62 void (*tx_restart)(struct net_device *dev); 49 void (*tx_restart)(struct net_device *dev);
63 }; 50 };
64 51
65 struct phy_info { 52 struct phy_info {
66 unsigned int id; 53 unsigned int id;
67 const char *name; 54 const char *name;
68 void (*startup) (struct net_device * dev); 55 void (*startup) (struct net_device * dev);
69 void (*shutdown) (struct net_device * dev); 56 void (*shutdown) (struct net_device * dev);
70 void (*ack_int) (struct net_device * dev); 57 void (*ack_int) (struct net_device * dev);
71 }; 58 };
72 59
73 /* The FEC stores dest/src/type, data, and checksum for receive packets. 60 /* The FEC stores dest/src/type, data, and checksum for receive packets.
74 */ 61 */
75 #define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ 62 #define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
76 #define MIN_MTU 46 /* this is data size */ 63 #define MIN_MTU 46 /* this is data size */
77 #define CRC_LEN 4 64 #define CRC_LEN 4
78 65
79 #define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) 66 #define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
80 #define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) 67 #define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
81 68
82 /* Must be a multiple of 32 (to cover both FEC & FCC) */ 69 /* Must be a multiple of 32 (to cover both FEC & FCC) */
83 #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31) 70 #define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
84 /* This is needed so that invalidate_xxx wont invalidate too much */ 71 /* This is needed so that invalidate_xxx wont invalidate too much */
85 #define ENET_RX_ALIGN 16 72 #define ENET_RX_ALIGN 16
86 #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1) 73 #define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE + ENET_RX_ALIGN - 1)
87 74
88 struct fs_enet_mii_bus {
89 struct list_head list;
90 spinlock_t mii_lock;
91 const struct fs_mii_bus_info *bus_info;
92 int refs;
93 u32 usage_map;
94
95 int (*mii_read)(struct fs_enet_mii_bus *bus,
96 int phy_id, int location);
97
98 void (*mii_write)(struct fs_enet_mii_bus *bus,
99 int phy_id, int location, int value);
100
101 union {
102 struct {
103 unsigned int mii_speed;
104 void *fecp;
105 } fec;
106
107 struct {
108 /* note that the actual port size may */
109 /* be different; cpm(s) handle it OK */
110 u8 mdio_msk;
111 u8 *mdio_dir;
112 u8 *mdio_dat;
113 u8 mdc_msk;
114 u8 *mdc_dir;
115 u8 *mdc_dat;
116 } bitbang;
117
118 struct {
119 u16 lpa;
120 } fixed;
121 };
122 };
123
124 struct fs_enet_private { 75 struct fs_enet_private {
125 struct napi_struct napi; 76 struct napi_struct napi;
126 struct device *dev; /* pointer back to the device (must be initialized first) */ 77 struct device *dev; /* pointer back to the device (must be initialized first) */
127 spinlock_t lock; /* during all ops except TX pckt processing */ 78 spinlock_t lock; /* during all ops except TX pckt processing */
128 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */ 79 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
129 const struct fs_platform_info *fpi; 80 struct fs_platform_info *fpi;
130 const struct fs_ops *ops; 81 const struct fs_ops *ops;
131 int rx_ring, tx_ring; 82 int rx_ring, tx_ring;
132 dma_addr_t ring_mem_addr; 83 dma_addr_t ring_mem_addr;
133 void *ring_base; 84 void *ring_base;
134 struct sk_buff **rx_skbuff; 85 struct sk_buff **rx_skbuff;
135 struct sk_buff **tx_skbuff; 86 struct sk_buff **tx_skbuff;
136 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ 87 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
137 cbd_t *tx_bd_base; 88 cbd_t *tx_bd_base;
138 cbd_t *dirty_tx; /* ring entries to be free()ed. */ 89 cbd_t *dirty_tx; /* ring entries to be free()ed. */
139 cbd_t *cur_rx; 90 cbd_t *cur_rx;
140 cbd_t *cur_tx; 91 cbd_t *cur_tx;
141 int tx_free; 92 int tx_free;
142 struct net_device_stats stats; 93 struct net_device_stats stats;
143 struct timer_list phy_timer_list; 94 struct timer_list phy_timer_list;
144 const struct phy_info *phy; 95 const struct phy_info *phy;
145 u32 msg_enable; 96 u32 msg_enable;
146 struct mii_if_info mii_if; 97 struct mii_if_info mii_if;
147 unsigned int last_mii_status; 98 unsigned int last_mii_status;
148 struct fs_enet_mii_bus *mii_bus;
149 int interrupt; 99 int interrupt;
150 100
151 struct phy_device *phydev; 101 struct phy_device *phydev;
152 int oldduplex, oldspeed, oldlink; /* current settings */ 102 int oldduplex, oldspeed, oldlink; /* current settings */
153 103
154 /* event masks */ 104 /* event masks */
155 u32 ev_napi_rx; /* mask of NAPI rx events */ 105 u32 ev_napi_rx; /* mask of NAPI rx events */
156 u32 ev_rx; /* rx event mask */ 106 u32 ev_rx; /* rx event mask */
157 u32 ev_tx; /* tx event mask */ 107 u32 ev_tx; /* tx event mask */
158 u32 ev_err; /* error event mask */ 108 u32 ev_err; /* error event mask */
159 109
160 u16 bd_rx_empty; /* mask of BD rx empty */ 110 u16 bd_rx_empty; /* mask of BD rx empty */
161 u16 bd_rx_err; /* mask of BD rx errors */ 111 u16 bd_rx_err; /* mask of BD rx errors */
162 112
163 union { 113 union {
164 struct { 114 struct {
165 int idx; /* FEC1 = 0, FEC2 = 1 */ 115 int idx; /* FEC1 = 0, FEC2 = 1 */
166 void *fecp; /* hw registers */ 116 void *fecp; /* hw registers */
167 u32 hthi, htlo; /* state for multicast */ 117 u32 hthi, htlo; /* state for multicast */
168 } fec; 118 } fec;
169 119
170 struct { 120 struct {
171 int idx; /* FCC1-3 = 0-2 */ 121 int idx; /* FCC1-3 = 0-2 */
172 void *fccp; /* hw registers */ 122 void *fccp; /* hw registers */
173 void *ep; /* parameter ram */ 123 void *ep; /* parameter ram */
174 void *fcccp; /* hw registers cont. */ 124 void *fcccp; /* hw registers cont. */
175 void *mem; /* FCC DPRAM */ 125 void *mem; /* FCC DPRAM */
176 u32 gaddrh, gaddrl; /* group address */ 126 u32 gaddrh, gaddrl; /* group address */
177 } fcc; 127 } fcc;
178 128
179 struct { 129 struct {
180 int idx; /* FEC1 = 0, FEC2 = 1 */ 130 int idx; /* FEC1 = 0, FEC2 = 1 */
181 void *sccp; /* hw registers */ 131 void *sccp; /* hw registers */
182 void *ep; /* parameter ram */ 132 void *ep; /* parameter ram */
183 u32 hthi, htlo; /* state for multicast */ 133 u32 hthi, htlo; /* state for multicast */
184 } scc; 134 } scc;
185 135
186 }; 136 };
187 }; 137 };
188 138
189 /***************************************************************************/ 139 /***************************************************************************/
140 #ifndef CONFIG_PPC_CPM_NEW_BINDING
190 int fs_enet_mdio_bb_init(void); 141 int fs_enet_mdio_bb_init(void);
191 int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
192 int fs_enet_mdio_fec_init(void); 142 int fs_enet_mdio_fec_init(void);
143 #endif
193 144
194 void fs_init_bds(struct net_device *dev); 145 void fs_init_bds(struct net_device *dev);
195 void fs_cleanup_bds(struct net_device *dev); 146 void fs_cleanup_bds(struct net_device *dev);
196 147
197 /***************************************************************************/ 148 /***************************************************************************/
198 149
199 #define DRV_MODULE_NAME "fs_enet" 150 #define DRV_MODULE_NAME "fs_enet"
200 #define PFX DRV_MODULE_NAME ": " 151 #define PFX DRV_MODULE_NAME ": "
201 #define DRV_MODULE_VERSION "1.0" 152 #define DRV_MODULE_VERSION "1.0"
202 #define DRV_MODULE_RELDATE "Aug 8, 2005" 153 #define DRV_MODULE_RELDATE "Aug 8, 2005"
203 154
204 /***************************************************************************/ 155 /***************************************************************************/
205 156
206 int fs_enet_platform_init(void); 157 int fs_enet_platform_init(void);
207 void fs_enet_platform_cleanup(void); 158 void fs_enet_platform_cleanup(void);
208 159
209 /***************************************************************************/ 160 /***************************************************************************/
210 /* buffer descriptor access macros */ 161 /* buffer descriptor access macros */
211 162
212 /* access macros */ 163 /* access macros */
213 #if defined(CONFIG_CPM1) 164 #if defined(CONFIG_CPM1)
214 /* for a CPM1 __raw_xxx's are sufficient */ 165 /* for a CPM1 __raw_xxx's are sufficient */
215 #define __cbd_out32(addr, x) __raw_writel(x, addr) 166 #define __cbd_out32(addr, x) __raw_writel(x, addr)
216 #define __cbd_out16(addr, x) __raw_writew(x, addr) 167 #define __cbd_out16(addr, x) __raw_writew(x, addr)
217 #define __cbd_in32(addr) __raw_readl(addr) 168 #define __cbd_in32(addr) __raw_readl(addr)
218 #define __cbd_in16(addr) __raw_readw(addr) 169 #define __cbd_in16(addr) __raw_readw(addr)
219 #else 170 #else
220 /* for others play it safe */ 171 /* for others play it safe */
221 #define __cbd_out32(addr, x) out_be32(addr, x) 172 #define __cbd_out32(addr, x) out_be32(addr, x)
222 #define __cbd_out16(addr, x) out_be16(addr, x) 173 #define __cbd_out16(addr, x) out_be16(addr, x)
223 #define __cbd_in32(addr) in_be32(addr) 174 #define __cbd_in32(addr) in_be32(addr)
224 #define __cbd_in16(addr) in_be16(addr) 175 #define __cbd_in16(addr) in_be16(addr)
225 #endif 176 #endif
226 177
227 /* write */ 178 /* write */
228 #define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc)) 179 #define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
229 #define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen)) 180 #define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
230 #define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) 181 #define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
231 182
232 /* read */ 183 /* read */
233 #define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc) 184 #define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
234 #define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen) 185 #define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
235 #define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr) 186 #define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
236 187
237 /* set bits */ 188 /* set bits */
238 #define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) 189 #define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
239 190
240 /* clear bits */ 191 /* clear bits */
241 #define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) 192 #define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
242 193
243 /*******************************************************************/ 194 /*******************************************************************/
244 195
245 extern const struct fs_ops fs_fec_ops; 196 extern const struct fs_ops fs_fec_ops;
246 extern const struct fs_ops fs_fcc_ops; 197 extern const struct fs_ops fs_fcc_ops;
247 extern const struct fs_ops fs_scc_ops; 198 extern const struct fs_ops fs_scc_ops;
248 199
249 /*******************************************************************/ 200 /*******************************************************************/
250 201
251 /* handy pointer to the immap */ 202 /* handy pointer to the immap */
252 extern void *fs_enet_immap; 203 extern void *fs_enet_immap;
253 204
254 /*******************************************************************/ 205 /*******************************************************************/
255 206
drivers/net/fs_enet/mac-fcc.c
1 /* 1 /*
2 * FCC driver for Motorola MPC82xx (PQ2). 2 * FCC driver for Motorola MPC82xx (PQ2).
3 * 3 *
4 * Copyright (c) 2003 Intracom S.A. 4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/string.h> 18 #include <linux/string.h>
19 #include <linux/ptrace.h> 19 #include <linux/ptrace.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/ioport.h> 21 #include <linux/ioport.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/netdevice.h> 26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h> 27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h> 28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/mii.h> 30 #include <linux/mii.h>
31 #include <linux/ethtool.h> 31 #include <linux/ethtool.h>
32 #include <linux/bitops.h> 32 #include <linux/bitops.h>
33 #include <linux/fs.h> 33 #include <linux/fs.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 #include <linux/phy.h> 35 #include <linux/phy.h>
36 36
37 #include <asm/immap_cpm2.h> 37 #include <asm/immap_cpm2.h>
38 #include <asm/mpc8260.h> 38 #include <asm/mpc8260.h>
39 #include <asm/cpm2.h> 39 #include <asm/cpm2.h>
40 40
41 #include <asm/pgtable.h> 41 #include <asm/pgtable.h>
42 #include <asm/irq.h> 42 #include <asm/irq.h>
43 #include <asm/uaccess.h> 43 #include <asm/uaccess.h>
44 44
45 #ifdef CONFIG_PPC_CPM_NEW_BINDING
46 #include <asm/of_device.h>
47 #endif
48
45 #include "fs_enet.h" 49 #include "fs_enet.h"
46 50
47 /*************************************************/ 51 /*************************************************/
48 52
49 /* FCC access macros */ 53 /* FCC access macros */
50 54
51 /* write, read, set bits, clear bits */ 55 /* write, read, set bits, clear bits */
52 #define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v)) 56 #define W32(_p, _m, _v) out_be32(&(_p)->_m, (_v))
53 #define R32(_p, _m) in_be32(&(_p)->_m) 57 #define R32(_p, _m) in_be32(&(_p)->_m)
54 #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) 58 #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
55 #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) 59 #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
56 60
57 #define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v)) 61 #define W16(_p, _m, _v) out_be16(&(_p)->_m, (_v))
58 #define R16(_p, _m) in_be16(&(_p)->_m) 62 #define R16(_p, _m) in_be16(&(_p)->_m)
59 #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) 63 #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
60 #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) 64 #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
61 65
62 #define W8(_p, _m, _v) out_8(&(_p)->_m, (_v)) 66 #define W8(_p, _m, _v) out_8(&(_p)->_m, (_v))
63 #define R8(_p, _m) in_8(&(_p)->_m) 67 #define R8(_p, _m) in_8(&(_p)->_m)
64 #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) 68 #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
65 #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) 69 #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
66 70
67 /*************************************************/ 71 /*************************************************/
68 72
69 #define FCC_MAX_MULTICAST_ADDRS 64 73 #define FCC_MAX_MULTICAST_ADDRS 64
70 74
71 #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) 75 #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
72 #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) 76 #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
73 #define mk_mii_end 0 77 #define mk_mii_end 0
74 78
75 #define MAX_CR_CMD_LOOPS 10000 79 #define MAX_CR_CMD_LOOPS 10000
76 80
77 static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op) 81 static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 op)
78 { 82 {
79 const struct fs_platform_info *fpi = fep->fpi; 83 const struct fs_platform_info *fpi = fep->fpi;
80 cpm2_map_t *immap = fs_enet_immap; 84 cpm2_map_t *immap = fs_enet_immap;
81 cpm_cpm2_t *cpmp = &immap->im_cpm; 85 cpm_cpm2_t *cpmp = &immap->im_cpm;
82 u32 v;
83 int i; 86 int i;
84 87
85 /* Currently I don't know what feature call will look like. But 88 W32(cpmp, cp_cpcr, fpi->cp_command | op | CPM_CR_FLG);
86 I guess there'd be something like do_cpm_cmd() which will require page & sblock */
87 v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
88 W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
89 for (i = 0; i < MAX_CR_CMD_LOOPS; i++) 89 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
90 if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) 90 if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
91 break; 91 return 0;
92 92
93 if (i >= MAX_CR_CMD_LOOPS) { 93 printk(KERN_ERR "%s(): Not able to issue CPM command\n",
94 printk(KERN_ERR "%s(): Not able to issue CPM command\n", 94 __FUNCTION__);
95 __FUNCTION__); 95 return 1;
96 return 1;
97 }
98
99 return 0;
100 } 96 }
101 97
102 static int do_pd_setup(struct fs_enet_private *fep) 98 static int do_pd_setup(struct fs_enet_private *fep)
103 { 99 {
100 #ifdef CONFIG_PPC_CPM_NEW_BINDING
101 struct of_device *ofdev = to_of_device(fep->dev);
102 struct fs_platform_info *fpi = fep->fpi;
103 int ret = -EINVAL;
104
105 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
106 if (fep->interrupt == NO_IRQ)
107 goto out;
108
109 fep->fcc.fccp = of_iomap(ofdev->node, 0);
110 if (!fep->fcc.fccp)
111 goto out;
112
113 fep->fcc.ep = of_iomap(ofdev->node, 1);
114 if (!fep->fcc.ep)
115 goto out_fccp;
116
117 fep->fcc.fcccp = of_iomap(ofdev->node, 2);
118 if (!fep->fcc.fcccp)
119 goto out_ep;
120
121 fep->fcc.mem = (void *)cpm2_immr;
122 fpi->dpram_offset = cpm_dpalloc(128, 8);
123 if (IS_ERR_VALUE(fpi->dpram_offset)) {
124 ret = fpi->dpram_offset;
125 goto out_fcccp;
126 }
127
128 return 0;
129
130 out_fcccp:
131 iounmap(fep->fcc.fcccp);
132 out_ep:
133 iounmap(fep->fcc.ep);
134 out_fccp:
135 iounmap(fep->fcc.fccp);
136 out:
137 return ret;
138 #else
104 struct platform_device *pdev = to_platform_device(fep->dev); 139 struct platform_device *pdev = to_platform_device(fep->dev);
105 struct resource *r; 140 struct resource *r;
106 141
107 /* Fill out IRQ field */ 142 /* Fill out IRQ field */
108 fep->interrupt = platform_get_irq(pdev, 0); 143 fep->interrupt = platform_get_irq(pdev, 0);
109 if (fep->interrupt < 0) 144 if (fep->interrupt < 0)
110 return -EINVAL; 145 return -EINVAL;
111 146
112 /* Attach the memory for the FCC Parameter RAM */ 147 /* Attach the memory for the FCC Parameter RAM */
113 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); 148 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
114 fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1); 149 fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
115 if (fep->fcc.ep == NULL) 150 if (fep->fcc.ep == NULL)
116 return -EINVAL; 151 return -EINVAL;
117 152
118 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); 153 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
119 fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1); 154 fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
120 if (fep->fcc.fccp == NULL) 155 if (fep->fcc.fccp == NULL)
121 return -EINVAL; 156 return -EINVAL;
122 157
123 if (fep->fpi->fcc_regs_c) { 158 if (fep->fpi->fcc_regs_c) {
124 159
125 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; 160 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
126 } else { 161 } else {
127 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, 162 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
128 "fcc_regs_c"); 163 "fcc_regs_c");
129 fep->fcc.fcccp = (void *)ioremap(r->start, 164 fep->fcc.fcccp = (void *)ioremap(r->start,
130 r->end - r->start + 1); 165 r->end - r->start + 1);
131 } 166 }
132 167
133 if (fep->fcc.fcccp == NULL) 168 if (fep->fcc.fcccp == NULL)
134 return -EINVAL; 169 return -EINVAL;
135 170
136 fep->fcc.mem = (void *)fep->fpi->mem_offset; 171 fep->fcc.mem = (void *)fep->fpi->mem_offset;
137 if (fep->fcc.mem == NULL) 172 if (fep->fcc.mem == NULL)
138 return -EINVAL; 173 return -EINVAL;
139 174
140 return 0; 175 return 0;
176 #endif
141 } 177 }
142 178
143 #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) 179 #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
144 #define FCC_RX_EVENT (FCC_ENET_RXF) 180 #define FCC_RX_EVENT (FCC_ENET_RXF)
145 #define FCC_TX_EVENT (FCC_ENET_TXB) 181 #define FCC_TX_EVENT (FCC_ENET_TXB)
146 #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) 182 #define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
147 183
148 static int setup_data(struct net_device *dev) 184 static int setup_data(struct net_device *dev)
149 { 185 {
150 struct fs_enet_private *fep = netdev_priv(dev); 186 struct fs_enet_private *fep = netdev_priv(dev);
151 const struct fs_platform_info *fpi = fep->fpi; 187 #ifndef CONFIG_PPC_CPM_NEW_BINDING
188 struct fs_platform_info *fpi = fep->fpi;
152 189
190 fpi->cp_command = (fpi->cp_page << 26) |
191 (fpi->cp_block << 21) |
192 (12 << 6);
193
153 fep->fcc.idx = fs_get_fcc_index(fpi->fs_no); 194 fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
154 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ 195 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
155 return -EINVAL; 196 return -EINVAL;
197 #endif
156 198
157 if (do_pd_setup(fep) != 0) 199 if (do_pd_setup(fep) != 0)
158 return -EINVAL; 200 return -EINVAL;
159 201
160 fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK; 202 fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
161 fep->ev_rx = FCC_RX_EVENT; 203 fep->ev_rx = FCC_RX_EVENT;
162 fep->ev_tx = FCC_TX_EVENT; 204 fep->ev_tx = FCC_TX_EVENT;
163 fep->ev_err = FCC_ERR_EVENT_MSK; 205 fep->ev_err = FCC_ERR_EVENT_MSK;
164 206
165 return 0; 207 return 0;
166 } 208 }
167 209
168 static int allocate_bd(struct net_device *dev) 210 static int allocate_bd(struct net_device *dev)
169 { 211 {
170 struct fs_enet_private *fep = netdev_priv(dev); 212 struct fs_enet_private *fep = netdev_priv(dev);
171 const struct fs_platform_info *fpi = fep->fpi; 213 const struct fs_platform_info *fpi = fep->fpi;
172 214
173 fep->ring_base = dma_alloc_coherent(fep->dev, 215 fep->ring_base = dma_alloc_coherent(fep->dev,
174 (fpi->tx_ring + fpi->rx_ring) * 216 (fpi->tx_ring + fpi->rx_ring) *
175 sizeof(cbd_t), &fep->ring_mem_addr, 217 sizeof(cbd_t), &fep->ring_mem_addr,
176 GFP_KERNEL); 218 GFP_KERNEL);
177 if (fep->ring_base == NULL) 219 if (fep->ring_base == NULL)
178 return -ENOMEM; 220 return -ENOMEM;
179 221
180 return 0; 222 return 0;
181 } 223 }
182 224
183 static void free_bd(struct net_device *dev) 225 static void free_bd(struct net_device *dev)
184 { 226 {
185 struct fs_enet_private *fep = netdev_priv(dev); 227 struct fs_enet_private *fep = netdev_priv(dev);
186 const struct fs_platform_info *fpi = fep->fpi; 228 const struct fs_platform_info *fpi = fep->fpi;
187 229
188 if (fep->ring_base) 230 if (fep->ring_base)
189 dma_free_coherent(fep->dev, 231 dma_free_coherent(fep->dev,
190 (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), 232 (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
191 fep->ring_base, fep->ring_mem_addr); 233 fep->ring_base, fep->ring_mem_addr);
192 } 234 }
193 235
194 static void cleanup_data(struct net_device *dev) 236 static void cleanup_data(struct net_device *dev)
195 { 237 {
196 /* nothing */ 238 /* nothing */
197 } 239 }
198 240
199 static void set_promiscuous_mode(struct net_device *dev) 241 static void set_promiscuous_mode(struct net_device *dev)
200 { 242 {
201 struct fs_enet_private *fep = netdev_priv(dev); 243 struct fs_enet_private *fep = netdev_priv(dev);
202 fcc_t *fccp = fep->fcc.fccp; 244 fcc_t *fccp = fep->fcc.fccp;
203 245
204 S32(fccp, fcc_fpsmr, FCC_PSMR_PRO); 246 S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
205 } 247 }
206 248
207 static void set_multicast_start(struct net_device *dev) 249 static void set_multicast_start(struct net_device *dev)
208 { 250 {
209 struct fs_enet_private *fep = netdev_priv(dev); 251 struct fs_enet_private *fep = netdev_priv(dev);
210 fcc_enet_t *ep = fep->fcc.ep; 252 fcc_enet_t *ep = fep->fcc.ep;
211 253
212 W32(ep, fen_gaddrh, 0); 254 W32(ep, fen_gaddrh, 0);
213 W32(ep, fen_gaddrl, 0); 255 W32(ep, fen_gaddrl, 0);
214 } 256 }
215 257
216 static void set_multicast_one(struct net_device *dev, const u8 *mac) 258 static void set_multicast_one(struct net_device *dev, const u8 *mac)
217 { 259 {
218 struct fs_enet_private *fep = netdev_priv(dev); 260 struct fs_enet_private *fep = netdev_priv(dev);
219 fcc_enet_t *ep = fep->fcc.ep; 261 fcc_enet_t *ep = fep->fcc.ep;
220 u16 taddrh, taddrm, taddrl; 262 u16 taddrh, taddrm, taddrl;
221 263
222 taddrh = ((u16)mac[5] << 8) | mac[4]; 264 taddrh = ((u16)mac[5] << 8) | mac[4];
223 taddrm = ((u16)mac[3] << 8) | mac[2]; 265 taddrm = ((u16)mac[3] << 8) | mac[2];
224 taddrl = ((u16)mac[1] << 8) | mac[0]; 266 taddrl = ((u16)mac[1] << 8) | mac[0];
225 267
226 W16(ep, fen_taddrh, taddrh); 268 W16(ep, fen_taddrh, taddrh);
227 W16(ep, fen_taddrm, taddrm); 269 W16(ep, fen_taddrm, taddrm);
228 W16(ep, fen_taddrl, taddrl); 270 W16(ep, fen_taddrl, taddrl);
229 fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR); 271 fcc_cr_cmd(fep, CPM_CR_SET_GADDR);
230 } 272 }
231 273
232 static void set_multicast_finish(struct net_device *dev) 274 static void set_multicast_finish(struct net_device *dev)
233 { 275 {
234 struct fs_enet_private *fep = netdev_priv(dev); 276 struct fs_enet_private *fep = netdev_priv(dev);
235 fcc_t *fccp = fep->fcc.fccp; 277 fcc_t *fccp = fep->fcc.fccp;
236 fcc_enet_t *ep = fep->fcc.ep; 278 fcc_enet_t *ep = fep->fcc.ep;
237 279
238 /* clear promiscuous always */ 280 /* clear promiscuous always */
239 C32(fccp, fcc_fpsmr, FCC_PSMR_PRO); 281 C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
240 282
241 /* if all multi or too many multicasts; just enable all */ 283 /* if all multi or too many multicasts; just enable all */
242 if ((dev->flags & IFF_ALLMULTI) != 0 || 284 if ((dev->flags & IFF_ALLMULTI) != 0 ||
243 dev->mc_count > FCC_MAX_MULTICAST_ADDRS) { 285 dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
244 286
245 W32(ep, fen_gaddrh, 0xffffffff); 287 W32(ep, fen_gaddrh, 0xffffffff);
246 W32(ep, fen_gaddrl, 0xffffffff); 288 W32(ep, fen_gaddrl, 0xffffffff);
247 } 289 }
248 290
249 /* read back */ 291 /* read back */
250 fep->fcc.gaddrh = R32(ep, fen_gaddrh); 292 fep->fcc.gaddrh = R32(ep, fen_gaddrh);
251 fep->fcc.gaddrl = R32(ep, fen_gaddrl); 293 fep->fcc.gaddrl = R32(ep, fen_gaddrl);
252 } 294 }
253 295
254 static void set_multicast_list(struct net_device *dev) 296 static void set_multicast_list(struct net_device *dev)
255 { 297 {
256 struct dev_mc_list *pmc; 298 struct dev_mc_list *pmc;
257 299
258 if ((dev->flags & IFF_PROMISC) == 0) { 300 if ((dev->flags & IFF_PROMISC) == 0) {
259 set_multicast_start(dev); 301 set_multicast_start(dev);
260 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 302 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
261 set_multicast_one(dev, pmc->dmi_addr); 303 set_multicast_one(dev, pmc->dmi_addr);
262 set_multicast_finish(dev); 304 set_multicast_finish(dev);
263 } else 305 } else
264 set_promiscuous_mode(dev); 306 set_promiscuous_mode(dev);
265 } 307 }
266 308
267 static void restart(struct net_device *dev) 309 static void restart(struct net_device *dev)
268 { 310 {
269 struct fs_enet_private *fep = netdev_priv(dev); 311 struct fs_enet_private *fep = netdev_priv(dev);
270 const struct fs_platform_info *fpi = fep->fpi; 312 const struct fs_platform_info *fpi = fep->fpi;
271 fcc_t *fccp = fep->fcc.fccp; 313 fcc_t *fccp = fep->fcc.fccp;
272 fcc_c_t *fcccp = fep->fcc.fcccp; 314 fcc_c_t *fcccp = fep->fcc.fcccp;
273 fcc_enet_t *ep = fep->fcc.ep; 315 fcc_enet_t *ep = fep->fcc.ep;
274 dma_addr_t rx_bd_base_phys, tx_bd_base_phys; 316 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
275 u16 paddrh, paddrm, paddrl; 317 u16 paddrh, paddrm, paddrl;
276 u16 mem_addr; 318 u16 mem_addr;
277 const unsigned char *mac; 319 const unsigned char *mac;
278 int i; 320 int i;
279 321
280 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); 322 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
281 323
282 /* clear everything (slow & steady does it) */ 324 /* clear everything (slow & steady does it) */
283 for (i = 0; i < sizeof(*ep); i++) 325 for (i = 0; i < sizeof(*ep); i++)
284 out_8((char *)ep + i, 0); 326 out_8((u8 __iomem *)ep + i, 0);
285 327
286 /* get physical address */ 328 /* get physical address */
287 rx_bd_base_phys = fep->ring_mem_addr; 329 rx_bd_base_phys = fep->ring_mem_addr;
288 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; 330 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
289 331
290 /* point to bds */ 332 /* point to bds */
291 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys); 333 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
292 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys); 334 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
293 335
294 /* Set maximum bytes per receive buffer. 336 /* Set maximum bytes per receive buffer.
295 * It must be a multiple of 32. 337 * It must be a multiple of 32.
296 */ 338 */
297 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE); 339 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
298 340
299 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24); 341 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
300 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24); 342 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
301 343
302 /* Allocate space in the reserved FCC area of DPRAM for the 344 /* Allocate space in the reserved FCC area of DPRAM for the
303 * internal buffers. No one uses this space (yet), so we 345 * internal buffers. No one uses this space (yet), so we
304 * can do this. Later, we will add resource management for 346 * can do this. Later, we will add resource management for
305 * this area. 347 * this area.
306 */ 348 */
307 349
308 mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */ 350 mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
309 351
310 W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff)); 352 W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
311 W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff)); 353 W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
312 W16(ep, fen_padptr, mem_addr + 64); 354 W16(ep, fen_padptr, mem_addr + 64);
313 355
314 /* fill with special symbol... */ 356 /* fill with special symbol... */
315 memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32); 357 memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
316 358
317 W32(ep, fen_genfcc.fcc_rbptr, 0); 359 W32(ep, fen_genfcc.fcc_rbptr, 0);
318 W32(ep, fen_genfcc.fcc_tbptr, 0); 360 W32(ep, fen_genfcc.fcc_tbptr, 0);
319 W32(ep, fen_genfcc.fcc_rcrc, 0); 361 W32(ep, fen_genfcc.fcc_rcrc, 0);
320 W32(ep, fen_genfcc.fcc_tcrc, 0); 362 W32(ep, fen_genfcc.fcc_tcrc, 0);
321 W16(ep, fen_genfcc.fcc_res1, 0); 363 W16(ep, fen_genfcc.fcc_res1, 0);
322 W32(ep, fen_genfcc.fcc_res2, 0); 364 W32(ep, fen_genfcc.fcc_res2, 0);
323 365
324 /* no CAM */ 366 /* no CAM */
325 W32(ep, fen_camptr, 0); 367 W32(ep, fen_camptr, 0);
326 368
327 /* Set CRC preset and mask */ 369 /* Set CRC preset and mask */
328 W32(ep, fen_cmask, 0xdebb20e3); 370 W32(ep, fen_cmask, 0xdebb20e3);
329 W32(ep, fen_cpres, 0xffffffff); 371 W32(ep, fen_cpres, 0xffffffff);
330 372
331 W32(ep, fen_crcec, 0); /* CRC Error counter */ 373 W32(ep, fen_crcec, 0); /* CRC Error counter */
332 W32(ep, fen_alec, 0); /* alignment error counter */ 374 W32(ep, fen_alec, 0); /* alignment error counter */
333 W32(ep, fen_disfc, 0); /* discard frame counter */ 375 W32(ep, fen_disfc, 0); /* discard frame counter */
334 W16(ep, fen_retlim, 15); /* Retry limit threshold */ 376 W16(ep, fen_retlim, 15); /* Retry limit threshold */
335 W16(ep, fen_pper, 0); /* Normal persistence */ 377 W16(ep, fen_pper, 0); /* Normal persistence */
336 378
337 /* set group address */ 379 /* set group address */
338 W32(ep, fen_gaddrh, fep->fcc.gaddrh); 380 W32(ep, fen_gaddrh, fep->fcc.gaddrh);
339 W32(ep, fen_gaddrl, fep->fcc.gaddrh); 381 W32(ep, fen_gaddrl, fep->fcc.gaddrh);
340 382
341 /* Clear hash filter tables */ 383 /* Clear hash filter tables */
342 W32(ep, fen_iaddrh, 0); 384 W32(ep, fen_iaddrh, 0);
343 W32(ep, fen_iaddrl, 0); 385 W32(ep, fen_iaddrl, 0);
344 386
345 /* Clear the Out-of-sequence TxBD */ 387 /* Clear the Out-of-sequence TxBD */
346 W16(ep, fen_tfcstat, 0); 388 W16(ep, fen_tfcstat, 0);
347 W16(ep, fen_tfclen, 0); 389 W16(ep, fen_tfclen, 0);
348 W32(ep, fen_tfcptr, 0); 390 W32(ep, fen_tfcptr, 0);
349 391
350 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */ 392 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
351 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ 393 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
352 394
353 /* set address */ 395 /* set address */
354 mac = dev->dev_addr; 396 mac = dev->dev_addr;
355 paddrh = ((u16)mac[5] << 8) | mac[4]; 397 paddrh = ((u16)mac[5] << 8) | mac[4];
356 paddrm = ((u16)mac[3] << 8) | mac[2]; 398 paddrm = ((u16)mac[3] << 8) | mac[2];
357 paddrl = ((u16)mac[1] << 8) | mac[0]; 399 paddrl = ((u16)mac[1] << 8) | mac[0];
358 400
359 W16(ep, fen_paddrh, paddrh); 401 W16(ep, fen_paddrh, paddrh);
360 W16(ep, fen_paddrm, paddrm); 402 W16(ep, fen_paddrm, paddrm);
361 W16(ep, fen_paddrl, paddrl); 403 W16(ep, fen_paddrl, paddrl);
362 404
363 W16(ep, fen_taddrh, 0); 405 W16(ep, fen_taddrh, 0);
364 W16(ep, fen_taddrm, 0); 406 W16(ep, fen_taddrm, 0);
365 W16(ep, fen_taddrl, 0); 407 W16(ep, fen_taddrl, 0);
366 408
367 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */ 409 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
368 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */ 410 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
369 411
370 /* Clear stat counters, in case we ever enable RMON */ 412 /* Clear stat counters, in case we ever enable RMON */
371 W32(ep, fen_octc, 0); 413 W32(ep, fen_octc, 0);
372 W32(ep, fen_colc, 0); 414 W32(ep, fen_colc, 0);
373 W32(ep, fen_broc, 0); 415 W32(ep, fen_broc, 0);
374 W32(ep, fen_mulc, 0); 416 W32(ep, fen_mulc, 0);
375 W32(ep, fen_uspc, 0); 417 W32(ep, fen_uspc, 0);
376 W32(ep, fen_frgc, 0); 418 W32(ep, fen_frgc, 0);
377 W32(ep, fen_ospc, 0); 419 W32(ep, fen_ospc, 0);
378 W32(ep, fen_jbrc, 0); 420 W32(ep, fen_jbrc, 0);
379 W32(ep, fen_p64c, 0); 421 W32(ep, fen_p64c, 0);
380 W32(ep, fen_p65c, 0); 422 W32(ep, fen_p65c, 0);
381 W32(ep, fen_p128c, 0); 423 W32(ep, fen_p128c, 0);
382 W32(ep, fen_p256c, 0); 424 W32(ep, fen_p256c, 0);
383 W32(ep, fen_p512c, 0); 425 W32(ep, fen_p512c, 0);
384 W32(ep, fen_p1024c, 0); 426 W32(ep, fen_p1024c, 0);
385 427
386 W16(ep, fen_rfthr, 0); /* Suggested by manual */ 428 W16(ep, fen_rfthr, 0); /* Suggested by manual */
387 W16(ep, fen_rfcnt, 0); 429 W16(ep, fen_rfcnt, 0);
388 W16(ep, fen_cftype, 0); 430 W16(ep, fen_cftype, 0);
389 431
390 fs_init_bds(dev); 432 fs_init_bds(dev);
391 433
392 /* adjust to speed (for RMII mode) */ 434 /* adjust to speed (for RMII mode) */
393 if (fpi->use_rmii) { 435 if (fpi->use_rmii) {
394 if (fep->phydev->speed == 100) 436 if (fep->phydev->speed == 100)
395 C8(fcccp, fcc_gfemr, 0x20); 437 C8(fcccp, fcc_gfemr, 0x20);
396 else 438 else
397 S8(fcccp, fcc_gfemr, 0x20); 439 S8(fcccp, fcc_gfemr, 0x20);
398 } 440 }
399 441
400 fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX); 442 fcc_cr_cmd(fep, CPM_CR_INIT_TRX);
401 443
402 /* clear events */ 444 /* clear events */
403 W16(fccp, fcc_fcce, 0xffff); 445 W16(fccp, fcc_fcce, 0xffff);
404 446
405 /* Enable interrupts we wish to service */ 447 /* Enable interrupts we wish to service */
406 W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); 448 W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
407 449
408 /* Set GFMR to enable Ethernet operating mode */ 450 /* Set GFMR to enable Ethernet operating mode */
409 W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); 451 W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
410 452
411 /* set sync/delimiters */ 453 /* set sync/delimiters */
412 W16(fccp, fcc_fdsr, 0xd555); 454 W16(fccp, fcc_fdsr, 0xd555);
413 455
414 W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC); 456 W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
415 457
416 if (fpi->use_rmii) 458 if (fpi->use_rmii)
417 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); 459 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
418 460
419 /* adjust to duplex mode */ 461 /* adjust to duplex mode */
420 if (fep->phydev->duplex) 462 if (fep->phydev->duplex)
421 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 463 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
422 else 464 else
423 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 465 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
424 466
425 S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); 467 S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
426 } 468 }
427 469
428 static void stop(struct net_device *dev) 470 static void stop(struct net_device *dev)
429 { 471 {
430 struct fs_enet_private *fep = netdev_priv(dev); 472 struct fs_enet_private *fep = netdev_priv(dev);
431 fcc_t *fccp = fep->fcc.fccp; 473 fcc_t *fccp = fep->fcc.fccp;
432 474
433 /* stop ethernet */ 475 /* stop ethernet */
434 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT); 476 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
435 477
436 /* clear events */ 478 /* clear events */
437 W16(fccp, fcc_fcce, 0xffff); 479 W16(fccp, fcc_fcce, 0xffff);
438 480
439 /* clear interrupt mask */ 481 /* clear interrupt mask */
440 W16(fccp, fcc_fccm, 0); 482 W16(fccp, fcc_fccm, 0);
441 483
442 fs_cleanup_bds(dev); 484 fs_cleanup_bds(dev);
443 } 485 }
444 486
445 static void pre_request_irq(struct net_device *dev, int irq) 487 static void pre_request_irq(struct net_device *dev, int irq)
446 { 488 {
447 /* nothing */ 489 /* nothing */
448 } 490 }
449 491
450 static void post_free_irq(struct net_device *dev, int irq) 492 static void post_free_irq(struct net_device *dev, int irq)
451 { 493 {
452 /* nothing */ 494 /* nothing */
453 } 495 }
454 496
455 static void napi_clear_rx_event(struct net_device *dev) 497 static void napi_clear_rx_event(struct net_device *dev)
456 { 498 {
457 struct fs_enet_private *fep = netdev_priv(dev); 499 struct fs_enet_private *fep = netdev_priv(dev);
458 fcc_t *fccp = fep->fcc.fccp; 500 fcc_t *fccp = fep->fcc.fccp;
459 501
460 W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK); 502 W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
461 } 503 }
462 504
463 static void napi_enable_rx(struct net_device *dev) 505 static void napi_enable_rx(struct net_device *dev)
464 { 506 {
465 struct fs_enet_private *fep = netdev_priv(dev); 507 struct fs_enet_private *fep = netdev_priv(dev);
466 fcc_t *fccp = fep->fcc.fccp; 508 fcc_t *fccp = fep->fcc.fccp;
467 509
468 S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); 510 S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
469 } 511 }
470 512
471 static void napi_disable_rx(struct net_device *dev) 513 static void napi_disable_rx(struct net_device *dev)
472 { 514 {
473 struct fs_enet_private *fep = netdev_priv(dev); 515 struct fs_enet_private *fep = netdev_priv(dev);
474 fcc_t *fccp = fep->fcc.fccp; 516 fcc_t *fccp = fep->fcc.fccp;
475 517
476 C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK); 518 C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
477 } 519 }
478 520
479 static void rx_bd_done(struct net_device *dev) 521 static void rx_bd_done(struct net_device *dev)
480 { 522 {
481 /* nothing */ 523 /* nothing */
482 } 524 }
483 525
484 static void tx_kickstart(struct net_device *dev) 526 static void tx_kickstart(struct net_device *dev)
485 { 527 {
486 struct fs_enet_private *fep = netdev_priv(dev); 528 struct fs_enet_private *fep = netdev_priv(dev);
487 fcc_t *fccp = fep->fcc.fccp; 529 fcc_t *fccp = fep->fcc.fccp;
488 530
489 S16(fccp, fcc_ftodr, 0x8000); 531 S16(fccp, fcc_ftodr, 0x8000);
490 } 532 }
491 533
492 static u32 get_int_events(struct net_device *dev) 534 static u32 get_int_events(struct net_device *dev)
493 { 535 {
494 struct fs_enet_private *fep = netdev_priv(dev); 536 struct fs_enet_private *fep = netdev_priv(dev);
495 fcc_t *fccp = fep->fcc.fccp; 537 fcc_t *fccp = fep->fcc.fccp;
496 538
497 return (u32)R16(fccp, fcc_fcce); 539 return (u32)R16(fccp, fcc_fcce);
498 } 540 }
499 541
500 static void clear_int_events(struct net_device *dev, u32 int_events) 542 static void clear_int_events(struct net_device *dev, u32 int_events)
501 { 543 {
502 struct fs_enet_private *fep = netdev_priv(dev); 544 struct fs_enet_private *fep = netdev_priv(dev);
503 fcc_t *fccp = fep->fcc.fccp; 545 fcc_t *fccp = fep->fcc.fccp;
504 546
505 W16(fccp, fcc_fcce, int_events & 0xffff); 547 W16(fccp, fcc_fcce, int_events & 0xffff);
506 } 548 }
507 549
508 static void ev_error(struct net_device *dev, u32 int_events) 550 static void ev_error(struct net_device *dev, u32 int_events)
509 { 551 {
510 printk(KERN_WARNING DRV_MODULE_NAME 552 printk(KERN_WARNING DRV_MODULE_NAME
511 ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events); 553 ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
512 } 554 }
513 555
514 int get_regs(struct net_device *dev, void *p, int *sizep) 556 int get_regs(struct net_device *dev, void *p, int *sizep)
515 { 557 {
516 struct fs_enet_private *fep = netdev_priv(dev); 558 struct fs_enet_private *fep = netdev_priv(dev);
517 559
518 if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t)) 560 if (*sizep < sizeof(fcc_t) + sizeof(fcc_enet_t) + 1)
519 return -EINVAL; 561 return -EINVAL;
520 562
521 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t)); 563 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
522 p = (char *)p + sizeof(fcc_t); 564 p = (char *)p + sizeof(fcc_t);
523 565
524 memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
525 p = (char *)p + sizeof(fcc_c_t);
526
527 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t)); 566 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
567 p = (char *)p + sizeof(fcc_enet_t);
528 568
569 memcpy_fromio(p, fep->fcc.fcccp, 1);
529 return 0; 570 return 0;
530 } 571 }
531 572
532 int get_regs_len(struct net_device *dev) 573 int get_regs_len(struct net_device *dev)
533 { 574 {
534 return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t); 575 return sizeof(fcc_t) + sizeof(fcc_enet_t) + 1;
535 } 576 }
536 577
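With the change above, the ethtool register dump for an FCC is now three regions packed back to back: the FCC register block, the Ethernet parameter RAM, and a single byte from the separately mapped fcccp area. A minimal sketch of how a consumer could split the buffer, assuming the driver's fcc_t and fcc_enet_t layouts:

	/* Sketch only: split a buffer of get_regs_len(dev) bytes filled by get_regs(). */
	static void split_fcc_regs_dump(void *buf)
	{
		fcc_t *regs = buf;					/* FCC internal registers */
		fcc_enet_t *pram = (void *)((char *)buf + sizeof(fcc_t));	/* parameter RAM */
		u8 fcccp_byte = *(u8 *)(pram + 1);			/* last byte, from fcc.fcccp */

		(void)regs;
		(void)fcccp_byte;
	}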
537 /* Some transmit errors cause the transmitter to shut 578 /* Some transmit errors cause the transmitter to shut
538 * down. We now issue a restart transmit. Since the 579 * down. We now issue a restart transmit. Since the
539 * errors close the BD and update the pointers, the restart 580 * errors close the BD and update the pointers, the restart
540 * _should_ pick up without having to reset any of our 581 * _should_ pick up without having to reset any of our
541 * pointers either. Also, to work around 8260 device erratum 582 * pointers either. Also, to work around 8260 device erratum
542 * CPM37, we must disable and then re-enable the transmitter 583 * CPM37, we must disable and then re-enable the transmitter
543 * following a Late Collision, Underrun, or Retry Limit error. 584 * following a Late Collision, Underrun, or Retry Limit error.
544 */ 585 */
545 void tx_restart(struct net_device *dev) 586 void tx_restart(struct net_device *dev)
546 { 587 {
547 struct fs_enet_private *fep = netdev_priv(dev); 588 struct fs_enet_private *fep = netdev_priv(dev);
548 fcc_t *fccp = fep->fcc.fccp; 589 fcc_t *fccp = fep->fcc.fccp;
549 590
550 C32(fccp, fcc_gfmr, FCC_GFMR_ENT); 591 C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
551 udelay(10); 592 udelay(10);
552 S32(fccp, fcc_gfmr, FCC_GFMR_ENT); 593 S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
553 594
554 fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX); 595 fcc_cr_cmd(fep, CPM_CR_RESTART_TX);
555 } 596 }
556 597
557 /*************************************************************************/ 598 /*************************************************************************/
558 599
559 const struct fs_ops fs_fcc_ops = { 600 const struct fs_ops fs_fcc_ops = {
560 .setup_data = setup_data, 601 .setup_data = setup_data,
561 .cleanup_data = cleanup_data, 602 .cleanup_data = cleanup_data,
562 .set_multicast_list = set_multicast_list, 603 .set_multicast_list = set_multicast_list,
563 .restart = restart, 604 .restart = restart,
564 .stop = stop, 605 .stop = stop,
565 .pre_request_irq = pre_request_irq, 606 .pre_request_irq = pre_request_irq,
566 .post_free_irq = post_free_irq, 607 .post_free_irq = post_free_irq,
567 .napi_clear_rx_event = napi_clear_rx_event, 608 .napi_clear_rx_event = napi_clear_rx_event,
568 .napi_enable_rx = napi_enable_rx, 609 .napi_enable_rx = napi_enable_rx,
569 .napi_disable_rx = napi_disable_rx, 610 .napi_disable_rx = napi_disable_rx,
570 .rx_bd_done = rx_bd_done, 611 .rx_bd_done = rx_bd_done,
drivers/net/fs_enet/mac-fec.c
1 /* 1 /*
2 * Freescale Ethernet controllers 2 * Freescale Ethernet controllers
3 * 3 *
4 * Copyright (c) 2005 Intracom S.A. 4 * Copyright (c) 2005 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/string.h> 18 #include <linux/string.h>
19 #include <linux/ptrace.h> 19 #include <linux/ptrace.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/ioport.h> 21 #include <linux/ioport.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/netdevice.h> 26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h> 27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h> 28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/mii.h> 30 #include <linux/mii.h>
31 #include <linux/ethtool.h> 31 #include <linux/ethtool.h>
32 #include <linux/bitops.h> 32 #include <linux/bitops.h>
33 #include <linux/fs.h> 33 #include <linux/fs.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 35
36 #include <asm/irq.h> 36 #include <asm/irq.h>
37 #include <asm/uaccess.h> 37 #include <asm/uaccess.h>
38 38
39 #ifdef CONFIG_8xx 39 #ifdef CONFIG_8xx
40 #include <asm/8xx_immap.h> 40 #include <asm/8xx_immap.h>
41 #include <asm/pgtable.h> 41 #include <asm/pgtable.h>
42 #include <asm/mpc8xx.h> 42 #include <asm/mpc8xx.h>
43 #include <asm/commproc.h> 43 #include <asm/commproc.h>
44 #endif 44 #endif
45 45
46 #ifdef CONFIG_PPC_CPM_NEW_BINDING
47 #include <asm/of_device.h>
48 #endif
49
46 #include "fs_enet.h" 50 #include "fs_enet.h"
47 #include "fec.h" 51 #include "fec.h"
48 52
49 /*************************************************/ 53 /*************************************************/
50 54
51 #if defined(CONFIG_CPM1) 55 #if defined(CONFIG_CPM1)
52 /* for a CPM1 __raw_xxx's are sufficient */ 56 /* for a CPM1 __raw_xxx's are sufficient */
53 #define __fs_out32(addr, x) __raw_writel(x, addr) 57 #define __fs_out32(addr, x) __raw_writel(x, addr)
54 #define __fs_out16(addr, x) __raw_writew(x, addr) 58 #define __fs_out16(addr, x) __raw_writew(x, addr)
55 #define __fs_in32(addr) __raw_readl(addr) 59 #define __fs_in32(addr) __raw_readl(addr)
56 #define __fs_in16(addr) __raw_readw(addr) 60 #define __fs_in16(addr) __raw_readw(addr)
57 #else 61 #else
58 /* for others play it safe */ 62 /* for others play it safe */
59 #define __fs_out32(addr, x) out_be32(addr, x) 63 #define __fs_out32(addr, x) out_be32(addr, x)
60 #define __fs_out16(addr, x) out_be16(addr, x) 64 #define __fs_out16(addr, x) out_be16(addr, x)
61 #define __fs_in32(addr) in_be32(addr) 65 #define __fs_in32(addr) in_be32(addr)
62 #define __fs_in16(addr) in_be16(addr) 66 #define __fs_in16(addr) in_be16(addr)
63 #endif 67 #endif
64 68
65 /* write */ 69 /* write */
66 #define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v)) 70 #define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
67 71
68 /* read */ 72 /* read */
69 #define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg) 73 #define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
70 74
71 /* set bits */ 75 /* set bits */
72 #define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) 76 #define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
73 77
74 /* clear bits */ 78 /* clear bits */
75 #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) 79 #define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
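For reference while reading the code below: these accessors are plain MMIO read-modify-write helpers keyed on the fec_ field prefix. Expanded by hand (same macros, no new behaviour):

	/* FS(fecp, r_cntrl, FEC_RCNTRL_PROM) becomes: */
	__fs_out32(&fecp->fec_r_cntrl,
		   __fs_in32(&fecp->fec_r_cntrl) | FEC_RCNTRL_PROM);

	/* FC(fecp, r_cntrl, FEC_RCNTRL_PROM) clears the same bit: */
	__fs_out32(&fecp->fec_r_cntrl,
		   __fs_in32(&fecp->fec_r_cntrl) & ~FEC_RCNTRL_PROM);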
76 80
77 /* 81 /*
78 * Delay to wait for FEC reset command to complete (in us) 82 * Delay to wait for FEC reset command to complete (in us)
79 */ 83 */
80 #define FEC_RESET_DELAY 50 84 #define FEC_RESET_DELAY 50
81 85
82 static int whack_reset(fec_t * fecp) 86 static int whack_reset(fec_t * fecp)
83 { 87 {
84 int i; 88 int i;
85 89
86 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); 90 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
87 for (i = 0; i < FEC_RESET_DELAY; i++) { 91 for (i = 0; i < FEC_RESET_DELAY; i++) {
88 if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0) 92 if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
89 return 0; /* OK */ 93 return 0; /* OK */
90 udelay(1); 94 udelay(1);
91 } 95 }
92 96
93 return -1; 97 return -1;
94 } 98 }
95 99
96 static int do_pd_setup(struct fs_enet_private *fep) 100 static int do_pd_setup(struct fs_enet_private *fep)
97 { 101 {
102 #ifdef CONFIG_PPC_CPM_NEW_BINDING
103 struct of_device *ofdev = to_of_device(fep->dev);
104
105 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
106 if (fep->interrupt == NO_IRQ)
107 return -EINVAL;
108
109 fep->fec.fecp = of_iomap(ofdev->node, 0);
110 if (!fep->fec.fecp)
111 return -EINVAL;
112
113 return 0;
114 #else
98 struct platform_device *pdev = to_platform_device(fep->dev); 115 struct platform_device *pdev = to_platform_device(fep->dev);
99 struct resource *r; 116 struct resource *r;
100 117
101 /* Fill out IRQ field */ 118 /* Fill out IRQ field */
102 fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); 119 fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
103 if (fep->interrupt < 0) 120 if (fep->interrupt < 0)
104 return -EINVAL; 121 return -EINVAL;
105 122
106 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 123 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
107 fep->fec.fecp = ioremap(r->start, r->end - r->start + 1); 124 fep->fec.fecp = ioremap(r->start, r->end - r->start + 1);
108 125
109 if(fep->fec.fecp == NULL) 126 if(fep->fec.fecp == NULL)
110 return -EINVAL; 127 return -EINVAL;
111 128
112 return 0; 129 return 0;
113 130 #endif
114 } 131 }
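In the new-binding path the register block and interrupt come straight from the device node: of_iomap(ofdev->node, 0) maps "reg" entry 0 and of_irq_to_resource() resolves interrupt 0. As a rough equivalent (a sketch, assuming of_address_to_resource() as used by other powerpc drivers of this period), the mapping amounts to:

	struct resource res;

	if (of_address_to_resource(ofdev->node, 0, &res))
		return -EINVAL;
	fep->fec.fecp = ioremap(res.start, res.end - res.start + 1);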
115 132
116 #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 133 #define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
117 #define FEC_RX_EVENT (FEC_ENET_RXF) 134 #define FEC_RX_EVENT (FEC_ENET_RXF)
118 #define FEC_TX_EVENT (FEC_ENET_TXF) 135 #define FEC_TX_EVENT (FEC_ENET_TXF)
119 #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 136 #define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
120 FEC_ENET_BABT | FEC_ENET_EBERR) 137 FEC_ENET_BABT | FEC_ENET_EBERR)
121 138
122 static int setup_data(struct net_device *dev) 139 static int setup_data(struct net_device *dev)
123 { 140 {
124 struct fs_enet_private *fep = netdev_priv(dev); 141 struct fs_enet_private *fep = netdev_priv(dev);
125 142
126 if (do_pd_setup(fep) != 0) 143 if (do_pd_setup(fep) != 0)
127 return -EINVAL; 144 return -EINVAL;
128 145
129 fep->fec.hthi = 0; 146 fep->fec.hthi = 0;
130 fep->fec.htlo = 0; 147 fep->fec.htlo = 0;
131 148
132 fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK; 149 fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
133 fep->ev_rx = FEC_RX_EVENT; 150 fep->ev_rx = FEC_RX_EVENT;
134 fep->ev_tx = FEC_TX_EVENT; 151 fep->ev_tx = FEC_TX_EVENT;
135 fep->ev_err = FEC_ERR_EVENT_MSK; 152 fep->ev_err = FEC_ERR_EVENT_MSK;
136 153
137 return 0; 154 return 0;
138 } 155 }
139 156
140 static int allocate_bd(struct net_device *dev) 157 static int allocate_bd(struct net_device *dev)
141 { 158 {
142 struct fs_enet_private *fep = netdev_priv(dev); 159 struct fs_enet_private *fep = netdev_priv(dev);
143 const struct fs_platform_info *fpi = fep->fpi; 160 const struct fs_platform_info *fpi = fep->fpi;
144 161
145 fep->ring_base = dma_alloc_coherent(fep->dev, 162 fep->ring_base = dma_alloc_coherent(fep->dev,
146 (fpi->tx_ring + fpi->rx_ring) * 163 (fpi->tx_ring + fpi->rx_ring) *
147 sizeof(cbd_t), &fep->ring_mem_addr, 164 sizeof(cbd_t), &fep->ring_mem_addr,
148 GFP_KERNEL); 165 GFP_KERNEL);
149 if (fep->ring_base == NULL) 166 if (fep->ring_base == NULL)
150 return -ENOMEM; 167 return -ENOMEM;
151 168
152 return 0; 169 return 0;
153 } 170 }
154 171
155 static void free_bd(struct net_device *dev) 172 static void free_bd(struct net_device *dev)
156 { 173 {
157 struct fs_enet_private *fep = netdev_priv(dev); 174 struct fs_enet_private *fep = netdev_priv(dev);
158 const struct fs_platform_info *fpi = fep->fpi; 175 const struct fs_platform_info *fpi = fep->fpi;
159 176
160 if(fep->ring_base) 177 if(fep->ring_base)
161 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) 178 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
162 * sizeof(cbd_t), 179 * sizeof(cbd_t),
163 fep->ring_base, 180 fep->ring_base,
164 fep->ring_mem_addr); 181 fep->ring_mem_addr);
165 } 182 }
166 183
167 static void cleanup_data(struct net_device *dev) 184 static void cleanup_data(struct net_device *dev)
168 { 185 {
169 /* nothing */ 186 /* nothing */
170 } 187 }
171 188
172 static void set_promiscuous_mode(struct net_device *dev) 189 static void set_promiscuous_mode(struct net_device *dev)
173 { 190 {
174 struct fs_enet_private *fep = netdev_priv(dev); 191 struct fs_enet_private *fep = netdev_priv(dev);
175 fec_t *fecp = fep->fec.fecp; 192 fec_t *fecp = fep->fec.fecp;
176 193
177 FS(fecp, r_cntrl, FEC_RCNTRL_PROM); 194 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
178 } 195 }
179 196
180 static void set_multicast_start(struct net_device *dev) 197 static void set_multicast_start(struct net_device *dev)
181 { 198 {
182 struct fs_enet_private *fep = netdev_priv(dev); 199 struct fs_enet_private *fep = netdev_priv(dev);
183 200
184 fep->fec.hthi = 0; 201 fep->fec.hthi = 0;
185 fep->fec.htlo = 0; 202 fep->fec.htlo = 0;
186 } 203 }
187 204
188 static void set_multicast_one(struct net_device *dev, const u8 *mac) 205 static void set_multicast_one(struct net_device *dev, const u8 *mac)
189 { 206 {
190 struct fs_enet_private *fep = netdev_priv(dev); 207 struct fs_enet_private *fep = netdev_priv(dev);
191 int temp, hash_index, i, j; 208 int temp, hash_index, i, j;
192 u32 crc, csrVal; 209 u32 crc, csrVal;
193 u8 byte, msb; 210 u8 byte, msb;
194 211
195 crc = 0xffffffff; 212 crc = 0xffffffff;
196 for (i = 0; i < 6; i++) { 213 for (i = 0; i < 6; i++) {
197 byte = mac[i]; 214 byte = mac[i];
198 for (j = 0; j < 8; j++) { 215 for (j = 0; j < 8; j++) {
199 msb = crc >> 31; 216 msb = crc >> 31;
200 crc <<= 1; 217 crc <<= 1;
201 if (msb ^ (byte & 0x1)) 218 if (msb ^ (byte & 0x1))
202 crc ^= FEC_CRC_POLY; 219 crc ^= FEC_CRC_POLY;
203 byte >>= 1; 220 byte >>= 1;
204 } 221 }
205 } 222 }
206 223
207 temp = (crc & 0x3f) >> 1; 224 temp = (crc & 0x3f) >> 1;
208 hash_index = ((temp & 0x01) << 4) | 225 hash_index = ((temp & 0x01) << 4) |
209 ((temp & 0x02) << 2) | 226 ((temp & 0x02) << 2) |
210 ((temp & 0x04)) | 227 ((temp & 0x04)) |
211 ((temp & 0x08) >> 2) | 228 ((temp & 0x08) >> 2) |
212 ((temp & 0x10) >> 4); 229 ((temp & 0x10) >> 4);
213 csrVal = 1 << hash_index; 230 csrVal = 1 << hash_index;
214 if (crc & 1) 231 if (crc & 1)
215 fep->fec.hthi |= csrVal; 232 fep->fec.hthi |= csrVal;
216 else 233 else
217 fep->fec.htlo |= csrVal; 234 fep->fec.htlo |= csrVal;
218 } 235 }
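The placement above uses the six low-order CRC bits to pick one of 64 hash-table bits: CRC bit 0 selects the high or low 32-bit register, and CRC bits 5..1, bit-reversed, select the bit within it. The same index computation with the reversal factored out (helper name is illustrative only):

	/* Reverse the low five bits of x (bit 0 <-> bit 4, bit 1 <-> bit 3). */
	static inline u32 rev5(u32 x)
	{
		return ((x & 0x01) << 4) | ((x & 0x02) << 2) | (x & 0x04) |
		       ((x & 0x08) >> 2) | ((x & 0x10) >> 4);
	}

	/*
	 * hash_index = rev5((crc >> 1) & 0x1f);
	 * crc & 1 set   -> bit lands in fep->fec.hthi
	 * crc & 1 clear -> bit lands in fep->fec.htlo
	 */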
219 236
220 static void set_multicast_finish(struct net_device *dev) 237 static void set_multicast_finish(struct net_device *dev)
221 { 238 {
222 struct fs_enet_private *fep = netdev_priv(dev); 239 struct fs_enet_private *fep = netdev_priv(dev);
223 fec_t *fecp = fep->fec.fecp; 240 fec_t *fecp = fep->fec.fecp;
224 241
225 /* if all multi or too many multicasts, just enable all */ 242 /* if all multi or too many multicasts, just enable all */
226 if ((dev->flags & IFF_ALLMULTI) != 0 || 243 if ((dev->flags & IFF_ALLMULTI) != 0 ||
227 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) { 244 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
228 fep->fec.hthi = 0xffffffffU; 245 fep->fec.hthi = 0xffffffffU;
229 fep->fec.htlo = 0xffffffffU; 246 fep->fec.htlo = 0xffffffffU;
230 } 247 }
231 248
232 FC(fecp, r_cntrl, FEC_RCNTRL_PROM); 249 FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
233 FW(fecp, hash_table_high, fep->fec.hthi); 250 FW(fecp, hash_table_high, fep->fec.hthi);
234 FW(fecp, hash_table_low, fep->fec.htlo); 251 FW(fecp, hash_table_low, fep->fec.htlo);
235 } 252 }
236 253
237 static void set_multicast_list(struct net_device *dev) 254 static void set_multicast_list(struct net_device *dev)
238 { 255 {
239 struct dev_mc_list *pmc; 256 struct dev_mc_list *pmc;
240 257
241 if ((dev->flags & IFF_PROMISC) == 0) { 258 if ((dev->flags & IFF_PROMISC) == 0) {
242 set_multicast_start(dev); 259 set_multicast_start(dev);
243 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 260 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
244 set_multicast_one(dev, pmc->dmi_addr); 261 set_multicast_one(dev, pmc->dmi_addr);
245 set_multicast_finish(dev); 262 set_multicast_finish(dev);
246 } else 263 } else
247 set_promiscuous_mode(dev); 264 set_promiscuous_mode(dev);
248 } 265 }
249 266
250 static void restart(struct net_device *dev) 267 static void restart(struct net_device *dev)
251 { 268 {
252 #ifdef CONFIG_DUET 269 #ifdef CONFIG_DUET
253 immap_t *immap = fs_enet_immap; 270 immap_t *immap = fs_enet_immap;
254 u32 cptr; 271 u32 cptr;
255 #endif 272 #endif
256 struct fs_enet_private *fep = netdev_priv(dev); 273 struct fs_enet_private *fep = netdev_priv(dev);
257 fec_t *fecp = fep->fec.fecp; 274 fec_t *fecp = fep->fec.fecp;
258 const struct fs_platform_info *fpi = fep->fpi; 275 const struct fs_platform_info *fpi = fep->fpi;
259 dma_addr_t rx_bd_base_phys, tx_bd_base_phys; 276 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
260 int r; 277 int r;
261 u32 addrhi, addrlo; 278 u32 addrhi, addrlo;
262 279
263 struct mii_bus* mii = fep->phydev->bus; 280 struct mii_bus* mii = fep->phydev->bus;
264 struct fec_info* fec_inf = mii->priv; 281 struct fec_info* fec_inf = mii->priv;
265 282
266 r = whack_reset(fep->fec.fecp); 283 r = whack_reset(fep->fec.fecp);
267 if (r != 0) 284 if (r != 0)
268 printk(KERN_ERR DRV_MODULE_NAME 285 printk(KERN_ERR DRV_MODULE_NAME
269 ": %s FEC Reset FAILED!\n", dev->name); 286 ": %s FEC Reset FAILED!\n", dev->name);
270 /* 287 /*
271 * Set station address. 288 * Set station address.
272 */ 289 */
273 addrhi = ((u32) dev->dev_addr[0] << 24) | 290 addrhi = ((u32) dev->dev_addr[0] << 24) |
274 ((u32) dev->dev_addr[1] << 16) | 291 ((u32) dev->dev_addr[1] << 16) |
275 ((u32) dev->dev_addr[2] << 8) | 292 ((u32) dev->dev_addr[2] << 8) |
276 (u32) dev->dev_addr[3]; 293 (u32) dev->dev_addr[3];
277 addrlo = ((u32) dev->dev_addr[4] << 24) | 294 addrlo = ((u32) dev->dev_addr[4] << 24) |
278 ((u32) dev->dev_addr[5] << 16); 295 ((u32) dev->dev_addr[5] << 16);
279 FW(fecp, addr_low, addrhi); 296 FW(fecp, addr_low, addrhi);
280 FW(fecp, addr_high, addrlo); 297 FW(fecp, addr_high, addrlo);
281 298
282 /* 299 /*
283 * Reset all multicast. 300 * Reset all multicast.
284 */ 301 */
285 FW(fecp, hash_table_high, fep->fec.hthi); 302 FW(fecp, hash_table_high, fep->fec.hthi);
286 FW(fecp, hash_table_low, fep->fec.htlo); 303 FW(fecp, hash_table_low, fep->fec.htlo);
287 304
288 /* 305 /*
289 * Set maximum receive buffer size. 306 * Set maximum receive buffer size.
290 */ 307 */
291 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); 308 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
292 FW(fecp, r_hash, PKT_MAXBUF_SIZE); 309 FW(fecp, r_hash, PKT_MAXBUF_SIZE);
293 310
294 /* get physical address */ 311 /* get physical address */
295 rx_bd_base_phys = fep->ring_mem_addr; 312 rx_bd_base_phys = fep->ring_mem_addr;
296 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring; 313 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
297 314
298 /* 315 /*
299 * Set receive and transmit descriptor base. 316 * Set receive and transmit descriptor base.
300 */ 317 */
301 FW(fecp, r_des_start, rx_bd_base_phys); 318 FW(fecp, r_des_start, rx_bd_base_phys);
302 FW(fecp, x_des_start, tx_bd_base_phys); 319 FW(fecp, x_des_start, tx_bd_base_phys);
303 320
304 fs_init_bds(dev); 321 fs_init_bds(dev);
305 322
306 /* 323 /*
307 * Enable big endian and don't care about SDMA FC. 324 * Enable big endian and don't care about SDMA FC.
308 */ 325 */
309 FW(fecp, fun_code, 0x78000000); 326 FW(fecp, fun_code, 0x78000000);
310 327
311 /* 328 /*
312 * Set MII speed. 329 * Set MII speed.
313 */ 330 */
314 FW(fecp, mii_speed, fec_inf->mii_speed); 331 FW(fecp, mii_speed, fec_inf->mii_speed);
315 332
316 /* 333 /*
317 * Clear any outstanding interrupt. 334 * Clear any outstanding interrupt.
318 */ 335 */
319 FW(fecp, ievent, 0xffc0); 336 FW(fecp, ievent, 0xffc0);
320 #ifndef CONFIG_PPC_MERGE 337 #ifndef CONFIG_PPC_MERGE
321 FW(fecp, ivec, (fep->interrupt / 2) << 29); 338 FW(fecp, ivec, (fep->interrupt / 2) << 29);
322 #else 339 #else
323 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29); 340 FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
324 #endif 341 #endif
325 342
326 /* 343 /*
327 * adjust to speed (only for DUET & RMII) 344 * adjust to speed (only for DUET & RMII)
328 */ 345 */
329 #ifdef CONFIG_DUET 346 #ifdef CONFIG_DUET
330 if (fpi->use_rmii) { 347 if (fpi->use_rmii) {
331 cptr = in_be32(&immap->im_cpm.cp_cptr); 348 cptr = in_be32(&immap->im_cpm.cp_cptr);
332 switch (fs_get_fec_index(fpi->fs_no)) { 349 switch (fs_get_fec_index(fpi->fs_no)) {
333 case 0: 350 case 0:
334 cptr |= 0x100; 351 cptr |= 0x100;
335 if (fep->speed == 10) 352 if (fep->speed == 10)
336 cptr |= 0x0000010; 353 cptr |= 0x0000010;
337 else if (fep->speed == 100) 354 else if (fep->speed == 100)
338 cptr &= ~0x0000010; 355 cptr &= ~0x0000010;
339 break; 356 break;
340 case 1: 357 case 1:
341 cptr |= 0x80; 358 cptr |= 0x80;
342 if (fep->speed == 10) 359 if (fep->speed == 10)
343 cptr |= 0x0000008; 360 cptr |= 0x0000008;
344 else if (fep->speed == 100) 361 else if (fep->speed == 100)
345 cptr &= ~0x0000008; 362 cptr &= ~0x0000008;
346 break; 363 break;
347 default: 364 default:
348 BUG(); /* should never happen */ 365 BUG(); /* should never happen */
349 break; 366 break;
350 } 367 }
351 out_be32(&immap->im_cpm.cp_cptr, cptr); 368 out_be32(&immap->im_cpm.cp_cptr, cptr);
352 } 369 }
353 #endif 370 #endif
354 371
355 372
356 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 373 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
357 /* 374 /*
358 * adjust to duplex mode 375 * adjust to duplex mode
359 */ 376 */
360 if (fep->phydev->duplex) { 377 if (fep->phydev->duplex) {
361 FC(fecp, r_cntrl, FEC_RCNTRL_DRT); 378 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
362 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ 379 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
363 } else { 380 } else {
364 FS(fecp, r_cntrl, FEC_RCNTRL_DRT); 381 FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
365 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ 382 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
366 } 383 }
367 384
368 /* 385 /*
369 * Enable interrupts we wish to service. 386 * Enable interrupts we wish to service.
370 */ 387 */
371 FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | 388 FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
372 FEC_ENET_RXF | FEC_ENET_RXB); 389 FEC_ENET_RXF | FEC_ENET_RXB);
373 390
374 /* 391 /*
375 * And last, enable the transmit and receive processing. 392 * And last, enable the transmit and receive processing.
376 */ 393 */
377 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 394 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
378 FW(fecp, r_des_active, 0x01000000); 395 FW(fecp, r_des_active, 0x01000000);
379 } 396 }
380 397
381 static void stop(struct net_device *dev) 398 static void stop(struct net_device *dev)
382 { 399 {
383 struct fs_enet_private *fep = netdev_priv(dev); 400 struct fs_enet_private *fep = netdev_priv(dev);
384 const struct fs_platform_info *fpi = fep->fpi; 401 const struct fs_platform_info *fpi = fep->fpi;
385 fec_t *fecp = fep->fec.fecp; 402 fec_t *fecp = fep->fec.fecp;
386 403
387 struct fec_info* feci= fep->phydev->bus->priv; 404 struct fec_info* feci= fep->phydev->bus->priv;
388 405
389 int i; 406 int i;
390 407
391 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) 408 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
392 return; /* already down */ 409 return; /* already down */
393 410
394 FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ 411 FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
395 for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && 412 for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
396 i < FEC_RESET_DELAY; i++) 413 i < FEC_RESET_DELAY; i++)
397 udelay(1); 414 udelay(1);
398 415
399 if (i == FEC_RESET_DELAY) 416 if (i == FEC_RESET_DELAY)
400 printk(KERN_WARNING DRV_MODULE_NAME 417 printk(KERN_WARNING DRV_MODULE_NAME
401 ": %s FEC timeout on graceful transmit stop\n", 418 ": %s FEC timeout on graceful transmit stop\n",
402 dev->name); 419 dev->name);
403 /* 420 /*
404 * Disable FEC. Leave only MII interrupts enabled. 421 * Disable FEC. Leave only MII interrupts enabled.
405 */ 422 */
406 FW(fecp, imask, 0); 423 FW(fecp, imask, 0);
407 FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN); 424 FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
408 425
409 fs_cleanup_bds(dev); 426 fs_cleanup_bds(dev);
410 427
411 /* shut down FEC1? that's where the mii bus is */ 428 /* shut down FEC1? that's where the mii bus is */
412 if (fpi->has_phy) { 429 if (fpi->has_phy) {
413 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 430 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
414 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 431 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
415 FW(fecp, ievent, FEC_ENET_MII); 432 FW(fecp, ievent, FEC_ENET_MII);
416 FW(fecp, mii_speed, feci->mii_speed); 433 FW(fecp, mii_speed, feci->mii_speed);
417 } 434 }
418 } 435 }
419 436
420 static void pre_request_irq(struct net_device *dev, int irq) 437 static void pre_request_irq(struct net_device *dev, int irq)
421 { 438 {
422 #ifndef CONFIG_PPC_MERGE 439 #ifndef CONFIG_PPC_MERGE
423 immap_t *immap = fs_enet_immap; 440 immap_t *immap = fs_enet_immap;
424 u32 siel; 441 u32 siel;
425 442
426 /* SIU interrupt */ 443 /* SIU interrupt */
427 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) { 444 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
428 445
429 siel = in_be32(&immap->im_siu_conf.sc_siel); 446 siel = in_be32(&immap->im_siu_conf.sc_siel);
430 if ((irq & 1) == 0) 447 if ((irq & 1) == 0)
431 siel |= (0x80000000 >> irq); 448 siel |= (0x80000000 >> irq);
432 else 449 else
433 siel &= ~(0x80000000 >> (irq & ~1)); 450 siel &= ~(0x80000000 >> (irq & ~1));
434 out_be32(&immap->im_siu_conf.sc_siel, siel); 451 out_be32(&immap->im_siu_conf.sc_siel, siel);
435 } 452 }
436 #endif 453 #endif
437 } 454 }
438 455
439 static void post_free_irq(struct net_device *dev, int irq) 456 static void post_free_irq(struct net_device *dev, int irq)
440 { 457 {
441 /* nothing */ 458 /* nothing */
442 } 459 }
443 460
444 static void napi_clear_rx_event(struct net_device *dev) 461 static void napi_clear_rx_event(struct net_device *dev)
445 { 462 {
446 struct fs_enet_private *fep = netdev_priv(dev); 463 struct fs_enet_private *fep = netdev_priv(dev);
447 fec_t *fecp = fep->fec.fecp; 464 fec_t *fecp = fep->fec.fecp;
448 465
449 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK); 466 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
450 } 467 }
451 468
452 static void napi_enable_rx(struct net_device *dev) 469 static void napi_enable_rx(struct net_device *dev)
453 { 470 {
454 struct fs_enet_private *fep = netdev_priv(dev); 471 struct fs_enet_private *fep = netdev_priv(dev);
455 fec_t *fecp = fep->fec.fecp; 472 fec_t *fecp = fep->fec.fecp;
456 473
457 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK); 474 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
458 } 475 }
459 476
460 static void napi_disable_rx(struct net_device *dev) 477 static void napi_disable_rx(struct net_device *dev)
461 { 478 {
462 struct fs_enet_private *fep = netdev_priv(dev); 479 struct fs_enet_private *fep = netdev_priv(dev);
463 fec_t *fecp = fep->fec.fecp; 480 fec_t *fecp = fep->fec.fecp;
464 481
465 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK); 482 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
466 } 483 }
467 484
468 static void rx_bd_done(struct net_device *dev) 485 static void rx_bd_done(struct net_device *dev)
469 { 486 {
470 struct fs_enet_private *fep = netdev_priv(dev); 487 struct fs_enet_private *fep = netdev_priv(dev);
471 fec_t *fecp = fep->fec.fecp; 488 fec_t *fecp = fep->fec.fecp;
472 489
473 FW(fecp, r_des_active, 0x01000000); 490 FW(fecp, r_des_active, 0x01000000);
474 } 491 }
475 492
476 static void tx_kickstart(struct net_device *dev) 493 static void tx_kickstart(struct net_device *dev)
477 { 494 {
478 struct fs_enet_private *fep = netdev_priv(dev); 495 struct fs_enet_private *fep = netdev_priv(dev);
479 fec_t *fecp = fep->fec.fecp; 496 fec_t *fecp = fep->fec.fecp;
480 497
481 FW(fecp, x_des_active, 0x01000000); 498 FW(fecp, x_des_active, 0x01000000);
482 } 499 }
483 500
484 static u32 get_int_events(struct net_device *dev) 501 static u32 get_int_events(struct net_device *dev)
485 { 502 {
486 struct fs_enet_private *fep = netdev_priv(dev); 503 struct fs_enet_private *fep = netdev_priv(dev);
487 fec_t *fecp = fep->fec.fecp; 504 fec_t *fecp = fep->fec.fecp;
488 505
489 return FR(fecp, ievent) & FR(fecp, imask); 506 return FR(fecp, ievent) & FR(fecp, imask);
490 } 507 }
491 508
492 static void clear_int_events(struct net_device *dev, u32 int_events) 509 static void clear_int_events(struct net_device *dev, u32 int_events)
493 { 510 {
494 struct fs_enet_private *fep = netdev_priv(dev); 511 struct fs_enet_private *fep = netdev_priv(dev);
495 fec_t *fecp = fep->fec.fecp; 512 fec_t *fecp = fep->fec.fecp;
496 513
497 FW(fecp, ievent, int_events); 514 FW(fecp, ievent, int_events);
498 } 515 }
499 516
500 static void ev_error(struct net_device *dev, u32 int_events) 517 static void ev_error(struct net_device *dev, u32 int_events)
501 { 518 {
502 printk(KERN_WARNING DRV_MODULE_NAME 519 printk(KERN_WARNING DRV_MODULE_NAME
503 ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events); 520 ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
504 } 521 }
505 522
506 int get_regs(struct net_device *dev, void *p, int *sizep) 523 int get_regs(struct net_device *dev, void *p, int *sizep)
507 { 524 {
508 struct fs_enet_private *fep = netdev_priv(dev); 525 struct fs_enet_private *fep = netdev_priv(dev);
509 526
510 if (*sizep < sizeof(fec_t)) 527 if (*sizep < sizeof(fec_t))
511 return -EINVAL; 528 return -EINVAL;
512 529
513 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t)); 530 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
514 531
515 return 0; 532 return 0;
516 } 533 }
517 534
518 int get_regs_len(struct net_device *dev) 535 int get_regs_len(struct net_device *dev)
519 { 536 {
520 return sizeof(fec_t); 537 return sizeof(fec_t);
521 } 538 }
522 539
523 void tx_restart(struct net_device *dev) 540 void tx_restart(struct net_device *dev)
524 { 541 {
525 /* nothing */ 542 /* nothing */
526 } 543 }
527 544
528 /*************************************************************************/ 545 /*************************************************************************/
529 546
530 const struct fs_ops fs_fec_ops = { 547 const struct fs_ops fs_fec_ops = {
531 .setup_data = setup_data, 548 .setup_data = setup_data,
532 .cleanup_data = cleanup_data, 549 .cleanup_data = cleanup_data,
533 .set_multicast_list = set_multicast_list, 550 .set_multicast_list = set_multicast_list,
534 .restart = restart, 551 .restart = restart,
535 .stop = stop, 552 .stop = stop,
536 .pre_request_irq = pre_request_irq, 553 .pre_request_irq = pre_request_irq,
537 .post_free_irq = post_free_irq, 554 .post_free_irq = post_free_irq,
538 .napi_clear_rx_event = napi_clear_rx_event, 555 .napi_clear_rx_event = napi_clear_rx_event,
539 .napi_enable_rx = napi_enable_rx, 556 .napi_enable_rx = napi_enable_rx,
540 .napi_disable_rx = napi_disable_rx, 557 .napi_disable_rx = napi_disable_rx,
541 .rx_bd_done = rx_bd_done, 558 .rx_bd_done = rx_bd_done,
542 .tx_kickstart = tx_kickstart, 559 .tx_kickstart = tx_kickstart,
543 .get_int_events = get_int_events, 560 .get_int_events = get_int_events,
544 .clear_int_events = clear_int_events, 561 .clear_int_events = clear_int_events,
545 .ev_error = ev_error, 562 .ev_error = ev_error,
546 .get_regs = get_regs, 563 .get_regs = get_regs,
547 .get_regs_len = get_regs_len, 564 .get_regs_len = get_regs_len,
548 .tx_restart = tx_restart, 565 .tx_restart = tx_restart,
549 .allocate_bd = allocate_bd, 566 .allocate_bd = allocate_bd,
550 .free_bd = free_bd, 567 .free_bd = free_bd,
551 }; 568 };
552 569
553 570
drivers/net/fs_enet/mac-scc.c
1 /* 1 /*
2 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx. 2 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
3 * 3 *
4 * Copyright (c) 2003 Intracom S.A. 4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/string.h> 18 #include <linux/string.h>
19 #include <linux/ptrace.h> 19 #include <linux/ptrace.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/ioport.h> 21 #include <linux/ioport.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/netdevice.h> 26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h> 27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h> 28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/mii.h> 30 #include <linux/mii.h>
31 #include <linux/ethtool.h> 31 #include <linux/ethtool.h>
32 #include <linux/bitops.h> 32 #include <linux/bitops.h>
33 #include <linux/fs.h> 33 #include <linux/fs.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 35
36 #include <asm/irq.h> 36 #include <asm/irq.h>
37 #include <asm/uaccess.h> 37 #include <asm/uaccess.h>
38 38
39 #ifdef CONFIG_8xx 39 #ifdef CONFIG_8xx
40 #include <asm/8xx_immap.h> 40 #include <asm/8xx_immap.h>
41 #include <asm/pgtable.h> 41 #include <asm/pgtable.h>
42 #include <asm/mpc8xx.h> 42 #include <asm/mpc8xx.h>
43 #include <asm/commproc.h> 43 #include <asm/commproc.h>
44 #endif 44 #endif
45 45
46 #ifdef CONFIG_PPC_CPM_NEW_BINDING
47 #include <asm/of_platform.h>
48 #endif
49
46 #include "fs_enet.h" 50 #include "fs_enet.h"
47 51
48 /*************************************************/ 52 /*************************************************/
49 53
50 #if defined(CONFIG_CPM1) 54 #if defined(CONFIG_CPM1)
51 /* for an 8xx __raw_xxx's are sufficient */ 55 /* for an 8xx __raw_xxx's are sufficient */
52 #define __fs_out32(addr, x) __raw_writel(x, addr) 56 #define __fs_out32(addr, x) __raw_writel(x, addr)
53 #define __fs_out16(addr, x) __raw_writew(x, addr) 57 #define __fs_out16(addr, x) __raw_writew(x, addr)
54 #define __fs_out8(addr, x) __raw_writeb(x, addr) 58 #define __fs_out8(addr, x) __raw_writeb(x, addr)
55 #define __fs_in32(addr) __raw_readl(addr) 59 #define __fs_in32(addr) __raw_readl(addr)
56 #define __fs_in16(addr) __raw_readw(addr) 60 #define __fs_in16(addr) __raw_readw(addr)
57 #define __fs_in8(addr) __raw_readb(addr) 61 #define __fs_in8(addr) __raw_readb(addr)
58 #else 62 #else
59 /* for others play it safe */ 63 /* for others play it safe */
60 #define __fs_out32(addr, x) out_be32(addr, x) 64 #define __fs_out32(addr, x) out_be32(addr, x)
61 #define __fs_out16(addr, x) out_be16(addr, x) 65 #define __fs_out16(addr, x) out_be16(addr, x)
62 #define __fs_in32(addr) in_be32(addr) 66 #define __fs_in32(addr) in_be32(addr)
63 #define __fs_in16(addr) in_be16(addr) 67 #define __fs_in16(addr) in_be16(addr)
64 #endif 68 #endif
65 69
66 /* write, read, set bits, clear bits */ 70 /* write, read, set bits, clear bits */
67 #define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v)) 71 #define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
68 #define R32(_p, _m) __fs_in32(&(_p)->_m) 72 #define R32(_p, _m) __fs_in32(&(_p)->_m)
69 #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v)) 73 #define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
70 #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v)) 74 #define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
71 75
72 #define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v)) 76 #define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
73 #define R16(_p, _m) __fs_in16(&(_p)->_m) 77 #define R16(_p, _m) __fs_in16(&(_p)->_m)
74 #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v)) 78 #define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
75 #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v)) 79 #define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
76 80
77 #define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v)) 81 #define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
78 #define R8(_p, _m) __fs_in8(&(_p)->_m) 82 #define R8(_p, _m) __fs_in8(&(_p)->_m)
79 #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v)) 83 #define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
80 #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v)) 84 #define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
81 85
82 #define SCC_MAX_MULTICAST_ADDRS 64 86 #define SCC_MAX_MULTICAST_ADDRS 64
83 87
84 /* 88 /*
85 * Delay to wait for SCC reset command to complete (in us) 89 * Delay to wait for SCC reset command to complete (in us)
86 */ 90 */
87 #define SCC_RESET_DELAY 50 91 #define SCC_RESET_DELAY 50
88 #define MAX_CR_CMD_LOOPS 10000 92 #define MAX_CR_CMD_LOOPS 10000
89 93
90 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op) 94 static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
91 { 95 {
92 cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm; 96 const struct fs_platform_info *fpi = fep->fpi;
93 u32 v, ch; 97 int i;
94 int i = 0;
95 98
96 ch = fep->scc.idx << 2; 99 W16(cpmp, cp_cpcr, fpi->cp_command | CPM_CR_FLG | (op << 8));
97 v = mk_cr_cmd(ch, op);
98 W16(cpmp, cp_cpcr, v | CPM_CR_FLG);
99 for (i = 0; i < MAX_CR_CMD_LOOPS; i++) 100 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
100 if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0) 101 if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
101 break; 102 return 0;
102 103
103 if (i >= MAX_CR_CMD_LOOPS) { 104 printk(KERN_ERR "%s(): Not able to issue CPM command\n",
104 printk(KERN_ERR "%s(): Not able to issue CPM command\n", 105 __FUNCTION__);
105 __FUNCTION__); 106 return 1;
106 return 1;
107 }
108 return 0;
109 } 107 }
110 108
111 static int do_pd_setup(struct fs_enet_private *fep) 109 static int do_pd_setup(struct fs_enet_private *fep)
112 { 110 {
111 #ifdef CONFIG_PPC_CPM_NEW_BINDING
112 struct of_device *ofdev = to_of_device(fep->dev);
113
114 fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
115 if (fep->interrupt == NO_IRQ)
116 return -EINVAL;
117
118 fep->scc.sccp = of_iomap(ofdev->node, 0);
119 if (!fep->scc.sccp)
120 return -EINVAL;
121
122 fep->scc.ep = of_iomap(ofdev->node, 1);
123 if (!fep->scc.ep) {
124 iounmap(fep->scc.sccp);
125 return -EINVAL;
126 }
127 #else
113 struct platform_device *pdev = to_platform_device(fep->dev); 128 struct platform_device *pdev = to_platform_device(fep->dev);
114 struct resource *r; 129 struct resource *r;
115 130
116 /* Fill out IRQ field */ 131 /* Fill out IRQ field */
117 fep->interrupt = platform_get_irq_byname(pdev, "interrupt"); 132 fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
118 if (fep->interrupt < 0) 133 if (fep->interrupt < 0)
119 return -EINVAL; 134 return -EINVAL;
120 135
121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 136 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
122 fep->scc.sccp = ioremap(r->start, r->end - r->start + 1); 137 fep->scc.sccp = ioremap(r->start, r->end - r->start + 1);
123 138
124 if (fep->scc.sccp == NULL) 139 if (fep->scc.sccp == NULL)
125 return -EINVAL; 140 return -EINVAL;
126 141
127 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram"); 142 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
128 fep->scc.ep = ioremap(r->start, r->end - r->start + 1); 143 fep->scc.ep = ioremap(r->start, r->end - r->start + 1);
129 144
130 if (fep->scc.ep == NULL) 145 if (fep->scc.ep == NULL)
131 return -EINVAL; 146 return -EINVAL;
147 #endif
132 148
133 return 0; 149 return 0;
134 } 150 }
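Here the new binding maps two regions: "reg" entry 0 is the SCC register block (scc.sccp) and entry 1 is the parameter RAM (scc.ep), the same two areas the old platform binding exposed as the "regs" and "pram" named resources. Note the error path also unmaps sccp when the second mapping fails:

	/*
	 * reg[0] -> scc_t registers   (was platform resource "regs")
	 * reg[1] -> scc_enet_t pram   (was platform resource "pram")
	 */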
135 151
136 #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB) 152 #define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
137 #define SCC_RX_EVENT (SCCE_ENET_RXF) 153 #define SCC_RX_EVENT (SCCE_ENET_RXF)
138 #define SCC_TX_EVENT (SCCE_ENET_TXB) 154 #define SCC_TX_EVENT (SCCE_ENET_TXB)
139 #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY) 155 #define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
140 156
141 static int setup_data(struct net_device *dev) 157 static int setup_data(struct net_device *dev)
142 { 158 {
143 struct fs_enet_private *fep = netdev_priv(dev); 159 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145 160
161 #ifdef CONFIG_PPC_CPM_NEW_BINDING
162 struct fs_platform_info *fpi = fep->fpi;
163
146 fep->scc.idx = fs_get_scc_index(fpi->fs_no); 164 fep->scc.idx = fs_get_scc_index(fpi->fs_no);
147 if ((unsigned int)fep->scc.idx > 4) /* max 4 SCCs */ 165 if ((unsigned int)fep->scc.idx >= 4) /* max 4 SCCs */
148 return -EINVAL; 166 return -EINVAL;
149 167
168 fpi->cp_command = fep->scc.idx << 6;
169 #endif
170
150 do_pd_setup(fep); 171 do_pd_setup(fep);
151 172
152 fep->scc.hthi = 0; 173 fep->scc.hthi = 0;
153 fep->scc.htlo = 0; 174 fep->scc.htlo = 0;
154 175
155 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK; 176 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
156 fep->ev_rx = SCC_RX_EVENT; 177 fep->ev_rx = SCC_RX_EVENT;
157 fep->ev_tx = SCC_TX_EVENT; 178 fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
158 fep->ev_err = SCC_ERR_EVENT_MSK; 179 fep->ev_err = SCC_ERR_EVENT_MSK;
159 180
160 return 0; 181 return 0;
161 } 182 }
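The cp_command value stored here reproduces what scc_cr_cmd() used to build by hand. On CPM1, mk_cr_cmd(ch, op) expands to (op << 8) | (ch << 4) (an assumption based on the 8xx commproc headers), and the old code passed ch = scc.idx << 2, so the channel field is scc.idx << 6, which is exactly what is now precomputed. A short worked check:

	/*
	 * Old:  ch = idx << 2;
	 *       CPCR = mk_cr_cmd(ch, op) | CPM_CR_FLG
	 *            = (op << 8) | (idx << 6) | CPM_CR_FLG
	 * New:  fpi->cp_command = idx << 6;
	 *       CPCR = fpi->cp_command | CPM_CR_FLG | (op << 8)
	 * Both write the same value; only the channel field is precomputed.
	 */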
162 183
163 static int allocate_bd(struct net_device *dev) 184 static int allocate_bd(struct net_device *dev)
164 { 185 {
165 struct fs_enet_private *fep = netdev_priv(dev); 186 struct fs_enet_private *fep = netdev_priv(dev);
166 const struct fs_platform_info *fpi = fep->fpi; 187 const struct fs_platform_info *fpi = fep->fpi;
167 188
168 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) * 189 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
169 sizeof(cbd_t), 8); 190 sizeof(cbd_t), 8);
170 if (IS_ERR_VALUE(fep->ring_mem_addr)) 191 if (IS_ERR_VALUE(fep->ring_mem_addr))
171 return -ENOMEM; 192 return -ENOMEM;
172 193
173 fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr); 194 fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
174 195
175 return 0; 196 return 0;
176 } 197 }
177 198
178 static void free_bd(struct net_device *dev) 199 static void free_bd(struct net_device *dev)
179 { 200 {
180 struct fs_enet_private *fep = netdev_priv(dev); 201 struct fs_enet_private *fep = netdev_priv(dev);
181 202
182 if (fep->ring_base) 203 if (fep->ring_base)
183 cpm_dpfree(fep->ring_mem_addr); 204 cpm_dpfree(fep->ring_mem_addr);
184 } 205 }
185 206
186 static void cleanup_data(struct net_device *dev) 207 static void cleanup_data(struct net_device *dev)
187 { 208 {
188 /* nothing */ 209 /* nothing */
189 } 210 }
190 211
191 static void set_promiscuous_mode(struct net_device *dev) 212 static void set_promiscuous_mode(struct net_device *dev)
192 { 213 {
193 struct fs_enet_private *fep = netdev_priv(dev); 214 struct fs_enet_private *fep = netdev_priv(dev);
194 scc_t *sccp = fep->scc.sccp; 215 scc_t *sccp = fep->scc.sccp;
195 216
196 S16(sccp, scc_psmr, SCC_PSMR_PRO); 217 S16(sccp, scc_psmr, SCC_PSMR_PRO);
197 } 218 }
198 219
199 static void set_multicast_start(struct net_device *dev) 220 static void set_multicast_start(struct net_device *dev)
200 { 221 {
201 struct fs_enet_private *fep = netdev_priv(dev); 222 struct fs_enet_private *fep = netdev_priv(dev);
202 scc_enet_t *ep = fep->scc.ep; 223 scc_enet_t *ep = fep->scc.ep;
203 224
204 W16(ep, sen_gaddr1, 0); 225 W16(ep, sen_gaddr1, 0);
205 W16(ep, sen_gaddr2, 0); 226 W16(ep, sen_gaddr2, 0);
206 W16(ep, sen_gaddr3, 0); 227 W16(ep, sen_gaddr3, 0);
207 W16(ep, sen_gaddr4, 0); 228 W16(ep, sen_gaddr4, 0);
208 } 229 }
209 230
210 static void set_multicast_one(struct net_device *dev, const u8 * mac) 231 static void set_multicast_one(struct net_device *dev, const u8 * mac)
211 { 232 {
212 struct fs_enet_private *fep = netdev_priv(dev); 233 struct fs_enet_private *fep = netdev_priv(dev);
213 scc_enet_t *ep = fep->scc.ep; 234 scc_enet_t *ep = fep->scc.ep;
214 u16 taddrh, taddrm, taddrl; 235 u16 taddrh, taddrm, taddrl;
215 236
216 taddrh = ((u16) mac[5] << 8) | mac[4]; 237 taddrh = ((u16) mac[5] << 8) | mac[4];
217 taddrm = ((u16) mac[3] << 8) | mac[2]; 238 taddrm = ((u16) mac[3] << 8) | mac[2];
218 taddrl = ((u16) mac[1] << 8) | mac[0]; 239 taddrl = ((u16) mac[1] << 8) | mac[0];
219 240
220 W16(ep, sen_taddrh, taddrh); 241 W16(ep, sen_taddrh, taddrh);
221 W16(ep, sen_taddrm, taddrm); 242 W16(ep, sen_taddrm, taddrm);
222 W16(ep, sen_taddrl, taddrl); 243 W16(ep, sen_taddrl, taddrl);
223 scc_cr_cmd(fep, CPM_CR_SET_GADDR); 244 scc_cr_cmd(fep, CPM_CR_SET_GADDR);
224 } 245 }
225 246
226 static void set_multicast_finish(struct net_device *dev) 247 static void set_multicast_finish(struct net_device *dev)
227 { 248 {
228 struct fs_enet_private *fep = netdev_priv(dev); 249 struct fs_enet_private *fep = netdev_priv(dev);
229 scc_t *sccp = fep->scc.sccp; 250 scc_t *sccp = fep->scc.sccp;
230 scc_enet_t *ep = fep->scc.ep; 251 scc_enet_t *ep = fep->scc.ep;
231 252
232 /* clear promiscuous always */ 253 /* clear promiscuous always */
233 C16(sccp, scc_psmr, SCC_PSMR_PRO); 254 C16(sccp, scc_psmr, SCC_PSMR_PRO);
234 255
235 /* if all multi or too many multicasts, just enable all */ 256 /* if all multi or too many multicasts, just enable all */
236 if ((dev->flags & IFF_ALLMULTI) != 0 || 257 if ((dev->flags & IFF_ALLMULTI) != 0 ||
237 dev->mc_count > SCC_MAX_MULTICAST_ADDRS) { 258 dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
238 259
239 W16(ep, sen_gaddr1, 0xffff); 260 W16(ep, sen_gaddr1, 0xffff);
240 W16(ep, sen_gaddr2, 0xffff); 261 W16(ep, sen_gaddr2, 0xffff);
241 W16(ep, sen_gaddr3, 0xffff); 262 W16(ep, sen_gaddr3, 0xffff);
242 W16(ep, sen_gaddr4, 0xffff); 263 W16(ep, sen_gaddr4, 0xffff);
243 } 264 }
244 } 265 }
245 266
246 static void set_multicast_list(struct net_device *dev) 267 static void set_multicast_list(struct net_device *dev)
247 { 268 {
248 struct dev_mc_list *pmc; 269 struct dev_mc_list *pmc;
249 270
250 if ((dev->flags & IFF_PROMISC) == 0) { 271 if ((dev->flags & IFF_PROMISC) == 0) {
251 set_multicast_start(dev); 272 set_multicast_start(dev);
252 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) 273 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
253 set_multicast_one(dev, pmc->dmi_addr); 274 set_multicast_one(dev, pmc->dmi_addr);
254 set_multicast_finish(dev); 275 set_multicast_finish(dev);
255 } else 276 } else
256 set_promiscuous_mode(dev); 277 set_promiscuous_mode(dev);
257 } 278 }
258 279
259 /* 280 /*
260 * This function is called to start or restart the FEC during a link 281 * This function is called to start or restart the FEC during a link
261 * change. This only happens when switching between half and full 282 * change. This only happens when switching between half and full
262 * duplex. 283 * duplex.
263 */ 284 */
264 static void restart(struct net_device *dev) 285 static void restart(struct net_device *dev)
265 { 286 {
266 struct fs_enet_private *fep = netdev_priv(dev); 287 struct fs_enet_private *fep = netdev_priv(dev);
267 scc_t *sccp = fep->scc.sccp; 288 scc_t *sccp = fep->scc.sccp;
268 scc_enet_t *ep = fep->scc.ep; 289 scc_enet_t *ep = fep->scc.ep;
269 const struct fs_platform_info *fpi = fep->fpi; 290 const struct fs_platform_info *fpi = fep->fpi;
270 u16 paddrh, paddrm, paddrl; 291 u16 paddrh, paddrm, paddrl;
271 const unsigned char *mac; 292 const unsigned char *mac;
272 int i; 293 int i;
273 294
274 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 295 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
275 296
276 /* clear everything (slow & steady does it) */ 297 /* clear everything (slow & steady does it) */
277 for (i = 0; i < sizeof(*ep); i++) 298 for (i = 0; i < sizeof(*ep); i++)
278 __fs_out8((char *)ep + i, 0); 299 __fs_out8((char *)ep + i, 0);
279 300
280 /* point to bds */ 301 /* point to bds */
281 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr); 302 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
282 W16(ep, sen_genscc.scc_tbase, 303 W16(ep, sen_genscc.scc_tbase,
283 fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring); 304 fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
284 305
285 /* Initialize function code registers for big-endian. 306 /* Initialize function code registers for big-endian.
286 */ 307 */
287 W8(ep, sen_genscc.scc_rfcr, SCC_EB); 308 W8(ep, sen_genscc.scc_rfcr, SCC_EB);
288 W8(ep, sen_genscc.scc_tfcr, SCC_EB); 309 W8(ep, sen_genscc.scc_tfcr, SCC_EB);
289 310
290 /* Set maximum bytes per receive buffer. 311 /* Set maximum bytes per receive buffer.
291 * This appears to be an Ethernet frame size, not the buffer 312 * This appears to be an Ethernet frame size, not the buffer
292 * fragment size. It must be a multiple of four. 313 * fragment size. It must be a multiple of four.
293 */ 314 */
294 W16(ep, sen_genscc.scc_mrblr, 0x5f0); 315 W16(ep, sen_genscc.scc_mrblr, 0x5f0);
295 316
296 /* Set CRC preset and mask. 317 /* Set CRC preset and mask.
297 */ 318 */
298 W32(ep, sen_cpres, 0xffffffff); 319 W32(ep, sen_cpres, 0xffffffff);
299 W32(ep, sen_cmask, 0xdebb20e3); 320 W32(ep, sen_cmask, 0xdebb20e3);
300 321
301 W32(ep, sen_crcec, 0); /* CRC Error counter */ 322 W32(ep, sen_crcec, 0); /* CRC Error counter */
302 W32(ep, sen_alec, 0); /* alignment error counter */ 323 W32(ep, sen_alec, 0); /* alignment error counter */
303 W32(ep, sen_disfc, 0); /* discard frame counter */ 324 W32(ep, sen_disfc, 0); /* discard frame counter */
304 325
305 W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */ 326 W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
306 W16(ep, sen_retlim, 15); /* Retry limit threshold */ 327 W16(ep, sen_retlim, 15); /* Retry limit threshold */
307 328
308 W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */ 329 W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
309 330
310 W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */ 331 W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
311 332
312 W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */ 333 W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
313 W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */ 334 W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
314 335
315 /* Clear hash tables. 336 /* Clear hash tables.
316 */ 337 */
317 W16(ep, sen_gaddr1, 0); 338 W16(ep, sen_gaddr1, 0);
318 W16(ep, sen_gaddr2, 0); 339 W16(ep, sen_gaddr2, 0);
319 W16(ep, sen_gaddr3, 0); 340 W16(ep, sen_gaddr3, 0);
320 W16(ep, sen_gaddr4, 0); 341 W16(ep, sen_gaddr4, 0);
321 W16(ep, sen_iaddr1, 0); 342 W16(ep, sen_iaddr1, 0);
322 W16(ep, sen_iaddr2, 0); 343 W16(ep, sen_iaddr2, 0);
323 W16(ep, sen_iaddr3, 0); 344 W16(ep, sen_iaddr3, 0);
324 W16(ep, sen_iaddr4, 0); 345 W16(ep, sen_iaddr4, 0);
325 346
326 /* set address 347 /* set address
327 */ 348 */
328 mac = dev->dev_addr; 349 mac = dev->dev_addr;
329 paddrh = ((u16) mac[5] << 8) | mac[4]; 350 paddrh = ((u16) mac[5] << 8) | mac[4];
330 paddrm = ((u16) mac[3] << 8) | mac[2]; 351 paddrm = ((u16) mac[3] << 8) | mac[2];
331 paddrl = ((u16) mac[1] << 8) | mac[0]; 352 paddrl = ((u16) mac[1] << 8) | mac[0];
332 353
333 W16(ep, sen_paddrh, paddrh); 354 W16(ep, sen_paddrh, paddrh);
334 W16(ep, sen_paddrm, paddrm); 355 W16(ep, sen_paddrm, paddrm);
335 W16(ep, sen_paddrl, paddrl); 356 W16(ep, sen_paddrl, paddrl);
336 357
337 W16(ep, sen_pper, 0); 358 W16(ep, sen_pper, 0);
338 W16(ep, sen_taddrl, 0); 359 W16(ep, sen_taddrl, 0);
339 W16(ep, sen_taddrm, 0); 360 W16(ep, sen_taddrm, 0);
340 W16(ep, sen_taddrh, 0); 361 W16(ep, sen_taddrh, 0);
341 362
342 fs_init_bds(dev); 363 fs_init_bds(dev);
343 364
344 scc_cr_cmd(fep, CPM_CR_INIT_TRX); 365 scc_cr_cmd(fep, CPM_CR_INIT_TRX);
345 366
346 W16(sccp, scc_scce, 0xffff); 367 W16(sccp, scc_scce, 0xffff);
347 368
348 /* Enable interrupts we wish to service. 369 /* Enable interrupts we wish to service.
349 */ 370 */
350 W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); 371 W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
351 372
352 /* Set GSMR_H to enable all normal operating modes. 373 /* Set GSMR_H to enable all normal operating modes.
353 * Set GSMR_L to enable Ethernet to MC68160. 374 * Set GSMR_L to enable Ethernet to MC68160.
354 */ 375 */
355 W32(sccp, scc_gsmrh, 0); 376 W32(sccp, scc_gsmrh, 0);
356 W32(sccp, scc_gsmrl, 377 W32(sccp, scc_gsmrl,
357 SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | 378 SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
358 SCC_GSMRL_MODE_ENET); 379 SCC_GSMRL_MODE_ENET);
359 380
360 /* Set sync/delimiters. 381 /* Set sync/delimiters.
361 */ 382 */
362 W16(sccp, scc_dsr, 0xd555); 383 W16(sccp, scc_dsr, 0xd555);
363 384
364 /* Set processing mode. Use Ethernet CRC, catch broadcast, and 385 /* Set processing mode. Use Ethernet CRC, catch broadcast, and
365 * start frame search 22 bit times after RENA. 386 * start frame search 22 bit times after RENA.
366 */ 387 */
367 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); 388 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
368 389
369 /* Set full duplex mode if needed */ 390 /* Set full duplex mode if needed */
370 if (fep->phydev->duplex) 391 if (fep->phydev->duplex)
371 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); 392 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
372 393
373 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 394 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
374 } 395 }
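
The station address written above to sen_paddrh/m/l packs two MAC bytes per 16-bit word, with the last octet of the address in the high byte of sen_paddrh. A small stand-alone illustration of that packing (the MAC value is only an example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* example address 00:11:22:33:44:55, packed as in restart() above */
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t paddrh = ((uint16_t)mac[5] << 8) | mac[4];
        uint16_t paddrm = ((uint16_t)mac[3] << 8) | mac[2];
        uint16_t paddrl = ((uint16_t)mac[1] << 8) | mac[0];

        printf("paddrh=0x%04x paddrm=0x%04x paddrl=0x%04x\n",
               paddrh, paddrm, paddrl);   /* 0x5544 0x3322 0x1100 */
        return 0;
}
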
375 396
376 static void stop(struct net_device *dev) 397 static void stop(struct net_device *dev)
377 { 398 {
378 struct fs_enet_private *fep = netdev_priv(dev); 399 struct fs_enet_private *fep = netdev_priv(dev);
379 scc_t *sccp = fep->scc.sccp; 400 scc_t *sccp = fep->scc.sccp;
380 int i; 401 int i;
381 402
382 for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++) 403 for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
383 udelay(1); 404 udelay(1);
384 405
385 if (i == SCC_RESET_DELAY) 406 if (i == SCC_RESET_DELAY)
386 printk(KERN_WARNING DRV_MODULE_NAME 407 printk(KERN_WARNING DRV_MODULE_NAME
387 ": %s SCC timeout on graceful transmit stop\n", 408 ": %s SCC timeout on graceful transmit stop\n",
388 dev->name); 409 dev->name);
389 410
390 W16(sccp, scc_sccm, 0); 411 W16(sccp, scc_sccm, 0);
391 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 412 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
392 413
393 fs_cleanup_bds(dev); 414 fs_cleanup_bds(dev);
394 } 415 }
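
stop() above uses the driver's usual bounded busy-wait: poll a condition for at most a fixed number of iterations with a one-microsecond delay per pass, then warn if the limit was hit instead of blocking forever. A minimal user-space sketch of the same shape, with a stubbed condition standing in for the hardware status read:

#include <stdio.h>

/* stand-in for the hardware status the driver would poll */
static int still_busy(int iteration)
{
        return iteration < 3;   /* pretend the hardware settles after 3 polls */
}

/* poll up to 'limit' times; report -1 on timeout, 0 on success */
static int poll_with_timeout(int limit)
{
        int i;

        for (i = 0; still_busy(i) && i < limit; i++)
                ;               /* the driver calls udelay(1) here */

        return (i == limit) ? -1 : 0;
}

int main(void)
{
        printf("poll result: %d\n", poll_with_timeout(100));
        return 0;
}
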
395 416
396 static void pre_request_irq(struct net_device *dev, int irq) 417 static void pre_request_irq(struct net_device *dev, int irq)
397 { 418 {
398 #ifndef CONFIG_PPC_MERGE 419 #ifndef CONFIG_PPC_MERGE
399 immap_t *immap = fs_enet_immap; 420 immap_t *immap = fs_enet_immap;
400 u32 siel; 421 u32 siel;
401 422
402 /* SIU interrupt */ 423 /* SIU interrupt */
403 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) { 424 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
404 425
405 siel = in_be32(&immap->im_siu_conf.sc_siel); 426 siel = in_be32(&immap->im_siu_conf.sc_siel);
406 if ((irq & 1) == 0) 427 if ((irq & 1) == 0)
407 siel |= (0x80000000 >> irq); 428 siel |= (0x80000000 >> irq);
408 else 429 else
409 siel &= ~(0x80000000 >> (irq & ~1)); 430 siel &= ~(0x80000000 >> (irq & ~1));
410 out_be32(&immap->im_siu_conf.sc_siel, siel); 431 out_be32(&immap->im_siu_conf.sc_siel, siel);
411 } 432 }
412 #endif 433 #endif
413 } 434 }
414 435
415 static void post_free_irq(struct net_device *dev, int irq) 436 static void post_free_irq(struct net_device *dev, int irq)
416 { 437 {
417 /* nothing */ 438 /* nothing */
418 } 439 }
419 440
420 static void napi_clear_rx_event(struct net_device *dev) 441 static void napi_clear_rx_event(struct net_device *dev)
421 { 442 {
422 struct fs_enet_private *fep = netdev_priv(dev); 443 struct fs_enet_private *fep = netdev_priv(dev);
423 scc_t *sccp = fep->scc.sccp; 444 scc_t *sccp = fep->scc.sccp;
424 445
425 W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK); 446 W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
426 } 447 }
427 448
428 static void napi_enable_rx(struct net_device *dev) 449 static void napi_enable_rx(struct net_device *dev)
429 { 450 {
430 struct fs_enet_private *fep = netdev_priv(dev); 451 struct fs_enet_private *fep = netdev_priv(dev);
431 scc_t *sccp = fep->scc.sccp; 452 scc_t *sccp = fep->scc.sccp;
432 453
433 S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); 454 S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
434 } 455 }
435 456
436 static void napi_disable_rx(struct net_device *dev) 457 static void napi_disable_rx(struct net_device *dev)
437 { 458 {
438 struct fs_enet_private *fep = netdev_priv(dev); 459 struct fs_enet_private *fep = netdev_priv(dev);
439 scc_t *sccp = fep->scc.sccp; 460 scc_t *sccp = fep->scc.sccp;
440 461
441 C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK); 462 C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
442 } 463 }
443 464
444 static void rx_bd_done(struct net_device *dev) 465 static void rx_bd_done(struct net_device *dev)
445 { 466 {
446 /* nothing */ 467 /* nothing */
447 } 468 }
448 469
449 static void tx_kickstart(struct net_device *dev) 470 static void tx_kickstart(struct net_device *dev)
450 { 471 {
451 /* nothing */ 472 /* nothing */
452 } 473 }
453 474
454 static u32 get_int_events(struct net_device *dev) 475 static u32 get_int_events(struct net_device *dev)
455 { 476 {
456 struct fs_enet_private *fep = netdev_priv(dev); 477 struct fs_enet_private *fep = netdev_priv(dev);
457 scc_t *sccp = fep->scc.sccp; 478 scc_t *sccp = fep->scc.sccp;
458 479
459 return (u32) R16(sccp, scc_scce); 480 return (u32) R16(sccp, scc_scce);
460 } 481 }
461 482
462 static void clear_int_events(struct net_device *dev, u32 int_events) 483 static void clear_int_events(struct net_device *dev, u32 int_events)
463 { 484 {
464 struct fs_enet_private *fep = netdev_priv(dev); 485 struct fs_enet_private *fep = netdev_priv(dev);
465 scc_t *sccp = fep->scc.sccp; 486 scc_t *sccp = fep->scc.sccp;
466 487
467 W16(sccp, scc_scce, int_events & 0xffff); 488 W16(sccp, scc_scce, int_events & 0xffff);
468 } 489 }
469 490
470 static void ev_error(struct net_device *dev, u32 int_events) 491 static void ev_error(struct net_device *dev, u32 int_events)
471 { 492 {
472 printk(KERN_WARNING DRV_MODULE_NAME 493 printk(KERN_WARNING DRV_MODULE_NAME
473 ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events); 494 ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
474 } 495 }
475 496
476 static int get_regs(struct net_device *dev, void *p, int *sizep) 497 static int get_regs(struct net_device *dev, void *p, int *sizep)
477 { 498 {
478 struct fs_enet_private *fep = netdev_priv(dev); 499 struct fs_enet_private *fep = netdev_priv(dev);
479 500
480 if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t)) 501 if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
481 return -EINVAL; 502 return -EINVAL;
482 503
483 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t)); 504 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
484 p = (char *)p + sizeof(scc_t); 505 p = (char *)p + sizeof(scc_t);
485 506
486 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t)); 507 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
487 508
488 return 0; 509 return 0;
489 } 510 }
490 511
491 static int get_regs_len(struct net_device *dev) 512 static int get_regs_len(struct net_device *dev)
492 { 513 {
493 return sizeof(scc_t) + sizeof(scc_enet_t); 514 return sizeof(scc_t) + sizeof(scc_enet_t);
494 } 515 }
495 516
496 static void tx_restart(struct net_device *dev) 517 static void tx_restart(struct net_device *dev)
497 { 518 {
498 struct fs_enet_private *fep = netdev_priv(dev); 519 struct fs_enet_private *fep = netdev_priv(dev);
499 520
500 scc_cr_cmd(fep, CPM_CR_RESTART_TX); 521 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
501 } 522 }
502 523
503 524
504 525
505 /*************************************************************************/ 526 /*************************************************************************/
506 527
507 const struct fs_ops fs_scc_ops = { 528 const struct fs_ops fs_scc_ops = {
508 .setup_data = setup_data, 529 .setup_data = setup_data,
509 .cleanup_data = cleanup_data, 530 .cleanup_data = cleanup_data,
510 .set_multicast_list = set_multicast_list, 531 .set_multicast_list = set_multicast_list,
511 .restart = restart, 532 .restart = restart,
512 .stop = stop, 533 .stop = stop,
513 .pre_request_irq = pre_request_irq, 534 .pre_request_irq = pre_request_irq,
514 .post_free_irq = post_free_irq, 535 .post_free_irq = post_free_irq,
515 .napi_clear_rx_event = napi_clear_rx_event, 536 .napi_clear_rx_event = napi_clear_rx_event,
516 .napi_enable_rx = napi_enable_rx, 537 .napi_enable_rx = napi_enable_rx,
517 .napi_disable_rx = napi_disable_rx, 538 .napi_disable_rx = napi_disable_rx,
518 .rx_bd_done = rx_bd_done, 539 .rx_bd_done = rx_bd_done,
519 .tx_kickstart = tx_kickstart, 540 .tx_kickstart = tx_kickstart,
520 .get_int_events = get_int_events, 541 .get_int_events = get_int_events,
521 .clear_int_events = clear_int_events, 542 .clear_int_events = clear_int_events,
522 .ev_error = ev_error, 543 .ev_error = ev_error,
drivers/net/fs_enet/mii-bitbang.c
1 /* 1 /*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. 2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 * 3 *
4 * Copyright (c) 2003 Intracom S.A. 4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/ptrace.h>
20 #include <linux/errno.h>
21 #include <linux/ioport.h> 16 #include <linux/ioport.h>
22 #include <linux/slab.h> 17 #include <linux/slab.h>
23 #include <linux/interrupt.h> 18 #include <linux/interrupt.h>
24 #include <linux/init.h> 19 #include <linux/init.h>
25 #include <linux/delay.h> 20 #include <linux/delay.h>
26 #include <linux/netdevice.h> 21 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h> 22 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h>
30 #include <linux/mii.h> 23 #include <linux/mii.h>
31 #include <linux/ethtool.h> 24 #include <linux/ethtool.h>
32 #include <linux/bitops.h> 25 #include <linux/bitops.h>
33 #include <linux/platform_device.h> 26 #include <linux/platform_device.h>
34 27
35 #include <asm/pgtable.h> 28 #ifdef CONFIG_PPC_CPM_NEW_BINDING
36 #include <asm/irq.h> 29 #include <linux/of_platform.h>
37 #include <asm/uaccess.h> 30 #endif
38 31
39 #include "fs_enet.h" 32 #include "fs_enet.h"
40 33
41 static int bitbang_prep_bit(u8 **datp, u8 *mskp, 34 struct bb_info {
42 struct fs_mii_bit *mii_bit) 35 __be32 __iomem *dir;
43 { 36 __be32 __iomem *dat;
44 void *dat; 37 u32 mdio_msk;
45 int adv; 38 u32 mdc_msk;
46 u8 msk; 39 int delay;
40 };
47 41
48 dat = (void*) mii_bit->offset; 42 /* FIXME: If any other users of GPIO crop up, then these will have to
49 43 * have some sort of global synchronization to avoid races with other
50 adv = mii_bit->bit >> 3; 44 * pins on the same port. The ideal solution would probably be to
51 dat = (char *)dat + adv; 45 * bind the ports to a GPIO driver, and have this be a client of it.
52 46 */
53 msk = 1 << (7 - (mii_bit->bit & 7)); 47 static inline void bb_set(u32 __iomem *p, u32 m)
54
55 *datp = dat;
56 *mskp = msk;
57
58 return 0;
59 }
60
61 static inline void bb_set(u8 *p, u8 m)
62 { 48 {
63 out_8(p, in_8(p) | m); 49 out_be32(p, in_be32(p) | m);
64 } 50 }
65 51
66 static inline void bb_clr(u8 *p, u8 m) 52 static inline void bb_clr(u32 __iomem *p, u32 m)
67 { 53 {
68 out_8(p, in_8(p) & ~m); 54 out_be32(p, in_be32(p) & ~m);
69 } 55 }
70 56
71 static inline int bb_read(u8 *p, u8 m) 57 static inline int bb_read(u32 __iomem *p, u32 m)
72 { 58 {
73 return (in_8(p) & m) != 0; 59 return (in_be32(p) & m) != 0;
74 } 60 }
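
The bb_set()/bb_clr() helpers above do a non-atomic read-modify-write of a whole port register, which is exactly what the FIXME is about: a second user of the same port could race with them. A kernel-style sketch (not part of this patch, not compilable on its own, and the lock name is invented) of how such accesses could be serialized if that ever became necessary:

/* hypothetical guard; would need <linux/spinlock.h> and the io accessors */
static DEFINE_SPINLOCK(cpm_port_lock);

static inline void bb_set_locked(u32 __iomem *p, u32 m)
{
        unsigned long flags;

        spin_lock_irqsave(&cpm_port_lock, flags);
        out_be32(p, in_be32(p) | m);    /* read-modify-write now serialized */
        spin_unlock_irqrestore(&cpm_port_lock, flags);
}
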
75 61
76 static inline void mdio_active(struct bb_info *bitbang) 62 static inline void mdio_active(struct bb_info *bitbang)
77 { 63 {
78 bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk); 64 bb_set(bitbang->dir, bitbang->mdio_msk);
79 } 65 }
80 66
81 static inline void mdio_tristate(struct bb_info *bitbang ) 67 static inline void mdio_tristate(struct bb_info *bitbang)
82 { 68 {
83 bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk); 69 bb_clr(bitbang->dir, bitbang->mdio_msk);
84 } 70 }
85 71
86 static inline int mdio_read(struct bb_info *bitbang ) 72 static inline int mdio_read(struct bb_info *bitbang)
87 { 73 {
88 return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk); 74 return bb_read(bitbang->dat, bitbang->mdio_msk);
89 } 75 }
90 76
91 static inline void mdio(struct bb_info *bitbang , int what) 77 static inline void mdio(struct bb_info *bitbang, int what)
92 { 78 {
93 if (what) 79 if (what)
94 bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk); 80 bb_set(bitbang->dat, bitbang->mdio_msk);
95 else 81 else
96 bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk); 82 bb_clr(bitbang->dat, bitbang->mdio_msk);
97 } 83 }
98 84
99 static inline void mdc(struct bb_info *bitbang , int what) 85 static inline void mdc(struct bb_info *bitbang, int what)
100 { 86 {
101 if (what) 87 if (what)
102 bb_set(bitbang->mdc_dat, bitbang->mdc_msk); 88 bb_set(bitbang->dat, bitbang->mdc_msk);
103 else 89 else
104 bb_clr(bitbang->mdc_dat, bitbang->mdc_msk); 90 bb_clr(bitbang->dat, bitbang->mdc_msk);
105 } 91 }
106 92
107 static inline void mii_delay(struct bb_info *bitbang ) 93 static inline void mii_delay(struct bb_info *bitbang)
108 { 94 {
109 udelay(bitbang->delay); 95 udelay(bitbang->delay);
110 } 96 }
111 97
112 /* Utility to send the preamble, address, and register (common to read and write). */ 98 /* Utility to send the preamble, address, and register (common to read and write). */
113 static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg) 99 static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
114 { 100 {
115 int j; 101 int j;
116 102
117 /* 103 /*
118 * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure. 104 * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure.
119 * The IEEE spec says this is a PHY optional requirement. The AMD 105 * The IEEE spec says this is a PHY optional requirement. The AMD
120 * 79C874 requires one after power up and one after a MII communications 106 * 79C874 requires one after power up and one after a MII communications
121 * error. This means that we are doing more preambles than we need, 107 * error. This means that we are doing more preambles than we need,
122 * but it is safer and will be much more robust. 108 * but it is safer and will be much more robust.
123 */ 109 */
124 110
125 mdio_active(bitbang); 111 mdio_active(bitbang);
126 mdio(bitbang, 1); 112 mdio(bitbang, 1);
127 for (j = 0; j < 32; j++) { 113 for (j = 0; j < 32; j++) {
128 mdc(bitbang, 0); 114 mdc(bitbang, 0);
129 mii_delay(bitbang); 115 mii_delay(bitbang);
130 mdc(bitbang, 1); 116 mdc(bitbang, 1);
131 mii_delay(bitbang); 117 mii_delay(bitbang);
132 } 118 }
133 119
134 /* send the start bit (01) and the read opcode (10) or write opcode (01) */ 120 /* send the start bit (01) and the read opcode (10) or write opcode (01) */
135 mdc(bitbang, 0); 121 mdc(bitbang, 0);
136 mdio(bitbang, 0); 122 mdio(bitbang, 0);
137 mii_delay(bitbang); 123 mii_delay(bitbang);
138 mdc(bitbang, 1); 124 mdc(bitbang, 1);
139 mii_delay(bitbang); 125 mii_delay(bitbang);
140 mdc(bitbang, 0); 126 mdc(bitbang, 0);
141 mdio(bitbang, 1); 127 mdio(bitbang, 1);
142 mii_delay(bitbang); 128 mii_delay(bitbang);
143 mdc(bitbang, 1); 129 mdc(bitbang, 1);
144 mii_delay(bitbang); 130 mii_delay(bitbang);
145 mdc(bitbang, 0); 131 mdc(bitbang, 0);
146 mdio(bitbang, read); 132 mdio(bitbang, read);
147 mii_delay(bitbang); 133 mii_delay(bitbang);
148 mdc(bitbang, 1); 134 mdc(bitbang, 1);
149 mii_delay(bitbang); 135 mii_delay(bitbang);
150 mdc(bitbang, 0); 136 mdc(bitbang, 0);
151 mdio(bitbang, !read); 137 mdio(bitbang, !read);
152 mii_delay(bitbang); 138 mii_delay(bitbang);
153 mdc(bitbang, 1); 139 mdc(bitbang, 1);
154 mii_delay(bitbang); 140 mii_delay(bitbang);
155 141
156 /* send the PHY address */ 142 /* send the PHY address */
157 for (j = 0; j < 5; j++) { 143 for (j = 0; j < 5; j++) {
158 mdc(bitbang, 0); 144 mdc(bitbang, 0);
159 mdio(bitbang, (addr & 0x10) != 0); 145 mdio(bitbang, (addr & 0x10) != 0);
160 mii_delay(bitbang); 146 mii_delay(bitbang);
161 mdc(bitbang, 1); 147 mdc(bitbang, 1);
162 mii_delay(bitbang); 148 mii_delay(bitbang);
163 addr <<= 1; 149 addr <<= 1;
164 } 150 }
165 151
166 /* send the register address */ 152 /* send the register address */
167 for (j = 0; j < 5; j++) { 153 for (j = 0; j < 5; j++) {
168 mdc(bitbang, 0); 154 mdc(bitbang, 0);
169 mdio(bitbang, (reg & 0x10) != 0); 155 mdio(bitbang, (reg & 0x10) != 0);
170 mii_delay(bitbang); 156 mii_delay(bitbang);
171 mdc(bitbang, 1); 157 mdc(bitbang, 1);
172 mii_delay(bitbang); 158 mii_delay(bitbang);
173 reg <<= 1; 159 reg <<= 1;
174 } 160 }
175 } 161 }
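
bitbang_pre() clocks out the fixed part of an IEEE 802.3 clause 22 management frame: 32 preamble '1' bits, the start bits 01, the opcode (10 for read, 01 for write), then five PHY address bits and five register address bits, MSB first; the turnaround and the 16 data bits are handled by the read and write routines that follow. As a stand-alone illustration of that field layout (not code from this driver), the post-preamble frame can be packed into one 32-bit word:

#include <stdio.h>
#include <stdint.h>

static uint32_t mdio_frame(int read, uint8_t phy, uint8_t reg, uint16_t data)
{
        uint32_t st = 0x1;               /* start bits: 01 */
        uint32_t op = read ? 0x2 : 0x1;  /* opcode: read 10, write 01 */
        uint32_t ta = read ? 0x0 : 0x2;  /* a write drives TA as 10; on reads
                                            the PHY drives TA and the data,
                                            shown as zero here */

        return (st << 30) | (op << 28) |
               ((uint32_t)(phy & 0x1f) << 23) |
               ((uint32_t)(reg & 0x1f) << 18) |
               (ta << 16) | data;
}

int main(void)
{
        /* write 0x1234 to register 0 of PHY 3 (example values) */
        printf("frame = 0x%08x\n", mdio_frame(0, 3, 0, 0x1234));
        return 0;
}
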
176 162
177 static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location) 163 static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
178 { 164 {
179 u16 rdreg; 165 u16 rdreg;
180 int ret, j; 166 int ret, j;
181 u8 addr = phy_id & 0xff; 167 u8 addr = phy_id & 0xff;
182 u8 reg = location & 0xff; 168 u8 reg = location & 0xff;
183 struct bb_info* bitbang = bus->priv; 169 struct bb_info* bitbang = bus->priv;
184 170
185 bitbang_pre(bitbang, 1, addr, reg); 171 bitbang_pre(bitbang, 1, addr, reg);
186 172
187 /* tri-state our MDIO I/O pin so we can read */ 173 /* tri-state our MDIO I/O pin so we can read */
188 mdc(bitbang, 0); 174 mdc(bitbang, 0);
189 mdio_tristate(bitbang); 175 mdio_tristate(bitbang);
190 mii_delay(bitbang); 176 mii_delay(bitbang);
191 mdc(bitbang, 1); 177 mdc(bitbang, 1);
192 mii_delay(bitbang); 178 mii_delay(bitbang);
193 179
194 /* check the turnaround bit: the PHY should be driving it to zero */ 180 /* check the turnaround bit: the PHY should be driving it to zero */
195 if (mdio_read(bitbang) != 0) { 181 if (mdio_read(bitbang) != 0) {
196 /* PHY didn't drive TA low */ 182 /* PHY didn't drive TA low */
197 for (j = 0; j < 32; j++) { 183 for (j = 0; j < 32; j++) {
198 mdc(bitbang, 0); 184 mdc(bitbang, 0);
199 mii_delay(bitbang); 185 mii_delay(bitbang);
200 mdc(bitbang, 1); 186 mdc(bitbang, 1);
201 mii_delay(bitbang); 187 mii_delay(bitbang);
202 } 188 }
203 ret = -1; 189 ret = -1;
204 goto out; 190 goto out;
205 } 191 }
206 192
207 mdc(bitbang, 0); 193 mdc(bitbang, 0);
208 mii_delay(bitbang); 194 mii_delay(bitbang);
209 195
210 /* read 16 bits of register data, MSB first */ 196 /* read 16 bits of register data, MSB first */
211 rdreg = 0; 197 rdreg = 0;
212 for (j = 0; j < 16; j++) { 198 for (j = 0; j < 16; j++) {
213 mdc(bitbang, 1); 199 mdc(bitbang, 1);
214 mii_delay(bitbang); 200 mii_delay(bitbang);
215 rdreg <<= 1; 201 rdreg <<= 1;
216 rdreg |= mdio_read(bitbang); 202 rdreg |= mdio_read(bitbang);
217 mdc(bitbang, 0); 203 mdc(bitbang, 0);
218 mii_delay(bitbang); 204 mii_delay(bitbang);
219 } 205 }
220 206
221 mdc(bitbang, 1); 207 mdc(bitbang, 1);
222 mii_delay(bitbang); 208 mii_delay(bitbang);
223 mdc(bitbang, 0); 209 mdc(bitbang, 0);
224 mii_delay(bitbang); 210 mii_delay(bitbang);
225 mdc(bitbang, 1); 211 mdc(bitbang, 1);
226 mii_delay(bitbang); 212 mii_delay(bitbang);
227 213
228 ret = rdreg; 214 ret = rdreg;
229 out: 215 out:
230 return ret; 216 return ret;
231 } 217 }
232 218
233 static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val) 219 static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
234 { 220 {
235 int j; 221 int j;
236 struct bb_info* bitbang = bus->priv; 222 struct bb_info* bitbang = bus->priv;
237 223
238 u8 addr = phy_id & 0xff; 224 u8 addr = phy_id & 0xff;
239 u8 reg = location & 0xff; 225 u8 reg = location & 0xff;
240 u16 value = val & 0xffff; 226 u16 value = val & 0xffff;
241 227
242 bitbang_pre(bitbang, 0, addr, reg); 228 bitbang_pre(bitbang, 0, addr, reg);
243 229
244 /* send the turnaround (10) */ 230 /* send the turnaround (10) */
245 mdc(bitbang, 0); 231 mdc(bitbang, 0);
246 mdio(bitbang, 1); 232 mdio(bitbang, 1);
247 mii_delay(bitbang); 233 mii_delay(bitbang);
248 mdc(bitbang, 1); 234 mdc(bitbang, 1);
249 mii_delay(bitbang); 235 mii_delay(bitbang);
250 mdc(bitbang, 0); 236 mdc(bitbang, 0);
251 mdio(bitbang, 0); 237 mdio(bitbang, 0);
252 mii_delay(bitbang); 238 mii_delay(bitbang);
253 mdc(bitbang, 1); 239 mdc(bitbang, 1);
254 mii_delay(bitbang); 240 mii_delay(bitbang);
255 241
256 /* write 16 bits of register data, MSB first */ 242 /* write 16 bits of register data, MSB first */
257 for (j = 0; j < 16; j++) { 243 for (j = 0; j < 16; j++) {
258 mdc(bitbang, 0); 244 mdc(bitbang, 0);
259 mdio(bitbang, (value & 0x8000) != 0); 245 mdio(bitbang, (value & 0x8000) != 0);
260 mii_delay(bitbang); 246 mii_delay(bitbang);
261 mdc(bitbang, 1); 247 mdc(bitbang, 1);
262 mii_delay(bitbang); 248 mii_delay(bitbang);
263 value <<= 1; 249 value <<= 1;
264 } 250 }
265 251
266 /* 252 /*
267 * Tri-state the MDIO line. 253 * Tri-state the MDIO line.
268 */ 254 */
269 mdio_tristate(bitbang); 255 mdio_tristate(bitbang);
270 mdc(bitbang, 0); 256 mdc(bitbang, 0);
271 mii_delay(bitbang); 257 mii_delay(bitbang);
272 mdc(bitbang, 1); 258 mdc(bitbang, 1);
273 mii_delay(bitbang); 259 mii_delay(bitbang);
274 return 0; 260 return 0;
275 } 261 }
276 262
277 static int fs_enet_mii_bb_reset(struct mii_bus *bus) 263 static int fs_enet_mii_bb_reset(struct mii_bus *bus)
278 { 264 {
279 /* nothing here - no known way to reset this bus */ 265 /* nothing here - no known way to reset this bus */
280 return 0; 266 return 0;
281 } 267 }
282 268
283 static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi) 269 #ifdef CONFIG_PPC_CPM_NEW_BINDING
270 static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
271 struct device_node *np)
284 { 272 {
285 int r; 273 struct resource res;
274 const u32 *data;
275 int mdio_pin, mdc_pin, len;
276 struct bb_info *bitbang = bus->priv;
286 277
287 bitbang->delay = fmpi->delay; 278 int ret = of_address_to_resource(np, 0, &res);
279 if (ret)
280 return ret;
288 281
289 r = bitbang_prep_bit(&bitbang->mdio_dir, 282 if (res.end - res.start < 13)
290 &bitbang->mdio_dir_msk, 283 return -ENODEV;
291 &fmpi->mdio_dir);
292 if (r != 0)
293 return r;
294 284
295 r = bitbang_prep_bit(&bitbang->mdio_dat, 285 /* This should really encode the pin number as well, but all
296 &bitbang->mdio_dat_msk, 286 * we get is an int, and the odds of multiple bitbang mdio buses
297 &fmpi->mdio_dat); 287 * is low enough that it's not worth going too crazy.
298 if (r != 0) 288 */
299 return r; 289 bus->id = res.start;
300 290
301 r = bitbang_prep_bit(&bitbang->mdc_dat, 291 data = of_get_property(np, "fsl,mdio-pin", &len);
302 &bitbang->mdc_msk, 292 if (!data || len != 4)
303 &fmpi->mdc_dat); 293 return -ENODEV;
304 if (r != 0) 294 mdio_pin = *data;
305 return r; 295
296 data = of_get_property(np, "fsl,mdc-pin", &len);
297 if (!data || len != 4)
298 return -ENODEV;
299 mdc_pin = *data;
300
301 bitbang->dir = ioremap(res.start, res.end - res.start + 1);
302 if (!bitbang->dir)
303 return -ENOMEM;
304
305 bitbang->dat = bitbang->dir + 4;
306 bitbang->mdio_msk = 1 << (31 - mdio_pin);
307 bitbang->mdc_msk = 1 << (31 - mdc_pin);
308 bitbang->delay = 1; /* 1 us between operations */
309
310 return 0;
311 }
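
The masks computed above follow the CPM convention that port pin 0 is the most significant bit of the 32-bit register, so pin N becomes 1 << (31 - N). A quick stand-alone check with made-up pin numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int mdio_pin = 12, mdc_pin = 13;        /* example pins only */
        uint32_t mdio_msk = 1u << (31 - mdio_pin);
        uint32_t mdc_msk  = 1u << (31 - mdc_pin);

        printf("mdio_msk=0x%08x mdc_msk=0x%08x\n", mdio_msk, mdc_msk);
        /* prints mdio_msk=0x00080000 mdc_msk=0x00040000 */
        return 0;
}
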
312
313 static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
314 {
315 const u32 *data;
316 int len, id, irq;
317
318 data = of_get_property(np, "reg", &len);
319 if (!data || len != 4)
320 return;
321
322 id = *data;
323 bus->phy_mask &= ~(1 << id);
324
325 irq = of_irq_to_resource(np, 0, NULL);
326 if (irq != NO_IRQ)
327 bus->irq[id] = irq;
328 }
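
add_phy() relies on the probe routine below starting with phy_mask set to ~0 (every address masked) and clearing one bit per PHY child node, so only the addresses the device tree actually describes get scanned when the bus is registered. A stand-alone illustration with example addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t phy_mask = ~0u;        /* all 32 addresses masked */

        phy_mask &= ~(1u << 0);         /* child node with reg = <0> */
        phy_mask &= ~(1u << 3);         /* child node with reg = <3> */

        printf("phy_mask = 0x%08x\n", phy_mask);        /* 0xfffffff6 */
        return 0;
}
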
329
330 static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
331 const struct of_device_id *match)
332 {
333 struct device_node *np = NULL;
334 struct mii_bus *new_bus;
335 struct bb_info *bitbang;
336 int ret = -ENOMEM;
337 int i;
338
339 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
340 if (!new_bus)
341 goto out;
342
343 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
344 if (!bitbang)
345 goto out_free_bus;
346
347 new_bus->priv = bitbang;
348 new_bus->name = "CPM2 Bitbanged MII",
349 new_bus->read = &fs_enet_mii_bb_read,
350 new_bus->write = &fs_enet_mii_bb_write,
351 new_bus->reset = &fs_enet_mii_bb_reset,
352
353 ret = fs_mii_bitbang_init(new_bus, ofdev->node);
354 if (ret)
355 goto out_free_bitbang;
356
357 new_bus->phy_mask = ~0;
358 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
359 if (!new_bus->irq)
360 goto out_unmap_regs;
361
362 for (i = 0; i < PHY_MAX_ADDR; i++)
363 new_bus->irq[i] = -1;
364
365 while ((np = of_get_next_child(ofdev->node, np)))
366 if (!strcmp(np->type, "ethernet-phy"))
367 add_phy(new_bus, np);
368
369 new_bus->dev = &ofdev->dev;
370 dev_set_drvdata(&ofdev->dev, new_bus);
371
372 ret = mdiobus_register(new_bus);
373 if (ret)
374 goto out_free_irqs;
375
376 return 0;
377
378 out_free_irqs:
379 dev_set_drvdata(&ofdev->dev, NULL);
380 kfree(new_bus->irq);
381 out_unmap_regs:
382 iounmap(bitbang->dir);
383 out_free_bitbang:
384 kfree(bitbang);
385 out_free_bus:
386 kfree(new_bus);
387 out:
388 return ret;
389 }
390
391 static int fs_enet_mdio_remove(struct of_device *ofdev)
392 {
393 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
394 struct bb_info *bitbang = bus->priv;
395
396 mdiobus_unregister(bus);
397 dev_set_drvdata(&ofdev->dev, NULL);
398 kfree(bus->irq);
399 iounmap(bitbang->dir);
400 kfree(bitbang);
401 kfree(bus);
402
403 return 0;
404 }
405
406 static struct of_device_id fs_enet_mdio_bb_match[] = {
407 {
408 .compatible = "fsl,cpm2-mdio-bitbang",
409 },
410 {},
411 };
412
413 static struct of_platform_driver fs_enet_bb_mdio_driver = {
414 .name = "fsl-bb-mdio",
415 .match_table = fs_enet_mdio_bb_match,
416 .probe = fs_enet_mdio_probe,
417 .remove = fs_enet_mdio_remove,
418 };
419
420 int fs_enet_mdio_bb_init(void)
421 {
422 return of_register_platform_driver(&fs_enet_bb_mdio_driver);
423 }
424
425 void fs_enet_mdio_bb_exit(void)
426 {
427 of_unregister_platform_driver(&fs_enet_bb_mdio_driver);
428 }
429
430 module_init(fs_enet_mdio_bb_init);
431 module_exit(fs_enet_mdio_bb_exit);
432 #else
433 static int __devinit fs_mii_bitbang_init(struct bb_info *bitbang,
434 struct fs_mii_bb_platform_info *fmpi)
435 {
436 bitbang->dir = (u32 __iomem *)fmpi->mdio_dir.offset;
437 bitbang->dat = (u32 __iomem *)fmpi->mdio_dat.offset;
438 bitbang->mdio_msk = 1U << (31 - fmpi->mdio_dat.bit);
439 bitbang->mdc_msk = 1U << (31 - fmpi->mdc_dat.bit);
440 bitbang->delay = fmpi->delay;
306 441
307 return 0; 442 return 0;
308 } 443 }
309 444
310 static int __devinit fs_enet_mdio_probe(struct device *dev) 445 static int __devinit fs_enet_mdio_probe(struct device *dev)
311 { 446 {
312 struct platform_device *pdev = to_platform_device(dev); 447 struct platform_device *pdev = to_platform_device(dev);
313 struct fs_mii_bb_platform_info *pdata; 448 struct fs_mii_bb_platform_info *pdata;
314 struct mii_bus *new_bus; 449 struct mii_bus *new_bus;
315 struct bb_info *bitbang; 450 struct bb_info *bitbang;
316 int err = 0; 451 int err = 0;
317 452
318 if (NULL == dev) 453 if (NULL == dev)
319 return -EINVAL; 454 return -EINVAL;
320 455
321 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); 456 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
322 457
323 if (NULL == new_bus) 458 if (NULL == new_bus)
324 return -ENOMEM; 459 return -ENOMEM;
325 460
326 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); 461 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
327 462
328 if (NULL == bitbang) 463 if (NULL == bitbang)
329 return -ENOMEM; 464 return -ENOMEM;
330 465
331 new_bus->name = "BB MII Bus", 466 new_bus->name = "BB MII Bus",
332 new_bus->read = &fs_enet_mii_bb_read, 467 new_bus->read = &fs_enet_mii_bb_read,
333 new_bus->write = &fs_enet_mii_bb_write, 468 new_bus->write = &fs_enet_mii_bb_write,
334 new_bus->reset = &fs_enet_mii_bb_reset, 469 new_bus->reset = &fs_enet_mii_bb_reset,
335 new_bus->id = pdev->id; 470 new_bus->id = pdev->id;
336 471
337 new_bus->phy_mask = ~0x9; 472 new_bus->phy_mask = ~0x9;
338 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; 473 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
339 474
340 if (NULL == pdata) { 475 if (NULL == pdata) {
341 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); 476 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
342 return -ENODEV; 477 return -ENODEV;
343 } 478 }
344 479
345 /*set up workspace*/ 480 /*set up workspace*/
346 fs_mii_bitbang_init(bitbang, pdata); 481 fs_mii_bitbang_init(bitbang, pdata);
347 482
348 new_bus->priv = bitbang; 483 new_bus->priv = bitbang;
349 484
350 new_bus->irq = pdata->irq; 485 new_bus->irq = pdata->irq;
351 486
352 new_bus->dev = dev; 487 new_bus->dev = dev;
353 dev_set_drvdata(dev, new_bus); 488 dev_set_drvdata(dev, new_bus);
354 489
355 err = mdiobus_register(new_bus); 490 err = mdiobus_register(new_bus);
356 491
357 if (0 != err) { 492 if (0 != err) {
358 printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 493 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
359 new_bus->name); 494 new_bus->name);
360 goto bus_register_fail; 495 goto bus_register_fail;
361 } 496 }
362 497
363 return 0; 498 return 0;
364 499
365 bus_register_fail: 500 bus_register_fail:
366 kfree(bitbang); 501 kfree(bitbang);
367 kfree(new_bus); 502 kfree(new_bus);
368 503
369 return err; 504 return err;
370 } 505 }
371 506
372 static int fs_enet_mdio_remove(struct device *dev) 507 static int fs_enet_mdio_remove(struct device *dev)
373 { 508 {
374 struct mii_bus *bus = dev_get_drvdata(dev); 509 struct mii_bus *bus = dev_get_drvdata(dev);
375 510
376 mdiobus_unregister(bus); 511 mdiobus_unregister(bus);
377 512
378 dev_set_drvdata(dev, NULL); 513 dev_set_drvdata(dev, NULL);
379 514
380 iounmap((void *) (&bus->priv)); 515 iounmap((void *) (&bus->priv));
381 bus->priv = NULL; 516 bus->priv = NULL;
382 kfree(bus); 517 kfree(bus);
383 518
384 return 0; 519 return 0;
385 } 520 }
386 521
drivers/net/fs_enet/mii-fec.c
1 /* 1 /*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx. 2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 * 3 *
4 * Copyright (c) 2003 Intracom S.A. 4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr> 5 * by Pantelis Antoniou <panto@intracom.gr>
6 * 6 *
7 * 2005 (c) MontaVista Software, Inc. 7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com> 8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 * 9 *
10 * This file is licensed under the terms of the GNU General Public License 10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any 11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied. 12 * kind, whether express or implied.
13 */ 13 */
14 14
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/types.h> 16 #include <linux/types.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/string.h> 18 #include <linux/string.h>
19 #include <linux/ptrace.h> 19 #include <linux/ptrace.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <linux/ioport.h> 21 #include <linux/ioport.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/interrupt.h> 23 #include <linux/interrupt.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/netdevice.h> 26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h> 27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h> 28 #include <linux/skbuff.h>
29 #include <linux/spinlock.h> 29 #include <linux/spinlock.h>
30 #include <linux/mii.h> 30 #include <linux/mii.h>
31 #include <linux/ethtool.h> 31 #include <linux/ethtool.h>
32 #include <linux/bitops.h> 32 #include <linux/bitops.h>
33 #include <linux/platform_device.h> 33 #include <linux/platform_device.h>
34 34
35 #include <asm/pgtable.h> 35 #include <asm/pgtable.h>
36 #include <asm/irq.h> 36 #include <asm/irq.h>
37 #include <asm/uaccess.h> 37 #include <asm/uaccess.h>
38 38
39 #ifdef CONFIG_PPC_CPM_NEW_BINDING
40 #include <asm/of_platform.h>
41 #endif
42
39 #include "fs_enet.h" 43 #include "fs_enet.h"
40 #include "fec.h" 44 #include "fec.h"
41 45
42 /* Make MII read/write commands for the FEC. 46 /* Make MII read/write commands for the FEC.
43 */ 47 */
44 #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) 48 #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
45 #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) 49 #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
46 #define mk_mii_end 0 50 #define mk_mii_end 0
47 51
48 #define FEC_MII_LOOPS 10000 52 #define FEC_MII_LOOPS 10000
49 53
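
The command words built by mk_mii_read()/mk_mii_write() follow the FEC's MII management frame layout: start bits 01 plus the opcode in bits 31:28 (read gives 0x6..., write 0x5...), the register number in bits 22:18, a 10 turnaround in bits 17:16, and the data in the low 16 bits; the callers below OR the PHY address into bits 27:23 with (phy_id << 23). A stand-alone decode with example arguments:

#include <stdio.h>

#define mk_mii_read(REG)       (0x60020000 | ((REG & 0x1f) << 18))
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))

int main(void)
{
        printf("read reg 1:           0x%08x\n", (unsigned)mk_mii_read(1));
        printf("write reg 0 = 0x1234: 0x%08x\n", (unsigned)mk_mii_write(0, 0x1234));
        /* prints 0x60060000 and 0x50021234 */
        return 0;
}
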
54 #ifndef CONFIG_PPC_CPM_NEW_BINDING
50 static int match_has_phy (struct device *dev, void* data) 55 static int match_has_phy (struct device *dev, void* data)
51 { 56 {
52 struct platform_device* pdev = container_of(dev, struct platform_device, dev); 57 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
53 struct fs_platform_info* fpi; 58 struct fs_platform_info* fpi;
54 if(strcmp(pdev->name, (char*)data)) 59 if(strcmp(pdev->name, (char*)data))
55 { 60 {
56 return 0; 61 return 0;
57 } 62 }
58 63
59 fpi = pdev->dev.platform_data; 64 fpi = pdev->dev.platform_data;
60 if((fpi)&&(fpi->has_phy)) 65 if((fpi)&&(fpi->has_phy))
61 return 1; 66 return 1;
62 return 0; 67 return 0;
63 } 68 }
64 69
65 static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi) 70 static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
66 { 71 {
67 struct resource *r; 72 struct resource *r;
68 fec_t *fecp; 73 fec_t *fecp;
69 char* name = "fsl-cpm-fec"; 74 char* name = "fsl-cpm-fec";
70 75
71 /* we need fec in order to be useful */ 76 /* we need fec in order to be useful */
72 struct platform_device *fec_pdev = 77 struct platform_device *fec_pdev =
73 container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy), 78 container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
74 struct platform_device, dev); 79 struct platform_device, dev);
75 80
76 if(fec_pdev == NULL) { 81 if(fec_pdev == NULL) {
77 printk(KERN_ERR"Unable to find PHY for %s", name); 82 printk(KERN_ERR"Unable to find PHY for %s", name);
78 return -ENODEV; 83 return -ENODEV;
79 } 84 }
80 85
81 r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs"); 86 r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
82 87
83 fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t)); 88 fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t));
84 fec->mii_speed = fmpi->mii_speed; 89 fec->mii_speed = fmpi->mii_speed;
85 90
86 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 91 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
87 setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 92 setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
88 out_be32(&fecp->fec_ievent, FEC_ENET_MII); 93 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
89 out_be32(&fecp->fec_mii_speed, fec->mii_speed); 94 out_be32(&fecp->fec_mii_speed, fec->mii_speed);
90 95
91 return 0; 96 return 0;
92 } 97 }
98 #endif
93 99
94 static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location) 100 static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
95 { 101 {
96 struct fec_info* fec = bus->priv; 102 struct fec_info* fec = bus->priv;
97 fec_t *fecp = fec->fecp; 103 fec_t *fecp = fec->fecp;
98 int i, ret = -1; 104 int i, ret = -1;
99 105
100 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) 106 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
101 BUG(); 107 BUG();
102 108
103 /* Add PHY address to register command. */ 109 /* Add PHY address to register command. */
104 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location)); 110 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
105 111
106 for (i = 0; i < FEC_MII_LOOPS; i++) 112 for (i = 0; i < FEC_MII_LOOPS; i++)
107 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) 113 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
108 break; 114 break;
109 115
110 if (i < FEC_MII_LOOPS) { 116 if (i < FEC_MII_LOOPS) {
111 out_be32(&fecp->fec_ievent, FEC_ENET_MII); 117 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
112 ret = in_be32(&fecp->fec_mii_data) & 0xffff; 118 ret = in_be32(&fecp->fec_mii_data) & 0xffff;
113 } 119 }
114 120
115 return ret; 121 return ret;
116 } 122 }
117 123
118 static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val) 124 static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
119 { 125 {
120 struct fec_info* fec = bus->priv; 126 struct fec_info* fec = bus->priv;
121 fec_t *fecp = fec->fecp; 127 fec_t *fecp = fec->fecp;
122 int i; 128 int i;
123 129
124 /* this must never happen */ 130 /* this must never happen */
125 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) 131 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
126 BUG(); 132 BUG();
127 133
128 /* Add PHY address to register command. */ 134 /* Add PHY address to register command. */
129 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val)); 135 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
130 136
131 for (i = 0; i < FEC_MII_LOOPS; i++) 137 for (i = 0; i < FEC_MII_LOOPS; i++)
132 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0) 138 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
133 break; 139 break;
134 140
135 if (i < FEC_MII_LOOPS) 141 if (i < FEC_MII_LOOPS)
136 out_be32(&fecp->fec_ievent, FEC_ENET_MII); 142 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
137 143
138 return 0; 144 return 0;
139 145
140 } 146 }
141 147
142 static int fs_enet_fec_mii_reset(struct mii_bus *bus) 148 static int fs_enet_fec_mii_reset(struct mii_bus *bus)
143 { 149 {
144 /* nothing here - for now */ 150 /* nothing here - for now */
145 return 0; 151 return 0;
146 } 152 }
147 153
154 #ifdef CONFIG_PPC_CPM_NEW_BINDING
155 static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
156 {
157 const u32 *data;
158 int len, id, irq;
159
160 data = of_get_property(np, "reg", &len);
161 if (!data || len != 4)
162 return;
163
164 id = *data;
165 bus->phy_mask &= ~(1 << id);
166
167 irq = of_irq_to_resource(np, 0, NULL);
168 if (irq != NO_IRQ)
169 bus->irq[id] = irq;
170 }
171
172 static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
173 const struct of_device_id *match)
174 {
175 struct device_node *np = NULL;
176 struct resource res;
177 struct mii_bus *new_bus;
178 struct fec_info *fec;
179 int ret = -ENOMEM, i;
180
181 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
182 if (!new_bus)
183 goto out;
184
185 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
186 if (!fec)
187 goto out_mii;
188
189 new_bus->priv = fec;
190 new_bus->name = "FEC MII Bus";
191 new_bus->read = &fs_enet_fec_mii_read;
192 new_bus->write = &fs_enet_fec_mii_write;
193 new_bus->reset = &fs_enet_fec_mii_reset;
194
195 ret = of_address_to_resource(ofdev->node, 0, &res);
196 if (ret)
197 return ret;
198
199 new_bus->id = res.start;
200
201 fec->fecp = ioremap(res.start, res.end - res.start + 1);
202 if (!fec->fecp)
203 goto out_fec;
204
205 fec->mii_speed = ((ppc_proc_freq + 4999999) / 5000000) << 1;
206
207 setbits32(&fec->fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE);
208 setbits32(&fec->fecp->fec_ecntrl, FEC_ECNTRL_PINMUX |
209 FEC_ECNTRL_ETHER_EN);
210 out_be32(&fec->fecp->fec_ievent, FEC_ENET_MII);
211 out_be32(&fec->fecp->fec_mii_speed, fec->mii_speed);
212
213 new_bus->phy_mask = ~0;
214 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
215 if (!new_bus->irq)
216 goto out_unmap_regs;
217
218 for (i = 0; i < PHY_MAX_ADDR; i++)
219 new_bus->irq[i] = -1;
220
221 while ((np = of_get_next_child(ofdev->node, np)))
222 if (!strcmp(np->type, "ethernet-phy"))
223 add_phy(new_bus, np);
224
225 new_bus->dev = &ofdev->dev;
226 dev_set_drvdata(&ofdev->dev, new_bus);
227
228 ret = mdiobus_register(new_bus);
229 if (ret)
230 goto out_free_irqs;
231
232 return 0;
233
234 out_free_irqs:
235 dev_set_drvdata(&ofdev->dev, NULL);
236 kfree(new_bus->irq);
237 out_unmap_regs:
238 iounmap(fec->fecp);
239 out_fec:
240 kfree(fec);
241 out_mii:
242 kfree(new_bus);
243 out:
244 return ret;
245 }
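
The mii_speed value computed in the probe above, ((ppc_proc_freq + 4999999) / 5000000) << 1, rounds the system frequency up to units of 5 MHz and shifts the result into place for the FEC's MII_SPEED field; the usual intent is to keep MDC at or below the 2.5 MHz MII limit. A worked example with an assumed 66 MHz clock:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t ppc_proc_freq = 66000000;      /* example frequency only */
        uint32_t mii_speed =
                (uint32_t)((ppc_proc_freq + 4999999) / 5000000) << 1;

        printf("mii_speed = %u (0x%x)\n",
               (unsigned)mii_speed, (unsigned)mii_speed);
        /* (66000000 + 4999999) / 5000000 = 14, so mii_speed = 28 (0x1c) */
        return 0;
}
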
246
247 static int fs_enet_mdio_remove(struct of_device *ofdev)
248 {
249 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
250 struct fec_info *fec = bus->priv;
251
252 mdiobus_unregister(bus);
253 dev_set_drvdata(&ofdev->dev, NULL);
254 kfree(bus->irq);
255 iounmap(fec->fecp);
256 kfree(fec);
257 kfree(bus);
258
259 return 0;
260 }
261
262 static struct of_device_id fs_enet_mdio_fec_match[] = {
263 {
264 .compatible = "fsl,pq1-fec-mdio",
265 },
266 {},
267 };
268
269 static struct of_platform_driver fs_enet_fec_mdio_driver = {
270 .name = "fsl-fec-mdio",
271 .match_table = fs_enet_mdio_fec_match,
272 .probe = fs_enet_mdio_probe,
273 .remove = fs_enet_mdio_remove,
274 };
275
276 static int fs_enet_mdio_fec_init(void)
277 {
278 return of_register_platform_driver(&fs_enet_fec_mdio_driver);
279 }
280
281 static void fs_enet_mdio_fec_exit(void)
282 {
283 of_unregister_platform_driver(&fs_enet_fec_mdio_driver);
284 }
285
286 module_init(fs_enet_mdio_fec_init);
287 module_exit(fs_enet_mdio_fec_exit);
288 #else
148 static int __devinit fs_enet_fec_mdio_probe(struct device *dev) 289 static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
149 { 290 {
150 struct platform_device *pdev = to_platform_device(dev); 291 struct platform_device *pdev = to_platform_device(dev);
151 struct fs_mii_fec_platform_info *pdata; 292 struct fs_mii_fec_platform_info *pdata;
152 struct mii_bus *new_bus; 293 struct mii_bus *new_bus;
153 struct fec_info *fec; 294 struct fec_info *fec;
154 int err = 0; 295 int err = 0;
155 if (NULL == dev) 296 if (NULL == dev)
156 return -EINVAL; 297 return -EINVAL;
157 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); 298 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
158 299
159 if (NULL == new_bus) 300 if (NULL == new_bus)
160 return -ENOMEM; 301 return -ENOMEM;
161 302
162 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL); 303 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
163 304
164 if (NULL == fec) 305 if (NULL == fec)
165 return -ENOMEM; 306 return -ENOMEM;
166 307
167 new_bus->name = "FEC MII Bus", 308 new_bus->name = "FEC MII Bus",
168 new_bus->read = &fs_enet_fec_mii_read, 309 new_bus->read = &fs_enet_fec_mii_read,
169 new_bus->write = &fs_enet_fec_mii_write, 310 new_bus->write = &fs_enet_fec_mii_write,
170 new_bus->reset = &fs_enet_fec_mii_reset, 311 new_bus->reset = &fs_enet_fec_mii_reset,
171 new_bus->id = pdev->id; 312 new_bus->id = pdev->id;
172 313
173 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; 314 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
174 315
175 if (NULL == pdata) { 316 if (NULL == pdata) {
176 printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id); 317 printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
177 return -ENODEV; 318 return -ENODEV;
178 } 319 }
179 320
180 /*set up workspace*/ 321 /*set up workspace*/
181 322
182 fs_mii_fec_init(fec, pdata); 323 fs_mii_fec_init(fec, pdata);
183 new_bus->priv = fec; 324 new_bus->priv = fec;
184 325
185 new_bus->irq = pdata->irq; 326 new_bus->irq = pdata->irq;
186 327
187 new_bus->dev = dev; 328 new_bus->dev = dev;
188 dev_set_drvdata(dev, new_bus); 329 dev_set_drvdata(dev, new_bus);
189 330
190 err = mdiobus_register(new_bus); 331 err = mdiobus_register(new_bus);
191 332
192 if (0 != err) { 333 if (0 != err) {
193 printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 334 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
194 new_bus->name); 335 new_bus->name);
195 goto bus_register_fail; 336 goto bus_register_fail;
196 } 337 }
197 338
198 return 0; 339 return 0;
199 340
200 bus_register_fail: 341 bus_register_fail:
201 kfree(new_bus); 342 kfree(new_bus);
202 343
203 return err; 344 return err;
204 } 345 }
205 346
206 347
207 static int fs_enet_fec_mdio_remove(struct device *dev) 348 static int fs_enet_fec_mdio_remove(struct device *dev)
208 { 349 {
209 struct mii_bus *bus = dev_get_drvdata(dev); 350 struct mii_bus *bus = dev_get_drvdata(dev);
210 351
211 mdiobus_unregister(bus); 352 mdiobus_unregister(bus);
212 353
213 dev_set_drvdata(dev, NULL); 354 dev_set_drvdata(dev, NULL);
214 kfree(bus->priv); 355 kfree(bus->priv);
215 356
216 bus->priv = NULL; 357 bus->priv = NULL;
217 kfree(bus); 358 kfree(bus);
218 359
219 return 0; 360 return 0;
220 } 361 }
221 362
222 static struct device_driver fs_enet_fec_mdio_driver = { 363 static struct device_driver fs_enet_fec_mdio_driver = {
223 .name = "fsl-cpm-fec-mdio", 364 .name = "fsl-cpm-fec-mdio",
224 .bus = &platform_bus_type, 365 .bus = &platform_bus_type,
225 .probe = fs_enet_fec_mdio_probe, 366 .probe = fs_enet_fec_mdio_probe,
226 .remove = fs_enet_fec_mdio_remove, 367 .remove = fs_enet_fec_mdio_remove,
227 }; 368 };
228 369
229 int fs_enet_mdio_fec_init(void) 370 int fs_enet_mdio_fec_init(void)
230 { 371 {
231 return driver_register(&fs_enet_fec_mdio_driver); 372 return driver_register(&fs_enet_fec_mdio_driver);
232 } 373 }
233 374
234 void fs_enet_mdio_fec_exit(void) 375 void fs_enet_mdio_fec_exit(void)
235 { 376 {
236 driver_unregister(&fs_enet_fec_mdio_driver); 377 driver_unregister(&fs_enet_fec_mdio_driver);
237 } 378 }
379 #endif
238 380
include/linux/fs_enet_pd.h
1 /* 1 /*
2 * Platform information definitions for the 2 * Platform information definitions for the
3 * universal Freescale Ethernet driver. 3 * universal Freescale Ethernet driver.
4 * 4 *
5 * Copyright (c) 2003 Intracom S.A. 5 * Copyright (c) 2003 Intracom S.A.
6 * by Pantelis Antoniou <panto@intracom.gr> 6 * by Pantelis Antoniou <panto@intracom.gr>
7 * 7 *
8 * 2005 (c) MontaVista Software, Inc. 8 * 2005 (c) MontaVista Software, Inc.
9 * Vitaly Bordug <vbordug@ru.mvista.com> 9 * Vitaly Bordug <vbordug@ru.mvista.com>
10 * 10 *
11 * This file is licensed under the terms of the GNU General Public License 11 * This file is licensed under the terms of the GNU General Public License
12 * version 2. This program is licensed "as is" without any warranty of any 12 * version 2. This program is licensed "as is" without any warranty of any
13 * kind, whether express or implied. 13 * kind, whether express or implied.
14 */ 14 */
15 15
16 #ifndef FS_ENET_PD_H 16 #ifndef FS_ENET_PD_H
17 #define FS_ENET_PD_H 17 #define FS_ENET_PD_H
18 18
19 #include <linux/string.h> 19 #include <linux/string.h>
20 #include <asm/types.h> 20 #include <asm/types.h>
21 21
22 #define FS_ENET_NAME "fs_enet" 22 #define FS_ENET_NAME "fs_enet"
23 23
24 enum fs_id { 24 enum fs_id {
25 fsid_fec1, 25 fsid_fec1,
26 fsid_fec2, 26 fsid_fec2,
27 fsid_fcc1, 27 fsid_fcc1,
28 fsid_fcc2, 28 fsid_fcc2,
29 fsid_fcc3, 29 fsid_fcc3,
30 fsid_scc1, 30 fsid_scc1,
31 fsid_scc2, 31 fsid_scc2,
32 fsid_scc3, 32 fsid_scc3,
33 fsid_scc4, 33 fsid_scc4,
34 }; 34 };
35 35
36 #define FS_MAX_INDEX 9 36 #define FS_MAX_INDEX 9
37 37
38 static inline int fs_get_fec_index(enum fs_id id) 38 static inline int fs_get_fec_index(enum fs_id id)
39 { 39 {
40 if (id >= fsid_fec1 && id <= fsid_fec2) 40 if (id >= fsid_fec1 && id <= fsid_fec2)
41 return id - fsid_fec1; 41 return id - fsid_fec1;
42 return -1; 42 return -1;
43 } 43 }
44 44
45 static inline int fs_get_fcc_index(enum fs_id id) 45 static inline int fs_get_fcc_index(enum fs_id id)
46 { 46 {
47 if (id >= fsid_fcc1 && id <= fsid_fcc3) 47 if (id >= fsid_fcc1 && id <= fsid_fcc3)
48 return id - fsid_fcc1; 48 return id - fsid_fcc1;
49 return -1; 49 return -1;
50 } 50 }
51 51
52 static inline int fs_get_scc_index(enum fs_id id) 52 static inline int fs_get_scc_index(enum fs_id id)
53 { 53 {
54 if (id >= fsid_scc1 && id <= fsid_scc4) 54 if (id >= fsid_scc1 && id <= fsid_scc4)
55 return id - fsid_scc1; 55 return id - fsid_scc1;
56 return -1; 56 return -1;
57 } 57 }
58 58
59 static inline int fs_fec_index2id(int index) 59 static inline int fs_fec_index2id(int index)
60 { 60 {
61 int id = fsid_fec1 + index - 1; 61 int id = fsid_fec1 + index - 1;
62 if (id >= fsid_fec1 && id <= fsid_fec2) 62 if (id >= fsid_fec1 && id <= fsid_fec2)
63 return id; 63 return id;
64 return FS_MAX_INDEX; 64 return FS_MAX_INDEX;
65 } 65 }
66 66
67 static inline int fs_fcc_index2id(int index) 67 static inline int fs_fcc_index2id(int index)
68 { 68 {
69 int id = fsid_fcc1 + index - 1; 69 int id = fsid_fcc1 + index - 1;
70 if (id >= fsid_fcc1 && id <= fsid_fcc3) 70 if (id >= fsid_fcc1 && id <= fsid_fcc3)
71 return id; 71 return id;
72 return FS_MAX_INDEX; 72 return FS_MAX_INDEX;
73 } 73 }
74 74
75 static inline int fs_scc_index2id(int index) 75 static inline int fs_scc_index2id(int index)
76 { 76 {
77 int id = fsid_scc1 + index - 1; 77 int id = fsid_scc1 + index - 1;
78 if (id >= fsid_scc1 && id <= fsid_scc4) 78 if (id >= fsid_scc1 && id <= fsid_scc4)
79 return id; 79 return id;
80 return FS_MAX_INDEX; 80 return FS_MAX_INDEX;
81 } 81 }
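
The *_index2id() helpers above take the 1-based controller number used by the platform code and map it onto the enum, while the fs_get_*_index() helpers go back to a 0-based index. A stand-alone round trip (the two FCC helpers are copied here so the example compiles on its own):

#include <stdio.h>

enum fs_id { fsid_fec1, fsid_fec2, fsid_fcc1, fsid_fcc2, fsid_fcc3,
             fsid_scc1, fsid_scc2, fsid_scc3, fsid_scc4 };
#define FS_MAX_INDEX 9

static int fs_fcc_index2id(int index)
{
        int id = fsid_fcc1 + index - 1;
        return (id >= fsid_fcc1 && id <= fsid_fcc3) ? id : FS_MAX_INDEX;
}

static int fs_get_fcc_index(enum fs_id id)
{
        return (id >= fsid_fcc1 && id <= fsid_fcc3) ? id - fsid_fcc1 : -1;
}

int main(void)
{
        int id = fs_fcc_index2id(2);    /* FCC2 -> fsid_fcc2 */
        printf("id=%d, 0-based index=%d\n", id, fs_get_fcc_index(id));
        /* prints id=3, 0-based index=1 */
        return 0;
}
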
82 82
83 enum fs_mii_method { 83 enum fs_mii_method {
84 fsmii_fixed, 84 fsmii_fixed,
85 fsmii_fec, 85 fsmii_fec,
86 fsmii_bitbang, 86 fsmii_bitbang,
87 }; 87 };
88 88
89 enum fs_ioport { 89 enum fs_ioport {
90 fsiop_porta, 90 fsiop_porta,
91 fsiop_portb, 91 fsiop_portb,
92 fsiop_portc, 92 fsiop_portc,
93 fsiop_portd, 93 fsiop_portd,
94 fsiop_porte, 94 fsiop_porte,
95 }; 95 };
96 96
97 struct fs_mii_bit { 97 struct fs_mii_bit {
98 u32 offset; 98 u32 offset;
99 u8 bit; 99 u8 bit;
100 u8 polarity; 100 u8 polarity;
101 }; 101 };
102 struct fs_mii_bb_platform_info { 102 struct fs_mii_bb_platform_info {
103 struct fs_mii_bit mdio_dir; 103 struct fs_mii_bit mdio_dir;
104 struct fs_mii_bit mdio_dat; 104 struct fs_mii_bit mdio_dat;
105 struct fs_mii_bit mdc_dat; 105 struct fs_mii_bit mdc_dat;
106 int mdio_port; /* port & bit for MDIO */ 106 int mdio_port; /* port & bit for MDIO */
107 int mdio_bit; 107 int mdio_bit;
108 int mdc_port; /* port & bit for MDC */ 108 int mdc_port; /* port & bit for MDC */
109 int mdc_bit; 109 int mdc_bit;
110 int delay; /* delay in us */ 110 int delay; /* delay in us */
111 int irq[32]; /* irqs per phy */ 111 int irq[32]; /* irqs per phy */
112 }; 112 };
113 113
114 struct fs_platform_info { 114 struct fs_platform_info {
115 115
116 void(*init_ioports)(struct fs_platform_info *); 116 void(*init_ioports)(struct fs_platform_info *);
117 /* device specific information */ 117 /* device specific information */
118 int fs_no; /* controller index */ 118 int fs_no; /* controller index */
119 char fs_type[4]; /* controller type */ 119 char fs_type[4]; /* controller type */
120 120
121 u32 cp_page; /* CPM page */ 121 u32 cp_page; /* CPM page */
122 u32 cp_block; /* CPM sblock */ 122 u32 cp_block; /* CPM sblock */
123 u32 cp_command; /* CPM page/sblock/mcn */
123 124
124 u32 clk_trx; /* some stuff for pins & mux configuration*/ 125 u32 clk_trx; /* some stuff for pins & mux configuration*/
125 u32 clk_rx; 126 u32 clk_rx;
126 u32 clk_tx; 127 u32 clk_tx;
127 u32 clk_route; 128 u32 clk_route;
128 u32 clk_mask; 129 u32 clk_mask;
129 130
130 u32 mem_offset; 131 u32 mem_offset;
131 u32 dpram_offset; 132 u32 dpram_offset;
132 u32 fcc_regs_c; 133 u32 fcc_regs_c;
133 134
134 u32 device_flags; 135 u32 device_flags;
135 136
136 int phy_addr; /* the phy address (-1 no phy) */ 137 int phy_addr; /* the phy address (-1 no phy) */
138 #ifdef CONFIG_PPC_CPM_NEW_BINDING
139 char bus_id[16];
140 #else
137 const char* bus_id; 141 const char* bus_id;
142 #endif
138 int phy_irq; /* the phy irq (if it exists) */ 143 int phy_irq; /* the phy irq (if it exists) */
139 144
140 const struct fs_mii_bus_info *bus_info; 145 const struct fs_mii_bus_info *bus_info;
141 146
142 int rx_ring, tx_ring; /* number of buffers on rx */ 147 int rx_ring, tx_ring; /* number of buffers on rx */
143 __u8 macaddr[6]; /* mac address */ 148 __u8 macaddr[6]; /* mac address */
144 int rx_copybreak; /* limit we copy small frames */ 149 int rx_copybreak; /* limit we copy small frames */
145 int use_napi; /* use NAPI */ 150 int use_napi; /* use NAPI */
146 int napi_weight; /* NAPI weight */ 151 int napi_weight; /* NAPI weight */
147 152
148 int use_rmii; /* use RMII mode */ 153 int use_rmii; /* use RMII mode */
149 int has_phy; /* if the network is phy container as well...*/ 154 int has_phy; /* if the network is phy container as well...*/
150 }; 155 };
151 struct fs_mii_fec_platform_info { 156 struct fs_mii_fec_platform_info {
152 u32 irq[32]; 157 u32 irq[32];
153 u32 mii_speed; 158 u32 mii_speed;
154 }; 159 };
155 160
156 static inline int fs_get_id(struct fs_platform_info *fpi) 161 static inline int fs_get_id(struct fs_platform_info *fpi)
157 { 162 {
158 if(strstr(fpi->fs_type, "SCC")) 163 if(strstr(fpi->fs_type, "SCC"))
159 return fs_scc_index2id(fpi->fs_no); 164 return fs_scc_index2id(fpi->fs_no);
160 if(strstr(fpi->fs_type, "FCC")) 165 if(strstr(fpi->fs_type, "FCC"))
161 return fs_fcc_index2id(fpi->fs_no); 166 return fs_fcc_index2id(fpi->fs_no);
162 if(strstr(fpi->fs_type, "FEC")) 167 if(strstr(fpi->fs_type, "FEC"))
163 return fs_fec_index2id(fpi->fs_no); 168 return fs_fec_index2id(fpi->fs_no);
164 return fpi->fs_no; 169 return fpi->fs_no;
165 } 170 }
166 171
167 #endif 172 #endif
168 173