Commit 48257c4f168e5d040394aeca4d37b59f68e0d36b

Authored by Pantelis Antoniou
Committed by Jeff Garzik
1 parent d8840ac907

Add the fs_enet Ethernet network driver for several embedded platforms.

Showing 13 changed files with 4400 additions and 0 deletions

drivers/net/Kconfig
... ... @@ -1775,6 +1775,7 @@
1775 1775 controller on the Renesas H8/300 processor.
1776 1776  
1777 1777 source "drivers/net/fec_8xx/Kconfig"
  1778 +source "drivers/net/fs_enet/Kconfig"
1778 1779  
1779 1780 endmenu
1780 1781  
drivers/net/Makefile
... ... @@ -203,4 +203,6 @@
203 203 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
204 204  
205 205 obj-$(CONFIG_NETCONSOLE) += netconsole.o
  206 +
  207 +obj-$(CONFIG_FS_ENET) += fs_enet/
drivers/net/fs_enet/Kconfig
  1 +config FS_ENET
  2 + tristate "Freescale Ethernet Driver"
  3 + depends on NET_ETHERNET && (CPM1 || CPM2)
  4 + select MII
  5 +
  6 +config FS_ENET_HAS_SCC
  7 + bool "Chip has an SCC usable for ethernet"
  8 + depends on FS_ENET && (CPM1 || CPM2)
  9 + default y
  10 +
  11 +config FS_ENET_HAS_FCC
  12 + bool "Chip has an FCC usable for ethernet"
  13 + depends on FS_ENET && CPM2
  14 + default y
  15 +
  16 +config FS_ENET_HAS_FEC
  17 + bool "Chip has an FEC usable for ethernet"
  18 + depends on FS_ENET && CPM1
  19 + default y
drivers/net/fs_enet/Makefile
  1 +#
  2 +# Makefile for the Freescale Ethernet controllers
  3 +#
  4 +
  5 +obj-$(CONFIG_FS_ENET) += fs_enet.o
  6 +
  7 +obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
  8 +obj-$(CONFIG_8260) += mac-fcc.o
  9 +
  10 +fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
drivers/net/fs_enet/fs_enet-main.c
  1 +/*
  2 + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
  11 + * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
  12 + *
  13 + * This file is licensed under the terms of the GNU General Public License
  14 + * version 2. This program is licensed "as is" without any warranty of any
  15 + * kind, whether express or implied.
  16 + */
  17 +
  18 +#include <linux/config.h>
  19 +#include <linux/module.h>
  20 +#include <linux/kernel.h>
  21 +#include <linux/types.h>
  22 +#include <linux/sched.h>
  23 +#include <linux/string.h>
  24 +#include <linux/ptrace.h>
  25 +#include <linux/errno.h>
  26 +#include <linux/ioport.h>
  27 +#include <linux/slab.h>
  28 +#include <linux/interrupt.h>
  29 +#include <linux/pci.h>
  30 +#include <linux/init.h>
  31 +#include <linux/delay.h>
  32 +#include <linux/netdevice.h>
  33 +#include <linux/etherdevice.h>
  34 +#include <linux/skbuff.h>
  35 +#include <linux/spinlock.h>
  36 +#include <linux/mii.h>
  37 +#include <linux/ethtool.h>
  38 +#include <linux/bitops.h>
  39 +#include <linux/fs.h>
  40 +
  41 +#include <linux/vmalloc.h>
  42 +#include <asm/pgtable.h>
  43 +
  44 +#include <asm/pgtable.h>
  45 +#include <asm/irq.h>
  46 +#include <asm/uaccess.h>
  47 +
  48 +#include "fs_enet.h"
  49 +
  50 +/*************************************************/
  51 +
  52 +static char version[] __devinitdata =
  53 + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
  54 +
  55 +MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
  56 +MODULE_DESCRIPTION("Freescale Ethernet Driver");
  57 +MODULE_LICENSE("GPL");
  58 +MODULE_VERSION(DRV_MODULE_VERSION);
  59 +
  60 +MODULE_PARM(fs_enet_debug, "i");
  61 +MODULE_PARM_DESC(fs_enet_debug,
  62 + "Freescale bitmapped debugging message enable value");
  63 +
  64 +int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
  65 +
  66 +static void fs_set_multicast_list(struct net_device *dev)
  67 +{
  68 + struct fs_enet_private *fep = netdev_priv(dev);
  69 +
  70 + (*fep->ops->set_multicast_list)(dev);
  71 +}
  72 +
  73 +/* NAPI receive function */
  74 +static int fs_enet_rx_napi(struct net_device *dev, int *budget)
  75 +{
  76 + struct fs_enet_private *fep = netdev_priv(dev);
  77 + const struct fs_platform_info *fpi = fep->fpi;
  78 + cbd_t *bdp;
  79 + struct sk_buff *skb, *skbn, *skbt;
  80 + int received = 0;
  81 + u16 pkt_len, sc;
  82 + int curidx;
  83 + int rx_work_limit = 0; /* pacify gcc */
  84 +
  85 + rx_work_limit = min(dev->quota, *budget);
  86 +
  87 + if (!netif_running(dev))
  88 + return 0;
  89 +
  90 + /*
  91 + * First, grab all of the stats for the incoming packet.
  92 + * These get messed up if we get called due to a busy condition.
  93 + */
  94 + bdp = fep->cur_rx;
  95 +
  96 + /* clear RX status bits for napi*/
  97 + (*fep->ops->napi_clear_rx_event)(dev);
  98 +
  99 + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
  100 +
  101 + curidx = bdp - fep->rx_bd_base;
  102 +
  103 + /*
  104 + * Since we have allocated space to hold a complete frame,
  105 + * the last indicator should be set.
  106 + */
  107 + if ((sc & BD_ENET_RX_LAST) == 0)
  108 + printk(KERN_WARNING DRV_MODULE_NAME
  109 + ": %s rcv is not +last\n",
  110 + dev->name);
  111 +
  112 + /*
  113 + * Check for errors.
  114 + */
  115 + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
  116 + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
  117 + fep->stats.rx_errors++;
  118 + /* Frame too long or too short. */
  119 + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
  120 + fep->stats.rx_length_errors++;
  121 + /* Frame alignment */
  122 + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
  123 + fep->stats.rx_frame_errors++;
  124 + /* CRC Error */
  125 + if (sc & BD_ENET_RX_CR)
  126 + fep->stats.rx_crc_errors++;
  127 + /* FIFO overrun */
  128 + if (sc & BD_ENET_RX_OV)
  129 + fep->stats.rx_crc_errors++;
  130 +
  131 + skb = fep->rx_skbuff[curidx];
  132 +
  133 + dma_unmap_single(fep->dev, skb->data,
  134 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  135 + DMA_FROM_DEVICE);
  136 +
  137 + skbn = skb;
  138 +
  139 + } else {
  140 +
  141 + /* napi, got packet but no quota */
  142 + if (--rx_work_limit < 0)
  143 + break;
  144 +
  145 + skb = fep->rx_skbuff[curidx];
  146 +
  147 + dma_unmap_single(fep->dev, skb->data,
  148 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  149 + DMA_FROM_DEVICE);
  150 +
  151 + /*
  152 + * Process the incoming frame.
  153 + */
  154 + fep->stats.rx_packets++;
  155 + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
  156 + fep->stats.rx_bytes += pkt_len + 4;
  157 +
  158 + if (pkt_len <= fpi->rx_copybreak) {
  159 + /* +2 to make IP header L1 cache aligned */
  160 + skbn = dev_alloc_skb(pkt_len + 2);
  161 + if (skbn != NULL) {
  162 + skb_reserve(skbn, 2); /* align IP header */
  163 + memcpy(skbn->data, skb->data, pkt_len);
  164 + /* swap */
  165 + skbt = skb;
  166 + skb = skbn;
  167 + skbn = skbt;
  168 + }
  169 + } else
  170 + skbn = dev_alloc_skb(ENET_RX_FRSIZE);
  171 +
  172 + if (skbn != NULL) {
  173 + skb->dev = dev;
  174 + skb_put(skb, pkt_len); /* Make room */
  175 + skb->protocol = eth_type_trans(skb, dev);
  176 + received++;
  177 + netif_receive_skb(skb);
  178 + } else {
  179 + printk(KERN_WARNING DRV_MODULE_NAME
  180 + ": %s Memory squeeze, dropping packet.\n",
  181 + dev->name);
  182 + fep->stats.rx_dropped++;
  183 + skbn = skb;
  184 + }
  185 + }
  186 +
  187 + fep->rx_skbuff[curidx] = skbn;
  188 + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
  189 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  190 + DMA_FROM_DEVICE));
  191 + CBDW_DATLEN(bdp, 0);
  192 + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
  193 +
  194 + /*
  195 + * Update BD pointer to next entry.
  196 + */
  197 + if ((sc & BD_ENET_RX_WRAP) == 0)
  198 + bdp++;
  199 + else
  200 + bdp = fep->rx_bd_base;
  201 +
  202 + (*fep->ops->rx_bd_done)(dev);
  203 + }
  204 +
  205 + fep->cur_rx = bdp;
  206 +
  207 + dev->quota -= received;
  208 + *budget -= received;
  209 +
  210 + if (rx_work_limit < 0)
  211 + return 1; /* not done */
  212 +
  213 + /* done */
  214 + netif_rx_complete(dev);
  215 +
  216 + (*fep->ops->napi_enable_rx)(dev);
  217 +
  218 + return 0;
  219 +}
  220 +
  221 +/* non NAPI receive function */
  222 +static int fs_enet_rx_non_napi(struct net_device *dev)
  223 +{
  224 + struct fs_enet_private *fep = netdev_priv(dev);
  225 + const struct fs_platform_info *fpi = fep->fpi;
  226 + cbd_t *bdp;
  227 + struct sk_buff *skb, *skbn, *skbt;
  228 + int received = 0;
  229 + u16 pkt_len, sc;
  230 + int curidx;
  231 + /*
  232 + * First, grab all of the stats for the incoming packet.
  233 + * These get messed up if we get called due to a busy condition.
  234 + */
  235 + bdp = fep->cur_rx;
  236 +
  237 + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
  238 +
  239 + curidx = bdp - fep->rx_bd_base;
  240 +
  241 + /*
  242 + * Since we have allocated space to hold a complete frame,
  243 + * the last indicator should be set.
  244 + */
  245 + if ((sc & BD_ENET_RX_LAST) == 0)
  246 + printk(KERN_WARNING DRV_MODULE_NAME
  247 + ": %s rcv is not +last\n",
  248 + dev->name);
  249 +
  250 + /*
  251 + * Check for errors.
  252 + */
  253 + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
  254 + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
  255 + fep->stats.rx_errors++;
  256 + /* Frame too long or too short. */
  257 + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
  258 + fep->stats.rx_length_errors++;
  259 + /* Frame alignment */
  260 + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
  261 + fep->stats.rx_frame_errors++;
  262 + /* CRC Error */
  263 + if (sc & BD_ENET_RX_CR)
  264 + fep->stats.rx_crc_errors++;
  265 + /* FIFO overrun */
  266 + if (sc & BD_ENET_RX_OV)
  267 + fep->stats.rx_crc_errors++;
  268 +
  269 + skb = fep->rx_skbuff[curidx];
  270 +
  271 + dma_unmap_single(fep->dev, skb->data,
  272 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  273 + DMA_FROM_DEVICE);
  274 +
  275 + skbn = skb;
  276 +
  277 + } else {
  278 +
  279 + skb = fep->rx_skbuff[curidx];
  280 +
  281 + dma_unmap_single(fep->dev, skb->data,
  282 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  283 + DMA_FROM_DEVICE);
  284 +
  285 + /*
  286 + * Process the incoming frame.
  287 + */
  288 + fep->stats.rx_packets++;
  289 + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
  290 + fep->stats.rx_bytes += pkt_len + 4;
  291 +
  292 + if (pkt_len <= fpi->rx_copybreak) {
  293 + /* +2 to make IP header L1 cache aligned */
  294 + skbn = dev_alloc_skb(pkt_len + 2);
  295 + if (skbn != NULL) {
  296 + skb_reserve(skbn, 2); /* align IP header */
  297 + memcpy(skbn->data, skb->data, pkt_len);
  298 + /* swap */
  299 + skbt = skb;
  300 + skb = skbn;
  301 + skbn = skbt;
  302 + }
  303 + } else
  304 + skbn = dev_alloc_skb(ENET_RX_FRSIZE);
  305 +
  306 + if (skbn != NULL) {
  307 + skb->dev = dev;
  308 + skb_put(skb, pkt_len); /* Make room */
  309 + skb->protocol = eth_type_trans(skb, dev);
  310 + received++;
  311 + netif_rx(skb);
  312 + } else {
  313 + printk(KERN_WARNING DRV_MODULE_NAME
  314 + ": %s Memory squeeze, dropping packet.\n",
  315 + dev->name);
  316 + fep->stats.rx_dropped++;
  317 + skbn = skb;
  318 + }
  319 + }
  320 +
  321 + fep->rx_skbuff[curidx] = skbn;
  322 + CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
  323 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  324 + DMA_FROM_DEVICE));
  325 + CBDW_DATLEN(bdp, 0);
  326 + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
  327 +
  328 + /*
  329 + * Update BD pointer to next entry.
  330 + */
  331 + if ((sc & BD_ENET_RX_WRAP) == 0)
  332 + bdp++;
  333 + else
  334 + bdp = fep->rx_bd_base;
  335 +
  336 + (*fep->ops->rx_bd_done)(dev);
  337 + }
  338 +
  339 + fep->cur_rx = bdp;
  340 +
  341 + return 0;
  342 +}
  343 +
  344 +static void fs_enet_tx(struct net_device *dev)
  345 +{
  346 + struct fs_enet_private *fep = netdev_priv(dev);
  347 + cbd_t *bdp;
  348 + struct sk_buff *skb;
  349 + int dirtyidx, do_wake, do_restart;
  350 + u16 sc;
  351 +
  352 + spin_lock(&fep->lock);
  353 + bdp = fep->dirty_tx;
  354 +
  355 + do_wake = do_restart = 0;
  356 + while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
  357 +
  358 + dirtyidx = bdp - fep->tx_bd_base;
  359 +
  360 + if (fep->tx_free == fep->tx_ring)
  361 + break;
  362 +
  363 + skb = fep->tx_skbuff[dirtyidx];
  364 +
  365 + /*
  366 + * Check for errors.
  367 + */
  368 + if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
  369 + BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
  370 +
  371 + if (sc & BD_ENET_TX_HB) /* No heartbeat */
  372 + fep->stats.tx_heartbeat_errors++;
  373 + if (sc & BD_ENET_TX_LC) /* Late collision */
  374 + fep->stats.tx_window_errors++;
  375 + if (sc & BD_ENET_TX_RL) /* Retrans limit */
  376 + fep->stats.tx_aborted_errors++;
  377 + if (sc & BD_ENET_TX_UN) /* Underrun */
  378 + fep->stats.tx_fifo_errors++;
  379 + if (sc & BD_ENET_TX_CSL) /* Carrier lost */
  380 + fep->stats.tx_carrier_errors++;
  381 +
  382 + if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
  383 + fep->stats.tx_errors++;
  384 + do_restart = 1;
  385 + }
  386 + } else
  387 + fep->stats.tx_packets++;
  388 +
  389 + if (sc & BD_ENET_TX_READY)
  390 + printk(KERN_WARNING DRV_MODULE_NAME
  391 + ": %s HEY! Enet xmit interrupt and TX_READY.\n",
  392 + dev->name);
  393 +
  394 + /*
  395 + * Deferred means some collisions occurred during transmit,
  396 + * but we eventually sent the packet OK.
  397 + */
  398 + if (sc & BD_ENET_TX_DEF)
  399 + fep->stats.collisions++;
  400 +
  401 + /* unmap */
  402 + dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
  403 +
  404 + /*
  405 + * Free the sk buffer associated with this last transmit.
  406 + */
  407 + dev_kfree_skb_irq(skb);
  408 + fep->tx_skbuff[dirtyidx] = NULL;
  409 +
  410 + /*
  411 + * Update pointer to next buffer descriptor to be transmitted.
  412 + */
  413 + if ((sc & BD_ENET_TX_WRAP) == 0)
  414 + bdp++;
  415 + else
  416 + bdp = fep->tx_bd_base;
  417 +
  418 + /*
  419 + * Since we have freed up a buffer, the ring is no longer
  420 + * full.
  421 + */
  422 + if (!fep->tx_free++)
  423 + do_wake = 1;
  424 + }
  425 +
  426 + fep->dirty_tx = bdp;
  427 +
  428 + if (do_restart)
  429 + (*fep->ops->tx_restart)(dev);
  430 +
  431 + spin_unlock(&fep->lock);
  432 +
  433 + if (do_wake)
  434 + netif_wake_queue(dev);
  435 +}
  436 +
  437 +/*
  438 + * The interrupt handler.
  439 + * This is called from the MPC core interrupt.
  440 + */
  441 +static irqreturn_t
  442 +fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
  443 +{
  444 + struct net_device *dev = dev_id;
  445 + struct fs_enet_private *fep;
  446 + const struct fs_platform_info *fpi;
  447 + u32 int_events;
  448 + u32 int_clr_events;
  449 + int nr, napi_ok;
  450 + int handled;
  451 +
  452 + fep = netdev_priv(dev);
  453 + fpi = fep->fpi;
  454 +
  455 + nr = 0;
  456 + while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
  457 +
  458 + nr++;
  459 +
  460 + int_clr_events = int_events;
  461 + if (fpi->use_napi)
  462 + int_clr_events &= ~fep->ev_napi_rx;
  463 +
  464 + (*fep->ops->clear_int_events)(dev, int_clr_events);
  465 +
  466 + if (int_events & fep->ev_err)
  467 + (*fep->ops->ev_error)(dev, int_events);
  468 +
  469 + if (int_events & fep->ev_rx) {
  470 + if (!fpi->use_napi)
  471 + fs_enet_rx_non_napi(dev);
  472 + else {
  473 + napi_ok = netif_rx_schedule_prep(dev);
  474 +
  475 + (*fep->ops->napi_disable_rx)(dev);
  476 + (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
  477 +
  478 + /* NOTE: it is possible for FCCs in NAPI mode */
  479 + /* to submit a spurious interrupt while in poll */
  480 + if (napi_ok)
  481 + __netif_rx_schedule(dev);
  482 + }
  483 + }
  484 +
  485 + if (int_events & fep->ev_tx)
  486 + fs_enet_tx(dev);
  487 + }
  488 +
  489 + handled = nr > 0;
  490 + return IRQ_RETVAL(handled);
  491 +}
  492 +
  493 +void fs_init_bds(struct net_device *dev)
  494 +{
  495 + struct fs_enet_private *fep = netdev_priv(dev);
  496 + cbd_t *bdp;
  497 + struct sk_buff *skb;
  498 + int i;
  499 +
  500 + fs_cleanup_bds(dev);
  501 +
  502 + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
  503 + fep->tx_free = fep->tx_ring;
  504 + fep->cur_rx = fep->rx_bd_base;
  505 +
  506 + /*
  507 + * Initialize the receive buffer descriptors.
  508 + */
  509 + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
  510 + skb = dev_alloc_skb(ENET_RX_FRSIZE);
  511 + if (skb == NULL) {
  512 + printk(KERN_WARNING DRV_MODULE_NAME
  513 + ": %s Memory squeeze, unable to allocate skb\n",
  514 + dev->name);
  515 + break;
  516 + }
  517 + fep->rx_skbuff[i] = skb;
  518 + skb->dev = dev;
  519 + CBDW_BUFADDR(bdp,
  520 + dma_map_single(fep->dev, skb->data,
  521 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  522 + DMA_FROM_DEVICE));
  523 + CBDW_DATLEN(bdp, 0); /* zero */
  524 + CBDW_SC(bdp, BD_ENET_RX_EMPTY |
  525 + ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
  526 + }
  527 + /*
  528 + * if we failed, fillup remainder
  529 + */
  530 + for (; i < fep->rx_ring; i++, bdp++) {
  531 + fep->rx_skbuff[i] = NULL;
  532 + CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
  533 + }
  534 +
  535 + /*
  536 + * ...and the same for transmit.
  537 + */
  538 + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
  539 + fep->tx_skbuff[i] = NULL;
  540 + CBDW_BUFADDR(bdp, 0);
  541 + CBDW_DATLEN(bdp, 0);
  542 + CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
  543 + }
  544 +}
  545 +
  546 +void fs_cleanup_bds(struct net_device *dev)
  547 +{
  548 + struct fs_enet_private *fep = netdev_priv(dev);
  549 + struct sk_buff *skb;
  550 + int i;
  551 +
  552 + /*
  553 + * Reset SKB transmit buffers.
  554 + */
  555 + for (i = 0; i < fep->tx_ring; i++) {
  556 + if ((skb = fep->tx_skbuff[i]) == NULL)
  557 + continue;
  558 +
  559 + /* unmap */
  560 + dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
  561 +
  562 + fep->tx_skbuff[i] = NULL;
  563 + dev_kfree_skb(skb);
  564 + }
  565 +
  566 + /*
  567 + * Reset SKB receive buffers
  568 + */
  569 + for (i = 0; i < fep->rx_ring; i++) {
  570 + if ((skb = fep->rx_skbuff[i]) == NULL)
  571 + continue;
  572 +
  573 + /* unmap */
  574 + dma_unmap_single(fep->dev, skb->data,
  575 + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
  576 + DMA_FROM_DEVICE);
  577 +
  578 + fep->rx_skbuff[i] = NULL;
  579 +
  580 + dev_kfree_skb(skb);
  581 + }
  582 +}
  583 +
  584 +/**********************************************************************************/
  585 +
  586 +static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
  587 +{
  588 + struct fs_enet_private *fep = netdev_priv(dev);
  589 + cbd_t *bdp;
  590 + int curidx;
  591 + u16 sc;
  592 + unsigned long flags;
  593 +
  594 + spin_lock_irqsave(&fep->tx_lock, flags);
  595 +
  596 + /*
  597 + * Fill in a Tx ring entry
  598 + */
  599 + bdp = fep->cur_tx;
  600 +
  601 + if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
  602 + netif_stop_queue(dev);
  603 + spin_unlock_irqrestore(&fep->tx_lock, flags);
  604 +
  605 + /*
  606 + * Ooops. All transmit buffers are full. Bail out.
  607 + * This should not happen, since the tx queue should be stopped.
  608 + */
  609 + printk(KERN_WARNING DRV_MODULE_NAME
  610 + ": %s tx queue full!.\n", dev->name);
  611 + return NETDEV_TX_BUSY;
  612 + }
  613 +
  614 + curidx = bdp - fep->tx_bd_base;
  615 + /*
  616 + * Clear all of the status flags.
  617 + */
  618 + CBDC_SC(bdp, BD_ENET_TX_STATS);
  619 +
  620 + /*
  621 + * Save skb pointer.
  622 + */
  623 + fep->tx_skbuff[curidx] = skb;
  624 +
  625 + fep->stats.tx_bytes += skb->len;
  626 +
  627 + /*
  628 + * Push the data cache so the CPM does not get stale memory data.
  629 + */
  630 + CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
  631 + skb->data, skb->len, DMA_TO_DEVICE));
  632 + CBDW_DATLEN(bdp, skb->len);
  633 +
  634 + dev->trans_start = jiffies;
  635 +
  636 + /*
  637 + * If this was the last BD in the ring, start at the beginning again.
  638 + */
  639 + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
  640 + fep->cur_tx++;
  641 + else
  642 + fep->cur_tx = fep->tx_bd_base;
  643 +
  644 + if (!--fep->tx_free)
  645 + netif_stop_queue(dev);
  646 +
  647 + /* Trigger transmission start */
  648 + sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
  649 + BD_ENET_TX_LAST | BD_ENET_TX_TC;
  650 +
  651 + /* note that while FEC does not have this bit
  652 + * it marks it as available for software use
  653 + * yay for hw reuse :) */
  654 + if (skb->len <= 60)
  655 + sc |= BD_ENET_TX_PAD;
  656 + CBDS_SC(bdp, sc);
  657 +
  658 + (*fep->ops->tx_kickstart)(dev);
  659 +
  660 + spin_unlock_irqrestore(&fep->tx_lock, flags);
  661 +
  662 + return NETDEV_TX_OK;
  663 +}
  664 +
  665 +static int fs_request_irq(struct net_device *dev, int irq, const char *name,
  666 + irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
  667 +{
  668 + struct fs_enet_private *fep = netdev_priv(dev);
  669 +
  670 + (*fep->ops->pre_request_irq)(dev, irq);
  671 + return request_irq(irq, irqf, SA_SHIRQ, name, dev);
  672 +}
  673 +
  674 +static void fs_free_irq(struct net_device *dev, int irq)
  675 +{
  676 + struct fs_enet_private *fep = netdev_priv(dev);
  677 +
  678 + free_irq(irq, dev);
  679 + (*fep->ops->post_free_irq)(dev, irq);
  680 +}
  681 +
  682 +/**********************************************************************************/
  683 +
  684 +/* This interrupt occurs when the PHY detects a link change. */
  685 +static irqreturn_t
  686 +fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
  687 +{
  688 + struct net_device *dev = dev_id;
  689 + struct fs_enet_private *fep;
  690 + const struct fs_platform_info *fpi;
  691 +
  692 + fep = netdev_priv(dev);
  693 + fpi = fep->fpi;
  694 +
  695 + /*
  696 + * Acknowledge the interrupt if possible. If we have not
  697 + * found the PHY yet we can't process or acknowledge the
  698 + * interrupt now. Instead we ignore this interrupt for now,
  699 + * which we can do since it is edge triggered. It will be
  700 + * acknowledged later by fs_enet_open().
  701 + */
  702 + if (!fep->phy)
  703 + return IRQ_NONE;
  704 +
  705 + fs_mii_ack_int(dev);
  706 + fs_mii_link_status_change_check(dev, 0);
  707 +
  708 + return IRQ_HANDLED;
  709 +}
  710 +
  711 +static void fs_timeout(struct net_device *dev)
  712 +{
  713 + struct fs_enet_private *fep = netdev_priv(dev);
  714 + unsigned long flags;
  715 + int wake = 0;
  716 +
  717 + fep->stats.tx_errors++;
  718 +
  719 + spin_lock_irqsave(&fep->lock, flags);
  720 +
  721 + if (dev->flags & IFF_UP) {
  722 + (*fep->ops->stop)(dev);
  723 + (*fep->ops->restart)(dev);
  724 + }
  725 +
  726 + wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
  727 + spin_unlock_irqrestore(&fep->lock, flags);
  728 +
  729 + if (wake)
  730 + netif_wake_queue(dev);
  731 +}
  732 +
  733 +static int fs_enet_open(struct net_device *dev)
  734 +{
  735 + struct fs_enet_private *fep = netdev_priv(dev);
  736 + const struct fs_platform_info *fpi = fep->fpi;
  737 + int r;
  738 +
  739 + /* Install our interrupt handler. */
  740 + r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
  741 + if (r != 0) {
  742 + printk(KERN_ERR DRV_MODULE_NAME
  743 + ": %s Could not allocate FEC IRQ!", dev->name);
  744 + return -EINVAL;
  745 + }
  746 +
  747 + /* Install our phy interrupt handler */
  748 + if (fpi->phy_irq != -1) {
  749 +
  750 + r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
  751 + if (r != 0) {
  752 + printk(KERN_ERR DRV_MODULE_NAME
  753 + ": %s Could not allocate PHY IRQ!", dev->name);
  754 + fs_free_irq(dev, fep->interrupt);
  755 + return -EINVAL;
  756 + }
  757 + }
  758 +
  759 + fs_mii_startup(dev);
  760 + netif_carrier_off(dev);
  761 + fs_mii_link_status_change_check(dev, 1);
  762 +
  763 + return 0;
  764 +}
  765 +
  766 +static int fs_enet_close(struct net_device *dev)
  767 +{
  768 + struct fs_enet_private *fep = netdev_priv(dev);
  769 + const struct fs_platform_info *fpi = fep->fpi;
  770 + unsigned long flags;
  771 +
  772 + netif_stop_queue(dev);
  773 + netif_carrier_off(dev);
  774 + fs_mii_shutdown(dev);
  775 +
  776 + spin_lock_irqsave(&fep->lock, flags);
  777 + (*fep->ops->stop)(dev);
  778 + spin_unlock_irqrestore(&fep->lock, flags);
  779 +
  780 + /* release any irqs */
  781 + if (fpi->phy_irq != -1)
  782 + fs_free_irq(dev, fpi->phy_irq);
  783 + fs_free_irq(dev, fep->interrupt);
  784 +
  785 + return 0;
  786 +}
  787 +
  788 +static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
  789 +{
  790 + struct fs_enet_private *fep = netdev_priv(dev);
  791 + return &fep->stats;
  792 +}
  793 +
  794 +/*************************************************************************/
  795 +
  796 +static void fs_get_drvinfo(struct net_device *dev,
  797 + struct ethtool_drvinfo *info)
  798 +{
  799 + strcpy(info->driver, DRV_MODULE_NAME);
  800 + strcpy(info->version, DRV_MODULE_VERSION);
  801 +}
  802 +
  803 +static int fs_get_regs_len(struct net_device *dev)
  804 +{
  805 + struct fs_enet_private *fep = netdev_priv(dev);
  806 +
  807 + return (*fep->ops->get_regs_len)(dev);
  808 +}
  809 +
  810 +static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
  811 + void *p)
  812 +{
  813 + struct fs_enet_private *fep = netdev_priv(dev);
  814 + unsigned long flags;
  815 + int r, len;
  816 +
  817 + len = regs->len;
  818 +
  819 + spin_lock_irqsave(&fep->lock, flags);
  820 + r = (*fep->ops->get_regs)(dev, p, &len);
  821 + spin_unlock_irqrestore(&fep->lock, flags);
  822 +
  823 + if (r == 0)
  824 + regs->version = 0;
  825 +}
  826 +
  827 +static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  828 +{
  829 + struct fs_enet_private *fep = netdev_priv(dev);
  830 + unsigned long flags;
  831 + int rc;
  832 +
  833 + spin_lock_irqsave(&fep->lock, flags);
  834 + rc = mii_ethtool_gset(&fep->mii_if, cmd);
  835 + spin_unlock_irqrestore(&fep->lock, flags);
  836 +
  837 + return rc;
  838 +}
  839 +
  840 +static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  841 +{
  842 + struct fs_enet_private *fep = netdev_priv(dev);
  843 + unsigned long flags;
  844 + int rc;
  845 +
  846 + spin_lock_irqsave(&fep->lock, flags);
  847 + rc = mii_ethtool_sset(&fep->mii_if, cmd);
  848 + spin_unlock_irqrestore(&fep->lock, flags);
  849 +
  850 + return rc;
  851 +}
  852 +
  853 +static int fs_nway_reset(struct net_device *dev)
  854 +{
  855 + struct fs_enet_private *fep = netdev_priv(dev);
  856 + return mii_nway_restart(&fep->mii_if);
  857 +}
  858 +
  859 +static u32 fs_get_msglevel(struct net_device *dev)
  860 +{
  861 + struct fs_enet_private *fep = netdev_priv(dev);
  862 + return fep->msg_enable;
  863 +}
  864 +
  865 +static void fs_set_msglevel(struct net_device *dev, u32 value)
  866 +{
  867 + struct fs_enet_private *fep = netdev_priv(dev);
  868 + fep->msg_enable = value;
  869 +}
  870 +
  871 +static struct ethtool_ops fs_ethtool_ops = {
  872 + .get_drvinfo = fs_get_drvinfo,
  873 + .get_regs_len = fs_get_regs_len,
  874 + .get_settings = fs_get_settings,
  875 + .set_settings = fs_set_settings,
  876 + .nway_reset = fs_nway_reset,
  877 + .get_link = ethtool_op_get_link,
  878 + .get_msglevel = fs_get_msglevel,
  879 + .set_msglevel = fs_set_msglevel,
  880 + .get_tx_csum = ethtool_op_get_tx_csum,
  881 + .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
  882 + .get_sg = ethtool_op_get_sg,
  883 + .set_sg = ethtool_op_set_sg,
  884 + .get_regs = fs_get_regs,
  885 +};
  886 +
  887 +static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  888 +{
  889 + struct fs_enet_private *fep = netdev_priv(dev);
  890 + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
  891 + unsigned long flags;
  892 + int rc;
  893 +
  894 + if (!netif_running(dev))
  895 + return -EINVAL;
  896 +
  897 + spin_lock_irqsave(&fep->lock, flags);
  898 + rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
  899 + spin_unlock_irqrestore(&fep->lock, flags);
  900 + return rc;
  901 +}
  902 +
  903 +extern int fs_mii_connect(struct net_device *dev);
  904 +extern void fs_mii_disconnect(struct net_device *dev);
  905 +
  906 +static struct net_device *fs_init_instance(struct device *dev,
  907 + const struct fs_platform_info *fpi)
  908 +{
  909 + struct net_device *ndev = NULL;
  910 + struct fs_enet_private *fep = NULL;
  911 + int privsize, i, r, err = 0, registered = 0;
  912 +
  913 + /* guard */
  914 + if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
  915 + return ERR_PTR(-EINVAL);
  916 +
  917 + privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
  918 + (fpi->rx_ring + fpi->tx_ring));
  919 +
  920 + ndev = alloc_etherdev(privsize);
  921 + if (!ndev) {
  922 + err = -ENOMEM;
  923 + goto err;
  924 + }
  925 + SET_MODULE_OWNER(ndev);
  926 +
  927 + fep = netdev_priv(ndev);
  928 + memset(fep, 0, privsize); /* clear everything */
  929 +
  930 + fep->dev = dev;
  931 + dev_set_drvdata(dev, ndev);
  932 + fep->fpi = fpi;
  933 + if (fpi->init_ioports)
  934 + fpi->init_ioports();
  935 +
  936 +#ifdef CONFIG_FS_ENET_HAS_FEC
  937 + if (fs_get_fec_index(fpi->fs_no) >= 0)
  938 + fep->ops = &fs_fec_ops;
  939 +#endif
  940 +
  941 +#ifdef CONFIG_FS_ENET_HAS_SCC
  942 + if (fs_get_scc_index(fpi->fs_no) >=0 )
  943 + fep->ops = &fs_scc_ops;
  944 +#endif
  945 +
  946 +#ifdef CONFIG_FS_ENET_HAS_FCC
  947 + if (fs_get_fcc_index(fpi->fs_no) >= 0)
  948 + fep->ops = &fs_fcc_ops;
  949 +#endif
  950 +
  951 + if (fep->ops == NULL) {
  952 + printk(KERN_ERR DRV_MODULE_NAME
  953 + ": %s No matching ops found (%d).\n",
  954 + ndev->name, fpi->fs_no);
  955 + err = -EINVAL;
  956 + goto err;
  957 + }
  958 +
  959 + r = (*fep->ops->setup_data)(ndev);
  960 + if (r != 0) {
  961 + printk(KERN_ERR DRV_MODULE_NAME
  962 + ": %s setup_data failed\n",
  963 + ndev->name);
  964 + err = r;
  965 + goto err;
  966 + }
  967 +
  968 + /* point rx_skbuff, tx_skbuff */
  969 + fep->rx_skbuff = (struct sk_buff **)&fep[1];
  970 + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
  971 +
  972 + /* init locks */
  973 + spin_lock_init(&fep->lock);
  974 + spin_lock_init(&fep->tx_lock);
  975 +
  976 + /*
  977 + * Set the Ethernet address.
  978 + */
  979 + for (i = 0; i < 6; i++)
  980 + ndev->dev_addr[i] = fpi->macaddr[i];
  981 +
  982 + r = (*fep->ops->allocate_bd)(ndev);
  983 +
  984 + if (fep->ring_base == NULL) {
  985 + printk(KERN_ERR DRV_MODULE_NAME
  986 + ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
  987 + err = r;
  988 + goto err;
  989 + }
  990 +
  991 + /*
  992 + * Set receive and transmit descriptor base.
  993 + */
  994 + fep->rx_bd_base = fep->ring_base;
  995 + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
  996 +
  997 + /* initialize ring size variables */
  998 + fep->tx_ring = fpi->tx_ring;
  999 + fep->rx_ring = fpi->rx_ring;
  1000 +
  1001 + /*
  1002 + * The FEC Ethernet specific entries in the device structure.
  1003 + */
  1004 + ndev->open = fs_enet_open;
  1005 + ndev->hard_start_xmit = fs_enet_start_xmit;
  1006 + ndev->tx_timeout = fs_timeout;
  1007 + ndev->watchdog_timeo = 2 * HZ;
  1008 + ndev->stop = fs_enet_close;
  1009 + ndev->get_stats = fs_enet_get_stats;
  1010 + ndev->set_multicast_list = fs_set_multicast_list;
  1011 + if (fpi->use_napi) {
  1012 + ndev->poll = fs_enet_rx_napi;
  1013 + ndev->weight = fpi->napi_weight;
  1014 + }
  1015 + ndev->ethtool_ops = &fs_ethtool_ops;
  1016 + ndev->do_ioctl = fs_ioctl;
  1017 +
  1018 + init_timer(&fep->phy_timer_list);
  1019 +
  1020 + netif_carrier_off(ndev);
  1021 +
  1022 + err = register_netdev(ndev);
  1023 + if (err != 0) {
  1024 + printk(KERN_ERR DRV_MODULE_NAME
  1025 + ": %s register_netdev failed.\n", ndev->name);
  1026 + goto err;
  1027 + }
  1028 + registered = 1;
  1029 +
  1030 + err = fs_mii_connect(ndev);
  1031 + if (err != 0) {
  1032 + printk(KERN_ERR DRV_MODULE_NAME
  1033 + ": %s fs_mii_connect failed.\n", ndev->name);
  1034 + goto err;
  1035 + }
  1036 +
  1037 + return ndev;
  1038 +
  1039 + err:
  1040 + if (ndev != NULL) {
  1041 +
  1042 + if (registered)
  1043 + unregister_netdev(ndev);
  1044 +
  1045 + if (fep != NULL) {
  1046 + (*fep->ops->free_bd)(ndev);
  1047 + (*fep->ops->cleanup_data)(ndev);
  1048 + }
  1049 +
  1050 + free_netdev(ndev);
  1051 + }
  1052 +
  1053 + dev_set_drvdata(dev, NULL);
  1054 +
  1055 + return ERR_PTR(err);
  1056 +}
  1057 +
  1058 +static int fs_cleanup_instance(struct net_device *ndev)
  1059 +{
  1060 + struct fs_enet_private *fep;
  1061 + const struct fs_platform_info *fpi;
  1062 + struct device *dev;
  1063 +
  1064 + if (ndev == NULL)
  1065 + return -EINVAL;
  1066 +
  1067 + fep = netdev_priv(ndev);
  1068 + if (fep == NULL)
  1069 + return -EINVAL;
  1070 +
  1071 + fpi = fep->fpi;
  1072 +
  1073 + fs_mii_disconnect(ndev);
  1074 +
  1075 + unregister_netdev(ndev);
  1076 +
  1077 + dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
  1078 + fep->ring_base, fep->ring_mem_addr);
  1079 +
  1080 + /* reset it */
  1081 + (*fep->ops->cleanup_data)(ndev);
  1082 +
  1083 + dev = fep->dev;
  1084 + if (dev != NULL) {
  1085 + dev_set_drvdata(dev, NULL);
  1086 + fep->dev = NULL;
  1087 + }
  1088 +
  1089 + free_netdev(ndev);
  1090 +
  1091 + return 0;
  1092 +}
  1093 +
  1094 +/**************************************************************************************/
  1095 +
  1096 +/* handy pointer to the immap */
  1097 +void *fs_enet_immap = NULL;
  1098 +
  1099 +static int setup_immap(void)
  1100 +{
  1101 + phys_addr_t paddr = 0;
  1102 + unsigned long size = 0;
  1103 +
  1104 +#ifdef CONFIG_CPM1
  1105 + paddr = IMAP_ADDR;
  1106 + size = 0x10000; /* map 64K */
  1107 +#endif
  1108 +
  1109 +#ifdef CONFIG_CPM2
  1110 + paddr = CPM_MAP_ADDR;
  1111 + size = 0x40000; /* map 256 K */
  1112 +#endif
  1113 + fs_enet_immap = ioremap(paddr, size);
  1114 + if (fs_enet_immap == NULL)
  1115 + return -EBADF; /* XXX ahem; maybe just BUG_ON? */
  1116 +
  1117 + return 0;
  1118 +}
  1119 +
  1120 +static void cleanup_immap(void)
  1121 +{
  1122 + if (fs_enet_immap != NULL) {
  1123 + iounmap(fs_enet_immap);
  1124 + fs_enet_immap = NULL;
  1125 + }
  1126 +}
  1127 +
  1128 +/**************************************************************************************/
  1129 +
  1130 +static int __devinit fs_enet_probe(struct device *dev)
  1131 +{
  1132 + struct net_device *ndev;
  1133 +
  1134 + /* no fixup - no device */
  1135 + if (dev->platform_data == NULL) {
  1136 + printk(KERN_INFO "fs_enet: "
  1137 + "probe called with no platform data; "
  1138 + "remove unused devices\n");
  1139 + return -ENODEV;
  1140 + }
  1141 +
  1142 + ndev = fs_init_instance(dev, dev->platform_data);
  1143 + if (IS_ERR(ndev))
  1144 + return PTR_ERR(ndev);
  1145 + return 0;
  1146 +}
  1147 +
  1148 +static int fs_enet_remove(struct device *dev)
  1149 +{
  1150 + return fs_cleanup_instance(dev_get_drvdata(dev));
  1151 +}
  1152 +
  1153 +static struct device_driver fs_enet_fec_driver = {
  1154 + .name = "fsl-cpm-fec",
  1155 + .bus = &platform_bus_type,
  1156 + .probe = fs_enet_probe,
  1157 + .remove = fs_enet_remove,
  1158 +#ifdef CONFIG_PM
  1159 +/* .suspend = fs_enet_suspend, TODO */
  1160 +/* .resume = fs_enet_resume, TODO */
  1161 +#endif
  1162 +};
  1163 +
  1164 +static struct device_driver fs_enet_scc_driver = {
  1165 + .name = "fsl-cpm-scc",
  1166 + .bus = &platform_bus_type,
  1167 + .probe = fs_enet_probe,
  1168 + .remove = fs_enet_remove,
  1169 +#ifdef CONFIG_PM
  1170 +/* .suspend = fs_enet_suspend, TODO */
  1171 +/* .resume = fs_enet_resume, TODO */
  1172 +#endif
  1173 +};
  1174 +
  1175 +static struct device_driver fs_enet_fcc_driver = {
  1176 + .name = "fsl-cpm-fcc",
  1177 + .bus = &platform_bus_type,
  1178 + .probe = fs_enet_probe,
  1179 + .remove = fs_enet_remove,
  1180 +#ifdef CONFIG_PM
  1181 +/* .suspend = fs_enet_suspend, TODO */
  1182 +/* .resume = fs_enet_resume, TODO */
  1183 +#endif
  1184 +};
  1185 +
  1186 +static int __init fs_init(void)
  1187 +{
  1188 + int r;
  1189 +
  1190 + printk(KERN_INFO
  1191 + "%s", version);
  1192 +
  1193 + r = setup_immap();
  1194 + if (r != 0)
  1195 + return r;
  1196 + r = driver_register(&fs_enet_fec_driver);
  1197 + if (r != 0)
  1198 + goto err;
  1199 +
  1200 + r = driver_register(&fs_enet_fcc_driver);
  1201 + if (r != 0)
  1202 + goto err;
  1203 +
  1204 + r = driver_register(&fs_enet_scc_driver);
  1205 + if (r != 0)
  1206 + goto err;
  1207 +
  1208 + return 0;
  1209 +err:
  1210 + cleanup_immap();
  1211 + return r;
  1212 +
  1213 +}
  1214 +
  1215 +static void __exit fs_cleanup(void)
  1216 +{
  1217 + driver_unregister(&fs_enet_fec_driver);
  1218 + driver_unregister(&fs_enet_fcc_driver);
  1219 + driver_unregister(&fs_enet_scc_driver);
  1220 + cleanup_immap();
  1221 +}
  1222 +
  1223 +/**************************************************************************************/
  1224 +
  1225 +module_init(fs_init);
  1226 +module_exit(fs_cleanup);
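
The three platform drivers above bind purely by name ("fsl-cpm-fec", "fsl-cpm-scc", "fsl-cpm-fcc"), so a board support file is expected to register a matching platform device whose platform_data points at a struct fs_platform_info. Below is a minimal sketch of such a registration; it assumes the fs_platform_info and fs_mii_bus_info layouts in <linux/fs_enet_pd.h> expose the fields the driver dereferences above, and every name and value in it (my_fec_mii_bus, my_fec_pdata, the ring sizes) is illustrative rather than part of this commit.

#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs_enet_pd.h>

/* Illustrative board-support sketch (not part of this commit):
 * describe the MII bus and the MAC, then register a platform device
 * whose name matches fs_enet_fec_driver so that fs_enet_probe()
 * finds the platform_data. */
static struct fs_mii_bus_info my_fec_mii_bus = {
	.method = fsmii_bitbang,	/* or fsmii_fixed / fsmii_fec */
	.id     = 0,
};

static struct fs_platform_info my_fec_pdata = {
	.fs_no        = 0,		/* index as decoded by fs_get_fec_index() */
	.rx_ring      = 32,		/* number of RX buffer descriptors */
	.tx_ring      = 16,		/* number of TX buffer descriptors */
	.rx_copybreak = 240,		/* copy frames this small into a fresh skb */
	.use_napi     = 1,
	.napi_weight  = 17,
	.phy_addr     = -1,		/* -1 = auto-detect the PHY address */
	.phy_irq      = -1,		/* -1 = no PHY interrupt, poll for link changes */
	.bus_info     = &my_fec_mii_bus,
	/* .macaddr and .init_ioports would be filled in as the board requires */
};

static struct platform_device my_fec_device = {
	.name = "fsl-cpm-fec",		/* must match fs_enet_fec_driver.name */
	.id   = 0,
	.dev  = {
		.platform_data = &my_fec_pdata,
	},
};

static int __init my_board_add_fec(void)
{
	return platform_device_register(&my_fec_device);
}

A board would hook my_board_add_fec() into its arch setup code; the probe path then takes over exactly as in fs_init_instance() above.
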
drivers/net/fs_enet/fs_enet-mii.c
  1 +/*
  2 + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
  11 + * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
  12 + *
  13 + * This file is licensed under the terms of the GNU General Public License
  14 + * version 2. This program is licensed "as is" without any warranty of any
  15 + * kind, whether express or implied.
  16 + */
  17 +
  18 +
  19 +#include <linux/config.h>
  20 +#include <linux/module.h>
  21 +#include <linux/types.h>
  22 +#include <linux/kernel.h>
  23 +#include <linux/sched.h>
  24 +#include <linux/string.h>
  25 +#include <linux/ptrace.h>
  26 +#include <linux/errno.h>
  27 +#include <linux/ioport.h>
  28 +#include <linux/slab.h>
  29 +#include <linux/interrupt.h>
  30 +#include <linux/pci.h>
  31 +#include <linux/init.h>
  32 +#include <linux/delay.h>
  33 +#include <linux/netdevice.h>
  34 +#include <linux/etherdevice.h>
  35 +#include <linux/skbuff.h>
  36 +#include <linux/spinlock.h>
  37 +#include <linux/mii.h>
  38 +#include <linux/ethtool.h>
  39 +#include <linux/bitops.h>
  40 +
  41 +#include <asm/pgtable.h>
  42 +#include <asm/irq.h>
  43 +#include <asm/uaccess.h>
  44 +
  45 +#include "fs_enet.h"
  46 +
  47 +/*************************************************/
  48 +
  49 +/*
  50 + * Generic PHY support.
  51 + * Should work for all PHYs, but link change is detected by polling
  52 + */
  53 +
  54 +static void generic_timer_callback(unsigned long data)
  55 +{
  56 + struct net_device *dev = (struct net_device *)data;
  57 + struct fs_enet_private *fep = netdev_priv(dev);
  58 +
  59 + fep->phy_timer_list.expires = jiffies + HZ / 2;
  60 +
  61 + add_timer(&fep->phy_timer_list);
  62 +
  63 + fs_mii_link_status_change_check(dev, 0);
  64 +}
  65 +
  66 +static void generic_startup(struct net_device *dev)
  67 +{
  68 + struct fs_enet_private *fep = netdev_priv(dev);
  69 +
  70 + fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
  71 + fep->phy_timer_list.data = (unsigned long)dev;
  72 + fep->phy_timer_list.function = generic_timer_callback;
  73 + add_timer(&fep->phy_timer_list);
  74 +}
  75 +
  76 +static void generic_shutdown(struct net_device *dev)
  77 +{
  78 + struct fs_enet_private *fep = netdev_priv(dev);
  79 +
  80 + del_timer_sync(&fep->phy_timer_list);
  81 +}
  82 +
  83 +/* ------------------------------------------------------------------------- */
  84 +/* The Davicom DM9161 is used on the NETTA board */
  85 +
  86 +/* register definitions */
  87 +
  88 +#define MII_DM9161_ANAR 4 /* Autoneg Advertisement Register */
  89 +#define MII_DM9161_ACR 16 /* Aux. Config Register */
  90 +#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
  91 +#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
  92 +#define MII_DM9161_INTR 21 /* Interrupt Register */
  93 +#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
  94 +#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
  95 +
  96 +static void dm9161_startup(struct net_device *dev)
  97 +{
  98 + struct fs_enet_private *fep = netdev_priv(dev);
  99 +
  100 + fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
  101 + /* Start autonegotiation */
  102 + fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
  103 +
  104 + set_current_state(TASK_UNINTERRUPTIBLE);
  105 + schedule_timeout(HZ*8);
  106 +}
  107 +
  108 +static void dm9161_ack_int(struct net_device *dev)
  109 +{
  110 + struct fs_enet_private *fep = netdev_priv(dev);
  111 +
  112 + fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
  113 +}
  114 +
  115 +static void dm9161_shutdown(struct net_device *dev)
  116 +{
  117 + struct fs_enet_private *fep = netdev_priv(dev);
  118 +
  119 + fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
  120 +}
  121 +
  122 +/**********************************************************************************/
  123 +
  124 +static const struct phy_info phy_info[] = {
  125 + {
  126 + .id = 0x00181b88,
  127 + .name = "DM9161",
  128 + .startup = dm9161_startup,
  129 + .ack_int = dm9161_ack_int,
  130 + .shutdown = dm9161_shutdown,
  131 + }, {
  132 + .id = 0,
  133 + .name = "GENERIC",
  134 + .startup = generic_startup,
  135 + .shutdown = generic_shutdown,
  136 + },
  137 +};
  138 +
  139 +/**********************************************************************************/
  140 +
  141 +static int phy_id_detect(struct net_device *dev)
  142 +{
  143 + struct fs_enet_private *fep = netdev_priv(dev);
  144 + const struct fs_platform_info *fpi = fep->fpi;
  145 + struct fs_enet_mii_bus *bus = fep->mii_bus;
  146 + int i, r, start, end, phytype, physubtype;
  147 + const struct phy_info *phy;
  148 + int phy_hwid, phy_id;
  149 +
  150 + phy_hwid = -1;
  151 + fep->phy = NULL;
  152 +
  153 + /* auto-detect? */
  154 + if (fpi->phy_addr == -1) {
  155 + start = 1;
  156 + end = 32;
  157 + } else { /* direct */
  158 + start = fpi->phy_addr;
  159 + end = start + 1;
  160 + }
  161 +
  162 + for (phy_id = start; phy_id < end; phy_id++) {
  163 + /* skip already used phy addresses on this bus */
  164 + if (bus->usage_map & (1 << phy_id))
  165 + continue;
  166 + r = fs_mii_read(dev, phy_id, MII_PHYSID1);
  167 + if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
  168 + continue;
  169 + r = fs_mii_read(dev, phy_id, MII_PHYSID2);
  170 + if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
  171 + continue;
  172 + phy_hwid = (phytype << 16) | physubtype;
  173 + if (phy_hwid != -1)
  174 + break;
  175 + }
  176 +
  177 + if (phy_hwid == -1) {
  178 + printk(KERN_ERR DRV_MODULE_NAME
  179 + ": %s No PHY detected! range=0x%02x-0x%02x\n",
  180 + dev->name, start, end);
  181 + return -1;
  182 + }
  183 +
  184 + for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
  185 + if (phy->id == (phy_hwid >> 4) || phy->id == 0)
  186 + break;
  187 +
  188 + if (i >= ARRAY_SIZE(phy_info)) {
  189 + printk(KERN_ERR DRV_MODULE_NAME
  190 + ": %s PHY id 0x%08x is not supported!\n",
  191 + dev->name, phy_hwid);
  192 + return -1;
  193 + }
  194 +
  195 + fep->phy = phy;
  196 +
  197 + /* mark this address as used */
  198 + bus->usage_map |= (1 << phy_id);
  199 +
  200 + printk(KERN_INFO DRV_MODULE_NAME
  201 + ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
  202 + dev->name, phy_id, fep->phy->name, phy_hwid,
  203 + fpi->phy_addr == -1 ? " (auto-detected)" : "");
  204 +
  205 + return phy_id;
  206 +}
  207 +
  208 +void fs_mii_startup(struct net_device *dev)
  209 +{
  210 + struct fs_enet_private *fep = netdev_priv(dev);
  211 +
  212 + if (fep->phy->startup)
  213 + (*fep->phy->startup) (dev);
  214 +}
  215 +
  216 +void fs_mii_shutdown(struct net_device *dev)
  217 +{
  218 + struct fs_enet_private *fep = netdev_priv(dev);
  219 +
  220 + if (fep->phy->shutdown)
  221 + (*fep->phy->shutdown) (dev);
  222 +}
  223 +
  224 +void fs_mii_ack_int(struct net_device *dev)
  225 +{
  226 + struct fs_enet_private *fep = netdev_priv(dev);
  227 +
  228 + if (fep->phy->ack_int)
  229 + (*fep->phy->ack_int) (dev);
  230 +}
  231 +
  232 +#define MII_LINK 0x0001
  233 +#define MII_HALF 0x0002
  234 +#define MII_FULL 0x0004
  235 +#define MII_BASE4 0x0008
  236 +#define MII_10M 0x0010
  237 +#define MII_100M 0x0020
  238 +#define MII_1G 0x0040
  239 +#define MII_10G 0x0080
  240 +
  241 +/* return full mii info at one gulp, with a usable form */
  242 +static unsigned int mii_full_status(struct mii_if_info *mii)
  243 +{
  244 + unsigned int status;
  245 + int bmsr, adv, lpa, neg;
  246 + struct fs_enet_private* fep = netdev_priv(mii->dev);
  247 +
  248 + /* first, a dummy read, needed to latch some MII phys */
  249 + (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
  250 + bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
  251 +
  252 + /* no link */
  253 + if ((bmsr & BMSR_LSTATUS) == 0)
  254 + return 0;
  255 +
  256 + status = MII_LINK;
  257 +
  258 + /* Let's look at what ANEG says if it's supported - otherwise we shall
  259 + take the right values from the platform info */
  260 + if(!mii->force_media) {
  261 + /* autoneg not completed; don't bother */
  262 + if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
  263 + return 0;
  264 +
  265 + adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
  266 + lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
  267 +
  268 + neg = lpa & adv;
  269 + } else {
  270 + neg = fep->fpi->bus_info->lpa;
  271 + }
  272 +
  273 + if (neg & LPA_100FULL)
  274 + status |= MII_FULL | MII_100M;
  275 + else if (neg & LPA_100BASE4)
  276 + status |= MII_FULL | MII_BASE4 | MII_100M;
  277 + else if (neg & LPA_100HALF)
  278 + status |= MII_HALF | MII_100M;
  279 + else if (neg & LPA_10FULL)
  280 + status |= MII_FULL | MII_10M;
  281 + else
  282 + status |= MII_HALF | MII_10M;
  283 +
  284 + return status;
  285 +}
  286 +
  287 +void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
  288 +{
  289 + struct fs_enet_private *fep = netdev_priv(dev);
  290 + struct mii_if_info *mii = &fep->mii_if;
  291 + unsigned int mii_status;
  292 + int ok_to_print, link, duplex, speed;
  293 + unsigned long flags;
  294 +
  295 + ok_to_print = netif_msg_link(fep);
  296 +
  297 + mii_status = mii_full_status(mii);
  298 +
  299 + if (!init_media && mii_status == fep->last_mii_status)
  300 + return;
  301 +
  302 + fep->last_mii_status = mii_status;
  303 +
  304 + link = !!(mii_status & MII_LINK);
  305 + duplex = !!(mii_status & MII_FULL);
  306 + speed = (mii_status & MII_100M) ? 100 : 10;
  307 +
  308 + if (link == 0) {
  309 + netif_carrier_off(mii->dev);
  310 + netif_stop_queue(dev);
  311 + if (!init_media) {
  312 + spin_lock_irqsave(&fep->lock, flags);
  313 + (*fep->ops->stop)(dev);
  314 + spin_unlock_irqrestore(&fep->lock, flags);
  315 + }
  316 +
  317 + if (ok_to_print)
  318 + printk(KERN_INFO "%s: link down\n", mii->dev->name);
  319 +
  320 + } else {
  321 +
  322 + mii->full_duplex = duplex;
  323 +
  324 + netif_carrier_on(mii->dev);
  325 +
  326 + spin_lock_irqsave(&fep->lock, flags);
  327 + fep->duplex = duplex;
  328 + fep->speed = speed;
  329 + (*fep->ops->restart)(dev);
  330 + spin_unlock_irqrestore(&fep->lock, flags);
  331 +
  332 + netif_start_queue(dev);
  333 +
  334 + if (ok_to_print)
  335 + printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
  336 + dev->name, speed, duplex ? "full" : "half");
  337 + }
  338 +}
  339 +
  340 +/**********************************************************************************/
  341 +
  342 +int fs_mii_read(struct net_device *dev, int phy_id, int location)
  343 +{
  344 + struct fs_enet_private *fep = netdev_priv(dev);
  345 + struct fs_enet_mii_bus *bus = fep->mii_bus;
  346 +
  347 + unsigned long flags;
  348 + int ret;
  349 +
  350 + spin_lock_irqsave(&bus->mii_lock, flags);
  351 + ret = (*bus->mii_read)(bus, phy_id, location);
  352 + spin_unlock_irqrestore(&bus->mii_lock, flags);
  353 +
  354 + return ret;
  355 +}
  356 +
  357 +void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
  358 +{
  359 + struct fs_enet_private *fep = netdev_priv(dev);
  360 + struct fs_enet_mii_bus *bus = fep->mii_bus;
  361 + unsigned long flags;
  362 +
  363 + spin_lock_irqsave(&bus->mii_lock, flags);
  364 + (*bus->mii_write)(bus, phy_id, location, value);
  365 + spin_unlock_irqrestore(&bus->mii_lock, flags);
  366 +}
  367 +
  368 +/*****************************************************************************/
  369 +
  370 +/* list of all registered mii buses */
  371 +static LIST_HEAD(fs_mii_bus_list);
  372 +
  373 +static struct fs_enet_mii_bus *lookup_bus(int method, int id)
  374 +{
  375 + struct list_head *ptr;
  376 + struct fs_enet_mii_bus *bus;
  377 +
  378 + list_for_each(ptr, &fs_mii_bus_list) {
  379 + bus = list_entry(ptr, struct fs_enet_mii_bus, list);
  380 + if (bus->bus_info->method == method &&
  381 + bus->bus_info->id == id)
  382 + return bus;
  383 + }
  384 + return NULL;
  385 +}
  386 +
  387 +static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
  388 +{
  389 + struct fs_enet_mii_bus *bus;
  390 + int ret = 0;
  391 +
  392 + bus = kmalloc(sizeof(*bus), GFP_KERNEL);
  393 + if (bus == NULL) {
  394 + ret = -ENOMEM;
  395 + goto err;
  396 + }
  397 + memset(bus, 0, sizeof(*bus));
  398 + spin_lock_init(&bus->mii_lock);
  399 + bus->bus_info = bi;
  400 + bus->refs = 0;
  401 + bus->usage_map = 0;
  402 +
  403 + /* perform initialization */
  404 + switch (bi->method) {
  405 +
  406 + case fsmii_fixed:
  407 + ret = fs_mii_fixed_init(bus);
  408 + if (ret != 0)
  409 + goto err;
  410 + break;
  411 +
  412 + case fsmii_bitbang:
  413 + ret = fs_mii_bitbang_init(bus);
  414 + if (ret != 0)
  415 + goto err;
  416 + break;
  417 +#ifdef CONFIG_FS_ENET_HAS_FEC
  418 + case fsmii_fec:
  419 + ret = fs_mii_fec_init(bus);
  420 + if (ret != 0)
  421 + goto err;
  422 + break;
  423 +#endif
  424 + default:
  425 + ret = -EINVAL;
  426 + goto err;
  427 + }
  428 +
  429 + list_add(&bus->list, &fs_mii_bus_list);
  430 +
  431 + return bus;
  432 +
  433 +err:
  434 + if (bus)
  435 + kfree(bus);
  436 + return ERR_PTR(ret);
  437 +}
  438 +
  439 +static void destroy_bus(struct fs_enet_mii_bus *bus)
  440 +{
  441 + /* remove from bus list */
  442 + list_del(&bus->list);
  443 +
  444 + /* nothing more needed */
  445 + kfree(bus);
  446 +}
  447 +
  448 +int fs_mii_connect(struct net_device *dev)
  449 +{
  450 + struct fs_enet_private *fep = netdev_priv(dev);
  451 + const struct fs_platform_info *fpi = fep->fpi;
  452 + struct fs_enet_mii_bus *bus = NULL;
  453 +
  454 + /* check method validity */
  455 + switch (fpi->bus_info->method) {
  456 + case fsmii_fixed:
  457 + case fsmii_bitbang:
  458 + break;
  459 +#ifdef CONFIG_FS_ENET_HAS_FEC
  460 + case fsmii_fec:
  461 + break;
  462 +#endif
  463 + default:
  464 + printk(KERN_ERR DRV_MODULE_NAME
  465 + ": %s Unknown MII bus method (%d)!\n",
  466 + dev->name, fpi->bus_info->method);
  467 + return -EINVAL;
  468 + }
  469 +
  470 + bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
  471 +
  472 + /* if not found create new bus */
  473 + if (bus == NULL) {
  474 + bus = create_bus(fpi->bus_info);
  475 + if (IS_ERR(bus)) {
  476 + printk(KERN_ERR DRV_MODULE_NAME
  477 + ": %s MII bus creation failure!\n", dev->name);
  478 + return PTR_ERR(bus);
  479 + }
  480 + }
  481 +
  482 + bus->refs++;
  483 +
  484 + fep->mii_bus = bus;
  485 +
  486 + fep->mii_if.dev = dev;
  487 + fep->mii_if.phy_id_mask = 0x1f;
  488 + fep->mii_if.reg_num_mask = 0x1f;
  489 + fep->mii_if.mdio_read = fs_mii_read;
  490 + fep->mii_if.mdio_write = fs_mii_write;
  491 + fep->mii_if.force_media = fpi->bus_info->disable_aneg;
  492 + fep->mii_if.phy_id = phy_id_detect(dev);
  493 +
  494 + return 0;
  495 +}
  496 +
  497 +void fs_mii_disconnect(struct net_device *dev)
  498 +{
  499 + struct fs_enet_private *fep = netdev_priv(dev);
  500 + struct fs_enet_mii_bus *bus = NULL;
  501 +
  502 + bus = fep->mii_bus;
  503 + fep->mii_bus = NULL;
  504 +
  505 + if (--bus->refs <= 0)
  506 + destroy_bus(bus);
  507 +}
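
A usage note on the fixed-link path above (a hedged sketch, not part of the commit): when bus_info->disable_aneg is set, fs_mii_connect() copies it into mii_if.force_media, and mii_full_status() then decodes speed and duplex from bus_info->lpa instead of querying a real PHY. A board with a PHY-less (e.g. switch-connected) port could therefore describe a fixed 100 Mbit/s full-duplex link like this; the bus id and the variable name are made up for the example.

#include <linux/mii.h>
#include <linux/fs_enet_pd.h>

/* Illustrative only: a "fixed" MII bus with no MDIO-visible PHY.
 * LPA_100FULL is decoded by mii_full_status() as MII_FULL | MII_100M,
 * i.e. the link is reported as 100 Mbit/s full duplex. */
static struct fs_mii_bus_info fixed_100fdx_bus = {
	.method       = fsmii_fixed,
	.id           = 1,
	.disable_aneg = 1,		/* becomes mii_if.force_media in fs_mii_connect() */
	.lpa          = LPA_100FULL,
};

The fs_platform_info.bus_info pointer of the corresponding MAC would then point at fixed_100fdx_bus, as in the registration sketch after fs_enet-main.c above.
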
drivers/net/fs_enet/fs_enet.h
  1 +#ifndef FS_ENET_H
  2 +#define FS_ENET_H
  3 +
  4 +#include <linux/mii.h>
  5 +#include <linux/netdevice.h>
  6 +#include <linux/types.h>
  7 +#include <linux/version.h>
  8 +#include <linux/list.h>
  9 +
  10 +#include <linux/fs_enet_pd.h>
  11 +
  12 +#include <asm/dma-mapping.h>
  13 +
  14 +#ifdef CONFIG_CPM1
  15 +#include <asm/commproc.h>
  16 +#endif
  17 +
  18 +#ifdef CONFIG_CPM2
  19 +#include <asm/cpm2.h>
  20 +#endif
  21 +
  22 +/* hw driver ops */
  23 +struct fs_ops {
  24 + int (*setup_data)(struct net_device *dev);
  25 + int (*allocate_bd)(struct net_device *dev);
  26 + void (*free_bd)(struct net_device *dev);
  27 + void (*cleanup_data)(struct net_device *dev);
  28 + void (*set_multicast_list)(struct net_device *dev);
  29 + void (*restart)(struct net_device *dev);
  30 + void (*stop)(struct net_device *dev);
  31 + void (*pre_request_irq)(struct net_device *dev, int irq);
  32 + void (*post_free_irq)(struct net_device *dev, int irq);
  33 + void (*napi_clear_rx_event)(struct net_device *dev);
  34 + void (*napi_enable_rx)(struct net_device *dev);
  35 + void (*napi_disable_rx)(struct net_device *dev);
  36 + void (*rx_bd_done)(struct net_device *dev);
  37 + void (*tx_kickstart)(struct net_device *dev);
  38 + u32 (*get_int_events)(struct net_device *dev);
  39 + void (*clear_int_events)(struct net_device *dev, u32 int_events);
  40 + void (*ev_error)(struct net_device *dev, u32 int_events);
  41 + int (*get_regs)(struct net_device *dev, void *p, int *sizep);
  42 + int (*get_regs_len)(struct net_device *dev);
  43 + void (*tx_restart)(struct net_device *dev);
  44 +};
  45 +
  46 +struct phy_info {
  47 + unsigned int id;
  48 + const char *name;
  49 + void (*startup) (struct net_device * dev);
  50 + void (*shutdown) (struct net_device * dev);
  51 + void (*ack_int) (struct net_device * dev);
  52 +};
  53 +
  54 +/* The FEC stores dest/src/type, data, and checksum for receive packets.
  55 + */
  56 +#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
  57 +#define MIN_MTU 46 /* this is data size */
  58 +#define CRC_LEN 4
  59 +
  60 +#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
  61 +#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
  62 +
  63 +/* Must be a multiple of 32 (to cover both FEC & FCC) */
  64 +#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
  65 +/* This is needed so that invalidate_xxx won't invalidate too much */
  66 +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
  67 +
  68 +struct fs_enet_mii_bus {
  69 + struct list_head list;
  70 + spinlock_t mii_lock;
  71 + const struct fs_mii_bus_info *bus_info;
  72 + int refs;
  73 + u32 usage_map;
  74 +
  75 + int (*mii_read)(struct fs_enet_mii_bus *bus,
  76 + int phy_id, int location);
  77 +
  78 + void (*mii_write)(struct fs_enet_mii_bus *bus,
  79 + int phy_id, int location, int value);
  80 +
  81 + union {
  82 + struct {
  83 + unsigned int mii_speed;
  84 + void *fecp;
  85 + } fec;
  86 +
  87 + struct {
  88 + /* note that the actual port size may */
  89 + /* be different; cpm(s) handle it OK */
  90 + u8 mdio_msk;
  91 + u8 *mdio_dir;
  92 + u8 *mdio_dat;
  93 + u8 mdc_msk;
  94 + u8 *mdc_dir;
  95 + u8 *mdc_dat;
  96 + } bitbang;
  97 +
  98 + struct {
  99 + u16 lpa;
  100 + } fixed;
  101 + };
  102 +};
  103 +
  104 +int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
  105 +int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
  106 +int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
  107 +
  108 +struct fs_enet_private {
  109 + struct device *dev; /* pointer back to the device (must be initialized first) */
  110 + spinlock_t lock; /* during all ops except TX pckt processing */
  111 + spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
  112 + const struct fs_platform_info *fpi;
  113 + const struct fs_ops *ops;
  114 + int rx_ring, tx_ring;
  115 + dma_addr_t ring_mem_addr;
  116 + void *ring_base;
  117 + struct sk_buff **rx_skbuff;
  118 + struct sk_buff **tx_skbuff;
  119 + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
  120 + cbd_t *tx_bd_base;
  121 + cbd_t *dirty_tx; /* ring entries to be free()ed. */
  122 + cbd_t *cur_rx;
  123 + cbd_t *cur_tx;
  124 + int tx_free;
  125 + struct net_device_stats stats;
  126 + struct timer_list phy_timer_list;
  127 + const struct phy_info *phy;
  128 + u32 msg_enable;
  129 + struct mii_if_info mii_if;
  130 + unsigned int last_mii_status;
  131 + struct fs_enet_mii_bus *mii_bus;
  132 + int interrupt;
  133 +
  134 + int duplex, speed; /* current settings */
  135 +
  136 + /* event masks */
  137 + u32 ev_napi_rx; /* mask of NAPI rx events */
  138 + u32 ev_rx; /* rx event mask */
  139 + u32 ev_tx; /* tx event mask */
  140 + u32 ev_err; /* error event mask */
  141 +
  142 + u16 bd_rx_empty; /* mask of BD rx empty */
  143 + u16 bd_rx_err; /* mask of BD rx errors */
  144 +
  145 + union {
  146 + struct {
  147 + int idx; /* FEC1 = 0, FEC2 = 1 */
  148 + void *fecp; /* hw registers */
  149 + u32 hthi, htlo; /* state for multicast */
  150 + } fec;
  151 +
  152 + struct {
  153 + int idx; /* FCC1-3 = 0-2 */
  154 + void *fccp; /* hw registers */
  155 + void *ep; /* parameter ram */
  156 + void *fcccp; /* hw registers cont. */
  157 + void *mem; /* FCC DPRAM */
  158 + u32 gaddrh, gaddrl; /* group address */
  159 + } fcc;
  160 +
  161 + struct {
  162 + int idx; /* FEC1 = 0, FEC2 = 1 */
  163 + void *sccp; /* hw registers */
  164 + void *ep; /* parameter ram */
  165 + u32 hthi, htlo; /* state for multicast */
  166 + } scc;
  167 +
  168 + };
  169 +};
  170 +
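For orientation, a minimal sketch of how the generic code (fs_enet-main.c, not shown in this
hunk) is expected to dispatch into the per-MAC fs_ops through this structure; the function
name is illustrative only:

	static void example_link_restart(struct net_device *dev)
	{
		struct fs_enet_private *fep = netdev_priv(dev);
		unsigned long flags;

		/* fep->lock covers everything except TX packet processing */
		spin_lock_irqsave(&fep->lock, flags);
		fep->ops->stop(dev);
		fep->ops->restart(dev);
		spin_unlock_irqrestore(&fep->lock, flags);
	}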
  171 +/***************************************************************************/
  172 +
  173 +int fs_mii_read(struct net_device *dev, int phy_id, int location);
  174 +void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
  175 +
  176 +void fs_mii_startup(struct net_device *dev);
  177 +void fs_mii_shutdown(struct net_device *dev);
  178 +void fs_mii_ack_int(struct net_device *dev);
  179 +
  180 +void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
  181 +
  182 +void fs_init_bds(struct net_device *dev);
  183 +void fs_cleanup_bds(struct net_device *dev);
  184 +
  185 +/***************************************************************************/
  186 +
  187 +#define DRV_MODULE_NAME "fs_enet"
  188 +#define PFX DRV_MODULE_NAME ": "
  189 +#define DRV_MODULE_VERSION "1.0"
  190 +#define DRV_MODULE_RELDATE "Aug 8, 2005"
  191 +
  192 +/***************************************************************************/
  193 +
  194 +int fs_enet_platform_init(void);
  195 +void fs_enet_platform_cleanup(void);
  196 +
  197 +/***************************************************************************/
  198 +
  199 +/* buffer descriptor access macros */
  200 +
  201 +/* access macros */
  202 +#if defined(CONFIG_CPM1)
  203 +/* for a CPM1 the __raw_xxx's are sufficient */
  204 +#define __cbd_out32(addr, x) __raw_writel(x, addr)
  205 +#define __cbd_out16(addr, x) __raw_writew(x, addr)
  206 +#define __cbd_in32(addr) __raw_readl(addr)
  207 +#define __cbd_in16(addr) __raw_readw(addr)
  208 +#else
  209 +/* for others play it safe */
  210 +#define __cbd_out32(addr, x) out_be32(addr, x)
  211 +#define __cbd_out16(addr, x) out_be16(addr, x)
  212 +#define __cbd_in32(addr) in_be32(addr)
  213 +#define __cbd_in16(addr) in_be16(addr)
  214 +#endif
  215 +
  216 +/* write */
  217 +#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
  218 +#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
  219 +#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
  220 +
  221 +/* read */
  222 +#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
  223 +#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
  224 +#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
  225 +
  226 +/* set bits */
  227 +#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
  228 +
  229 +/* clear bits */
  230 +#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
  231 +
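A minimal sketch of how these accessors are meant to be used when priming one receive
descriptor; bdp, skb and fep are assumed to exist, and the BD_ENET_RX_* flags come from the
CPM headers rather than from this patch:

	CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skb->data,
					 ENET_RX_FRSIZE, DMA_FROM_DEVICE));
	CBDW_DATLEN(bdp, 0);	/* the controller fills in the received length */
	CBDW_SC(bdp, BD_ENET_RX_EMPTY | BD_ENET_RX_INTR);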
  232 +/*******************************************************************/
  233 +
  234 +extern const struct fs_ops fs_fec_ops;
  235 +extern const struct fs_ops fs_fcc_ops;
  236 +extern const struct fs_ops fs_scc_ops;
  237 +
  238 +/*******************************************************************/
  239 +
  240 +/* handy pointer to the immap */
  241 +extern void *fs_enet_immap;
  242 +
  243 +/*******************************************************************/
  244 +
  245 +#endif
drivers/net/fs_enet/mac-fcc.c
  1 +/*
  2 + * FCC driver for Motorola MPC82xx (PQ2).
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * This file is licensed under the terms of the GNU General Public License
  11 + * version 2. This program is licensed "as is" without any warranty of any
  12 + * kind, whether express or implied.
  13 + */
  14 +
  15 +#include <linux/config.h>
  16 +#include <linux/module.h>
  17 +#include <linux/kernel.h>
  18 +#include <linux/types.h>
  19 +#include <linux/sched.h>
  20 +#include <linux/string.h>
  21 +#include <linux/ptrace.h>
  22 +#include <linux/errno.h>
  23 +#include <linux/ioport.h>
  24 +#include <linux/slab.h>
  25 +#include <linux/interrupt.h>
  26 +#include <linux/pci.h>
  27 +#include <linux/init.h>
  28 +#include <linux/delay.h>
  29 +#include <linux/netdevice.h>
  30 +#include <linux/etherdevice.h>
  31 +#include <linux/skbuff.h>
  32 +#include <linux/spinlock.h>
  33 +#include <linux/mii.h>
  34 +#include <linux/ethtool.h>
  35 +#include <linux/bitops.h>
  36 +#include <linux/fs.h>
  37 +
  38 +#include <asm/immap_cpm2.h>
  39 +#include <asm/mpc8260.h>
  40 +#include <asm/cpm2.h>
  41 +
  42 +#include <asm/pgtable.h>
  43 +#include <asm/irq.h>
  44 +#include <asm/uaccess.h>
  45 +
  46 +#include "fs_enet.h"
  47 +
  48 +/*************************************************/
  49 +
  50 +/* FCC access macros */
  51 +
  52 +#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x)
  53 +#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x)
  54 +#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x)
  55 +#define __fcc_in32(addr) in_be32((unsigned *)addr)
  56 +#define __fcc_in16(addr) in_be16((unsigned short *)addr)
  57 +#define __fcc_in8(addr) in_8((unsigned char *)addr)
  58 +
  59 +/* parameter space */
  60 +
  61 +/* write, read, set bits, clear bits */
  62 +#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v))
  63 +#define R32(_p, _m) __fcc_in32(&(_p)->_m)
  64 +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
  65 +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
  66 +
  67 +#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v))
  68 +#define R16(_p, _m) __fcc_in16(&(_p)->_m)
  69 +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
  70 +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
  71 +
  72 +#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v))
  73 +#define R8(_p, _m) __fcc_in8(&(_p)->_m)
  74 +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
  75 +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
  76 +
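The S/C forms are plain read-modify-write wrappers over the raw accessors; for instance,
S32(fccp, fcc_gfmr, FCC_GFMR_ENR) expands to

	__fcc_out32(&fccp->fcc_gfmr, __fcc_in32(&fccp->fcc_gfmr) | FCC_GFMR_ENR)

i.e. read the register, OR the bits in, write it back, while C32() does the same with
& ~(value) to clear bits. The macros themselves do no locking.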
  77 +/*************************************************/
  78 +
  79 +#define FCC_MAX_MULTICAST_ADDRS 64
  80 +
  81 +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
  82 +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
  83 +#define mk_mii_end 0
  84 +
  85 +#define MAX_CR_CMD_LOOPS 10000
  86 +
  87 +static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op)
  88 +{
  89 + const struct fs_platform_info *fpi = fep->fpi;
  90 +
  91 + cpm2_map_t *immap = fs_enet_immap;
  92 + cpm_cpm2_t *cpmp = &immap->im_cpm;
  93 + u32 v;
  94 + int i;
  95 +
  96 +	/* There is no generic CPM command helper yet; if one appears
  97 +	   (something like do_cpm_cmd()) it will need the page & sblock arguments used here. */
  98 + v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
  99 + W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
  100 + for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
  101 + if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
  102 + break;
  103 +
  104 + if (i >= MAX_CR_CMD_LOOPS) {
  105 + printk(KERN_ERR "%s(): Not able to issue CPM command\n",
  106 + __FUNCTION__);
  107 + return 1;
  108 + }
  109 +
  110 + return 0;
  111 +}
  112 +
  113 +static int do_pd_setup(struct fs_enet_private *fep)
  114 +{
  115 + struct platform_device *pdev = to_platform_device(fep->dev);
  116 + struct resource *r;
  117 +
  118 + /* Fill out IRQ field */
  119 + fep->interrupt = platform_get_irq(pdev, 0);
  120 +
  121 + /* Attach the memory for the FCC Parameter RAM */
  122 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
  123 + fep->fcc.ep = (void *)r->start;
  124 +
  125 + if (fep->fcc.ep == NULL)
  126 + return -EINVAL;
  127 +
  128 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
  129 + fep->fcc.fccp = (void *)r->start;
  130 +
  131 + if (fep->fcc.fccp == NULL)
  132 + return -EINVAL;
  133 +
  134 + fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
  135 +
  136 + if (fep->fcc.fcccp == NULL)
  137 + return -EINVAL;
  138 +
  139 + return 0;
  140 +}
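do_pd_setup() above assumes the board code registers a platform device whose memory resources
are named "fcc_pram" and "fcc_regs" and whose first IRQ resource is the FCC interrupt; the
third register block is taken from fpi->fcc_regs_c rather than from a resource. A sketch of
such a resource table, with placeholder addresses and IRQ number that do not come from any
real board:

	static struct resource example_fcc1_resources[] = {
		{ .name = "fcc_regs", .start = 0xf0011300, .end = 0xf001131f,
		  .flags = IORESOURCE_MEM },
		{ .name = "fcc_pram", .start = 0xf0008400, .end = 0xf00084ff,
		  .flags = IORESOURCE_MEM },
		{ .start = 32, .end = 32, .flags = IORESOURCE_IRQ },
	};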
  141 +
  142 +#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
  143 +#define FCC_RX_EVENT (FCC_ENET_RXF)
  144 +#define FCC_TX_EVENT (FCC_ENET_TXB)
  145 +#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
  146 +
  147 +static int setup_data(struct net_device *dev)
  148 +{
  149 + struct fs_enet_private *fep = netdev_priv(dev);
  150 + const struct fs_platform_info *fpi = fep->fpi;
  151 +
  152 + fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
  153 + if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
  154 + return -EINVAL;
  155 +
  156 + fep->fcc.mem = (void *)fpi->mem_offset;
  157 +
  158 + if (do_pd_setup(fep) != 0)
  159 + return -EINVAL;
  160 +
  161 + fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
  162 + fep->ev_rx = FCC_RX_EVENT;
  163 + fep->ev_tx = FCC_TX_EVENT;
  164 + fep->ev_err = FCC_ERR_EVENT_MSK;
  165 +
  166 + return 0;
  167 +}
  168 +
  169 +static int allocate_bd(struct net_device *dev)
  170 +{
  171 + struct fs_enet_private *fep = netdev_priv(dev);
  172 + const struct fs_platform_info *fpi = fep->fpi;
  173 +
  174 + fep->ring_base = dma_alloc_coherent(fep->dev,
  175 + (fpi->tx_ring + fpi->rx_ring) *
  176 + sizeof(cbd_t), &fep->ring_mem_addr,
  177 + GFP_KERNEL);
  178 + if (fep->ring_base == NULL)
  179 + return -ENOMEM;
  180 +
  181 + return 0;
  182 +}
  183 +
  184 +static void free_bd(struct net_device *dev)
  185 +{
  186 + struct fs_enet_private *fep = netdev_priv(dev);
  187 + const struct fs_platform_info *fpi = fep->fpi;
  188 +
  189 + if (fep->ring_base)
  190 + dma_free_coherent(fep->dev,
  191 + (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
  192 + fep->ring_base, fep->ring_mem_addr);
  193 +}
  194 +
  195 +static void cleanup_data(struct net_device *dev)
  196 +{
  197 + /* nothing */
  198 +}
  199 +
  200 +static void set_promiscuous_mode(struct net_device *dev)
  201 +{
  202 + struct fs_enet_private *fep = netdev_priv(dev);
  203 + fcc_t *fccp = fep->fcc.fccp;
  204 +
  205 + S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
  206 +}
  207 +
  208 +static void set_multicast_start(struct net_device *dev)
  209 +{
  210 + struct fs_enet_private *fep = netdev_priv(dev);
  211 + fcc_enet_t *ep = fep->fcc.ep;
  212 +
  213 + W32(ep, fen_gaddrh, 0);
  214 + W32(ep, fen_gaddrl, 0);
  215 +}
  216 +
  217 +static void set_multicast_one(struct net_device *dev, const u8 *mac)
  218 +{
  219 + struct fs_enet_private *fep = netdev_priv(dev);
  220 + fcc_enet_t *ep = fep->fcc.ep;
  221 + u16 taddrh, taddrm, taddrl;
  222 +
  223 + taddrh = ((u16)mac[5] << 8) | mac[4];
  224 + taddrm = ((u16)mac[3] << 8) | mac[2];
  225 + taddrl = ((u16)mac[1] << 8) | mac[0];
  226 +
  227 + W16(ep, fen_taddrh, taddrh);
  228 + W16(ep, fen_taddrm, taddrm);
  229 + W16(ep, fen_taddrl, taddrl);
  230 + fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR);
  231 +}
  232 +
  233 +static void set_multicast_finish(struct net_device *dev)
  234 +{
  235 + struct fs_enet_private *fep = netdev_priv(dev);
  236 + fcc_t *fccp = fep->fcc.fccp;
  237 + fcc_enet_t *ep = fep->fcc.ep;
  238 +
  239 + /* clear promiscuous always */
  240 + C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
  241 +
  242 +	/* if all-multi or too many multicasts, just enable all */
  243 + if ((dev->flags & IFF_ALLMULTI) != 0 ||
  244 + dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
  245 +
  246 + W32(ep, fen_gaddrh, 0xffffffff);
  247 + W32(ep, fen_gaddrl, 0xffffffff);
  248 + }
  249 +
  250 + /* read back */
  251 + fep->fcc.gaddrh = R32(ep, fen_gaddrh);
  252 + fep->fcc.gaddrl = R32(ep, fen_gaddrl);
  253 +}
  254 +
  255 +static void set_multicast_list(struct net_device *dev)
  256 +{
  257 + struct dev_mc_list *pmc;
  258 +
  259 + if ((dev->flags & IFF_PROMISC) == 0) {
  260 + set_multicast_start(dev);
  261 + for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
  262 + set_multicast_one(dev, pmc->dmi_addr);
  263 + set_multicast_finish(dev);
  264 + } else
  265 + set_promiscuous_mode(dev);
  266 +}
  267 +
  268 +static void restart(struct net_device *dev)
  269 +{
  270 + struct fs_enet_private *fep = netdev_priv(dev);
  271 + const struct fs_platform_info *fpi = fep->fpi;
  272 + fcc_t *fccp = fep->fcc.fccp;
  273 + fcc_c_t *fcccp = fep->fcc.fcccp;
  274 + fcc_enet_t *ep = fep->fcc.ep;
  275 + dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
  276 + u16 paddrh, paddrm, paddrl;
  277 + u16 mem_addr;
  278 + const unsigned char *mac;
  279 + int i;
  280 +
  281 + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
  282 +
  283 + /* clear everything (slow & steady does it) */
  284 + for (i = 0; i < sizeof(*ep); i++)
  285 + __fcc_out8((char *)ep + i, 0);
  286 +
  287 + /* get physical address */
  288 + rx_bd_base_phys = fep->ring_mem_addr;
  289 + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
  290 +
  291 + /* point to bds */
  292 + W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
  293 + W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
  294 +
  295 + /* Set maximum bytes per receive buffer.
  296 + * It must be a multiple of 32.
  297 + */
  298 + W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
  299 +
  300 + W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
  301 + W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
  302 +
  303 + /* Allocate space in the reserved FCC area of DPRAM for the
  304 + * internal buffers. No one uses this space (yet), so we
  305 + * can do this. Later, we will add resource management for
  306 + * this area.
  307 + */
  308 +
  309 + mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
  310 +
  311 + W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
  312 + W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
  313 + W16(ep, fen_padptr, mem_addr + 64);
  314 +
  315 + /* fill with special symbol... */
  316 + memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
  317 +
  318 + W32(ep, fen_genfcc.fcc_rbptr, 0);
  319 + W32(ep, fen_genfcc.fcc_tbptr, 0);
  320 + W32(ep, fen_genfcc.fcc_rcrc, 0);
  321 + W32(ep, fen_genfcc.fcc_tcrc, 0);
  322 + W16(ep, fen_genfcc.fcc_res1, 0);
  323 + W32(ep, fen_genfcc.fcc_res2, 0);
  324 +
  325 + /* no CAM */
  326 + W32(ep, fen_camptr, 0);
  327 +
  328 + /* Set CRC preset and mask */
  329 + W32(ep, fen_cmask, 0xdebb20e3);
  330 + W32(ep, fen_cpres, 0xffffffff);
  331 +
  332 + W32(ep, fen_crcec, 0); /* CRC Error counter */
  333 + W32(ep, fen_alec, 0); /* alignment error counter */
  334 + W32(ep, fen_disfc, 0); /* discard frame counter */
  335 + W16(ep, fen_retlim, 15); /* Retry limit threshold */
  336 + W16(ep, fen_pper, 0); /* Normal persistence */
  337 +
  338 + /* set group address */
  339 + W32(ep, fen_gaddrh, fep->fcc.gaddrh);
  340 +	W32(ep, fen_gaddrl, fep->fcc.gaddrl);
  341 +
  342 + /* Clear hash filter tables */
  343 + W32(ep, fen_iaddrh, 0);
  344 + W32(ep, fen_iaddrl, 0);
  345 +
  346 + /* Clear the Out-of-sequence TxBD */
  347 + W16(ep, fen_tfcstat, 0);
  348 + W16(ep, fen_tfclen, 0);
  349 + W32(ep, fen_tfcptr, 0);
  350 +
  351 + W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
  352 + W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
  353 +
  354 + /* set address */
  355 + mac = dev->dev_addr;
  356 + paddrh = ((u16)mac[5] << 8) | mac[4];
  357 + paddrm = ((u16)mac[3] << 8) | mac[2];
  358 + paddrl = ((u16)mac[1] << 8) | mac[0];
  359 +
  360 + W16(ep, fen_paddrh, paddrh);
  361 + W16(ep, fen_paddrm, paddrm);
  362 + W16(ep, fen_paddrl, paddrl);
  363 +
  364 + W16(ep, fen_taddrh, 0);
  365 + W16(ep, fen_taddrm, 0);
  366 + W16(ep, fen_taddrl, 0);
  367 +
  368 + W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
  369 + W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
  370 +
  371 + /* Clear stat counters, in case we ever enable RMON */
  372 + W32(ep, fen_octc, 0);
  373 + W32(ep, fen_colc, 0);
  374 + W32(ep, fen_broc, 0);
  375 + W32(ep, fen_mulc, 0);
  376 + W32(ep, fen_uspc, 0);
  377 + W32(ep, fen_frgc, 0);
  378 + W32(ep, fen_ospc, 0);
  379 + W32(ep, fen_jbrc, 0);
  380 + W32(ep, fen_p64c, 0);
  381 + W32(ep, fen_p65c, 0);
  382 + W32(ep, fen_p128c, 0);
  383 + W32(ep, fen_p256c, 0);
  384 + W32(ep, fen_p512c, 0);
  385 + W32(ep, fen_p1024c, 0);
  386 +
  387 + W16(ep, fen_rfthr, 0); /* Suggested by manual */
  388 + W16(ep, fen_rfcnt, 0);
  389 + W16(ep, fen_cftype, 0);
  390 +
  391 + fs_init_bds(dev);
  392 +
  393 + /* adjust to speed (for RMII mode) */
  394 + if (fpi->use_rmii) {
  395 + if (fep->speed == 100)
  396 + C8(fcccp, fcc_gfemr, 0x20);
  397 + else
  398 + S8(fcccp, fcc_gfemr, 0x20);
  399 + }
  400 +
  401 + fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX);
  402 +
  403 + /* clear events */
  404 + W16(fccp, fcc_fcce, 0xffff);
  405 +
  406 + /* Enable interrupts we wish to service */
  407 + W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
  408 +
  409 + /* Set GFMR to enable Ethernet operating mode */
  410 + W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
  411 +
  412 + /* set sync/delimiters */
  413 + W16(fccp, fcc_fdsr, 0xd555);
  414 +
  415 + W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
  416 +
  417 + if (fpi->use_rmii)
  418 + S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
  419 +
  420 + /* adjust to duplex mode */
  421 + if (fep->duplex)
  422 + S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
  423 + else
  424 + C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
  425 +
  426 + S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
  427 +}
  428 +
  429 +static void stop(struct net_device *dev)
  430 +{
  431 + struct fs_enet_private *fep = netdev_priv(dev);
  432 + fcc_t *fccp = fep->fcc.fccp;
  433 +
  434 + /* stop ethernet */
  435 + C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
  436 +
  437 + /* clear events */
  438 + W16(fccp, fcc_fcce, 0xffff);
  439 +
  440 + /* clear interrupt mask */
  441 + W16(fccp, fcc_fccm, 0);
  442 +
  443 + fs_cleanup_bds(dev);
  444 +}
  445 +
  446 +static void pre_request_irq(struct net_device *dev, int irq)
  447 +{
  448 + /* nothing */
  449 +}
  450 +
  451 +static void post_free_irq(struct net_device *dev, int irq)
  452 +{
  453 + /* nothing */
  454 +}
  455 +
  456 +static void napi_clear_rx_event(struct net_device *dev)
  457 +{
  458 + struct fs_enet_private *fep = netdev_priv(dev);
  459 + fcc_t *fccp = fep->fcc.fccp;
  460 +
  461 + W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
  462 +}
  463 +
  464 +static void napi_enable_rx(struct net_device *dev)
  465 +{
  466 + struct fs_enet_private *fep = netdev_priv(dev);
  467 + fcc_t *fccp = fep->fcc.fccp;
  468 +
  469 + S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
  470 +}
  471 +
  472 +static void napi_disable_rx(struct net_device *dev)
  473 +{
  474 + struct fs_enet_private *fep = netdev_priv(dev);
  475 + fcc_t *fccp = fep->fcc.fccp;
  476 +
  477 + C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
  478 +}
  479 +
  480 +static void rx_bd_done(struct net_device *dev)
  481 +{
  482 + /* nothing */
  483 +}
  484 +
  485 +static void tx_kickstart(struct net_device *dev)
  486 +{
  487 + /* nothing */
  488 +}
  489 +
  490 +static u32 get_int_events(struct net_device *dev)
  491 +{
  492 + struct fs_enet_private *fep = netdev_priv(dev);
  493 + fcc_t *fccp = fep->fcc.fccp;
  494 +
  495 + return (u32)R16(fccp, fcc_fcce);
  496 +}
  497 +
  498 +static void clear_int_events(struct net_device *dev, u32 int_events)
  499 +{
  500 + struct fs_enet_private *fep = netdev_priv(dev);
  501 + fcc_t *fccp = fep->fcc.fccp;
  502 +
  503 + W16(fccp, fcc_fcce, int_events & 0xffff);
  504 +}
  505 +
  506 +static void ev_error(struct net_device *dev, u32 int_events)
  507 +{
  508 + printk(KERN_WARNING DRV_MODULE_NAME
  509 + ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
  510 +}
  511 +
  512 +static int get_regs(struct net_device *dev, void *p, int *sizep)
  513 +{
  514 + struct fs_enet_private *fep = netdev_priv(dev);
  515 +
  516 + if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t))
  517 + return -EINVAL;
  518 +
  519 + memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
  520 + p = (char *)p + sizeof(fcc_t);
  521 +
  522 + memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
  523 + p = (char *)p + sizeof(fcc_c_t);
  524 +
  525 + memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
  526 +
  527 + return 0;
  528 +}
  529 +
  530 +static int get_regs_len(struct net_device *dev)
  531 +{
  532 + return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t);
  533 +}
  534 +
  535 +/* Some transmit errors cause the transmitter to shut
  536 + * down. We now issue a restart transmit. Since the
  537 + * errors close the BD and update the pointers, the restart
  538 + * _should_ pick up without having to reset any of our
  539 + * pointers either. Also, to work around 8260 device erratum
  540 + * CPM37, we must disable and then re-enable the transmitter
  541 + * following a Late Collision, Underrun, or Retry Limit error.
  542 + */
  543 +static void tx_restart(struct net_device *dev)
  544 +{
  545 + struct fs_enet_private *fep = netdev_priv(dev);
  546 + fcc_t *fccp = fep->fcc.fccp;
  547 +
  548 + C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
  549 + udelay(10);
  550 + S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
  551 +
  552 + fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX);
  553 +}
  554 +
  555 +/*************************************************************************/
  556 +
  557 +const struct fs_ops fs_fcc_ops = {
  558 + .setup_data = setup_data,
  559 + .cleanup_data = cleanup_data,
  560 + .set_multicast_list = set_multicast_list,
  561 + .restart = restart,
  562 + .stop = stop,
  563 + .pre_request_irq = pre_request_irq,
  564 + .post_free_irq = post_free_irq,
  565 + .napi_clear_rx_event = napi_clear_rx_event,
  566 + .napi_enable_rx = napi_enable_rx,
  567 + .napi_disable_rx = napi_disable_rx,
  568 + .rx_bd_done = rx_bd_done,
  569 + .tx_kickstart = tx_kickstart,
  570 + .get_int_events = get_int_events,
  571 + .clear_int_events = clear_int_events,
  572 + .ev_error = ev_error,
  573 + .get_regs = get_regs,
  574 + .get_regs_len = get_regs_len,
  575 + .tx_restart = tx_restart,
  576 + .allocate_bd = allocate_bd,
  577 + .free_bd = free_bd,
  578 +};
drivers/net/fs_enet/mac-fec.c
  1 +/*
  2 + * Freescale Ethernet controllers
  3 + *
  4 + * Copyright (c) 2005 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * This file is licensed under the terms of the GNU General Public License
  11 + * version 2. This program is licensed "as is" without any warranty of any
  12 + * kind, whether express or implied.
  13 + */
  14 +
  15 +#include <linux/config.h>
  16 +#include <linux/module.h>
  17 +#include <linux/kernel.h>
  18 +#include <linux/types.h>
  19 +#include <linux/sched.h>
  20 +#include <linux/string.h>
  21 +#include <linux/ptrace.h>
  22 +#include <linux/errno.h>
  23 +#include <linux/ioport.h>
  24 +#include <linux/slab.h>
  25 +#include <linux/interrupt.h>
  26 +#include <linux/pci.h>
  27 +#include <linux/init.h>
  28 +#include <linux/delay.h>
  29 +#include <linux/netdevice.h>
  30 +#include <linux/etherdevice.h>
  31 +#include <linux/skbuff.h>
  32 +#include <linux/spinlock.h>
  33 +#include <linux/mii.h>
  34 +#include <linux/ethtool.h>
  35 +#include <linux/bitops.h>
  36 +#include <linux/fs.h>
  37 +
  38 +#include <asm/irq.h>
  39 +#include <asm/uaccess.h>
  40 +
  41 +#ifdef CONFIG_8xx
  42 +#include <asm/8xx_immap.h>
  43 +#include <asm/pgtable.h>
  44 +#include <asm/mpc8xx.h>
  45 +#include <asm/commproc.h>
  46 +#endif
  47 +
  48 +#include "fs_enet.h"
  49 +
  50 +/*************************************************/
  51 +
  52 +#if defined(CONFIG_CPM1)
  53 +/* for a CPM1 __raw_xxx's are sufficient */
  54 +#define __fs_out32(addr, x) __raw_writel(x, addr)
  55 +#define __fs_out16(addr, x) __raw_writew(x, addr)
  56 +#define __fs_in32(addr) __raw_readl(addr)
  57 +#define __fs_in16(addr) __raw_readw(addr)
  58 +#else
  59 +/* for others play it safe */
  60 +#define __fs_out32(addr, x) out_be32(addr, x)
  61 +#define __fs_out16(addr, x) out_be16(addr, x)
  62 +#define __fs_in32(addr) in_be32(addr)
  63 +#define __fs_in16(addr) in_be16(addr)
  64 +#endif
  65 +
  66 +/* write */
  67 +#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
  68 +
  69 +/* read */
  70 +#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
  71 +
  72 +/* set bits */
  73 +#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
  74 +
  75 +/* clear bits */
  76 +#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
  77 +
  78 +
  79 +/* CRC polynomial used by the FEC for the multicast group filtering */
  80 +#define FEC_CRC_POLY 0x04C11DB7
  81 +
  82 +#define FEC_MAX_MULTICAST_ADDRS 64
  83 +
  84 +/* Interrupt events/masks.
  85 +*/
  86 +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
  87 +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
  88 +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
  89 +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
  90 +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
  91 +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
  92 +#define FEC_ENET_RXF 0x02000000U /* Full frame received */
  93 +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
  94 +#define FEC_ENET_MII 0x00800000U /* MII interrupt */
  95 +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
  96 +
  97 +#define FEC_ECNTRL_PINMUX 0x00000004
  98 +#define FEC_ECNTRL_ETHER_EN 0x00000002
  99 +#define FEC_ECNTRL_RESET 0x00000001
  100 +
  101 +#define FEC_RCNTRL_BC_REJ 0x00000010
  102 +#define FEC_RCNTRL_PROM 0x00000008
  103 +#define FEC_RCNTRL_MII_MODE 0x00000004
  104 +#define FEC_RCNTRL_DRT 0x00000002
  105 +#define FEC_RCNTRL_LOOP 0x00000001
  106 +
  107 +#define FEC_TCNTRL_FDEN 0x00000004
  108 +#define FEC_TCNTRL_HBC 0x00000002
  109 +#define FEC_TCNTRL_GTS 0x00000001
  110 +
  111 +
  112 +/* Make MII read/write commands for the FEC.
  113 +*/
  114 +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
  115 +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
  116 +#define mk_mii_end 0
  117 +
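To make the command layout concrete: reading the basic status register (MII_BMSR, register 1)
of the PHY at address 0 would use

	mk_mii_read(1) = 0x60020000 | (1 << 18) = 0x60060000

and mii_read() below ORs (phy_id << 23) into that word before writing it to the mii_data
register; mk_mii_write() differs only in the opcode bits (0x5... instead of 0x6...) and in
carrying the 16-bit value to be written.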
  118 +#define FEC_MII_LOOPS 10000
  119 +
  120 +/*
  121 + * Delay to wait for FEC reset command to complete (in us)
  122 + */
  123 +#define FEC_RESET_DELAY 50
  124 +
  125 +static int whack_reset(fec_t * fecp)
  126 +{
  127 + int i;
  128 +
  129 + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
  130 + for (i = 0; i < FEC_RESET_DELAY; i++) {
  131 + if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
  132 + return 0; /* OK */
  133 + udelay(1);
  134 + }
  135 +
  136 + return -1;
  137 +}
  138 +
  139 +static int do_pd_setup(struct fs_enet_private *fep)
  140 +{
  141 + struct platform_device *pdev = to_platform_device(fep->dev);
  142 + struct resource *r;
  143 +
  144 + /* Fill out IRQ field */
  145 +	fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
  146 +
  147 +	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
  148 +	fep->fec.fecp = (void *)r->start;
  149 +
  150 +	if (fep->fec.fecp == NULL)
  151 + return -EINVAL;
  152 +
  153 + return 0;
  154 +
  155 +}
  156 +
  157 +#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
  158 +#define FEC_RX_EVENT (FEC_ENET_RXF)
  159 +#define FEC_TX_EVENT (FEC_ENET_TXF)
  160 +#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
  161 + FEC_ENET_BABT | FEC_ENET_EBERR)
  162 +
  163 +static int setup_data(struct net_device *dev)
  164 +{
  165 + struct fs_enet_private *fep = netdev_priv(dev);
  166 +
  167 + if (do_pd_setup(fep) != 0)
  168 + return -EINVAL;
  169 +
  170 + fep->fec.hthi = 0;
  171 + fep->fec.htlo = 0;
  172 +
  173 + fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
  174 + fep->ev_rx = FEC_RX_EVENT;
  175 + fep->ev_tx = FEC_TX_EVENT;
  176 + fep->ev_err = FEC_ERR_EVENT_MSK;
  177 +
  178 + return 0;
  179 +}
  180 +
  181 +static int allocate_bd(struct net_device *dev)
  182 +{
  183 + struct fs_enet_private *fep = netdev_priv(dev);
  184 + const struct fs_platform_info *fpi = fep->fpi;
  185 +
  186 + fep->ring_base = dma_alloc_coherent(fep->dev,
  187 + (fpi->tx_ring + fpi->rx_ring) *
  188 + sizeof(cbd_t), &fep->ring_mem_addr,
  189 + GFP_KERNEL);
  190 + if (fep->ring_base == NULL)
  191 + return -ENOMEM;
  192 +
  193 + return 0;
  194 +}
  195 +
  196 +static void free_bd(struct net_device *dev)
  197 +{
  198 + struct fs_enet_private *fep = netdev_priv(dev);
  199 + const struct fs_platform_info *fpi = fep->fpi;
  200 +
  201 +	if (fep->ring_base)
  202 + dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
  203 + * sizeof(cbd_t),
  204 + fep->ring_base,
  205 + fep->ring_mem_addr);
  206 +}
  207 +
  208 +static void cleanup_data(struct net_device *dev)
  209 +{
  210 + /* nothing */
  211 +}
  212 +
  213 +static void set_promiscuous_mode(struct net_device *dev)
  214 +{
  215 + struct fs_enet_private *fep = netdev_priv(dev);
  216 + fec_t *fecp = fep->fec.fecp;
  217 +
  218 + FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
  219 +}
  220 +
  221 +static void set_multicast_start(struct net_device *dev)
  222 +{
  223 + struct fs_enet_private *fep = netdev_priv(dev);
  224 +
  225 + fep->fec.hthi = 0;
  226 + fep->fec.htlo = 0;
  227 +}
  228 +
  229 +static void set_multicast_one(struct net_device *dev, const u8 *mac)
  230 +{
  231 + struct fs_enet_private *fep = netdev_priv(dev);
  232 + int temp, hash_index, i, j;
  233 + u32 crc, csrVal;
  234 + u8 byte, msb;
  235 +
  236 + crc = 0xffffffff;
  237 + for (i = 0; i < 6; i++) {
  238 + byte = mac[i];
  239 + for (j = 0; j < 8; j++) {
  240 + msb = crc >> 31;
  241 + crc <<= 1;
  242 + if (msb ^ (byte & 0x1))
  243 + crc ^= FEC_CRC_POLY;
  244 + byte >>= 1;
  245 + }
  246 + }
  247 +
  248 + temp = (crc & 0x3f) >> 1;
  249 + hash_index = ((temp & 0x01) << 4) |
  250 + ((temp & 0x02) << 2) |
  251 + ((temp & 0x04)) |
  252 + ((temp & 0x08) >> 2) |
  253 + ((temp & 0x10) >> 4);
  254 + csrVal = 1 << hash_index;
  255 + if (crc & 1)
  256 + fep->fec.hthi |= csrVal;
  257 + else
  258 + fep->fec.htlo |= csrVal;
  259 +}
  260 +
  261 +static void set_multicast_finish(struct net_device *dev)
  262 +{
  263 + struct fs_enet_private *fep = netdev_priv(dev);
  264 + fec_t *fecp = fep->fec.fecp;
  265 +
  266 +	/* if all-multi or too many multicasts, just enable all */
  267 + if ((dev->flags & IFF_ALLMULTI) != 0 ||
  268 + dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
  269 + fep->fec.hthi = 0xffffffffU;
  270 + fep->fec.htlo = 0xffffffffU;
  271 + }
  272 +
  273 + FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
  274 + FW(fecp, hash_table_high, fep->fec.hthi);
  275 + FW(fecp, hash_table_low, fep->fec.htlo);
  276 +}
  277 +
  278 +static void set_multicast_list(struct net_device *dev)
  279 +{
  280 + struct dev_mc_list *pmc;
  281 +
  282 + if ((dev->flags & IFF_PROMISC) == 0) {
  283 + set_multicast_start(dev);
  284 + for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
  285 + set_multicast_one(dev, pmc->dmi_addr);
  286 + set_multicast_finish(dev);
  287 + } else
  288 + set_promiscuous_mode(dev);
  289 +}
  290 +
  291 +static void restart(struct net_device *dev)
  292 +{
  293 +#ifdef CONFIG_DUET
  294 + immap_t *immap = fs_enet_immap;
  295 + u32 cptr;
  296 +#endif
  297 + struct fs_enet_private *fep = netdev_priv(dev);
  298 + fec_t *fecp = fep->fec.fecp;
  299 + const struct fs_platform_info *fpi = fep->fpi;
  300 + dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
  301 + int r;
  302 + u32 addrhi, addrlo;
  303 +
  304 + r = whack_reset(fep->fec.fecp);
  305 + if (r != 0)
  306 + printk(KERN_ERR DRV_MODULE_NAME
  307 + ": %s FEC Reset FAILED!\n", dev->name);
  308 +
  309 + /*
  310 + * Set station address.
  311 + */
  312 + addrhi = ((u32) dev->dev_addr[0] << 24) |
  313 + ((u32) dev->dev_addr[1] << 16) |
  314 + ((u32) dev->dev_addr[2] << 8) |
  315 + (u32) dev->dev_addr[3];
  316 + addrlo = ((u32) dev->dev_addr[4] << 24) |
  317 + ((u32) dev->dev_addr[5] << 16);
  318 + FW(fecp, addr_low, addrhi);
  319 + FW(fecp, addr_high, addrlo);
  320 +
  321 + /*
  322 + * Reset all multicast.
  323 + */
  324 + FW(fecp, hash_table_high, fep->fec.hthi);
  325 + FW(fecp, hash_table_low, fep->fec.htlo);
  326 +
  327 + /*
  328 + * Set maximum receive buffer size.
  329 + */
  330 + FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
  331 + FW(fecp, r_hash, PKT_MAXBUF_SIZE);
  332 +
  333 + /* get physical address */
  334 + rx_bd_base_phys = fep->ring_mem_addr;
  335 + tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
  336 +
  337 + /*
  338 + * Set receive and transmit descriptor base.
  339 + */
  340 + FW(fecp, r_des_start, rx_bd_base_phys);
  341 + FW(fecp, x_des_start, tx_bd_base_phys);
  342 +
  343 + fs_init_bds(dev);
  344 +
  345 + /*
  346 + * Enable big endian and don't care about SDMA FC.
  347 + */
  348 + FW(fecp, fun_code, 0x78000000);
  349 +
  350 + /*
  351 + * Set MII speed.
  352 + */
  353 + FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
  354 +
  355 + /*
  356 + * Clear any outstanding interrupt.
  357 + */
  358 + FW(fecp, ievent, 0xffc0);
  359 + FW(fecp, ivec, (fep->interrupt / 2) << 29);
  360 +
  361 +
  362 + /*
  363 + * adjust to speed (only for DUET & RMII)
  364 + */
  365 +#ifdef CONFIG_DUET
  366 + if (fpi->use_rmii) {
  367 + cptr = in_be32(&immap->im_cpm.cp_cptr);
  368 + switch (fs_get_fec_index(fpi->fs_no)) {
  369 + case 0:
  370 + cptr |= 0x100;
  371 + if (fep->speed == 10)
  372 + cptr |= 0x0000010;
  373 + else if (fep->speed == 100)
  374 + cptr &= ~0x0000010;
  375 + break;
  376 + case 1:
  377 + cptr |= 0x80;
  378 + if (fep->speed == 10)
  379 + cptr |= 0x0000008;
  380 + else if (fep->speed == 100)
  381 + cptr &= ~0x0000008;
  382 + break;
  383 + default:
  384 + BUG(); /* should never happen */
  385 + break;
  386 + }
  387 + out_be32(&immap->im_cpm.cp_cptr, cptr);
  388 + }
  389 +#endif
  390 +
  391 + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
  392 + /*
  393 + * adjust to duplex mode
  394 + */
  395 + if (fep->duplex) {
  396 + FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
  397 + FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
  398 + } else {
  399 + FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
  400 + FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
  401 + }
  402 +
  403 + /*
  404 + * Enable interrupts we wish to service.
  405 + */
  406 + FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
  407 + FEC_ENET_RXF | FEC_ENET_RXB);
  408 +
  409 + /*
  410 + * And last, enable the transmit and receive processing.
  411 + */
  412 + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
  413 + FW(fecp, r_des_active, 0x01000000);
  414 +}
  415 +
  416 +static void stop(struct net_device *dev)
  417 +{
  418 + struct fs_enet_private *fep = netdev_priv(dev);
  419 + fec_t *fecp = fep->fec.fecp;
  420 + struct fs_enet_mii_bus *bus = fep->mii_bus;
  421 + const struct fs_mii_bus_info *bi = bus->bus_info;
  422 + int i;
  423 +
  424 + if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
  425 + return; /* already down */
  426 +
  427 + FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
  428 + for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
  429 + i < FEC_RESET_DELAY; i++)
  430 + udelay(1);
  431 +
  432 + if (i == FEC_RESET_DELAY)
  433 + printk(KERN_WARNING DRV_MODULE_NAME
  434 + ": %s FEC timeout on graceful transmit stop\n",
  435 + dev->name);
  436 + /*
  437 + * Disable FEC. Let only MII interrupts.
  438 + */
  439 + FW(fecp, imask, 0);
  440 + FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
  441 +
  442 + fs_cleanup_bds(dev);
  443 +
  444 +	/* don't fully shut down FEC1 if it hosts the shared MII bus */
  445 + if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
  446 + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
  447 + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
  448 + FW(fecp, ievent, FEC_ENET_MII);
  449 + FW(fecp, mii_speed, bus->fec.mii_speed);
  450 + }
  451 +}
  452 +
  453 +static void pre_request_irq(struct net_device *dev, int irq)
  454 +{
  455 + immap_t *immap = fs_enet_immap;
  456 + u32 siel;
  457 +
  458 + /* SIU interrupt */
  459 + if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
  460 +
  461 + siel = in_be32(&immap->im_siu_conf.sc_siel);
  462 + if ((irq & 1) == 0)
  463 + siel |= (0x80000000 >> irq);
  464 + else
  465 + siel &= ~(0x80000000 >> (irq & ~1));
  466 + out_be32(&immap->im_siu_conf.sc_siel, siel);
  467 + }
  468 +}
  469 +
  470 +static void post_free_irq(struct net_device *dev, int irq)
  471 +{
  472 + /* nothing */
  473 +}
  474 +
  475 +static void napi_clear_rx_event(struct net_device *dev)
  476 +{
  477 + struct fs_enet_private *fep = netdev_priv(dev);
  478 + fec_t *fecp = fep->fec.fecp;
  479 +
  480 + FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
  481 +}
  482 +
  483 +static void napi_enable_rx(struct net_device *dev)
  484 +{
  485 + struct fs_enet_private *fep = netdev_priv(dev);
  486 + fec_t *fecp = fep->fec.fecp;
  487 +
  488 + FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
  489 +}
  490 +
  491 +static void napi_disable_rx(struct net_device *dev)
  492 +{
  493 + struct fs_enet_private *fep = netdev_priv(dev);
  494 + fec_t *fecp = fep->fec.fecp;
  495 +
  496 + FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
  497 +}
  498 +
  499 +static void rx_bd_done(struct net_device *dev)
  500 +{
  501 + struct fs_enet_private *fep = netdev_priv(dev);
  502 + fec_t *fecp = fep->fec.fecp;
  503 +
  504 + FW(fecp, r_des_active, 0x01000000);
  505 +}
  506 +
  507 +static void tx_kickstart(struct net_device *dev)
  508 +{
  509 + struct fs_enet_private *fep = netdev_priv(dev);
  510 + fec_t *fecp = fep->fec.fecp;
  511 +
  512 + FW(fecp, x_des_active, 0x01000000);
  513 +}
  514 +
  515 +static u32 get_int_events(struct net_device *dev)
  516 +{
  517 + struct fs_enet_private *fep = netdev_priv(dev);
  518 + fec_t *fecp = fep->fec.fecp;
  519 +
  520 + return FR(fecp, ievent) & FR(fecp, imask);
  521 +}
  522 +
  523 +static void clear_int_events(struct net_device *dev, u32 int_events)
  524 +{
  525 + struct fs_enet_private *fep = netdev_priv(dev);
  526 + fec_t *fecp = fep->fec.fecp;
  527 +
  528 + FW(fecp, ievent, int_events);
  529 +}
  530 +
  531 +static void ev_error(struct net_device *dev, u32 int_events)
  532 +{
  533 + printk(KERN_WARNING DRV_MODULE_NAME
  534 + ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
  535 +}
  536 +
  537 +static int get_regs(struct net_device *dev, void *p, int *sizep)
  538 +{
  539 + struct fs_enet_private *fep = netdev_priv(dev);
  540 +
  541 + if (*sizep < sizeof(fec_t))
  542 + return -EINVAL;
  543 +
  544 + memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
  545 +
  546 + return 0;
  547 +}
  548 +
  549 +static int get_regs_len(struct net_device *dev)
  550 +{
  551 + return sizeof(fec_t);
  552 +}
  553 +
  554 +static void tx_restart(struct net_device *dev)
  555 +{
  556 + /* nothing */
  557 +}
  558 +
  559 +/*************************************************************************/
  560 +
  561 +const struct fs_ops fs_fec_ops = {
  562 + .setup_data = setup_data,
  563 + .cleanup_data = cleanup_data,
  564 + .set_multicast_list = set_multicast_list,
  565 + .restart = restart,
  566 + .stop = stop,
  567 + .pre_request_irq = pre_request_irq,
  568 + .post_free_irq = post_free_irq,
  569 + .napi_clear_rx_event = napi_clear_rx_event,
  570 + .napi_enable_rx = napi_enable_rx,
  571 + .napi_disable_rx = napi_disable_rx,
  572 + .rx_bd_done = rx_bd_done,
  573 + .tx_kickstart = tx_kickstart,
  574 + .get_int_events = get_int_events,
  575 + .clear_int_events = clear_int_events,
  576 + .ev_error = ev_error,
  577 + .get_regs = get_regs,
  578 + .get_regs_len = get_regs_len,
  579 + .tx_restart = tx_restart,
  580 + .allocate_bd = allocate_bd,
  581 + .free_bd = free_bd,
  582 +};
  583 +
  584 +/***********************************************************************/
  585 +
  586 +static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
  587 +{
  588 + fec_t *fecp = bus->fec.fecp;
  589 + int i, ret = -1;
  590 +
  591 + if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
  592 + BUG();
  593 +
  594 + /* Add PHY address to register command. */
  595 + FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
  596 +
  597 + for (i = 0; i < FEC_MII_LOOPS; i++)
  598 + if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
  599 + break;
  600 +
  601 + if (i < FEC_MII_LOOPS) {
  602 + FW(fecp, ievent, FEC_ENET_MII);
  603 + ret = FR(fecp, mii_data) & 0xffff;
  604 + }
  605 +
  606 + return ret;
  607 +}
  608 +
  609 +static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
  610 +{
  611 + fec_t *fecp = bus->fec.fecp;
  612 + int i;
  613 +
  614 + /* this must never happen */
  615 + if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
  616 + BUG();
  617 +
  618 + /* Add PHY address to register command. */
  619 + FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
  620 +
  621 + for (i = 0; i < FEC_MII_LOOPS; i++)
  622 + if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
  623 + break;
  624 +
  625 + if (i < FEC_MII_LOOPS)
  626 + FW(fecp, ievent, FEC_ENET_MII);
  627 +}
  628 +
  629 +int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
  630 +{
  631 + bd_t *bd = (bd_t *)__res;
  632 + const struct fs_mii_bus_info *bi = bus->bus_info;
  633 + fec_t *fecp;
  634 +
  635 + if (bi->id != 0)
  636 + return -1;
  637 +
  638 + bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
  639 + bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
  640 + & 0x3F) << 1;
  641 +
  642 + fecp = bus->fec.fecp;
  643 +
  644 + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
  645 + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
  646 + FW(fecp, ievent, FEC_ENET_MII);
  647 + FW(fecp, mii_speed, bus->fec.mii_speed);
  648 +
  649 + bus->mii_read = mii_read;
  650 + bus->mii_write = mii_write;
  651 +
  652 + return 0;
  653 +}
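The mii_speed value computed above is meant to keep the management clock at or below the
2.5 MHz allowed by the MII specification. As a worked example, assuming bd->bi_intfreq is
48 MHz (the actual figure comes from the board info block):

	((48000000 + 4999999) / 2500000) / 2 = 21 / 2 = 10
	(10 & 0x3F) << 1 = 20

so 20 is written to the FEC's mii_speed register, giving an MDC of roughly
48 MHz / 20 = 2.4 MHz, safely under the limit.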
drivers/net/fs_enet/mac-scc.c
  1 +/*
  2 + * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * This file is licensed under the terms of the GNU General Public License
  11 + * version 2. This program is licensed "as is" without any warranty of any
  12 + * kind, whether express or implied.
  13 + */
  14 +
  15 +#include <linux/config.h>
  16 +#include <linux/module.h>
  17 +#include <linux/kernel.h>
  18 +#include <linux/types.h>
  19 +#include <linux/sched.h>
  20 +#include <linux/string.h>
  21 +#include <linux/ptrace.h>
  22 +#include <linux/errno.h>
  23 +#include <linux/ioport.h>
  24 +#include <linux/slab.h>
  25 +#include <linux/interrupt.h>
  26 +#include <linux/pci.h>
  27 +#include <linux/init.h>
  28 +#include <linux/delay.h>
  29 +#include <linux/netdevice.h>
  30 +#include <linux/etherdevice.h>
  31 +#include <linux/skbuff.h>
  32 +#include <linux/spinlock.h>
  33 +#include <linux/mii.h>
  34 +#include <linux/ethtool.h>
  35 +#include <linux/bitops.h>
  36 +#include <linux/fs.h>
  37 +
  38 +#include <asm/irq.h>
  39 +#include <asm/uaccess.h>
  40 +
  41 +#ifdef CONFIG_8xx
  42 +#include <asm/8xx_immap.h>
  43 +#include <asm/pgtable.h>
  44 +#include <asm/mpc8xx.h>
  45 +#include <asm/commproc.h>
  46 +#endif
  47 +
  48 +#include "fs_enet.h"
  49 +
  50 +/*************************************************/
  51 +
  52 +#if defined(CONFIG_CPM1)
  53 +/* for an 8xx the __raw_xxx's are sufficient */
  54 +#define __fs_out32(addr, x) __raw_writel(x, addr)
  55 +#define __fs_out16(addr, x) __raw_writew(x, addr)
  56 +#define __fs_out8(addr, x) __raw_writeb(x, addr)
  57 +#define __fs_in32(addr) __raw_readl(addr)
  58 +#define __fs_in16(addr) __raw_readw(addr)
  59 +#define __fs_in8(addr) __raw_readb(addr)
  60 +#else
  61 +/* for others play it safe */
  62 +#define __fs_out32(addr, x) out_be32(addr, x)
  63 +#define __fs_out16(addr, x) out_be16(addr, x)
  64 +#define __fs_in32(addr) in_be32(addr)
  65 +#define __fs_in16(addr) in_be16(addr)
  66 +#endif
  67 +
  68 +/* write, read, set bits, clear bits */
  69 +#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
  70 +#define R32(_p, _m) __fs_in32(&(_p)->_m)
  71 +#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
  72 +#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
  73 +
  74 +#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
  75 +#define R16(_p, _m) __fs_in16(&(_p)->_m)
  76 +#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
  77 +#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
  78 +
  79 +#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
  80 +#define R8(_p, _m) __fs_in8(&(_p)->_m)
  81 +#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
  82 +#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
  83 +
  84 +#define SCC_MAX_MULTICAST_ADDRS 64
  85 +
  86 +/*
  87 + * Delay to wait for SCC reset command to complete (in us)
  88 + */
  89 +#define SCC_RESET_DELAY 50
  90 +#define MAX_CR_CMD_LOOPS 10000
  91 +
  92 +static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
  93 +{
  94 + cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm;
  95 + u32 v, ch;
  96 + int i = 0;
  97 +
  98 + ch = fep->scc.idx << 2;
  99 + v = mk_cr_cmd(ch, op);
  100 + W16(cpmp, cp_cpcr, v | CPM_CR_FLG);
  101 + for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
  102 + if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
  103 + break;
  104 +
  105 + if (i >= MAX_CR_CMD_LOOPS) {
  106 + printk(KERN_ERR "%s(): Not able to issue CPM command\n",
  107 + __FUNCTION__);
  108 + return 1;
  109 + }
  110 + return 0;
  111 +}
  112 +
  113 +static int do_pd_setup(struct fs_enet_private *fep)
  114 +{
  115 + struct platform_device *pdev = to_platform_device(fep->dev);
  116 + struct resource *r;
  117 +
  118 + /* Fill out IRQ field */
  119 + fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
  120 +
  121 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
  122 + fep->scc.sccp = (void *)r->start;
  123 +
  124 + if (fep->scc.sccp == NULL)
  125 + return -EINVAL;
  126 +
  127 + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
  128 + fep->scc.ep = (void *)r->start;
  129 +
  130 + if (fep->scc.ep == NULL)
  131 + return -EINVAL;
  132 +
  133 + return 0;
  134 +}
  135 +
  136 +#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
  137 +#define SCC_RX_EVENT (SCCE_ENET_RXF)
  138 +#define SCC_TX_EVENT (SCCE_ENET_TXB)
  139 +#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
  140 +
  141 +static int setup_data(struct net_device *dev)
  142 +{
  143 + struct fs_enet_private *fep = netdev_priv(dev);
  144 + const struct fs_platform_info *fpi = fep->fpi;
  145 +
  146 + fep->scc.idx = fs_get_scc_index(fpi->fs_no);
  147 +	if ((unsigned int)fep->scc.idx >= 4)	/* max 4 SCCs */
  148 + return -EINVAL;
  149 +
  150 +	if (do_pd_setup(fep) != 0)
  151 +		return -EINVAL;
  152 + fep->scc.hthi = 0;
  153 + fep->scc.htlo = 0;
  154 +
  155 + fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
  156 + fep->ev_rx = SCC_RX_EVENT;
  157 + fep->ev_tx = SCC_TX_EVENT;
  158 + fep->ev_err = SCC_ERR_EVENT_MSK;
  159 +
  160 + return 0;
  161 +}
  162 +
  163 +static int allocate_bd(struct net_device *dev)
  164 +{
  165 + struct fs_enet_private *fep = netdev_priv(dev);
  166 + const struct fs_platform_info *fpi = fep->fpi;
  167 +
  168 + fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
  169 + sizeof(cbd_t), 8);
  170 + if (IS_DPERR(fep->ring_mem_addr))
  171 + return -ENOMEM;
  172 +
  173 + fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
  174 +
  175 + return 0;
  176 +}
  177 +
  178 +static void free_bd(struct net_device *dev)
  179 +{
  180 + struct fs_enet_private *fep = netdev_priv(dev);
  181 +
  182 + if (fep->ring_base)
  183 + cpm_dpfree(fep->ring_mem_addr);
  184 +}
  185 +
  186 +static void cleanup_data(struct net_device *dev)
  187 +{
  188 + /* nothing */
  189 +}
  190 +
  191 +static void set_promiscuous_mode(struct net_device *dev)
  192 +{
  193 + struct fs_enet_private *fep = netdev_priv(dev);
  194 + scc_t *sccp = fep->scc.sccp;
  195 +
  196 + S16(sccp, scc_psmr, SCC_PSMR_PRO);
  197 +}
  198 +
  199 +static void set_multicast_start(struct net_device *dev)
  200 +{
  201 + struct fs_enet_private *fep = netdev_priv(dev);
  202 + scc_enet_t *ep = fep->scc.ep;
  203 +
  204 + W16(ep, sen_gaddr1, 0);
  205 + W16(ep, sen_gaddr2, 0);
  206 + W16(ep, sen_gaddr3, 0);
  207 + W16(ep, sen_gaddr4, 0);
  208 +}
  209 +
  210 +static void set_multicast_one(struct net_device *dev, const u8 * mac)
  211 +{
  212 + struct fs_enet_private *fep = netdev_priv(dev);
  213 + scc_enet_t *ep = fep->scc.ep;
  214 + u16 taddrh, taddrm, taddrl;
  215 +
  216 + taddrh = ((u16) mac[5] << 8) | mac[4];
  217 + taddrm = ((u16) mac[3] << 8) | mac[2];
  218 + taddrl = ((u16) mac[1] << 8) | mac[0];
  219 +
  220 + W16(ep, sen_taddrh, taddrh);
  221 + W16(ep, sen_taddrm, taddrm);
  222 + W16(ep, sen_taddrl, taddrl);
  223 + scc_cr_cmd(fep, CPM_CR_SET_GADDR);
  224 +}
  225 +
  226 +static void set_multicast_finish(struct net_device *dev)
  227 +{
  228 + struct fs_enet_private *fep = netdev_priv(dev);
  229 + scc_t *sccp = fep->scc.sccp;
  230 + scc_enet_t *ep = fep->scc.ep;
  231 +
  232 + /* clear promiscuous always */
  233 + C16(sccp, scc_psmr, SCC_PSMR_PRO);
  234 +
  235 +	/* if all-multi or too many multicasts, just enable all */
  236 + if ((dev->flags & IFF_ALLMULTI) != 0 ||
  237 + dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
  238 +
  239 + W16(ep, sen_gaddr1, 0xffff);
  240 + W16(ep, sen_gaddr2, 0xffff);
  241 + W16(ep, sen_gaddr3, 0xffff);
  242 + W16(ep, sen_gaddr4, 0xffff);
  243 + }
  244 +}
  245 +
  246 +static void set_multicast_list(struct net_device *dev)
  247 +{
  248 + struct dev_mc_list *pmc;
  249 +
  250 + if ((dev->flags & IFF_PROMISC) == 0) {
  251 + set_multicast_start(dev);
  252 + for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
  253 + set_multicast_one(dev, pmc->dmi_addr);
  254 + set_multicast_finish(dev);
  255 + } else
  256 + set_promiscuous_mode(dev);
  257 +}
  258 +
  259 +/*
  260 + * This function is called to start or restart the SCC during a link
  261 + * change. This only happens when switching between half and full
  262 + * duplex.
  263 + */
  264 +static void restart(struct net_device *dev)
  265 +{
  266 + struct fs_enet_private *fep = netdev_priv(dev);
  267 + scc_t *sccp = fep->scc.sccp;
  268 + scc_enet_t *ep = fep->scc.ep;
  269 + const struct fs_platform_info *fpi = fep->fpi;
  270 + u16 paddrh, paddrm, paddrl;
  271 + const unsigned char *mac;
  272 + int i;
  273 +
  274 + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
  275 +
  276 + /* clear everything (slow & steady does it) */
  277 + for (i = 0; i < sizeof(*ep); i++)
  278 + __fs_out8((char *)ep + i, 0);
  279 +
  280 + /* point to bds */
  281 + W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
  282 + W16(ep, sen_genscc.scc_tbase,
  283 + fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
  284 +
  285 + /* Initialize function code registers for big-endian.
  286 + */
  287 + W8(ep, sen_genscc.scc_rfcr, SCC_EB);
  288 + W8(ep, sen_genscc.scc_tfcr, SCC_EB);
  289 +
  290 + /* Set maximum bytes per receive buffer.
  291 + * This appears to be an Ethernet frame size, not the buffer
  292 + * fragment size. It must be a multiple of four.
  293 + */
  294 + W16(ep, sen_genscc.scc_mrblr, 0x5f0);
  295 +
  296 + /* Set CRC preset and mask.
  297 + */
  298 + W32(ep, sen_cpres, 0xffffffff);
  299 + W32(ep, sen_cmask, 0xdebb20e3);
  300 +
  301 + W32(ep, sen_crcec, 0); /* CRC Error counter */
  302 + W32(ep, sen_alec, 0); /* alignment error counter */
  303 + W32(ep, sen_disfc, 0); /* discard frame counter */
  304 +
  305 + W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
  306 + W16(ep, sen_retlim, 15); /* Retry limit threshold */
  307 +
  308 + W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
  309 +
  310 + W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
  311 +
  312 + W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
  313 + W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
  314 +
  315 + /* Clear hash tables.
  316 + */
  317 + W16(ep, sen_gaddr1, 0);
  318 + W16(ep, sen_gaddr2, 0);
  319 + W16(ep, sen_gaddr3, 0);
  320 + W16(ep, sen_gaddr4, 0);
  321 + W16(ep, sen_iaddr1, 0);
  322 + W16(ep, sen_iaddr2, 0);
  323 + W16(ep, sen_iaddr3, 0);
  324 + W16(ep, sen_iaddr4, 0);
  325 +
  326 + /* set address
  327 + */
  328 + mac = dev->dev_addr;
  329 + paddrh = ((u16) mac[5] << 8) | mac[4];
  330 + paddrm = ((u16) mac[3] << 8) | mac[2];
  331 + paddrl = ((u16) mac[1] << 8) | mac[0];
  332 +
  333 + W16(ep, sen_paddrh, paddrh);
  334 + W16(ep, sen_paddrm, paddrm);
  335 + W16(ep, sen_paddrl, paddrl);
  336 +
  337 + W16(ep, sen_pper, 0);
  338 + W16(ep, sen_taddrl, 0);
  339 + W16(ep, sen_taddrm, 0);
  340 + W16(ep, sen_taddrh, 0);
  341 +
  342 + fs_init_bds(dev);
  343 +
  344 + scc_cr_cmd(fep, CPM_CR_INIT_TRX);
  345 +
  346 + W16(sccp, scc_scce, 0xffff);
  347 +
  348 + /* Enable interrupts we wish to service.
  349 + */
  350 + W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
  351 +
  352 + /* Set GSMR_H to enable all normal operating modes.
  353 + * Set GSMR_L to enable Ethernet to MC68160.
  354 + */
  355 + W32(sccp, scc_gsmrh, 0);
  356 + W32(sccp, scc_gsmrl,
  357 + SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
  358 + SCC_GSMRL_MODE_ENET);
  359 +
  360 + /* Set sync/delimiters.
  361 + */
  362 + W16(sccp, scc_dsr, 0xd555);
  363 +
  364 + /* Set processing mode. Use Ethernet CRC, catch broadcast, and
  365 + * start frame search 22 bit times after RENA.
  366 + */
  367 + W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
  368 +
  369 + /* Set full duplex mode if needed */
  370 + if (fep->duplex)
  371 + S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
  372 +
  373 + S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
  374 +}
  375 +
  376 +static void stop(struct net_device *dev)
  377 +{
  378 + struct fs_enet_private *fep = netdev_priv(dev);
  379 + scc_t *sccp = fep->scc.sccp;
  380 + int i;
  381 +
  382 + for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
  383 + udelay(1);
  384 +
  385 + if (i == SCC_RESET_DELAY)
  386 + printk(KERN_WARNING DRV_MODULE_NAME
  387 + ": %s SCC timeout on graceful transmit stop\n",
  388 + dev->name);
  389 +
  390 + W16(sccp, scc_sccm, 0);
  391 + C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
  392 +
  393 + fs_cleanup_bds(dev);
  394 +}
  395 +
  396 +static void pre_request_irq(struct net_device *dev, int irq)
  397 +{
  398 + immap_t *immap = fs_enet_immap;
  399 + u32 siel;
  400 +
  401 + /* SIU interrupt */
  402 + if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
  403 +
  404 + siel = in_be32(&immap->im_siu_conf.sc_siel);
  405 + if ((irq & 1) == 0)
  406 + siel |= (0x80000000 >> irq);
  407 + else
  408 + siel &= ~(0x80000000 >> (irq & ~1));
  409 + out_be32(&immap->im_siu_conf.sc_siel, siel);
  410 + }
  411 +}
  412 +
  413 +static void post_free_irq(struct net_device *dev, int irq)
  414 +{
  415 + /* nothing */
  416 +}
  417 +
  418 +static void napi_clear_rx_event(struct net_device *dev)
  419 +{
  420 + struct fs_enet_private *fep = netdev_priv(dev);
  421 + scc_t *sccp = fep->scc.sccp;
  422 +
  423 + W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
  424 +}
  425 +
  426 +static void napi_enable_rx(struct net_device *dev)
  427 +{
  428 + struct fs_enet_private *fep = netdev_priv(dev);
  429 + scc_t *sccp = fep->scc.sccp;
  430 +
  431 + S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
  432 +}
  433 +
  434 +static void napi_disable_rx(struct net_device *dev)
  435 +{
  436 + struct fs_enet_private *fep = netdev_priv(dev);
  437 + scc_t *sccp = fep->scc.sccp;
  438 +
  439 + C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
  440 +}
  441 +
  442 +static void rx_bd_done(struct net_device *dev)
  443 +{
  444 + /* nothing */
  445 +}
  446 +
  447 +static void tx_kickstart(struct net_device *dev)
  448 +{
  449 + /* nothing */
  450 +}
  451 +
  452 +static u32 get_int_events(struct net_device *dev)
  453 +{
  454 + struct fs_enet_private *fep = netdev_priv(dev);
  455 + scc_t *sccp = fep->scc.sccp;
  456 +
  457 + return (u32) R16(sccp, scc_scce);
  458 +}
  459 +
  460 +static void clear_int_events(struct net_device *dev, u32 int_events)
  461 +{
  462 + struct fs_enet_private *fep = netdev_priv(dev);
  463 + scc_t *sccp = fep->scc.sccp;
  464 +
  465 + W16(sccp, scc_scce, int_events & 0xffff);
  466 +}
  467 +
  468 +static void ev_error(struct net_device *dev, u32 int_events)
  469 +{
  470 + printk(KERN_WARNING DRV_MODULE_NAME
  471 + ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
  472 +}
  473 +
  474 +static int get_regs(struct net_device *dev, void *p, int *sizep)
  475 +{
  476 + struct fs_enet_private *fep = netdev_priv(dev);
  477 +
  478 + if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
  479 + return -EINVAL;
  480 +
  481 + memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
  482 + p = (char *)p + sizeof(scc_t);
  483 +
  484 + memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
  485 +
  486 + return 0;
  487 +}
  488 +
  489 +static int get_regs_len(struct net_device *dev)
  490 +{
  491 + return sizeof(scc_t) + sizeof(scc_enet_t);
  492 +}
  493 +
  494 +static void tx_restart(struct net_device *dev)
  495 +{
  496 + struct fs_enet_private *fep = netdev_priv(dev);
  497 +
  498 + scc_cr_cmd(fep, CPM_CR_RESTART_TX);
  499 +}
  500 +
  501 +/*************************************************************************/
  502 +
  503 +const struct fs_ops fs_scc_ops = {
  504 + .setup_data = setup_data,
  505 + .cleanup_data = cleanup_data,
  506 + .set_multicast_list = set_multicast_list,
  507 + .restart = restart,
  508 + .stop = stop,
  509 + .pre_request_irq = pre_request_irq,
  510 + .post_free_irq = post_free_irq,
  511 + .napi_clear_rx_event = napi_clear_rx_event,
  512 + .napi_enable_rx = napi_enable_rx,
  513 + .napi_disable_rx = napi_disable_rx,
  514 + .rx_bd_done = rx_bd_done,
  515 + .tx_kickstart = tx_kickstart,
  516 + .get_int_events = get_int_events,
  517 + .clear_int_events = clear_int_events,
  518 + .ev_error = ev_error,
  519 + .get_regs = get_regs,
  520 + .get_regs_len = get_regs_len,
  521 + .tx_restart = tx_restart,
  522 + .allocate_bd = allocate_bd,
  523 + .free_bd = free_bd,
  524 +};
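The fs_scc_ops table above is the SCC instance of the per-MAC hook set that the generic fs_enet core dispatches through. A minimal sketch of that dispatch pattern, assuming the caller already has a pointer to the ops table (the helper name and calling context are hypothetical, not taken from fs_enet-main.c):

/* Illustrative sketch only -- helper name and calling context are assumed. */
static void example_reinit_mac(struct net_device *dev, const struct fs_ops *ops)
{
	ops->stop(dev);		/* mask interrupts, disable RX/TX, clean up BDs */
	ops->restart(dev);	/* reprogram parameter RAM, re-enable RX/TX */
	ops->tx_kickstart(dev);	/* a no-op for the SCC, see above */
}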
drivers/net/fs_enet/mii-bitbang.c
  1 +/*
  2 + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * This file is licensed under the terms of the GNU General Public License
  11 + * version 2. This program is licensed "as is" without any warranty of any
  12 + * kind, whether express or implied.
  13 + */
  14 +
  15 +
  16 +#include <linux/config.h>
  17 +#include <linux/module.h>
  18 +#include <linux/types.h>
  19 +#include <linux/kernel.h>
  20 +#include <linux/sched.h>
  21 +#include <linux/string.h>
  22 +#include <linux/ptrace.h>
  23 +#include <linux/errno.h>
  24 +#include <linux/ioport.h>
  25 +#include <linux/slab.h>
  26 +#include <linux/interrupt.h>
  27 +#include <linux/pci.h>
  28 +#include <linux/init.h>
  29 +#include <linux/delay.h>
  30 +#include <linux/netdevice.h>
  31 +#include <linux/etherdevice.h>
  32 +#include <linux/skbuff.h>
  33 +#include <linux/spinlock.h>
  34 +#include <linux/mii.h>
  35 +#include <linux/ethtool.h>
  36 +#include <linux/bitops.h>
  37 +
  38 +#include <asm/pgtable.h>
  39 +#include <asm/irq.h>
  40 +#include <asm/uaccess.h>
  41 +
  42 +#include "fs_enet.h"
  43 +
  44 +#ifdef CONFIG_8xx
  45 +static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
  46 +{
  47 + immap_t *im = (immap_t *)fs_enet_immap;
  48 + void *dir, *dat, *ppar;
  49 + int adv;
  50 + u8 msk;
  51 +
  52 + switch (port) {
  53 + case fsiop_porta:
  54 + dir = &im->im_ioport.iop_padir;
  55 + dat = &im->im_ioport.iop_padat;
  56 + ppar = &im->im_ioport.iop_papar;
  57 + break;
  58 +
  59 + case fsiop_portb:
  60 + dir = &im->im_cpm.cp_pbdir;
  61 + dat = &im->im_cpm.cp_pbdat;
  62 + ppar = &im->im_cpm.cp_pbpar;
  63 + break;
  64 +
  65 + case fsiop_portc:
  66 + dir = &im->im_ioport.iop_pcdir;
  67 + dat = &im->im_ioport.iop_pcdat;
  68 + ppar = &im->im_ioport.iop_pcpar;
  69 + break;
  70 +
  71 + case fsiop_portd:
  72 + dir = &im->im_ioport.iop_pddir;
  73 + dat = &im->im_ioport.iop_pddat;
  74 + ppar = &im->im_ioport.iop_pdpar;
  75 + break;
  76 +
  77 + case fsiop_porte:
  78 + dir = &im->im_cpm.cp_pedir;
  79 + dat = &im->im_cpm.cp_pedat;
  80 + ppar = &im->im_cpm.cp_pepar;
  81 + break;
  82 +
  83 + default:
  84 + printk(KERN_ERR DRV_MODULE_NAME
  85 + "Illegal port value %d!\n", port);
  86 + return -EINVAL;
  87 + }
  88 +
  89 + adv = bit >> 3;
  90 + dir = (char *)dir + adv;
  91 + dat = (char *)dat + adv;
  92 + ppar = (char *)ppar + adv;
  93 +
  94 + msk = 1 << (7 - (bit & 7));
  95 + if ((in_8(ppar) & msk) != 0) {
  96 + printk(KERN_ERR DRV_MODULE_NAME
  97 + "pin %d on port %d is not general purpose!\n", bit, port);
  98 + return -EINVAL;
  99 + }
  100 +
  101 + *dirp = dir;
  102 + *datp = dat;
  103 + *mskp = msk;
  104 +
  105 + return 0;
  106 +}
  107 +#endif
  108 +
  109 +#ifdef CONFIG_8260
  110 +static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
  111 +{
  112 + iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
  113 + void *dir, *dat, *ppar;
  114 + int adv;
  115 + u8 msk;
  116 +
  117 + switch (port) {
  118 + case fsiop_porta:
  119 + dir = &io->iop_pdira;
  120 + dat = &io->iop_pdata;
  121 + ppar = &io->iop_ppara;
  122 + break;
  123 +
  124 + case fsiop_portb:
  125 + dir = &io->iop_pdirb;
  126 + dat = &io->iop_pdatb;
  127 + ppar = &io->iop_pparb;
  128 + break;
  129 +
  130 + case fsiop_portc:
  131 + dir = &io->iop_pdirc;
  132 + dat = &io->iop_pdatc;
  133 + ppar = &io->iop_pparc;
  134 + break;
  135 +
  136 + case fsiop_portd:
  137 + dir = &io->iop_pdird;
  138 + dat = &io->iop_pdatd;
  139 + ppar = &io->iop_ppard;
  140 + break;
  141 +
  142 + default:
  143 + printk(KERN_ERR DRV_MODULE_NAME
  144 + "Illegal port value %d!\n", port);
  145 + return -EINVAL;
  146 + }
  147 +
  148 + adv = bit >> 3;
  149 + dir = (char *)dir + adv;
  150 + dat = (char *)dat + adv;
  151 + ppar = (char *)ppar + adv;
  152 +
  153 + msk = 1 << (7 - (bit & 7));
  154 + if ((in_8(ppar) & msk) != 0) {
  155 + printk(KERN_ERR DRV_MODULE_NAME
  156 + "pin %d on port %d is not general purpose!\n", bit, port);
  157 + return -EINVAL;
  158 + }
  159 +
  160 + *dirp = dir;
  161 + *datp = dat;
  162 + *mskp = msk;
  163 +
  164 + return 0;
  165 +}
  166 +#endif
  167 +
  168 +static inline void bb_set(u8 *p, u8 m)
  169 +{
  170 + out_8(p, in_8(p) | m);
  171 +}
  172 +
  173 +static inline void bb_clr(u8 *p, u8 m)
  174 +{
  175 + out_8(p, in_8(p) & ~m);
  176 +}
  177 +
  178 +static inline int bb_read(u8 *p, u8 m)
  179 +{
  180 + return (in_8(p) & m) != 0;
  181 +}
  182 +
  183 +static inline void mdio_active(struct fs_enet_mii_bus *bus)
  184 +{
  185 + bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
  186 +}
  187 +
  188 +static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
  189 +{
  190 + bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
  191 +}
  192 +
  193 +static inline int mdio_read(struct fs_enet_mii_bus *bus)
  194 +{
  195 + return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
  196 +}
  197 +
  198 +static inline void mdio(struct fs_enet_mii_bus *bus, int what)
  199 +{
  200 + if (what)
  201 + bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
  202 + else
  203 + bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
  204 +}
  205 +
  206 +static inline void mdc(struct fs_enet_mii_bus *bus, int what)
  207 +{
  208 + if (what)
  209 + bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
  210 + else
  211 + bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
  212 +}
  213 +
  214 +static inline void mii_delay(struct fs_enet_mii_bus *bus)
  215 +{
  216 + udelay(bus->bus_info->i.bitbang.delay);
  217 +}
  218 +
  219 +/* Utility to send the preamble, address, and register (common to read and write). */
  220 +static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
  221 +{
  222 + int j;
  223 +
  224 + /*
  225 + * Send a 32 bit preamble (all '1's) to let the PHY resynchronize.
  226 + * The IEEE spec leaves it up to the PHY whether a preamble is
  227 + * required; the AMD 79C874, for one, needs it after power up and after
  228 + * any MII communications error.  Sending it unconditionally means more
  229 + * preambles than strictly necessary, but it is safer and more robust.
  230 + */
  231 +
  232 + mdio_active(bus);
  233 + mdio(bus, 1);
  234 + for (j = 0; j < 32; j++) {
  235 + mdc(bus, 0);
  236 + mii_delay(bus);
  237 + mdc(bus, 1);
  238 + mii_delay(bus);
  239 + }
  240 +
  241 + /* send the start bit (01) and the read opcode (10) or write opcode (01) */
  242 + mdc(bus, 0);
  243 + mdio(bus, 0);
  244 + mii_delay(bus);
  245 + mdc(bus, 1);
  246 + mii_delay(bus);
  247 + mdc(bus, 0);
  248 + mdio(bus, 1);
  249 + mii_delay(bus);
  250 + mdc(bus, 1);
  251 + mii_delay(bus);
  252 + mdc(bus, 0);
  253 + mdio(bus, read);
  254 + mii_delay(bus);
  255 + mdc(bus, 1);
  256 + mii_delay(bus);
  257 + mdc(bus, 0);
  258 + mdio(bus, !read);
  259 + mii_delay(bus);
  260 + mdc(bus, 1);
  261 + mii_delay(bus);
  262 +
  263 + /* send the PHY address */
  264 + for (j = 0; j < 5; j++) {
  265 + mdc(bus, 0);
  266 + mdio(bus, (addr & 0x10) != 0);
  267 + mii_delay(bus);
  268 + mdc(bus, 1);
  269 + mii_delay(bus);
  270 + addr <<= 1;
  271 + }
  272 +
  273 + /* send the register address */
  274 + for (j = 0; j < 5; j++) {
  275 + mdc(bus, 0);
  276 + mdio(bus, (reg & 0x10) != 0);
  277 + mii_delay(bus);
  278 + mdc(bus, 1);
  279 + mii_delay(bus);
  280 + reg <<= 1;
  281 + }
  282 +}
  283 +
  284 +static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
  285 +{
  286 + u16 rdreg;
  287 + int ret, j;
  288 + u8 addr = phy_id & 0xff;
  289 + u8 reg = location & 0xff;
  290 +
  291 + bitbang_pre(bus, 1, addr, reg);
  292 +
  293 + /* tri-state our MDIO I/O pin so we can read */
  294 + mdc(bus, 0);
  295 + mdio_tristate(bus);
  296 + mii_delay(bus);
  297 + mdc(bus, 1);
  298 + mii_delay(bus);
  299 +
  300 + /* check the turnaround bit: the PHY should be driving it to zero */
  301 + if (mdio_read(bus) != 0) {
  302 + /* PHY didn't drive TA low */
  303 + for (j = 0; j < 32; j++) {
  304 + mdc(bus, 0);
  305 + mii_delay(bus);
  306 + mdc(bus, 1);
  307 + mii_delay(bus);
  308 + }
  309 + ret = -1;
  310 + goto out;
  311 + }
  312 +
  313 + mdc(bus, 0);
  314 + mii_delay(bus);
  315 +
  316 + /* read 16 bits of register data, MSB first */
  317 + rdreg = 0;
  318 + for (j = 0; j < 16; j++) {
  319 + mdc(bus, 1);
  320 + mii_delay(bus);
  321 + rdreg <<= 1;
  322 + rdreg |= mdio_read(bus);
  323 + mdc(bus, 0);
  324 + mii_delay(bus);
  325 + }
  326 +
  327 + mdc(bus, 1);
  328 + mii_delay(bus);
  329 + mdc(bus, 0);
  330 + mii_delay(bus);
  331 + mdc(bus, 1);
  332 + mii_delay(bus);
  333 +
  334 + ret = rdreg;
  335 +out:
  336 + return ret;
  337 +}
  338 +
  339 +static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
  340 +{
  341 + int j;
  342 + u8 addr = phy_id & 0xff;
  343 + u8 reg = location & 0xff;
  344 + u16 value = val & 0xffff;
  345 +
  346 + bitbang_pre(bus, 0, addr, reg);
  347 +
  348 + /* send the turnaround (10) */
  349 + mdc(bus, 0);
  350 + mdio(bus, 1);
  351 + mii_delay(bus);
  352 + mdc(bus, 1);
  353 + mii_delay(bus);
  354 + mdc(bus, 0);
  355 + mdio(bus, 0);
  356 + mii_delay(bus);
  357 + mdc(bus, 1);
  358 + mii_delay(bus);
  359 +
  360 + /* write 16 bits of register data, MSB first */
  361 + for (j = 0; j < 16; j++) {
  362 + mdc(bus, 0);
  363 + mdio(bus, (value & 0x8000) != 0);
  364 + mii_delay(bus);
  365 + mdc(bus, 1);
  366 + mii_delay(bus);
  367 + value <<= 1;
  368 + }
  369 +
  370 + /*
  371 + * Tri-state the MDIO line.
  372 + */
  373 + mdio_tristate(bus);
  374 + mdc(bus, 0);
  375 + mii_delay(bus);
  376 + mdc(bus, 1);
  377 + mii_delay(bus);
  378 +}
  379 +
  380 +int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
  381 +{
  382 + const struct fs_mii_bus_info *bi = bus->bus_info;
  383 + int r;
  384 +
  385 + r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
  386 + &bus->bitbang.mdio_dat,
  387 + &bus->bitbang.mdio_msk,
  388 + bi->i.bitbang.mdio_port,
  389 + bi->i.bitbang.mdio_bit);
  390 + if (r != 0)
  391 + return r;
  392 +
  393 + r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
  394 + &bus->bitbang.mdc_dat,
  395 + &bus->bitbang.mdc_msk,
  396 + bi->i.bitbang.mdc_port,
  397 + bi->i.bitbang.mdc_bit);
  398 + if (r != 0)
  399 + return r;
  400 +
  401 + bus->mii_read = mii_read;
  402 + bus->mii_write = mii_write;
  403 +
  404 + return 0;
  405 +}
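mii_read() and mii_write() above bit-bang a standard IEEE 802.3 clause 22 management frame: 32 preamble '1's, the start pattern 01, an opcode (10 for read, 01 for write), the 5-bit PHY address, the 5-bit register address, a turnaround, and 16 data bits, MSB first. A hedged sketch of how a caller might use the bus handle once fs_mii_bitbang_init() has filled in the read/write hooks; the helper below is illustrative, and the constants are the standard MII_BMSR register number and its link-status bit:

/* Illustrative only -- not part of the patch. */
static int example_link_up(struct fs_enet_mii_bus *bus, int phy_addr)
{
	int bmsr = bus->mii_read(bus, phy_addr, 0x01);	/* MII_BMSR */

	if (bmsr < 0)
		return 0;		/* PHY did not drive the turnaround bit */
	return (bmsr & 0x0004) != 0;	/* BMSR_LSTATUS */
}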
drivers/net/fs_enet/mii-fixed.c
  1 +/*
  2 + * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
  3 + *
  4 + * Copyright (c) 2003 Intracom S.A.
  5 + * by Pantelis Antoniou <panto@intracom.gr>
  6 + *
  7 + * 2005 (c) MontaVista Software, Inc.
  8 + * Vitaly Bordug <vbordug@ru.mvista.com>
  9 + *
  10 + * This file is licensed under the terms of the GNU General Public License
  11 + * version 2. This program is licensed "as is" without any warranty of any
  12 + * kind, whether express or implied.
  13 + */
  14 +
  15 +
  16 +#include <linux/config.h>
  17 +#include <linux/module.h>
  18 +#include <linux/types.h>
  19 +#include <linux/kernel.h>
  20 +#include <linux/sched.h>
  21 +#include <linux/string.h>
  22 +#include <linux/ptrace.h>
  23 +#include <linux/errno.h>
  24 +#include <linux/ioport.h>
  25 +#include <linux/slab.h>
  26 +#include <linux/interrupt.h>
  27 +#include <linux/pci.h>
  28 +#include <linux/init.h>
  29 +#include <linux/delay.h>
  30 +#include <linux/netdevice.h>
  31 +#include <linux/etherdevice.h>
  32 +#include <linux/skbuff.h>
  33 +#include <linux/spinlock.h>
  34 +#include <linux/mii.h>
  35 +#include <linux/ethtool.h>
  36 +#include <linux/bitops.h>
  37 +
  38 +#include <asm/pgtable.h>
  39 +#include <asm/irq.h>
  40 +#include <asm/uaccess.h>
  41 +
  42 +#include "fs_enet.h"
  43 +
  44 +static const u16 mii_regs[7] = {
  45 + 0x3100, /* 0: MII_BMCR */
  46 + 0x786d, /* 1: MII_BMSR */
  47 + 0x0fff, /* 2: MII_PHYSID1 */
  48 + 0x0fff, /* 3: MII_PHYSID2 */
  49 + 0x01e1, /* 4: MII_ADVERTISE */
  50 + 0x45e1, /* 5: MII_LPA (read from bus->fixed.lpa instead) */
  51 + 0x0003, /* 6: MII_EXPANSION */
  52 +};
  53 +
  54 +static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
  55 +{
  56 + int ret = 0;
  57 +
  58 + if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
  59 + return -1;
  60 +
  61 + if (location != 5)
  62 + ret = mii_regs[location];
  63 + else
  64 + ret = bus->fixed.lpa;
  65 +
  66 + return ret;
  67 +}
  68 +
  69 +static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
  70 +{
  71 + /* do nothing */
  72 +}
  73 +
  74 +int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
  75 +{
  76 + const struct fs_mii_bus_info *bi = bus->bus_info;
  77 +
  78 + bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
  79 +
  80 + /* if speed is fixed at 10Mb, remove 100Mb modes */
  81 + if (bi->i.fixed.speed == 10)
  82 + bus->fixed.lpa &= ~LPA_100;
  83 +
  84 + /* if duplex is half, remove full duplex modes */
  85 + if (bi->i.fixed.duplex == 0)
  86 + bus->fixed.lpa &= ~LPA_DUPLEX;
  87 +
  88 + bus->mii_read = mii_read;
  89 + bus->mii_write = mii_write;
  90 +
  91 + return 0;
  92 +}
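fs_mii_fixed_init() starts from an LPA word that advertises 10/100 Mbit at both duplexes and then strips abilities according to the board's fixed settings: speed == 10 clears the 100 Mbit bits via ~LPA_100, duplex == 0 clears the full-duplex bits via ~LPA_DUPLEX, so only the 10 Mbit half-duplex ability (plus the pause/acknowledge bits already present in 0x45e1) survives. A sketch of the bus description a board might pass in for such a link; the values are placeholders:

/* Illustrative only -- placeholder board data. */
static const struct fs_mii_bus_info example_fixed_10_half = {
	.method		= fsmii_fixed,
	.id		= 0,
	.i.fixed.speed	= 10,	/* fixed 10 Mbit ... */
	.i.fixed.duplex	= 0,	/* ... half duplex */
};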
include/linux/fs_enet_pd.h
  1 +/*
  2 + * Platform information definitions for the
  3 + * universal Freescale Ethernet driver.
  4 + *
  5 + * Copyright (c) 2003 Intracom S.A.
  6 + * by Pantelis Antoniou <panto@intracom.gr>
  7 + *
  8 + * 2005 (c) MontaVista Software, Inc.
  9 + * Vitaly Bordug <vbordug@ru.mvista.com>
  10 + *
  11 + * This file is licensed under the terms of the GNU General Public License
  12 + * version 2. This program is licensed "as is" without any warranty of any
  13 + * kind, whether express or implied.
  14 + */
  15 +
  16 +#ifndef FS_ENET_PD_H
  17 +#define FS_ENET_PD_H
  18 +
  19 +#include <linux/version.h>
  20 +#include <asm/types.h>
  21 +
  22 +#define FS_ENET_NAME "fs_enet"
  23 +
  24 +enum fs_id {
  25 + fsid_fec1,
  26 + fsid_fec2,
  27 + fsid_fcc1,
  28 + fsid_fcc2,
  29 + fsid_fcc3,
  30 + fsid_scc1,
  31 + fsid_scc2,
  32 + fsid_scc3,
  33 + fsid_scc4,
  34 +};
  35 +
  36 +#define FS_MAX_INDEX 9
  37 +
  38 +static inline int fs_get_fec_index(enum fs_id id)
  39 +{
  40 + if (id >= fsid_fec1 && id <= fsid_fec2)
  41 + return id - fsid_fec1;
  42 + return -1;
  43 +}
  44 +
  45 +static inline int fs_get_fcc_index(enum fs_id id)
  46 +{
  47 + if (id >= fsid_fcc1 && id <= fsid_fcc3)
  48 + return id - fsid_fcc1;
  49 + return -1;
  50 +}
  51 +
  52 +static inline int fs_get_scc_index(enum fs_id id)
  53 +{
  54 + if (id >= fsid_scc1 && id <= fsid_scc4)
  55 + return id - fsid_scc1;
  56 + return -1;
  57 +}
  58 +
  59 +enum fs_mii_method {
  60 + fsmii_fixed,
  61 + fsmii_fec,
  62 + fsmii_bitbang,
  63 +};
  64 +
  65 +enum fs_ioport {
  66 + fsiop_porta,
  67 + fsiop_portb,
  68 + fsiop_portc,
  69 + fsiop_portd,
  70 + fsiop_porte,
  71 +};
  72 +
  73 +struct fs_mii_bus_info {
  74 + int method; /* mii method */
  75 + int id; /* the id of the mii_bus */
  76 + int disable_aneg; /* don't auto-negotiate speed & duplex */
  77 + int lpa; /* link partner ability to use; the board-specific default values are applied otherwise */
  78 +
  79 + union {
  80 + struct {
  81 + int duplex;
  82 + int speed;
  83 + } fixed;
  84 +
  85 + struct {
  86 + /* nothing */
  87 + } fec;
  88 +
  89 + struct {
  90 + /* nothing */
  91 + } scc;
  92 +
  93 + struct {
  94 + int mdio_port; /* port & bit for MDIO */
  95 + int mdio_bit;
  96 + int mdc_port; /* port & bit for MDC */
  97 + int mdc_bit;
  98 + int delay; /* delay in us */
  99 + } bitbang;
  100 + } i;
  101 +};
  102 +
  103 +struct fs_platform_info {
  104 +
  105 + void (*init_ioports)(void);
  106 + /* device specific information */
  107 + int fs_no; /* controller index */
  108 +
  109 + u32 cp_page; /* CPM page */
  110 + u32 cp_block; /* CPM sblock */
  111 +
  112 + u32 clk_trx; /* clock, pin & mux configuration */
  113 + u32 clk_route;
  114 + u32 clk_mask;
  115 +
  116 + u32 mem_offset;
  117 + u32 dpram_offset;
  118 + u32 fcc_regs_c;
  119 +
  120 + u32 device_flags;
  121 +
  122 + int phy_addr; /* the phy address (-1 no phy) */
  123 + int phy_irq; /* the phy irq (if it exists) */
  124 +
  125 + const struct fs_mii_bus_info *bus_info;
  126 +
  127 + int rx_ring, tx_ring; /* number of rx and tx buffers */
  128 + __u8 macaddr[6]; /* mac address */
  129 + int rx_copybreak; /* copy received frames smaller than this limit */
  130 + int use_napi; /* use NAPI */
  131 + int napi_weight; /* NAPI weight */
  132 +
  133 + int use_rmii; /* use RMII mode */
  134 +};
  135 +
  136 +#endif
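A board port ties all of this together by handing the driver one struct fs_platform_info per controller, pointing at a struct fs_mii_bus_info that describes how the PHY is reached. A hedged sketch of what such platform data might look like for an SCC port with a bit-banged MII bus; every value below is a placeholder, not taken from a real board file:

/* Illustrative only -- all values are placeholders. */
static const struct fs_mii_bus_info example_mii_bus = {
	.method			= fsmii_bitbang,
	.id			= 0,
	.i.bitbang.mdio_port	= fsiop_portd,
	.i.bitbang.mdio_bit	= 8,
	.i.bitbang.mdc_port	= fsiop_portd,
	.i.bitbang.mdc_bit	= 9,
	.i.bitbang.delay	= 1,	/* us between MDC edges */
};

static struct fs_platform_info example_scc_pdata = {
	.fs_no		= fsid_scc2,
	.phy_addr	= 0,
	.phy_irq	= -1,		/* no PHY interrupt line wired up */
	.bus_info	= &example_mii_bus,
	.rx_ring	= 64,
	.tx_ring	= 8,
	.rx_copybreak	= 240,
	.use_napi	= 1,
	.napi_weight	= 17,
	.macaddr	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
};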