Commit 7a2eaf9358250706672783eb8511835706b0922b

Authored by Christian Dietrich
Committed by Greg Kroah-Hartman
1 parent 9d17653c72

staging: octeon: use printk_ratelimited instead of printk_ratelimit

As the comment above printk_ratelimit() states, it should not be used;
convert these call sites to printk_ratelimited() instead.

Signed-off-by: Christian Dietrich <christian.dietrich@informatik.uni-erlangen.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Showing 7 changed files with 70 additions and 65 deletions
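
Before the per-file diffs, a minimal sketch of the pattern being replaced. The driver's DEBUGPRINT() macro removed below is assumed to be a thin wrapper around the open-coded printk_ratelimit() check; the helper that replaces it, made available through the added <linux/ratelimit.h> includes, folds the gating into a single call. Illustration only, not part of the commit:

	/* Before: gate a plain printk() on the shared printk_ratelimit() state */
	if (printk_ratelimit())
		printk("%s: Link down\n", dev->name);

	/* After: printk_ratelimited() performs the rate limiting itself */
	printk_ratelimited("%s: Link down\n", dev->name);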

drivers/staging/octeon/ethernet-mdio.c
 /**********************************************************************
  * Author: Cavium Networks
  *
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
  * Copyright (c) 2003-2007 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
  * published by the Free Software Foundation.
  *
  * This file is distributed in the hope that it will be useful, but
  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT. See the GNU General Public License for more
  * details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this file; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  * or visit http://www.gnu.org/licenses/.
  *
  * This file may also be available under a different license from Cavium.
  * Contact Cavium Networks for more information
  **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/ratelimit.h>
 
 #include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
 
 #include "ethernet-defines.h"
 #include "octeon-ethernet.h"
 #include "ethernet-mdio.h"
 #include "ethernet-util.h"
 
 #include "cvmx-helper-board.h"
 
 #include "cvmx-smix-defs.h"
 
 static void cvm_oct_get_drvinfo(struct net_device *dev,
 				struct ethtool_drvinfo *info)
 {
 	strcpy(info->driver, "cavium-ethernet");
 	strcpy(info->version, OCTEON_ETHERNET_VERSION);
 	strcpy(info->bus_info, "Builtin");
 }
 
 static int cvm_oct_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 
 	if (priv->phydev)
 		return phy_ethtool_gset(priv->phydev, cmd);
 
 	return -EINVAL;
 }
 
 static int cvm_oct_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (priv->phydev)
 		return phy_ethtool_sset(priv->phydev, cmd);
 
 	return -EINVAL;
 }
 
 static int cvm_oct_nway_reset(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
 	if (priv->phydev)
 		return phy_start_aneg(priv->phydev);
 
 	return -EINVAL;
 }
 
 const struct ethtool_ops cvm_oct_ethtool_ops = {
 	.get_drvinfo = cvm_oct_get_drvinfo,
 	.get_settings = cvm_oct_get_settings,
 	.set_settings = cvm_oct_set_settings,
 	.nway_reset = cvm_oct_nway_reset,
 	.get_link = ethtool_op_get_link,
 };
 
 /**
  * cvm_oct_ioctl - IOCTL support for PHY control
  * @dev: Device to change
  * @rq: the request
  * @cmd: the command
  *
  * Returns Zero on success
  */
 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
 	if (!priv->phydev)
 		return -EINVAL;
 
 	return phy_mii_ioctl(priv->phydev, rq, cmd);
 }
 
 static void cvm_oct_adjust_link(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	cvmx_helper_link_info_t link_info;
 
 	if (priv->last_link != priv->phydev->link) {
 		priv->last_link = priv->phydev->link;
 		link_info.u64 = 0;
 		link_info.s.link_up = priv->last_link ? 1 : 0;
 		link_info.s.full_duplex = priv->phydev->duplex ? 1 : 0;
 		link_info.s.speed = priv->phydev->speed;
 		cvmx_helper_link_set( priv->port, link_info);
 		if (priv->last_link) {
 			netif_carrier_on(dev);
 			if (priv->queue != -1)
-				DEBUGPRINT("%s: %u Mbps %s duplex, "
+				printk_ratelimited("%s: %u Mbps %s duplex, "
 					   "port %2d, queue %2d\n",
 					   dev->name, priv->phydev->speed,
 					   priv->phydev->duplex ?
 					   "Full" : "Half",
 					   priv->port, priv->queue);
 			else
-				DEBUGPRINT("%s: %u Mbps %s duplex, "
+				printk_ratelimited("%s: %u Mbps %s duplex, "
 					   "port %2d, POW\n",
 					   dev->name, priv->phydev->speed,
 					   priv->phydev->duplex ?
 					   "Full" : "Half",
 					   priv->port);
 		} else {
 			netif_carrier_off(dev);
-			DEBUGPRINT("%s: Link down\n", dev->name);
+			printk_ratelimited("%s: Link down\n", dev->name);
 		}
 	}
 }
 
 
 /**
  * cvm_oct_phy_setup_device - setup the PHY
  *
  * @dev: Device to setup
  *
  * Returns Zero on success, negative on failure
  */
 int cvm_oct_phy_setup_device(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 
 	int phy_addr = cvmx_helper_board_get_mii_address(priv->port);
 	if (phy_addr != -1) {
 		char phy_id[20];
 
 		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", phy_addr);
 
 		priv->phydev = phy_connect(dev, phy_id, cvm_oct_adjust_link, 0,
 					   PHY_INTERFACE_MODE_GMII);
 
 		if (IS_ERR(priv->phydev)) {
 			priv->phydev = NULL;
 			return -1;
 		}
 		priv->last_link = 0;
 		phy_start_aneg(priv->phydev);
 	}
 	return 0;
 }
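
Why printk_ratelimited() is the preferred replacement: printk_ratelimit() tests a single global ratelimit state, so one noisy caller can starve unrelated messages, which is what its comment warns about. printk_ratelimited() instead keeps a static ratelimit state per call site. Roughly, paraphrasing the kernel's definition rather than quoting this commit:

	#define printk_ratelimited(fmt, ...)				\
	({								\
		static DEFINE_RATELIMIT_STATE(_rs,			\
					      DEFAULT_RATELIMIT_INTERVAL, \
					      DEFAULT_RATELIMIT_BURST);	\
									\
		if (__ratelimit(&_rs))					\
			printk(fmt, ##__VA_ARGS__);			\
	})
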
drivers/staging/octeon/ethernet-rgmii.c
 /*********************************************************************
  * Author: Cavium Networks
  *
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
  * Copyright (c) 2003-2007 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
  * published by the Free Software Foundation.
  *
  * This file is distributed in the hope that it will be useful, but
  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT. See the GNU General Public License for more
  * details.
  *
  * You should have received a copy of the GNU General Public License
  * along with this file; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  * or visit http://www.gnu.org/licenses/.
  *
  * This file may also be available under a different license from Cavium.
  * Contact Cavium Networks for more information
  **********************************************************************/
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/ratelimit.h>
 #include <net/dst.h>
 
 #include <asm/octeon/octeon.h>
 
 #include "ethernet-defines.h"
 #include "octeon-ethernet.h"
 #include "ethernet-util.h"
 
 #include "cvmx-helper.h"
 
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include "cvmx-gmxx-defs.h"
 
 DEFINE_SPINLOCK(global_register_lock);
 
 static int number_rgmii_ports;
 
 static void cvm_oct_rgmii_poll(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	unsigned long flags = 0;
 	cvmx_helper_link_info_t link_info;
 	int use_global_register_lock = (priv->phydev == NULL);
 
 	BUG_ON(in_interrupt());
 	if (use_global_register_lock) {
 		/*
 		 * Take the global register lock since we are going to
 		 * touch registers that affect more than one port.
 		 */
 		spin_lock_irqsave(&global_register_lock, flags);
 	} else {
 		mutex_lock(&priv->phydev->bus->mdio_lock);
 	}
 
 	link_info = cvmx_helper_link_get(priv->port);
 	if (link_info.u64 == priv->link_info) {
 
 		/*
 		 * If the 10Mbps preamble workaround is supported and we're
 		 * at 10Mbps we may need to do some special checking.
 		 */
 		if (USE_10MBPS_PREAMBLE_WORKAROUND && (link_info.s.speed == 10)) {
 
 			/*
 			 * Read the GMXX_RXX_INT_REG[PCTERR] bit and
 			 * see if we are getting preamble errors.
 			 */
 			int interface = INTERFACE(priv->port);
 			int index = INDEX(priv->port);
 			union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
 			gmxx_rxx_int_reg.u64 =
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
 					  (index, interface));
 			if (gmxx_rxx_int_reg.s.pcterr) {
 
 				/*
 				 * We are getting preamble errors at
 				 * 10Mbps. Most likely the PHY is
 				 * giving us packets with mis aligned
 				 * preambles. In order to get these
 				 * packets we need to disable preamble
 				 * checking and do it in software.
 				 */
 				union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
 				union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
 
 				/* Disable preamble checking */
 				gmxx_rxx_frm_ctl.u64 =
 				    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
 						  (index, interface));
 				gmxx_rxx_frm_ctl.s.pre_chk = 0;
 				cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL
 					       (index, interface),
 					       gmxx_rxx_frm_ctl.u64);
 
 				/* Disable FCS stripping */
 				ipd_sub_port_fcs.u64 =
 				    cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
 				ipd_sub_port_fcs.s.port_bit &=
 				    0xffffffffull ^ (1ull << priv->port);
 				cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS,
 					       ipd_sub_port_fcs.u64);
 
 				/* Clear any error bits */
 				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
 					       (index, interface),
 					       gmxx_rxx_int_reg.u64);
-				DEBUGPRINT("%s: Using 10Mbps with software "
+				printk_ratelimited("%s: Using 10Mbps with software "
 					   "preamble removal\n",
 					   dev->name);
 			}
 		}
 
 		if (use_global_register_lock)
 			spin_unlock_irqrestore(&global_register_lock, flags);
 		else
 			mutex_unlock(&priv->phydev->bus->mdio_lock);
 		return;
 	}
 
 	/* If the 10Mbps preamble workaround is allowed we need to on
 	   preamble checking, FCS stripping, and clear error bits on
 	   every speed change. If errors occur during 10Mbps operation
 	   the above code will change this stuff */
 	if (USE_10MBPS_PREAMBLE_WORKAROUND) {
 
 		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
 		union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
 		union cvmx_gmxx_rxx_int_reg gmxx_rxx_int_reg;
 		int interface = INTERFACE(priv->port);
 		int index = INDEX(priv->port);
 
 		/* Enable preamble checking */
 		gmxx_rxx_frm_ctl.u64 =
 		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
 		gmxx_rxx_frm_ctl.s.pre_chk = 1;
 		cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface),
 			       gmxx_rxx_frm_ctl.u64);
 		/* Enable FCS stripping */
 		ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
 		ipd_sub_port_fcs.s.port_bit |= 1ull << priv->port;
 		cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
 		/* Clear any error bits */
 		gmxx_rxx_int_reg.u64 =
 		    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
 		cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface),
 			       gmxx_rxx_int_reg.u64);
 	}
 	if (priv->phydev == NULL) {
 		link_info = cvmx_helper_link_autoconf(priv->port);
 		priv->link_info = link_info.u64;
 	}
 
 	if (use_global_register_lock)
 		spin_unlock_irqrestore(&global_register_lock, flags);
 	else {
 		mutex_unlock(&priv->phydev->bus->mdio_lock);
 	}
 
 	if (priv->phydev == NULL) {
 		/* Tell core. */
 		if (link_info.s.link_up) {
 			if (!netif_carrier_ok(dev))
 				netif_carrier_on(dev);
 			if (priv->queue != -1)
-				DEBUGPRINT("%s: %u Mbps %s duplex, "
+				printk_ratelimited("%s: %u Mbps %s duplex, "
 					   "port %2d, queue %2d\n",
 					   dev->name, link_info.s.speed,
 					   (link_info.s.full_duplex) ?
 					   "Full" : "Half",
 					   priv->port, priv->queue);
 			else
-				DEBUGPRINT("%s: %u Mbps %s duplex, "
+				printk_ratelimited("%s: %u Mbps %s duplex, "
 					   "port %2d, POW\n",
 					   dev->name, link_info.s.speed,
 					   (link_info.s.full_duplex) ?
 					   "Full" : "Half",
 					   priv->port);
 		} else {
 			if (netif_carrier_ok(dev))
 				netif_carrier_off(dev);
-			DEBUGPRINT("%s: Link down\n", dev->name);
+			printk_ratelimited("%s: Link down\n", dev->name);
 		}
 	}
 }
 
 static irqreturn_t cvm_oct_rgmii_rml_interrupt(int cpl, void *dev_id)
 {
 	union cvmx_npi_rsl_int_blocks rsl_int_blocks;
 	int index;
 	irqreturn_t return_status = IRQ_NONE;
 
 	rsl_int_blocks.u64 = cvmx_read_csr(CVMX_NPI_RSL_INT_BLOCKS);
 
 	/* Check and see if this interrupt was caused by the GMX0 block */
 	if (rsl_int_blocks.s.gmx0) {
 
 		int interface = 0;
 		/* Loop through every port of this interface */
 		for (index = 0;
 		     index < cvmx_helper_ports_on_interface(interface);
 		     index++) {
 
 			/* Read the GMX interrupt status bits */
 			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
 			gmx_rx_int_reg.u64 =
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
 					  (index, interface));
 			gmx_rx_int_reg.u64 &=
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
 					  (index, interface));
 			/* Poll the port if inband status changed */
 			if (gmx_rx_int_reg.s.phy_dupx
 			    || gmx_rx_int_reg.s.phy_link
 			    || gmx_rx_int_reg.s.phy_spd) {
 
 				struct net_device *dev =
 				    cvm_oct_device[cvmx_helper_get_ipd_port
 						   (interface, index)];
 				struct octeon_ethernet *priv = netdev_priv(dev);
 
 				if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
 					queue_work(cvm_oct_poll_queue, &priv->port_work);
 
 				gmx_rx_int_reg.u64 = 0;
 				gmx_rx_int_reg.s.phy_dupx = 1;
 				gmx_rx_int_reg.s.phy_link = 1;
 				gmx_rx_int_reg.s.phy_spd = 1;
 				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
 					       (index, interface),
 					       gmx_rx_int_reg.u64);
 				return_status = IRQ_HANDLED;
 			}
 		}
 	}
 
 	/* Check and see if this interrupt was caused by the GMX1 block */
 	if (rsl_int_blocks.s.gmx1) {
 
 		int interface = 1;
 		/* Loop through every port of this interface */
 		for (index = 0;
 		     index < cvmx_helper_ports_on_interface(interface);
 		     index++) {
 
 			/* Read the GMX interrupt status bits */
 			union cvmx_gmxx_rxx_int_reg gmx_rx_int_reg;
 			gmx_rx_int_reg.u64 =
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_REG
 					  (index, interface));
 			gmx_rx_int_reg.u64 &=
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
 					  (index, interface));
 			/* Poll the port if inband status changed */
 			if (gmx_rx_int_reg.s.phy_dupx
 			    || gmx_rx_int_reg.s.phy_link
 			    || gmx_rx_int_reg.s.phy_spd) {
 
 				struct net_device *dev =
 				    cvm_oct_device[cvmx_helper_get_ipd_port
 						   (interface, index)];
 				struct octeon_ethernet *priv = netdev_priv(dev);
 
 				if (dev && !atomic_read(&cvm_oct_poll_queue_stopping))
 					queue_work(cvm_oct_poll_queue, &priv->port_work);
 
 				gmx_rx_int_reg.u64 = 0;
 				gmx_rx_int_reg.s.phy_dupx = 1;
 				gmx_rx_int_reg.s.phy_link = 1;
 				gmx_rx_int_reg.s.phy_spd = 1;
 				cvmx_write_csr(CVMX_GMXX_RXX_INT_REG
 					       (index, interface),
 					       gmx_rx_int_reg.u64);
 				return_status = IRQ_HANDLED;
 			}
 		}
 	}
 	return return_status;
 }
 
 int cvm_oct_rgmii_open(struct net_device *dev)
 {
 	union cvmx_gmxx_prtx_cfg gmx_cfg;
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	int interface = INTERFACE(priv->port);
 	int index = INDEX(priv->port);
 	cvmx_helper_link_info_t link_info;
 
 	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
 	gmx_cfg.s.en = 1;
 	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
 
 	if (!octeon_is_simulation()) {
 		link_info = cvmx_helper_link_get(priv->port);
 		if (!link_info.s.link_up)
 			netif_carrier_off(dev);
 	}
 
 	return 0;
 }
 
 int cvm_oct_rgmii_stop(struct net_device *dev)
 {
 	union cvmx_gmxx_prtx_cfg gmx_cfg;
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	int interface = INTERFACE(priv->port);
 	int index = INDEX(priv->port);
 
 	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
 	gmx_cfg.s.en = 0;
 	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
 	return 0;
 }
 
 static void cvm_oct_rgmii_immediate_poll(struct work_struct *work)
 {
 	struct octeon_ethernet *priv = container_of(work, struct octeon_ethernet, port_work);
 	cvm_oct_rgmii_poll(cvm_oct_device[priv->port]);
 }
 
 int cvm_oct_rgmii_init(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	int r;
 
 	cvm_oct_common_init(dev);
 	dev->netdev_ops->ndo_stop(dev);
 	INIT_WORK(&priv->port_work, cvm_oct_rgmii_immediate_poll);
 	/*
 	 * Due to GMX errata in CN3XXX series chips, it is necessary
 	 * to take the link down immediately when the PHY changes
 	 * state. In order to do this we call the poll function every
 	 * time the RGMII inband status changes. This may cause
 	 * problems if the PHY doesn't implement inband status
 	 * properly.
 	 */
 	if (number_rgmii_ports == 0) {
 		r = request_irq(OCTEON_IRQ_RML, cvm_oct_rgmii_rml_interrupt,
 				IRQF_SHARED, "RGMII", &number_rgmii_ports);
 		if (r != 0)
 			return r;
 	}
 	number_rgmii_ports++;
 
 	/*
 	 * Only true RGMII ports need to be polled. In GMII mode, port
 	 * 0 is really a RGMII port.
 	 */
 	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
 	     && (priv->port == 0))
 	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
 
 		if (!octeon_is_simulation()) {
 
 			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
 			int interface = INTERFACE(priv->port);
 			int index = INDEX(priv->port);
 
 			/*
 			 * Enable interrupts on inband status changes
 			 * for this port.
 			 */
 			gmx_rx_int_en.u64 =
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
 					  (index, interface));
 			gmx_rx_int_en.s.phy_dupx = 1;
 			gmx_rx_int_en.s.phy_link = 1;
 			gmx_rx_int_en.s.phy_spd = 1;
 			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
 				       gmx_rx_int_en.u64);
 			priv->poll = cvm_oct_rgmii_poll;
 		}
 	}
 
 	return 0;
 }
 
 void cvm_oct_rgmii_uninit(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	cvm_oct_common_uninit(dev);
 
 	/*
 	 * Only true RGMII ports need to be polled. In GMII mode, port
 	 * 0 is really a RGMII port.
 	 */
 	if (((priv->imode == CVMX_HELPER_INTERFACE_MODE_GMII)
 	     && (priv->port == 0))
 	    || (priv->imode == CVMX_HELPER_INTERFACE_MODE_RGMII)) {
 
 		if (!octeon_is_simulation()) {
 
 			union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
 			int interface = INTERFACE(priv->port);
 			int index = INDEX(priv->port);
 
 			/*
 			 * Disable interrupts on inband status changes
 			 * for this port.
 			 */
 			gmx_rx_int_en.u64 =
 			    cvmx_read_csr(CVMX_GMXX_RXX_INT_EN
 					  (index, interface));
 			gmx_rx_int_en.s.phy_dupx = 0;
 			gmx_rx_int_en.s.phy_link = 0;
 			gmx_rx_int_en.s.phy_spd = 0;
 			cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, interface),
 				       gmx_rx_int_en.u64);
 		}
 	}
 
 	/* Remove the interrupt handler when the last port is removed. */
 	number_rgmii_ports--;
 	if (number_rgmii_ports == 0)
 		free_irq(OCTEON_IRQ_RML, &number_rgmii_ports);
 	cancel_work_sync(&priv->port_work);
 }
drivers/staging/octeon/ethernet-rx.c
1 /********************************************************************** 1 /**********************************************************************
2 * Author: Cavium Networks 2 * Author: Cavium Networks
3 * 3 *
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2010 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This file is distributed in the hope that it will be useful, but 13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more 16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software 20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/. 22 * or visit http://www.gnu.org/licenses/.
23 * 23 *
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26 **********************************************************************/ 26 **********************************************************************/
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/kernel.h> 28 #include <linux/kernel.h>
29 #include <linux/cache.h> 29 #include <linux/cache.h>
30 #include <linux/cpumask.h> 30 #include <linux/cpumask.h>
31 #include <linux/netdevice.h> 31 #include <linux/netdevice.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/etherdevice.h> 33 #include <linux/etherdevice.h>
34 #include <linux/ip.h> 34 #include <linux/ip.h>
35 #include <linux/string.h> 35 #include <linux/string.h>
36 #include <linux/prefetch.h> 36 #include <linux/prefetch.h>
37 #include <linux/ratelimit.h>
37 #include <linux/smp.h> 38 #include <linux/smp.h>
38 #include <net/dst.h> 39 #include <net/dst.h>
39 #ifdef CONFIG_XFRM 40 #ifdef CONFIG_XFRM
40 #include <linux/xfrm.h> 41 #include <linux/xfrm.h>
41 #include <net/xfrm.h> 42 #include <net/xfrm.h>
42 #endif /* CONFIG_XFRM */ 43 #endif /* CONFIG_XFRM */
43 44
44 #include <asm/atomic.h> 45 #include <asm/atomic.h>
45 46
46 #include <asm/octeon/octeon.h> 47 #include <asm/octeon/octeon.h>
47 48
48 #include "ethernet-defines.h" 49 #include "ethernet-defines.h"
49 #include "ethernet-mem.h" 50 #include "ethernet-mem.h"
50 #include "ethernet-rx.h" 51 #include "ethernet-rx.h"
51 #include "octeon-ethernet.h" 52 #include "octeon-ethernet.h"
52 #include "ethernet-util.h" 53 #include "ethernet-util.h"
53 54
54 #include "cvmx-helper.h" 55 #include "cvmx-helper.h"
55 #include "cvmx-wqe.h" 56 #include "cvmx-wqe.h"
56 #include "cvmx-fau.h" 57 #include "cvmx-fau.h"
57 #include "cvmx-pow.h" 58 #include "cvmx-pow.h"
58 #include "cvmx-pip.h" 59 #include "cvmx-pip.h"
59 #include "cvmx-scratch.h" 60 #include "cvmx-scratch.h"
60 61
61 #include "cvmx-gmxx-defs.h" 62 #include "cvmx-gmxx-defs.h"
62 63
63 struct cvm_napi_wrapper { 64 struct cvm_napi_wrapper {
64 struct napi_struct napi; 65 struct napi_struct napi;
65 } ____cacheline_aligned_in_smp; 66 } ____cacheline_aligned_in_smp;
66 67
67 static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp; 68 static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
68 69
69 struct cvm_oct_core_state { 70 struct cvm_oct_core_state {
70 int baseline_cores; 71 int baseline_cores;
71 /* 72 /*
72 * The number of additional cores that could be processing 73 * The number of additional cores that could be processing
73 * input packtes. 74 * input packtes.
74 */ 75 */
75 atomic_t available_cores; 76 atomic_t available_cores;
76 cpumask_t cpu_state; 77 cpumask_t cpu_state;
77 } ____cacheline_aligned_in_smp; 78 } ____cacheline_aligned_in_smp;
78 79
79 static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp; 80 static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
80 81
81 static void cvm_oct_enable_napi(void *_) 82 static void cvm_oct_enable_napi(void *_)
82 { 83 {
83 int cpu = smp_processor_id(); 84 int cpu = smp_processor_id();
84 napi_schedule(&cvm_oct_napi[cpu].napi); 85 napi_schedule(&cvm_oct_napi[cpu].napi);
85 } 86 }
86 87
87 static void cvm_oct_enable_one_cpu(void) 88 static void cvm_oct_enable_one_cpu(void)
88 { 89 {
89 int v; 90 int v;
90 int cpu; 91 int cpu;
91 92
92 /* Check to see if more CPUs are available for receive processing... */ 93 /* Check to see if more CPUs are available for receive processing... */
93 v = atomic_sub_if_positive(1, &core_state.available_cores); 94 v = atomic_sub_if_positive(1, &core_state.available_cores);
94 if (v < 0) 95 if (v < 0)
95 return; 96 return;
96 97
97 /* ... if a CPU is available, Turn on NAPI polling for that CPU. */ 98 /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
98 for_each_online_cpu(cpu) { 99 for_each_online_cpu(cpu) {
99 if (!cpu_test_and_set(cpu, core_state.cpu_state)) { 100 if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
100 v = smp_call_function_single(cpu, cvm_oct_enable_napi, 101 v = smp_call_function_single(cpu, cvm_oct_enable_napi,
101 NULL, 0); 102 NULL, 0);
102 if (v) 103 if (v)
103 panic("Can't enable NAPI."); 104 panic("Can't enable NAPI.");
104 break; 105 break;
105 } 106 }
106 } 107 }
107 } 108 }
108 109
109 static void cvm_oct_no_more_work(void) 110 static void cvm_oct_no_more_work(void)
110 { 111 {
111 int cpu = smp_processor_id(); 112 int cpu = smp_processor_id();
112 113
113 /* 114 /*
114 * CPU zero is special. It always has the irq enabled when 115 * CPU zero is special. It always has the irq enabled when
115 * waiting for incoming packets. 116 * waiting for incoming packets.
116 */ 117 */
117 if (cpu == 0) { 118 if (cpu == 0) {
118 enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group); 119 enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
119 return; 120 return;
120 } 121 }
121 122
122 cpu_clear(cpu, core_state.cpu_state); 123 cpu_clear(cpu, core_state.cpu_state);
123 atomic_add(1, &core_state.available_cores); 124 atomic_add(1, &core_state.available_cores);
124 } 125 }
125 126
126 /** 127 /**
127 * cvm_oct_do_interrupt - interrupt handler. 128 * cvm_oct_do_interrupt - interrupt handler.
128 * 129 *
129 * The interrupt occurs whenever the POW has packets in our group. 130 * The interrupt occurs whenever the POW has packets in our group.
130 * 131 *
131 */ 132 */
132 static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id) 133 static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
133 { 134 {
134 /* Disable the IRQ and start napi_poll. */ 135 /* Disable the IRQ and start napi_poll. */
135 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); 136 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
136 cvm_oct_enable_napi(NULL); 137 cvm_oct_enable_napi(NULL);
137 138
138 return IRQ_HANDLED; 139 return IRQ_HANDLED;
139 } 140 }
140 141
141 /** 142 /**
142 * cvm_oct_check_rcv_error - process receive errors 143 * cvm_oct_check_rcv_error - process receive errors
143 * @work: Work queue entry pointing to the packet. 144 * @work: Work queue entry pointing to the packet.
144 * 145 *
145 * Returns Non-zero if the packet can be dropped, zero otherwise. 146 * Returns Non-zero if the packet can be dropped, zero otherwise.
146 */ 147 */
147 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work) 148 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
148 { 149 {
149 if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) { 150 if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
150 /* 151 /*
151 * Ignore length errors on min size packets. Some 152 * Ignore length errors on min size packets. Some
152 * equipment incorrectly pads packets to 64+4FCS 153 * equipment incorrectly pads packets to 64+4FCS
153 * instead of 60+4FCS. Note these packets still get 154 * instead of 60+4FCS. Note these packets still get
154 * counted as frame errors. 155 * counted as frame errors.
155 */ 156 */
156 } else 157 } else
157 if (USE_10MBPS_PREAMBLE_WORKAROUND 158 if (USE_10MBPS_PREAMBLE_WORKAROUND
158 && ((work->word2.snoip.err_code == 5) 159 && ((work->word2.snoip.err_code == 5)
159 || (work->word2.snoip.err_code == 7))) { 160 || (work->word2.snoip.err_code == 7))) {
160 161
161 /* 162 /*
162 * We received a packet with either an alignment error 163 * We received a packet with either an alignment error
163 * or a FCS error. This may be signalling that we are 164 * or a FCS error. This may be signalling that we are
164 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK} 165 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK}
165 * off. If this is the case we need to parse the 166 * off. If this is the case we need to parse the
166 * packet to determine if we can remove a non spec 167 * packet to determine if we can remove a non spec
167 * preamble and generate a correct packet. 168 * preamble and generate a correct packet.
168 */ 169 */
169 int interface = cvmx_helper_get_interface_num(work->ipprt); 170 int interface = cvmx_helper_get_interface_num(work->ipprt);
170 int index = cvmx_helper_get_interface_index_num(work->ipprt); 171 int index = cvmx_helper_get_interface_index_num(work->ipprt);
171 union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl; 172 union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
172 gmxx_rxx_frm_ctl.u64 = 173 gmxx_rxx_frm_ctl.u64 =
173 cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface)); 174 cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
174 if (gmxx_rxx_frm_ctl.s.pre_chk == 0) { 175 if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
175 176
176 uint8_t *ptr = 177 uint8_t *ptr =
177 cvmx_phys_to_ptr(work->packet_ptr.s.addr); 178 cvmx_phys_to_ptr(work->packet_ptr.s.addr);
178 int i = 0; 179 int i = 0;
179 180
180 while (i < work->len - 1) { 181 while (i < work->len - 1) {
181 if (*ptr != 0x55) 182 if (*ptr != 0x55)
182 break; 183 break;
183 ptr++; 184 ptr++;
184 i++; 185 i++;
185 } 186 }
186 187
187 if (*ptr == 0xd5) { 188 if (*ptr == 0xd5) {
188 /* 189 /*
189 DEBUGPRINT("Port %d received 0xd5 preamble\n", work->ipprt); 190 printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
190 */ 191 */
191 work->packet_ptr.s.addr += i + 1; 192 work->packet_ptr.s.addr += i + 1;
192 work->len -= i + 5; 193 work->len -= i + 5;
193 } else if ((*ptr & 0xf) == 0xd) { 194 } else if ((*ptr & 0xf) == 0xd) {
194 /* 195 /*
195 DEBUGPRINT("Port %d received 0x?d preamble\n", work->ipprt); 196 printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
196 */ 197 */
197 work->packet_ptr.s.addr += i; 198 work->packet_ptr.s.addr += i;
198 work->len -= i + 4; 199 work->len -= i + 4;
199 for (i = 0; i < work->len; i++) { 200 for (i = 0; i < work->len; i++) {
200 *ptr = 201 *ptr =
201 ((*ptr & 0xf0) >> 4) | 202 ((*ptr & 0xf0) >> 4) |
202 ((*(ptr + 1) & 0xf) << 4); 203 ((*(ptr + 1) & 0xf) << 4);
203 ptr++; 204 ptr++;
204 } 205 }
205 } else { 206 } else {
206 DEBUGPRINT("Port %d unknown preamble, packet " 207 printk_ratelimited("Port %d unknown preamble, packet "
207 "dropped\n", 208 "dropped\n",
208 work->ipprt); 209 work->ipprt);
209 /* 210 /*
210 cvmx_helper_dump_packet(work); 211 cvmx_helper_dump_packet(work);
211 */ 212 */
212 cvm_oct_free_work(work); 213 cvm_oct_free_work(work);
213 return 1; 214 return 1;
214 } 215 }
215 } 216 }
216 } else { 217 } else {
217 DEBUGPRINT("Port %d receive error code %d, packet dropped\n", 218 printk_ratelimited("Port %d receive error code %d, packet dropped\n",
218 work->ipprt, work->word2.snoip.err_code); 219 work->ipprt, work->word2.snoip.err_code);
219 cvm_oct_free_work(work); 220 cvm_oct_free_work(work);
220 return 1; 221 return 1;
221 } 222 }
222 223
223 return 0; 224 return 0;
224 } 225 }
225 226
226 /** 227 /**
227 * cvm_oct_napi_poll - the NAPI poll function. 228 * cvm_oct_napi_poll - the NAPI poll function.
228 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller 229 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
229 * @budget: Maximum number of packets to receive. 230 * @budget: Maximum number of packets to receive.
230 * 231 *
231 * Returns the number of packets processed. 232 * Returns the number of packets processed.
232 */ 233 */
233 static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) 234 static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
234 { 235 {
235 const int coreid = cvmx_get_core_num(); 236 const int coreid = cvmx_get_core_num();
236 uint64_t old_group_mask; 237 uint64_t old_group_mask;
237 uint64_t old_scratch; 238 uint64_t old_scratch;
238 int rx_count = 0; 239 int rx_count = 0;
239 int did_work_request = 0; 240 int did_work_request = 0;
240 int packet_not_copied; 241 int packet_not_copied;
241 242
242 /* Prefetch cvm_oct_device since we know we need it soon */ 243 /* Prefetch cvm_oct_device since we know we need it soon */
243 prefetch(cvm_oct_device); 244 prefetch(cvm_oct_device);
244 245
245 if (USE_ASYNC_IOBDMA) { 246 if (USE_ASYNC_IOBDMA) {
246 /* Save scratch in case userspace is using it */ 247 /* Save scratch in case userspace is using it */
247 CVMX_SYNCIOBDMA; 248 CVMX_SYNCIOBDMA;
248 old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 249 old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
249 } 250 }
250 251
251 /* Only allow work for our group (and preserve priorities) */ 252 /* Only allow work for our group (and preserve priorities) */
252 old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid)); 253 old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
253 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), 254 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
254 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group); 255 (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
255 256
256 if (USE_ASYNC_IOBDMA) { 257 if (USE_ASYNC_IOBDMA) {
257 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 258 cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
258 did_work_request = 1; 259 did_work_request = 1;
259 } 260 }
260 261
261 while (rx_count < budget) { 262 while (rx_count < budget) {
262 struct sk_buff *skb = NULL; 263 struct sk_buff *skb = NULL;
263 struct sk_buff **pskb = NULL; 264 struct sk_buff **pskb = NULL;
264 int skb_in_hw; 265 int skb_in_hw;
265 cvmx_wqe_t *work; 266 cvmx_wqe_t *work;
266 267
267 if (USE_ASYNC_IOBDMA && did_work_request) 268 if (USE_ASYNC_IOBDMA && did_work_request)
268 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH); 269 work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
269 else 270 else
270 work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT); 271 work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
271 272
272 prefetch(work); 273 prefetch(work);
273 did_work_request = 0; 274 did_work_request = 0;
274 if (work == NULL) { 275 if (work == NULL) {
275 union cvmx_pow_wq_int wq_int; 276 union cvmx_pow_wq_int wq_int;
276 wq_int.u64 = 0; 277 wq_int.u64 = 0;
277 wq_int.s.iq_dis = 1 << pow_receive_group; 278 wq_int.s.iq_dis = 1 << pow_receive_group;
278 wq_int.s.wq_int = 1 << pow_receive_group; 279 wq_int.s.wq_int = 1 << pow_receive_group;
279 cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64); 280 cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
280 break; 281 break;
281 } 282 }
282 pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *)); 283 pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
283 prefetch(pskb); 284 prefetch(pskb);
284 285
285 if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) { 286 if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
286 cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT); 287 cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
287 did_work_request = 1; 288 did_work_request = 1;
288 } 289 }
289 290
290 if (rx_count == 0) { 291 if (rx_count == 0) {
291 /* 292 /*
292 * First time through, see if there is enough 293 * First time through, see if there is enough
293 * work waiting to merit waking another 294 * work waiting to merit waking another
294 * CPU. 295 * CPU.
295 */ 296 */
296 union cvmx_pow_wq_int_cntx counts; 297 union cvmx_pow_wq_int_cntx counts;
297 int backlog; 298 int backlog;
298 int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores); 299 int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
299 counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group)); 300 counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
300 backlog = counts.s.iq_cnt + counts.s.ds_cnt; 301 backlog = counts.s.iq_cnt + counts.s.ds_cnt;
301 if (backlog > budget * cores_in_use && napi != NULL) 302 if (backlog > budget * cores_in_use && napi != NULL)
302 cvm_oct_enable_one_cpu(); 303 cvm_oct_enable_one_cpu();
303 } 304 }
304 305
305 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1; 306 skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
306 if (likely(skb_in_hw)) { 307 if (likely(skb_in_hw)) {
307 skb = *pskb; 308 skb = *pskb;
308 prefetch(&skb->head); 309 prefetch(&skb->head);
309 prefetch(&skb->len); 310 prefetch(&skb->len);
310 } 311 }
311 prefetch(cvm_oct_device[work->ipprt]); 312 prefetch(cvm_oct_device[work->ipprt]);
312 313
313 /* Immediately throw away all packets with receive errors */ 314 /* Immediately throw away all packets with receive errors */
314 if (unlikely(work->word2.snoip.rcv_error)) { 315 if (unlikely(work->word2.snoip.rcv_error)) {
315 if (cvm_oct_check_rcv_error(work)) 316 if (cvm_oct_check_rcv_error(work))
316 continue; 317 continue;
317 } 318 }
318 319
319 /* 320 /*
320 * We can only use the zero copy path if skbuffs are 321 * We can only use the zero copy path if skbuffs are
321 * in the FPA pool and the packet fits in a single 322 * in the FPA pool and the packet fits in a single
322 * buffer. 323 * buffer.
323 */ 324 */
324 if (likely(skb_in_hw)) { 325 if (likely(skb_in_hw)) {
325 skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head); 326 skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
326 prefetch(skb->data); 327 prefetch(skb->data);
327 skb->len = work->len; 328 skb->len = work->len;
328 skb_set_tail_pointer(skb, skb->len); 329 skb_set_tail_pointer(skb, skb->len);
329 packet_not_copied = 1; 330 packet_not_copied = 1;
330 } else { 331 } else {
331 /* 332 /*
332 * We have to copy the packet. First allocate 333 * We have to copy the packet. First allocate
333 * an skbuff for it. 334 * an skbuff for it.
334 */ 335 */
335 skb = dev_alloc_skb(work->len); 336 skb = dev_alloc_skb(work->len);
336 if (!skb) { 337 if (!skb) {
337 DEBUGPRINT("Port %d failed to allocate skbuff, packet dropped\n", 338 printk_ratelimited("Port %d failed to allocate "
338 work->ipprt); 339 "skbuff, packet dropped\n",
340 work->ipprt);
339 cvm_oct_free_work(work); 341 cvm_oct_free_work(work);
340 continue; 342 continue;
341 } 343 }
342 344
343 /* 345 /*
344 * Check if we've received a packet that was 346 * Check if we've received a packet that was
345 * entirely stored in the work entry. 347 * entirely stored in the work entry.
346 */ 348 */
347 if (unlikely(work->word2.s.bufs == 0)) { 349 if (unlikely(work->word2.s.bufs == 0)) {
348 uint8_t *ptr = work->packet_data; 350 uint8_t *ptr = work->packet_data;
349 351
350 if (likely(!work->word2.s.not_IP)) { 352 if (likely(!work->word2.s.not_IP)) {
351 /* 353 /*
352 * The beginning of the packet 354 * The beginning of the packet
353 * moves for IP packets. 355 * moves for IP packets.
354 */ 356 */
355 if (work->word2.s.is_v6) 357 if (work->word2.s.is_v6)
356 ptr += 2; 358 ptr += 2;
357 else 359 else
358 ptr += 6; 360 ptr += 6;
359 } 361 }
360 memcpy(skb_put(skb, work->len), ptr, work->len); 362 memcpy(skb_put(skb, work->len), ptr, work->len);
361 /* No packet buffers to free */ 363 /* No packet buffers to free */
362 } else { 364 } else {
363 int segments = work->word2.s.bufs; 365 int segments = work->word2.s.bufs;
364 union cvmx_buf_ptr segment_ptr = work->packet_ptr; 366 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
365 int len = work->len; 367 int len = work->len;
366 368
367 while (segments--) { 369 while (segments--) {
368 union cvmx_buf_ptr next_ptr = 370 union cvmx_buf_ptr next_ptr =
369 *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8); 371 *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
370 372
371 /* 373 /*
372 * Octeon Errata PKI-100: The segment size is 374 * Octeon Errata PKI-100: The segment size is
373 * wrong. Until it is fixed, calculate the 375 * wrong. Until it is fixed, calculate the
374 * segment size based on the packet pool 376 * segment size based on the packet pool
375 * buffer size. When it is fixed, the 377 * buffer size. When it is fixed, the
376 * following line should be replaced with this 378 * following line should be replaced with this
377 * one: int segment_size = 379 * one: int segment_size =
378 * segment_ptr.s.size; 380 * segment_ptr.s.size;
379 */ 381 */
380 int segment_size = CVMX_FPA_PACKET_POOL_SIZE - 382 int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
381 (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7)); 383 (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
382 /* 384 /*
383 * Don't copy more than what 385 * Don't copy more than what
384 * is left in the packet. 386 * is left in the packet.
385 */ 387 */
386 if (segment_size > len) 388 if (segment_size > len)
387 segment_size = len; 389 segment_size = len;
388 /* Copy the data into the packet */ 390 /* Copy the data into the packet */
389 memcpy(skb_put(skb, segment_size), 391 memcpy(skb_put(skb, segment_size),
390 cvmx_phys_to_ptr(segment_ptr.s.addr), 392 cvmx_phys_to_ptr(segment_ptr.s.addr),
391 segment_size); 393 segment_size);
392 len -= segment_size; 394 len -= segment_size;
393 segment_ptr = next_ptr; 395 segment_ptr = next_ptr;
394 } 396 }
395 } 397 }
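/*
 * How the workaround recovers the size: segment_ptr.s.back counts
 * 128-byte cache lines from the packet data back to the start of
 * the FPA buffer, so ((addr >> 7) - back) << 7 is the buffer's
 * aligned base address, and the pool buffer size minus the data's
 * offset within the buffer is what this segment can hold.
 */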
396 packet_not_copied = 0; 398 packet_not_copied = 0;
397 } 399 }
398 400
399 if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) && 401 if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
400 cvm_oct_device[work->ipprt])) { 402 cvm_oct_device[work->ipprt])) {
401 struct net_device *dev = cvm_oct_device[work->ipprt]; 403 struct net_device *dev = cvm_oct_device[work->ipprt];
402 struct octeon_ethernet *priv = netdev_priv(dev); 404 struct octeon_ethernet *priv = netdev_priv(dev);
403 405
404 /* 406 /*
405 * Only accept packets for devices that are 407 * Only accept packets for devices that are
406 * currently up. 408 * currently up.
407 */ 409 */
408 if (likely(dev->flags & IFF_UP)) { 410 if (likely(dev->flags & IFF_UP)) {
409 skb->protocol = eth_type_trans(skb, dev); 411 skb->protocol = eth_type_trans(skb, dev);
410 skb->dev = dev; 412 skb->dev = dev;
411 413
412 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) 414 if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
413 skb->ip_summed = CHECKSUM_NONE; 415 skb->ip_summed = CHECKSUM_NONE;
414 else 416 else
415 skb->ip_summed = CHECKSUM_UNNECESSARY; 417 skb->ip_summed = CHECKSUM_UNNECESSARY;
416 418
417 /* Increment RX stats for virtual ports */ 419 /* Increment RX stats for virtual ports */
418 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) { 420 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
419 #ifdef CONFIG_64BIT 421 #ifdef CONFIG_64BIT
420 atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets); 422 atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
421 atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes); 423 atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
422 #else 424 #else
423 atomic_add(1, (atomic_t *)&priv->stats.rx_packets); 425 atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
424 atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes); 426 atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
425 #endif 427 #endif
426 } 428 }
427 netif_receive_skb(skb); 429 netif_receive_skb(skb);
428 rx_count++; 430 rx_count++;
429 } else { 431 } else {
430 /* Drop any packet received for a device that isn't up */ 432 /* Drop any packet received for a device that isn't up */
431 /* 433 /*
432 DEBUGPRINT("%s: Device not up, packet dropped\n", 434 printk_ratelimited("%s: Device not up, packet dropped\n",
433 dev->name); 435 dev->name);
434 */ 436 */
435 #ifdef CONFIG_64BIT 437 #ifdef CONFIG_64BIT
436 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped); 438 atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
437 #else 439 #else
438 atomic_add(1, (atomic_t *)&priv->stats.rx_dropped); 440 atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
439 #endif 441 #endif
440 dev_kfree_skb_irq(skb); 442 dev_kfree_skb_irq(skb);
441 } 443 }
442 } else { 444 } else {
443 /* 445 /*
444 * Drop any packet received for a device that 446 * Drop any packet received for a device that
445 * doesn't exist. 447 * doesn't exist.
446 */ 448 */
447 DEBUGPRINT("Port %d not controlled by Linux, packet dropped\n", 449 printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
448 work->ipprt); 450 work->ipprt);
449 dev_kfree_skb_irq(skb); 451 dev_kfree_skb_irq(skb);
450 } 452 }
451 /* 453 /*
452 * Check to see if the skbuff and work share the same 454 * Check to see if the skbuff and work share the same
453 * packet buffer. 455 * packet buffer.
454 */ 456 */
455 if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) { 457 if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
456 /* 458 /*
457 * This buffer needs to be replaced; increment 459 * This buffer needs to be replaced; increment
458 * the number of buffers we need to free by 460 * the number of buffers we need to free by
459 * one. 461 * one.
460 */ 462 */
461 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 463 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
462 1); 464 1);
463 465
464 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 466 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
465 DONT_WRITEBACK(1)); 467 DONT_WRITEBACK(1));
466 } else { 468 } else {
467 cvm_oct_free_work(work); 469 cvm_oct_free_work(work);
468 } 470 }
469 } 471 }
470 /* Restore the original POW group mask */ 472 /* Restore the original POW group mask */
471 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask); 473 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
472 if (USE_ASYNC_IOBDMA) { 474 if (USE_ASYNC_IOBDMA) {
473 /* Restore the scratch area */ 475 /* Restore the scratch area */
474 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 476 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
475 } 477 }
476 cvm_oct_rx_refill_pool(0); 478 cvm_oct_rx_refill_pool(0);
477 479
478 if (rx_count < budget && napi != NULL) { 480 if (rx_count < budget && napi != NULL) {
479 /* No more work */ 481 /* No more work */
480 napi_complete(napi); 482 napi_complete(napi);
481 cvm_oct_no_more_work(); 483 cvm_oct_no_more_work();
482 } 484 }
483 return rx_count; 485 return rx_count;
484 } 486 }
485 487
486 #ifdef CONFIG_NET_POLL_CONTROLLER 488 #ifdef CONFIG_NET_POLL_CONTROLLER
487 /** 489 /**
488 * cvm_oct_poll_controller - poll for receive packets 490 * cvm_oct_poll_controller - poll for receive packets
489 * on a device. 491 * on a device.
490 * 492 *
491 * @dev: Device to poll. Unused 493 * @dev: Device to poll. Unused
492 */ 494 */
493 void cvm_oct_poll_controller(struct net_device *dev) 495 void cvm_oct_poll_controller(struct net_device *dev)
494 { 496 {
495 cvm_oct_napi_poll(NULL, 16); 497 cvm_oct_napi_poll(NULL, 16);
496 } 498 }
497 #endif 499 #endif
498 500
499 void cvm_oct_rx_initialize(void) 501 void cvm_oct_rx_initialize(void)
500 { 502 {
501 int i; 503 int i;
502 struct net_device *dev_for_napi = NULL; 504 struct net_device *dev_for_napi = NULL;
503 union cvmx_pow_wq_int_thrx int_thr; 505 union cvmx_pow_wq_int_thrx int_thr;
504 union cvmx_pow_wq_int_pc int_pc; 506 union cvmx_pow_wq_int_pc int_pc;
505 507
506 for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) { 508 for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
507 if (cvm_oct_device[i]) { 509 if (cvm_oct_device[i]) {
508 dev_for_napi = cvm_oct_device[i]; 510 dev_for_napi = cvm_oct_device[i];
509 break; 511 break;
510 } 512 }
511 } 513 }
512 514
513 if (NULL == dev_for_napi) 515 if (NULL == dev_for_napi)
514 panic("No net_devices were allocated."); 516 panic("No net_devices were allocated.");
515 517
516 if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus()) 518 if (max_rx_cpus > 1 && max_rx_cpus < num_online_cpus())
517 atomic_set(&core_state.available_cores, max_rx_cpus); 519 atomic_set(&core_state.available_cores, max_rx_cpus);
518 else 520 else
519 atomic_set(&core_state.available_cores, num_online_cpus()); 521 atomic_set(&core_state.available_cores, num_online_cpus());
520 core_state.baseline_cores = atomic_read(&core_state.available_cores); 522 core_state.baseline_cores = atomic_read(&core_state.available_cores);
521 523
522 core_state.cpu_state = CPU_MASK_NONE; 524 core_state.cpu_state = CPU_MASK_NONE;
523 for_each_possible_cpu(i) { 525 for_each_possible_cpu(i) {
524 netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi, 526 netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
525 cvm_oct_napi_poll, rx_napi_weight); 527 cvm_oct_napi_poll, rx_napi_weight);
526 napi_enable(&cvm_oct_napi[i].napi); 528 napi_enable(&cvm_oct_napi[i].napi);
527 } 529 }
528 /* Register an IRQ handler to receive POW interrupts */ 530 /* Register an IRQ handler to receive POW interrupts */
529 i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, 531 i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
530 cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device); 532 cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
531 533
532 if (i) 534 if (i)
533 panic("Could not acquire Ethernet IRQ %d\n", 535 panic("Could not acquire Ethernet IRQ %d\n",
534 OCTEON_IRQ_WORKQ0 + pow_receive_group); 536 OCTEON_IRQ_WORKQ0 + pow_receive_group);
535 537
536 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group); 538 disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
537 539
538 int_thr.u64 = 0; 540 int_thr.u64 = 0;
539 int_thr.s.tc_en = 1; 541 int_thr.s.tc_en = 1;
540 int_thr.s.tc_thr = 1; 542 int_thr.s.tc_thr = 1;
541 /* Enable POW interrupt when our port has at least one packet */ 543 /* Enable POW interrupt when our port has at least one packet */
542 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64); 544 cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
543 545
544 int_pc.u64 = 0; 546 int_pc.u64 = 0;
545 int_pc.s.pc_thr = 5; 547 int_pc.s.pc_thr = 5;
546 cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64); 548 cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
547 549
548 550
549 /* Schedule NAPI now. This will indirectly enable interrupts. */ 551 /* Schedule NAPI now. This will indirectly enable interrupts. */
550 cvm_oct_enable_one_cpu(); 552 cvm_oct_enable_one_cpu();
551 } 553 }
552 554
553 void cvm_oct_rx_shutdown(void) 555 void cvm_oct_rx_shutdown(void)
554 { 556 {
555 int i; 557 int i;
556 /* Shutdown all of the NAPIs */ 558 /* Shutdown all of the NAPIs */
557 for_each_possible_cpu(i) 559 for_each_possible_cpu(i)
558 netif_napi_del(&cvm_oct_napi[i].napi); 560 netif_napi_del(&cvm_oct_napi[i].napi);
559 } 561 }
560 562
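Every DEBUGPRINT site in this file becomes a printk_ratelimited() call, with <linux/ratelimit.h> added to the includes. The functional difference: printk_ratelimit() throttles against one global ratelimit state shared by every caller, so a noisy path can starve unrelated messages, while printk_ratelimited() expands to a static per-call-site state. A minimal sketch of both forms; the DEBUGPRINT definition below is an assumption about how the old wrapper was built, while the two rate-limit calls are the actual kernel API:

        #include <linux/kernel.h>
        #include <linux/ratelimit.h>

        /* Old form: all users share a single global ratelimit state. */
        #define DEBUGPRINT(format, ...)                          \
                do {                                             \
                        if (printk_ratelimit())                  \
                                printk(format, ##__VA_ARGS__);   \
                } while (0)

        /* New form: each call site carries its own ratelimit state. */
        static void report_drop(int port)
        {
                printk_ratelimited("Port %d packet dropped\n", port);
        }
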
drivers/staging/octeon/ethernet-sgmii.c
1 /********************************************************************** 1 /**********************************************************************
2 * Author: Cavium Networks 2 * Author: Cavium Networks
3 * 3 *
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2007 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This file is distributed in the hope that it will be useful, but 13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more 16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software 20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/. 22 * or visit http://www.gnu.org/licenses/.
23 * 23 *
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26 **********************************************************************/ 26 **********************************************************************/
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/netdevice.h> 28 #include <linux/netdevice.h>
29 #include <linux/ratelimit.h>
29 #include <net/dst.h> 30 #include <net/dst.h>
30 31
31 #include <asm/octeon/octeon.h> 32 #include <asm/octeon/octeon.h>
32 33
33 #include "ethernet-defines.h" 34 #include "ethernet-defines.h"
34 #include "octeon-ethernet.h" 35 #include "octeon-ethernet.h"
35 #include "ethernet-util.h" 36 #include "ethernet-util.h"
36 37
37 #include "cvmx-helper.h" 38 #include "cvmx-helper.h"
38 39
39 #include "cvmx-gmxx-defs.h" 40 #include "cvmx-gmxx-defs.h"
40 41
41 int cvm_oct_sgmii_open(struct net_device *dev) 42 int cvm_oct_sgmii_open(struct net_device *dev)
42 { 43 {
43 union cvmx_gmxx_prtx_cfg gmx_cfg; 44 union cvmx_gmxx_prtx_cfg gmx_cfg;
44 struct octeon_ethernet *priv = netdev_priv(dev); 45 struct octeon_ethernet *priv = netdev_priv(dev);
45 int interface = INTERFACE(priv->port); 46 int interface = INTERFACE(priv->port);
46 int index = INDEX(priv->port); 47 int index = INDEX(priv->port);
47 cvmx_helper_link_info_t link_info; 48 cvmx_helper_link_info_t link_info;
48 49
49 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 50 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
50 gmx_cfg.s.en = 1; 51 gmx_cfg.s.en = 1;
51 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); 52 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
52 53
53 if (!octeon_is_simulation()) { 54 if (!octeon_is_simulation()) {
54 link_info = cvmx_helper_link_get(priv->port); 55 link_info = cvmx_helper_link_get(priv->port);
55 if (!link_info.s.link_up) 56 if (!link_info.s.link_up)
56 netif_carrier_off(dev); 57 netif_carrier_off(dev);
57 } 58 }
58 59
59 return 0; 60 return 0;
60 } 61 }
61 62
62 int cvm_oct_sgmii_stop(struct net_device *dev) 63 int cvm_oct_sgmii_stop(struct net_device *dev)
63 { 64 {
64 union cvmx_gmxx_prtx_cfg gmx_cfg; 65 union cvmx_gmxx_prtx_cfg gmx_cfg;
65 struct octeon_ethernet *priv = netdev_priv(dev); 66 struct octeon_ethernet *priv = netdev_priv(dev);
66 int interface = INTERFACE(priv->port); 67 int interface = INTERFACE(priv->port);
67 int index = INDEX(priv->port); 68 int index = INDEX(priv->port);
68 69
69 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 70 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
70 gmx_cfg.s.en = 0; 71 gmx_cfg.s.en = 0;
71 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); 72 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
72 return 0; 73 return 0;
73 } 74 }
74 75
75 static void cvm_oct_sgmii_poll(struct net_device *dev) 76 static void cvm_oct_sgmii_poll(struct net_device *dev)
76 { 77 {
77 struct octeon_ethernet *priv = netdev_priv(dev); 78 struct octeon_ethernet *priv = netdev_priv(dev);
78 cvmx_helper_link_info_t link_info; 79 cvmx_helper_link_info_t link_info;
79 80
80 link_info = cvmx_helper_link_get(priv->port); 81 link_info = cvmx_helper_link_get(priv->port);
81 if (link_info.u64 == priv->link_info) 82 if (link_info.u64 == priv->link_info)
82 return; 83 return;
83 84
84 link_info = cvmx_helper_link_autoconf(priv->port); 85 link_info = cvmx_helper_link_autoconf(priv->port);
85 priv->link_info = link_info.u64; 86 priv->link_info = link_info.u64;
86 87
87 /* Tell Linux */ 88 /* Tell Linux */
88 if (link_info.s.link_up) { 89 if (link_info.s.link_up) {
89 90
90 if (!netif_carrier_ok(dev)) 91 if (!netif_carrier_ok(dev))
91 netif_carrier_on(dev); 92 netif_carrier_on(dev);
92 if (priv->queue != -1) 93 if (priv->queue != -1)
93 DEBUGPRINT 94 printk_ratelimited
94 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", 95 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
95 dev->name, link_info.s.speed, 96 dev->name, link_info.s.speed,
96 (link_info.s.full_duplex) ? "Full" : "Half", 97 (link_info.s.full_duplex) ? "Full" : "Half",
97 priv->port, priv->queue); 98 priv->port, priv->queue);
98 else 99 else
99 DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", 100 printk_ratelimited
100 dev->name, link_info.s.speed, 101 ("%s: %u Mbps %s duplex, port %2d, POW\n",
101 (link_info.s.full_duplex) ? "Full" : "Half", 102 dev->name, link_info.s.speed,
102 priv->port); 103 (link_info.s.full_duplex) ? "Full" : "Half",
104 priv->port);
103 } else { 105 } else {
104 if (netif_carrier_ok(dev)) 106 if (netif_carrier_ok(dev))
105 netif_carrier_off(dev); 107 netif_carrier_off(dev);
106 DEBUGPRINT("%s: Link down\n", dev->name); 108 printk_ratelimited("%s: Link down\n", dev->name);
107 } 109 }
108 } 110 }
109 111
110 int cvm_oct_sgmii_init(struct net_device *dev) 112 int cvm_oct_sgmii_init(struct net_device *dev)
111 { 113 {
112 struct octeon_ethernet *priv = netdev_priv(dev); 114 struct octeon_ethernet *priv = netdev_priv(dev);
113 cvm_oct_common_init(dev); 115 cvm_oct_common_init(dev);
114 dev->netdev_ops->ndo_stop(dev); 116 dev->netdev_ops->ndo_stop(dev);
115 if (!octeon_is_simulation() && priv->phydev == NULL) 117 if (!octeon_is_simulation() && priv->phydev == NULL)
116 priv->poll = cvm_oct_sgmii_poll; 118 priv->poll = cvm_oct_sgmii_poll;
117 119
118 /* FIXME: Need autoneg logic */ 120 /* FIXME: Need autoneg logic */
119 return 0; 121 return 0;
120 } 122 }
121 123
122 void cvm_oct_sgmii_uninit(struct net_device *dev) 124 void cvm_oct_sgmii_uninit(struct net_device *dev)
123 { 125 {
124 cvm_oct_common_uninit(dev); 126 cvm_oct_common_uninit(dev);
125 } 127 }
126 128
drivers/staging/octeon/ethernet-tx.c
1 /********************************************************************* 1 /*********************************************************************
2 * Author: Cavium Networks 2 * Author: Cavium Networks
3 * 3 *
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2010 Cavium Networks 7 * Copyright (c) 2003-2010 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This file is distributed in the hope that it will be useful, but 13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more 16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software 20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/. 22 * or visit http://www.gnu.org/licenses/.
23 * 23 *
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26 *********************************************************************/ 26 *********************************************************************/
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/kernel.h> 28 #include <linux/kernel.h>
29 #include <linux/netdevice.h> 29 #include <linux/netdevice.h>
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/etherdevice.h> 31 #include <linux/etherdevice.h>
32 #include <linux/ip.h> 32 #include <linux/ip.h>
33 #include <linux/ratelimit.h>
33 #include <linux/string.h> 34 #include <linux/string.h>
34 #include <net/dst.h> 35 #include <net/dst.h>
35 #ifdef CONFIG_XFRM 36 #ifdef CONFIG_XFRM
36 #include <linux/xfrm.h> 37 #include <linux/xfrm.h>
37 #include <net/xfrm.h> 38 #include <net/xfrm.h>
38 #endif /* CONFIG_XFRM */ 39 #endif /* CONFIG_XFRM */
39 40
40 #include <asm/atomic.h> 41 #include <asm/atomic.h>
41 42
42 #include <asm/octeon/octeon.h> 43 #include <asm/octeon/octeon.h>
43 44
44 #include "ethernet-defines.h" 45 #include "ethernet-defines.h"
45 #include "octeon-ethernet.h" 46 #include "octeon-ethernet.h"
46 #include "ethernet-tx.h" 47 #include "ethernet-tx.h"
47 #include "ethernet-util.h" 48 #include "ethernet-util.h"
48 49
49 #include "cvmx-wqe.h" 50 #include "cvmx-wqe.h"
50 #include "cvmx-fau.h" 51 #include "cvmx-fau.h"
51 #include "cvmx-pip.h" 52 #include "cvmx-pip.h"
52 #include "cvmx-pko.h" 53 #include "cvmx-pko.h"
53 #include "cvmx-helper.h" 54 #include "cvmx-helper.h"
54 55
55 #include "cvmx-gmxx-defs.h" 56 #include "cvmx-gmxx-defs.h"
56 57
57 #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb)) 58 #define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
58 59
59 /* 60 /*
60 * You can define GET_SKBUFF_QOS() to override how the skbuff output 61 * You can define GET_SKBUFF_QOS() to override how the skbuff output
61 * function determines which output queue is used. The default 62 * function determines which output queue is used. The default
62 * implementation always uses the base queue for the port. If, for 63 * implementation always uses the base queue for the port. If, for
63 * example, you wanted to use the skb->priority field, define 64 * example, you wanted to use the skb->priority field, define
64 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority) 65 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
65 */ 66 */
66 #ifndef GET_SKBUFF_QOS 67 #ifndef GET_SKBUFF_QOS
67 #define GET_SKBUFF_QOS(skb) 0 68 #define GET_SKBUFF_QOS(skb) 0
68 #endif 69 #endif
69 70
70 static void cvm_oct_tx_do_cleanup(unsigned long arg); 71 static void cvm_oct_tx_do_cleanup(unsigned long arg);
71 static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0); 72 static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
72 73
73 /* Maximum number of SKBs to try to free per xmit packet. */ 74 /* Maximum number of SKBs to try to free per xmit packet. */
74 #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2) 75 #define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
75 76
76 static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau) 77 static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
77 { 78 {
78 int32_t undo; 79 int32_t undo;
79 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE; 80 undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
80 if (undo > 0) 81 if (undo > 0)
81 cvmx_fau_atomic_add32(fau, -undo); 82 cvmx_fau_atomic_add32(fau, -undo);
82 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free; 83 skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
83 return skb_to_free; 84 return skb_to_free;
84 } 85 }
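The clamp/undo arithmetic in cvm_oct_adjust_skb_to_free() is dense: the caller has just fetch-and-added MAX_SKB_TO_FREE to the FAU counter (which the hardware decrements once per transmitted packet whose skb software must free, via the PKO reg0/subone0 setup visible in cvm_oct_xmit below), and the helper backs out whatever part of that add exceeded the packets actually pending, returning a count clamped to [0, MAX_SKB_TO_FREE]. A plain user-space walk-through of the same arithmetic; adjust(), counter, and the value 32 are illustrative stand-ins, with the FAU modeled as an int:

        #include <stdio.h>

        #define MAX_SKB_TO_FREE 32 /* stand-in; the driver uses MAX_OUT_QUEUE_DEPTH * 2 */

        /* Same arithmetic as cvm_oct_adjust_skb_to_free(), on a plain int. */
        static int adjust(int fetched, int *counter)
        {
                int undo = fetched > 0 ? MAX_SKB_TO_FREE : fetched + MAX_SKB_TO_FREE;
                if (undo > 0)
                        *counter -= undo; /* models cvmx_fau_atomic_add32(fau, -undo) */
                return -fetched > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -fetched;
        }

        int main(void)
        {
                /* Hardware decremented the counter 3 times; the caller then added 32. */
                int counter = -3 + MAX_SKB_TO_FREE;
                int freed = adjust(-3, &counter);

                printf("free %d skbs, counter now %d\n", freed, counter);
                /* prints: free 3 skbs, counter now 0 */
                return 0;
        }
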
85 86
86 static void cvm_oct_kick_tx_poll_watchdog(void) 87 static void cvm_oct_kick_tx_poll_watchdog(void)
87 { 88 {
88 union cvmx_ciu_timx ciu_timx; 89 union cvmx_ciu_timx ciu_timx;
89 ciu_timx.u64 = 0; 90 ciu_timx.u64 = 0;
90 ciu_timx.s.one_shot = 1; 91 ciu_timx.s.one_shot = 1;
91 ciu_timx.s.len = cvm_oct_tx_poll_interval; 92 ciu_timx.s.len = cvm_oct_tx_poll_interval;
92 cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64); 93 cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
93 } 94 }
94 95
95 void cvm_oct_free_tx_skbs(struct net_device *dev) 96 void cvm_oct_free_tx_skbs(struct net_device *dev)
96 { 97 {
97 int32_t skb_to_free; 98 int32_t skb_to_free;
98 int qos, queues_per_port; 99 int qos, queues_per_port;
99 int total_freed = 0; 100 int total_freed = 0;
100 int total_remaining = 0; 101 int total_remaining = 0;
101 unsigned long flags; 102 unsigned long flags;
102 struct octeon_ethernet *priv = netdev_priv(dev); 103 struct octeon_ethernet *priv = netdev_priv(dev);
103 104
104 queues_per_port = cvmx_pko_get_num_queues(priv->port); 105 queues_per_port = cvmx_pko_get_num_queues(priv->port);
105 /* Drain any pending packets in the free list */ 106 /* Drain any pending packets in the free list */
106 for (qos = 0; qos < queues_per_port; qos++) { 107 for (qos = 0; qos < queues_per_port; qos++) {
107 if (skb_queue_len(&priv->tx_free_list[qos]) == 0) 108 if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
108 continue; 109 continue;
109 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE); 110 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
110 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); 111 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
111 112
112 113
113 total_freed += skb_to_free; 114 total_freed += skb_to_free;
114 if (skb_to_free > 0) { 115 if (skb_to_free > 0) {
115 struct sk_buff *to_free_list = NULL; 116 struct sk_buff *to_free_list = NULL;
116 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 117 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
117 while (skb_to_free > 0) { 118 while (skb_to_free > 0) {
118 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); 119 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
119 t->next = to_free_list; 120 t->next = to_free_list;
120 to_free_list = t; 121 to_free_list = t;
121 skb_to_free--; 122 skb_to_free--;
122 } 123 }
123 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); 124 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
124 /* Do the actual freeing outside of the lock. */ 125 /* Do the actual freeing outside of the lock. */
125 while (to_free_list) { 126 while (to_free_list) {
126 struct sk_buff *t = to_free_list; 127 struct sk_buff *t = to_free_list;
127 to_free_list = to_free_list->next; 128 to_free_list = to_free_list->next;
128 dev_kfree_skb_any(t); 129 dev_kfree_skb_any(t);
129 } 130 }
130 } 131 }
131 total_remaining += skb_queue_len(&priv->tx_free_list[qos]); 132 total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
132 } 133 }
133 if (total_freed >= 0 && netif_queue_stopped(dev)) 134 if (total_freed >= 0 && netif_queue_stopped(dev))
134 netif_wake_queue(dev); 135 netif_wake_queue(dev);
135 if (total_remaining) 136 if (total_remaining)
136 cvm_oct_kick_tx_poll_watchdog(); 137 cvm_oct_kick_tx_poll_watchdog();
137 } 138 }
138 139
139 /** 140 /**
140 * cvm_oct_xmit - transmit a packet 141 * cvm_oct_xmit - transmit a packet
141 * @skb: Packet to send 142 * @skb: Packet to send
142 * @dev: Device info structure 143 * @dev: Device info structure
143 * 144 *
144 * Returns: Always NETDEV_TX_OK 145 * Returns: Always NETDEV_TX_OK
145 */ 146 */
146 int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) 147 int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
147 { 148 {
148 cvmx_pko_command_word0_t pko_command; 149 cvmx_pko_command_word0_t pko_command;
149 union cvmx_buf_ptr hw_buffer; 150 union cvmx_buf_ptr hw_buffer;
150 uint64_t old_scratch; 151 uint64_t old_scratch;
151 uint64_t old_scratch2; 152 uint64_t old_scratch2;
152 int qos; 153 int qos;
153 int i; 154 int i;
154 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type; 155 enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
155 struct octeon_ethernet *priv = netdev_priv(dev); 156 struct octeon_ethernet *priv = netdev_priv(dev);
156 struct sk_buff *to_free_list; 157 struct sk_buff *to_free_list;
157 int32_t skb_to_free; 158 int32_t skb_to_free;
158 int32_t buffers_to_free; 159 int32_t buffers_to_free;
159 u32 total_to_clean; 160 u32 total_to_clean;
160 unsigned long flags; 161 unsigned long flags;
161 #if REUSE_SKBUFFS_WITHOUT_FREE 162 #if REUSE_SKBUFFS_WITHOUT_FREE
162 unsigned char *fpa_head; 163 unsigned char *fpa_head;
163 #endif 164 #endif
164 165
165 /* 166 /*
166 * Prefetch the private data structure. It is larger that one 167 * Prefetch the private data structure. It is larger that one
167 * cache line. 168 * cache line.
168 */ 169 */
169 prefetch(priv); 170 prefetch(priv);
170 171
171 /* 172 /*
172 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to 173 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
173 * completely remove "qos" in the event neither interface 174 * completely remove "qos" in the event neither interface
174 * supports multiple queues per port. 175 * supports multiple queues per port.
175 */ 176 */
176 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) || 177 if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
177 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) { 178 (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
178 qos = GET_SKBUFF_QOS(skb); 179 qos = GET_SKBUFF_QOS(skb);
179 if (qos <= 0) 180 if (qos <= 0)
180 qos = 0; 181 qos = 0;
181 else if (qos >= cvmx_pko_get_num_queues(priv->port)) 182 else if (qos >= cvmx_pko_get_num_queues(priv->port))
182 qos = 0; 183 qos = 0;
183 } else 184 } else
184 qos = 0; 185 qos = 0;
185 186
186 if (USE_ASYNC_IOBDMA) { 187 if (USE_ASYNC_IOBDMA) {
187 /* Save scratch in case userspace is using it */ 188 /* Save scratch in case userspace is using it */
188 CVMX_SYNCIOBDMA; 189 CVMX_SYNCIOBDMA;
189 old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 190 old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
190 old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); 191 old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
191 192
192 /* 193 /*
193 * Fetch and increment the number of packets to be 194 * Fetch and increment the number of packets to be
194 * freed. 195 * freed.
195 */ 196 */
196 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8, 197 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
197 FAU_NUM_PACKET_BUFFERS_TO_FREE, 198 FAU_NUM_PACKET_BUFFERS_TO_FREE,
198 0); 199 0);
199 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, 200 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
200 priv->fau + qos * 4, 201 priv->fau + qos * 4,
201 MAX_SKB_TO_FREE); 202 MAX_SKB_TO_FREE);
202 } 203 }
203 204
204 /* 205 /*
205 * We have space for 6 segment pointers. If there will be more 206 * We have space for 6 segment pointers. If there will be more
206 * than that, we must linearize. 207 * than that, we must linearize.
207 */ 208 */
208 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) { 209 if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
209 if (unlikely(__skb_linearize(skb))) { 210 if (unlikely(__skb_linearize(skb))) {
210 queue_type = QUEUE_DROP; 211 queue_type = QUEUE_DROP;
211 if (USE_ASYNC_IOBDMA) { 212 if (USE_ASYNC_IOBDMA) {
212 /* Get the number of skbuffs in use by the hardware */ 213 /* Get the number of skbuffs in use by the hardware */
213 CVMX_SYNCIOBDMA; 214 CVMX_SYNCIOBDMA;
214 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 215 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
215 } else { 216 } else {
216 /* Get the number of skbuffs in use by the hardware */ 217 /* Get the number of skbuffs in use by the hardware */
217 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 218 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
218 MAX_SKB_TO_FREE); 219 MAX_SKB_TO_FREE);
219 } 220 }
220 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4); 221 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
221 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 222 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
222 goto skip_xmit; 223 goto skip_xmit;
223 } 224 }
224 } 225 }
225 226
226 /* 227 /*
227 * The CN3XXX series of parts has an erratum (GMX-401) which 228 * The CN3XXX series of parts has an erratum (GMX-401) which
228 * causes the GMX block to hang if a collision occurs towards 229 * causes the GMX block to hang if a collision occurs towards
229 * the end of a <68 byte packet. As a workaround for this, we 230 * the end of a <68 byte packet. As a workaround for this, we
230 * pad packets to be 68 bytes whenever we are in half duplex 231 * pad packets to be 68 bytes whenever we are in half duplex
231 * mode. We don't handle the case of having a small packet but 232 * mode. We don't handle the case of having a small packet but
232 * no room to add the padding. The kernel should always give 233 * no room to add the padding. The kernel should always give
233 * us at least a cache line. 234 * us at least a cache line.
234 */ 235 */
235 if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) { 236 if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
236 union cvmx_gmxx_prtx_cfg gmx_prt_cfg; 237 union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
237 int interface = INTERFACE(priv->port); 238 int interface = INTERFACE(priv->port);
238 int index = INDEX(priv->port); 239 int index = INDEX(priv->port);
239 240
240 if (interface < 2) { 241 if (interface < 2) {
241 /* We only need to pad packet in half duplex mode */ 242 /* We only need to pad packet in half duplex mode */
242 gmx_prt_cfg.u64 = 243 gmx_prt_cfg.u64 =
243 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 244 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
244 if (gmx_prt_cfg.s.duplex == 0) { 245 if (gmx_prt_cfg.s.duplex == 0) {
245 int add_bytes = 64 - skb->len; 246 int add_bytes = 64 - skb->len;
246 if ((skb_tail_pointer(skb) + add_bytes) <= 247 if ((skb_tail_pointer(skb) + add_bytes) <=
247 skb_end_pointer(skb)) 248 skb_end_pointer(skb))
248 memset(__skb_put(skb, add_bytes), 0, 249 memset(__skb_put(skb, add_bytes), 0,
249 add_bytes); 250 add_bytes);
250 } 251 }
251 } 252 }
252 } 253 }
253 254
254 /* Build the PKO command */ 255 /* Build the PKO command */
255 pko_command.u64 = 0; 256 pko_command.u64 = 0;
256 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */ 257 pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
257 pko_command.s.segs = 1; 258 pko_command.s.segs = 1;
258 pko_command.s.total_bytes = skb->len; 259 pko_command.s.total_bytes = skb->len;
259 pko_command.s.size0 = CVMX_FAU_OP_SIZE_32; 260 pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
260 pko_command.s.subone0 = 1; 261 pko_command.s.subone0 = 1;
261 262
262 pko_command.s.dontfree = 1; 263 pko_command.s.dontfree = 1;
263 264
264 /* Build the PKO buffer pointer */ 265 /* Build the PKO buffer pointer */
265 hw_buffer.u64 = 0; 266 hw_buffer.u64 = 0;
266 if (skb_shinfo(skb)->nr_frags == 0) { 267 if (skb_shinfo(skb)->nr_frags == 0) {
267 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); 268 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
268 hw_buffer.s.pool = 0; 269 hw_buffer.s.pool = 0;
269 hw_buffer.s.size = skb->len; 270 hw_buffer.s.size = skb->len;
270 } else { 271 } else {
271 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); 272 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
272 hw_buffer.s.pool = 0; 273 hw_buffer.s.pool = 0;
273 hw_buffer.s.size = skb_headlen(skb); 274 hw_buffer.s.size = skb_headlen(skb);
274 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; 275 CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
275 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 276 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
276 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i; 277 struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
277 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset)); 278 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
278 hw_buffer.s.size = fs->size; 279 hw_buffer.s.size = fs->size;
279 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; 280 CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
280 } 281 }
281 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); 282 hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
282 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; 283 hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
283 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; 284 pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
284 pko_command.s.gather = 1; 285 pko_command.s.gather = 1;
285 goto dont_put_skbuff_in_hw; 286 goto dont_put_skbuff_in_hw;
286 } 287 }
287 288
288 /* 289 /*
289 * See if we can put this skb in the FPA pool. Any strange 290 * See if we can put this skb in the FPA pool. Any strange
290 * behavior from the Linux networking stack will most likely 291 * behavior from the Linux networking stack will most likely
291 * be caused by a bug in the following code. If some field is 292 * be caused by a bug in the following code. If some field is
292 * in use by the network stack and gets carried over when a 293 * in use by the network stack and gets carried over when a
293 * buffer is reused, bad things may happen. If in doubt and 294 * buffer is reused, bad things may happen. If in doubt and
294 * you don't need the absolute best performance, disable the 295 * you don't need the absolute best performance, disable the
295 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has 296 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
296 * shown a 25% increase in performance under some loads. 297 * shown a 25% increase in performance under some loads.
297 */ 298 */
298 #if REUSE_SKBUFFS_WITHOUT_FREE 299 #if REUSE_SKBUFFS_WITHOUT_FREE
299 fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f); 300 fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
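/*
 * The line above rounds up to a 128-byte aligned address (the FPA's
 * cache-line unit) sitting 129..256 bytes past skb->head, which
 * leaves headroom for the sk_buff pointer stored just below
 * fpa_head a few lines down.
 */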
300 if (unlikely(skb->data < fpa_head)) { 301 if (unlikely(skb->data < fpa_head)) {
301 /* 302 /*
302 * printk("TX buffer beginning can't meet FPA 303 * printk("TX buffer beginning can't meet FPA
303 * alignment constraints\n"); 304 * alignment constraints\n");
304 */ 305 */
305 goto dont_put_skbuff_in_hw; 306 goto dont_put_skbuff_in_hw;
306 } 307 }
307 if (unlikely 308 if (unlikely
308 ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) { 309 ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
309 /* 310 /*
310 printk("TX buffer isn't large enough for the FPA\n"); 311 printk("TX buffer isn't large enough for the FPA\n");
311 */ 312 */
312 goto dont_put_skbuff_in_hw; 313 goto dont_put_skbuff_in_hw;
313 } 314 }
314 if (unlikely(skb_shared(skb))) { 315 if (unlikely(skb_shared(skb))) {
315 /* 316 /*
316 printk("TX buffer sharing data with someone else\n"); 317 printk("TX buffer sharing data with someone else\n");
317 */ 318 */
318 goto dont_put_skbuff_in_hw; 319 goto dont_put_skbuff_in_hw;
319 } 320 }
320 if (unlikely(skb_cloned(skb))) { 321 if (unlikely(skb_cloned(skb))) {
321 /* 322 /*
322 printk("TX buffer has been cloned\n"); 323 printk("TX buffer has been cloned\n");
323 */ 324 */
324 goto dont_put_skbuff_in_hw; 325 goto dont_put_skbuff_in_hw;
325 } 326 }
326 if (unlikely(skb_header_cloned(skb))) { 327 if (unlikely(skb_header_cloned(skb))) {
327 /* 328 /*
328 printk("TX buffer header has been cloned\n"); 329 printk("TX buffer header has been cloned\n");
329 */ 330 */
330 goto dont_put_skbuff_in_hw; 331 goto dont_put_skbuff_in_hw;
331 } 332 }
332 if (unlikely(skb->destructor)) { 333 if (unlikely(skb->destructor)) {
333 /* 334 /*
334 printk("TX buffer has a destructor\n"); 335 printk("TX buffer has a destructor\n");
335 */ 336 */
336 goto dont_put_skbuff_in_hw; 337 goto dont_put_skbuff_in_hw;
337 } 338 }
338 if (unlikely(skb_shinfo(skb)->nr_frags)) { 339 if (unlikely(skb_shinfo(skb)->nr_frags)) {
339 /* 340 /*
340 printk("TX buffer has fragments\n"); 341 printk("TX buffer has fragments\n");
341 */ 342 */
342 goto dont_put_skbuff_in_hw; 343 goto dont_put_skbuff_in_hw;
343 } 344 }
344 if (unlikely 345 if (unlikely
345 (skb->truesize != 346 (skb->truesize !=
346 sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { 347 sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
347 /* 348 /*
348 printk("TX buffer truesize has been changed\n"); 349 printk("TX buffer truesize has been changed\n");
349 */ 350 */
350 goto dont_put_skbuff_in_hw; 351 goto dont_put_skbuff_in_hw;
351 } 352 }
352 353
353 /* 354 /*
354 * We can use this buffer in the FPA. We don't need the FAU 355 * We can use this buffer in the FPA. We don't need the FAU
355 * update anymore 356 * update anymore
356 */ 357 */
357 pko_command.s.dontfree = 0; 358 pko_command.s.dontfree = 0;
358 359
359 hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7); 360 hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
360 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb; 361 *(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
361 362
362 /* 363 /*
363 * The skbuff will be reused without ever being freed. We must 364 * The skbuff will be reused without ever being freed. We must
364 * clean up a bunch of core things. 365 * clean up a bunch of core things.
365 */ 366 */
366 dst_release(skb_dst(skb)); 367 dst_release(skb_dst(skb));
367 skb_dst_set(skb, NULL); 368 skb_dst_set(skb, NULL);
368 #ifdef CONFIG_XFRM 369 #ifdef CONFIG_XFRM
369 secpath_put(skb->sp); 370 secpath_put(skb->sp);
370 skb->sp = NULL; 371 skb->sp = NULL;
371 #endif 372 #endif
372 nf_reset(skb); 373 nf_reset(skb);
373 374
374 #ifdef CONFIG_NET_SCHED 375 #ifdef CONFIG_NET_SCHED
375 skb->tc_index = 0; 376 skb->tc_index = 0;
376 #ifdef CONFIG_NET_CLS_ACT 377 #ifdef CONFIG_NET_CLS_ACT
377 skb->tc_verd = 0; 378 skb->tc_verd = 0;
378 #endif /* CONFIG_NET_CLS_ACT */ 379 #endif /* CONFIG_NET_CLS_ACT */
379 #endif /* CONFIG_NET_SCHED */ 380 #endif /* CONFIG_NET_SCHED */
380 #endif /* REUSE_SKBUFFS_WITHOUT_FREE */ 381 #endif /* REUSE_SKBUFFS_WITHOUT_FREE */
381 382
382 dont_put_skbuff_in_hw: 383 dont_put_skbuff_in_hw:
383 384
384 /* Check if we can use the hardware checksumming */ 385 /* Check if we can use the hardware checksumming */
385 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) && 386 if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
386 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) && 387 (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
387 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14)) 388 ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
388 && ((ip_hdr(skb)->protocol == IPPROTO_TCP) 389 && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
389 || (ip_hdr(skb)->protocol == IPPROTO_UDP))) { 390 || (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
390 /* Use hardware checksum calc */ 391 /* Use hardware checksum calc */
391 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1; 392 pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
392 } 393 }
393 394
394 if (USE_ASYNC_IOBDMA) { 395 if (USE_ASYNC_IOBDMA) {
395 /* Get the number of skbuffs in use by the hardware */ 396 /* Get the number of skbuffs in use by the hardware */
396 CVMX_SYNCIOBDMA; 397 CVMX_SYNCIOBDMA;
397 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 398 skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
398 buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8); 399 buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
399 } else { 400 } else {
400 /* Get the number of skbuffs in use by the hardware */ 401 /* Get the number of skbuffs in use by the hardware */
401 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 402 skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
402 MAX_SKB_TO_FREE); 403 MAX_SKB_TO_FREE);
403 buffers_to_free = 404 buffers_to_free =
404 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0); 405 cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
405 } 406 }
406 407
407 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4); 408 skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
408 409
409 /* 410 /*
410 * If we're sending faster than the receive side can free them, 411 * If we're sending faster than the receive side can free them,
411 * then don't do the HW free. 412 * then don't do the HW free.
412 */ 413 */
413 if ((buffers_to_free < -100) && !pko_command.s.dontfree) 414 if ((buffers_to_free < -100) && !pko_command.s.dontfree)
414 pko_command.s.dontfree = 1; 415 pko_command.s.dontfree = 1;
415 416
416 if (pko_command.s.dontfree) { 417 if (pko_command.s.dontfree) {
417 queue_type = QUEUE_CORE; 418 queue_type = QUEUE_CORE;
418 pko_command.s.reg0 = priv->fau+qos*4; 419 pko_command.s.reg0 = priv->fau+qos*4;
419 } else { 420 } else {
420 queue_type = QUEUE_HW; 421 queue_type = QUEUE_HW;
421 } 422 }
422 if (USE_ASYNC_IOBDMA) 423 if (USE_ASYNC_IOBDMA)
423 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1); 424 cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
424 425
425 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 426 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
426 427
427 /* Drop this packet if we have too many already queued to the HW */ 428 /* Drop this packet if we have too many already queued to the HW */
428 if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) { 429 if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
429 if (dev->tx_queue_len != 0) { 430 if (dev->tx_queue_len != 0) {
430 /* Drop the lock when notifying the core. */ 431 /* Drop the lock when notifying the core. */
431 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); 432 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
432 netif_stop_queue(dev); 433 netif_stop_queue(dev);
433 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 434 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
434 } else { 435 } else {
435 /* If not using normal queueing. */ 436 /* If not using normal queueing. */
436 queue_type = QUEUE_DROP; 437 queue_type = QUEUE_DROP;
437 goto skip_xmit; 438 goto skip_xmit;
438 } 439 }
439 } 440 }
440 441
441 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, 442 cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
442 CVMX_PKO_LOCK_NONE); 443 CVMX_PKO_LOCK_NONE);
443 444
444 /* Send the packet to the output queue */ 445 /* Send the packet to the output queue */
445 if (unlikely(cvmx_pko_send_packet_finish(priv->port, 446 if (unlikely(cvmx_pko_send_packet_finish(priv->port,
446 priv->queue + qos, 447 priv->queue + qos,
447 pko_command, hw_buffer, 448 pko_command, hw_buffer,
448 CVMX_PKO_LOCK_NONE))) { 449 CVMX_PKO_LOCK_NONE))) {
449 DEBUGPRINT("%s: Failed to send the packet\n", dev->name); 450 printk_ratelimited("%s: Failed to send the packet\n", dev->name);
450 queue_type = QUEUE_DROP; 451 queue_type = QUEUE_DROP;
451 } 452 }
452 skip_xmit: 453 skip_xmit:
453 to_free_list = NULL; 454 to_free_list = NULL;
454 455
455 switch (queue_type) { 456 switch (queue_type) {
456 case QUEUE_DROP: 457 case QUEUE_DROP:
457 skb->next = to_free_list; 458 skb->next = to_free_list;
458 to_free_list = skb; 459 to_free_list = skb;
459 priv->stats.tx_dropped++; 460 priv->stats.tx_dropped++;
460 break; 461 break;
461 case QUEUE_HW: 462 case QUEUE_HW:
462 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1); 463 cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
463 break; 464 break;
464 case QUEUE_CORE: 465 case QUEUE_CORE:
465 __skb_queue_tail(&priv->tx_free_list[qos], skb); 466 __skb_queue_tail(&priv->tx_free_list[qos], skb);
466 break; 467 break;
467 default: 468 default:
468 BUG(); 469 BUG();
469 } 470 }
470 471
471 while (skb_to_free > 0) { 472 while (skb_to_free > 0) {
472 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]); 473 struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
473 t->next = to_free_list; 474 t->next = to_free_list;
474 to_free_list = t; 475 to_free_list = t;
475 skb_to_free--; 476 skb_to_free--;
476 } 477 }
477 478
478 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); 479 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
479 480
480 /* Do the actual freeing outside of the lock. */ 481 /* Do the actual freeing outside of the lock. */
481 while (to_free_list) { 482 while (to_free_list) {
482 struct sk_buff *t = to_free_list; 483 struct sk_buff *t = to_free_list;
483 to_free_list = to_free_list->next; 484 to_free_list = to_free_list->next;
484 dev_kfree_skb_any(t); 485 dev_kfree_skb_any(t);
485 } 486 }
486 487
487 if (USE_ASYNC_IOBDMA) { 488 if (USE_ASYNC_IOBDMA) {
488 CVMX_SYNCIOBDMA; 489 CVMX_SYNCIOBDMA;
489 total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH); 490 total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
490 /* Restore the scratch area */ 491 /* Restore the scratch area */
491 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch); 492 cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
492 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2); 493 cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
493 } else { 494 } else {
494 total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1); 495 total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
495 } 496 }
496 497
497 if (total_to_clean & 0x3ff) { 498 if (total_to_clean & 0x3ff) {
498 /* 499 /*
499 * Schedule the cleanup tasklet every 1024 packets for 500 * Schedule the cleanup tasklet every 1024 packets for
500 * the pathological case of high traffic on one port 501 * the pathological case of high traffic on one port
501 * delaying clean up of packets on a different port 502 * delaying clean up of packets on a different port
502 * that is blocked waiting for the cleanup. 503 * that is blocked waiting for the cleanup.
503 */ 504 */
504 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); 505 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
505 } 506 }
506 507
507 cvm_oct_kick_tx_poll_watchdog(); 508 cvm_oct_kick_tx_poll_watchdog();
508 509
509 return NETDEV_TX_OK; 510 return NETDEV_TX_OK;
510 } 511 }
511 512
512 /** 513 /**
513 * cvm_oct_xmit_pow - transmit a packet to the POW 514 * cvm_oct_xmit_pow - transmit a packet to the POW
514 * @skb: Packet to send 515 * @skb: Packet to send
515 * @dev: Device info structure 516 * @dev: Device info structure
516 * 517 *
517 * Returns: Always zero 518 * Returns: Always zero
518 */ 519 */
519 int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev) 520 int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
520 { 521 {
521 struct octeon_ethernet *priv = netdev_priv(dev); 522 struct octeon_ethernet *priv = netdev_priv(dev);
522 void *packet_buffer; 523 void *packet_buffer;
523 void *copy_location; 524 void *copy_location;
524 525
525 /* Get a work queue entry */ 526 /* Get a work queue entry */
526 cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL); 527 cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
527 if (unlikely(work == NULL)) { 528 if (unlikely(work == NULL)) {
528 DEBUGPRINT("%s: Failed to allocate a work queue entry\n", 529 printk_ratelimited("%s: Failed to allocate a work "
529 dev->name); 530 "queue entry\n", dev->name);
530 priv->stats.tx_dropped++; 531 priv->stats.tx_dropped++;
531 dev_kfree_skb(skb); 532 dev_kfree_skb(skb);
532 return 0; 533 return 0;
533 } 534 }
534 535
535 /* Get a packet buffer */ 536 /* Get a packet buffer */
536 packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL); 537 packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
537 if (unlikely(packet_buffer == NULL)) { 538 if (unlikely(packet_buffer == NULL)) {
538 DEBUGPRINT("%s: Failed to allocate a packet buffer\n", 539 printk_ratelimited("%s: Failed to allocate a packet buffer\n",
539 dev->name); 540 dev->name);
540 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1)); 541 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
541 priv->stats.tx_dropped++; 542 priv->stats.tx_dropped++;
542 dev_kfree_skb(skb); 543 dev_kfree_skb(skb);
543 return 0; 544 return 0;
544 } 545 }
545 546
546 /* 547 /*
547 * Calculate where we need to copy the data to. We need to 548 * Calculate where we need to copy the data to. We need to
548 * leave 8 bytes for a next pointer (unused). We also need to 549 * leave 8 bytes for a next pointer (unused). We also need to
549 * include any configure skip. Then we need to align the IP 550 * include any configure skip. Then we need to align the IP
550 * packet src and dest into the same 64bit word. The below 551 * packet src and dest into the same 64bit word. The below
551 * calculation may add a little extra, but that doesn't 552 * calculation may add a little extra, but that doesn't
552 * hurt. 553 * hurt.
553 */ 554 */
554 copy_location = packet_buffer + sizeof(uint64_t); 555 copy_location = packet_buffer + sizeof(uint64_t);
555 copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6; 556 copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
556 557
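/*
 * Worked through, assuming the FPA buffer itself is 8-byte aligned:
 * (CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8 rounds the configured
 * skip up to a multiple of 8, so copy_location lands congruent to
 * 6 mod 8. Past the 14-byte Ethernet header, the IPv4 source
 * address (frame offset 26) then starts a naturally aligned 8-byte
 * word and the destination (offset 30) occupies the second half of
 * that same word.
 */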
557 /* 558 /*
558 * We have to copy the packet since whoever processes this 559 * We have to copy the packet since whoever processes this
559 * packet will free it to a hardware pool. We can't use the 560 * packet will free it to a hardware pool. We can't use the
560 * trick of counting outstanding packets like in 561 * trick of counting outstanding packets like in
561 * cvm_oct_xmit. 562 * cvm_oct_xmit.
562 */ 563 */
563 memcpy(copy_location, skb->data, skb->len); 564 memcpy(copy_location, skb->data, skb->len);
564 565
565 /* 566 /*
566 * Fill in some of the work queue fields. We may need to add 567 * Fill in some of the work queue fields. We may need to add
567 * more if the software at the other end needs them. 568 * more if the software at the other end needs them.
568 */ 569 */
569 work->hw_chksum = skb->csum; 570 work->hw_chksum = skb->csum;
570 work->len = skb->len; 571 work->len = skb->len;
571 work->ipprt = priv->port; 572 work->ipprt = priv->port;
572 work->qos = priv->port & 0x7; 573 work->qos = priv->port & 0x7;
573 work->grp = pow_send_group; 574 work->grp = pow_send_group;
574 work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE; 575 work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
575 work->tag = pow_send_group; /* FIXME */ 576 work->tag = pow_send_group; /* FIXME */
576 /* Default to zero. Assignments of zero below are commented out */ 577 /* Default to zero. Assignments of zero below are commented out */
577 work->word2.u64 = 0; 578 work->word2.u64 = 0;
578 work->word2.s.bufs = 1; 579 work->word2.s.bufs = 1;
579 work->packet_ptr.u64 = 0; 580 work->packet_ptr.u64 = 0;
580 work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location); 581 work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
581 work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL; 582 work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
582 work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE; 583 work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
583 work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7; 584 work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
584 585
585 if (skb->protocol == htons(ETH_P_IP)) { 586 if (skb->protocol == htons(ETH_P_IP)) {
586 work->word2.s.ip_offset = 14; 587 work->word2.s.ip_offset = 14;
587 #if 0 588 #if 0
588 work->word2.s.vlan_valid = 0; /* FIXME */ 589 work->word2.s.vlan_valid = 0; /* FIXME */
589 work->word2.s.vlan_cfi = 0; /* FIXME */ 590 work->word2.s.vlan_cfi = 0; /* FIXME */
590 work->word2.s.vlan_id = 0; /* FIXME */ 591 work->word2.s.vlan_id = 0; /* FIXME */
591 work->word2.s.dec_ipcomp = 0; /* FIXME */ 592 work->word2.s.dec_ipcomp = 0; /* FIXME */
592 #endif 593 #endif
593 work->word2.s.tcp_or_udp = 594 work->word2.s.tcp_or_udp =
594 (ip_hdr(skb)->protocol == IPPROTO_TCP) 595 (ip_hdr(skb)->protocol == IPPROTO_TCP)
595 || (ip_hdr(skb)->protocol == IPPROTO_UDP); 596 || (ip_hdr(skb)->protocol == IPPROTO_UDP);
596 #if 0 597 #if 0
597 /* FIXME */ 598 /* FIXME */
598 work->word2.s.dec_ipsec = 0; 599 work->word2.s.dec_ipsec = 0;
599 /* We only support IPv4 right now */ 600 /* We only support IPv4 right now */
600 work->word2.s.is_v6 = 0; 601 work->word2.s.is_v6 = 0;
601 /* Hardware would set to zero */ 602 /* Hardware would set to zero */
602 work->word2.s.software = 0; 603 work->word2.s.software = 0;
603 /* No error, packet is internal */ 604 /* No error, packet is internal */
604 work->word2.s.L4_error = 0; 605 work->word2.s.L4_error = 0;
605 #endif 606 #endif
606 work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) 607 work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
607 || (ip_hdr(skb)->frag_off == 608 || (ip_hdr(skb)->frag_off ==
608 1 << 14)); 609 1 << 14));
609 #if 0 610 #if 0
610 /* Assume Linux is sending a good packet */ 611 /* Assume Linux is sending a good packet */
611 work->word2.s.IP_exc = 0; 612 work->word2.s.IP_exc = 0;
612 #endif 613 #endif
613 work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST); 614 work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
614 work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST); 615 work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
615 #if 0 616 #if 0
616 /* This is an IP packet */ 617 /* This is an IP packet */
617 work->word2.s.not_IP = 0; 618 work->word2.s.not_IP = 0;
618 /* No error, packet is internal */ 619 /* No error, packet is internal */
619 work->word2.s.rcv_error = 0; 620 work->word2.s.rcv_error = 0;
620 /* No error, packet is internal */ 621 /* No error, packet is internal */
621 work->word2.s.err_code = 0; 622 work->word2.s.err_code = 0;
622 #endif 623 #endif
623 624
624 /* 625 /*
625 * When copying the data, include 4 bytes of the 626 * When copying the data, include 4 bytes of the
626 * ethernet header to align the same way hardware 627 * ethernet header to align the same way hardware
627 * does. 628 * does.
628 */ 629 */
629 memcpy(work->packet_data, skb->data + 10, 630 memcpy(work->packet_data, skb->data + 10,
630 sizeof(work->packet_data)); 631 sizeof(work->packet_data));
631 } else { 632 } else {
632 #if 0 633 #if 0
633 work->word2.snoip.vlan_valid = 0; /* FIXME */ 634 work->word2.snoip.vlan_valid = 0; /* FIXME */
634 work->word2.snoip.vlan_cfi = 0; /* FIXME */ 635 work->word2.snoip.vlan_cfi = 0; /* FIXME */
635 work->word2.snoip.vlan_id = 0; /* FIXME */ 636 work->word2.snoip.vlan_id = 0; /* FIXME */
636 work->word2.snoip.software = 0; /* Hardware would set to zero */ 637 work->word2.snoip.software = 0; /* Hardware would set to zero */
637 #endif 638 #endif
638 work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP); 639 work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
639 work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP); 640 work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
640 work->word2.snoip.is_bcast = 641 work->word2.snoip.is_bcast =
641 (skb->pkt_type == PACKET_BROADCAST); 642 (skb->pkt_type == PACKET_BROADCAST);
642 work->word2.snoip.is_mcast = 643 work->word2.snoip.is_mcast =
643 (skb->pkt_type == PACKET_MULTICAST); 644 (skb->pkt_type == PACKET_MULTICAST);
644 work->word2.snoip.not_IP = 1; /* IP was done up above */ 645 work->word2.snoip.not_IP = 1; /* IP was done up above */
645 #if 0 646 #if 0
646 /* No error, packet is internal */ 647 /* No error, packet is internal */
647 work->word2.snoip.rcv_error = 0; 648 work->word2.snoip.rcv_error = 0;
648 /* No error, packet is internal */ 649 /* No error, packet is internal */
649 work->word2.snoip.err_code = 0; 650 work->word2.snoip.err_code = 0;
650 #endif 651 #endif
651 memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); 652 memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
652 } 653 }
653 654
654 /* Submit the packet to the POW */ 655 /* Submit the packet to the POW */
655 cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos, 656 cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
656 work->grp); 657 work->grp);
657 priv->stats.tx_packets++; 658 priv->stats.tx_packets++;
658 priv->stats.tx_bytes += skb->len; 659 priv->stats.tx_bytes += skb->len;
659 dev_kfree_skb(skb); 660 dev_kfree_skb(skb);
660 return 0; 661 return 0;
661 } 662 }
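
The offset arithmetic at the top of cvm_oct_xmit_pow() is easier to see with concrete numbers. The packet buffer comes out of the FPA cache-line aligned; skipping 8 bytes for the next pointer, rounding CVMX_HELPER_FIRST_MBUFF_SKIP up to a multiple of 8, and then adding 6 starts the Ethernet header 6 bytes into a 64-bit word. After the 14-byte Ethernet header, the IPv4 source and destination addresses (header offsets 12 and 16) then share a single 64-bit word, which is what the comment above is after; packet_ptr.s.back records the distance from the data pointer back to the buffer start in 128-byte units. A minimal standalone check of the alignment claim (plain hosted C, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned eth   = 6;        /* Ethernet header starts 6 bytes into a word */
		unsigned ip    = eth + 14; /* IPv4 header after the 14-byte Ethernet header */
		unsigned saddr = ip + 12;  /* source address: bytes 32..35 */
		unsigned daddr = ip + 16;  /* destination address: bytes 36..39 */

		/* both addresses fall inside the 64-bit word covering bytes 32..39 */
		assert(saddr / 8 == (daddr + 3) / 8);
		return 0;
	}
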
662 663
663 /** 664 /**
664 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX. 665 * cvm_oct_tx_shutdown_dev - free all skb that are currently queued for TX.
665 * @dev: Device being shutdown 666 * @dev: Device being shutdown
666 * 667 *
667 */ 668 */
668 void cvm_oct_tx_shutdown_dev(struct net_device *dev) 669 void cvm_oct_tx_shutdown_dev(struct net_device *dev)
669 { 670 {
670 struct octeon_ethernet *priv = netdev_priv(dev); 671 struct octeon_ethernet *priv = netdev_priv(dev);
671 unsigned long flags; 672 unsigned long flags;
672 int qos; 673 int qos;
673 674
674 for (qos = 0; qos < 16; qos++) { 675 for (qos = 0; qos < 16; qos++) {
675 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags); 676 spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
676 while (skb_queue_len(&priv->tx_free_list[qos])) 677 while (skb_queue_len(&priv->tx_free_list[qos]))
677 dev_kfree_skb_any(__skb_dequeue 678 dev_kfree_skb_any(__skb_dequeue
678 (&priv->tx_free_list[qos])); 679 (&priv->tx_free_list[qos]));
679 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags); 680 spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
680 } 681 }
681 } 682 }
682 683
683 static void cvm_oct_tx_do_cleanup(unsigned long arg) 684 static void cvm_oct_tx_do_cleanup(unsigned long arg)
684 { 685 {
685 int port; 686 int port;
686 687
687 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) { 688 for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
688 if (cvm_oct_device[port]) { 689 if (cvm_oct_device[port]) {
689 struct net_device *dev = cvm_oct_device[port]; 690 struct net_device *dev = cvm_oct_device[port];
690 cvm_oct_free_tx_skbs(dev); 691 cvm_oct_free_tx_skbs(dev);
691 } 692 }
692 } 693 }
693 } 694 }
694 695
695 static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id) 696 static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
696 { 697 {
697 /* Disable the interrupt. */ 698 /* Disable the interrupt. */
698 cvmx_write_csr(CVMX_CIU_TIMX(1), 0); 699 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
699 /* Do the work in the tasklet. */ 700 /* Do the work in the tasklet. */
700 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet); 701 tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
701 return IRQ_HANDLED; 702 return IRQ_HANDLED;
702 } 703 }
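
cvm_oct_tx_cleanup_watchdog() above is the usual top-half/bottom-half split: the hard interrupt handler only silences the timer, and the actual skb cleanup runs later in tasklet context. Stripped to the pattern, it looks roughly like this (illustrative names; the driver declares its real tasklet elsewhere):

	#include <linux/interrupt.h>

	static void cleanup_work(unsigned long arg)
	{
		/* bottom half: do the slow cleanup outside hard-IRQ context */
	}
	static DECLARE_TASKLET(cleanup_tasklet, cleanup_work, 0);

	static irqreturn_t watchdog_irq(int irq, void *dev_id)
	{
		/* top half: quiet the interrupt source, then defer the work */
		tasklet_schedule(&cleanup_tasklet);
		return IRQ_HANDLED;
	}
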
703 704
704 void cvm_oct_tx_initialize(void) 705 void cvm_oct_tx_initialize(void)
705 { 706 {
706 int i; 707 int i;
707 708
708 /* Disable the interrupt. */ 709 /* Disable the interrupt. */
709 cvmx_write_csr(CVMX_CIU_TIMX(1), 0); 710 cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
710 /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */ 711 /* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
711 i = request_irq(OCTEON_IRQ_TIMER1, 712 i = request_irq(OCTEON_IRQ_TIMER1,
712 cvm_oct_tx_cleanup_watchdog, 0, 713 cvm_oct_tx_cleanup_watchdog, 0,
713 "Ethernet", cvm_oct_device); 714 "Ethernet", cvm_oct_device);
714 715
715 if (i) 716 if (i)
716 panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1); 717 panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
717 } 718 }
718 719
719 void cvm_oct_tx_shutdown(void) 720 void cvm_oct_tx_shutdown(void)
720 { 721 {
721 /* Free the interrupt handler */ 722 /* Free the interrupt handler */
722 free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device); 723 free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
723 } 724 }
724 725
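
The printk_ratelimited() calls introduced in this file differ from the old DEBUGPRINT() wrapper in where the throttling state lives. printk_ratelimit() shares one global rate-limit state across every caller in the kernel, so a noisy caller can silence unrelated messages, which is why its comment says not to use it. printk_ratelimited() instead keeps a static per-call-site state; its definition in the mainline printk headers is roughly the following (a sketch, not a verbatim copy of the header):

	#define printk_ratelimited(fmt, ...)  ({			\
		static DEFINE_RATELIMIT_STATE(_rs,			\
					      DEFAULT_RATELIMIT_INTERVAL, \
					      DEFAULT_RATELIMIT_BURST);	\
									\
		if (__ratelimit(&_rs))					\
			printk(fmt, ##__VA_ARGS__);			\
	})

Each expansion gets its own _rs, so a flood of "Failed to allocate a work queue entry" messages is throttled without hiding other diagnostics; the new <linux/ratelimit.h> includes pull in DEFINE_RATELIMIT_STATE and __ratelimit.
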
drivers/staging/octeon/ethernet-util.h
1 /********************************************************************** 1 /**********************************************************************
2 * Author: Cavium Networks 2 * Author: Cavium Networks
3 * 3 *
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2007 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This file is distributed in the hope that it will be useful, but 13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more 16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software 20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/. 22 * or visit http://www.gnu.org/licenses/.
23 * 23 *
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26 *********************************************************************/ 26 *********************************************************************/
27 27
28 #define DEBUGPRINT(format, ...) do { if (printk_ratelimit()) \
29 printk(format, ##__VA_ARGS__); \
30 } while (0)
31
32 /** 28 /**
33 * cvm_oct_get_buffer_ptr - convert packet data address to pointer 29 * cvm_oct_get_buffer_ptr - convert packet data address to pointer
34 * @packet_ptr: Packet data hardware address 30 * @packet_ptr: Packet data hardware address
35 * 31 *
36 * Returns Packet buffer pointer 32 * Returns Packet buffer pointer
37 */ 33 */
38 static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr) 34 static inline void *cvm_oct_get_buffer_ptr(union cvmx_buf_ptr packet_ptr)
39 { 35 {
40 return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back) 36 return cvmx_phys_to_ptr(((packet_ptr.s.addr >> 7) - packet_ptr.s.back)
41 << 7); 37 << 7);
42 } 38 }
43 39
44 /** 40 /**
45 * INTERFACE - convert IPD port to logical interface 41 * INTERFACE - convert IPD port to logical interface
46 * @ipd_port: Port to check 42 * @ipd_port: Port to check
47 * 43 *
48 * Returns Logical interface 44 * Returns Logical interface
49 */ 45 */
50 static inline int INTERFACE(int ipd_port) 46 static inline int INTERFACE(int ipd_port)
51 { 47 {
52 if (ipd_port < 32) /* Interface 0 or 1 for RGMII,GMII,SPI, etc */ 48 if (ipd_port < 32) /* Interface 0 or 1 for RGMII,GMII,SPI, etc */
53 return ipd_port >> 4; 49 return ipd_port >> 4;
54 else if (ipd_port < 36) /* Interface 2 for NPI */ 50 else if (ipd_port < 36) /* Interface 2 for NPI */
55 return 2; 51 return 2;
56 else if (ipd_port < 40) /* Interface 3 for loopback */ 52 else if (ipd_port < 40) /* Interface 3 for loopback */
57 return 3; 53 return 3;
58 else if (ipd_port == 40) /* Non-existent interface for POW0 */ 54 else if (ipd_port == 40) /* Non-existent interface for POW0 */
59 return 4; 55 return 4;
60 else 56 else
61 panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port); 57 panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
62 } 58 }
63 59
64 /** 60 /**
65 * INDEX - convert IPD/PKO port number to the port's interface index 61 * INDEX - convert IPD/PKO port number to the port's interface index
66 * @ipd_port: Port to check 62 * @ipd_port: Port to check
67 * 63 *
68 * Returns Index into interface port list 64 * Returns Index into interface port list
69 */ 65 */
70 static inline int INDEX(int ipd_port) 66 static inline int INDEX(int ipd_port)
71 { 67 {
72 if (ipd_port < 32) 68 if (ipd_port < 32)
73 return ipd_port & 15; 69 return ipd_port & 15;
74 else 70 else
75 return ipd_port & 3; 71 return ipd_port & 3;
76 } 72 }
77 73
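
The INTERFACE()/INDEX() pair above splits an IPD port number into an interface and a position on it: ports 0-31 cover interfaces 0 and 1 at 16 ports each, 32-35 the NPI interface, 36-39 loopback, and 40 the synthetic POW0 interface. A quick standalone check of the arithmetic (plain hosted C mirroring the helpers, not driver code):

	#include <assert.h>

	int main(void)
	{
		/* IPD port 18: RGMII/GMII/SPI range, 16 ports per interface */
		assert((18 >> 4) == 1);  /* INTERFACE(18) == 1 */
		assert((18 & 15) == 2);  /* INDEX(18) == 2 */

		/* IPD port 34: NPI range, indices masked to 4 per interface */
		assert((34 & 3) == 2);   /* INDEX(34) == 2, INTERFACE(34) == 2 */
		return 0;
	}
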
drivers/staging/octeon/ethernet-xaui.c
1 /********************************************************************** 1 /**********************************************************************
2 * Author: Cavium Networks 2 * Author: Cavium Networks
3 * 3 *
4 * Contact: support@caviumnetworks.com 4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK 5 * This file is part of the OCTEON SDK
6 * 6 *
7 * Copyright (c) 2003-2007 Cavium Networks 7 * Copyright (c) 2003-2007 Cavium Networks
8 * 8 *
9 * This file is free software; you can redistribute it and/or modify 9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as 10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 * 12 *
13 * This file is distributed in the hope that it will be useful, but 13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more 16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details. 17 * details.
18 * 18 *
19 * You should have received a copy of the GNU General Public License 19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software 20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/. 22 * or visit http://www.gnu.org/licenses/.
23 * 23 *
24 * This file may also be available under a different license from Cavium. 24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information 25 * Contact Cavium Networks for more information
26 **********************************************************************/ 26 **********************************************************************/
27 #include <linux/kernel.h> 27 #include <linux/kernel.h>
28 #include <linux/netdevice.h> 28 #include <linux/netdevice.h>
29 #include <linux/ratelimit.h>
29 #include <net/dst.h> 30 #include <net/dst.h>
30 31
31 #include <asm/octeon/octeon.h> 32 #include <asm/octeon/octeon.h>
32 33
33 #include "ethernet-defines.h" 34 #include "ethernet-defines.h"
34 #include "octeon-ethernet.h" 35 #include "octeon-ethernet.h"
35 #include "ethernet-util.h" 36 #include "ethernet-util.h"
36 37
37 #include "cvmx-helper.h" 38 #include "cvmx-helper.h"
38 39
39 #include "cvmx-gmxx-defs.h" 40 #include "cvmx-gmxx-defs.h"
40 41
41 int cvm_oct_xaui_open(struct net_device *dev) 42 int cvm_oct_xaui_open(struct net_device *dev)
42 { 43 {
43 union cvmx_gmxx_prtx_cfg gmx_cfg; 44 union cvmx_gmxx_prtx_cfg gmx_cfg;
44 struct octeon_ethernet *priv = netdev_priv(dev); 45 struct octeon_ethernet *priv = netdev_priv(dev);
45 int interface = INTERFACE(priv->port); 46 int interface = INTERFACE(priv->port);
46 int index = INDEX(priv->port); 47 int index = INDEX(priv->port);
47 cvmx_helper_link_info_t link_info; 48 cvmx_helper_link_info_t link_info;
48 49
49 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 50 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
50 gmx_cfg.s.en = 1; 51 gmx_cfg.s.en = 1;
51 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); 52 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
52 53
53 if (!octeon_is_simulation()) { 54 if (!octeon_is_simulation()) {
54 link_info = cvmx_helper_link_get(priv->port); 55 link_info = cvmx_helper_link_get(priv->port);
55 if (!link_info.s.link_up) 56 if (!link_info.s.link_up)
56 netif_carrier_off(dev); 57 netif_carrier_off(dev);
57 } 58 }
58 return 0; 59 return 0;
59 } 60 }
60 61
61 int cvm_oct_xaui_stop(struct net_device *dev) 62 int cvm_oct_xaui_stop(struct net_device *dev)
62 { 63 {
63 union cvmx_gmxx_prtx_cfg gmx_cfg; 64 union cvmx_gmxx_prtx_cfg gmx_cfg;
64 struct octeon_ethernet *priv = netdev_priv(dev); 65 struct octeon_ethernet *priv = netdev_priv(dev);
65 int interface = INTERFACE(priv->port); 66 int interface = INTERFACE(priv->port);
66 int index = INDEX(priv->port); 67 int index = INDEX(priv->port);
67 68
68 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface)); 69 gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
69 gmx_cfg.s.en = 0; 70 gmx_cfg.s.en = 0;
70 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64); 71 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
71 return 0; 72 return 0;
72 } 73 }
73 74
74 static void cvm_oct_xaui_poll(struct net_device *dev) 75 static void cvm_oct_xaui_poll(struct net_device *dev)
75 { 76 {
76 struct octeon_ethernet *priv = netdev_priv(dev); 77 struct octeon_ethernet *priv = netdev_priv(dev);
77 cvmx_helper_link_info_t link_info; 78 cvmx_helper_link_info_t link_info;
78 79
79 link_info = cvmx_helper_link_get(priv->port); 80 link_info = cvmx_helper_link_get(priv->port);
80 if (link_info.u64 == priv->link_info) 81 if (link_info.u64 == priv->link_info)
81 return; 82 return;
82 83
83 link_info = cvmx_helper_link_autoconf(priv->port); 84 link_info = cvmx_helper_link_autoconf(priv->port);
84 priv->link_info = link_info.u64; 85 priv->link_info = link_info.u64;
85 86
86 /* Tell Linux */ 87 /* Tell Linux */
87 if (link_info.s.link_up) { 88 if (link_info.s.link_up) {
88 89
89 if (!netif_carrier_ok(dev)) 90 if (!netif_carrier_ok(dev))
90 netif_carrier_on(dev); 91 netif_carrier_on(dev);
91 if (priv->queue != -1) 92 if (priv->queue != -1)
92 DEBUGPRINT 93 printk_ratelimited
93 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n", 94 ("%s: %u Mbps %s duplex, port %2d, queue %2d\n",
94 dev->name, link_info.s.speed, 95 dev->name, link_info.s.speed,
95 (link_info.s.full_duplex) ? "Full" : "Half", 96 (link_info.s.full_duplex) ? "Full" : "Half",
96 priv->port, priv->queue); 97 priv->port, priv->queue);
97 else 98 else
98 DEBUGPRINT("%s: %u Mbps %s duplex, port %2d, POW\n", 99 printk_ratelimited
99 dev->name, link_info.s.speed, 100 ("%s: %u Mbps %s duplex, port %2d, POW\n",
100 (link_info.s.full_duplex) ? "Full" : "Half", 101 dev->name, link_info.s.speed,
101 priv->port); 102 (link_info.s.full_duplex) ? "Full" : "Half",
103 priv->port);
102 } else { 104 } else {
103 if (netif_carrier_ok(dev)) 105 if (netif_carrier_ok(dev))
104 netif_carrier_off(dev); 106 netif_carrier_off(dev);
105 DEBUGPRINT("%s: Link down\n", dev->name); 107 printk_ratelimited("%s: Link down\n", dev->name);
106 } 108 }
107 } 109 }
108 110
109 int cvm_oct_xaui_init(struct net_device *dev) 111 int cvm_oct_xaui_init(struct net_device *dev)
110 { 112 {
111 struct octeon_ethernet *priv = netdev_priv(dev); 113 struct octeon_ethernet *priv = netdev_priv(dev);
112 cvm_oct_common_init(dev); 114 cvm_oct_common_init(dev);
113 dev->netdev_ops->ndo_stop(dev); 115 dev->netdev_ops->ndo_stop(dev);
114 if (!octeon_is_simulation() && priv->phydev == NULL) 116 if (!octeon_is_simulation() && priv->phydev == NULL)
115 priv->poll = cvm_oct_xaui_poll; 117 priv->poll = cvm_oct_xaui_poll;
116 118
117 return 0; 119 return 0;
118 } 120 }
119 121
120 void cvm_oct_xaui_uninit(struct net_device *dev) 122 void cvm_oct_xaui_uninit(struct net_device *dev)
121 { 123 {
122 cvm_oct_common_uninit(dev); 124 cvm_oct_common_uninit(dev);
123 } 125 }
124 126
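
With this last file converted, every throttled message site in the driver keeps its own budget. When cvm_oct_xaui_poll() sees a 10 Gbps link come up on, say, port 16 feeding queue 16, the format string above produces a line of the form (values illustrative):

	eth0: 10000 Mbps Full duplex, port 16, queue 16

and with the kernel's default rate-limit parameters (a burst of about 10 messages per 5-second window, assuming the defaults of this era), a flapping link can no longer exhaust the global printk_ratelimit() budget that all DEBUGPRINT() users previously shared.
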